diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -1052,6 +1052,23 @@
 defm : VPatBinarySDNode_VV_VX<urem, "PseudoVREMU", isSEWAware=1>;
 defm : VPatBinarySDNode_VV_VX<srem, "PseudoVREM", isSEWAware=1>;
 
+foreach vtiTowti = AllWidenableIntVectors in {
+  defvar vti = vtiTowti.Vti;
+  defvar wti = vtiTowti.Wti;
+  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
+                               GetVTypePredicates<wti>.Predicates) in {
+    def : Pat<
+      (vti.Vector
+        (riscv_trunc_vector_vl
+          (srem (wti.Vector (sext_oneuse (vti.Vector vti.RegClass:$rs1))),
+                (wti.Vector (sext_oneuse (vti.Vector vti.RegClass:$rs2)))),
+          (vti.Mask true_mask), (XLenVT srcvalue))),
+      (!cast<Instruction>("PseudoVREM_VV_"#vti.LMul.MX#"_E"#!shl(1, vti.Log2SEW))
+        (vti.Vector (IMPLICIT_DEF)),
+        vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
+  }
+}
+
 // 11.12. Vector Widening Integer Multiply Instructions
 defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, sext_oneuse,
                                    "PseudoVWMUL">;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
@@ -48,12 +48,8 @@
 define <vscale x 1 x i8> @vrem_vv_nxv1i8_sext_twice(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv1i8_sext_twice:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vsext.vf2 v10, v8
-; CHECK-NEXT:    vsext.vf2 v8, v9
-; CHECK-NEXT:    vrem.vv v8, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
   %sext_va = sext <vscale x 1 x i8> %va to <vscale x 1 x i16>
   %sext_vb = sext <vscale x 1 x i8> %vb to <vscale x 1 x i16>
@@ -106,12 +102,8 @@
 define <vscale x 2 x i8> @vrem_vv_nxv2i8_sext_twice(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv2i8_sext_twice:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vsext.vf2 v10, v8
-; CHECK-NEXT:    vsext.vf2 v8, v9
-; CHECK-NEXT:    vrem.vv v8, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
   %sext_va = sext <vscale x 2 x i8> %va to <vscale x 2 x i16>
   %sext_vb = sext <vscale x 2 x i8> %vb to <vscale x 2 x i16>
@@ -164,12 +156,8 @@
 define <vscale x 4 x i8> @vrem_vv_nxv4i8_sext_twice(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv4i8_sext_twice:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vsext.vf2 v10, v8
-; CHECK-NEXT:    vsext.vf2 v8, v9
-; CHECK-NEXT:    vrem.vv v8, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
   %sext_va = sext <vscale x 4 x i8> %va to <vscale x 4 x i16>
   %sext_vb = sext <vscale x 4 x i8> %vb to <vscale x 4 x i16>
@@ -222,12 +210,8 @@
 define <vscale x 8 x i8> @vrem_vv_nxv8i8_sext_twice(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv8i8_sext_twice:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vsext.vf2 v10, v8
-; CHECK-NEXT:    vsext.vf2 v12, v9
-; CHECK-NEXT:    vrem.vv v10, v10, v12
-; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
   %sext_va = sext <vscale x 8 x i8> %va to <vscale x 8 x i16>
   %sext_vb = sext <vscale x 8 x i8> %vb to <vscale x 8 x i16>
@@ -280,12 +264,8 @@
 define <vscale x 16 x i8> @vrem_vv_nxv16i8_sext_twice(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv16i8_sext_twice:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vsext.vf2 v12, v8
-; CHECK-NEXT:    vsext.vf2 v16, v10
-; CHECK-NEXT:    vrem.vv v12, v12, v16
-; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v10
 ; CHECK-NEXT:    ret
   %sext_va = sext <vscale x 16 x i8> %va to <vscale x 16 x i16>
   %sext_vb = sext <vscale x 16 x i8> %vb to <vscale x 16 x i16>
@@ -338,12 +318,8 @@
 define <vscale x 32 x i8> @vrem_vv_nxv32i8_sext_twice(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv32i8_sext_twice:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vsext.vf2 v16, v8
-; CHECK-NEXT:    vsext.vf2 v24, v12
-; CHECK-NEXT:    vrem.vv v16, v16, v24
-; CHECK-NEXT:    vsetvli zero, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v12
 ; CHECK-NEXT:    ret
   %sext_va = sext <vscale x 32 x i8> %va to <vscale x 32 x i16>
   %sext_vb = sext <vscale x 32 x i8> %vb to <vscale x 32 x i16>
@@ -450,12 +426,8 @@
 define <vscale x 1 x i16> @vrem_vv_nxv1i16_sext_twice(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv1i16_sext_twice:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vsext.vf2 v10, v8
-; CHECK-NEXT:    vsext.vf2 v8, v9
-; CHECK-NEXT:    vrem.vv v8, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
   %sext_va = sext <vscale x 1 x i16> %va to <vscale x 1 x i32>
   %sext_vb = sext <vscale x 1 x i16> %vb to <vscale x 1 x i32>
@@ -521,12 +493,8 @@
 define <vscale x 2 x i16> @vrem_vv_nxv2i16_sext_twice(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv2i16_sext_twice:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vsext.vf2 v10, v8
-; CHECK-NEXT:    vsext.vf2 v8, v9
-; CHECK-NEXT:    vrem.vv v8, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
   %sext_va = sext <vscale x 2 x i16> %va to <vscale x 2 x i32>
   %sext_vb = sext <vscale x 2 x i16> %vb to <vscale x 2 x i32>
@@ -592,12 +560,8 @@
 define <vscale x 4 x i16> @vrem_vv_nxv4i16_sext_twice(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv4i16_sext_twice:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vsext.vf2 v10, v8
-; CHECK-NEXT:    vsext.vf2 v12, v9
-; CHECK-NEXT:    vrem.vv v10, v10, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
   %sext_va = sext <vscale x 4 x i16> %va to <vscale x 4 x i32>
   %sext_vb = sext <vscale x 4 x i16> %vb to <vscale x 4 x i32>
@@ -663,12 +627,8 @@
 define <vscale x 8 x i16> @vrem_vv_nxv8i16_sext_twice(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv8i16_sext_twice:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vsext.vf2 v12, v8
-; CHECK-NEXT:    vsext.vf2 v16, v10
-; CHECK-NEXT:    vrem.vv v12, v12, v16
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v10
 ; CHECK-NEXT:    ret
   %sext_va = sext <vscale x 8 x i16> %va to <vscale x 8 x i32>
   %sext_vb = sext <vscale x 8 x i16> %vb to <vscale x 8 x i32>
@@ -734,12 +694,8 @@
 define <vscale x 16 x i16> @vrem_vv_nxv16i16_sext_twice(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv16i16_sext_twice:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vsext.vf2 v16, v8
-; CHECK-NEXT:    vsext.vf2 v24, v12
-; CHECK-NEXT:    vrem.vv v16, v16, v24
-; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v12
 ; CHECK-NEXT:    ret
   %sext_va = sext <vscale x 16 x i16> %va to <vscale x 16 x i32>
   %sext_vb = sext <vscale x 16 x i16> %vb to <vscale x 16 x i32>