diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -2227,6 +2227,25 @@
 defm : VPatBinaryVL_VV_VX<riscv_urem_vl, "PseudoVREMU", isSEWAware=1>;
 defm : VPatBinaryVL_VV_VX<riscv_srem_vl, "PseudoVREM", isSEWAware=1>;

+foreach vtiTowti = AllWidenableIntVectors in {
+  defvar vti = vtiTowti.Vti;
+  defvar wti = vtiTowti.Wti;
+  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
+                               GetVTypePredicates<wti>.Predicates) in {
+    def : Pat<
+      (vti.Vector
+        (riscv_trunc_vector_vl
+          (srem (wti.Vector (sext_oneuse (vti.Vector vti.RegClass:$rs2))),
+                (wti.Vector (sext_oneuse (vti.Vector vti.RegClass:$rs1)))),
+          (vti.Mask true_mask),
+          (XLenVT srcvalue))),
+      (!cast<Instruction>("PseudoVREM_VV_"#vti.LMul.MX#"_E"#!shl(1, vti.Log2SEW))
+        (vti.Vector (IMPLICIT_DEF)),
+        vti.RegClass:$rs2, vti.RegClass:$rs1,
+        vti.AVL, vti.Log2SEW, TA_MA)>;
+  }
+}
+
 // 11.12. Vector Widening Integer Multiply Instructions
 defm : VPatBinaryWVL_VV_VX<riscv_vwmul_vl, "PseudoVWMUL">;
 defm : VPatBinaryWVL_VV_VX<riscv_vwmulu_vl, "PseudoVWMULU">;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
@@ -14,6 +14,19 @@
   ret <vscale x 1 x i8> %vc
 }

+define <vscale x 1 x i8> @vrem_vv_nxv1i8_sext_twice(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
+; CHECK-LABEL: vrem_vv_nxv1i8_sext_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %sext_va = sext <vscale x 1 x i8> %va to <vscale x 1 x i16>
+  %sext_vb = sext <vscale x 1 x i8> %vb to <vscale x 1 x i16>
+  %vc_ext = srem <vscale x 1 x i16> %sext_va, %sext_vb
+  %vc = trunc <vscale x 1 x i16> %vc_ext to <vscale x 1 x i8>
+  ret <vscale x 1 x i8> %vc
+}
+
 define <vscale x 1 x i8> @vrem_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
 ; CHECK-LABEL: vrem_vx_nxv1i8:
 ; CHECK:       # %bb.0:
@@ -55,6 +68,19 @@
   ret <vscale x 2 x i8> %vc
 }

+define <vscale x 2 x i8> @vrem_vv_nxv2i8_sext_twice(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
+; CHECK-LABEL: vrem_vv_nxv2i8_sext_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %sext_va = sext <vscale x 2 x i8> %va to <vscale x 2 x i16>
+  %sext_vb = sext <vscale x 2 x i8> %vb to <vscale x 2 x i16>
+  %vc_ext = srem <vscale x 2 x i16> %sext_va, %sext_vb
+  %vc = trunc <vscale x 2 x i16> %vc_ext to <vscale x 2 x i8>
+  ret <vscale x 2 x i8> %vc
+}
+
 define <vscale x 2 x i8> @vrem_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
 ; CHECK-LABEL: vrem_vx_nxv2i8:
 ; CHECK:       # %bb.0:
@@ -96,6 +122,19 @@
   ret <vscale x 4 x i8> %vc
 }

+define <vscale x 4 x i8> @vrem_vv_nxv4i8_sext_twice(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
+; CHECK-LABEL: vrem_vv_nxv4i8_sext_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %sext_va = sext <vscale x 4 x i8> %va to <vscale x 4 x i16>
+  %sext_vb = sext <vscale x 4 x i8> %vb to <vscale x 4 x i16>
+  %vc_ext = srem <vscale x 4 x i16> %sext_va, %sext_vb
+  %vc = trunc <vscale x 4 x i16> %vc_ext to <vscale x 4 x i8>
+  ret <vscale x 4 x i8> %vc
+}
+
 define <vscale x 4 x i8> @vrem_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
 ; CHECK-LABEL: vrem_vx_nxv4i8:
 ; CHECK:       # %bb.0:
@@ -137,6 +176,19 @@
   ret <vscale x 8 x i8> %vc
 }

+define <vscale x 8 x i8> @vrem_vv_nxv8i8_sext_twice(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: vrem_vv_nxv8i8_sext_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %sext_va = sext <vscale x 8 x i8> %va to <vscale x 8 x i16>
+  %sext_vb = sext <vscale x 8 x i8> %vb to <vscale x 8 x i16>
+  %vc_ext = srem <vscale x 8 x i16> %sext_va, %sext_vb
+  %vc = trunc <vscale x 8 x i16> %vc_ext to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %vc
+}
+
 define <vscale x 8 x i8> @vrem_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
 ; CHECK-LABEL: vrem_vx_nxv8i8:
 ; CHECK:       # %bb.0:
@@ -178,6 +230,19 @@
   ret <vscale x 16 x i8> %vc
 }

+define <vscale x 16 x i8> @vrem_vv_nxv16i8_sext_twice(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
+; CHECK-LABEL: vrem_vv_nxv16i8_sext_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %sext_va = sext <vscale x 16 x i8> %va to <vscale x 16 x i16>
+  %sext_vb = sext <vscale x 16 x i8> %vb to <vscale x 16 x i16>
+  %vc_ext = srem <vscale x 16 x i16> %sext_va, %sext_vb
+  %vc = trunc <vscale x 16 x i16> %vc_ext to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %vc
+}
+
 define <vscale x 16 x i8> @vrem_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
 ; CHECK-LABEL: vrem_vx_nxv16i8:
 ; CHECK:       # %bb.0:
@@ -219,6 +284,19 @@
   ret <vscale x 32 x i8> %vc
 }

+define <vscale x 32 x i8> @vrem_vv_nxv32i8_sext_twice(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
+; CHECK-LABEL: vrem_vv_nxv32i8_sext_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %sext_va = sext <vscale x 32 x i8> %va to <vscale x 32 x i16>
+  %sext_vb = sext <vscale x 32 x i8> %vb to <vscale x 32 x i16>
+  %vc_ext = srem <vscale x 32 x i16> %sext_va, %sext_vb
+  %vc = trunc <vscale x 32 x i16> %vc_ext to <vscale x 32 x i8>
+  ret <vscale x 32 x i8> %vc
+}
+
 define <vscale x 32 x i8> @vrem_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
 ; CHECK-LABEL: vrem_vx_nxv32i8:
 ; CHECK:       # %bb.0:
@@ -260,6 +338,20 @@
   ret <vscale x 64 x i8> %vc
 }

+define <vscale x 64 x i8> @vrem_vv_nxv64i8_sext_twice(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
+; CHECK-LABEL: vrem_vv_nxv64i8_sext_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT:    vrem.vv v12, v12, v20
+; CHECK-NEXT:    vrem.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %sext_va = sext <vscale x 64 x i8> %va to <vscale x 64 x i16>
+  %sext_vb = sext <vscale x 64 x i8> %vb to <vscale x 64 x i16>
+  %vc_ext = srem <vscale x 64 x i16> %sext_va, %sext_vb
+  %vc = trunc <vscale x 64 x i16> %vc_ext to <vscale x 64 x i8>
+  ret <vscale x 64 x i8> %vc
+}
+
 define <vscale x 64 x i8> @vrem_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
 ; CHECK-LABEL: vrem_vx_nxv64i8:
 ; CHECK:       # %bb.0:
@@ -301,6 +393,19 @@
   ret <vscale x 1 x i16> %vc
 }

+define <vscale x 1 x i16> @vrem_vv_nxv1i16_sext_twice(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
+; CHECK-LABEL: vrem_vv_nxv1i16_sext_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %sext_va = sext <vscale x 1 x i16> %va to <vscale x 1 x i32>
+  %sext_vb = sext <vscale x 1 x i16> %vb to <vscale x 1 x i32>
+  %vc_ext = srem <vscale x 1 x i32> %sext_va, %sext_vb
+  %vc = trunc <vscale x 1 x i32> %vc_ext to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %vc
+}
+
 define <vscale x 1 x i16> @vrem_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
 ; CHECK-LABEL: vrem_vx_nxv1i16:
 ; CHECK:       # %bb.0:
@@ -355,6 +460,19 @@
   ret <vscale x 2 x i16> %vc
 }

+define <vscale x 2 x i16> @vrem_vv_nxv2i16_sext_twice(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
+; CHECK-LABEL: vrem_vv_nxv2i16_sext_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %sext_va = sext <vscale x 2 x i16> %va to <vscale x 2 x i32>
+  %sext_vb = sext <vscale x 2 x i16> %vb to <vscale x 2 x i32>
+  %vc_ext = srem <vscale x 2 x i32> %sext_va, %sext_vb
+  %vc = trunc <vscale x 2 x i32> %vc_ext to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %vc
+}
+
 define <vscale x 2 x i16> @vrem_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
 ; CHECK-LABEL: vrem_vx_nxv2i16:
 ; CHECK:       # %bb.0:
@@ -409,6 +527,19 @@
   ret <vscale x 4 x i16> %vc
 }

+define <vscale x 4 x i16> @vrem_vv_nxv4i16_sext_twice(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
+; CHECK-LABEL: vrem_vv_nxv4i16_sext_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %sext_va = sext <vscale x 4 x i16> %va to <vscale x 4 x i32>
+  %sext_vb = sext <vscale x 4 x i16> %vb to <vscale x 4 x i32>
+  %vc_ext = srem <vscale x 4 x i32> %sext_va, %sext_vb
+  %vc = trunc <vscale x 4 x i32> %vc_ext to <vscale x 4 x i16>
+  ret <vscale x 4 x i16> %vc
+}
+
 define <vscale x 4 x i16> @vrem_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
 ; CHECK-LABEL: vrem_vx_nxv4i16:
 ; CHECK:       # %bb.0:
@@ -463,6 +594,19 @@
   ret <vscale x 8 x i16> %vc
 }

+define <vscale x 8 x i16> @vrem_vv_nxv8i16_sext_twice(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: vrem_vv_nxv8i16_sext_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %sext_va = sext <vscale x 8 x i16> %va to <vscale x 8 x i32>
+  %sext_vb = sext <vscale x 8 x i16> %vb to <vscale x 8 x i32>
+  %vc_ext = srem <vscale x 8 x i32> %sext_va, %sext_vb
+  %vc = trunc <vscale x 8 x i32> %vc_ext to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %vc
+}
+
 define <vscale x 8 x i16> @vrem_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
 ; CHECK-LABEL: vrem_vx_nxv8i16:
 ; CHECK:       # %bb.0:
@@ -517,6 +661,19 @@
   ret <vscale x 16 x i16> %vc
 }

+define <vscale x 16 x i16> @vrem_vv_nxv16i16_sext_twice(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
+; CHECK-LABEL: vrem_vv_nxv16i16_sext_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %sext_va = sext <vscale x 16 x i16> %va to <vscale x 16 x i32>
+  %sext_vb = sext <vscale x 16 x i16> %vb to <vscale x 16 x i32>
+  %vc_ext = srem <vscale x 16 x i32> %sext_va, %sext_vb
+  %vc = trunc <vscale x 16 x i32> %vc_ext to <vscale x 16 x i16>
+  ret <vscale x 16 x i16> %vc
+}
+
 define <vscale x 16 x i16> @vrem_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
 ; CHECK-LABEL: vrem_vx_nxv16i16:
 ; CHECK:       # %bb.0:
@@ -963,8 +1120,8 @@
 ;
 ; RV64-V-LABEL: vrem_vi_nxv1i64_0:
 ; RV64-V:       # %bb.0:
-; RV64-V-NEXT:    lui a0, %hi(.LCPI56_0)
-; RV64-V-NEXT:    ld a0, %lo(.LCPI56_0)(a0)
+; RV64-V-NEXT:    lui a0, %hi(.LCPI68_0)
+; RV64-V-NEXT:    ld a0, %lo(.LCPI68_0)(a0)
 ; RV64-V-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; RV64-V-NEXT:    vmulh.vx v9, v8, a0
 ; RV64-V-NEXT:    li a0, 63
@@ -1048,8 +1205,8 @@
 ;
 ; RV64-V-LABEL: vrem_vi_nxv2i64_0:
 ; RV64-V:       # %bb.0:
-; RV64-V-NEXT:    lui a0, %hi(.LCPI59_0)
-; RV64-V-NEXT:    ld a0, %lo(.LCPI59_0)(a0)
+; RV64-V-NEXT:    lui a0, %hi(.LCPI71_0)
+; RV64-V-NEXT:    ld a0, %lo(.LCPI71_0)(a0)
 ; RV64-V-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; RV64-V-NEXT:    vmulh.vx v10, v8, a0
 ; RV64-V-NEXT:    li a0, 63
@@ -1133,8 +1290,8 @@
 ;
 ; RV64-V-LABEL: vrem_vi_nxv4i64_0:
 ; RV64-V:       # %bb.0:
-; RV64-V-NEXT:    lui a0, %hi(.LCPI62_0)
-; RV64-V-NEXT:    ld a0, %lo(.LCPI62_0)(a0)
+; RV64-V-NEXT:    lui a0, %hi(.LCPI74_0)
+; RV64-V-NEXT:    ld a0, %lo(.LCPI74_0)(a0)
 ; RV64-V-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; RV64-V-NEXT:    vmulh.vx v12, v8, a0
 ; RV64-V-NEXT:    li a0, 63
@@ -1218,8 +1375,8 @@
 ;
 ; RV64-V-LABEL: vrem_vi_nxv8i64_0:
 ; RV64-V:       # %bb.0:
-; RV64-V-NEXT:    lui a0, %hi(.LCPI65_0)
-; RV64-V-NEXT:    ld a0, %lo(.LCPI65_0)(a0)
+; RV64-V-NEXT:    lui a0, %hi(.LCPI77_0)
+; RV64-V-NEXT:    ld a0, %lo(.LCPI77_0)(a0)
 ; RV64-V-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; RV64-V-NEXT:    vmulh.vx v16, v8, a0
 ; RV64-V-NEXT:    li a0, 63
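
Note: to exercise the new pattern by hand outside of lit, a minimal sketch (the llc flags mirror the RUN lines this test file already uses; the file name reduced.ll and the function name @srem_sext are illustrative only):

  ; reduced.ll: srem of two sign-extended nxv4i8 values, truncated back to i8.
  ; With this patch, llc should select a single e8 vrem.vv rather than a
  ; widen / e16 vrem.vv / narrow sequence.
  define <vscale x 4 x i8> @srem_sext(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
    %ea = sext <vscale x 4 x i8> %a to <vscale x 4 x i16>
    %eb = sext <vscale x 4 x i8> %b to <vscale x 4 x i16>
    %r = srem <vscale x 4 x i16> %ea, %eb
    %t = trunc <vscale x 4 x i16> %r to <vscale x 4 x i8>
    ret <vscale x 4 x i8> %t
  }

  $ llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < reduced.ll

Because the pattern uses sext_oneuse, it only fires when each sign extension feeds the srem alone; if %ea or %eb had another user, the widened form would be kept.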