diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -4543,6 +4543,30 @@
 defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsra", "PseudoVSRA", AllIntegerVectors,
                             uimm5>;
 
+foreach vti = AllIntegerVectors in {
+  // Emit shift by 1 as an add since it might be faster.
+  def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector vti.RegClass:$rs1),
+                                        (XLenVT 1), VLOpFrag)),
+            (!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
+                                                              vti.RegClass:$rs1,
+                                                              GPR:$vl,
+                                                              vti.Log2SEW)>;
+  def : Pat<(vti.Vector (int_riscv_vsll_mask (vti.Vector vti.RegClass:$merge),
+                                              (vti.Vector vti.RegClass:$rs1),
+                                              (XLenVT 1),
+                                              (vti.Mask V0),
+                                              VLOpFrag,
+                                              (XLenVT timm:$policy))),
+            (!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX#"_MASK")
+                                              vti.RegClass:$merge,
+                                              vti.RegClass:$rs1,
+                                              vti.RegClass:$rs1,
+                                              (vti.Mask V0),
+                                              GPR:$vl,
+                                              vti.Log2SEW,
+                                              (XLenVT timm:$policy))>;
+}
+
 //===----------------------------------------------------------------------===//
 // 12.7. Vector Narrowing Integer Right Shift Instructions
 //===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
@@ -2000,6 +2000,21 @@
   ret <vscale x 1 x i8> %a
 }
 
+define <vscale x 1 x i8> @intrinsic_vsll_1_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsll_1_nxv1i8_nxv1i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
+    <vscale x 1 x i8> %0,
+    i32 1,
+    i32 %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
 define <vscale x 1 x i8> @intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
@@ -2017,6 +2032,23 @@
   ret <vscale x 1 x i8> %a
 }
 
+define <vscale x 1 x i8> @intrinsic_vsll_mask_1_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_1_nxv1i8_nxv1i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v9, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 1,
+    <vscale x 1 x i1> %2,
+    i32 %3, i32 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
 define <vscale x 2 x i8> @intrinsic_vsll_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
@@ -2000,6 +2000,21 @@
   ret <vscale x 1 x i8> %a
 }
 
+define <vscale x 1 x i8> @intrinsic_vsll_1_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsll_1_nxv1i8_nxv1i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
+    <vscale x 1 x i8> %0,
+    i64 1,
+    i64 %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
 define <vscale x 1 x i8> @intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
@@ -2017,6 +2032,23 @@
   ret <vscale x 1 x i8> %a
 }
 
+define <vscale x 1 x i8> @intrinsic_vsll_mask_1_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsll_mask_1_nxv1i8_nxv1i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v9, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 1,
+    <vscale x 1 x i1> %2,
+    i64 %3, i64 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
 define <vscale x 2 x i8> @intrinsic_vsll_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry