diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -481,6 +481,21 @@
 defm : VPatBinarySDNode_VV_VX<mulhs, "PseudoVMULH">;
 defm : VPatBinarySDNode_VV_VX<mulhu, "PseudoVMULHU">;
 
+foreach vti = AllIntegerVectors in {
+  // Emit mul by 2 as an add since it might be faster.
+  def : Pat<(mul (vti.Vector vti.RegClass:$rs1),
+                 (vti.Vector (splat_vector (XLenVT 2)))),
+            (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX)
+                 vti.RegClass:$rs1, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>;
+}
+// On RV32 a splat of an i64 constant is matched by rv32_splat_i64 instead of
+// splat_vector, so the i64 vector types need their own copy of the pattern.
+foreach vti = [VI64M1, VI64M2, VI64M4, VI64M8] in {
+  def : Pat<(mul (vti.Vector vti.RegClass:$rs1),
+                 (vti.Vector (rv32_splat_i64 (XLenVT 2)))),
+            (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX)
+                 vti.RegClass:$rs1, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>;
+}
+
 // 12.11. Vector Integer Divide Instructions
 defm : VPatBinarySDNode_VV_VX<sdiv, "PseudoVDIV">;
 defm : VPatBinarySDNode_VV_VX<udiv, "PseudoVDIVU">;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -780,6 +780,16 @@
 defm : VPatBinaryVL_VV_VX<riscv_mulhs_vl, "PseudoVMULH">;
 defm : VPatBinaryVL_VV_VX<riscv_mulhu_vl, "PseudoVMULHU">;
 
+foreach vti = AllIntegerVectors in {
+  // Emit mul by 2 as an add since it might be faster.
+  def : Pat<(riscv_mul_vl (vti.Vector vti.RegClass:$rs1),
+                          (riscv_vmv_v_x_vl 2, (XLenVT srcvalue)),
+                          (vti.Mask true_mask),
+                          VLOpFrag),
+            (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX)
+                 vti.RegClass:$rs1, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
+}
+
 // 12.11. Vector Integer Divide Instructions
 defm : VPatBinaryVL_VV_VX<riscv_sdiv_vl, "PseudoVDIV">;
 defm : VPatBinaryVL_VV_VX<riscv_udiv_vl, "PseudoVDIVU">;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode-rv32.ll
@@ -673,6 +673,18 @@
   ret <vscale x 1 x i64> %vc
 }
 
+define <vscale x 1 x i64> @vmul_vi_nxv1i64_1(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vmul_vi_nxv1i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 2, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = mul <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
 define <vscale x 2 x i64> @vmul_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
 ; CHECK-LABEL: vmul_vv_nxv2i64:
 ; CHECK:       # %bb.0:
@@ -715,6 +727,18 @@
   ret <vscale x 2 x i64> %vc
 }
 
+define <vscale x 2 x i64> @vmul_vi_nxv2i64_1(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vmul_vi_nxv2i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 2, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = mul <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
 define <vscale x 4 x i64> @vmul_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
 ; CHECK-LABEL: vmul_vv_nxv4i64:
 ; CHECK:       # %bb.0:
@@ -757,6 +781,18 @@
   ret <vscale x 4 x i64> %vc
 }
 
+define <vscale x 4 x i64> @vmul_vi_nxv4i64_1(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vmul_vi_nxv4i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 2, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = mul <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
 define <vscale x 8 x i64> @vmul_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
 ; CHECK-LABEL: vmul_vv_nxv8i64:
 ; CHECK:       # %bb.0:
@@ -799,3 +835,14 @@
   ret <vscale x 8 x i64> %vc
 }
 
+define <vscale x 8 x i64> @vmul_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vmul_vi_nxv8i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 2, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = mul <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode-rv64.ll
@@ -666,6 +666,18 @@
   ret <vscale x 1 x i64> %vc
 }
 
+define <vscale x 1 x i64> @vmul_vi_nxv1i64_1(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vmul_vi_nxv1i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> undef, i64 2, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = mul <vscale x 1 x i64> %va, %splat
+  ret <vscale x 1 x i64> %vc
+}
+
 define <vscale x 2 x i64> @vmul_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
 ; CHECK-LABEL: vmul_vv_nxv2i64:
 ; CHECK:       # %bb.0:
@@ -701,6 +713,18 @@
   ret <vscale x 2 x i64> %vc
 }
 
+define <vscale x 2 x i64> @vmul_vi_nxv2i64_1(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vmul_vi_nxv2i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> undef, i64 2, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = mul <vscale x 2 x i64> %va, %splat
+  ret <vscale x 2 x i64> %vc
+}
+
 define <vscale x 4 x i64> @vmul_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
 ; CHECK-LABEL: vmul_vv_nxv4i64:
 ; CHECK:       # %bb.0:
@@ -736,6 +760,18 @@
   ret <vscale x 4 x i64> %vc
 }
 
+define <vscale x 4 x i64> @vmul_vi_nxv4i64_1(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vmul_vi_nxv4i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> undef, i64 2, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = mul <vscale x 4 x i64> %va, %splat
+  ret <vscale x 4 x i64> %vc
+}
+
 define <vscale x 8 x i64> @vmul_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
 ; CHECK-LABEL: vmul_vv_nxv8i64:
 ; CHECK:       # %bb.0:
@@ -771,3 +807,14 @@
   ret <vscale x 8 x i64> %vc
 }
 
+define <vscale x 8 x i64> @vmul_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vmul_vi_nxv8i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 2, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = mul <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %vc
+}