diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -429,6 +429,21 @@
 defm : VPatBinarySDNode_VV_VX_VI;
 defm : VPatBinarySDNode_VV_VX_VI;
+foreach vti = AllIntegerVectors in {
+  // Emit shift by 1 as an add since it might be faster.
+  def : Pat<(shl (vti.Vector vti.RegClass:$rs1),
+                 (vti.Vector (splat_vector (XLenVT 1)))),
+            (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX)
+                 vti.RegClass:$rs1, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>;
+}
+foreach vti = [VI64M1, VI64M2, VI64M4, VI64M8] in {
+  def : Pat<(shl (vti.Vector vti.RegClass:$rs1),
+                 (vti.Vector (rv32_splat_i64 (XLenVT 1)))),
+            (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX)
+                 vti.RegClass:$rs1, vti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>;
+
+}
+
 // 12.8. Vector Integer Comparison Instructions
 defm : VPatIntegerSetCCSDNode_VV_VX_VI;
 defm : VPatIntegerSetCCSDNode_VV_VX_VI;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -683,7 +683,15 @@
 defm : VPatBinaryVL_VV_VX_VI;
 defm : VPatBinaryVL_VV_VX_VI;
-
+foreach vti = AllIntegerVectors in {
+  // Emit shift by 1 as an add since it might be faster.
+  def : Pat<(riscv_shl_vl (vti.Vector vti.RegClass:$rs1),
+                          (riscv_vmv_v_x_vl 1, (XLenVT srcvalue)),
+                          (vti.Mask true_mask),
+                          VLOpFrag),
+            (!cast<Instruction>("PseudoVADD_VV_"# vti.LMul.MX)
+                 vti.RegClass:$rs1, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
+}
 // 12.7. Vector Narrowing Integer Right Shift Instructions
 foreach vtiTowti = AllWidenableIntVectors in {
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll
@@ -33,7 +33,7 @@
 ; LMULMAX2-RV32-NEXT: lui a1, 5
 ; LMULMAX2-RV32-NEXT: addi a1, a1, 1365
 ; LMULMAX2-RV32-NEXT: vand.vx v26, v25, a1
-; LMULMAX2-RV32-NEXT: vsll.vi v26, v26, 1
+; LMULMAX2-RV32-NEXT: vadd.vv v26, v26, v26
 ; LMULMAX2-RV32-NEXT: lui a1, 11
 ; LMULMAX2-RV32-NEXT: addi a1, a1, -1366
 ; LMULMAX2-RV32-NEXT: vand.vx v25, v25, a1
@@ -70,7 +70,7 @@
 ; LMULMAX2-RV64-NEXT: lui a1, 5
 ; LMULMAX2-RV64-NEXT: addiw a1, a1, 1365
 ; LMULMAX2-RV64-NEXT: vand.vx v26, v25, a1
-; LMULMAX2-RV64-NEXT: vsll.vi v26, v26, 1
+; LMULMAX2-RV64-NEXT: vadd.vv v26, v26, v26
 ; LMULMAX2-RV64-NEXT: lui a1, 11
 ; LMULMAX2-RV64-NEXT: addiw a1, a1, -1366
 ; LMULMAX2-RV64-NEXT: vand.vx v25, v25, a1
@@ -107,7 +107,7 @@
 ; LMULMAX1-RV32-NEXT: lui a1, 5
 ; LMULMAX1-RV32-NEXT: addi a1, a1, 1365
 ; LMULMAX1-RV32-NEXT: vand.vx v26, v25, a1
-; LMULMAX1-RV32-NEXT: vsll.vi v26, v26, 1
+; LMULMAX1-RV32-NEXT: vadd.vv v26, v26, v26
 ; LMULMAX1-RV32-NEXT: lui a1, 11
 ; LMULMAX1-RV32-NEXT: addi a1, a1, -1366
 ; LMULMAX1-RV32-NEXT: vand.vx v25, v25, a1
@@ -144,7 +144,7 @@
 ; LMULMAX1-RV64-NEXT: lui a1, 5
 ; LMULMAX1-RV64-NEXT: addiw a1, a1, 1365
 ; LMULMAX1-RV64-NEXT: vand.vx v26, v25, a1
-; LMULMAX1-RV64-NEXT: vsll.vi v26, v26, 1
+; LMULMAX1-RV64-NEXT: vadd.vv v26, v26, v26
 ; LMULMAX1-RV64-NEXT: lui a1, 11
 ; LMULMAX1-RV64-NEXT: addiw a1, a1, -1366
 ; LMULMAX1-RV64-NEXT: vand.vx v25, v25, a1
@@ -198,7 +198,7 @@
 ; LMULMAX2-RV32-NEXT: lui a1, 349525
 ; LMULMAX2-RV32-NEXT: addi a1, a1, 1365
 ; LMULMAX2-RV32-NEXT: vand.vx v26, v25, a1
-; LMULMAX2-RV32-NEXT: vsll.vi v26, v26, 1
+; LMULMAX2-RV32-NEXT: vadd.vv v26, v26, v26 ; LMULMAX2-RV32-NEXT: lui a1, 699051 ; LMULMAX2-RV32-NEXT: addi a1, a1, -1366 ; LMULMAX2-RV32-NEXT: vand.vx v25, v25, a1 @@ -248,7 +248,7 @@ ; LMULMAX2-RV64-NEXT: lui a1, 349525 ; LMULMAX2-RV64-NEXT: addiw a1, a1, 1365 ; LMULMAX2-RV64-NEXT: vand.vx v26, v25, a1 -; LMULMAX2-RV64-NEXT: vsll.vi v26, v26, 1 +; LMULMAX2-RV64-NEXT: vadd.vv v26, v26, v26 ; LMULMAX2-RV64-NEXT: lui a1, 171 ; LMULMAX2-RV64-NEXT: addiw a1, a1, -1365 ; LMULMAX2-RV64-NEXT: slli a1, a1, 12 @@ -296,7 +296,7 @@ ; LMULMAX1-RV32-NEXT: lui a1, 349525 ; LMULMAX1-RV32-NEXT: addi a1, a1, 1365 ; LMULMAX1-RV32-NEXT: vand.vx v26, v25, a1 -; LMULMAX1-RV32-NEXT: vsll.vi v26, v26, 1 +; LMULMAX1-RV32-NEXT: vadd.vv v26, v26, v26 ; LMULMAX1-RV32-NEXT: lui a1, 699051 ; LMULMAX1-RV32-NEXT: addi a1, a1, -1366 ; LMULMAX1-RV32-NEXT: vand.vx v25, v25, a1 @@ -346,7 +346,7 @@ ; LMULMAX1-RV64-NEXT: lui a1, 349525 ; LMULMAX1-RV64-NEXT: addiw a1, a1, 1365 ; LMULMAX1-RV64-NEXT: vand.vx v26, v25, a1 -; LMULMAX1-RV64-NEXT: vsll.vi v26, v26, 1 +; LMULMAX1-RV64-NEXT: vadd.vv v26, v26, v26 ; LMULMAX1-RV64-NEXT: lui a1, 171 ; LMULMAX1-RV64-NEXT: addiw a1, a1, -1365 ; LMULMAX1-RV64-NEXT: slli a1, a1, 12 @@ -452,7 +452,7 @@ ; LMULMAX2-RV32-NEXT: vmv.v.x v26, a1 ; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX2-RV32-NEXT: vand.vv v26, v25, v26 -; LMULMAX2-RV32-NEXT: vsll.vi v26, v26, 1 +; LMULMAX2-RV32-NEXT: vadd.vv v26, v26, v26 ; LMULMAX2-RV32-NEXT: lui a1, 699051 ; LMULMAX2-RV32-NEXT: addi a1, a1, -1366 ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -550,7 +550,7 @@ ; LMULMAX2-RV64-NEXT: slli a1, a1, 12 ; LMULMAX2-RV64-NEXT: addi a1, a1, 1365 ; LMULMAX2-RV64-NEXT: vand.vx v26, v25, a1 -; LMULMAX2-RV64-NEXT: vsll.vi v26, v26, 1 +; LMULMAX2-RV64-NEXT: vadd.vv v26, v26, v26 ; LMULMAX2-RV64-NEXT: lui a1, 1026731 ; LMULMAX2-RV64-NEXT: addiw a1, a1, -1365 ; LMULMAX2-RV64-NEXT: slli a1, a1, 12 @@ -652,7 +652,7 @@ ; LMULMAX1-RV32-NEXT: vmv.v.x v26, a1 ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-RV32-NEXT: vand.vv v26, v25, v26 -; LMULMAX1-RV32-NEXT: vsll.vi v26, v26, 1 +; LMULMAX1-RV32-NEXT: vadd.vv v26, v26, v26 ; LMULMAX1-RV32-NEXT: lui a1, 699051 ; LMULMAX1-RV32-NEXT: addi a1, a1, -1366 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -750,7 +750,7 @@ ; LMULMAX1-RV64-NEXT: slli a1, a1, 12 ; LMULMAX1-RV64-NEXT: addi a1, a1, 1365 ; LMULMAX1-RV64-NEXT: vand.vx v26, v25, a1 -; LMULMAX1-RV64-NEXT: vsll.vi v26, v26, 1 +; LMULMAX1-RV64-NEXT: vadd.vv v26, v26, v26 ; LMULMAX1-RV64-NEXT: lui a1, 1026731 ; LMULMAX1-RV64-NEXT: addiw a1, a1, -1365 ; LMULMAX1-RV64-NEXT: slli a1, a1, 12 @@ -801,7 +801,7 @@ ; LMULMAX2-RV32-NEXT: lui a1, 5 ; LMULMAX2-RV32-NEXT: addi a1, a1, 1365 ; LMULMAX2-RV32-NEXT: vand.vx v28, v26, a1 -; LMULMAX2-RV32-NEXT: vsll.vi v28, v28, 1 +; LMULMAX2-RV32-NEXT: vadd.vv v28, v28, v28 ; LMULMAX2-RV32-NEXT: lui a1, 11 ; LMULMAX2-RV32-NEXT: addi a1, a1, -1366 ; LMULMAX2-RV32-NEXT: vand.vx v26, v26, a1 @@ -838,7 +838,7 @@ ; LMULMAX2-RV64-NEXT: lui a1, 5 ; LMULMAX2-RV64-NEXT: addiw a1, a1, 1365 ; LMULMAX2-RV64-NEXT: vand.vx v28, v26, a1 -; LMULMAX2-RV64-NEXT: vsll.vi v28, v28, 1 +; LMULMAX2-RV64-NEXT: vadd.vv v28, v28, v28 ; LMULMAX2-RV64-NEXT: lui a1, 11 ; LMULMAX2-RV64-NEXT: addiw a1, a1, -1366 ; LMULMAX2-RV64-NEXT: vand.vx v26, v26, a1 @@ -877,7 +877,7 @@ ; LMULMAX1-RV32-NEXT: lui a1, 5 ; LMULMAX1-RV32-NEXT: addi a1, a1, 1365 ; LMULMAX1-RV32-NEXT: vand.vx v27, v25, a1 -; LMULMAX1-RV32-NEXT: vsll.vi v27, v27, 1 +; LMULMAX1-RV32-NEXT: vadd.vv v27, 
v27, v27 ; LMULMAX1-RV32-NEXT: lui a2, 11 ; LMULMAX1-RV32-NEXT: addi a2, a2, -1366 ; LMULMAX1-RV32-NEXT: vand.vx v25, v25, a2 @@ -897,7 +897,7 @@ ; LMULMAX1-RV32-NEXT: vsrl.vi v26, v26, 2 ; LMULMAX1-RV32-NEXT: vor.vv v26, v26, v27 ; LMULMAX1-RV32-NEXT: vand.vx v27, v26, a1 -; LMULMAX1-RV32-NEXT: vsll.vi v27, v27, 1 +; LMULMAX1-RV32-NEXT: vadd.vv v27, v27, v27 ; LMULMAX1-RV32-NEXT: vand.vx v26, v26, a2 ; LMULMAX1-RV32-NEXT: vsrl.vi v26, v26, 1 ; LMULMAX1-RV32-NEXT: vor.vv v26, v26, v27 @@ -935,7 +935,7 @@ ; LMULMAX1-RV64-NEXT: lui a1, 5 ; LMULMAX1-RV64-NEXT: addiw a1, a1, 1365 ; LMULMAX1-RV64-NEXT: vand.vx v27, v25, a1 -; LMULMAX1-RV64-NEXT: vsll.vi v27, v27, 1 +; LMULMAX1-RV64-NEXT: vadd.vv v27, v27, v27 ; LMULMAX1-RV64-NEXT: lui a2, 11 ; LMULMAX1-RV64-NEXT: addiw a2, a2, -1366 ; LMULMAX1-RV64-NEXT: vand.vx v25, v25, a2 @@ -955,7 +955,7 @@ ; LMULMAX1-RV64-NEXT: vsrl.vi v26, v26, 2 ; LMULMAX1-RV64-NEXT: vor.vv v26, v26, v27 ; LMULMAX1-RV64-NEXT: vand.vx v27, v26, a1 -; LMULMAX1-RV64-NEXT: vsll.vi v27, v27, 1 +; LMULMAX1-RV64-NEXT: vadd.vv v27, v27, v27 ; LMULMAX1-RV64-NEXT: vand.vx v26, v26, a2 ; LMULMAX1-RV64-NEXT: vsrl.vi v26, v26, 1 ; LMULMAX1-RV64-NEXT: vor.vv v26, v26, v27 @@ -1008,7 +1008,7 @@ ; LMULMAX2-RV32-NEXT: lui a1, 349525 ; LMULMAX2-RV32-NEXT: addi a1, a1, 1365 ; LMULMAX2-RV32-NEXT: vand.vx v28, v26, a1 -; LMULMAX2-RV32-NEXT: vsll.vi v28, v28, 1 +; LMULMAX2-RV32-NEXT: vadd.vv v28, v28, v28 ; LMULMAX2-RV32-NEXT: lui a1, 699051 ; LMULMAX2-RV32-NEXT: addi a1, a1, -1366 ; LMULMAX2-RV32-NEXT: vand.vx v26, v26, a1 @@ -1058,7 +1058,7 @@ ; LMULMAX2-RV64-NEXT: lui a1, 349525 ; LMULMAX2-RV64-NEXT: addiw a1, a1, 1365 ; LMULMAX2-RV64-NEXT: vand.vx v28, v26, a1 -; LMULMAX2-RV64-NEXT: vsll.vi v28, v28, 1 +; LMULMAX2-RV64-NEXT: vadd.vv v28, v28, v28 ; LMULMAX2-RV64-NEXT: lui a1, 171 ; LMULMAX2-RV64-NEXT: addiw a1, a1, -1365 ; LMULMAX2-RV64-NEXT: slli a1, a1, 12 @@ -1108,7 +1108,7 @@ ; LMULMAX1-RV32-NEXT: lui a2, 349525 ; LMULMAX1-RV32-NEXT: addi a2, a2, 1365 ; LMULMAX1-RV32-NEXT: vand.vx v27, v25, a2 -; LMULMAX1-RV32-NEXT: vsll.vi v27, v27, 1 +; LMULMAX1-RV32-NEXT: vadd.vv v27, v27, v27 ; LMULMAX1-RV32-NEXT: lui a4, 699051 ; LMULMAX1-RV32-NEXT: addi a4, a4, -1366 ; LMULMAX1-RV32-NEXT: vand.vx v25, v25, a4 @@ -1134,7 +1134,7 @@ ; LMULMAX1-RV32-NEXT: vsrl.vi v26, v26, 2 ; LMULMAX1-RV32-NEXT: vor.vv v26, v26, v27 ; LMULMAX1-RV32-NEXT: vand.vx v27, v26, a2 -; LMULMAX1-RV32-NEXT: vsll.vi v27, v27, 1 +; LMULMAX1-RV32-NEXT: vadd.vv v27, v27, v27 ; LMULMAX1-RV32-NEXT: vand.vx v26, v26, a4 ; LMULMAX1-RV32-NEXT: vsrl.vi v26, v26, 1 ; LMULMAX1-RV32-NEXT: vor.vv v26, v26, v27 @@ -1185,7 +1185,7 @@ ; LMULMAX1-RV64-NEXT: lui a2, 349525 ; LMULMAX1-RV64-NEXT: addiw a2, a2, 1365 ; LMULMAX1-RV64-NEXT: vand.vx v27, v25, a2 -; LMULMAX1-RV64-NEXT: vsll.vi v27, v27, 1 +; LMULMAX1-RV64-NEXT: vadd.vv v27, v27, v27 ; LMULMAX1-RV64-NEXT: lui a4, 171 ; LMULMAX1-RV64-NEXT: addiw a4, a4, -1365 ; LMULMAX1-RV64-NEXT: slli a4, a4, 12 @@ -1213,7 +1213,7 @@ ; LMULMAX1-RV64-NEXT: vsrl.vi v26, v26, 2 ; LMULMAX1-RV64-NEXT: vor.vv v26, v26, v27 ; LMULMAX1-RV64-NEXT: vand.vx v27, v26, a2 -; LMULMAX1-RV64-NEXT: vsll.vi v27, v27, 1 +; LMULMAX1-RV64-NEXT: vadd.vv v27, v27, v27 ; LMULMAX1-RV64-NEXT: vand.vx v26, v26, a4 ; LMULMAX1-RV64-NEXT: vsrl.vi v26, v26, 1 ; LMULMAX1-RV64-NEXT: vor.vv v26, v26, v27 @@ -1316,7 +1316,7 @@ ; LMULMAX2-RV32-NEXT: vmv.v.x v28, a1 ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; LMULMAX2-RV32-NEXT: vand.vv v28, v26, v28 -; LMULMAX2-RV32-NEXT: vsll.vi v28, v28, 1 +; LMULMAX2-RV32-NEXT: vadd.vv 
v28, v28, v28 ; LMULMAX2-RV32-NEXT: lui a1, 699051 ; LMULMAX2-RV32-NEXT: addi a1, a1, -1366 ; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu @@ -1414,7 +1414,7 @@ ; LMULMAX2-RV64-NEXT: slli a1, a1, 12 ; LMULMAX2-RV64-NEXT: addi a1, a1, 1365 ; LMULMAX2-RV64-NEXT: vand.vx v28, v26, a1 -; LMULMAX2-RV64-NEXT: vsll.vi v28, v28, 1 +; LMULMAX2-RV64-NEXT: vadd.vv v28, v28, v28 ; LMULMAX2-RV64-NEXT: lui a1, 1026731 ; LMULMAX2-RV64-NEXT: addiw a1, a1, -1365 ; LMULMAX2-RV64-NEXT: slli a1, a1, 12 @@ -1518,7 +1518,7 @@ ; LMULMAX1-RV32-NEXT: vmv.v.x v10, a5 ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; LMULMAX1-RV32-NEXT: vand.vv v12, v31, v10 -; LMULMAX1-RV32-NEXT: vsll.vi v12, v12, 1 +; LMULMAX1-RV32-NEXT: vadd.vv v12, v12, v12 ; LMULMAX1-RV32-NEXT: lui a5, 699051 ; LMULMAX1-RV32-NEXT: addi a5, a5, -1366 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu @@ -1559,7 +1559,7 @@ ; LMULMAX1-RV32-NEXT: vsrl.vi v25, v25, 2 ; LMULMAX1-RV32-NEXT: vor.vv v25, v25, v26 ; LMULMAX1-RV32-NEXT: vand.vv v26, v25, v10 -; LMULMAX1-RV32-NEXT: vsll.vi v26, v26, 1 +; LMULMAX1-RV32-NEXT: vadd.vv v26, v26, v26 ; LMULMAX1-RV32-NEXT: vand.vv v25, v25, v13 ; LMULMAX1-RV32-NEXT: vsrl.vi v25, v25, 1 ; LMULMAX1-RV32-NEXT: vor.vv v25, v25, v26 @@ -1659,7 +1659,7 @@ ; LMULMAX1-RV64-NEXT: slli a1, a1, 12 ; LMULMAX1-RV64-NEXT: addi a1, a1, 1365 ; LMULMAX1-RV64-NEXT: vand.vx v27, v26, a1 -; LMULMAX1-RV64-NEXT: vsll.vi v27, v27, 1 +; LMULMAX1-RV64-NEXT: vadd.vv v27, v27, v27 ; LMULMAX1-RV64-NEXT: lui s0, 1026731 ; LMULMAX1-RV64-NEXT: addiw s0, s0, -1365 ; LMULMAX1-RV64-NEXT: slli s0, s0, 12 @@ -1703,7 +1703,7 @@ ; LMULMAX1-RV64-NEXT: vsrl.vi v25, v25, 2 ; LMULMAX1-RV64-NEXT: vor.vv v25, v25, v27 ; LMULMAX1-RV64-NEXT: vand.vx v27, v25, a1 -; LMULMAX1-RV64-NEXT: vsll.vi v27, v27, 1 +; LMULMAX1-RV64-NEXT: vadd.vv v27, v27, v27 ; LMULMAX1-RV64-NEXT: vand.vx v25, v25, s0 ; LMULMAX1-RV64-NEXT: vsrl.vi v25, v25, 1 ; LMULMAX1-RV64-NEXT: vor.vv v25, v25, v27 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll @@ -68,7 +68,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vid.v v25 -; CHECK-NEXT: vsll.vi v25, v25, 1 +; CHECK-NEXT: vadd.vv v25, v25, v25 ; CHECK-NEXT: vse8.v v25, (a0) ; CHECK-NEXT: vse8.v v25, (a1) ; CHECK-NEXT: vse8.v v25, (a2) @@ -86,7 +86,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vid.v v25 -; CHECK-NEXT: vsll.vi v25, v25, 1 +; CHECK-NEXT: vadd.vv v25, v25, v25 ; CHECK-NEXT: vadd.vi v25, v25, 1 ; CHECK-NEXT: vse8.v v25, (a0) ; CHECK-NEXT: vse8.v v25, (a1) @@ -127,7 +127,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vid.v v25 -; CHECK-NEXT: vsll.vi v25, v25, 1 +; CHECK-NEXT: vadd.vv v25, v25, v25 ; CHECK-NEXT: vrsub.vi v25, v25, 0 ; CHECK-NEXT: vse8.v v25, (a0) ; CHECK-NEXT: vse8.v v25, (a1) @@ -146,7 +146,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vid.v v25 -; CHECK-NEXT: vsll.vi v25, v25, 1 +; CHECK-NEXT: vadd.vv v25, v25, v25 ; CHECK-NEXT: vrsub.vi v25, v25, 3 ; CHECK-NEXT: vse8.v v25, (a0) ; CHECK-NEXT: ret @@ -232,7 +232,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vid.v v25 -; RV64-NEXT: vsll.vi v8, v25, 1 +; RV64-NEXT: vadd.vv v8, v25, v25 ; RV64-NEXT: vadd.vi v9, v8, 4 ; RV64-NEXT: ret ret <4 x i64> 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll @@ -470,7 +470,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf4 v26, v8 -; RV32-NEXT: vsll.vi v26, v26, 1 +; RV32-NEXT: vadd.vv v26, v26, v26 ; RV32-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; RV32-NEXT: vluxei32.v v9, (a0), v26, v0.t ; RV32-NEXT: vmv1r.v v8, v9 @@ -480,7 +480,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf8 v28, v8 -; RV64-NEXT: vsll.vi v28, v28, 1 +; RV64-NEXT: vadd.vv v28, v28, v28 ; RV64-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; RV64-NEXT: vluxei64.v v9, (a0), v28, v0.t ; RV64-NEXT: vmv1r.v v8, v9 @@ -495,7 +495,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf4 v26, v8 -; RV32-NEXT: vsll.vi v26, v26, 1 +; RV32-NEXT: vadd.vv v26, v26, v26 ; RV32-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; RV32-NEXT: vluxei32.v v9, (a0), v26, v0.t ; RV32-NEXT: vmv1r.v v8, v9 @@ -505,7 +505,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf8 v28, v8 -; RV64-NEXT: vsll.vi v28, v28, 1 +; RV64-NEXT: vadd.vv v28, v28, v28 ; RV64-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; RV64-NEXT: vluxei64.v v9, (a0), v28, v0.t ; RV64-NEXT: vmv1r.v v8, v9 @@ -521,7 +521,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vzext.vf4 v26, v8 -; RV32-NEXT: vsll.vi v26, v26, 1 +; RV32-NEXT: vadd.vv v26, v26, v26 ; RV32-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; RV32-NEXT: vluxei32.v v9, (a0), v26, v0.t ; RV32-NEXT: vmv1r.v v8, v9 @@ -531,7 +531,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vzext.vf8 v28, v8 -; RV64-NEXT: vsll.vi v28, v28, 1 +; RV64-NEXT: vadd.vv v28, v28, v28 ; RV64-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; RV64-NEXT: vluxei64.v v9, (a0), v28, v0.t ; RV64-NEXT: vmv1r.v v8, v9 @@ -547,7 +547,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf2 v26, v8 -; RV32-NEXT: vsll.vi v26, v26, 1 +; RV32-NEXT: vadd.vv v26, v26, v26 ; RV32-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; RV32-NEXT: vluxei32.v v9, (a0), v26, v0.t ; RV32-NEXT: vmv1r.v v8, v9 @@ -557,7 +557,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf4 v28, v8 -; RV64-NEXT: vsll.vi v28, v28, 1 +; RV64-NEXT: vadd.vv v28, v28, v28 ; RV64-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; RV64-NEXT: vluxei64.v v9, (a0), v28, v0.t ; RV64-NEXT: vmv1r.v v8, v9 @@ -1385,7 +1385,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf4 v26, v8 -; RV32-NEXT: vsll.vi v26, v26, 1 +; RV32-NEXT: vadd.vv v26, v26, v26 ; RV32-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; RV32-NEXT: vluxei32.v v9, (a0), v26, v0.t ; RV32-NEXT: vmv1r.v v8, v9 @@ -1395,7 +1395,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf8 v28, v8 -; RV64-NEXT: vsll.vi v28, v28, 1 +; RV64-NEXT: vadd.vv v28, v28, v28 ; RV64-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; RV64-NEXT: vluxei64.v v9, (a0), v28, v0.t ; RV64-NEXT: vmv1r.v v8, v9 @@ -1410,7 +1410,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf4 v26, v8 -; RV32-NEXT: vsll.vi v26, v26, 1 +; RV32-NEXT: vadd.vv v26, v26, v26 ; RV32-NEXT: vsetvli zero, zero, e16, 
m1, tu, mu ; RV32-NEXT: vluxei32.v v9, (a0), v26, v0.t ; RV32-NEXT: vmv1r.v v8, v9 @@ -1420,7 +1420,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf8 v28, v8 -; RV64-NEXT: vsll.vi v28, v28, 1 +; RV64-NEXT: vadd.vv v28, v28, v28 ; RV64-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; RV64-NEXT: vluxei64.v v9, (a0), v28, v0.t ; RV64-NEXT: vmv1r.v v8, v9 @@ -1436,7 +1436,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vzext.vf4 v26, v8 -; RV32-NEXT: vsll.vi v26, v26, 1 +; RV32-NEXT: vadd.vv v26, v26, v26 ; RV32-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; RV32-NEXT: vluxei32.v v9, (a0), v26, v0.t ; RV32-NEXT: vmv1r.v v8, v9 @@ -1446,7 +1446,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vzext.vf8 v28, v8 -; RV64-NEXT: vsll.vi v28, v28, 1 +; RV64-NEXT: vadd.vv v28, v28, v28 ; RV64-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; RV64-NEXT: vluxei64.v v9, (a0), v28, v0.t ; RV64-NEXT: vmv1r.v v8, v9 @@ -1462,7 +1462,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf2 v26, v8 -; RV32-NEXT: vsll.vi v26, v26, 1 +; RV32-NEXT: vadd.vv v26, v26, v26 ; RV32-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; RV32-NEXT: vluxei32.v v9, (a0), v26, v0.t ; RV32-NEXT: vmv1r.v v8, v9 @@ -1472,7 +1472,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf4 v28, v8 -; RV64-NEXT: vsll.vi v28, v28, 1 +; RV64-NEXT: vadd.vv v28, v28, v28 ; RV64-NEXT: vsetvli zero, zero, e16, m1, tu, mu ; RV64-NEXT: vluxei64.v v9, (a0), v28, v0.t ; RV64-NEXT: vmv1r.v v8, v9 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll @@ -345,7 +345,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf4 v26, v9 -; RV32-NEXT: vsll.vi v26, v26, 1 +; RV32-NEXT: vadd.vv v26, v26, v26 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t ; RV32-NEXT: ret @@ -354,7 +354,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf8 v28, v9 -; RV64-NEXT: vsll.vi v28, v28, 1 +; RV64-NEXT: vadd.vv v28, v28, v28 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t ; RV64-NEXT: ret @@ -368,7 +368,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf4 v26, v9 -; RV32-NEXT: vsll.vi v26, v26, 1 +; RV32-NEXT: vadd.vv v26, v26, v26 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t ; RV32-NEXT: ret @@ -377,7 +377,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf8 v28, v9 -; RV64-NEXT: vsll.vi v28, v28, 1 +; RV64-NEXT: vadd.vv v28, v28, v28 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t ; RV64-NEXT: ret @@ -392,7 +392,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vzext.vf4 v26, v9 -; RV32-NEXT: vsll.vi v26, v26, 1 +; RV32-NEXT: vadd.vv v26, v26, v26 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t ; RV32-NEXT: ret @@ -401,7 +401,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vzext.vf8 v28, v9 -; RV64-NEXT: vsll.vi v28, v28, 1 +; RV64-NEXT: vadd.vv v28, v28, v28 ; 
RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t ; RV64-NEXT: ret @@ -416,7 +416,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf2 v26, v9 -; RV32-NEXT: vsll.vi v26, v26, 1 +; RV32-NEXT: vadd.vv v26, v26, v26 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t ; RV32-NEXT: ret @@ -425,7 +425,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf4 v28, v9 -; RV64-NEXT: vsll.vi v28, v28, 1 +; RV64-NEXT: vadd.vv v28, v28, v28 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t ; RV64-NEXT: ret @@ -1137,7 +1137,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf4 v26, v9 -; RV32-NEXT: vsll.vi v26, v26, 1 +; RV32-NEXT: vadd.vv v26, v26, v26 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t ; RV32-NEXT: ret @@ -1146,7 +1146,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf8 v28, v9 -; RV64-NEXT: vsll.vi v28, v28, 1 +; RV64-NEXT: vadd.vv v28, v28, v28 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t ; RV64-NEXT: ret @@ -1160,7 +1160,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf4 v26, v9 -; RV32-NEXT: vsll.vi v26, v26, 1 +; RV32-NEXT: vadd.vv v26, v26, v26 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t ; RV32-NEXT: ret @@ -1169,7 +1169,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf8 v28, v9 -; RV64-NEXT: vsll.vi v28, v28, 1 +; RV64-NEXT: vadd.vv v28, v28, v28 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t ; RV64-NEXT: ret @@ -1184,7 +1184,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vzext.vf4 v26, v9 -; RV32-NEXT: vsll.vi v26, v26, 1 +; RV32-NEXT: vadd.vv v26, v26, v26 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t ; RV32-NEXT: ret @@ -1193,7 +1193,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vzext.vf8 v28, v9 -; RV64-NEXT: vsll.vi v28, v28, 1 +; RV64-NEXT: vadd.vv v28, v28, v28 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t ; RV64-NEXT: ret @@ -1208,7 +1208,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsext.vf2 v26, v9 -; RV32-NEXT: vsll.vi v26, v26, 1 +; RV32-NEXT: vadd.vv v26, v26, v26 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v26, v0.t ; RV32-NEXT: ret @@ -1217,7 +1217,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsext.vf4 v28, v9 -; RV64-NEXT: vsll.vi v28, v28, 1 +; RV64-NEXT: vadd.vv v28, v28, v28 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v28, v0.t ; RV64-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll @@ -474,7 +474,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf4 v28, v8 -; RV32-NEXT: vsll.vi v28, v28, 1 +; RV32-NEXT: vadd.vv v28, v28, v28 ; RV32-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; RV32-NEXT: vluxei32.v v10, (a0), 
v28, v0.t ; RV32-NEXT: vmv2r.v v8, v10 @@ -484,7 +484,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v8 -; RV64-NEXT: vsll.vi v16, v16, 1 +; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 @@ -499,7 +499,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf4 v28, v8 -; RV32-NEXT: vsll.vi v28, v28, 1 +; RV32-NEXT: vadd.vv v28, v28, v28 ; RV32-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; RV32-NEXT: vluxei32.v v10, (a0), v28, v0.t ; RV32-NEXT: vmv2r.v v8, v10 @@ -509,7 +509,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v8 -; RV64-NEXT: vsll.vi v16, v16, 1 +; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 @@ -525,7 +525,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vzext.vf4 v28, v8 -; RV32-NEXT: vsll.vi v28, v28, 1 +; RV32-NEXT: vadd.vv v28, v28, v28 ; RV32-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; RV32-NEXT: vluxei32.v v10, (a0), v28, v0.t ; RV32-NEXT: vmv2r.v v8, v10 @@ -535,7 +535,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vzext.vf8 v16, v8 -; RV64-NEXT: vsll.vi v16, v16, 1 +; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 @@ -551,7 +551,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf2 v28, v8 -; RV32-NEXT: vsll.vi v28, v28, 1 +; RV32-NEXT: vadd.vv v28, v28, v28 ; RV32-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; RV32-NEXT: vluxei32.v v10, (a0), v28, v0.t ; RV32-NEXT: vmv2r.v v8, v10 @@ -561,7 +561,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf4 v16, v8 -; RV64-NEXT: vsll.vi v16, v16, 1 +; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 @@ -1457,7 +1457,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf4 v28, v8 -; RV32-NEXT: vsll.vi v28, v28, 1 +; RV32-NEXT: vadd.vv v28, v28, v28 ; RV32-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; RV32-NEXT: vluxei32.v v10, (a0), v28, v0.t ; RV32-NEXT: vmv2r.v v8, v10 @@ -1467,7 +1467,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v8 -; RV64-NEXT: vsll.vi v16, v16, 1 +; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 @@ -1482,7 +1482,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf4 v28, v8 -; RV32-NEXT: vsll.vi v28, v28, 1 +; RV32-NEXT: vadd.vv v28, v28, v28 ; RV32-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; RV32-NEXT: vluxei32.v v10, (a0), v28, v0.t ; RV32-NEXT: vmv2r.v v8, v10 @@ -1492,7 +1492,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v8 -; RV64-NEXT: vsll.vi v16, v16, 1 +; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 @@ -1508,7 +1508,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vzext.vf4 v28, v8 -; 
RV32-NEXT: vsll.vi v28, v28, 1 +; RV32-NEXT: vadd.vv v28, v28, v28 ; RV32-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; RV32-NEXT: vluxei32.v v10, (a0), v28, v0.t ; RV32-NEXT: vmv2r.v v8, v10 @@ -1518,7 +1518,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vzext.vf8 v16, v8 -; RV64-NEXT: vsll.vi v16, v16, 1 +; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 @@ -1534,7 +1534,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf2 v28, v8 -; RV32-NEXT: vsll.vi v28, v28, 1 +; RV32-NEXT: vadd.vv v28, v28, v28 ; RV32-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; RV32-NEXT: vluxei32.v v10, (a0), v28, v0.t ; RV32-NEXT: vmv2r.v v8, v10 @@ -1544,7 +1544,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf4 v16, v8 -; RV64-NEXT: vsll.vi v16, v16, 1 +; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, tu, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: vmv2r.v v8, v10 diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll @@ -345,7 +345,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf4 v28, v10 -; RV32-NEXT: vsll.vi v28, v28, 1 +; RV32-NEXT: vadd.vv v28, v28, v28 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t ; RV32-NEXT: ret @@ -354,7 +354,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v10 -; RV64-NEXT: vsll.vi v16, v16, 1 +; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret @@ -368,7 +368,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf4 v28, v10 -; RV32-NEXT: vsll.vi v28, v28, 1 +; RV32-NEXT: vadd.vv v28, v28, v28 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t ; RV32-NEXT: ret @@ -377,7 +377,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v10 -; RV64-NEXT: vsll.vi v16, v16, 1 +; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret @@ -392,7 +392,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vzext.vf4 v28, v10 -; RV32-NEXT: vsll.vi v28, v28, 1 +; RV32-NEXT: vadd.vv v28, v28, v28 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t ; RV32-NEXT: ret @@ -401,7 +401,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vzext.vf8 v16, v10 -; RV64-NEXT: vsll.vi v16, v16, 1 +; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret @@ -416,7 +416,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf2 v28, v10 -; RV32-NEXT: vsll.vi v28, v28, 1 +; RV32-NEXT: vadd.vv v28, v28, v28 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t ; RV32-NEXT: ret @@ -425,7 +425,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf4 v16, v10 -; RV64-NEXT: vsll.vi v16, v16, 
1 +; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret @@ -1137,7 +1137,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf4 v28, v10 -; RV32-NEXT: vsll.vi v28, v28, 1 +; RV32-NEXT: vadd.vv v28, v28, v28 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t ; RV32-NEXT: ret @@ -1146,7 +1146,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v10 -; RV64-NEXT: vsll.vi v16, v16, 1 +; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret @@ -1160,7 +1160,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf4 v28, v10 -; RV32-NEXT: vsll.vi v28, v28, 1 +; RV32-NEXT: vadd.vv v28, v28, v28 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t ; RV32-NEXT: ret @@ -1169,7 +1169,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf8 v16, v10 -; RV64-NEXT: vsll.vi v16, v16, 1 +; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret @@ -1184,7 +1184,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vzext.vf4 v28, v10 -; RV32-NEXT: vsll.vi v28, v28, 1 +; RV32-NEXT: vadd.vv v28, v28, v28 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t ; RV32-NEXT: ret @@ -1193,7 +1193,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vzext.vf8 v16, v10 -; RV64-NEXT: vsll.vi v16, v16, 1 +; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret @@ -1208,7 +1208,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsext.vf2 v28, v10 -; RV32-NEXT: vsll.vi v28, v28, 1 +; RV32-NEXT: vadd.vv v28, v28, v28 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t ; RV32-NEXT: ret @@ -1217,7 +1217,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsext.vf4 v16, v10 -; RV64-NEXT: vsll.vi v16, v16, 1 +; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode-rv32.ll @@ -470,6 +470,18 @@ ret %vc } +define @vshl_vx_nxv1i64_2( %va) { +; CHECK-LABEL: vshl_vx_nxv1i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vadd.vv v8, v8, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + define @vshl_vx_nxv2i64( %va, i64 %b) { ; CHECK-LABEL: vshl_vx_nxv2i64: ; CHECK: # %bb.0: @@ -507,6 +519,18 @@ ret %vc } +define @vshl_vx_nxv2i64_2( %va) { +; CHECK-LABEL: vshl_vx_nxv2i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vadd.vv v8, v8, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret 
%vc +} + define @vshl_vx_nxv4i64( %va, i64 %b) { ; CHECK-LABEL: vshl_vx_nxv4i64: ; CHECK: # %bb.0: @@ -544,6 +568,18 @@ ret %vc } +define @vshl_vx_nxv4i64_2( %va) { +; CHECK-LABEL: vshl_vx_nxv4i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vadd.vv v8, v8, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + define @vshl_vx_nxv8i64( %va, i64 %b) { ; CHECK-LABEL: vshl_vx_nxv8i64: ; CHECK: # %bb.0: @@ -581,3 +617,14 @@ ret %vc } +define @vshl_vx_nxv8i64_2( %va) { +; CHECK-LABEL: vshl_vx_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vadd.vv v8, v8, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode-rv64.ll @@ -470,6 +470,18 @@ ret %vc } +define @vshl_vx_nxv1i64_2( %va) { +; CHECK-LABEL: vshl_vx_nxv1i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vadd.vv v8, v8, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + define @vshl_vx_nxv2i64( %va, i64 %b) { ; CHECK-LABEL: vshl_vx_nxv2i64: ; CHECK: # %bb.0: @@ -507,6 +519,18 @@ ret %vc } +define @vshl_vx_nxv2i64_2( %va) { +; CHECK-LABEL: vshl_vx_nxv2i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vadd.vv v8, v8, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + define @vshl_vx_nxv4i64( %va, i64 %b) { ; CHECK-LABEL: vshl_vx_nxv4i64: ; CHECK: # %bb.0: @@ -544,6 +568,18 @@ ret %vc } +define @vshl_vx_nxv4i64_2( %va) { +; CHECK-LABEL: vshl_vx_nxv4i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vadd.vv v8, v8, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} + define @vshl_vx_nxv8i64( %va, i64 %b) { ; CHECK-LABEL: vshl_vx_nxv8i64: ; CHECK: # %bb.0: @@ -581,3 +617,14 @@ ret %vc } +define @vshl_vx_nxv8i64_2( %va) { +; CHECK-LABEL: vshl_vx_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vadd.vv v8, v8, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = shl %va, %splat + ret %vc +} diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll --- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll +++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll @@ -575,7 +575,7 @@ ; RV32MV-NEXT: vid.v v27 ; RV32MV-NEXT: vsub.vv v25, v25, v27 ; RV32MV-NEXT: vmul.vv v25, v25, v26 -; RV32MV-NEXT: vsll.vi v26, v25, 1 +; RV32MV-NEXT: vadd.vv v26, v25, v25 ; RV32MV-NEXT: addi a1, zero, 9 ; RV32MV-NEXT: vmv.v.i v27, 10 ; RV32MV-NEXT: vsetvli zero, zero, e16, mf2, tu, mu @@ -640,7 +640,7 @@ ; RV64MV-NEXT: vid.v v27 ; RV64MV-NEXT: vsub.vv v25, v25, v27 ; RV64MV-NEXT: vmul.vv v25, v25, v26 -; RV64MV-NEXT: vsll.vi v26, v25, 1 +; RV64MV-NEXT: 
vadd.vv v26, v25, v25 ; RV64MV-NEXT: addi a1, zero, 9 ; RV64MV-NEXT: vmv.v.i v27, 10 ; RV64MV-NEXT: vsetvli zero, zero, e16, mf2, tu, mu