diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -12449,6 +12449,26 @@
     break;
   }
+  case RISCVISD::VMV_S_X_VL: {
+    // Combine vmv.s.x of a constant into a vmv.v.i if the constant
+    // fits into the OPIVI format. For now, only handle tail
+    // undefined vmv.s.x nodes (i.e. passthru is undef).
+    if (isa<ConstantSDNode>(N->getOperand(1)) && N->getOperand(0).isUndef()) {
+      uint64_t C = N->getConstantOperandVal(1);
+      if (C != 0 && isInt<5>(C)) {
+        SDValue VL = N->getOperand(2);
+        // If VL is a constant > 0, then change it to 1 to increase
+        // the chances of a toggle being removed by the insert vsetvli
+        // pass.
+        if (isa<ConstantSDNode>(N->getOperand(2)) &&
+            N->getConstantOperandVal(2) > 0)
+          VL = DAG.getConstant(1, SDLoc(N), N->getOperand(2).getValueType());
+        return DAG.getNode(RISCVISD::VMV_V_X_VL, SDLoc(N), N->getValueType(0),
+                           N->getOperand(0), N->getOperand(1), VL);
+      }
+    }
+    break;
+  }
   case RISCVISD::VFMV_S_F_VL: {
     SDValue Src = N->getOperand(1);
     // Try to remove vector->scalar->vector if the scalar->vector is inserting
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -328,6 +328,35 @@
     }
   }
 
+  // A vmv.v.i with VL=1 can be treated in the same way as vmv.s.x,
+  // i.e. we don't care about LMUL/VL and can expand SEW, provided
+  // that it's tail undefined (it has an undef passthru, or it doesn't
+  // have a passthru operand at all).
+  if (getRVVMCOpcode(MI.getOpcode()) == RISCV::VMV_V_I) {
+    bool IsTailUndef;
+    if (!RISCVII::hasMergeOp(TSFlags)) {
+      IsTailUndef = true;
+    } else {
+      MachineInstr *VRegDef = MRI->getVRegDef(MI.getOperand(RISCVII::getMergeOpNum(MI.getDesc())).getReg());
+      IsTailUndef = VRegDef && VRegDef->isImplicitDef();
+    }
+
+    // Even if the VL operand we're reading is potentially stale and
+    // might not match the actual current value of VL, if VL=1 was
+    // specified then we know that the vmv.v.i only cares about the
+    // first element. Therefore we can use any non-zero VL,
+    // irrespective of what the current VL value actually is.
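+    // For example (illustrative), a vmv.v.i encoded for e8 with VL=1
+    // still produces the same first e8 element when it executes under
+    // e32 with a larger VL: the extra bytes and lanes it writes all
+    // fall in the tail, which is undefined here.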
+    const MachineOperand &VLOp = MI.getOperand(RISCVII::getVLOpNum(MI.getDesc()));
+
+    if (IsTailUndef && VLOp.isImm() && VLOp.getImm() == 1) {
+      Res.LMUL = false;
+      Res.SEWLMULRatio = false;
+      Res.VLAny = false;
+      Res.SEW = DemandedFields::SEWGreaterThanOrEqual;
+      Res.TailPolicy = false;
+    }
+  }
+
   return Res;
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll b/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll
--- a/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll
@@ -59,13 +59,9 @@
 define <vscale x 4 x i32> @combine_vec_shl_shl(<vscale x 4 x i32> %x) {
 ; CHECK-LABEL: combine_vec_shl_shl:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 2
-; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    li a0, 4
-; CHECK-NEXT:    vmv.s.x v12, a0
-; CHECK-NEXT:    vsll.vv v8, v8, v10
-; CHECK-NEXT:    vsll.vv v8, v8, v12
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vsll.vi v8, v8, 2
+; CHECK-NEXT:    vsll.vi v8, v8, 4
 ; CHECK-NEXT:    ret
   %ins1 = insertelement <vscale x 4 x i32> poison, i32 2, i32 0
   %splat1 = shufflevector <vscale x 4 x i32> %ins1, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-transpose.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-transpose.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-transpose.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-transpose.ll
@@ -110,8 +110,7 @@
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vid.v v11
 ; CHECK-NEXT:    vrgather.vv v10, v8, v11
-; CHECK-NEXT:    li a0, 10
-; CHECK-NEXT:    vmv.s.x v0, a0
+; CHECK-NEXT:    vmv.v.i v0, 10
 ; CHECK-NEXT:    vadd.vi v8, v11, -1
 ; CHECK-NEXT:    vrgather.vv v10, v9, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
@@ -126,8 +125,7 @@
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vid.v v11
 ; CHECK-NEXT:    vadd.vi v12, v11, 1
-; CHECK-NEXT:    li a0, 10
-; CHECK-NEXT:    vmv.s.x v0, a0
+; CHECK-NEXT:    vmv.v.i v0, 10
 ; CHECK-NEXT:    vrgather.vv v10, v8, v12
 ; CHECK-NEXT:    vrgather.vv v10, v9, v11, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
@@ -181,9 +179,8 @@
 define <2 x i32> @trn2.v2i32(<2 x i32> %v0, <2 x i32> %v1) {
 ; CHECK-LABEL: trn2.v2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; CHECK-NEXT:    vmv.s.x v0, a0
+; CHECK-NEXT:    vmv.v.i v0, 2
 ; CHECK-NEXT:    vrgather.vi v10, v8, 1
 ; CHECK-NEXT:    vrgather.vi v10, v9, 1, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
@@ -198,8 +195,7 @@
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vid.v v11
 ; CHECK-NEXT:    vrgather.vv v10, v8, v11
-; CHECK-NEXT:    li a0, 10
-; CHECK-NEXT:    vmv.s.x v0, a0
+; CHECK-NEXT:    vmv.v.i v0, 10
 ; CHECK-NEXT:    vadd.vi v8, v11, -1
 ; CHECK-NEXT:    vrgather.vv v10, v9, v8, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
@@ -214,8 +210,7 @@
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vid.v v11
 ; CHECK-NEXT:    vadd.vi v12, v11, 1
-; CHECK-NEXT:    li a0, 10
-; CHECK-NEXT:    vmv.s.x v0, a0
+; CHECK-NEXT:    vmv.v.i v0, 10
 ; CHECK-NEXT:    vrgather.vv v10, v8, v12
 ; CHECK-NEXT:    vrgather.vv v10, v9, v11, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
@@ -237,9 +232,8 @@
 define <2 x i64> @trn2.v2i64(<2 x i64> %v0, <2 x i64> %v1) {
 ; CHECK-LABEL: trn2.v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v0, a0
+; CHECK-NEXT:    vmv.v.i v0, 2
 ; CHECK-NEXT:    vrgather.vi v10, v8, 1
 ; CHECK-NEXT:    vrgather.vi v10, v9, 1, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
@@ -261,9 +255,8 @@
 define <2 x float> @trn2.v2f32(<2 x float> %v0, <2 x float> %v1) {
 ; CHECK-LABEL: trn2.v2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 
2 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vmv.v.i v0, 2 ; CHECK-NEXT: vrgather.vi v10, v8, 1 ; CHECK-NEXT: vrgather.vi v10, v9, 1, v0.t ; CHECK-NEXT: vmv1r.v v8, v10 @@ -278,8 +271,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vid.v v11 ; CHECK-NEXT: vrgather.vv v10, v8, v11 -; CHECK-NEXT: li a0, 10 -; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vmv.v.i v0, 10 ; CHECK-NEXT: vadd.vi v8, v11, -1 ; CHECK-NEXT: vrgather.vv v10, v9, v8, v0.t ; CHECK-NEXT: vmv.v.v v8, v10 @@ -294,8 +286,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vid.v v11 ; CHECK-NEXT: vadd.vi v12, v11, 1 -; CHECK-NEXT: li a0, 10 -; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vmv.v.i v0, 10 ; CHECK-NEXT: vrgather.vv v10, v8, v12 ; CHECK-NEXT: vrgather.vv v10, v9, v11, v0.t ; CHECK-NEXT: vmv.v.v v8, v10 @@ -317,9 +308,8 @@ define <2 x double> @trn2.v2f64(<2 x double> %v0, <2 x double> %v1) { ; CHECK-LABEL: trn2.v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu -; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vmv.v.i v0, 2 ; CHECK-NEXT: vrgather.vi v10, v8, 1 ; CHECK-NEXT: vrgather.vi v10, v9, 1, v0.t ; CHECK-NEXT: vmv.v.v v8, v10 @@ -334,8 +324,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vid.v v11 ; CHECK-NEXT: vrgather.vv v10, v8, v11 -; CHECK-NEXT: li a0, 10 -; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vmv.v.i v0, 10 ; CHECK-NEXT: vadd.vi v8, v11, -1 ; CHECK-NEXT: vrgather.vv v10, v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v10 @@ -350,8 +339,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vid.v v11 ; CHECK-NEXT: vadd.vi v12, v11, 1 -; CHECK-NEXT: li a0, 10 -; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vmv.v.i v0, 10 ; CHECK-NEXT: vrgather.vv v10, v8, v12 ; CHECK-NEXT: vrgather.vv v10, v9, v11, v0.t ; CHECK-NEXT: vmv1r.v v8, v10 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-vslide1up.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-vslide1up.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-vslide1up.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-vslide1up.ll @@ -258,6 +258,33 @@ ret <4 x double> %v1 } +define <16 x i64> @vslide1up_16xf64(<16 x i64> %v, i64 %b) { +; RV32-LABEL: vslide1up_16xf64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vslideup.vi v16, v8, 15 +; RV32-NEXT: vmv.v.v v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vslide1up_16xf64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; RV64-NEXT: vmv.v.x v16, a0 +; RV64-NEXT: vslideup.vi v16, v8, 15 +; RV64-NEXT: vmv.v.v v8, v16 +; RV64-NEXT: ret + %vb = insertelement <16 x i64> poison, i64 %b, i64 0 + %v1 = shufflevector <16 x i64> %v, <16 x i64> %vb, <16 x i32> + ret <16 x i64> %v1 +} + define <4 x i8> @vslide1up_4xi8_with_splat(<4 x i8> %v, i8 %b) { ; CHECK-LABEL: vslide1up_4xi8_with_splat: ; CHECK: # %bb.0: @@ -373,8 +400,8 @@ define <4 x i8> @vslide1up_4xi8_neg_incorrect_insert(<4 x i8> %v, i8 %b) { ; CHECK-LABEL: vslide1up_4xi8_neg_incorrect_insert: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI23_0) -; CHECK-NEXT: addi a0, a0, %lo(.LCPI23_0) +; CHECK-NEXT: lui a0, %hi(.LCPI24_0) +; CHECK-NEXT: addi a0, a0, %lo(.LCPI24_0) ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; 
CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -399,8 +426,8 @@ define <4 x i8> @vslide1up_4xi8_neg_incorrect_insert3(<4 x i8> %v, i8 %b) { ; CHECK-LABEL: vslide1up_4xi8_neg_incorrect_insert3: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI25_0) -; CHECK-NEXT: addi a0, a0, %lo(.LCPI25_0) +; CHECK-NEXT: lui a0, %hi(.LCPI26_0) +; CHECK-NEXT: addi a0, a0, %lo(.LCPI26_0) ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vrgather.vv v9, v8, v10 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll @@ -1399,9 +1399,8 @@ ; RV32-NEXT: lui a4, 4080 ; RV32-NEXT: vand.vx v11, v11, a4, v0.t ; RV32-NEXT: vsrl.vi v12, v8, 8, v0.t -; RV32-NEXT: li a5, 5 ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; RV32-NEXT: vmv.s.x v0, a5 +; RV32-NEXT: vmv.v.i v0, 5 ; RV32-NEXT: vmv.v.i v13, 0 ; RV32-NEXT: lui a5, 1044480 ; RV32-NEXT: vmerge.vxm v13, v13, a5, v0 @@ -1515,38 +1514,37 @@ ; RV32: # %bb.0: ; RV32-NEXT: li a1, 56 ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; RV32-NEXT: vsrl.vx v9, v8, a1 -; RV32-NEXT: li a2, 40 -; RV32-NEXT: vsrl.vx v10, v8, a2 -; RV32-NEXT: lui a3, 16 -; RV32-NEXT: addi a3, a3, -256 -; RV32-NEXT: vand.vx v10, v10, a3 -; RV32-NEXT: vor.vv v9, v10, v9 -; RV32-NEXT: vsrl.vi v10, v8, 8 -; RV32-NEXT: li a4, 5 +; RV32-NEXT: vsll.vx v9, v8, a1 +; RV32-NEXT: lui a2, 16 +; RV32-NEXT: addi a2, a2, -256 +; RV32-NEXT: vand.vx v10, v8, a2 +; RV32-NEXT: li a3, 40 +; RV32-NEXT: vsll.vx v10, v10, a3 +; RV32-NEXT: vor.vv v9, v9, v10 +; RV32-NEXT: lui a4, 4080 +; RV32-NEXT: vand.vx v10, v8, a4 +; RV32-NEXT: vsll.vi v10, v10, 24 ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; RV32-NEXT: vmv.s.x v0, a4 +; RV32-NEXT: vmv.v.i v0, 5 ; RV32-NEXT: vmv.v.i v11, 0 -; RV32-NEXT: lui a4, 1044480 -; RV32-NEXT: vmerge.vxm v11, v11, a4, v0 +; RV32-NEXT: lui a5, 1044480 +; RV32-NEXT: vmerge.vxm v11, v11, a5, v0 ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; RV32-NEXT: vand.vv v10, v10, v11 -; RV32-NEXT: vsrl.vi v12, v8, 24 -; RV32-NEXT: lui a4, 4080 -; RV32-NEXT: vand.vx v12, v12, a4 -; RV32-NEXT: vor.vv v10, v10, v12 -; RV32-NEXT: vor.vv v9, v10, v9 -; RV32-NEXT: vsll.vx v10, v8, a1 -; RV32-NEXT: vand.vx v12, v8, a3 -; RV32-NEXT: vsll.vx v12, v12, a2 +; RV32-NEXT: vand.vv v12, v8, v11 +; RV32-NEXT: vsll.vi v12, v12, 8 ; RV32-NEXT: vor.vv v10, v10, v12 -; RV32-NEXT: vand.vx v12, v8, a4 -; RV32-NEXT: vsll.vi v12, v12, 24 -; RV32-NEXT: vand.vv v8, v8, v11 -; RV32-NEXT: vsll.vi v8, v8, 8 -; RV32-NEXT: vor.vv v8, v12, v8 -; RV32-NEXT: vor.vv v8, v10, v8 -; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vor.vv v9, v9, v10 +; RV32-NEXT: vsrl.vx v10, v8, a1 +; RV32-NEXT: vsrl.vx v12, v8, a3 +; RV32-NEXT: vand.vx v12, v12, a2 +; RV32-NEXT: vor.vv v10, v12, v10 +; RV32-NEXT: vsrl.vi v12, v8, 8 +; RV32-NEXT: vand.vv v11, v12, v11 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: vand.vx v8, v8, a4 +; RV32-NEXT: vor.vv v8, v11, v8 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vor.vv v8, v9, v8 ; RV32-NEXT: vsrl.vi v9, v8, 4 ; RV32-NEXT: lui a1, 61681 ; RV32-NEXT: addi a1, a1, -241 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll @@ -173,30 +173,29 @@ ; 
RV32-NEXT: addi a3, a3, -256 ; RV32-NEXT: vand.vx v10, v10, a3 ; RV32-NEXT: vor.vv v9, v10, v9 -; RV32-NEXT: vsrl.vi v10, v8, 24 -; RV32-NEXT: lui a4, 4080 -; RV32-NEXT: vand.vx v10, v10, a4 -; RV32-NEXT: li a5, 5 -; RV32-NEXT: vmv.s.x v0, a5 +; RV32-NEXT: vmv.v.i v0, 5 ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; RV32-NEXT: vmv.v.i v11, 0 -; RV32-NEXT: lui a5, 1044480 -; RV32-NEXT: vmerge.vxm v11, v11, a5, v0 +; RV32-NEXT: vmv.v.i v10, 0 +; RV32-NEXT: lui a4, 1044480 +; RV32-NEXT: vmerge.vxm v10, v10, a4, v0 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; RV32-NEXT: vsrl.vi v12, v8, 8 -; RV32-NEXT: vand.vv v12, v12, v11 -; RV32-NEXT: vor.vv v10, v12, v10 -; RV32-NEXT: vor.vv v9, v10, v9 -; RV32-NEXT: vsll.vx v10, v8, a1 +; RV32-NEXT: vsrl.vi v11, v8, 8 +; RV32-NEXT: vand.vv v11, v11, v10 +; RV32-NEXT: vsrl.vi v12, v8, 24 +; RV32-NEXT: lui a4, 4080 +; RV32-NEXT: vand.vx v12, v12, a4 +; RV32-NEXT: vor.vv v11, v11, v12 +; RV32-NEXT: vor.vv v9, v11, v9 +; RV32-NEXT: vsll.vx v11, v8, a1 ; RV32-NEXT: vand.vx v12, v8, a3 ; RV32-NEXT: vsll.vx v12, v12, a2 -; RV32-NEXT: vor.vv v10, v10, v12 -; RV32-NEXT: vand.vx v12, v8, a4 -; RV32-NEXT: vsll.vi v12, v12, 24 -; RV32-NEXT: vand.vv v8, v8, v11 -; RV32-NEXT: vsll.vi v8, v8, 8 -; RV32-NEXT: vor.vv v8, v12, v8 -; RV32-NEXT: vor.vv v8, v10, v8 +; RV32-NEXT: vor.vv v11, v11, v12 +; RV32-NEXT: vand.vv v10, v8, v10 +; RV32-NEXT: vsll.vi v10, v10, 8 +; RV32-NEXT: vand.vx v8, v8, a4 +; RV32-NEXT: vsll.vi v8, v8, 24 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vor.vv v8, v11, v8 ; RV32-NEXT: vor.vv v8, v8, v9 ; RV32-NEXT: vsrl.vi v9, v8, 4 ; RV32-NEXT: lui a1, 61681 @@ -827,29 +826,28 @@ ; LMULMAX1-RV32-NEXT: addi a4, a4, -256 ; LMULMAX1-RV32-NEXT: vand.vx v11, v11, a4 ; LMULMAX1-RV32-NEXT: vor.vv v11, v11, v9 -; LMULMAX1-RV32-NEXT: vsrl.vi v9, v10, 24 -; LMULMAX1-RV32-NEXT: lui a5, 4080 -; LMULMAX1-RV32-NEXT: vand.vx v12, v9, a5 -; LMULMAX1-RV32-NEXT: li a6, 5 -; LMULMAX1-RV32-NEXT: vmv.s.x v0, a6 +; LMULMAX1-RV32-NEXT: vmv.v.i v0, 5 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.i v9, 0 -; LMULMAX1-RV32-NEXT: lui a6, 1044480 -; LMULMAX1-RV32-NEXT: vmerge.vxm v9, v9, a6, v0 +; LMULMAX1-RV32-NEXT: lui a5, 1044480 +; LMULMAX1-RV32-NEXT: vmerge.vxm v9, v9, a5, v0 ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; LMULMAX1-RV32-NEXT: vsrl.vi v13, v10, 8 -; LMULMAX1-RV32-NEXT: vand.vv v13, v13, v9 -; LMULMAX1-RV32-NEXT: vor.vv v12, v13, v12 +; LMULMAX1-RV32-NEXT: vsrl.vi v12, v10, 8 +; LMULMAX1-RV32-NEXT: vand.vv v12, v12, v9 +; LMULMAX1-RV32-NEXT: vsrl.vi v13, v10, 24 +; LMULMAX1-RV32-NEXT: lui a5, 4080 +; LMULMAX1-RV32-NEXT: vand.vx v13, v13, a5 +; LMULMAX1-RV32-NEXT: vor.vv v12, v12, v13 ; LMULMAX1-RV32-NEXT: vor.vv v11, v12, v11 ; LMULMAX1-RV32-NEXT: vsll.vx v12, v10, a2 ; LMULMAX1-RV32-NEXT: vand.vx v13, v10, a4 ; LMULMAX1-RV32-NEXT: vsll.vx v13, v13, a3 ; LMULMAX1-RV32-NEXT: vor.vv v12, v12, v13 -; LMULMAX1-RV32-NEXT: vand.vx v13, v10, a5 -; LMULMAX1-RV32-NEXT: vsll.vi v13, v13, 24 -; LMULMAX1-RV32-NEXT: vand.vv v10, v10, v9 -; LMULMAX1-RV32-NEXT: vsll.vi v10, v10, 8 -; LMULMAX1-RV32-NEXT: vor.vv v10, v13, v10 +; LMULMAX1-RV32-NEXT: vand.vv v13, v10, v9 +; LMULMAX1-RV32-NEXT: vsll.vi v13, v13, 8 +; LMULMAX1-RV32-NEXT: vand.vx v10, v10, a5 +; LMULMAX1-RV32-NEXT: vsll.vi v10, v10, 24 +; LMULMAX1-RV32-NEXT: vor.vv v10, v10, v13 ; LMULMAX1-RV32-NEXT: vor.vv v10, v12, v10 ; LMULMAX1-RV32-NEXT: vor.vv v10, v10, v11 ; LMULMAX1-RV32-NEXT: vsrl.vi v11, v10, 4 @@ -886,21 +884,21 @@ ; LMULMAX1-RV32-NEXT: vsrl.vx 
v15, v8, a3 ; LMULMAX1-RV32-NEXT: vand.vx v15, v15, a4 ; LMULMAX1-RV32-NEXT: vor.vv v11, v15, v11 -; LMULMAX1-RV32-NEXT: vsrl.vi v15, v8, 24 -; LMULMAX1-RV32-NEXT: vand.vx v15, v15, a5 -; LMULMAX1-RV32-NEXT: vsrl.vi v16, v8, 8 -; LMULMAX1-RV32-NEXT: vand.vv v16, v16, v9 -; LMULMAX1-RV32-NEXT: vor.vv v15, v16, v15 +; LMULMAX1-RV32-NEXT: vsrl.vi v15, v8, 8 +; LMULMAX1-RV32-NEXT: vand.vv v15, v15, v9 +; LMULMAX1-RV32-NEXT: vsrl.vi v16, v8, 24 +; LMULMAX1-RV32-NEXT: vand.vx v16, v16, a5 +; LMULMAX1-RV32-NEXT: vor.vv v15, v15, v16 ; LMULMAX1-RV32-NEXT: vor.vv v11, v15, v11 ; LMULMAX1-RV32-NEXT: vsll.vx v15, v8, a2 ; LMULMAX1-RV32-NEXT: vand.vx v16, v8, a4 ; LMULMAX1-RV32-NEXT: vsll.vx v16, v16, a3 ; LMULMAX1-RV32-NEXT: vor.vv v15, v15, v16 -; LMULMAX1-RV32-NEXT: vand.vx v16, v8, a5 -; LMULMAX1-RV32-NEXT: vsll.vi v16, v16, 24 -; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v9 -; LMULMAX1-RV32-NEXT: vsll.vi v8, v8, 8 -; LMULMAX1-RV32-NEXT: vor.vv v8, v16, v8 +; LMULMAX1-RV32-NEXT: vand.vv v9, v8, v9 +; LMULMAX1-RV32-NEXT: vsll.vi v9, v9, 8 +; LMULMAX1-RV32-NEXT: vand.vx v8, v8, a5 +; LMULMAX1-RV32-NEXT: vsll.vi v8, v8, 24 +; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: vor.vv v8, v15, v8 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 4 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll @@ -439,9 +439,8 @@ ; RV32-NEXT: lui a4, 4080 ; RV32-NEXT: vand.vx v11, v11, a4, v0.t ; RV32-NEXT: vsrl.vi v12, v8, 8, v0.t -; RV32-NEXT: li a5, 5 ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; RV32-NEXT: vmv.s.x v0, a5 +; RV32-NEXT: vmv.v.i v0, 5 ; RV32-NEXT: vmv.v.i v13, 0 ; RV32-NEXT: lui a5, 1044480 ; RV32-NEXT: vmerge.vxm v13, v13, a5, v0 @@ -504,38 +503,37 @@ ; RV32: # %bb.0: ; RV32-NEXT: li a1, 56 ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; RV32-NEXT: vsrl.vx v9, v8, a1 -; RV32-NEXT: li a2, 40 -; RV32-NEXT: vsrl.vx v10, v8, a2 -; RV32-NEXT: lui a3, 16 -; RV32-NEXT: addi a3, a3, -256 -; RV32-NEXT: vand.vx v10, v10, a3 -; RV32-NEXT: vor.vv v9, v10, v9 -; RV32-NEXT: vsrl.vi v10, v8, 8 -; RV32-NEXT: li a4, 5 +; RV32-NEXT: vsll.vx v9, v8, a1 +; RV32-NEXT: lui a2, 16 +; RV32-NEXT: addi a2, a2, -256 +; RV32-NEXT: vand.vx v10, v8, a2 +; RV32-NEXT: li a3, 40 +; RV32-NEXT: vsll.vx v10, v10, a3 +; RV32-NEXT: vor.vv v9, v9, v10 +; RV32-NEXT: lui a4, 4080 +; RV32-NEXT: vand.vx v10, v8, a4 +; RV32-NEXT: vsll.vi v10, v10, 24 ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; RV32-NEXT: vmv.s.x v0, a4 +; RV32-NEXT: vmv.v.i v0, 5 ; RV32-NEXT: vmv.v.i v11, 0 -; RV32-NEXT: lui a4, 1044480 -; RV32-NEXT: vmerge.vxm v11, v11, a4, v0 +; RV32-NEXT: lui a5, 1044480 +; RV32-NEXT: vmerge.vxm v11, v11, a5, v0 ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; RV32-NEXT: vand.vv v10, v10, v11 -; RV32-NEXT: vsrl.vi v12, v8, 24 -; RV32-NEXT: lui a0, 4080 -; RV32-NEXT: vand.vx v12, v12, a0 -; RV32-NEXT: vor.vv v10, v10, v12 -; RV32-NEXT: vor.vv v9, v10, v9 -; RV32-NEXT: vsll.vx v10, v8, a1 -; RV32-NEXT: vand.vx v12, v8, a3 -; RV32-NEXT: vsll.vx v12, v12, a2 +; RV32-NEXT: vand.vv v12, v8, v11 +; RV32-NEXT: vsll.vi v12, v12, 8 ; RV32-NEXT: vor.vv v10, v10, v12 -; RV32-NEXT: vand.vx v12, v8, a0 -; RV32-NEXT: vsll.vi v12, v12, 24 -; RV32-NEXT: vand.vv v8, v8, v11 -; RV32-NEXT: vsll.vi v8, v8, 8 -; RV32-NEXT: vor.vv v8, v12, v8 -; RV32-NEXT: vor.vv v8, v10, v8 -; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vor.vv 
v9, v9, v10 +; RV32-NEXT: vsrl.vx v10, v8, a1 +; RV32-NEXT: vsrl.vx v12, v8, a3 +; RV32-NEXT: vand.vx v12, v12, a2 +; RV32-NEXT: vor.vv v10, v12, v10 +; RV32-NEXT: vsrl.vi v12, v8, 8 +; RV32-NEXT: vand.vv v11, v12, v11 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: vand.vx v8, v8, a4 +; RV32-NEXT: vor.vv v8, v11, v8 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vor.vv v8, v9, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vp_bswap_v2i64_unmasked: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll @@ -79,30 +79,29 @@ ; RV32-NEXT: addi a3, a3, -256 ; RV32-NEXT: vand.vx v10, v10, a3 ; RV32-NEXT: vor.vv v9, v10, v9 -; RV32-NEXT: vsrl.vi v10, v8, 24 -; RV32-NEXT: lui a4, 4080 -; RV32-NEXT: vand.vx v10, v10, a4 -; RV32-NEXT: li a5, 5 -; RV32-NEXT: vmv.s.x v0, a5 +; RV32-NEXT: vmv.v.i v0, 5 ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; RV32-NEXT: vmv.v.i v11, 0 -; RV32-NEXT: lui a5, 1044480 -; RV32-NEXT: vmerge.vxm v11, v11, a5, v0 +; RV32-NEXT: vmv.v.i v10, 0 +; RV32-NEXT: lui a4, 1044480 +; RV32-NEXT: vmerge.vxm v10, v10, a4, v0 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; RV32-NEXT: vsrl.vi v12, v8, 8 -; RV32-NEXT: vand.vv v12, v12, v11 -; RV32-NEXT: vor.vv v10, v12, v10 -; RV32-NEXT: vor.vv v9, v10, v9 -; RV32-NEXT: vsll.vx v10, v8, a1 +; RV32-NEXT: vsrl.vi v11, v8, 8 +; RV32-NEXT: vand.vv v11, v11, v10 +; RV32-NEXT: vsrl.vi v12, v8, 24 +; RV32-NEXT: lui a4, 4080 +; RV32-NEXT: vand.vx v12, v12, a4 +; RV32-NEXT: vor.vv v11, v11, v12 +; RV32-NEXT: vor.vv v9, v11, v9 +; RV32-NEXT: vsll.vx v11, v8, a1 ; RV32-NEXT: vand.vx v12, v8, a3 ; RV32-NEXT: vsll.vx v12, v12, a2 -; RV32-NEXT: vor.vv v10, v10, v12 -; RV32-NEXT: vand.vx v12, v8, a4 -; RV32-NEXT: vsll.vi v12, v12, 24 -; RV32-NEXT: vand.vv v8, v8, v11 -; RV32-NEXT: vsll.vi v8, v8, 8 -; RV32-NEXT: vor.vv v8, v12, v8 -; RV32-NEXT: vor.vv v8, v10, v8 +; RV32-NEXT: vor.vv v11, v11, v12 +; RV32-NEXT: vand.vv v10, v8, v10 +; RV32-NEXT: vsll.vi v10, v10, 8 +; RV32-NEXT: vand.vx v8, v8, a4 +; RV32-NEXT: vsll.vi v8, v8, 24 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vor.vv v8, v11, v8 ; RV32-NEXT: vor.vv v8, v8, v9 ; RV32-NEXT: vse64.v v8, (a0) ; RV32-NEXT: ret @@ -403,51 +402,50 @@ ; LMULMAX1-RV32-NEXT: addi a4, a4, -256 ; LMULMAX1-RV32-NEXT: vand.vx v11, v11, a4 ; LMULMAX1-RV32-NEXT: vor.vv v10, v11, v10 -; LMULMAX1-RV32-NEXT: vsrl.vi v11, v8, 24 -; LMULMAX1-RV32-NEXT: lui a5, 4080 -; LMULMAX1-RV32-NEXT: vand.vx v11, v11, a5 -; LMULMAX1-RV32-NEXT: li a6, 5 -; LMULMAX1-RV32-NEXT: vmv.s.x v0, a6 +; LMULMAX1-RV32-NEXT: vmv.v.i v0, 5 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; LMULMAX1-RV32-NEXT: vmv.v.i v12, 0 -; LMULMAX1-RV32-NEXT: lui a6, 1044480 -; LMULMAX1-RV32-NEXT: vmerge.vxm v12, v12, a6, v0 +; LMULMAX1-RV32-NEXT: vmv.v.i v11, 0 +; LMULMAX1-RV32-NEXT: lui a5, 1044480 +; LMULMAX1-RV32-NEXT: vmerge.vxm v11, v11, a5, v0 ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; LMULMAX1-RV32-NEXT: vsrl.vi v13, v8, 8 -; LMULMAX1-RV32-NEXT: vand.vv v13, v13, v12 -; LMULMAX1-RV32-NEXT: vor.vv v11, v13, v11 -; LMULMAX1-RV32-NEXT: vor.vv v10, v11, v10 -; LMULMAX1-RV32-NEXT: vsll.vx v11, v8, a2 +; LMULMAX1-RV32-NEXT: vsrl.vi v12, v8, 8 +; LMULMAX1-RV32-NEXT: vand.vv v12, v12, v11 +; LMULMAX1-RV32-NEXT: vsrl.vi v13, v8, 24 +; LMULMAX1-RV32-NEXT: lui a5, 4080 +; LMULMAX1-RV32-NEXT: vand.vx v13, v13, a5 +; LMULMAX1-RV32-NEXT: vor.vv v12, v12, v13 +; 
LMULMAX1-RV32-NEXT: vor.vv v10, v12, v10 +; LMULMAX1-RV32-NEXT: vsll.vx v12, v8, a2 ; LMULMAX1-RV32-NEXT: vand.vx v13, v8, a4 ; LMULMAX1-RV32-NEXT: vsll.vx v13, v13, a3 -; LMULMAX1-RV32-NEXT: vor.vv v11, v11, v13 -; LMULMAX1-RV32-NEXT: vand.vx v13, v8, a5 -; LMULMAX1-RV32-NEXT: vsll.vi v13, v13, 24 -; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v12 -; LMULMAX1-RV32-NEXT: vsll.vi v8, v8, 8 -; LMULMAX1-RV32-NEXT: vor.vv v8, v13, v8 -; LMULMAX1-RV32-NEXT: vor.vv v8, v11, v8 +; LMULMAX1-RV32-NEXT: vor.vv v12, v12, v13 +; LMULMAX1-RV32-NEXT: vand.vv v13, v8, v11 +; LMULMAX1-RV32-NEXT: vsll.vi v13, v13, 8 +; LMULMAX1-RV32-NEXT: vand.vx v8, v8, a5 +; LMULMAX1-RV32-NEXT: vsll.vi v8, v8, 24 +; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v13 +; LMULMAX1-RV32-NEXT: vor.vv v8, v12, v8 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vsrl.vx v10, v9, a2 -; LMULMAX1-RV32-NEXT: vsrl.vx v11, v9, a3 -; LMULMAX1-RV32-NEXT: vand.vx v11, v11, a4 -; LMULMAX1-RV32-NEXT: vor.vv v10, v11, v10 -; LMULMAX1-RV32-NEXT: vsrl.vi v11, v9, 24 -; LMULMAX1-RV32-NEXT: vand.vx v11, v11, a5 -; LMULMAX1-RV32-NEXT: vsrl.vi v13, v9, 8 -; LMULMAX1-RV32-NEXT: vand.vv v13, v13, v12 -; LMULMAX1-RV32-NEXT: vor.vv v11, v13, v11 -; LMULMAX1-RV32-NEXT: vor.vv v10, v11, v10 -; LMULMAX1-RV32-NEXT: vsll.vx v11, v9, a2 +; LMULMAX1-RV32-NEXT: vsrl.vx v12, v9, a3 +; LMULMAX1-RV32-NEXT: vand.vx v12, v12, a4 +; LMULMAX1-RV32-NEXT: vor.vv v10, v12, v10 +; LMULMAX1-RV32-NEXT: vsrl.vi v12, v9, 8 +; LMULMAX1-RV32-NEXT: vand.vv v12, v12, v11 +; LMULMAX1-RV32-NEXT: vsrl.vi v13, v9, 24 +; LMULMAX1-RV32-NEXT: vand.vx v13, v13, a5 +; LMULMAX1-RV32-NEXT: vor.vv v12, v12, v13 +; LMULMAX1-RV32-NEXT: vor.vv v10, v12, v10 +; LMULMAX1-RV32-NEXT: vsll.vx v12, v9, a2 ; LMULMAX1-RV32-NEXT: vand.vx v13, v9, a4 ; LMULMAX1-RV32-NEXT: vsll.vx v13, v13, a3 -; LMULMAX1-RV32-NEXT: vor.vv v11, v11, v13 -; LMULMAX1-RV32-NEXT: vand.vx v13, v9, a5 -; LMULMAX1-RV32-NEXT: vsll.vi v13, v13, 24 -; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v12 -; LMULMAX1-RV32-NEXT: vsll.vi v9, v9, 8 -; LMULMAX1-RV32-NEXT: vor.vv v9, v13, v9 -; LMULMAX1-RV32-NEXT: vor.vv v9, v11, v9 +; LMULMAX1-RV32-NEXT: vor.vv v12, v12, v13 +; LMULMAX1-RV32-NEXT: vand.vv v11, v9, v11 +; LMULMAX1-RV32-NEXT: vsll.vi v11, v11, 8 +; LMULMAX1-RV32-NEXT: vand.vx v9, v9, a5 +; LMULMAX1-RV32-NEXT: vsll.vi v9, v9, 24 +; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v11 +; LMULMAX1-RV32-NEXT: vor.vv v9, v12, v9 ; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v10 ; LMULMAX1-RV32-NEXT: vse64.v v9, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v8, (a1) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll @@ -35,9 +35,8 @@ define <4 x float> @hang_when_merging_stores_after_legalization(<8 x float> %x, <8 x float> %y) optsize { ; LMULMAX1-LABEL: hang_when_merging_stores_after_legalization: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: li a0, 2 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu -; LMULMAX1-NEXT: vmv.s.x v0, a0 +; LMULMAX1-NEXT: vmv.v.i v0, 2 ; LMULMAX1-NEXT: vrgather.vi v12, v8, 0 ; LMULMAX1-NEXT: vrgather.vi v12, v9, 3, v0.t ; LMULMAX1-NEXT: vsetivli zero, 3, e32, m1, tu, ma @@ -54,8 +53,7 @@ ; LMULMAX2-NEXT: li a0, 7 ; LMULMAX2-NEXT: vmul.vx v14, v12, a0 ; LMULMAX2-NEXT: vrgather.vv v12, v8, v14 -; LMULMAX2-NEXT: li a0, 12 -; LMULMAX2-NEXT: vmv.s.x v0, a0 +; LMULMAX2-NEXT: vmv.v.i v0, 12 ; LMULMAX2-NEXT: vadd.vi v8, v14, -14 ; LMULMAX2-NEXT: vrgather.vv 
v12, v10, v8, v0.t ; LMULMAX2-NEXT: vmv1r.v v8, v12 @@ -150,10 +148,9 @@ define void @buildvec_merge0_v4f32(<4 x float>* %x, float %f) { ; CHECK-LABEL: buildvec_merge0_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: li a1, 6 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; CHECK-NEXT: vmv.s.x v0, a1 ; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: vmv.v.i v0, 6 ; CHECK-NEXT: lui a1, 262144 ; CHECK-NEXT: vmerge.vxm v8, v8, a1, v0 ; CHECK-NEXT: vse32.v v8, (a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll @@ -44,8 +44,7 @@ ; RV32-V128-NEXT: vsrl.vi v14, v9, 1 ; RV32-V128-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-V128-NEXT: vrgatherei16.vv v10, v8, v14 -; RV32-V128-NEXT: li a0, 10 -; RV32-V128-NEXT: vmv.s.x v0, a0 +; RV32-V128-NEXT: vmv.v.i v0, 10 ; RV32-V128-NEXT: vrgatherei16.vv v10, v12, v14, v0.t ; RV32-V128-NEXT: vmv.v.v v8, v10 ; RV32-V128-NEXT: ret @@ -57,8 +56,7 @@ ; RV64-V128-NEXT: vid.v v10 ; RV64-V128-NEXT: vsrl.vi v14, v10, 1 ; RV64-V128-NEXT: vrgather.vv v10, v8, v14 -; RV64-V128-NEXT: li a0, 10 -; RV64-V128-NEXT: vmv.s.x v0, a0 +; RV64-V128-NEXT: vmv.v.i v0, 10 ; RV64-V128-NEXT: vrgather.vv v10, v12, v14, v0.t ; RV64-V128-NEXT: vmv.v.v v8, v10 ; RV64-V128-NEXT: ret @@ -69,8 +67,7 @@ ; RV32-V512-NEXT: vid.v v10 ; RV32-V512-NEXT: vsrl.vi v11, v10, 1 ; RV32-V512-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; RV32-V512-NEXT: li a0, 10 -; RV32-V512-NEXT: vmv.s.x v0, a0 +; RV32-V512-NEXT: vmv.v.i v0, 10 ; RV32-V512-NEXT: vrgatherei16.vv v10, v8, v11 ; RV32-V512-NEXT: vrgatherei16.vv v10, v9, v11, v0.t ; RV32-V512-NEXT: vmv.v.v v8, v10 @@ -81,8 +78,7 @@ ; RV64-V512-NEXT: vsetivli zero, 4, e64, m1, ta, mu ; RV64-V512-NEXT: vid.v v10 ; RV64-V512-NEXT: vsrl.vi v11, v10, 1 -; RV64-V512-NEXT: li a0, 10 -; RV64-V512-NEXT: vmv.s.x v0, a0 +; RV64-V512-NEXT: vmv.v.i v0, 10 ; RV64-V512-NEXT: vrgather.vv v10, v8, v11 ; RV64-V512-NEXT: vrgather.vv v10, v9, v11, v0.t ; RV64-V512-NEXT: vmv.v.v v8, v10 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll @@ -5,9 +5,8 @@ define <4 x half> @shuffle_v4f16(<4 x half> %x, <4 x half> %y) { ; CHECK-LABEL: shuffle_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: li a0, 11 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma -; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vmv.v.i v0, 11 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %s = shufflevector <4 x half> %x, <4 x half> %y, <4 x i32> @@ -27,49 +26,27 @@ } define <4 x double> @shuffle_fv_v4f64(<4 x double> %x) { -; RV32-LABEL: shuffle_fv_v4f64: -; RV32: # %bb.0: -; RV32-NEXT: li a0, 9 -; RV32-NEXT: lui a1, %hi(.LCPI2_0) -; RV32-NEXT: fld fa5, %lo(.LCPI2_0)(a1) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma -; RV32-NEXT: vmv.s.x v0, a0 -; RV32-NEXT: vfmerge.vfm v8, v8, fa5, v0 -; RV32-NEXT: ret -; -; RV64-LABEL: shuffle_fv_v4f64: -; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI2_0) -; RV64-NEXT: fld fa5, %lo(.LCPI2_0)(a0) -; RV64-NEXT: li a0, 9 -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma -; RV64-NEXT: vmv.s.x v0, a0 -; RV64-NEXT: vfmerge.vfm v8, v8, fa5, v0 -; RV64-NEXT: ret +; CHECK-LABEL: shuffle_fv_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a0, %hi(.LCPI2_0) +; CHECK-NEXT: fld fa5, 
%lo(.LCPI2_0)(a0) +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; CHECK-NEXT: vmv.v.i v0, 9 +; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0 +; CHECK-NEXT: ret %s = shufflevector <4 x double> , <4 x double> %x, <4 x i32> ret <4 x double> %s } define <4 x double> @shuffle_vf_v4f64(<4 x double> %x) { -; RV32-LABEL: shuffle_vf_v4f64: -; RV32: # %bb.0: -; RV32-NEXT: li a0, 6 -; RV32-NEXT: lui a1, %hi(.LCPI3_0) -; RV32-NEXT: fld fa5, %lo(.LCPI3_0)(a1) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma -; RV32-NEXT: vmv.s.x v0, a0 -; RV32-NEXT: vfmerge.vfm v8, v8, fa5, v0 -; RV32-NEXT: ret -; -; RV64-LABEL: shuffle_vf_v4f64: -; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI3_0) -; RV64-NEXT: fld fa5, %lo(.LCPI3_0)(a0) -; RV64-NEXT: li a0, 6 -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma -; RV64-NEXT: vmv.s.x v0, a0 -; RV64-NEXT: vfmerge.vfm v8, v8, fa5, v0 -; RV64-NEXT: ret +; CHECK-LABEL: shuffle_vf_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a0, %hi(.LCPI3_0) +; CHECK-NEXT: fld fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; CHECK-NEXT: vmv.v.i v0, 6 +; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0 +; CHECK-NEXT: ret %s = shufflevector <4 x double> %x, <4 x double> , <4 x i32> ret <4 x double> %s } @@ -129,8 +106,7 @@ ; RV32-NEXT: addi a0, a0, %lo(.LCPI6_0) ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vle16.v v14, (a0) -; RV32-NEXT: li a0, 8 -; RV32-NEXT: vmv.s.x v0, a0 +; RV32-NEXT: vmv.v.i v0, 8 ; RV32-NEXT: vrgatherei16.vv v12, v8, v14 ; RV32-NEXT: vrgather.vi v12, v10, 1, v0.t ; RV32-NEXT: vmv.v.v v8, v12 @@ -142,8 +118,7 @@ ; RV64-NEXT: addi a0, a0, %lo(.LCPI6_0) ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vle64.v v14, (a0) -; RV64-NEXT: li a0, 8 -; RV64-NEXT: vmv.s.x v0, a0 +; RV64-NEXT: vmv.v.i v0, 8 ; RV64-NEXT: vrgather.vv v12, v8, v14 ; RV64-NEXT: vrgather.vi v12, v10, 1, v0.t ; RV64-NEXT: vmv.v.v v8, v12 @@ -155,14 +130,13 @@ define <4 x double> @vrgather_shuffle_xv_v4f64(<4 x double> %x) { ; RV32-LABEL: vrgather_shuffle_xv_v4f64: ; RV32: # %bb.0: -; RV32-NEXT: li a0, 12 ; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma -; RV32-NEXT: vmv.s.x v0, a0 +; RV32-NEXT: vid.v v12 ; RV32-NEXT: lui a0, %hi(.LCPI7_0) ; RV32-NEXT: addi a0, a0, %lo(.LCPI7_0) ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vid.v v12 ; RV32-NEXT: vrsub.vi v12, v12, 4 +; RV32-NEXT: vmv.v.i v0, 12 ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vrgatherei16.vv v10, v8, v12, v0.t ; RV32-NEXT: vmv.v.v v8, v10 @@ -174,9 +148,8 @@ ; RV64-NEXT: lui a0, %hi(.LCPI7_0) ; RV64-NEXT: addi a0, a0, %lo(.LCPI7_0) ; RV64-NEXT: vlse64.v v10, (a0), zero -; RV64-NEXT: li a0, 12 -; RV64-NEXT: vmv.s.x v0, a0 ; RV64-NEXT: vid.v v12 +; RV64-NEXT: vmv.v.i v0, 12 ; RV64-NEXT: vrsub.vi v12, v12, 4 ; RV64-NEXT: vrgather.vv v10, v8, v12, v0.t ; RV64-NEXT: vmv.v.v v8, v10 @@ -195,7 +168,7 @@ ; RV32-NEXT: addi a1, a1, %lo(.LCPI8_0) ; RV32-NEXT: vlse64.v v10, (a1), zero ; RV32-NEXT: vmul.vx v12, v12, a0 -; RV32-NEXT: vmv.s.x v0, a0 +; RV32-NEXT: vmv.v.i v0, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vrgatherei16.vv v10, v8, v12, v0.t ; RV32-NEXT: vmv.v.v v8, v10 @@ -209,7 +182,7 @@ ; RV64-NEXT: addi a0, a0, %lo(.LCPI8_0) ; RV64-NEXT: vlse64.v v10, (a0), zero ; RV64-NEXT: li a0, 3 -; RV64-NEXT: vmv.s.x v0, a0 +; RV64-NEXT: vmv.v.i v0, 3 ; RV64-NEXT: vmul.vx v12, v12, a0 ; RV64-NEXT: vrgather.vv v10, v8, v12, v0.t ; RV64-NEXT: vmv.v.v v8, v10 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll 
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll @@ -187,8 +187,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: li a2, -1 -; RV64-NEXT: vmv.s.x v12, a2 +; RV64-NEXT: vmv.v.i v12, -1 ; RV64-NEXT: slli a1, a1, 32 ; RV64-NEXT: srli a1, a1, 32 ; RV64-NEXT: addi a2, a1, 1 @@ -224,8 +223,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: li a2, 6 -; RV32-NEXT: vmv.s.x v12, a2 +; RV32-NEXT: vmv.v.i v12, 6 ; RV32-NEXT: addi a2, a1, 1 ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma ; RV32-NEXT: vslideup.vx v8, v12, a1 @@ -237,8 +235,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: li a2, 6 -; RV64-NEXT: vmv.s.x v12, a2 +; RV64-NEXT: vmv.v.i v12, 6 ; RV64-NEXT: slli a1, a1, 32 ; RV64-NEXT: srli a1, a1, 32 ; RV64-NEXT: addi a2, a1, 1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll @@ -57,8 +57,7 @@ ; RV32-V128-NEXT: vsrl.vi v14, v9, 1 ; RV32-V128-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-V128-NEXT: vrgatherei16.vv v10, v8, v14 -; RV32-V128-NEXT: li a0, 10 -; RV32-V128-NEXT: vmv.s.x v0, a0 +; RV32-V128-NEXT: vmv.v.i v0, 10 ; RV32-V128-NEXT: vrgatherei16.vv v10, v12, v14, v0.t ; RV32-V128-NEXT: vmv.v.v v8, v10 ; RV32-V128-NEXT: ret @@ -70,8 +69,7 @@ ; RV64-V128-NEXT: vid.v v10 ; RV64-V128-NEXT: vsrl.vi v14, v10, 1 ; RV64-V128-NEXT: vrgather.vv v10, v8, v14 -; RV64-V128-NEXT: li a0, 10 -; RV64-V128-NEXT: vmv.s.x v0, a0 +; RV64-V128-NEXT: vmv.v.i v0, 10 ; RV64-V128-NEXT: vrgather.vv v10, v12, v14, v0.t ; RV64-V128-NEXT: vmv.v.v v8, v10 ; RV64-V128-NEXT: ret @@ -82,8 +80,7 @@ ; RV32-V512-NEXT: vid.v v10 ; RV32-V512-NEXT: vsrl.vi v11, v10, 1 ; RV32-V512-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; RV32-V512-NEXT: li a0, 10 -; RV32-V512-NEXT: vmv.s.x v0, a0 +; RV32-V512-NEXT: vmv.v.i v0, 10 ; RV32-V512-NEXT: vrgatherei16.vv v10, v8, v11 ; RV32-V512-NEXT: vrgatherei16.vv v10, v9, v11, v0.t ; RV32-V512-NEXT: vmv.v.v v8, v10 @@ -94,8 +91,7 @@ ; RV64-V512-NEXT: vsetivli zero, 4, e64, m1, ta, mu ; RV64-V512-NEXT: vid.v v10 ; RV64-V512-NEXT: vsrl.vi v11, v10, 1 -; RV64-V512-NEXT: li a0, 10 -; RV64-V512-NEXT: vmv.s.x v0, a0 +; RV64-V512-NEXT: vmv.v.i v0, 10 ; RV64-V512-NEXT: vrgather.vv v10, v8, v11 ; RV64-V512-NEXT: vrgather.vv v10, v9, v11, v0.t ; RV64-V512-NEXT: vmv.v.v v8, v10 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll @@ -5,9 +5,8 @@ define <4 x i16> @shuffle_v4i16(<4 x i16> %x, <4 x i16> %y) { ; CHECK-LABEL: shuffle_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: li a0, 11 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma -; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vmv.v.i v0, 11 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %s = shufflevector <4 x i16> %x, <4 x i16> %y, <4 x i32> @@ -29,9 +28,8 @@ define <4 x i16> @shuffle_xv_v4i16(<4 x i16> %x) { ; CHECK-LABEL: shuffle_xv_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: li a0, 9 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma -; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: 
vmv.v.i v0, 9 ; CHECK-NEXT: vmerge.vim v8, v8, 5, v0 ; CHECK-NEXT: ret %s = shufflevector <4 x i16> , <4 x i16> %x, <4 x i32> @@ -41,9 +39,8 @@ define <4 x i16> @shuffle_vx_v4i16(<4 x i16> %x) { ; CHECK-LABEL: shuffle_vx_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: li a0, 6 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma -; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vmv.v.i v0, 6 ; CHECK-NEXT: vmerge.vim v8, v8, 5, v0 ; CHECK-NEXT: ret %s = shufflevector <4 x i16> %x, <4 x i16> , <4 x i32> @@ -85,8 +82,7 @@ ; CHECK-NEXT: addi a0, a0, %lo(.LCPI6_0) ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v11, (a0) -; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vmv.v.i v0, 8 ; CHECK-NEXT: vrgather.vv v10, v8, v11 ; CHECK-NEXT: vrgather.vi v10, v9, 1, v0.t ; CHECK-NEXT: vmv1r.v v8, v10 @@ -98,11 +94,10 @@ define <4 x i16> @vrgather_shuffle_xv_v4i16(<4 x i16> %x) { ; CHECK-LABEL: vrgather_shuffle_xv_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: li a0, 12 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu -; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vi v10, v9, 4 +; CHECK-NEXT: vmv.v.i v0, 12 ; CHECK-NEXT: vmv.v.i v9, 5 ; CHECK-NEXT: vrgather.vv v9, v8, v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 @@ -118,7 +113,7 @@ ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: li a0, 3 ; CHECK-NEXT: vmul.vx v10, v9, a0 -; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vmv.v.i v0, 3 ; CHECK-NEXT: vmv.v.i v9, 5 ; CHECK-NEXT: vrgather.vv v9, v8, v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v9 @@ -195,9 +190,8 @@ ; ; RV64-LABEL: vrgather_shuffle_vv_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: li a0, 5 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; RV64-NEXT: vmv.s.x v16, a0 +; RV64-NEXT: vmv.v.i v16, 5 ; RV64-NEXT: vmv.v.i v20, 2 ; RV64-NEXT: lui a0, %hi(.LCPI11_0) ; RV64-NEXT: addi a0, a0, %lo(.LCPI11_0) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll @@ -229,9 +229,8 @@ ; ; LMULMAX1-RV32-LABEL: splat_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: li a3, 5 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; LMULMAX1-RV32-NEXT: vmv.s.x v0, a3 +; LMULMAX1-RV32-NEXT: vmv.v.i v0, 5 ; LMULMAX1-RV32-NEXT: vmv.v.x v8, a2 ; LMULMAX1-RV32-NEXT: vmerge.vxm v8, v8, a1, v0 ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 @@ -914,9 +913,8 @@ ; LMULMAX1-RV32-NEXT: vle64.v v14, (a0) ; LMULMAX1-RV32-NEXT: addi a0, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v15, (a0) -; LMULMAX1-RV32-NEXT: li a0, 5 -; LMULMAX1-RV32-NEXT: vmv.s.x v0, a0 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; LMULMAX1-RV32-NEXT: vmv.v.i v0, 5 ; LMULMAX1-RV32-NEXT: vmv.v.x v16, a2 ; LMULMAX1-RV32-NEXT: vmerge.vxm v16, v16, a1, v0 ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll @@ -1727,8 +1727,7 @@ ; RV32-NEXT: vdiv.vv v9, v9, v10 ; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma ; RV32-NEXT: vslideup.vi v10, v9, 4 -; RV32-NEXT: li a1, 6 -; RV32-NEXT: vmv.s.x v0, a1 +; RV32-NEXT: vmv.v.i v0, 6 ; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV32-NEXT: vmv.v.i v9, -7 ; RV32-NEXT: vmerge.vim v9, v9, 7, v0 @@ -1753,8 +1752,7 @@ ; RV64-NEXT: vslidedown.vi v9, v8, 4 ; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma 
; RV64-NEXT: vdiv.vv v9, v9, v10 -; RV64-NEXT: li a1, 6 -; RV64-NEXT: vmv.s.x v0, a1 +; RV64-NEXT: vmv.v.i v0, 6 ; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV64-NEXT: vmv.v.i v10, -7 ; RV64-NEXT: vmerge.vim v10, v10, 7, v0 @@ -1779,11 +1777,10 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vle32.v v8, (a0) -; RV32-NEXT: li a1, 5 -; RV32-NEXT: vmv.s.x v0, a1 ; RV32-NEXT: lui a1, 419430 ; RV32-NEXT: addi a1, a1, 1639 ; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vmv.v.i v0, 5 ; RV32-NEXT: lui a1, 629146 ; RV32-NEXT: addi a1, a1, -1639 ; RV32-NEXT: vmerge.vxm v9, v9, a1, v0 @@ -6069,11 +6066,10 @@ ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a1) -; LMULMAX1-RV32-NEXT: li a2, 5 -; LMULMAX1-RV32-NEXT: vmv.s.x v0, a2 ; LMULMAX1-RV32-NEXT: lui a2, 419430 ; LMULMAX1-RV32-NEXT: addi a2, a2, 1639 ; LMULMAX1-RV32-NEXT: vmv.v.x v10, a2 +; LMULMAX1-RV32-NEXT: vmv.v.i v0, 5 ; LMULMAX1-RV32-NEXT: lui a2, 629146 ; LMULMAX1-RV32-NEXT: addi a2, a2, -1639 ; LMULMAX1-RV32-NEXT: vmerge.vxm v10, v10, a2, v0 @@ -6150,25 +6146,24 @@ ; LMULMAX2-RV64-LABEL: mulhs_v4i64: ; LMULMAX2-RV64: # %bb.0: ; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma -; LMULMAX2-RV64-NEXT: li a1, 5 -; LMULMAX2-RV64-NEXT: vmv.s.x v0, a1 ; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI188_0) ; LMULMAX2-RV64-NEXT: addi a1, a1, %lo(.LCPI188_0) ; LMULMAX2-RV64-NEXT: vlse64.v v8, (a1), zero ; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI188_1) ; LMULMAX2-RV64-NEXT: ld a1, %lo(.LCPI188_1)(a1) ; LMULMAX2-RV64-NEXT: vle64.v v10, (a0) -; LMULMAX2-RV64-NEXT: vmv.v.i v12, -1 -; LMULMAX2-RV64-NEXT: vmerge.vim v12, v12, 0, v0 +; LMULMAX2-RV64-NEXT: vmv.v.i v0, 5 ; LMULMAX2-RV64-NEXT: vmerge.vxm v8, v8, a1, v0 ; LMULMAX2-RV64-NEXT: vmulh.vv v8, v10, v8 -; LMULMAX2-RV64-NEXT: vmacc.vv v8, v10, v12 -; LMULMAX2-RV64-NEXT: li a1, 63 -; LMULMAX2-RV64-NEXT: vsrl.vx v10, v8, a1 -; LMULMAX2-RV64-NEXT: vmv.v.i v12, 1 +; LMULMAX2-RV64-NEXT: vmv.v.i v12, -1 ; LMULMAX2-RV64-NEXT: vmerge.vim v12, v12, 0, v0 -; LMULMAX2-RV64-NEXT: vsra.vv v8, v8, v12 -; LMULMAX2-RV64-NEXT: vadd.vv v8, v8, v10 +; LMULMAX2-RV64-NEXT: vmadd.vv v12, v10, v8 +; LMULMAX2-RV64-NEXT: li a1, 63 +; LMULMAX2-RV64-NEXT: vsrl.vx v8, v12, a1 +; LMULMAX2-RV64-NEXT: vmv.v.i v10, 1 +; LMULMAX2-RV64-NEXT: vmerge.vim v10, v10, 0, v0 +; LMULMAX2-RV64-NEXT: vsra.vv v10, v12, v10 +; LMULMAX2-RV64-NEXT: vadd.vv v8, v10, v8 ; LMULMAX2-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV64-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll @@ -16,10 +16,9 @@ ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vadd.vv v9, v8, v8 ; CHECK-NEXT: vrgather.vv v8, v10, v9 -; CHECK-NEXT: li a0, 4 -; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v12, v10, 4 +; CHECK-NEXT: vmv.v.i v0, 4 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vi v8, v12, 0, v0.t ; CHECK-NEXT: vadd.vi v11, v9, 1 @@ -907,15 +906,14 @@ ; RV64-NEXT: addi a1, a1, 16 ; RV64-NEXT: vs4r.v v8, (a1) # Unknown-size Folded Spill ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma -; RV64-NEXT: li a1, 1 ; RV64-NEXT: vmv.v.i v8, 7 -; RV64-NEXT: csrr a2, vlenb -; RV64-NEXT: li a3, 21 -; RV64-NEXT: mul a2, a2, a3 -; RV64-NEXT: add a2, sp, a2 -; RV64-NEXT: addi a2, a2, 16 -; 
RV64-NEXT: vs4r.v v8, (a2) # Unknown-size Folded Spill -; RV64-NEXT: vmv.s.x v12, a1 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: li a2, 21 +; RV64-NEXT: mul a1, a1, a2 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 16 +; RV64-NEXT: vs4r.v v8, (a1) # Unknown-size Folded Spill +; RV64-NEXT: vmv.v.i v12, 1 ; RV64-NEXT: vsetivli zero, 6, e64, m4, tu, ma ; RV64-NEXT: vslideup.vi v8, v12, 5 ; RV64-NEXT: csrr a1, vlenb diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll @@ -100,16 +100,14 @@ define <3 x i1> @buildvec_mask_v1i1() { ; CHECK-LABEL: buildvec_mask_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma -; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vmv.v.i v0, 2 ; CHECK-NEXT: ret ; ; ZVE32F-LABEL: buildvec_mask_v1i1: ; ZVE32F: # %bb.0: -; ZVE32F-NEXT: li a0, 2 ; ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma -; ZVE32F-NEXT: vmv.s.x v0, a0 +; ZVE32F-NEXT: vmv.v.i v0, 2 ; ZVE32F-NEXT: ret ret <3 x i1> } @@ -117,16 +115,14 @@ define <3 x i1> @buildvec_mask_optsize_v1i1() optsize { ; CHECK-LABEL: buildvec_mask_optsize_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: li a0, 2 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma -; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vmv.v.i v0, 2 ; CHECK-NEXT: ret ; ; ZVE32F-LABEL: buildvec_mask_optsize_v1i1: ; ZVE32F: # %bb.0: -; ZVE32F-NEXT: li a0, 2 ; ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma -; ZVE32F-NEXT: vmv.s.x v0, a0 +; ZVE32F-NEXT: vmv.v.i v0, 2 ; ZVE32F-NEXT: ret ret <3 x i1> } @@ -134,16 +130,14 @@ define <4 x i1> @buildvec_mask_v4i1() { ; CHECK-LABEL: buildvec_mask_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: li a0, 6 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma -; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vmv.v.i v0, 6 ; CHECK-NEXT: ret ; ; ZVE32F-LABEL: buildvec_mask_v4i1: ; ZVE32F: # %bb.0: -; ZVE32F-NEXT: li a0, 6 ; ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma -; ZVE32F-NEXT: vmv.s.x v0, a0 +; ZVE32F-NEXT: vmv.v.i v0, 6 ; ZVE32F-NEXT: ret ret <4 x i1> } @@ -151,9 +145,8 @@ define <4 x i1> @buildvec_mask_nonconst_v4i1(i1 %x, i1 %y) { ; CHECK-LABEL: buildvec_mask_nonconst_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: li a2, 3 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma -; CHECK-NEXT: vmv.s.x v0, a2 +; CHECK-NEXT: vmv.v.i v0, 3 ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: vand.vi v8, v8, 1 @@ -162,9 +155,8 @@ ; ; ZVE32F-LABEL: buildvec_mask_nonconst_v4i1: ; ZVE32F: # %bb.0: -; ZVE32F-NEXT: li a2, 3 ; ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma -; ZVE32F-NEXT: vmv.s.x v0, a2 +; ZVE32F-NEXT: vmv.v.i v0, 3 ; ZVE32F-NEXT: vmv.v.x v8, a1 ; ZVE32F-NEXT: vmerge.vxm v8, v8, a0, v0 ; ZVE32F-NEXT: vand.vi v8, v8, 1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll @@ -1431,8 +1431,8 @@ ; RV32-NEXT: vmv.v.x v9, a1 ; RV32-NEXT: vmsne.vi v9, v9, 0 ; RV32-NEXT: vmand.mm v0, v9, v0 -; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; RV32-NEXT: vmv.v.i v9, 1 +; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; RV32-NEXT: vmerge.vvm v8, v9, v8, v0 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: mv a1, a2 @@ -1452,8 +1452,8 @@ ; RV64-NEXT: vmv.v.x v9, a1 ; 
RV64-NEXT: vmsne.vi v9, v9, 0 ; RV64-NEXT: vmand.mm v0, v9, v0 -; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; RV64-NEXT: vmv.v.i v9, 1 +; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; RV64-NEXT: vmerge.vvm v8, v9, v8, v0 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: mv a1, a2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll @@ -2199,9 +2199,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v9, -1 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vredand.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 @@ -2230,9 +2228,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v10, -1 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vredand.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 @@ -2263,9 +2259,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v12, -1 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vredand.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 @@ -2296,9 +2290,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v16, -1 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vredand.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 @@ -2332,9 +2324,7 @@ ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v16, (a0) ; RV32-NEXT: vand.vv v8, v8, v16 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v16, -1 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vredand.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 @@ -2377,9 +2367,7 @@ ; RV32-NEXT: vand.vv v16, v24, v16 ; RV32-NEXT: vand.vv v8, v8, v0 ; RV32-NEXT: vand.vv v8, v8, v16 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v16, -1 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vredand.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 @@ -5686,9 +5674,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v9, -1 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vredminu.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 @@ -5717,9 +5703,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v10, -1 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vredminu.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 @@ -5750,9 +5734,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v12, -1 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vredminu.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 @@ -5783,9 +5765,7 @@ ; RV32: # %bb.0: ; 
RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v16, -1 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vredminu.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 @@ -5819,9 +5799,7 @@ ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v16, (a0) ; RV32-NEXT: vminu.vv v8, v8, v16 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v16, -1 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vredminu.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 @@ -5864,9 +5842,7 @@ ; RV32-NEXT: vminu.vv v16, v24, v16 ; RV32-NEXT: vminu.vv v8, v8, v0 ; RV32-NEXT: vminu.vv v8, v8, v16 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v16, -1 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vredminu.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shufflevector-vnsrl.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shufflevector-vnsrl.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shufflevector-vnsrl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shufflevector-vnsrl.ll @@ -181,8 +181,7 @@ ; ZVE32F-NEXT: vle32.v v8, (a0) ; ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu ; ZVE32F-NEXT: vslidedown.vi v9, v8, 2 -; ZVE32F-NEXT: li a0, 2 -; ZVE32F-NEXT: vmv.s.x v0, a0 +; ZVE32F-NEXT: vmv.v.i v0, 2 ; ZVE32F-NEXT: vrgather.vi v10, v8, 1 ; ZVE32F-NEXT: vrgather.vi v10, v9, 1, v0.t ; ZVE32F-NEXT: vse32.v v10, (a1) @@ -237,8 +236,7 @@ ; ZVE32F-NEXT: vle32.v v8, (a0) ; ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu ; ZVE32F-NEXT: vslidedown.vi v9, v8, 2 -; ZVE32F-NEXT: li a0, 2 -; ZVE32F-NEXT: vmv.s.x v0, a0 +; ZVE32F-NEXT: vmv.v.i v0, 2 ; ZVE32F-NEXT: vrgather.vi v10, v8, 1 ; ZVE32F-NEXT: vrgather.vi v10, v9, 1, v0.t ; ZVE32F-NEXT: vse32.v v10, (a1) @@ -282,8 +280,7 @@ ; V-NEXT: vle64.v v8, (a0) ; V-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; V-NEXT: vslidedown.vi v9, v8, 2 -; V-NEXT: li a0, 2 -; V-NEXT: vmv.s.x v0, a0 +; V-NEXT: vmv.v.i v0, 2 ; V-NEXT: vrgather.vi v10, v8, 1 ; V-NEXT: vrgather.vi v10, v9, 1, v0.t ; V-NEXT: vse64.v v10, (a1) @@ -335,8 +332,7 @@ ; V-NEXT: vle64.v v8, (a0) ; V-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; V-NEXT: vslidedown.vi v9, v8, 2 -; V-NEXT: li a0, 2 -; V-NEXT: vmv.s.x v0, a0 +; V-NEXT: vmv.v.i v0, 2 ; V-NEXT: vrgather.vi v10, v8, 1 ; V-NEXT: vrgather.vi v10, v9, 1, v0.t ; V-NEXT: vse64.v v10, (a1) diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll @@ -781,9 +781,9 @@ define @insertelt_nxv2i64_imm_c10( %v) { ; CHECK-LABEL: insertelt_nxv2i64_imm_c10: ; CHECK: # %bb.0: -; CHECK-NEXT: li a0, 10 +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-NEXT: vmv.v.i v10, 10 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, ma -; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 10, i32 3 @@ -793,9 +793,8 @@ define @insertelt_nxv2i64_idx_c10( %v, i32 %idx) { ; CHECK-LABEL: insertelt_nxv2i64_idx_c10: ; CHECK: # %bb.0: -; CHECK-NEXT: li a1, 10 -; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, ma -; CHECK-NEXT: vmv.s.x v10, a1 +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma +; CHECK-NEXT: vmv.v.i v10, 10 ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; CHECK-NEXT: 
vslideup.vx v8, v10, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll --- a/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll @@ -49,8 +49,7 @@ ; CHECK-NEXT: vid.v v11 ; CHECK-NEXT: vrsub.vi v12, v11, 7 ; CHECK-NEXT: vrgather.vv v10, v8, v12 -; CHECK-NEXT: li a0, 15 -; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vmv.v.i v0, 15 ; CHECK-NEXT: vrsub.vi v8, v11, 3 ; CHECK-NEXT: vrgather.vv v10, v9, v8, v0.t ; CHECK-NEXT: vmv1r.v v8, v10 @@ -195,8 +194,7 @@ ; CHECK-NEXT: vid.v v11 ; CHECK-NEXT: vrsub.vi v12, v11, 7 ; CHECK-NEXT: vrgather.vv v10, v8, v12 -; CHECK-NEXT: li a0, 15 -; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vmv.v.i v0, 15 ; CHECK-NEXT: vrsub.vi v8, v11, 3 ; CHECK-NEXT: vrgather.vv v10, v9, v8, v0.t ; CHECK-NEXT: vmv.v.v v8, v10 @@ -341,9 +339,8 @@ ; CHECK-NEXT: vid.v v14 ; CHECK-NEXT: vrsub.vi v16, v14, 7 ; CHECK-NEXT: vrgather.vv v10, v8, v16 -; CHECK-NEXT: li a0, 15 -; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vrsub.vi v8, v14, 3 +; CHECK-NEXT: vmv.v.i v0, 15 ; CHECK-NEXT: vrgather.vv v10, v12, v8, v0.t ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -494,10 +491,9 @@ ; RV32-NEXT: vrsub.vi v19, v18, 7 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32-NEXT: vrgatherei16.vv v12, v8, v19 -; RV32-NEXT: li a0, 15 -; RV32-NEXT: vmv.s.x v0, a0 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV32-NEXT: vrsub.vi v8, v18, 3 +; RV32-NEXT: vmv.v.i v0, 15 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vrgatherei16.vv v12, v16, v8, v0.t ; RV32-NEXT: vmv.v.v v8, v12 @@ -510,9 +506,8 @@ ; RV64-NEXT: vid.v v20 ; RV64-NEXT: vrsub.vi v24, v20, 7 ; RV64-NEXT: vrgather.vv v12, v8, v24 -; RV64-NEXT: li a0, 15 -; RV64-NEXT: vmv.s.x v0, a0 ; RV64-NEXT: vrsub.vi v8, v20, 3 +; RV64-NEXT: vmv.v.i v0, 15 ; RV64-NEXT: vrgather.vv v12, v16, v8, v0.t ; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret @@ -567,8 +562,7 @@ ; CHECK-NEXT: vid.v v11 ; CHECK-NEXT: vrsub.vi v12, v11, 7 ; CHECK-NEXT: vrgather.vv v10, v8, v12 -; CHECK-NEXT: li a0, 15 -; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vmv.v.i v0, 15 ; CHECK-NEXT: vrsub.vi v8, v11, 3 ; CHECK-NEXT: vrgather.vv v10, v9, v8, v0.t ; CHECK-NEXT: vmv.v.v v8, v10 @@ -687,9 +681,8 @@ ; CHECK-NEXT: vid.v v14 ; CHECK-NEXT: vrsub.vi v16, v14, 7 ; CHECK-NEXT: vrgather.vv v10, v8, v16 -; CHECK-NEXT: li a0, 15 -; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vrsub.vi v8, v14, 3 +; CHECK-NEXT: vmv.v.i v0, 15 ; CHECK-NEXT: vrgather.vv v10, v12, v8, v0.t ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -787,10 +780,9 @@ ; RV32-NEXT: vrsub.vi v19, v18, 7 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32-NEXT: vrgatherei16.vv v12, v8, v19 -; RV32-NEXT: li a0, 15 -; RV32-NEXT: vmv.s.x v0, a0 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV32-NEXT: vrsub.vi v8, v18, 3 +; RV32-NEXT: vmv.v.i v0, 15 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vrgatherei16.vv v12, v16, v8, v0.t ; RV32-NEXT: vmv.v.v v8, v12 @@ -803,9 +795,8 @@ ; RV64-NEXT: vid.v v20 ; RV64-NEXT: vrsub.vi v24, v20, 7 ; RV64-NEXT: vrgather.vv v12, v8, v24 -; RV64-NEXT: li a0, 15 -; RV64-NEXT: vmv.s.x v0, a0 ; RV64-NEXT: vrsub.vi v8, v20, 3 +; RV64-NEXT: vmv.v.i v0, 15 ; RV64-NEXT: vrgather.vv v12, v16, v8, v0.t ; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll --- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll @@ -109,8 +109,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v10, v8, 2 -; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vmv.v.i v0, 2 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vrgather.vi v9, v8, 1 ; CHECK-NEXT: vrgather.vi v9, v10, 1, v0.t @@ -196,8 +195,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v10, v8, 2 -; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vmv.v.i v0, 2 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vrgather.vi v9, v8, 1 ; CHECK-NEXT: vrgather.vi v9, v10, 1, v0.t diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll --- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll +++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll @@ -662,10 +662,8 @@ ; RV32MV-NEXT: vmv.v.i v10, 1 ; RV32MV-NEXT: vmerge.vim v10, v10, -1, v0 ; RV32MV-NEXT: vand.vv v8, v8, v10 -; RV32MV-NEXT: li a0, 2 -; RV32MV-NEXT: vmv.s.x v10, a0 -; RV32MV-NEXT: li a0, 1 -; RV32MV-NEXT: vmv.s.x v12, a0 +; RV32MV-NEXT: vmv.v.i v10, 2 +; RV32MV-NEXT: vmv.v.i v12, 1 ; RV32MV-NEXT: vmv.v.i v14, 0 ; RV32MV-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV32MV-NEXT: vslideup.vi v14, v12, 2