Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2817,9 +2817,21 @@
                                  MVT VT, SDLoc DL, SelectionDAG &DAG,
                                  const RISCVSubtarget &Subtarget) {
   SDValue Passthru = DAG.getUNDEF(VT);
-  if (VT.isFloatingPoint())
+  if (VT.isFloatingPoint()) {
     // TODO: Use vmv.v.i for appropriate constants
-    return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT, Passthru, Scalar, VL);
+    // Use M1 or smaller to avoid over constraining register allocation
+    const MVT XLenVT = Subtarget.getXLenVT();
+    const MVT M1VT = getLMUL1VT(VT);
+    auto InnerVT = VT.bitsLE(M1VT) ? VT : M1VT;
+    SDValue Result = DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, InnerVT,
+                                 DAG.getUNDEF(InnerVT), Scalar, VL);
+    if (VT != InnerVT)
+      Result = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
+                           DAG.getUNDEF(VT),
+                           Result, DAG.getConstant(0, DL, XLenVT));
+    return Result;
+  }
+
 
   // Avoid the tricky legalization cases by falling back to using the
   // splat code which already handles it gracefully.
@@ -2844,7 +2856,17 @@
         VT.bitsLE(getLMUL1VT(VT)))
       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Scalar, VL);
   }
-  return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru, Scalar, VL);
+  // Use M1 or smaller to avoid over constraining register allocation
+  const MVT M1VT = getLMUL1VT(VT);
+  auto InnerVT = VT.bitsLE(M1VT) ? VT : M1VT;
+  SDValue Result = DAG.getNode(RISCVISD::VMV_S_X_VL, DL, InnerVT,
+                               DAG.getUNDEF(InnerVT), Scalar, VL);
+  if (VT != InnerVT)
+    Result = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
+                         DAG.getUNDEF(VT),
+                         Result, DAG.getConstant(0, DL, XLenVT));
+  return Result;
+ }
 
 static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, bool &SwapSources,
Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll
+++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll
@@ -109,13 +109,13 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vmv.s.x v8, a0
-; CHECK-NEXT:    vmv.v.i v12, 0
-; CHECK-NEXT:    vmerge.vim v12, v12, 1, v0
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    vmv.s.x v12, a0
 ; CHECK-NEXT:    vsetivli zero, 2, e8, m4, tu, ma
-; CHECK-NEXT:    vslideup.vi v12, v8, 1
+; CHECK-NEXT:    vslideup.vi v8, v12, 1
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vand.vi v8, v12, 1
+; CHECK-NEXT:    vand.vi v8, v8, 1
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
   %y = insertelement <64 x i1> %x, i1 %elt, i64 1
@@ -127,14 +127,14 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a2, 64
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
-; CHECK-NEXT:    vmv.s.x v8, a0
-; CHECK-NEXT:    vmv.v.i v12, 0
-; CHECK-NEXT:    vmerge.vim v12, v12, 1, v0
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    vmv.s.x v12, a0
 ; CHECK-NEXT:    addi a0, a1, 1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
-; CHECK-NEXT:    vslideup.vx v12, v8, a1
+; CHECK-NEXT:    vslideup.vx v8, v12, a1
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
-; CHECK-NEXT:    vand.vi v8, v12, 1
+; CHECK-NEXT:    vand.vi v8, v8, 1
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
   %y = insertelement <64 x i1> %x, i1 %elt, i32 %idx
Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
+++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
@@ -202,8 +202,9 @@
 ; RV64-LABEL: vrgather_shuffle_vv_v8i64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    li a0, 5
-; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vsetivli zero, 8, e64, m1, ta, ma
 ; RV64-NEXT:    vmv.s.x v16, a0
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vmv.v.i v20, 2
 ; RV64-NEXT:    vsetvli zero, zero, e64, m4, tu, ma
 ; RV64-NEXT:    vslideup.vi v20, v16, 7
Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -2512,7 +2512,7 @@
 ; RV64ZVE32F-NEXT:  .LBB34_10: # %cond.load1
 ; RV64ZVE32F-NEXT:    ld a2, 8(a0)
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v10, a2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v10, 1
@@ -2521,7 +2521,7 @@
 ; RV64ZVE32F-NEXT:  .LBB34_11: # %cond.load4
 ; RV64ZVE32F-NEXT:    ld a2, 16(a0)
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v10, a2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v10, 2
@@ -2530,7 +2530,7 @@
 ; RV64ZVE32F-NEXT:  .LBB34_12: # %cond.load7
 ; RV64ZVE32F-NEXT:    ld a2, 24(a0)
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v10, a2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v10, 3
@@ -2539,7 +2539,7 @@
 ; RV64ZVE32F-NEXT:  .LBB34_13: # %cond.load10
 ; RV64ZVE32F-NEXT:    ld a2, 32(a0)
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v10, a2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v10, 4
@@ -2548,7 +2548,7 @@
 ; RV64ZVE32F-NEXT:  .LBB34_14: # %cond.load13
 ; RV64ZVE32F-NEXT:    ld a2, 40(a0)
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v10, a2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v10, 5
@@ -2557,7 +2557,7 @@
 ; RV64ZVE32F-NEXT:  .LBB34_15: # %cond.load16
 ; RV64ZVE32F-NEXT:    ld a2, 48(a0)
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v10, a2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v10, 6
@@ -2566,9 +2566,9 @@
 ; RV64ZVE32F-NEXT:  .LBB34_16: # %cond.load19
 ; RV64ZVE32F-NEXT:    ld a0, 56(a0)
 ; RV64ZVE32F-NEXT:    lw a0, 0(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v10, a0
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v10, 7
 ; RV64ZVE32F-NEXT:    ret
   %v = call <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru)
@@
-2618,7 +2618,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 @@ -2632,7 +2632,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 @@ -2654,7 +2654,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 @@ -2676,7 +2676,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 @@ -2688,7 +2688,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 @@ -2700,7 +2700,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 @@ -2713,9 +2713,9 @@ ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -2767,7 +2767,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 @@ -2781,7 +2781,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 @@ -2803,7 +2803,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; 
RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 @@ -2825,7 +2825,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 @@ -2837,7 +2837,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 @@ -2849,7 +2849,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 @@ -2862,9 +2862,9 @@ ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -2919,7 +2919,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 @@ -2934,7 +2934,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 @@ -2957,7 +2957,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 @@ -2980,7 +2980,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 @@ -2993,7 +2993,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, 
v12, 4 @@ -3006,7 +3006,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 @@ -3020,9 +3020,9 @@ ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -3076,7 +3076,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 @@ -3090,7 +3090,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 @@ -3112,7 +3112,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 @@ -3134,7 +3134,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 @@ -3146,7 +3146,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 @@ -3158,7 +3158,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 @@ -3171,9 +3171,9 @@ ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -3226,7 +3226,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 
; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 @@ -3240,7 +3240,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 @@ -3262,7 +3262,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 @@ -3284,7 +3284,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 @@ -3296,7 +3296,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 @@ -3308,7 +3308,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 @@ -3321,9 +3321,9 @@ ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -3381,7 +3381,7 @@ ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: lw a3, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a3 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 @@ -3396,7 +3396,7 @@ ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: lw a3, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a3 ; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 @@ -3419,7 +3419,7 @@ ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: lw a3, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma 
; RV64ZVE32F-NEXT: vmv.s.x v12, a3 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 @@ -3442,7 +3442,7 @@ ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: lw a3, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a3 ; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 @@ -3455,7 +3455,7 @@ ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: lw a3, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a3 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 @@ -3468,7 +3468,7 @@ ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: lw a3, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a3 ; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 @@ -3482,9 +3482,9 @@ ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -3605,7 +3605,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4 @@ -8450,7 +8450,7 @@ ; RV64ZVE32F-NEXT: .LBB73_10: # %cond.load1 ; RV64ZVE32F-NEXT: ld a2, 8(a0) ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v10, 1 @@ -8459,7 +8459,7 @@ ; RV64ZVE32F-NEXT: .LBB73_11: # %cond.load4 ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v10, 2 @@ -8468,7 +8468,7 @@ ; RV64ZVE32F-NEXT: .LBB73_12: # %cond.load7 ; RV64ZVE32F-NEXT: ld a2, 24(a0) ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v10, 3 @@ -8477,7 +8477,7 @@ ; RV64ZVE32F-NEXT: .LBB73_13: # %cond.load10 ; RV64ZVE32F-NEXT: ld a2, 32(a0) ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v10, 4 @@ 
-8486,7 +8486,7 @@ ; RV64ZVE32F-NEXT: .LBB73_14: # %cond.load13 ; RV64ZVE32F-NEXT: ld a2, 40(a0) ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v10, 5 @@ -8495,7 +8495,7 @@ ; RV64ZVE32F-NEXT: .LBB73_15: # %cond.load16 ; RV64ZVE32F-NEXT: ld a2, 48(a0) ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v10, 6 @@ -8504,9 +8504,9 @@ ; RV64ZVE32F-NEXT: .LBB73_16: # %cond.load19 ; RV64ZVE32F-NEXT: ld a0, 56(a0) ; RV64ZVE32F-NEXT: flw ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v10, 7 ; RV64ZVE32F-NEXT: ret %v = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> %ptrs, i32 4, <8 x i1> %m, <8 x float> %passthru) @@ -8556,7 +8556,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 @@ -8570,7 +8570,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 @@ -8592,7 +8592,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 @@ -8614,7 +8614,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 @@ -8626,7 +8626,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 @@ -8638,7 +8638,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 @@ -8651,9 +8651,9 @@ ; 
RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: flw ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -8705,7 +8705,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 @@ -8719,7 +8719,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 @@ -8741,7 +8741,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 @@ -8763,7 +8763,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 @@ -8775,7 +8775,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 @@ -8787,7 +8787,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 @@ -8800,9 +8800,9 @@ ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: flw ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -8857,7 +8857,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 @@ -8872,7 +8872,7 @@ ; RV64ZVE32F-NEXT: slli a2, 
a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 @@ -8895,7 +8895,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 @@ -8918,7 +8918,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 @@ -8931,7 +8931,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 @@ -8944,7 +8944,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 @@ -8958,9 +8958,9 @@ ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: flw ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -9014,7 +9014,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 @@ -9028,7 +9028,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 @@ -9050,7 +9050,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 @@ -9072,7 +9072,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; 
RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 @@ -9084,7 +9084,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 @@ -9096,7 +9096,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 @@ -9109,9 +9109,9 @@ ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: flw ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -9164,7 +9164,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 @@ -9178,7 +9178,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 @@ -9200,7 +9200,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 @@ -9222,7 +9222,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 @@ -9234,7 +9234,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 @@ -9246,7 +9246,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma 
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 @@ -9259,9 +9259,9 @@ ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: flw ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -9319,7 +9319,7 @@ ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: flw ft0, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 @@ -9334,7 +9334,7 @@ ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: flw ft0, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 @@ -9357,7 +9357,7 @@ ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: flw ft0, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 @@ -9380,7 +9380,7 @@ ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: flw ft0, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 @@ -9393,7 +9393,7 @@ ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: flw ft0, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 @@ -9406,7 +9406,7 @@ ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: flw ft0, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 @@ -9420,9 +9420,9 @@ ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: flw ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -9543,7 +9543,7 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi 
v10, v8, 4 @@ -12671,7 +12671,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 @@ -12685,7 +12685,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 2 @@ -12707,7 +12707,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 5 @@ -12734,7 +12734,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 10, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 9 @@ -12748,7 +12748,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 11, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 10 @@ -12764,7 +12764,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 12, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 11 @@ -12778,7 +12778,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 13, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 12 @@ -12792,7 +12792,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 14, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 13 @@ -12817,7 +12817,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 18, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 17 @@ -12831,7 +12831,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 19, e8, m2, tu, ma ; RV64ZVE32F-NEXT: 
vslideup.vi v10, v14, 18 @@ -12853,7 +12853,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 22, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 21 @@ -12880,7 +12880,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 26, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 25 @@ -12894,7 +12894,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 27, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 26 @@ -12916,7 +12916,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 30, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 29 @@ -12930,7 +12930,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 31, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 30 @@ -12945,9 +12945,9 @@ ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: lb a0, 0(a0) ; RV64ZVE32F-NEXT: li a1, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m2, tu, ma +; RV64ZVE32F-NEXT: vsetvli zero, a1, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 31 ; RV64ZVE32F-NEXT: .LBB98_49: # %else92 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 @@ -12959,7 +12959,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 3 @@ -12971,7 +12971,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 4 @@ -12983,7 +12983,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 6 @@ -12996,7 +12996,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: 
vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 7 @@ -13008,7 +13008,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 9, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 8 @@ -13020,7 +13020,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 15, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 14 @@ -13033,7 +13033,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 15 @@ -13045,7 +13045,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 17, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 16 @@ -13059,7 +13059,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 20, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 19 @@ -13071,7 +13071,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 21, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 20 @@ -13083,7 +13083,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 23, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 22 @@ -13096,7 +13096,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 24, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 23 @@ -13108,7 +13108,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 25, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 24 @@ -13122,7 +13122,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; 
RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 28, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 27 @@ -13134,7 +13134,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 29, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 28 Index: llvm/test/CodeGen/RISCV/rvv/insertelt-fp.ll =================================================================== --- llvm/test/CodeGen/RISCV/rvv/insertelt-fp.ll +++ llvm/test/CodeGen/RISCV/rvv/insertelt-fp.ll @@ -122,7 +122,7 @@ define @insertelt_nxv8f16_imm( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv8f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 3 @@ -134,7 +134,7 @@ define @insertelt_nxv8f16_idx( %v, half %elt, i32 zeroext %idx) { ; CHECK-LABEL: insertelt_nxv8f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma @@ -157,7 +157,7 @@ define @insertelt_nxv16f16_imm( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv16f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 3 @@ -169,7 +169,7 @@ define @insertelt_nxv16f16_idx( %v, half %elt, i32 zeroext %idx) { ; CHECK-LABEL: insertelt_nxv16f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma @@ -192,7 +192,7 @@ define @insertelt_nxv32f16_imm( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv32f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 3 @@ -204,7 +204,7 @@ define @insertelt_nxv32f16_idx( %v, half %elt, i32 zeroext %idx) { ; CHECK-LABEL: insertelt_nxv32f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, ma @@ -297,7 +297,7 @@ define @insertelt_nxv4f32_imm( %v, float %elt) { ; CHECK-LABEL: insertelt_nxv4f32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 3 @@ -309,7 +309,7 @@ define @insertelt_nxv4f32_idx( %v, float %elt, i32 zeroext %idx) { ; CHECK-LABEL: insertelt_nxv4f32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma @@ -332,7 +332,7 @@ define 
@insertelt_nxv8f32_imm( %v, float %elt) { ; CHECK-LABEL: insertelt_nxv8f32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 3 @@ -344,7 +344,7 @@ define @insertelt_nxv8f32_idx( %v, float %elt, i32 zeroext %idx) { ; CHECK-LABEL: insertelt_nxv8f32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma @@ -367,7 +367,7 @@ define @insertelt_nxv16f32_imm( %v, float %elt) { ; CHECK-LABEL: insertelt_nxv16f32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 3 @@ -379,7 +379,7 @@ define @insertelt_nxv16f32_idx( %v, float %elt, i32 zeroext %idx) { ; CHECK-LABEL: insertelt_nxv16f32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma @@ -437,7 +437,7 @@ define @insertelt_nxv2f64_imm( %v, double %elt) { ; CHECK-LABEL: insertelt_nxv2f64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 3 @@ -449,7 +449,7 @@ define @insertelt_nxv2f64_idx( %v, double %elt, i32 zeroext %idx) { ; CHECK-LABEL: insertelt_nxv2f64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, ma @@ -472,7 +472,7 @@ define @insertelt_nxv4f64_imm( %v, double %elt) { ; CHECK-LABEL: insertelt_nxv4f64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 3 @@ -484,7 +484,7 @@ define @insertelt_nxv4f64_idx( %v, double %elt, i32 zeroext %idx) { ; CHECK-LABEL: insertelt_nxv4f64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, ma @@ -507,7 +507,7 @@ define @insertelt_nxv8f64_imm( %v, double %elt) { ; CHECK-LABEL: insertelt_nxv8f64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 3 @@ -519,7 +519,7 @@ define @insertelt_nxv8f64_idx( %v, double %elt, i32 zeroext %idx) { ; CHECK-LABEL: insertelt_nxv8f64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma Index: llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll =================================================================== 
--- llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll
+++ llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll
@@ -146,13 +146,13 @@
 ; CHECK-LABEL: insertelt_nxv16i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v8, a0
-; CHECK-NEXT: vmv.v.i v10, 0
-; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv.s.x v10, a0
 ; CHECK-NEXT: vsetivli zero, 3, e8, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 2
+; CHECK-NEXT: vslideup.vi v8, v10, 2
 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vand.vi v8, v8, 1
 ; CHECK-NEXT: vmsne.vi v0, v8, 0
 ; CHECK-NEXT: ret
 %y = insertelement <vscale x 16 x i1> %x, i1 %elt, i64 2
@@ -163,14 +163,14 @@
 ; CHECK-LABEL: insertelt_idx_nxv16i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v8, a0
-; CHECK-NEXT: vmv.v.i v10, 0
-; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv.s.x v10, a0
 ; CHECK-NEXT: addi a0, a1, 1
 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
-; CHECK-NEXT: vslideup.vx v10, v8, a1
+; CHECK-NEXT: vslideup.vx v8, v10, a1
 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vand.vi v8, v8, 1
 ; CHECK-NEXT: vmsne.vi v0, v8, 0
 ; CHECK-NEXT: ret
 %y = insertelement <vscale x 16 x i1> %x, i1 %elt, i64 %idx
@@ -181,13 +181,13 @@
 ; CHECK-LABEL: insertelt_nxv32i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
-; CHECK-NEXT: vmv.s.x v8, a0
-; CHECK-NEXT: vmv.v.i v12, 0
-; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv.s.x v12, a0
 ; CHECK-NEXT: vsetivli zero, 3, e8, m4, tu, ma
-; CHECK-NEXT: vslideup.vi v12, v8, 2
+; CHECK-NEXT: vslideup.vi v8, v12, 2
 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; CHECK-NEXT: vand.vi v8, v12, 1
+; CHECK-NEXT: vand.vi v8, v8, 1
 ; CHECK-NEXT: vmsne.vi v0, v8, 0
 ; CHECK-NEXT: ret
 %y = insertelement <vscale x 32 x i1> %x, i1 %elt, i64 2
@@ -198,14 +198,14 @@
 ; CHECK-LABEL: insertelt_idx_nxv32i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a2, zero, e8, m4, ta, ma
-; CHECK-NEXT: vmv.s.x v8, a0
-; CHECK-NEXT: vmv.v.i v12, 0
-; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv.s.x v12, a0
 ; CHECK-NEXT: addi a0, a1, 1
 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
-; CHECK-NEXT: vslideup.vx v12, v8, a1
+; CHECK-NEXT: vslideup.vx v8, v12, a1
 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; CHECK-NEXT: vand.vi v8, v12, 1
+; CHECK-NEXT: vand.vi v8, v8, 1
 ; CHECK-NEXT: vmsne.vi v0, v8, 0
 ; CHECK-NEXT: ret
 %y = insertelement <vscale x 32 x i1> %x, i1 %elt, i64 %idx
@@ -216,13 +216,13 @@
 ; CHECK-LABEL: insertelt_nxv64i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmv.s.x v8, a0
-; CHECK-NEXT: vmv.v.i v16, 0
-; CHECK-NEXT: vmerge.vim v16, v16, 1, v0
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv.s.x v16, a0
 ; CHECK-NEXT: vsetivli zero, 3, e8, m8, tu, ma
-; CHECK-NEXT: vslideup.vi v16, v8, 2
+; CHECK-NEXT: vslideup.vi v8, v16, 2
 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vand.vi v8, v16, 1
+; CHECK-NEXT: vand.vi v8, v8, 1
 ; CHECK-NEXT: vmsne.vi v0, v8, 0
 ; CHECK-NEXT: ret
 %y = insertelement <vscale x 64 x i1> %x, i1 %elt, i64 2
@@ -233,14 +233,14 @@
 ; CHECK-LABEL: insertelt_idx_nxv64i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmv.s.x v8, a0
-; CHECK-NEXT: vmv.v.i v16, 0
-; CHECK-NEXT: vmerge.vim v16, v16, 1, v0
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv.s.x v16, a0
 ; CHECK-NEXT: addi a0, a1, 1
 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
-; CHECK-NEXT: vslideup.vx v16, v8, a1
+; CHECK-NEXT: vslideup.vx v8, v16, a1
 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vand.vi v8, v16, 1
+; CHECK-NEXT: vand.vi v8, v8, 1
 ; CHECK-NEXT: vmsne.vi v0, v8, 0
 ; CHECK-NEXT: ret
 %y = insertelement <vscale x 64 x i1> %x, i1 %elt, i64 %idx
Index: llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll
+++ llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll
@@ -155,7 +155,7 @@
 define <vscale x 16 x i8> @insertelt_nxv16i8_imm(<vscale x 16 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv16i8_imm:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v10, a0
 ; CHECK-NEXT: vsetivli zero, 4, e8, m2, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v10, 3
@@ -167,7 +167,7 @@
 define <vscale x 16 x i8> @insertelt_nxv16i8_idx(<vscale x 16 x i8> %v, i8 signext %elt, i32 signext %idx) {
 ; CHECK-LABEL: insertelt_nxv16i8_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v10, a0
 ; CHECK-NEXT: addi a0, a1, 1
 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
@@ -190,7 +190,7 @@
 define <vscale x 32 x i8> @insertelt_nxv32i8_imm(<vscale x 32 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv32i8_imm:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v12, a0
 ; CHECK-NEXT: vsetivli zero, 4, e8, m4, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v12, 3
@@ -202,7 +202,7 @@
 define <vscale x 32 x i8> @insertelt_nxv32i8_idx(<vscale x 32 x i8> %v, i8 signext %elt, i32 signext %idx) {
 ; CHECK-LABEL: insertelt_nxv32i8_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e8, m4, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v12, a0
 ; CHECK-NEXT: addi a0, a1, 1
 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
@@ -225,7 +225,7 @@
 define <vscale x 64 x i8> @insertelt_nxv64i8_imm(<vscale x 64 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv64i8_imm:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v16, a0
 ; CHECK-NEXT: vsetivli zero, 4, e8, m8, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v16, 3
@@ -237,7 +237,7 @@
 define <vscale x 64 x i8> @insertelt_nxv64i8_idx(<vscale x 64 x i8> %v, i8 signext %elt, i32 signext %idx) {
 ; CHECK-LABEL: insertelt_nxv64i8_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v16, a0
 ; CHECK-NEXT: addi a0, a1, 1
 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
@@ -365,7 +365,7 @@
 define <vscale x 8 x i16> @insertelt_nxv8i16_imm(<vscale x 8 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv8i16_imm:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v10, a0
 ; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v10, 3
@@ -377,7 +377,7 @@
 define <vscale x 8 x i16> @insertelt_nxv8i16_idx(<vscale x 8 x i16> %v, i16 signext %elt, i32 signext %idx) {
 ; CHECK-LABEL: insertelt_nxv8i16_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v10, a0
 ; CHECK-NEXT: addi a0, a1, 1
 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
@@ -400,7 +400,7 @@
 define <vscale x 16 x i16> @insertelt_nxv16i16_imm(<vscale x 16 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv16i16_imm:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v12, a0
 ; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v12, 3
@@ -412,7 +412,7 @@
 define <vscale x 16 x i16> @insertelt_nxv16i16_idx(<vscale x 16 x i16> %v, i16 signext %elt, i32 signext %idx) {
 ; CHECK-LABEL: insertelt_nxv16i16_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v12, a0
 ; CHECK-NEXT: addi a0, a1, 1
 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
@@ -435,7 +435,7 @@
 define <vscale x 32 x i16> @insertelt_nxv32i16_imm(<vscale x 32 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv32i16_imm:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v16, a0
 ; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v16, 3
@@ -447,7 +447,7 @@
 define <vscale x 32 x i16> @insertelt_nxv32i16_idx(<vscale x 32 x i16> %v, i16 signext %elt, i32 signext %idx) {
 ; CHECK-LABEL: insertelt_nxv32i16_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e16, m8, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v16, a0
 ; CHECK-NEXT: addi a0, a1, 1
 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
@@ -540,7 +540,7 @@
 define <vscale x 4 x i32> @insertelt_nxv4i32_imm(<vscale x 4 x i32> %v, i32 %elt) {
 ; CHECK-LABEL: insertelt_nxv4i32_imm:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v10, a0
 ; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v10, 3
@@ -552,7 +552,7 @@
 define <vscale x 4 x i32> @insertelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 %elt, i32 %idx) {
 ; CHECK-LABEL: insertelt_nxv4i32_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v10, a0
 ; CHECK-NEXT: addi a0, a1, 1
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
@@ -575,7 +575,7 @@
 define <vscale x 8 x i32> @insertelt_nxv8i32_imm(<vscale x 8 x i32> %v, i32 %elt) {
 ; CHECK-LABEL: insertelt_nxv8i32_imm:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v12, a0
 ; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v12, 3
@@ -587,7 +587,7 @@
 define <vscale x 8 x i32> @insertelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 %elt, i32 %idx) {
 ; CHECK-LABEL: insertelt_nxv8i32_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v12, a0
 ; CHECK-NEXT: addi a0, a1, 1
 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
@@ -610,7 +610,7 @@
 define <vscale x 16 x i32> @insertelt_nxv16i32_imm(<vscale x 16 x i32> %v, i32 %elt) {
 ; CHECK-LABEL: insertelt_nxv16i32_imm:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v16, a0
 ; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v16, 3
@@ -622,7 +622,7 @@
 define <vscale x 16 x i32> @insertelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 %elt, i32 %idx) {
 ; CHECK-LABEL: insertelt_nxv16i32_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v16, a0
 ; CHECK-NEXT: addi a0, a1, 1
 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
@@ -800,7 +800,7 @@
 ; CHECK-LABEL: insertelt_nxv2i64_imm_c10:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a0, 10
-; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v10, a0
 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v10, 3
@@ -813,7 +813,7 @@
 ; CHECK-LABEL: insertelt_nxv2i64_idx_c10:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a1, 10
-; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v10, a1
 ; CHECK-NEXT: addi a1, a0, 1
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, ma
@@ -838,7 +838,7 @@
 ; CHECK-LABEL: insertelt_nxv2i64_imm_cn1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v10, a0
 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v10, 3
@@ -851,7 +851,7 @@
 ; CHECK-LABEL: insertelt_nxv2i64_idx_cn1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a1, -1
-; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v10, a1
 ; CHECK-NEXT: addi a1, a0, 1
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, ma
Index: llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll
+++ llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll
@@ -155,7 +155,7 @@
 define <vscale x 16 x i8> @insertelt_nxv16i8_imm(<vscale x 16 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv16i8_imm:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v10, a0
 ; CHECK-NEXT: vsetivli zero, 4, e8, m2, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v10, 3
@@ -167,7 +167,7 @@
 define <vscale x 16 x i8> @insertelt_nxv16i8_idx(<vscale x 16 x i8> %v, i8 signext %elt, i32 zeroext %idx) {
 ; CHECK-LABEL: insertelt_nxv16i8_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v10, a0
 ; CHECK-NEXT: addi a0, a1, 1
 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
@@ -190,7 +190,7 @@
 define <vscale x 32 x i8> @insertelt_nxv32i8_imm(<vscale x 32 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv32i8_imm:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v12, a0
 ; CHECK-NEXT: vsetivli zero, 4, e8, m4, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v12, 3
@@ -202,7 +202,7 @@
 define <vscale x 32 x i8> @insertelt_nxv32i8_idx(<vscale x 32 x i8> %v, i8 signext %elt, i32 zeroext %idx) {
 ; CHECK-LABEL: insertelt_nxv32i8_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e8, m4, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v12, a0
 ; CHECK-NEXT: addi a0, a1, 1
 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
@@ -225,7 +225,7 @@
 define <vscale x 64 x i8> @insertelt_nxv64i8_imm(<vscale x 64 x i8> %v, i8 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv64i8_imm:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v16, a0
 ; CHECK-NEXT: vsetivli zero, 4, e8, m8, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v16, 3
@@ -237,7 +237,7 @@
 define <vscale x 64 x i8> @insertelt_nxv64i8_idx(<vscale x 64 x i8> %v, i8 signext %elt, i32 zeroext %idx) {
 ; CHECK-LABEL: insertelt_nxv64i8_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v16, a0
 ; CHECK-NEXT: addi a0, a1, 1
 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
@@ -365,7 +365,7 @@
 define <vscale x 8 x i16> @insertelt_nxv8i16_imm(<vscale x 8 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv8i16_imm:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v10, a0
 ; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v10, 3
@@ -377,7 +377,7 @@
 define <vscale x 8 x i16> @insertelt_nxv8i16_idx(<vscale x 8 x i16> %v, i16 signext %elt, i32 zeroext %idx) {
 ; CHECK-LABEL: insertelt_nxv8i16_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v10, a0
 ; CHECK-NEXT: addi a0, a1, 1
 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
@@ -400,7 +400,7 @@
 define <vscale x 16 x i16> @insertelt_nxv16i16_imm(<vscale x 16 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv16i16_imm:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v12, a0
 ; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v12, 3
@@ -412,7 +412,7 @@
 define <vscale x 16 x i16> @insertelt_nxv16i16_idx(<vscale x 16 x i16> %v, i16 signext %elt, i32 zeroext %idx) {
 ; CHECK-LABEL: insertelt_nxv16i16_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v12, a0
 ; CHECK-NEXT: addi a0, a1, 1
 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
@@ -435,7 +435,7 @@
 define <vscale x 32 x i16> @insertelt_nxv32i16_imm(<vscale x 32 x i16> %v, i16 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv32i16_imm:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v16, a0
 ; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v16, 3
@@ -447,7 +447,7 @@
 define <vscale x 32 x i16> @insertelt_nxv32i16_idx(<vscale x 32 x i16> %v, i16 signext %elt, i32 zeroext %idx) {
 ; CHECK-LABEL: insertelt_nxv32i16_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e16, m8, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v16, a0
 ; CHECK-NEXT: addi a0, a1, 1
 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
@@ -540,7 +540,7 @@
 define <vscale x 4 x i32> @insertelt_nxv4i32_imm(<vscale x 4 x i32> %v, i32 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv4i32_imm:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v10, a0
 ; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v10, 3
@@ -552,7 +552,7 @@
 define <vscale x 4 x i32> @insertelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 signext %elt, i32 zeroext %idx) {
 ; CHECK-LABEL: insertelt_nxv4i32_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v10, a0
 ; CHECK-NEXT: addi a0, a1, 1
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
@@ -575,7 +575,7 @@
 define <vscale x 8 x i32> @insertelt_nxv8i32_imm(<vscale x 8 x i32> %v, i32 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv8i32_imm:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v12, a0
 ; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v12, 3
@@ -587,7 +587,7 @@
 define <vscale x 8 x i32> @insertelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 signext %elt, i32 zeroext %idx) {
 ; CHECK-LABEL: insertelt_nxv8i32_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v12, a0
 ; CHECK-NEXT: addi a0, a1, 1
 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
@@ -610,7 +610,7 @@
 define <vscale x 16 x i32> @insertelt_nxv16i32_imm(<vscale x 16 x i32> %v, i32 signext %elt) {
 ; CHECK-LABEL: insertelt_nxv16i32_imm:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v16, a0
 ; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v16, 3
@@ -622,7 +622,7 @@
 define <vscale x 16 x i32> @insertelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 signext %elt, i32 zeroext %idx) {
 ; CHECK-LABEL: insertelt_nxv16i32_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v16, a0
 ; CHECK-NEXT: addi a0, a1, 1
 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
@@ -682,7 +682,7 @@
 define <vscale x 2 x i64> @insertelt_nxv2i64_imm(<vscale x 2 x i64> %v, i64 %elt) {
 ; CHECK-LABEL: insertelt_nxv2i64_imm:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v10, a0
 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v10, 3
@@ -694,7 +694,7 @@
 define <vscale x 2 x i64> @insertelt_nxv2i64_idx(<vscale x 2 x i64> %v, i64 %elt, i32 %idx) {
 ; CHECK-LABEL: insertelt_nxv2i64_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v10, a0
 ; CHECK-NEXT: slli a1, a1, 32
 ; CHECK-NEXT: srli a1, a1, 32
@@ -719,7 +719,7 @@
 define <vscale x 4 x i64> @insertelt_nxv4i64_imm(<vscale x 4 x i64> %v, i64 %elt) {
 ; CHECK-LABEL: insertelt_nxv4i64_imm:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v12, a0
 ; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v12, 3
@@ -731,7 +731,7 @@
 define <vscale x 4 x i64> @insertelt_nxv4i64_idx(<vscale x 4 x i64> %v, i64 %elt, i32 %idx) {
 ; CHECK-LABEL: insertelt_nxv4i64_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e64, m4, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v12, a0
 ; CHECK-NEXT: slli a1, a1, 32
 ; CHECK-NEXT: srli a1, a1, 32
@@ -756,7 +756,7 @@
 define <vscale x 8 x i64> @insertelt_nxv8i64_imm(<vscale x 8 x i64> %v, i64 %elt) {
 ; CHECK-LABEL: insertelt_nxv8i64_imm:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v16, a0
 ; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v16, 3
@@ -768,7 +768,7 @@
 define <vscale x 8 x i64> @insertelt_nxv8i64_idx(<vscale x 8 x i64> %v, i64 %elt, i32 %idx) {
 ; CHECK-LABEL: insertelt_nxv8i64_idx:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v16, a0
 ; CHECK-NEXT: slli a1, a1, 32
 ; CHECK-NEXT: srli a1, a1, 32