diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4480,19 +4480,20 @@
   }

   MVT M1VT = getLMUL1VT(ContainerVT);
+  MVT XLenVT = Subtarget.getXLenVT();

   SDValue Mask, VL;
   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);

-  // FIXME: This is a VLMAX splat which might be too large and can prevent
-  // vsetvli removal.
   SDValue NeutralElem =
       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
-  SDValue IdentitySplat = DAG.getSplatVector(M1VT, DL, NeutralElem);
+  SDValue IdentitySplat = lowerScalarSplat(NeutralElem,
+                                           DAG.getConstant(1, DL, XLenVT),
+                                           M1VT, DL, DAG, Subtarget);
   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), Vec,
                                   IdentitySplat, Mask, VL);
   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
-                             DAG.getConstant(0, DL, Subtarget.getXLenVT()));
+                             DAG.getConstant(0, DL, XLenVT));
   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
 }

@@ -4541,17 +4542,18 @@
   }

   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
+  MVT XLenVT = Subtarget.getXLenVT();

   SDValue Mask, VL;
   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);

-  // FIXME: This is a VLMAX splat which might be too large and can prevent
-  // vsetvli removal.
-  SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal);
+  SDValue ScalarSplat = lowerScalarSplat(ScalarVal,
+                                         DAG.getConstant(1, DL, XLenVT),
+                                         M1VT, DL, DAG, Subtarget);
   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT),
                                   VectorVal, ScalarSplat, Mask, VL);
   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
-                     DAG.getConstant(0, DL, Subtarget.getXLenVT()));
+                     DAG.getConstant(0, DL, XLenVT));
 }

 static unsigned getRVVVPReductionOp(unsigned ISDOpcode) {
@@ -4613,13 +4615,13 @@
   MVT XLenVT = Subtarget.getXLenVT();
   MVT ResVT = !VecVT.isInteger() || VecEltVT.bitsGE(XLenVT) ? VecEltVT : XLenVT;

-  // FIXME: This is a VLMAX splat which might be too large and can prevent
-  // vsetvli removal.
-  SDValue StartSplat = DAG.getSplatVector(M1VT, DL, Op.getOperand(0));
+  SDValue StartSplat = lowerScalarSplat(Op.getOperand(0),
+                                        DAG.getConstant(1, DL, XLenVT),
+                                        M1VT, DL, DAG, Subtarget);
   SDValue Reduction =
       DAG.getNode(RVVOpcode, DL, M1VT, StartSplat, Vec, StartSplat, Mask, VL);
   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
-                             DAG.getConstant(0, DL, Subtarget.getXLenVT()));
+                             DAG.getConstant(0, DL, XLenVT));
   if (!VecVT.isInteger())
     return Elt0;
   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
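Note: the change is the same in all three reduction lowerings above (integer VECREDUCE, floating-point VECREDUCE, and the VP reductions). The scalar start/identity value was previously splatted with DAG.getSplatVector, which lowers to a splat at VLMAX and, per the removed FIXME, can prevent vsetvli removal. It now goes through lowerScalarSplat with an explicit VL of 1, which is sufficient because the RVV reduction instructions read only element 0 of the scalar (vs1) operand. A minimal before/after sketch of the expected codegen, taken from the test updates below:

  before:  vsetvli a1, zero, e16, m1, ta, mu    # VL = VLMAX
           vfmv.v.f v9, fa0
  after:   vsetivli zero, 1, e16, m1, ta, mu    # VL = 1 fits vsetivli's immediate
           vfmv.v.f v9, fa0
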
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
@@ -9,7 +9,7 @@
 define half @vpreduce_fadd_v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_fadd_v2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v9, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
@@ -22,7 +22,7 @@
 define half @vpreduce_ord_fadd_v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_ord_fadd_v2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v9, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
@@ -37,7 +37,7 @@
 define half @vpreduce_fadd_v4f16(half %s, <4 x half> %v, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_fadd_v4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v9, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
@@ -50,7 +50,7 @@
 define half @vpreduce_ord_fadd_v4f16(half %s, <4 x half> %v, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_ord_fadd_v4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v9, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
@@ -65,7 +65,7 @@
 define float @vpreduce_fadd_v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_fadd_v2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v9, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
@@ -78,7 +78,7 @@
 define float @vpreduce_ord_fadd_v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_ord_fadd_v2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v9, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
@@ -93,7 +93,7 @@
 define float @vpreduce_fadd_v4f32(float %s, <4 x float> %v, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_fadd_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v9, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
@@ -106,7 +106,7 @@
 define float @vpreduce_ord_fadd_v4f32(float %s, <4 x float> %v, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_ord_fadd_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v9, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
@@ -121,7 +121,7 @@
 define double @vpreduce_fadd_v2f64(double %s, <2 x double> %v, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_fadd_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v9, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
@@ -134,7 +134,7 @@
 define double @vpreduce_ord_fadd_v2f64(double %s, <2 x double> %v, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_ord_fadd_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v9, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
@@ -149,7 +149,7 @@
 define double @vpreduce_fadd_v4f64(double %s, <4 x double> %v, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_fadd_v4f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v10, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
 ; CHECK-NEXT:    vfredusum.vs v10, v8, v10, v0.t
@@ -162,7 +162,7 @@
 define double @vpreduce_ord_fadd_v4f64(double %s, <4 x double> %v, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_ord_fadd_v4f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v10, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
 ; CHECK-NEXT:    vfredosum.vs v10, v8, v10, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK

 declare half @llvm.vector.reduce.fadd.v1f16(half, <1 x half>)

@@ -22,7 +22,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v9, fa0
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v9
@@ -38,12 +38,12 @@
 define half @vreduce_fadd_v2f16(<2 x half>* %x, half %s) {
 ; CHECK-LABEL: vreduce_fadd_v2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI2_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI2_0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT:    vlse16.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -59,7 +59,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v9, fa0
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v9
@@ -75,12 +75,12 @@
 define half @vreduce_fadd_v4f16(<4 x half>* %x, half %s) {
 ; CHECK-LABEL: vreduce_fadd_v4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI4_0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT:    vlse16.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -96,7 +96,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v9, fa0
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v9
@@ -112,12 +112,12 @@
 define half @vreduce_fadd_v8f16(<8 x half>* %x, half %s) {
 ; CHECK-LABEL: vreduce_fadd_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI6_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI6_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI6_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI6_0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT:    vlse16.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -133,7 +133,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v9, fa0
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v9
@@ -149,12 +149,12 @@
 define half @vreduce_fadd_v16f16(<16 x half>* %x, half %s) {
 ; CHECK-LABEL: vreduce_fadd_v16f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI8_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI8_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v10, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI8_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI8_0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT:    vlse16.v v10, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v10
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v10, fa0
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v10
@@ -184,35 +184,20 @@
 declare half @llvm.vector.reduce.fadd.v32f16(half, <32 x half>)

 define half @vreduce_fadd_v32f16(<32 x half>* %x, half %s) {
-; RV32-LABEL: vreduce_fadd_v32f16:
-; RV32:       # %bb.0:
-; RV32-NEXT:    li a1, 32
-; RV32-NEXT:    lui a2, %hi(.LCPI10_0)
-; RV32-NEXT:    flh ft0, %lo(.LCPI10_0)(a2)
-; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; RV32-NEXT:    vle16.v v8, (a0)
-; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; RV32-NEXT:    vfmv.v.f v12, ft0
-; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; RV32-NEXT:    vfredusum.vs v8, v8, v12
-; RV32-NEXT:    vfmv.f.s ft0, v8
-; RV32-NEXT:    fadd.h fa0, fa0, ft0
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vreduce_fadd_v32f16:
-; RV64:       # %bb.0:
-; RV64-NEXT:    lui a1, %hi(.LCPI10_0)
-; RV64-NEXT:    flh ft0, %lo(.LCPI10_0)(a1)
-; RV64-NEXT:    li a1, 32
-; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; RV64-NEXT:    vle16.v v8, (a0)
-; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; RV64-NEXT:    vfmv.v.f v12, ft0
-; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; RV64-NEXT:    vfredusum.vs v8, v8, v12
-; RV64-NEXT:    vfmv.f.s ft0, v8
-; RV64-NEXT:    fadd.h fa0, fa0, ft0
-; RV64-NEXT:    ret
+; CHECK-LABEL: vreduce_fadd_v32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI10_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI10_0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT:    vlse16.v v12, (a0), zero
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT:    vfredusum.vs v8, v8, v12
+; CHECK-NEXT:    vfmv.f.s ft0, v8
+; CHECK-NEXT:    fadd.h fa0, fa0, ft0
+; CHECK-NEXT:    ret
   %v = load <32 x half>, <32 x half>* %x
   %red = call reassoc half @llvm.vector.reduce.fadd.v32f16(half %s, <32 x half> %v)
   ret half %red
@@ -224,7 +209,7 @@
 ; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v12, fa0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v12
@@ -238,35 +223,20 @@
 declare half @llvm.vector.reduce.fadd.v64f16(half, <64 x half>)

 define half @vreduce_fadd_v64f16(<64 x half>* %x, half %s) {
-; RV32-LABEL: vreduce_fadd_v64f16:
-; RV32:       # %bb.0:
-; RV32-NEXT:    li a1, 64
-; RV32-NEXT:    lui a2, %hi(.LCPI12_0)
-; RV32-NEXT:    flh ft0, %lo(.LCPI12_0)(a2)
-; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; RV32-NEXT:    vle16.v v8, (a0)
-; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; RV32-NEXT:    vfmv.v.f v16, ft0
-; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; RV32-NEXT:    vfredusum.vs v8, v8, v16
-; RV32-NEXT:    vfmv.f.s ft0, v8
-; RV32-NEXT:    fadd.h fa0, fa0, ft0
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vreduce_fadd_v64f16:
-; RV64:       # %bb.0:
-; RV64-NEXT:    lui a1, %hi(.LCPI12_0)
-; RV64-NEXT:    flh ft0, %lo(.LCPI12_0)(a1)
-; RV64-NEXT:    li a1, 64
-; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; RV64-NEXT:    vle16.v v8, (a0)
-; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; RV64-NEXT:    vfmv.v.f v16, ft0
-; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; RV64-NEXT:    vfredusum.vs v8, v8, v16
-; RV64-NEXT:    vfmv.f.s ft0, v8
-; RV64-NEXT:    fadd.h fa0, fa0, ft0
-; RV64-NEXT:    ret
+; CHECK-LABEL: vreduce_fadd_v64f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 64
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI12_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI12_0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT:    vlse16.v v16, (a0), zero
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT:    vfredusum.vs v8, v8, v16
+; CHECK-NEXT:    vfmv.f.s ft0, v8
+; CHECK-NEXT:    fadd.h fa0, fa0, ft0
+; CHECK-NEXT:    ret
   %v = load <64 x half>, <64 x half>* %x
   %red = call reassoc half @llvm.vector.reduce.fadd.v64f16(half %s, <64 x half> %v)
   ret half %red
@@ -278,7 +248,7 @@
 ; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v16, fa0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v16
@@ -299,11 +269,11 @@
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
 ; CHECK-NEXT:    vle16.v v16, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI14_0)(a0)
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v16, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI14_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI14_0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT:    vlse16.v v16, (a0), zero
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v16
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -322,12 +292,12 @@
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a1)
 ; CHECK-NEXT:    vle16.v v16, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v24, fa0
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v16, v16, v24
 ; CHECK-NEXT:    vfmv.f.s ft0, v16
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v16, ft0
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v16
@@ -358,7 +328,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v9, fa0
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v9
@@ -374,12 +344,12 @@
 define float @vreduce_fadd_v2f32(<2 x float>* %x, float %s) {
 ; CHECK-LABEL: vreduce_fadd_v2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI18_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI18_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI18_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI18_0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vlse32.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -395,7 +365,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v9, fa0
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v9
@@ -411,12 +381,12 @@
 define float @vreduce_fadd_v4f32(<4 x float>* %x, float %s) {
 ; CHECK-LABEL: vreduce_fadd_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI20_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI20_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI20_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI20_0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vlse32.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -432,7 +402,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v9, fa0
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v9
@@ -448,12 +418,12 @@
 define float @vreduce_fadd_v8f32(<8 x float>* %x, float %s) {
 ; CHECK-LABEL: vreduce_fadd_v8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI22_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI22_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v10, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI22_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI22_0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vlse32.v v10, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v10
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -469,7 +439,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v10, fa0
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v10
@@ -485,12 +455,12 @@
 define float @vreduce_fadd_v16f32(<16 x float>* %x, float %s) {
 ; CHECK-LABEL: vreduce_fadd_v16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI24_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI24_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v12, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI24_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI24_0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vlse32.v v12, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v12
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -506,7 +476,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v12, fa0
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v12
@@ -520,35 +490,20 @@
 declare float @llvm.vector.reduce.fadd.v32f32(float, <32 x float>)

 define float @vreduce_fadd_v32f32(<32 x float>* %x, float %s) {
-; RV32-LABEL: vreduce_fadd_v32f32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    li a1, 32
-; RV32-NEXT:    lui a2, %hi(.LCPI26_0)
-; RV32-NEXT:    flw ft0, %lo(.LCPI26_0)(a2)
-; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; RV32-NEXT:    vle32.v v8, (a0)
-; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; RV32-NEXT:    vfmv.v.f v16, ft0
-; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; RV32-NEXT:    vfredusum.vs v8, v8, v16
-; RV32-NEXT:    vfmv.f.s ft0, v8
-; RV32-NEXT:    fadd.s fa0, fa0, ft0
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vreduce_fadd_v32f32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    lui a1, %hi(.LCPI26_0)
-; RV64-NEXT:    flw ft0, %lo(.LCPI26_0)(a1)
-; RV64-NEXT:    li a1, 32
-; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; RV64-NEXT:    vle32.v v8, (a0)
-; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; RV64-NEXT:    vfmv.v.f v16, ft0
-; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; RV64-NEXT:    vfredusum.vs v8, v8, v16
-; RV64-NEXT:    vfmv.f.s ft0, v8
-; RV64-NEXT:    fadd.s fa0, fa0, ft0
-; RV64-NEXT:    ret
+; CHECK-LABEL: vreduce_fadd_v32f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    lui a0, %hi(.LCPI26_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI26_0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vlse32.v v16, (a0), zero
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    vfredusum.vs v8, v8, v16
+; CHECK-NEXT:    vfmv.f.s ft0, v8
+; CHECK-NEXT:    fadd.s fa0, fa0, ft0
+; CHECK-NEXT:    ret
   %v = load <32 x float>, <32 x float>* %x
   %red = call reassoc float @llvm.vector.reduce.fadd.v32f32(float %s, <32 x float> %v)
   ret float %red
@@ -560,7 +515,7 @@
 ; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v16, fa0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v16
@@ -581,11 +536,11 @@
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
 ; CHECK-NEXT:    vle32.v v16, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI28_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI28_0)(a0)
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v16, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI28_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI28_0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vlse32.v v16, (a0), zero
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v16
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -604,12 +559,12 @@
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    vle32.v v16, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v24, fa0
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v16, v16, v24
 ; CHECK-NEXT:    vfmv.f.s ft0, v16
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v16, ft0
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v16
@@ -640,9 +595,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v9, fa0
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -656,12 +609,12 @@
 define double @vreduce_fadd_v2f64(<2 x double>* %x, double %s) {
 ; CHECK-LABEL: vreduce_fadd_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI32_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI32_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI32_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI32_0)
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT:    vlse64.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -677,7 +630,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v9, fa0
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v9
@@ -693,12 +646,12 @@
 define double @vreduce_fadd_v4f64(<4 x double>* %x, double %s) {
 ; CHECK-LABEL: vreduce_fadd_v4f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI34_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI34_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v10, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI34_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI34_0)
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT:    vlse64.v v10, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v10
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -714,7 +667,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v10, fa0
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v10
@@ -730,12 +683,12 @@
 define double @vreduce_fadd_v8f64(<8 x double>* %x, double %s) {
 ; CHECK-LABEL: vreduce_fadd_v8f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI36_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI36_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v12, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI36_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI36_0)
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT:    vlse64.v v12, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v12
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -751,7 +704,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v12, fa0
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v12
@@ -767,12 +720,12 @@
 define double @vreduce_fadd_v16f64(<16 x double>* %x, double %s) {
 ; CHECK-LABEL: vreduce_fadd_v16f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI38_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI38_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v16, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI38_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI38_0)
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v16
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -788,7 +741,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v16, fa0
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v16
@@ -808,11 +761,11 @@
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
 ; CHECK-NEXT:    vle64.v v16, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI40_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI40_0)(a0)
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v16, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI40_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI40_0)
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v16
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -830,12 +783,12 @@
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a1)
 ; CHECK-NEXT:    vle64.v v16, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v24, fa0
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v16, v16, v24
 ; CHECK-NEXT:    vfmv.f.s ft0, v16
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v16, ft0
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v16
@@ -851,12 +804,12 @@
 define half @vreduce_fmin_v2f16(<2 x half>* %x) {
 ; CHECK-LABEL: vreduce_fmin_v2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI42_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI42_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI42_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI42_0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT:    vlse16.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -871,12 +824,12 @@
 define half @vreduce_fmin_v4f16(<4 x half>* %x) {
 ; CHECK-LABEL: vreduce_fmin_v4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI43_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI43_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI43_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI43_0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT:    vlse16.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -889,12 +842,12 @@
 define half @vreduce_fmin_v4f16_nonans(<4 x half>* %x) {
 ; CHECK-LABEL: vreduce_fmin_v4f16_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI44_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI44_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI44_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI44_0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT:    vlse16.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -907,12 +860,12 @@
 define half @vreduce_fmin_v4f16_nonans_noinfs(<4 x half>* %x) {
 ; CHECK-LABEL: vreduce_fmin_v4f16_nonans_noinfs:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI45_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI45_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI45_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI45_0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT:    vlse16.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -932,11 +885,11 @@
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
 ; CHECK-NEXT:    vle16.v v16, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI46_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI46_0)(a0)
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v16, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI46_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI46_0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT:    vlse16.v v16, (a0), zero
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v16
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -951,12 +904,12 @@
 define float @vreduce_fmin_v2f32(<2 x float>* %x) {
 ; CHECK-LABEL: vreduce_fmin_v2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI47_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI47_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI47_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI47_0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vlse32.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -971,12 +924,12 @@
 define float @vreduce_fmin_v4f32(<4 x float>* %x) {
 ; CHECK-LABEL: vreduce_fmin_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI48_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI48_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI48_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI48_0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vlse32.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -989,12 +942,12 @@
 define float @vreduce_fmin_v4f32_nonans(<4 x float>* %x) {
 ; CHECK-LABEL: vreduce_fmin_v4f32_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI49_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI49_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI49_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI49_0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vlse32.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1007,12 +960,12 @@
 define float @vreduce_fmin_v4f32_nonans_noinfs(<4 x float>* %x) {
 ; CHECK-LABEL: vreduce_fmin_v4f32_nonans_noinfs:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI50_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI50_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI50_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI50_0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vlse32.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1029,20 +982,20 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    addi a2, a0, 384
-; CHECK-NEXT:    vle32.v v8, (a2)
-; CHECK-NEXT:    addi a2, a0, 128
 ; CHECK-NEXT:    vle32.v v16, (a2)
+; CHECK-NEXT:    addi a2, a0, 256
+; CHECK-NEXT:    addi a0, a0, 128
 ; CHECK-NEXT:    vle32.v v24, (a0)
-; CHECK-NEXT:    addi a0, a0, 256
-; CHECK-NEXT:    vle32.v v0, (a0)
-; CHECK-NEXT:    vfmin.vv v8, v16, v8
+; CHECK-NEXT:    vle32.v v0, (a2)
+; CHECK-NEXT:    vfmin.vv v16, v24, v16
+; CHECK-NEXT:    vfmin.vv v8, v8, v0
+; CHECK-NEXT:    vfmin.vv v8, v8, v16
 ; CHECK-NEXT:    lui a0, %hi(.LCPI51_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI51_0)(a0)
-; CHECK-NEXT:    vfmin.vv v16, v24, v0
-; CHECK-NEXT:    vfmin.vv v8, v16, v8
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v16, ft0
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI51_0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vlse32.v v16, (a0), zero
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v16
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1057,12 +1010,12 @@
 define double @vreduce_fmin_v2f64(<2 x double>* %x) {
 ; CHECK-LABEL: vreduce_fmin_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI52_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI52_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI52_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI52_0)
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT:    vlse64.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1077,12 +1030,12 @@
 define double @vreduce_fmin_v4f64(<4 x double>* %x) {
 ; CHECK-LABEL: vreduce_fmin_v4f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI53_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI53_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v10, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI53_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI53_0)
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT:    vlse64.v v10, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v10
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1095,12 +1048,12 @@
 define double @vreduce_fmin_v4f64_nonans(<4 x double>* %x) {
 ; CHECK-LABEL: vreduce_fmin_v4f64_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI54_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI54_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v10, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI54_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI54_0)
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT:    vlse64.v v10, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v10
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1113,12 +1066,12 @@
 define double @vreduce_fmin_v4f64_nonans_noinfs(<4 x double>* %x) {
 ; CHECK-LABEL: vreduce_fmin_v4f64_nonans_noinfs:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI55_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI55_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v10, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI55_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI55_0)
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT:    vlse64.v v10, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v10
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1137,11 +1090,11 @@
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
 ; CHECK-NEXT:    vle64.v v16, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI56_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI56_0)(a0)
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v16, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI56_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI56_0)
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v16
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1156,12 +1109,12 @@
 define half @vreduce_fmax_v2f16(<2 x half>* %x) {
 ; CHECK-LABEL: vreduce_fmax_v2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI57_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI57_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI57_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI57_0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT:    vlse16.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1176,12 +1129,12 @@
 define half @vreduce_fmax_v4f16(<4 x half>* %x) {
 ; CHECK-LABEL: vreduce_fmax_v4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI58_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI58_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI58_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI58_0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT:    vlse16.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1194,12 +1147,12 @@
 define half @vreduce_fmax_v4f16_nonans(<4 x half>* %x) {
 ; CHECK-LABEL: vreduce_fmax_v4f16_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI59_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI59_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI59_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI59_0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT:    vlse16.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1212,12 +1165,12 @@
 define half @vreduce_fmax_v4f16_nonans_noinfs(<4 x half>* %x) {
 ; CHECK-LABEL: vreduce_fmax_v4f16_nonans_noinfs:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI60_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI60_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI60_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI60_0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT:    vlse16.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1237,11 +1190,11 @@
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
 ; CHECK-NEXT:    vle16.v v16, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI61_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI61_0)(a0)
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v16, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI61_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI61_0)
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT:    vlse16.v v16, (a0), zero
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v16
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1256,12 +1209,12 @@
 define float @vreduce_fmax_v2f32(<2 x float>* %x) {
 ; CHECK-LABEL: vreduce_fmax_v2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI62_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI62_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI62_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI62_0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vlse32.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1276,12 +1229,12 @@
 define float @vreduce_fmax_v4f32(<4 x float>* %x) {
 ; CHECK-LABEL: vreduce_fmax_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI63_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI63_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI63_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI63_0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vlse32.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1294,12 +1247,12 @@
 define float @vreduce_fmax_v4f32_nonans(<4 x float>* %x) {
 ; CHECK-LABEL: vreduce_fmax_v4f32_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI64_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI64_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI64_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI64_0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vlse32.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1312,12 +1265,12 @@
 define float @vreduce_fmax_v4f32_nonans_noinfs(<4 x float>* %x) {
 ; CHECK-LABEL: vreduce_fmax_v4f32_nonans_noinfs:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI65_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI65_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI65_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI65_0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vlse32.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1334,20 +1287,20 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    addi a2, a0, 384
-; CHECK-NEXT:    vle32.v v8, (a2)
-; CHECK-NEXT:    addi a2, a0, 128
 ; CHECK-NEXT:    vle32.v v16, (a2)
+; CHECK-NEXT:    addi a2, a0, 256
+; CHECK-NEXT:    addi a0, a0, 128
 ; CHECK-NEXT:    vle32.v v24, (a0)
-; CHECK-NEXT:    addi a0, a0, 256
-; CHECK-NEXT:    vle32.v v0, (a0)
-; CHECK-NEXT:    vfmax.vv v8, v16, v8
+; CHECK-NEXT:    vle32.v v0, (a2)
+; CHECK-NEXT:    vfmax.vv v16, v24, v16
+; CHECK-NEXT:    vfmax.vv v8, v8, v0
+; CHECK-NEXT:    vfmax.vv v8, v8, v16
 ; CHECK-NEXT:    lui a0, %hi(.LCPI66_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI66_0)(a0)
-; CHECK-NEXT:    vfmax.vv v16, v24, v0
-; CHECK-NEXT:    vfmax.vv v8, v16, v8
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v16, ft0
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI66_0)
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vlse32.v v16, (a0), zero
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v16
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1362,12 +1315,12 @@
 define double @vreduce_fmax_v2f64(<2 x double>* %x) {
 ; CHECK-LABEL: vreduce_fmax_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI67_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI67_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI67_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI67_0)
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT:    vlse64.v v9, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1382,12 +1335,12 @@
 define double @vreduce_fmax_v4f64(<4 x double>* %x) {
 ; CHECK-LABEL: vreduce_fmax_v4f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI68_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI68_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v10, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI68_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI68_0)
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT:    vlse64.v v10, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v10
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1400,12 +1353,12 @@
 define double @vreduce_fmax_v4f64_nonans(<4 x double>* %x) {
 ; CHECK-LABEL: vreduce_fmax_v4f64_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI69_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI69_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v10, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI69_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI69_0)
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT:    vlse64.v v10, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v10
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1418,12 +1371,12 @@
 define double @vreduce_fmax_v4f64_nonans_noinfs(<4 x double>* %x) {
 ; CHECK-LABEL: vreduce_fmax_v4f64_nonans_noinfs:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI70_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI70_0)(a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v10, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI70_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI70_0)
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT:    vlse64.v v10, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v10
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1442,11 +1395,11 @@
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
 ; CHECK-NEXT:    vle64.v v16, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI71_0)
-; CHECK-NEXT:    fld ft0, %lo(.LCPI71_0)(a0)
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfmv.v.f v16, ft0
+; CHECK-NEXT:    lui a0, %hi(.LCPI71_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI71_0)
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v16
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
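Note: when the start value is a compile-time constant (a neutral element or a min/max limit in the constant pool), the updated checks above also show a second effect of the VL=1 splat. Instead of loading the constant into a scalar FP register (flh/flw/fld) and broadcasting it with vfmv.v.f at VLMAX, the lowering now materializes the constant-pool address and splats with a zero-strided vector load, for example:

  lui a0, %hi(.LCPI52_0)
  addi a0, a0, %lo(.LCPI52_0)
  vsetivli zero, 1, e64, m1, ta, mu
  vlse64.v v9, (a0), zero      # stride 0: write the constant into element 0

which puts the constant directly into the vector register and frees the scalar FP temporary.
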
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
@@ -9,7 +9,7 @@
 define signext i8 @vpreduce_add_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_add_v2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
@@ -25,7 +25,7 @@
 ; CHECK-LABEL: vpreduce_umax_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andi a0, a0, 255
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
@@ -40,7 +40,7 @@
 define signext i8 @vpreduce_smax_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_smax_v2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
@@ -56,7 +56,7 @@
 ; CHECK-LABEL: vpreduce_umin_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andi a0, a0, 255
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT:    vredminu.vs v9, v8, v9, v0.t
@@ -71,7 +71,7 @@
 define signext i8 @vpreduce_smin_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_smin_v2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
@@ -86,7 +86,7 @@
 define signext i8 @vpreduce_and_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_and_v2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
@@ -101,7 +101,7 @@
 define signext i8 @vpreduce_or_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_or_v2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
@@ -116,7 +116,7 @@
 define signext i8 @vpreduce_xor_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_xor_v2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
@@ -131,7 +131,7 @@
 define signext i8 @vpreduce_add_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_add_v4i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
@@ -147,7 +147,7 @@
 ; CHECK-LABEL: vpreduce_umax_v4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andi a0, a0, 255
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
@@ -162,7 +162,7 @@
 define signext i8 @vpreduce_smax_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_smax_v4i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
@@ -178,7 +178,7 @@
 ; CHECK-LABEL: vpreduce_umin_v4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andi a0, a0, 255
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT:    vredminu.vs v9, v8, v9, v0.t
@@ -193,7 +193,7 @@
 define signext i8 @vpreduce_smin_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_smin_v4i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
@@ -208,7 +208,7 @@
 define signext i8 @vpreduce_and_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_and_v4i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
@@ -223,7 +223,7 @@
 define signext i8 @vpreduce_or_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_or_v4i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
@@ -238,7 +238,7 @@
 define signext i8 @vpreduce_xor_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_xor_v4i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
@@ -253,7 +253,7 @@
 define signext i16 @vpreduce_add_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_add_v2i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
@@ -271,7 +271,7 @@
a2, 16 ; RV32-NEXT: addi a2, a2, -1 ; RV32-NEXT: and a0, a0, a2 -; RV32-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; RV32-NEXT: vmv.v.x v9, a0 ; RV32-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t @@ -283,7 +283,7 @@ ; RV64-NEXT: lui a2, 16 ; RV64-NEXT: addiw a2, a2, -1 ; RV64-NEXT: and a0, a0, a2 -; RV64-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; RV64-NEXT: vmv.v.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t @@ -298,7 +298,7 @@ define signext i16 @vpreduce_smax_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t @@ -316,7 +316,7 @@ ; RV32-NEXT: lui a2, 16 ; RV32-NEXT: addi a2, a2, -1 ; RV32-NEXT: and a0, a0, a2 -; RV32-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; RV32-NEXT: vmv.v.x v9, a0 ; RV32-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t @@ -328,7 +328,7 @@ ; RV64-NEXT: lui a2, 16 ; RV64-NEXT: addiw a2, a2, -1 ; RV64-NEXT: and a0, a0, a2 -; RV64-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; RV64-NEXT: vmv.v.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t @@ -343,7 +343,7 @@ define signext i16 @vpreduce_smin_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t @@ -358,7 +358,7 @@ define signext i16 @vpreduce_and_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t @@ -373,7 +373,7 @@ define signext i16 @vpreduce_or_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t @@ -388,7 +388,7 @@ define signext i16 @vpreduce_xor_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t @@ -403,7 +403,7 @@ define signext i16 @vpreduce_add_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; 
; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t
@@ -421,7 +421,7 @@
; RV32-NEXT: lui a2, 16
; RV32-NEXT: addi a2, a2, -1
; RV32-NEXT: and a0, a0, a2
-; RV32-NEXT: vsetvli a2, zero, e16, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; RV32-NEXT: vmv.v.x v9, a0
; RV32-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t
@@ -433,7 +433,7 @@
; RV64-NEXT: lui a2, 16
; RV64-NEXT: addiw a2, a2, -1
; RV64-NEXT: and a0, a0, a2
-; RV64-NEXT: vsetvli a2, zero, e16, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; RV64-NEXT: vmv.v.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t
@@ -448,7 +448,7 @@
define signext i16 @vpreduce_smax_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smax_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t
@@ -466,7 +466,7 @@
; RV32-NEXT: lui a2, 16
; RV32-NEXT: addi a2, a2, -1
; RV32-NEXT: and a0, a0, a2
-; RV32-NEXT: vsetvli a2, zero, e16, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; RV32-NEXT: vmv.v.x v9, a0
; RV32-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t
@@ -478,7 +478,7 @@
; RV64-NEXT: lui a2, 16
; RV64-NEXT: addiw a2, a2, -1
; RV64-NEXT: and a0, a0, a2
-; RV64-NEXT: vsetvli a2, zero, e16, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; RV64-NEXT: vmv.v.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t
@@ -493,7 +493,7 @@
define signext i16 @vpreduce_smin_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t
@@ -508,7 +508,7 @@
define signext i16 @vpreduce_and_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_and_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t
@@ -523,7 +523,7 @@
define signext i16 @vpreduce_or_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t
@@ -538,7 +538,7 @@
define signext i16 @vpreduce_xor_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t
@@ -553,7 +553,7 @@
define signext i32 @vpreduce_add_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t
@@ -568,7 +568,7 @@
define signext i32 @vpreduce_umax_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umax_v2i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; RV32-NEXT: vmv.v.x v9, a0
; RV32-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t
@@ -579,7 +579,7 @@
; RV64: # %bb.0:
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: srli a0, a0, 32
-; RV64-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; RV64-NEXT: vmv.v.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t
@@ -594,7 +594,7 @@
define signext i32 @vpreduce_smax_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smax_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t
@@ -609,7 +609,7 @@
define signext i32 @vpreduce_umin_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umin_v2i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; RV32-NEXT: vmv.v.x v9, a0
; RV32-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t
@@ -620,7 +620,7 @@
; RV64: # %bb.0:
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: srli a0, a0, 32
-; RV64-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; RV64-NEXT: vmv.v.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t
@@ -635,7 +635,7 @@
define signext i32 @vpreduce_smin_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t
@@ -650,7 +650,7 @@
define signext i32 @vpreduce_and_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_and_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t
@@ -665,7 +665,7 @@
define signext i32 @vpreduce_or_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t
@@ -680,7 +680,7 @@
define signext i32 @vpreduce_xor_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t
@@ -695,7 +695,7 @@
define signext i32 @vpreduce_add_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_add_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t
@@ -710,7 +710,7 @@
define signext i32 @vpreduce_umax_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umax_v4i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; RV32-NEXT: vmv.v.x v9, a0
; RV32-NEXT: vsetvli zero, a1, e32, m1, tu, mu
; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t
@@ -721,7 +721,7 @@
; RV64: # %bb.0:
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: srli a0, a0, 32
-; RV64-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; RV64-NEXT: vmv.v.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, mu
; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t
@@ -736,7 +736,7 @@
define signext i32 @vpreduce_smax_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smax_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t
@@ -751,7 +751,7 @@
define signext i32 @vpreduce_umin_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpreduce_umin_v4i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; RV32-NEXT: vmv.v.x v9, a0
; RV32-NEXT: vsetvli zero, a1, e32, m1, tu, mu
; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t
@@ -762,7 +762,7 @@
; RV64: # %bb.0:
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: srli a0, a0, 32
-; RV64-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; RV64-NEXT: vmv.v.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, mu
; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t
@@ -777,7 +777,7 @@
define signext i32 @vpreduce_smin_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_smin_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t
@@ -792,7 +792,7 @@
define signext i32 @vpreduce_and_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_and_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t
@@ -807,7 +807,7 @@
define signext i32 @vpreduce_or_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_or_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t
@@ -822,7 +822,7 @@
define signext i32 @vpreduce_xor_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t
@@ -841,7 +841,7 @@
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu
@@ -856,7 +856,7 @@
;
; RV64-LABEL: vpreduce_add_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
; RV64-NEXT: vredsum.vs v9, v8, v9, v0.t
@@ -875,7 +875,7 @@
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu
@@ -890,7 +890,7 @@
;
; RV64-LABEL: vpreduce_umax_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t
@@ -909,7 +909,7 @@
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu
@@ -924,7 +924,7 @@
;
; RV64-LABEL: vpreduce_smax_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
; RV64-NEXT: vredmax.vs v9, v8, v9, v0.t
@@ -943,7 +943,7 @@
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu
@@ -958,7 +958,7 @@
;
; RV64-LABEL: vpreduce_umin_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t
@@ -977,7 +977,7 @@
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu
@@ -992,7 +992,7 @@
;
; RV64-LABEL: vpreduce_smin_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
; RV64-NEXT: vredmin.vs v9, v8, v9, v0.t
@@ -1011,7 +1011,7 @@
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu
@@ -1026,7 +1026,7 @@
;
; RV64-LABEL: vpreduce_and_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
; RV64-NEXT: vredand.vs v9, v8, v9, v0.t
@@ -1045,7 +1045,7 @@
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu
@@ -1060,7 +1060,7 @@
;
; RV64-LABEL: vpreduce_or_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
; RV64-NEXT: vredor.vs v9, v8, v9, v0.t
@@ -1079,7 +1079,7 @@
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu
@@ -1094,7 +1094,7 @@
;
; RV64-LABEL: vpreduce_xor_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.x v9, a0
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
; RV64-NEXT: vredxor.vs v9, v8, v9, v0.t
@@ -1113,7 +1113,7 @@
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu
@@ -1128,7 +1128,7 @@
;
; RV64-LABEL: vpreduce_add_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.x v10, a0
; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu
; RV64-NEXT: vredsum.vs v10, v8, v10, v0.t
@@ -1147,7 +1147,7 @@
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu
@@ -1162,7 +1162,7 @@
;
; RV64-LABEL: vpreduce_umax_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.x v10, a0
; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu
; RV64-NEXT: vredmaxu.vs v10, v8, v10, v0.t
@@ -1181,7 +1181,7 @@
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu
@@ -1196,7 +1196,7 @@
;
; RV64-LABEL: vpreduce_smax_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.x v10, a0
; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu
; RV64-NEXT: vredmax.vs v10, v8, v10, v0.t
@@ -1215,7 +1215,7 @@
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu
@@ -1230,7 +1230,7 @@
;
; RV64-LABEL: vpreduce_umin_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.x v10, a0
; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu
; RV64-NEXT: vredminu.vs v10, v8, v10, v0.t
@@ -1249,7 +1249,7 @@
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu
@@ -1264,7 +1264,7 @@
;
; RV64-LABEL: vpreduce_smin_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.x v10, a0
; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu
; RV64-NEXT: vredmin.vs v10, v8, v10, v0.t
@@ -1283,7 +1283,7 @@
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu
@@ -1298,7 +1298,7 @@
;
; RV64-LABEL: vpreduce_and_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.x v10, a0
; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu
; RV64-NEXT: vredand.vs v10, v8, v10, v0.t
@@ -1317,7 +1317,7 @@
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu
@@ -1332,7 +1332,7 @@
;
; RV64-LABEL: vpreduce_or_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.x v10, a0
; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu
; RV64-NEXT: vredor.vs v10, v8, v10, v0.t
@@ -1351,7 +1351,7 @@
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu
@@ -1366,7 +1366,7 @@
;
; RV64-LABEL: vpreduce_xor_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.x v10, a0
; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu
; RV64-NEXT: vredxor.vs v10, v8, v10, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
@@ -23,7 +23,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vredsum.vs v8, v8, v9
@@ -41,7 +41,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vredsum.vs v8, v8, v9
@@ -59,7 +59,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vredsum.vs v8, v8, v9
@@ -77,7 +77,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vredsum.vs v8, v8, v9
@@ -96,7 +96,7 @@
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vredsum.vs v8, v8, v10
@@ -115,7 +115,7 @@
; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vredsum.vs v8, v8, v12
@@ -134,7 +134,7 @@
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vredsum.vs v8, v8, v16
@@ -156,7 +156,7 @@
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle8.v v16, (a0)
; CHECK-NEXT: vadd.vv v8, v8, v16
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vredsum.vs v8, v8, v16
@@ -188,7 +188,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vredsum.vs v8, v8, v9
@@ -206,7 +206,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vredsum.vs v8, v8, v9
@@ -224,7 +224,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vredsum.vs v8, v8, v9
@@ -242,7 +242,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vredsum.vs v8, v8, v10
@@ -261,7 +261,7 @@
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vredsum.vs v8, v8, v12
@@ -280,7 +280,7 @@
; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vredsum.vs v8, v8, v16
@@ -302,7 +302,7 @@
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle16.v v16, (a0)
; CHECK-NEXT: vadd.vv v8, v8, v16
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vredsum.vs v8, v8, v16
@@ -334,7 +334,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vredsum.vs v8, v8, v9
@@ -352,7 +352,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vredsum.vs v8, v8, v9
@@ -370,7 +370,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vredsum.vs v8, v8, v10
@@ -388,7 +388,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vredsum.vs v8, v8, v12
@@ -407,7 +407,7 @@
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vredsum.vs v8, v8, v16
@@ -429,7 +429,7 @@
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle32.v v16, (a0)
; CHECK-NEXT: vadd.vv v8, v8, v16
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vredsum.vs v8, v8, v16
@@ -471,7 +471,7 @@
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v9, 0
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vredsum.vs v8, v8, v9
@@ -486,7 +486,7 @@
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v9, 0
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT: vredsum.vs v8, v8, v9
@@ -504,7 +504,7 @@
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v10, 0
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vredsum.vs v8, v8, v10
@@ -519,7 +519,7 @@
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v10, 0
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: vredsum.vs v8, v8, v10
@@ -537,7 +537,7 @@
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v12, 0
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vredsum.vs v8, v8, v12
@@ -552,7 +552,7 @@
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v12, 0
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vredsum.vs v8, v8, v12
@@ -570,7 +570,7 @@
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v16, 0
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vredsum.vs v8, v8, v16
@@ -585,7 +585,7 @@
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v16, 0
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vredsum.vs v8, v8, v16
@@ -606,7 +606,7 @@
; RV32-NEXT: addi a0, a0, 128
; RV32-NEXT: vle64.v v16, (a0)
; RV32-NEXT: vadd.vv v8, v8, v16
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v16, 0
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vredsum.vs v8, v8, v16
@@ -624,7 +624,7 @@
; RV64-NEXT: addi a0, a0, 128
; RV64-NEXT: vle64.v v16, (a0)
; RV64-NEXT: vadd.vv v8, v8, v16
-; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v16, 0
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vredsum.vs v8, v8, v16
@@ -651,7 +651,7 @@
; RV32-NEXT: vadd.vv v16, v24, v16
; RV32-NEXT: vadd.vv v8, v8, v0
; RV32-NEXT: vadd.vv v8, v8, v16
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v16, 0
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vredsum.vs v8, v8, v16
@@ -675,7 +675,7 @@
; RV64-NEXT: vadd.vv v16, v24, v16
; RV64-NEXT: vadd.vv v8, v8, v0
; RV64-NEXT: vadd.vv v8, v8, v16
-; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v16, 0
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vredsum.vs v8, v8, v16
@@ -707,7 +707,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, -1
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vredand.vs v8, v8, v9
@@ -725,7 +725,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, -1
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vredand.vs v8, v8, v9
@@ -743,7 +743,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, -1
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vredand.vs v8, v8, v9
@@ -761,7 +761,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, -1
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vredand.vs v8, v8, v9
@@ -780,7 +780,7 @@
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v10, -1
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vredand.vs v8, v8, v10
@@ -799,7 +799,7 @@
; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v12, -1
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vredand.vs v8, v8, v12
@@ -818,7 +818,7 @@
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, -1
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vredand.vs v8, v8, v16
@@ -840,7 +840,7 @@
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle8.v v16, (a0)
; CHECK-NEXT: vand.vv v8, v8, v16
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, -1
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vredand.vs v8, v8, v16
@@ -872,7 +872,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, -1
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vredand.vs v8, v8, v9
@@ -890,7 +890,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, -1
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vredand.vs v8, v8, v9
@@ -908,7 +908,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, -1
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vredand.vs v8, v8, v9
@@ -926,7 +926,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v10, -1
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vredand.vs v8, v8, v10
@@ -945,7 +945,7 @@
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v12, -1
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vredand.vs v8, v8, v12
@@ -964,7 +964,7 @@
; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, -1
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vredand.vs v8, v8, v16
@@ -986,7 +986,7 @@
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle16.v v16, (a0)
; CHECK-NEXT: vand.vv v8, v8, v16
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, -1
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vredand.vs v8, v8, v16
@@ -1018,7 +1018,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, -1
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vredand.vs v8, v8, v9
@@ -1036,7 +1036,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, -1
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vredand.vs v8, v8, v9
@@ -1054,7 +1054,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v10, -1
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vredand.vs v8, v8, v10
@@ -1072,7 +1072,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v12, -1
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vredand.vs v8, v8, v12
@@ -1091,7 +1091,7 @@
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, -1
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vredand.vs v8, v8, v16
@@ -1113,7 +1113,7 @@
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle32.v v16, (a0)
; CHECK-NEXT: vand.vv v8, v8, v16
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, -1
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vredand.vs v8, v8, v16
@@ -1155,7 +1155,7 @@
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v9, -1
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vredand.vs v8, v8, v9
@@ -1170,7 +1170,7 @@
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v9, -1
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT: vredand.vs v8, v8, v9
@@ -1188,7 +1188,7 @@
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v10, -1
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vredand.vs v8, v8, v10
@@ -1203,7 +1203,7 @@
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v10, -1
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: vredand.vs v8, v8, v10
@@ -1221,7 +1221,7 @@
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v12, -1
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vredand.vs v8, v8, v12
@@ -1236,7 +1236,7 @@
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v12, -1
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vredand.vs v8, v8, v12
@@ -1254,7 +1254,7 @@
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v16, -1
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vredand.vs v8, v8, v16
@@ -1269,7 +1269,7 @@
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v16, -1
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vredand.vs v8, v8, v16
@@ -1290,7 +1290,7 @@
; RV32-NEXT: addi a0, a0, 128
; RV32-NEXT: vle64.v v16, (a0)
; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v16, -1
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vredand.vs v8, v8, v16
@@ -1308,7 +1308,7 @@
; RV64-NEXT: addi a0, a0, 128
; RV64-NEXT: vle64.v v16, (a0)
; RV64-NEXT: vand.vv v8, v8, v16
-; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v16, -1
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vredand.vs v8, v8, v16
@@ -1335,7 +1335,7 @@
; RV32-NEXT: vand.vv v16, v24, v16
; RV32-NEXT: vand.vv v8, v8, v0
; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v16, -1
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vredand.vs v8, v8, v16
@@ -1359,7 +1359,7 @@
; RV64-NEXT: vand.vv v16, v24, v16
; RV64-NEXT: vand.vv v8, v8, v0
; RV64-NEXT: vand.vv v8, v8, v16
-; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v16, -1
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vredand.vs v8, v8, v16
@@ -1391,7 +1391,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vredor.vs v8, v8, v9
@@ -1409,7 +1409,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vredor.vs v8, v8, v9
@@ -1427,7 +1427,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vredor.vs v8, v8, v9
@@ -1445,7 +1445,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vredor.vs v8, v8, v9
@@ -1464,7 +1464,7 @@
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vredor.vs v8, v8, v10
@@ -1483,7 +1483,7 @@
; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vredor.vs v8, v8, v12
@@ -1502,7 +1502,7 @@
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vredor.vs v8, v8, v16
@@ -1524,7 +1524,7 @@
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle8.v v16, (a0)
; CHECK-NEXT: vor.vv v8, v8, v16
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vredor.vs v8, v8, v16
@@ -1556,7 +1556,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vredor.vs v8, v8, v9
@@ -1574,7 +1574,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vredor.vs v8, v8, v9
@@ -1592,7 +1592,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vredor.vs v8, v8, v9
@@ -1610,7 +1610,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vredor.vs v8, v8, v10
@@ -1629,7 +1629,7 @@
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vredor.vs v8, v8, v12
@@ -1648,7 +1648,7 @@
; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vredor.vs v8, v8, v16
@@ -1670,7 +1670,7 @@
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle16.v v16, (a0)
; CHECK-NEXT: vor.vv v8, v8, v16
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vredor.vs v8, v8, v16
@@ -1702,7 +1702,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vredor.vs v8, v8, v9
@@ -1720,7 +1720,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vredor.vs v8, v8, v9
@@ -1738,7 +1738,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vredor.vs v8, v8, v10
@@ -1756,7 +1756,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vredor.vs v8, v8, v12
@@ -1775,7 +1775,7 @@
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vredor.vs v8, v8, v16
@@ -1797,7 +1797,7 @@
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle32.v v16, (a0)
; CHECK-NEXT: vor.vv v8, v8, v16
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vredor.vs v8, v8, v16
@@ -1839,7 +1839,7 @@
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v9, 0
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vredor.vs v8, v8, v9
@@ -1854,7 +1854,7 @@
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v9, 0
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT: vredor.vs v8, v8, v9
@@ -1872,7 +1872,7 @@
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v10, 0
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vredor.vs v8, v8, v10
@@ -1887,7 +1887,7 @@
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v10, 0
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: vredor.vs v8, v8, v10
@@ -1905,7 +1905,7 @@
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v12, 0
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vredor.vs v8, v8, v12
@@ -1920,7 +1920,7 @@
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v12, 0
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vredor.vs v8, v8, v12
@@ -1938,7 +1938,7 @@
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v16, 0
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vredor.vs v8, v8, v16
@@ -1953,7 +1953,7 @@
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v16, 0
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vredor.vs v8, v8, v16
@@ -1974,7 +1974,7 @@
; RV32-NEXT: addi a0, a0, 128
; RV32-NEXT: vle64.v v16, (a0)
; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v16, 0
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vredor.vs v8, v8, v16
@@ -1992,7 +1992,7 @@
; RV64-NEXT: addi a0, a0, 128
; RV64-NEXT: vle64.v v16, (a0)
; RV64-NEXT: vor.vv v8, v8, v16
-; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v16, 0
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vredor.vs v8, v8, v16
@@ -2019,7 +2019,7 @@
; RV32-NEXT: vor.vv v16, v24, v16
; RV32-NEXT: vor.vv v8, v8, v0
; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v16, 0
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vredor.vs v8, v8, v16
@@ -2043,7 +2043,7 @@
; RV64-NEXT: vor.vv v16, v24, v16
; RV64-NEXT: vor.vv v8, v8, v0
; RV64-NEXT: vor.vv v8, v8, v16
-; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vmv.v.i v16, 0
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV64-NEXT: vredor.vs v8, v8, v16
@@ -2075,7 +2075,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vredxor.vs v8, v8, v9
@@ -2093,7 +2093,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vredxor.vs v8, v8, v9
@@ -2111,7 +2111,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vredxor.vs v8, v8, v9
@@ -2129,7 +2129,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vredxor.vs v8, v8, v9
@@ -2148,7 +2148,7 @@
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vredxor.vs v8, v8, v10
@@ -2167,7 +2167,7 @@
; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vredxor.vs v8, v8, v12
@@ -2186,7 +2186,7 @@
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vredxor.vs v8, v8, v16
@@ -2208,7 +2208,7 @@
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle8.v v16, (a0)
; CHECK-NEXT: vxor.vv v8, v8, v16
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vredxor.vs v8, v8, v16
@@ -2240,7 +2240,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vredxor.vs v8, v8, v9
@@ -2258,7 +2258,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vredxor.vs v8, v8, v9
@@ -2276,7 +2276,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vredxor.vs v8, v8, v9
@@ -2294,7 +2294,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vredxor.vs v8, v8, v10
@@ -2313,7 +2313,7 @@
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vredxor.vs v8, v8, v12
@@ -2332,7 +2332,7 @@
; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vredxor.vs v8, v8, v16
@@ -2354,7 +2354,7 @@
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle16.v v16, (a0)
; CHECK-NEXT: vxor.vv v8, v8, v16
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vredxor.vs v8, v8, v16
@@ -2386,7 +2386,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vredxor.vs v8, v8, v9
@@ -2404,7 +2404,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vredxor.vs v8, v8, v9
@@ -2422,7 +2422,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vredxor.vs v8, v8, v10
@@ -2440,7 +2440,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vredxor.vs v8, v8, v12
@@ -2459,7 +2459,7 @@
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vredxor.vs v8, v8, v16
@@ -2481,7 +2481,7 @@
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle32.v v16, (a0)
; CHECK-NEXT: vxor.vv v8, v8, v16
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vredxor.vs v8, v8, v16
@@ -2523,7 +2523,7 @@
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vmv.v.i v9, 0
; RV32-NEXT: vsetivli zero, 2, e64, m1,
ta, mu ; RV32-NEXT: vredxor.vs v8, v8, v9 @@ -2538,7 +2538,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.i v9, 0 ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vredxor.vs v8, v8, v9 @@ -2556,7 +2556,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vmv.v.i v10, 0 ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vredxor.vs v8, v8, v10 @@ -2571,7 +2571,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.i v10, 0 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vredxor.vs v8, v8, v10 @@ -2589,7 +2589,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vmv.v.i v12, 0 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vredxor.vs v8, v8, v12 @@ -2604,7 +2604,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.i v12, 0 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vredxor.vs v8, v8, v12 @@ -2622,7 +2622,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vmv.v.i v16, 0 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredxor.vs v8, v8, v16 @@ -2637,7 +2637,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.i v16, 0 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredxor.vs v8, v8, v16 @@ -2658,7 +2658,7 @@ ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v16, (a0) ; RV32-NEXT: vxor.vv v8, v8, v16 -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vmv.v.i v16, 0 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredxor.vs v8, v8, v16 @@ -2676,7 +2676,7 @@ ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v16, (a0) ; RV64-NEXT: vxor.vv v8, v8, v16 -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.i v16, 0 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredxor.vs v8, v8, v16 @@ -2703,7 +2703,7 @@ ; RV32-NEXT: vxor.vv v16, v24, v16 ; RV32-NEXT: vxor.vv v8, v8, v0 ; RV32-NEXT: vxor.vv v8, v8, v16 -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vmv.v.i v16, 0 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredxor.vs v8, v8, v16 @@ -2727,7 +2727,7 @@ ; RV64-NEXT: vxor.vv v16, v24, v16 ; RV64-NEXT: vxor.vv v8, v8, v0 ; RV64-NEXT: vxor.vv v8, v8, v16 -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.i v16, 0 ; RV64-NEXT: vsetivli 
zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredxor.vs v8, v8, v16 @@ -2760,7 +2760,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 127 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 @@ -2779,7 +2779,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 127 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 @@ -2798,7 +2798,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 127 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 @@ -2817,7 +2817,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 127 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 @@ -2837,7 +2837,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 127 -; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v10 @@ -2857,7 +2857,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 127 -; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v12 @@ -2877,7 +2877,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 127 -; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v16 @@ -2900,7 +2900,7 @@ ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vmin.vv v8, v8, v16 ; CHECK-NEXT: li a0, 127 -; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v16 @@ -2934,7 +2934,7 @@ ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: lui a0, 8 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; RV32-NEXT: vmv.v.x v9, a0 ; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v9 @@ -2947,7 +2947,7 @@ ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: lui a0, 8 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; RV64-NEXT: vmv.v.x v9, a0 ; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v9 @@ -2967,7 +2967,7 @@ ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: lui a0, 8 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; 
RV32-NEXT: vmv.v.x v9, a0 ; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v9 @@ -2980,7 +2980,7 @@ ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: lui a0, 8 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; RV64-NEXT: vmv.v.x v9, a0 ; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v9 @@ -3000,7 +3000,7 @@ ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: lui a0, 8 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; RV32-NEXT: vmv.v.x v9, a0 ; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v9 @@ -3013,7 +3013,7 @@ ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: lui a0, 8 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; RV64-NEXT: vmv.v.x v9, a0 ; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v9 @@ -3033,7 +3033,7 @@ ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: lui a0, 8 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; RV32-NEXT: vmv.v.x v10, a0 ; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v10 @@ -3046,7 +3046,7 @@ ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: lui a0, 8 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; RV64-NEXT: vmv.v.x v10, a0 ; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v10 @@ -3067,7 +3067,7 @@ ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: lui a0, 8 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; RV32-NEXT: vmv.v.x v12, a0 ; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v12 @@ -3081,7 +3081,7 @@ ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: lui a0, 8 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; RV64-NEXT: vmv.v.x v12, a0 ; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v12 @@ -3102,7 +3102,7 @@ ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: lui a0, 8 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; RV32-NEXT: vmv.v.x v16, a0 ; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v16 @@ -3116,7 +3116,7 @@ ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: lui a0, 8 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; RV64-NEXT: vmv.v.x v16, a0 ; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v16 @@ -3140,7 +3140,7 @@ ; RV32-NEXT: vmin.vv v8, v8, v16 ; RV32-NEXT: lui a0, 8 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; RV32-NEXT: vmv.v.x v16, a0 ; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v16 @@ -3157,7 +3157,7 @@ ; RV64-NEXT: vmin.vv v8, v8, v16 ; RV64-NEXT: lui a0, 8 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; RV64-NEXT: vmv.v.x v16, a0 ; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v16 @@ 
-3191,7 +3191,7 @@ ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; RV32-NEXT: vmv.v.x v9, a0 ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v9 @@ -3204,7 +3204,7 @@ ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: lui a0, 524288 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; RV64-NEXT: vmv.v.x v9, a0 ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v9 @@ -3224,7 +3224,7 @@ ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; RV32-NEXT: vmv.v.x v9, a0 ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v9 @@ -3237,7 +3237,7 @@ ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: lui a0, 524288 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; RV64-NEXT: vmv.v.x v9, a0 ; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v9 @@ -3257,7 +3257,7 @@ ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; RV32-NEXT: vmv.v.x v10, a0 ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v10 @@ -3270,7 +3270,7 @@ ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: lui a0, 524288 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; RV64-NEXT: vmv.v.x v10, a0 ; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v10 @@ -3290,7 +3290,7 @@ ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; RV32-NEXT: vmv.v.x v12, a0 ; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v12 @@ -3303,7 +3303,7 @@ ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: lui a0, 524288 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; RV64-NEXT: vmv.v.x v12, a0 ; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v12 @@ -3324,7 +3324,7 @@ ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetvli a2, zero, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; RV32-NEXT: vmv.v.x v16, a0 ; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v16 @@ -3338,7 +3338,7 @@ ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: lui a0, 524288 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetvli a2, zero, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; RV64-NEXT: vmv.v.x v16, a0 ; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v16 @@ -3362,7 +3362,7 @@ ; RV32-NEXT: vmin.vv v8, v8, v16 ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetvli a2, zero, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; RV32-NEXT: vmv.v.x v16, a0 ; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; RV32-NEXT: vredmin.vs v8, v8, v16 @@ -3379,7 +3379,7 @@ ; RV64-NEXT: vmin.vv v8, v8, v16 ; 
RV64-NEXT: lui a0, 524288 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetvli a2, zero, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; RV64-NEXT: vmv.v.x v16, a0 ; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v16 @@ -3428,7 +3428,7 @@ ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 ; RV32-NEXT: sw a0, 12(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu @@ -3447,7 +3447,7 @@ ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: li a0, -1 ; RV64-NEXT: srli a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v9, a0 ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v9 @@ -3472,7 +3472,7 @@ ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 ; RV32-NEXT: sw a0, 12(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu @@ -3491,7 +3491,7 @@ ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: li a0, -1 ; RV64-NEXT: srli a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v10, a0 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v10 @@ -3516,7 +3516,7 @@ ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 ; RV32-NEXT: sw a0, 12(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu @@ -3535,7 +3535,7 @@ ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: li a0, -1 ; RV64-NEXT: srli a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v12, a0 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v12 @@ -3560,7 +3560,7 @@ ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 ; RV32-NEXT: sw a0, 12(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu @@ -3579,7 +3579,7 @@ ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: li a0, -1 ; RV64-NEXT: srli a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v16, a0 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v16 @@ -3607,7 +3607,7 @@ ; RV32-NEXT: addi a0, a0, -1 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: vmin.vv v8, v8, v16 -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu @@ -3629,7 +3629,7 @@ ; RV64-NEXT: vmin.vv v8, v8, v16 ; RV64-NEXT: li a0, -1 ; RV64-NEXT: srli a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v16, a0 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v16 @@ -3662,7 +3662,7 @@ ; RV32-NEXT: vmin.vv v24, v0, v24 ; RV32-NEXT: vmin.vv v8, v8, v16 ; RV32-NEXT: vmin.vv v8, v8, v24 -; RV32-NEXT: vsetvli a0, 
zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu @@ -3690,7 +3690,7 @@ ; RV64-NEXT: vmin.vv v8, v8, v16 ; RV64-NEXT: li a0, -1 ; RV64-NEXT: srli a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v16, a0 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredmin.vs v8, v8, v16 @@ -3723,7 +3723,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, -128 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 @@ -3742,7 +3742,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, -128 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 @@ -3761,7 +3761,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, -128 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 @@ -3780,7 +3780,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, -128 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 @@ -3800,7 +3800,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, -128 -; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v10 @@ -3820,7 +3820,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, -128 -; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v12 @@ -3840,7 +3840,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, -128 -; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v16 @@ -3863,7 +3863,7 @@ ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vmax.vv v8, v8, v16 ; CHECK-NEXT: li a0, -128 -; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v16 @@ -3896,7 +3896,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a0, 1048568 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 @@ -3915,7 +3915,7 @@ ; CHECK-NEXT: 
vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a0, 1048568 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 @@ -3934,7 +3934,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a0, 1048568 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 @@ -3953,7 +3953,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a0, 1048568 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v10 @@ -3973,7 +3973,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a0, 1048568 -; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v12 @@ -3993,7 +3993,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a0, 1048568 -; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v16 @@ -4016,7 +4016,7 @@ ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vmax.vv v8, v8, v16 ; CHECK-NEXT: lui a0, 1048568 -; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v16 @@ -4049,7 +4049,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: lui a0, 524288 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 @@ -4068,7 +4068,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: lui a0, 524288 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 @@ -4087,7 +4087,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: lui a0, 524288 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v10 @@ -4106,7 +4106,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: lui a0, 524288 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v12 @@ -4126,7 +4126,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: lui a0, 524288 -; CHECK-NEXT: vsetvli a2, zero, e32, 
m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v16 @@ -4149,7 +4149,7 @@ ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vmax.vv v8, v8, v16 ; CHECK-NEXT: lui a0, 524288 -; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v16 @@ -4196,7 +4196,7 @@ ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw zero, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu @@ -4215,7 +4215,7 @@ ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: li a0, -1 ; RV64-NEXT: slli a0, a0, 63 -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v9, a0 ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vredmax.vs v8, v8, v9 @@ -4238,7 +4238,7 @@ ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw zero, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu @@ -4257,7 +4257,7 @@ ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: li a0, -1 ; RV64-NEXT: slli a0, a0, 63 -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v10, a0 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vredmax.vs v8, v8, v10 @@ -4280,7 +4280,7 @@ ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw zero, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu @@ -4299,7 +4299,7 @@ ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: li a0, -1 ; RV64-NEXT: slli a0, a0, 63 -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v12, a0 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vredmax.vs v8, v8, v12 @@ -4322,7 +4322,7 @@ ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw zero, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu @@ -4341,7 +4341,7 @@ ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: li a0, -1 ; RV64-NEXT: slli a0, a0, 63 -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v16, a0 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredmax.vs v8, v8, v16 @@ -4367,7 +4367,7 @@ ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw zero, 8(sp) ; RV32-NEXT: vmax.vv v8, v8, v16 -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu @@ -4389,7 +4389,7 @@ ; RV64-NEXT: vmax.vv v8, v8, v16 ; RV64-NEXT: li a0, -1 ; RV64-NEXT: slli a0, a0, 63 -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x 
v16, a0 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredmax.vs v8, v8, v16 @@ -4420,7 +4420,7 @@ ; RV32-NEXT: vmax.vv v24, v0, v24 ; RV32-NEXT: vmax.vv v8, v8, v16 ; RV32-NEXT: vmax.vv v8, v8, v24 -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu @@ -4448,7 +4448,7 @@ ; RV64-NEXT: vmax.vv v8, v8, v16 ; RV64-NEXT: li a0, -1 ; RV64-NEXT: slli a0, a0, 63 -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v16, a0 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredmax.vs v8, v8, v16 @@ -4480,7 +4480,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 @@ -4498,7 +4498,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 @@ -4516,7 +4516,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 @@ -4534,7 +4534,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 @@ -4553,7 +4553,7 @@ ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v10, -1 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v10 @@ -4572,7 +4572,7 @@ ; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v12, -1 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v12 @@ -4591,7 +4591,7 @@ ; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v16, -1 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v16 @@ -4613,7 +4613,7 @@ ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vminu.vv v8, v8, v16 -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v16, -1 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v16 @@ -4645,7 +4645,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu 
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 @@ -4663,7 +4663,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 @@ -4681,7 +4681,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 @@ -4699,7 +4699,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v10, -1 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v10 @@ -4718,7 +4718,7 @@ ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v12, -1 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v12 @@ -4737,7 +4737,7 @@ ; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v16, -1 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v16 @@ -4759,7 +4759,7 @@ ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vminu.vv v8, v8, v16 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v16, -1 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v16 @@ -4791,7 +4791,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 @@ -4809,7 +4809,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 @@ -4827,7 +4827,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v10, -1 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v10 @@ -4845,7 +4845,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v12, -1 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v12 @@ -4864,7 +4864,7 @@ ; CHECK-NEXT: li a1, 32 ; 
CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v16, -1 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v16 @@ -4886,7 +4886,7 @@ ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vminu.vv v8, v8, v16 -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v16, -1 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v16 @@ -4928,7 +4928,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vmv.v.i v9, -1 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vredminu.vs v8, v8, v9 @@ -4943,7 +4943,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.i v9, -1 ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vredminu.vs v8, v8, v9 @@ -4961,7 +4961,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vmv.v.i v10, -1 ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vredminu.vs v8, v8, v10 @@ -4976,7 +4976,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.i v10, -1 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vredminu.vs v8, v8, v10 @@ -4994,7 +4994,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vmv.v.i v12, -1 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vredminu.vs v8, v8, v12 @@ -5009,7 +5009,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.i v12, -1 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vredminu.vs v8, v8, v12 @@ -5027,7 +5027,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vmv.v.i v16, -1 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredminu.vs v8, v8, v16 @@ -5042,7 +5042,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.i v16, -1 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredminu.vs v8, v8, v16 @@ -5063,7 +5063,7 @@ ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v16, (a0) ; RV32-NEXT: vminu.vv v8, v8, v16 -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vmv.v.i v16, -1 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredminu.vs v8, v8, v16 @@ 
-5081,7 +5081,7 @@ ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v16, (a0) ; RV64-NEXT: vminu.vv v8, v8, v16 -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.i v16, -1 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredminu.vs v8, v8, v16 @@ -5108,7 +5108,7 @@ ; RV32-NEXT: vminu.vv v16, v24, v16 ; RV32-NEXT: vminu.vv v8, v8, v0 ; RV32-NEXT: vminu.vv v8, v8, v16 -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vmv.v.i v16, -1 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredminu.vs v8, v8, v16 @@ -5132,7 +5132,7 @@ ; RV64-NEXT: vminu.vv v16, v24, v16 ; RV64-NEXT: vminu.vv v8, v8, v0 ; RV64-NEXT: vminu.vv v8, v8, v16 -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.i v16, -1 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredminu.vs v8, v8, v16 @@ -5164,7 +5164,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 @@ -5182,7 +5182,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 @@ -5200,7 +5200,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 @@ -5218,7 +5218,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 @@ -5237,7 +5237,7 @@ ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v10 @@ -5256,7 +5256,7 @@ ; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v12 @@ -5275,7 +5275,7 @@ ; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v16 @@ -5297,7 +5297,7 @@ ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vmaxu.vv v8, v8, v16 -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v16, 0 ; 
CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v16 @@ -5329,7 +5329,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 @@ -5347,7 +5347,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 @@ -5365,7 +5365,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 @@ -5383,7 +5383,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v10 @@ -5402,7 +5402,7 @@ ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v12 @@ -5421,7 +5421,7 @@ ; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v16 @@ -5443,7 +5443,7 @@ ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vmaxu.vv v8, v8, v16 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v16 @@ -5475,7 +5475,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 @@ -5493,7 +5493,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 @@ -5511,7 +5511,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v10 @@ -5529,7 +5529,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: 
vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v12 @@ -5548,7 +5548,7 @@ ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v16 @@ -5570,7 +5570,7 @@ ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vmaxu.vv v8, v8, v16 -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v16 @@ -5612,7 +5612,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vmv.v.i v9, 0 ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vredmaxu.vs v8, v8, v9 @@ -5627,7 +5627,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.i v9, 0 ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vredmaxu.vs v8, v8, v9 @@ -5645,7 +5645,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vmv.v.i v10, 0 ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vredmaxu.vs v8, v8, v10 @@ -5660,7 +5660,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.i v10, 0 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vredmaxu.vs v8, v8, v10 @@ -5678,7 +5678,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vmv.v.i v12, 0 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vredmaxu.vs v8, v8, v12 @@ -5693,7 +5693,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.i v12, 0 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vredmaxu.vs v8, v8, v12 @@ -5711,7 +5711,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vmv.v.i v16, 0 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vredmaxu.vs v8, v8, v16 @@ -5726,7 +5726,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.i v16, 0 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredmaxu.vs v8, v8, v16 @@ -5747,7 +5747,7 @@ ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v16, (a0) ; 
RV32-NEXT: vmaxu.vv v8, v8, v16
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT: vmv.v.i v16, 0
 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT: vredmaxu.vs v8, v8, v16
@@ -5765,7 +5765,7 @@
 ; RV64-NEXT: addi a0, a0, 128
 ; RV64-NEXT: vle64.v v16, (a0)
 ; RV64-NEXT: vmaxu.vv v8, v8, v16
-; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; RV64-NEXT: vmv.v.i v16, 0
 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
 ; RV64-NEXT: vredmaxu.vs v8, v8, v16
@@ -5792,7 +5792,7 @@
 ; RV32-NEXT: vmaxu.vv v16, v24, v16
 ; RV32-NEXT: vmaxu.vv v8, v8, v0
 ; RV32-NEXT: vmaxu.vv v8, v8, v16
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT: vmv.v.i v16, 0
 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT: vredmaxu.vs v8, v8, v16
@@ -5816,7 +5816,7 @@
 ; RV64-NEXT: vmaxu.vv v16, v24, v16
 ; RV64-NEXT: vmaxu.vv v8, v8, v0
 ; RV64-NEXT: vmaxu.vv v8, v8, v16
-; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; RV64-NEXT: vmv.v.i v16, 0
 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
 ; RV64-NEXT: vredmaxu.vs v8, v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
@@ -10,9 +10,9 @@
 ; CHECK-LABEL: vreduce_fadd_nxv1f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT: flh ft0, %lo(.LCPI0_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI0_0)
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT: vlse16.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vfredusum.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s ft0, v8
@@ -25,7 +25,7 @@
 define half @vreduce_ord_fadd_nxv1f16( %v, half %s) {
 ; CHECK-LABEL: vreduce_ord_fadd_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v9, fa0
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vfredosum.vs v8, v8, v9
@@ -41,9 +41,9 @@
 ; CHECK-LABEL: vreduce_fadd_nxv2f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT: flh ft0, %lo(.LCPI2_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI2_0)
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT: vlse16.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vfredusum.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s ft0, v8
@@ -56,7 +56,7 @@
 define half @vreduce_ord_fadd_nxv2f16( %v, half %s) {
 ; CHECK-LABEL: vreduce_ord_fadd_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v9, fa0
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vfredosum.vs v8, v8, v9
@@ -72,9 +72,10 @@
 ; CHECK-LABEL: vreduce_fadd_nxv4f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT: flh ft0, %lo(.LCPI4_0)(a0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI4_0)
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT: vlse16.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
 ; CHECK-NEXT: vfredusum.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s ft0, v8
 ; CHECK-NEXT: fadd.h fa0, fa0, ft0
@@ -86,8 +87,9 @@
 define half @vreduce_ord_fadd_nxv4f16( %v, half %s) {
 ; CHECK-LABEL: vreduce_ord_fadd_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v9, fa0
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vfredosum.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
@@ -101,9 +103,9 @@
 ; CHECK-LABEL: vreduce_fadd_nxv1f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI6_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI6_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI6_0)
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT: vlse32.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vfredusum.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s ft0, v8
@@ -116,7 +118,7 @@
 define float @vreduce_ord_fadd_nxv1f32( %v, float %s) {
 ; CHECK-LABEL: vreduce_ord_fadd_nxv1f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v9, fa0
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vfredosum.vs v8, v8, v9
@@ -132,9 +134,10 @@
 ; CHECK-LABEL: vreduce_fadd_nxv2f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI8_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI8_0)
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT: vlse32.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
 ; CHECK-NEXT: vfredusum.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s ft0, v8
 ; CHECK-NEXT: fadd.s fa0, fa0, ft0
@@ -146,8 +149,9 @@
 define float @vreduce_ord_fadd_nxv2f32( %v, float %s) {
 ; CHECK-LABEL: vreduce_ord_fadd_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v9, fa0
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vfredosum.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
@@ -161,9 +165,9 @@
 ; CHECK-LABEL: vreduce_fadd_nxv4f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI10_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v10, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_0)
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT: vlse32.v v10, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vfredusum.vs v8, v8, v10
 ; CHECK-NEXT: vfmv.f.s ft0, v8
@@ -176,7 +180,7 @@
 define float @vreduce_ord_fadd_nxv4f32( %v, float %s) {
 ; CHECK-LABEL: vreduce_ord_fadd_nxv4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v10, fa0
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vfredosum.vs v8, v8, v10
@@ -192,9 +196,10 @@
 ; CHECK-LABEL: vreduce_fadd_nxv1f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT: fld ft0, %lo(.LCPI12_0)(a0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI12_0)
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vlse64.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
 ; CHECK-NEXT: vfredusum.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s ft0, v8
 ; CHECK-NEXT: fadd.d fa0, fa0, ft0
@@ -206,8 +211,9 @@
 define double @vreduce_ord_fadd_nxv1f64( %v, double %s) {
 ; CHECK-LABEL: vreduce_ord_fadd_nxv1f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v9, fa0
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vfredosum.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
@@ -221,9 +227,9 @@
 ; CHECK-LABEL: vreduce_fadd_nxv2f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT: fld ft0, %lo(.LCPI14_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v10, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI14_0)
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vlse64.v v10, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vfredusum.vs v8, v8, v10
 ; CHECK-NEXT: vfmv.f.s ft0, v8
@@ -236,7 +242,7 @@
 define double @vreduce_ord_fadd_nxv2f64( %v, double %s) {
 ; CHECK-LABEL: vreduce_ord_fadd_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v10, fa0
 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vfredosum.vs v8, v8, v10
@@ -252,9 +258,9 @@
 ; CHECK-LABEL: vreduce_fadd_nxv4f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI16_0)
-; CHECK-NEXT: fld ft0, %lo(.LCPI16_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v12, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI16_0)
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vlse64.v v12, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vfredusum.vs v8, v8, v12
 ; CHECK-NEXT: vfmv.f.s ft0, v8
@@ -267,7 +273,7 @@
 define double @vreduce_ord_fadd_nxv4f64( %v, double %s) {
 ; CHECK-LABEL: vreduce_ord_fadd_nxv4f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v12, fa0
 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vfredosum.vs v8, v8, v12
@@ -283,9 +289,9 @@
 ; CHECK-LABEL: vreduce_fmin_nxv1f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI18_0)
-; CHECK-NEXT: flh ft0, %lo(.LCPI18_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI18_0)
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT: vlse16.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vfredmin.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -298,9 +304,9 @@
 ; CHECK-LABEL: vreduce_fmin_nxv1f16_nonans:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI19_0)
-; CHECK-NEXT: flh ft0, %lo(.LCPI19_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI19_0)
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT: vlse16.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vfredmin.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -313,9 +319,9 @@
 ; CHECK-LABEL: vreduce_fmin_nxv1f16_nonans_noinfs:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI20_0)
-; CHECK-NEXT: flh ft0, %lo(.LCPI20_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI20_0)
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT: vlse16.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vfredmin.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -330,9 +336,9 @@
 ; CHECK-LABEL: vreduce_fmin_nxv2f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI21_0)
-; CHECK-NEXT: flh ft0, %lo(.LCPI21_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI21_0)
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT: vlse16.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vfredmin.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -347,9 +353,10 @@
 ; CHECK-LABEL: vreduce_fmin_nxv4f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI22_0)
-; CHECK-NEXT: flh ft0, %lo(.LCPI22_0)(a0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI22_0)
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT: vlse16.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
 ; CHECK-NEXT: vfredmin.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
@@ -362,12 +369,12 @@
 define half @vreduce_fmin_nxv64f16( %v) {
 ; CHECK-LABEL: vreduce_fmin_nxv64f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI23_0)
-; CHECK-NEXT: flh ft0, %lo(.LCPI23_0)(a0)
 ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
 ; CHECK-NEXT: vfmin.vv v8, v8, v16
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v16, ft0
+; CHECK-NEXT: lui a0, %hi(.LCPI23_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI23_0)
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT: vlse16.v v16, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
 ; CHECK-NEXT: vfredmin.vs v8, v8, v16
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -382,9 +389,9 @@
 ; CHECK-LABEL: vreduce_fmin_nxv1f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI24_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI24_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI24_0)
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT: vlse32.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vfredmin.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -397,9 +404,9 @@
 ; CHECK-LABEL: vreduce_fmin_nxv1f32_nonans:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI25_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI25_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI25_0)
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT: vlse32.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vfredmin.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -412,9 +419,9 @@
 ; CHECK-LABEL: vreduce_fmin_nxv1f32_nonans_noinfs:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI26_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI26_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI26_0)
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT: vlse32.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vfredmin.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -429,9 +436,10 @@
 ; CHECK-LABEL: vreduce_fmin_nxv2f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI27_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI27_0)(a0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI27_0)
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT: vlse32.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
 ; CHECK-NEXT: vfredmin.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
@@ -445,9 +453,9 @@
 ; CHECK-LABEL: vreduce_fmin_nxv4f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI28_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI28_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v10, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI28_0)
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT: vlse32.v v10, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vfredmin.vs v8, v8, v10
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -461,12 +469,12 @@
 define float @vreduce_fmin_nxv32f32( %v) {
 ; CHECK-LABEL: vreduce_fmin_nxv32f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI29_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI29_0)(a0)
 ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
 ; CHECK-NEXT: vfmin.vv v8, v8, v16
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v16, ft0
+; CHECK-NEXT: lui a0, %hi(.LCPI29_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI29_0)
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT: vlse32.v v16, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
 ; CHECK-NEXT: vfredmin.vs v8, v8, v16
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -481,9 +489,10 @@
 ; CHECK-LABEL: vreduce_fmin_nxv1f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI30_0)
-; CHECK-NEXT: fld ft0, %lo(.LCPI30_0)(a0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI30_0)
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vlse64.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
 ; CHECK-NEXT: vfredmin.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
@@ -495,9 +504,10 @@
 ; CHECK-LABEL: vreduce_fmin_nxv1f64_nonans:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI31_0)
-; CHECK-NEXT: fld ft0, %lo(.LCPI31_0)(a0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI31_0)
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vlse64.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
 ; CHECK-NEXT: vfredmin.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
@@ -509,9 +519,10 @@
 ; CHECK-LABEL: vreduce_fmin_nxv1f64_nonans_noinfs:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI32_0)
-; CHECK-NEXT: fld ft0, %lo(.LCPI32_0)(a0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI32_0)
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vlse64.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
 ; CHECK-NEXT: vfredmin.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
@@ -525,9 +536,9 @@
 ; CHECK-LABEL: vreduce_fmin_nxv2f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI33_0)
-; CHECK-NEXT: fld ft0, %lo(.LCPI33_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v10, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI33_0)
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vlse64.v v10, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vfredmin.vs v8, v8, v10
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -542,9 +553,9 @@
 ; CHECK-LABEL: vreduce_fmin_nxv4f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI34_0)
-; CHECK-NEXT: fld ft0, %lo(.LCPI34_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v12, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI34_0)
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vlse64.v v12, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vfredmin.vs v8, v8, v12
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -558,12 +569,12 @@
 define double @vreduce_fmin_nxv16f64( %v) {
 ; CHECK-LABEL: vreduce_fmin_nxv16f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI35_0)
-; CHECK-NEXT: fld ft0, %lo(.LCPI35_0)(a0)
 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vfmin.vv v8, v8, v16
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v16, ft0
+; CHECK-NEXT: lui a0, %hi(.LCPI35_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI35_0)
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vlse64.v v16, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vfredmin.vs v8, v8, v16
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -578,9 +589,9 @@
 ; CHECK-LABEL: vreduce_fmax_nxv1f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI36_0)
-; CHECK-NEXT: flh ft0, %lo(.LCPI36_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI36_0)
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT: vlse16.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vfredmax.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -593,9 +604,9 @@
 ; CHECK-LABEL: vreduce_fmax_nxv1f16_nonans:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI37_0)
-; CHECK-NEXT: flh ft0, %lo(.LCPI37_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI37_0)
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT: vlse16.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vfredmax.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -608,9 +619,9 @@
 ; CHECK-LABEL: vreduce_fmax_nxv1f16_nonans_noinfs:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI38_0)
-; CHECK-NEXT: flh ft0, %lo(.LCPI38_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI38_0)
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT: vlse16.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vfredmax.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -625,9 +636,9 @@
 ; CHECK-LABEL: vreduce_fmax_nxv2f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI39_0)
-; CHECK-NEXT: flh ft0, %lo(.LCPI39_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI39_0)
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT: vlse16.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vfredmax.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -642,9 +653,10 @@
 ; CHECK-LABEL: vreduce_fmax_nxv4f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI40_0)
-; CHECK-NEXT: flh ft0, %lo(.LCPI40_0)(a0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI40_0)
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT: vlse16.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
 ; CHECK-NEXT: vfredmax.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
@@ -657,12 +669,12 @@
 define half @vreduce_fmax_nxv64f16( %v) {
 ; CHECK-LABEL: vreduce_fmax_nxv64f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI41_0)
-; CHECK-NEXT: flh ft0, %lo(.LCPI41_0)(a0)
 ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
 ; CHECK-NEXT: vfmax.vv v8, v8, v16
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v16, ft0
+; CHECK-NEXT: lui a0, %hi(.LCPI41_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI41_0)
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
+; CHECK-NEXT: vlse16.v v16, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
 ; CHECK-NEXT: vfredmax.vs v8, v8, v16
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -677,9 +689,9 @@
 ; CHECK-LABEL: vreduce_fmax_nxv1f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI42_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI42_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI42_0)
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT: vlse32.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vfredmax.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -692,9 +704,9 @@
 ; CHECK-LABEL: vreduce_fmax_nxv1f32_nonans:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI43_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI43_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI43_0)
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT: vlse32.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vfredmax.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -707,9 +719,9 @@
 ; CHECK-LABEL: vreduce_fmax_nxv1f32_nonans_noinfs:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI44_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI44_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI44_0)
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT: vlse32.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vfredmax.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -724,9 +736,10 @@
 ; CHECK-LABEL: vreduce_fmax_nxv2f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI45_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI45_0)(a0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI45_0)
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT: vlse32.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
 ; CHECK-NEXT: vfredmax.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
@@ -740,9 +753,9 @@
 ; CHECK-LABEL: vreduce_fmax_nxv4f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI46_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI46_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v10, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI46_0)
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT: vlse32.v v10, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vfredmax.vs v8, v8, v10
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -756,12 +769,12 @@
 define float @vreduce_fmax_nxv32f32( %v) {
 ; CHECK-LABEL: vreduce_fmax_nxv32f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI47_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI47_0)(a0)
 ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
 ; CHECK-NEXT: vfmax.vv v8, v8, v16
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v16, ft0
+; CHECK-NEXT: lui a0, %hi(.LCPI47_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI47_0)
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT: vlse32.v v16, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
 ; CHECK-NEXT: vfredmax.vs v8, v8, v16
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -776,9 +789,10 @@
 ; CHECK-LABEL: vreduce_fmax_nxv1f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI48_0)
-; CHECK-NEXT: fld ft0, %lo(.LCPI48_0)(a0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI48_0)
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vlse64.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
 ; CHECK-NEXT: vfredmax.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
@@ -790,9 +804,10 @@
 ; CHECK-LABEL: vreduce_fmax_nxv1f64_nonans:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI49_0)
-; CHECK-NEXT: fld ft0, %lo(.LCPI49_0)(a0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI49_0)
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vlse64.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
 ; CHECK-NEXT: vfredmax.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
@@ -804,9 +819,10 @@
 ; CHECK-LABEL: vreduce_fmax_nxv1f64_nonans_noinfs:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI50_0)
-; CHECK-NEXT: fld ft0, %lo(.LCPI50_0)(a0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI50_0)
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vlse64.v v9, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, ft0
 ; CHECK-NEXT: vfredmax.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
@@ -820,9 +836,9 @@
 ; CHECK-LABEL: vreduce_fmax_nxv2f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI51_0)
-; CHECK-NEXT: fld ft0, %lo(.LCPI51_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v10, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI51_0)
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vlse64.v v10, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vfredmax.vs v8, v8, v10
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -837,9 +853,9 @@
 ; CHECK-LABEL: vreduce_fmax_nxv4f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI52_0)
-; CHECK-NEXT: fld ft0, %lo(.LCPI52_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v12, ft0
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI52_0)
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vlse64.v v12, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vfredmax.vs v8, v8, v12
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -853,12 +869,12 @@
 define double @vreduce_fmax_nxv16f64( %v) {
 ; CHECK-LABEL: vreduce_fmax_nxv16f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI53_0)
-; CHECK-NEXT: fld ft0, %lo(.LCPI53_0)(a0)
 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vfmax.vv v8, v8, v16
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v16, ft0
+; CHECK-NEXT: lui a0, %hi(.LCPI53_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI53_0)
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vlse64.v v16, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT: vfredmax.vs v8, v8, v16
 ; CHECK-NEXT: vfmv.f.s fa0, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
@@ -9,7 +9,7 @@
 define half @vpreduce_fadd_nxv1f16(half %s, %v, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_fadd_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
 ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t
@@ -22,7 +22,7 @@
 define half @vpreduce_ord_fadd_nxv1f16(half %s, %v, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_ord_fadd_nxv1f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
 ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t
@@ -37,7 +37,7 @@
 define half @vpreduce_fadd_nxv2f16(half %s, %v, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_fadd_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
 ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t
@@ -50,7 +50,7 @@
 define half @vpreduce_ord_fadd_nxv2f16(half %s, %v, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_ord_fadd_nxv2f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
 ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t
@@ -65,7 +65,7 @@
 define half @vpreduce_fadd_nxv4f16(half %s, %v, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_fadd_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
 ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t
@@ -78,7 +78,7 @@
 define half @vpreduce_ord_fadd_nxv4f16(half %s, %v, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_ord_fadd_nxv4f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
 ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t
@@ -93,7 +93,7 @@
 define float @vpreduce_fadd_nxv1f32(float %s, %v, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_fadd_nxv1f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
 ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t
@@ -106,7 +106,7 @@
 define float @vpreduce_ord_fadd_nxv1f32(float %s, %v, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_ord_fadd_nxv1f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
 ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t
@@ -121,7 +121,7 @@
 define float @vpreduce_fadd_nxv2f32(float %s, %v, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_fadd_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
 ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t
@@ -134,7 +134,7 @@
 define float @vpreduce_ord_fadd_nxv2f32(float %s, %v, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_ord_fadd_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
 ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t
@@ -149,7 +149,7 @@
 define float @vpreduce_fadd_nxv4f32(float %s, %v, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_fadd_nxv4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v10, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
 ; CHECK-NEXT: vfredusum.vs v10, v8, v10, v0.t
@@ -162,7 +162,7 @@
 define float @vpreduce_ord_fadd_nxv4f32(float %s, %v, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_ord_fadd_nxv4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v10, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
 ; CHECK-NEXT: vfredosum.vs v10, v8, v10, v0.t
@@ -177,7 +177,7 @@
 define double @vpreduce_fadd_nxv1f64(double %s, %v, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_fadd_nxv1f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
 ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t
@@ -190,7 +190,7 @@
 define double @vpreduce_ord_fadd_nxv1f64(double %s, %v, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_ord_fadd_nxv1f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
 ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t
@@ -205,7 +205,7 @@
 define double @vpreduce_fadd_nxv2f64(double %s, %v, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_fadd_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v10, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
 ; CHECK-NEXT: vfredusum.vs v10, v8, v10, v0.t
@@ -218,7 +218,7 @@
 define double @vpreduce_ord_fadd_nxv2f64(double %s, %v, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_ord_fadd_nxv2f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v10, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
 ; CHECK-NEXT: vfredosum.vs v10, v8, v10, v0.t
@@ -233,7 +233,7 @@
 define double @vpreduce_fadd_nxv4f64(double %s, %v, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_fadd_nxv4f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v12, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
 ; CHECK-NEXT: vfredusum.vs v12, v8, v12, v0.t
@@ -246,7 +246,7 @@
 define double @vpreduce_ord_fadd_nxv4f64(double %s, %v, %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_ord_fadd_nxv4f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vfmv.v.f v12, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
 ; CHECK-NEXT: vfredosum.vs v12, v8, v12, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll
@@ -6,7 +6,7 @@
 define signext i8 @vreduce_add_nxv1i8( %v) {
 ; CHECK-LABEL: vreduce_add_nxv1i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
@@ -21,7 +21,7 @@
 define signext i8 @vreduce_umax_nxv1i8( %v) {
 ; CHECK-LABEL: vreduce_umax_nxv1i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
@@ -37,7 +37,7 @@
 ; CHECK-LABEL: vreduce_smax_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a0, -128
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredmax.vs v8, v8, v9
@@ -52,7 +52,7 @@
 define signext i8 @vreduce_umin_nxv1i8( %v) {
 ; CHECK-LABEL: vreduce_umin_nxv1i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, -1
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredminu.vs v8, v8, v9
@@ -68,7 +68,7 @@
 ; CHECK-LABEL: vreduce_smin_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a0, 127
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredmin.vs v8, v8, v9
@@ -83,7 +83,7 @@
 define signext i8 @vreduce_and_nxv1i8( %v) {
 ; CHECK-LABEL: vreduce_and_nxv1i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, -1
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredand.vs v8, v8, v9
@@ -98,7 +98,7 @@
 define signext i8 @vreduce_or_nxv1i8( %v) {
 ; CHECK-LABEL: vreduce_or_nxv1i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
@@ -113,7 +113,7 @@
 define signext i8 @vreduce_xor_nxv1i8( %v) {
 ; CHECK-LABEL: vreduce_xor_nxv1i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
@@ -128,7 +128,7 @@
 define signext i8 @vreduce_add_nxv2i8( %v) {
 ; CHECK-LABEL: vreduce_add_nxv2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
@@ -143,7 +143,7 @@
 define signext i8 @vreduce_umax_nxv2i8( %v) {
 ; CHECK-LABEL: vreduce_umax_nxv2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
@@ -159,7 +159,7 @@
 ; CHECK-LABEL: vreduce_smax_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a0, -128
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT: vredmax.vs v8, v8, v9
@@ -174,7 +174,7 @@
 define signext i8 @vreduce_umin_nxv2i8( %v) {
 ; CHECK-LABEL: vreduce_umin_nxv2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, -1
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT: vredminu.vs v8, v8, v9
@@ -190,7 +190,7 @@
 ; CHECK-LABEL: vreduce_smin_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a0, 127
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT: vredmin.vs v8, v8, v9
@@ -205,7 +205,7 @@
 define signext i8 @vreduce_and_nxv2i8( %v) {
 ; CHECK-LABEL: vreduce_and_nxv2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, -1
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT: vredand.vs v8, v8, v9
@@ -220,7 +220,7 @@
 define signext i8 @vreduce_or_nxv2i8( %v) {
 ; CHECK-LABEL: vreduce_or_nxv2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
@@ -235,7 +235,7 @@
 define signext i8 @vreduce_xor_nxv2i8( %v) {
 ; CHECK-LABEL: vreduce_xor_nxv2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
@@ -250,7 +250,7 @@
 define signext i8 @vreduce_add_nxv4i8( %v) {
 ; CHECK-LABEL: vreduce_add_nxv4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
@@ -265,7 +265,7 @@
 define signext i8 @vreduce_umax_nxv4i8( %v) {
 ; CHECK-LABEL: vreduce_umax_nxv4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
@@ -281,7 +281,7 @@
 ; CHECK-LABEL: vreduce_smax_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a0, -128
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT: vredmax.vs v8, v8, v9
@@ -296,7 +296,7 @@
 define signext i8 @vreduce_umin_nxv4i8( %v) {
 ; CHECK-LABEL: vreduce_umin_nxv4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, -1
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT: vredminu.vs v8, v8, v9
@@ -312,7 +312,7 @@
 ; CHECK-LABEL: vreduce_smin_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a0, 127
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT: vredmin.vs v8, v8, v9
@@ -327,7 +327,7 @@
 define signext i8 @vreduce_and_nxv4i8( %v) {
 ; CHECK-LABEL: vreduce_and_nxv4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, -1
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT: vredand.vs v8, v8, v9
@@ -342,7 +342,7 @@
 define signext i8 @vreduce_or_nxv4i8( %v) {
 ; CHECK-LABEL: vreduce_or_nxv4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
@@ -357,7 +357,7 @@
 define signext i8 @vreduce_xor_nxv4i8( %v) {
 ; CHECK-LABEL: vreduce_xor_nxv4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
@@ -372,7 +372,7 @@
 define signext i16 @vreduce_add_nxv1i16( %v) {
 ; CHECK-LABEL: vreduce_add_nxv1i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
@@ -387,7 +387,7 @@
 define signext i16 @vreduce_umax_nxv1i16( %v) {
 ; CHECK-LABEL: vreduce_umax_nxv1i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
@@ -403,7 +403,7 @@
 ; CHECK-LABEL: vreduce_smax_nxv1i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, 1048568
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vredmax.vs v8, v8, v9
@@ -418,7 +418,7 @@
 define signext i16 @vreduce_umin_nxv1i16( %v) {
 ; CHECK-LABEL: vreduce_umin_nxv1i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, -1
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vredminu.vs v8, v8, v9
@@ -435,7 +435,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, 8
 ; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vredmin.vs v8, v8, v9
@@ -450,7 +450,7 @@
 define signext i16 @vreduce_and_nxv1i16( %v) {
 ; CHECK-LABEL: vreduce_and_nxv1i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, -1
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vredand.vs v8, v8, v9
@@ -465,7 +465,7 @@
 define signext i16 @vreduce_or_nxv1i16( %v) {
 ; CHECK-LABEL: vreduce_or_nxv1i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
@@ -480,7 +480,7 @@
 define signext i16 @vreduce_xor_nxv1i16( %v) {
 ; CHECK-LABEL: vreduce_xor_nxv1i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
@@ -495,7 +495,7 @@
 define signext i16 @vreduce_add_nxv2i16( %v) {
 ; CHECK-LABEL: vreduce_add_nxv2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
@@ -510,7 +510,7 @@
 define signext i16 @vreduce_umax_nxv2i16( %v) {
 ; CHECK-LABEL: vreduce_umax_nxv2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
@@ -526,7 +526,7 @@
 ; CHECK-LABEL: vreduce_smax_nxv2i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, 1048568
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vredmax.vs v8, v8, v9
@@ -541,7 +541,7 @@
 define signext i16 @vreduce_umin_nxv2i16( %v) {
 ; CHECK-LABEL: vreduce_umin_nxv2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, -1
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vredminu.vs v8, v8, v9
@@ -558,7 +558,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, 8
 ; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vredmin.vs v8, v8, v9
@@ -573,7 +573,7 @@
 define signext i16 @vreduce_and_nxv2i16( %v) {
 ; CHECK-LABEL: vreduce_and_nxv2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, -1
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vredand.vs v8, v8, v9
@@ -588,7 +588,7 @@
 define signext i16 @vreduce_or_nxv2i16( %v) {
 ; CHECK-LABEL: vreduce_or_nxv2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
@@ -603,7 +603,7 @@
 define signext i16 @vreduce_xor_nxv2i16( %v) {
 ; CHECK-LABEL: vreduce_xor_nxv2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
@@ -618,8 +618,9 @@
 define signext i16 @vreduce_add_nxv4i16( %v) {
 ; CHECK-LABEL: vreduce_add_nxv4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
@@ -632,8 +633,9 @@
 define signext i16 @vreduce_umax_nxv4i16( %v) {
 ; CHECK-LABEL: vreduce_umax_nxv4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
@@ -647,8 +649,9 @@
 ; CHECK-LABEL: vreduce_smax_nxv4i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, 1048568
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vredmax.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
@@ -661,8 +664,9 @@
 define signext i16 @vreduce_umin_nxv4i16( %v) {
 ; CHECK-LABEL: vreduce_umin_nxv4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, -1
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vredminu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
@@ -677,8 +681,9 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, 8
 ; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vredmin.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
@@ -691,8 +696,9 @@
 define signext i16 @vreduce_and_nxv4i16( %v) {
 ; CHECK-LABEL: vreduce_and_nxv4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, -1
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vredand.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
@@ -705,8 +711,9 @@
 define signext i16 @vreduce_or_nxv4i16( %v) {
 ; CHECK-LABEL: vreduce_or_nxv4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
@@ -719,8 +726,9 @@
 define signext i16 @vreduce_xor_nxv4i16( %v) {
 ; CHECK-LABEL: vreduce_xor_nxv4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
@@ -733,7 +741,7 @@
 define i32 @vreduce_add_nxv1i32( %v) {
 ; CHECK-LABEL: vreduce_add_nxv1i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
@@ -748,7 +756,7 @@
 define i32 @vreduce_umax_nxv1i32( %v) {
 ; CHECK-LABEL: vreduce_umax_nxv1i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
@@ -764,7 +772,7 @@
 ; CHECK-LABEL: vreduce_smax_nxv1i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, 524288
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vredmax.vs v8, v8, v9
@@ -779,7 +787,7 @@
 define i32 @vreduce_umin_nxv1i32( %v) {
 ; CHECK-LABEL: vreduce_umin_nxv1i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, -1
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vredminu.vs v8, v8, v9
@@ -796,7 +804,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, 524288
 ; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vredmin.vs v8, v8, v9
@@ -811,7 +819,7 @@
 define i32 @vreduce_and_nxv1i32( %v) {
 ; CHECK-LABEL: vreduce_and_nxv1i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, -1
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vredand.vs v8, v8, v9
@@ -826,7 +834,7 @@
 define i32 @vreduce_or_nxv1i32( %v) {
 ; CHECK-LABEL: vreduce_or_nxv1i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
@@ -841,7 +849,7 @@
 define i32 @vreduce_xor_nxv1i32( %v) {
 ; CHECK-LABEL: vreduce_xor_nxv1i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
@@ -856,8 +864,9 @@
 define i32 @vreduce_add_nxv2i32( %v) {
 ; CHECK-LABEL: vreduce_add_nxv2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
@@ -870,8 +879,9 @@
 define i32 @vreduce_umax_nxv2i32( %v) {
 ; CHECK-LABEL: vreduce_umax_nxv2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
@@ -885,8 +895,9 @@
 ; CHECK-LABEL: vreduce_smax_nxv2i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, 524288
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vredmax.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
@@ -899,8 +910,9 @@
 define i32 @vreduce_umin_nxv2i32( %v) {
 ; CHECK-LABEL: vreduce_umin_nxv2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, -1
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vredminu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
@@ -915,8 +927,9 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, 524288
 ; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vredmin.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
@@ -929,8 +942,9 @@
 define i32 @vreduce_and_nxv2i32( %v) {
 ; CHECK-LABEL: vreduce_and_nxv2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, -1
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vredand.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
@@ -943,8 +957,9 @@
 define i32 @vreduce_or_nxv2i32( %v) {
 ; CHECK-LABEL: vreduce_or_nxv2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
@@ -957,8 +972,9 @@
 define i32 @vreduce_xor_nxv2i32( %v) {
 ; CHECK-LABEL: vreduce_xor_nxv2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
@@ -971,7 +987,7 @@
 define i32 @vreduce_add_nxv4i32( %v) {
 ; CHECK-LABEL: vreduce_add_nxv4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v10, 0
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v10
@@ -986,7 +1002,7 @@
 define i32 @vreduce_umax_nxv4i32( %v) {
 ; CHECK-LABEL: vreduce_umax_nxv4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v10, 0
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v10
@@ -1002,7 +1018,7 @@
 ; CHECK-LABEL: vreduce_smax_nxv4i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, 524288
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v10, a0
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vredmax.vs v8, v8, v10
@@ -1017,7 +1033,7 @@
 define i32 @vreduce_umin_nxv4i32( %v) {
 ; CHECK-LABEL: vreduce_umin_nxv4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v10, -1
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vredminu.vs v8, v8, v10
@@ -1034,7 +1050,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, 524288
 ; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v10, a0
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vredmin.vs v8, v8, v10
@@ -1049,7 +1065,7 @@
 define i32 @vreduce_and_nxv4i32( %v) {
 ; CHECK-LABEL: vreduce_and_nxv4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v10, -1
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vredand.vs v8, v8, v10
@@ -1064,7 +1080,7 @@
 define i32 @vreduce_or_nxv4i32( %v) {
 ; CHECK-LABEL: vreduce_or_nxv4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v10, 0
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v10
@@ -1079,7 +1095,7 @@
 define i32 @vreduce_xor_nxv4i32( %v) {
 ; CHECK-LABEL: vreduce_xor_nxv4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v10, 0
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v10
@@ -1094,8 +1110,9 @@
 define i64 @vreduce_add_nxv1i64( %v) {
 ; CHECK-LABEL: vreduce_add_nxv1i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: li a1, 32
@@ -1112,8 +1129,9 @@
 define i64 @vreduce_umax_nxv1i64( %v) {
 ; CHECK-LABEL: vreduce_umax_nxv1i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: li a1, 32
@@ -1135,9 +1153,10 @@
 ; CHECK-NEXT: lui a0, 524288
 ; CHECK-NEXT: sw a0, 12(sp)
 ; CHECK-NEXT: sw zero, 8(sp)
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v9, (a0), zero
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vredmax.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: li a1, 32
@@ -1155,8 +1174,9 @@
 define i64 @vreduce_umin_nxv1i64( %v) {
 ; CHECK-LABEL: vreduce_umin_nxv1i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, -1
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vredminu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: li a1, 32
@@ -1180,9 +1200,10 @@
 ; CHECK-NEXT: lui a0, 524288
 ; CHECK-NEXT: addi a0, a0, -1
 ; CHECK-NEXT: sw a0, 12(sp)
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v9, (a0), zero
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vredmin.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: li a1, 32
@@ -1200,8 +1221,9 @@
 define i64 @vreduce_and_nxv1i64( %v) {
 ; CHECK-LABEL: vreduce_and_nxv1i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, -1
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vredand.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: li a1, 32
@@ -1218,8 +1240,9 @@
 define i64 @vreduce_or_nxv1i64( %v) {
 ; CHECK-LABEL: vreduce_or_nxv1i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: li a1, 32
@@ -1236,8 +1259,9 @@
 define i64 @vreduce_xor_nxv1i64( %v) {
 ; CHECK-LABEL: vreduce_xor_nxv1i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: li a1, 32
@@ -1254,7 +1278,7 @@
 define i64 @vreduce_add_nxv2i64( %v) {
 ; CHECK-LABEL: vreduce_add_nxv2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v10, 0
 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v10
@@ -1273,7 +1297,7 @@
 define i64 @vreduce_umax_nxv2i64( %v) {
 ; CHECK-LABEL: vreduce_umax_nxv2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v10, 0
 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v10
@@ -1297,7 +1321,7 @@
 ; CHECK-NEXT: lui a0, 524288
 ; CHECK-NEXT: sw a0, 12(sp)
 ; CHECK-NEXT: sw zero, 8(sp)
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v10, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
@@ -1318,7 +1342,7 @@
 define i64 @vreduce_umin_nxv2i64( %v) {
 ; CHECK-LABEL: vreduce_umin_nxv2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v10, -1
 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vredminu.vs v8, v8, v10
@@ -1344,7 +1368,7 @@
 ; CHECK-NEXT: lui a0, 524288
 ; CHECK-NEXT: addi a0, a0, -1
 ; CHECK-NEXT: sw a0, 12(sp)
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v10, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
@@ -1365,7 +1389,7 @@
 define i64 @vreduce_and_nxv2i64( %v) {
 ; CHECK-LABEL: vreduce_and_nxv2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v10, -1
 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vredand.vs v8, v8, v10
@@ -1384,7 +1408,7 @@
 define i64 @vreduce_or_nxv2i64( %v) {
 ; CHECK-LABEL: vreduce_or_nxv2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v10, 0
 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v10
@@ -1403,7 +1427,7 @@
 define i64 @vreduce_xor_nxv2i64( %v) {
 ; CHECK-LABEL: vreduce_xor_nxv2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v10, 0
 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v10
@@ -1422,7 +1446,7 @@
 define i64 @vreduce_add_nxv4i64( %v) {
 ; CHECK-LABEL: vreduce_add_nxv4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v12, 0
 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v12
@@ -1441,7 +1465,7 @@
 define i64 @vreduce_umax_nxv4i64( %v) {
 ; CHECK-LABEL: vreduce_umax_nxv4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v12, 0
 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v12
@@ -1465,7 +1489,7 @@
 ; CHECK-NEXT: lui a0, 524288
 ; CHECK-NEXT: sw a0, 12(sp)
 ; CHECK-NEXT: sw zero, 8(sp)
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v12, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
@@ -1486,7 +1510,7 @@
 define i64 @vreduce_umin_nxv4i64( %v) {
 ; CHECK-LABEL: vreduce_umin_nxv4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v12, -1
 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vredminu.vs v8, v8, v12
@@ -1512,7 +1536,7 @@
 ; CHECK-NEXT: lui a0, 524288
 ; CHECK-NEXT: addi a0, a0, -1
 ; CHECK-NEXT: sw a0, 12(sp)
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: addi a0, sp, 8
 ; CHECK-NEXT: vlse64.v v12, (a0), zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
@@ -1533,7 +1557,7 @@
 define i64 @vreduce_and_nxv4i64( %v) {
 ; CHECK-LABEL: vreduce_and_nxv4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v12, -1
 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vredand.vs v8, v8, v12
@@ -1552,7 +1576,7 @@
 define i64 @vreduce_or_nxv4i64( %v) {
 ; CHECK-LABEL: vreduce_or_nxv4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v12, 0
 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v12
@@ -1571,7 +1595,7 @@
 define i64 @vreduce_xor_nxv4i64( %v) {
 ; CHECK-LABEL: vreduce_xor_nxv4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v12, 0
 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v12
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll
@@ -6,7 +6,7 @@
 define signext i8 @vreduce_add_nxv1i8( %v) {
 ; CHECK-LABEL: vreduce_add_nxv1i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
@@ -21,7 +21,7 @@
 define signext i8 @vreduce_umax_nxv1i8( %v) {
 ; CHECK-LABEL: vreduce_umax_nxv1i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
@@ -37,7 +37,7 @@
 ; CHECK-LABEL: vreduce_smax_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a0, -128
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredmax.vs v8, v8, v9
@@ -52,7 +52,7 @@
 define signext i8 @vreduce_umin_nxv1i8( %v) {
 ; CHECK-LABEL: vreduce_umin_nxv1i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, -1
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredminu.vs v8, v8, v9
@@ -68,7 +68,7 @@
 ; CHECK-LABEL: vreduce_smin_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a0, 127
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredmin.vs v8, v8, v9
@@ -83,7 +83,7 @@
 define signext i8 @vreduce_and_nxv1i8( %v) {
 ; CHECK-LABEL: vreduce_and_nxv1i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, -1
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredand.vs v8, v8, v9
@@ -98,7 +98,7 @@
 define signext i8 @vreduce_or_nxv1i8( %v) {
 ; CHECK-LABEL: vreduce_or_nxv1i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
@@ -113,7 +113,7 @@
 define signext i8 @vreduce_xor_nxv1i8( %v) {
 ; CHECK-LABEL: vreduce_xor_nxv1i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
@@ -128,7 +128,7 @@
 define signext i8 @vreduce_add_nxv2i8( %v) {
 ; CHECK-LABEL: vreduce_add_nxv2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
@@ -143,7 +143,7 @@
 define signext i8 @vreduce_umax_nxv2i8( %v) {
 ; CHECK-LABEL: vreduce_umax_nxv2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, 0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
@@ -159,7 +159,7 @@
 ; CHECK-LABEL: vreduce_smax_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a0, -128
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT: vredmax.vs v8, v8, v9
@@ -174,7 +174,7 @@
 define signext i8 @vreduce_umin_nxv2i8( %v) {
 ; CHECK-LABEL: vreduce_umin_nxv2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.i v9, -1
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vredminu.vs v8, v8, v9 @@ -190,7 +190,7 @@ ; CHECK-LABEL: vreduce_smin_nxv2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 127 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 @@ -205,7 +205,7 @@ define signext i8 @vreduce_and_nxv2i8( %v) { ; CHECK-LABEL: vreduce_and_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 @@ -220,7 +220,7 @@ define signext i8 @vreduce_or_nxv2i8( %v) { ; CHECK-LABEL: vreduce_or_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 @@ -235,7 +235,7 @@ define signext i8 @vreduce_xor_nxv2i8( %v) { ; CHECK-LABEL: vreduce_xor_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 @@ -250,7 +250,7 @@ define signext i8 @vreduce_add_nxv4i8( %v) { ; CHECK-LABEL: vreduce_add_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 @@ -265,7 +265,7 @@ define signext i8 @vreduce_umax_nxv4i8( %v) { ; CHECK-LABEL: vreduce_umax_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 @@ -281,7 +281,7 @@ ; CHECK-LABEL: vreduce_smax_nxv4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -128 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 @@ -296,7 +296,7 @@ define signext i8 @vreduce_umin_nxv4i8( %v) { ; CHECK-LABEL: vreduce_umin_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 @@ -312,7 +312,7 @@ ; CHECK-LABEL: vreduce_smin_nxv4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 127 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 @@ -327,7 +327,7 @@ define signext i8 @vreduce_and_nxv4i8( %v) { ; CHECK-LABEL: vreduce_and_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 @@ -342,7 +342,7 @@ define signext i8 @vreduce_or_nxv4i8( %v) { ; CHECK-LABEL: vreduce_or_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: 
vredor.vs v8, v8, v9 @@ -357,7 +357,7 @@ define signext i8 @vreduce_xor_nxv4i8( %v) { ; CHECK-LABEL: vreduce_xor_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 @@ -372,7 +372,7 @@ define signext i16 @vreduce_add_nxv1i16( %v) { ; CHECK-LABEL: vreduce_add_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 @@ -387,7 +387,7 @@ define signext i16 @vreduce_umax_nxv1i16( %v) { ; CHECK-LABEL: vreduce_umax_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 @@ -403,7 +403,7 @@ ; CHECK-LABEL: vreduce_smax_nxv1i16: ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, 1048568 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 @@ -418,7 +418,7 @@ define signext i16 @vreduce_umin_nxv1i16( %v) { ; CHECK-LABEL: vreduce_umin_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 @@ -435,7 +435,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, 8 ; CHECK-NEXT: addiw a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 @@ -450,7 +450,7 @@ define signext i16 @vreduce_and_nxv1i16( %v) { ; CHECK-LABEL: vreduce_and_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 @@ -465,7 +465,7 @@ define signext i16 @vreduce_or_nxv1i16( %v) { ; CHECK-LABEL: vreduce_or_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 @@ -480,7 +480,7 @@ define signext i16 @vreduce_xor_nxv1i16( %v) { ; CHECK-LABEL: vreduce_xor_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 @@ -495,7 +495,7 @@ define signext i16 @vreduce_add_nxv2i16( %v) { ; CHECK-LABEL: vreduce_add_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 @@ -510,7 +510,7 @@ define signext i16 @vreduce_umax_nxv2i16( %v) { ; CHECK-LABEL: vreduce_umax_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 ; 
CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 @@ -526,7 +526,7 @@ ; CHECK-LABEL: vreduce_smax_nxv2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, 1048568 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 @@ -541,7 +541,7 @@ define signext i16 @vreduce_umin_nxv2i16( %v) { ; CHECK-LABEL: vreduce_umin_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 @@ -558,7 +558,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, 8 ; CHECK-NEXT: addiw a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredmin.vs v8, v8, v9 @@ -573,7 +573,7 @@ define signext i16 @vreduce_and_nxv2i16( %v) { ; CHECK-LABEL: vreduce_and_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -1 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredand.vs v8, v8, v9 @@ -588,7 +588,7 @@ define signext i16 @vreduce_or_nxv2i16( %v) { ; CHECK-LABEL: vreduce_or_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 @@ -603,7 +603,7 @@ define signext i16 @vreduce_xor_nxv2i16( %v) { ; CHECK-LABEL: vreduce_xor_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vredxor.vs v8, v8, v9 @@ -618,8 +618,9 @@ define signext i16 @vreduce_add_nxv4i16( %v) { ; CHECK-LABEL: vreduce_add_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -632,8 +633,9 @@ define signext i16 @vreduce_umax_nxv4i16( %v) { ; CHECK-LABEL: vreduce_umax_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -647,8 +649,9 @@ ; CHECK-LABEL: vreduce_smax_nxv4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, 1048568 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -661,8 +664,9 @@ define signext i16 @vreduce_umin_nxv4i16( %v) { ; CHECK-LABEL: vreduce_umin_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; CHECK-NEXT: vmv.v.i v9, -1 +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -677,8 
@@ -677,8 +681,9 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, 8
 ; CHECK-NEXT:    addiw a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -691,8 +696,9 @@
 define signext i16 @vreduce_and_nxv4i16(<vscale x 4 x i16> %v) {
 ; CHECK-LABEL: vreduce_and_nxv4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, -1
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vredand.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -705,8 +711,9 @@
 define signext i16 @vreduce_or_nxv4i16(<vscale x 4 x i16> %v) {
 ; CHECK-LABEL: vreduce_or_nxv4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vredor.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -719,8 +726,9 @@
 define signext i16 @vreduce_xor_nxv4i16(<vscale x 4 x i16> %v) {
 ; CHECK-LABEL: vreduce_xor_nxv4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vredxor.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -733,7 +741,7 @@
 define signext i32 @vreduce_add_nxv1i32(<vscale x 1 x i32> %v) {
 ; CHECK-LABEL: vreduce_add_nxv1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vredsum.vs v8, v8, v9
@@ -748,7 +756,7 @@
 define signext i32 @vreduce_umax_nxv1i32(<vscale x 1 x i32> %v) {
 ; CHECK-LABEL: vreduce_umax_nxv1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v8, v9
@@ -764,7 +772,7 @@
 ; CHECK-LABEL: vreduce_smax_nxv1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, 524288
-; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vredmax.vs v8, v8, v9
@@ -779,7 +787,7 @@
 define signext i32 @vreduce_umin_nxv1i32(<vscale x 1 x i32> %v) {
 ; CHECK-LABEL: vreduce_umin_nxv1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, -1
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vredminu.vs v8, v8, v9
@@ -796,7 +804,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, 524288
 ; CHECK-NEXT:    addiw a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vredmin.vs v8, v8, v9
@@ -811,7 +819,7 @@
 define signext i32 @vreduce_and_nxv1i32(<vscale x 1 x i32> %v) {
 ; CHECK-LABEL: vreduce_and_nxv1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, -1
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vredand.vs v8, v8, v9
@@ -826,7 +834,7 @@
 define signext i32 @vreduce_or_nxv1i32(<vscale x 1 x i32> %v) {
 ; CHECK-LABEL: vreduce_or_nxv1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vredor.vs v8, v8, v9
@@ -841,7 +849,7 @@
 define signext i32 @vreduce_xor_nxv1i32(<vscale x 1 x i32> %v) {
 ; CHECK-LABEL: vreduce_xor_nxv1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vredxor.vs v8, v8, v9
@@ -856,8 +864,9 @@
 define signext i32 @vreduce_add_nxv2i32(<vscale x 2 x i32> %v) {
 ; CHECK-LABEL: vreduce_add_nxv2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vredsum.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -870,8 +879,9 @@
 define signext i32 @vreduce_umax_nxv2i32(<vscale x 2 x i32> %v) {
 ; CHECK-LABEL: vreduce_umax_nxv2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -885,8 +895,9 @@
 ; CHECK-LABEL: vreduce_smax_nxv2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, 524288
-; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -899,8 +910,9 @@
 define signext i32 @vreduce_umin_nxv2i32(<vscale x 2 x i32> %v) {
 ; CHECK-LABEL: vreduce_umin_nxv2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, -1
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vredminu.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -915,8 +927,9 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, 524288
 ; CHECK-NEXT:    addiw a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -929,8 +942,9 @@
 define signext i32 @vreduce_and_nxv2i32(<vscale x 2 x i32> %v) {
 ; CHECK-LABEL: vreduce_and_nxv2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, -1
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vredand.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -943,8 +957,9 @@
 define signext i32 @vreduce_or_nxv2i32(<vscale x 2 x i32> %v) {
 ; CHECK-LABEL: vreduce_or_nxv2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vredor.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -957,8 +972,9 @@
 define signext i32 @vreduce_xor_nxv2i32(<vscale x 2 x i32> %v) {
 ; CHECK-LABEL: vreduce_xor_nxv2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vredxor.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -971,7 +987,7 @@
 define signext i32 @vreduce_add_nxv4i32(<vscale x 4 x i32> %v) {
 ; CHECK-LABEL: vreduce_add_nxv4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vredsum.vs v8, v8, v10
@@ -986,7 +1002,7 @@
 define signext i32 @vreduce_umax_nxv4i32(<vscale x 4 x i32> %v) {
 ; CHECK-LABEL: vreduce_umax_nxv4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v8, v10
@@ -1002,7 +1018,7 @@
 ; CHECK-LABEL: vreduce_smax_nxv4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, 524288
-; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v10, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vredmax.vs v8, v8, v10
@@ -1017,7 +1033,7 @@
 define signext i32 @vreduce_umin_nxv4i32(<vscale x 4 x i32> %v) {
 ; CHECK-LABEL: vreduce_umin_nxv4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v10, -1
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vredminu.vs v8, v8, v10
@@ -1034,7 +1050,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, 524288
 ; CHECK-NEXT:    addiw a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v10, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vredmin.vs v8, v8, v10
@@ -1049,7 +1065,7 @@
 define signext i32 @vreduce_and_nxv4i32(<vscale x 4 x i32> %v) {
 ; CHECK-LABEL: vreduce_and_nxv4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v10, -1
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vredand.vs v8, v8, v10
@@ -1064,7 +1080,7 @@
 define signext i32 @vreduce_or_nxv4i32(<vscale x 4 x i32> %v) {
 ; CHECK-LABEL: vreduce_or_nxv4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vredor.vs v8, v8, v10
@@ -1079,7 +1095,7 @@
 define signext i32 @vreduce_xor_nxv4i32(<vscale x 4 x i32> %v) {
 ; CHECK-LABEL: vreduce_xor_nxv4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vredxor.vs v8, v8, v10
@@ -1094,8 +1110,9 @@
 define i64 @vreduce_add_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-LABEL: vreduce_add_nxv1i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vredsum.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -1108,8 +1125,9 @@
 define i64 @vreduce_umax_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-LABEL: vreduce_umax_nxv1i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -1124,8 +1142,9 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, -1
 ; CHECK-NEXT:    slli a0, a0, 63
-; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -1138,8 +1157,9 @@
 define i64 @vreduce_umin_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-LABEL: vreduce_umin_nxv1i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, -1
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vredminu.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -1154,8 +1174,9 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, -1
 ; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -1168,8 +1189,9 @@
 define i64 @vreduce_and_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-LABEL: vreduce_and_nxv1i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, -1
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vredand.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -1182,8 +1204,9 @@
 define i64 @vreduce_or_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-LABEL: vreduce_or_nxv1i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vredor.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -1196,8 +1219,9 @@
 define i64 @vreduce_xor_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-LABEL: vreduce_xor_nxv1i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vredxor.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -1210,7 +1234,7 @@
 define i64 @vreduce_add_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-LABEL: vreduce_add_nxv2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vredsum.vs v8, v8, v10
@@ -1225,7 +1249,7 @@
 define i64 @vreduce_umax_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-LABEL: vreduce_umax_nxv2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v8, v10
@@ -1242,7 +1266,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, -1
 ; CHECK-NEXT:    slli a0, a0, 63
-; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v10, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vredmax.vs v8, v8, v10
@@ -1257,7 +1281,7 @@
 define i64 @vreduce_umin_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-LABEL: vreduce_umin_nxv2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v10, -1
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vredminu.vs v8, v8, v10
@@ -1274,7 +1298,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, -1
 ; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v10, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vredmin.vs v8, v8, v10
@@ -1289,7 +1313,7 @@
 define i64 @vreduce_and_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-LABEL: vreduce_and_nxv2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v10, -1
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vredand.vs v8, v8, v10
@@ -1304,7 +1328,7 @@
 define i64 @vreduce_or_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-LABEL: vreduce_or_nxv2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vredor.vs v8, v8, v10
@@ -1319,7 +1343,7 @@
 define i64 @vreduce_xor_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-LABEL: vreduce_xor_nxv2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vredxor.vs v8, v8, v10
@@ -1334,7 +1358,7 @@
 define i64 @vreduce_add_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-LABEL: vreduce_add_nxv4i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v12, 0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vredsum.vs v8, v8, v12
@@ -1349,7 +1373,7 @@
 define i64 @vreduce_umax_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-LABEL: vreduce_umax_nxv4i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v12, 0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v8, v12
@@ -1366,7 +1390,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, -1
 ; CHECK-NEXT:    slli a0, a0, 63
-; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v12, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vredmax.vs v8, v8, v12
@@ -1381,7 +1405,7 @@
 define i64 @vreduce_umin_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-LABEL: vreduce_umin_nxv4i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v12, -1
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vredminu.vs v8, v8, v12
@@ -1398,7 +1422,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, -1
 ; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v12, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vredmin.vs v8, v8, v12
@@ -1413,7 +1437,7 @@
 define i64 @vreduce_and_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-LABEL: vreduce_and_nxv4i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v12, -1
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vredand.vs v8, v8, v12
@@ -1428,7 +1452,7 @@
 define i64 @vreduce_or_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-LABEL: vreduce_or_nxv4i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v12, 0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vredor.vs v8, v8, v12
@@ -1443,7 +1467,7 @@
 define i64 @vreduce_xor_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-LABEL: vreduce_xor_nxv4i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v12, 0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vredxor.vs v8, v8, v12
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
@@ -9,7 +9,7 @@
 define signext i8 @vpreduce_add_nxv1i8(i8 signext %s, <vscale x 1 x i8> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_add_nxv1i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
@@ -25,7 +25,7 @@
 ; CHECK-LABEL: vpreduce_umax_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andi a0, a0, 255
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
@@ -40,7 +40,7 @@
 define signext i8 @vpreduce_smax_nxv1i8(i8 signext %s, <vscale x 1 x i8> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_smax_nxv1i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
@@ -56,7 +56,7 @@
 ; CHECK-LABEL: vpreduce_umin_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andi a0, a0, 255
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT:    vredminu.vs v9, v8, v9, v0.t
@@ -71,7 +71,7 @@
 define signext i8 @vpreduce_smin_nxv1i8(i8 signext %s, <vscale x 1 x i8> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_smin_nxv1i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
@@ -86,7 +86,7 @@
 define signext i8 @vpreduce_and_nxv1i8(i8 signext %s, <vscale x 1 x i8> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_and_nxv1i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
@@ -101,7 +101,7 @@
 define signext i8 @vpreduce_or_nxv1i8(i8 signext %s, <vscale x 1 x i8> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_or_nxv1i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
@@ -116,7 +116,7 @@
 define signext i8 @vpreduce_xor_nxv1i8(i8 signext %s, <vscale x 1 x i8> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_xor_nxv1i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
@@ -131,7 +131,7 @@
 define signext i8 @vpreduce_add_nxv2i8(i8 signext %s, <vscale x 2 x i8> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_add_nxv2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
@@ -147,7 +147,7 @@
 ; CHECK-LABEL: vpreduce_umax_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andi a0, a0, 255
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
@@ -162,7 +162,7 @@
 define signext i8 @vpreduce_smax_nxv2i8(i8 signext %s, <vscale x 2 x i8> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_smax_nxv2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
@@ -178,7 +178,7 @@
 ; CHECK-LABEL: vpreduce_umin_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andi a0, a0, 255
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT:    vredminu.vs v9, v8, v9, v0.t
@@ -193,7 +193,7 @@
 define signext i8 @vpreduce_smin_nxv2i8(i8 signext %s, <vscale x 2 x i8> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_smin_nxv2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
@@ -208,7 +208,7 @@
 define signext i8 @vpreduce_and_nxv2i8(i8 signext %s, <vscale x 2 x i8> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_and_nxv2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
@@ -223,7 +223,7 @@
 define signext i8 @vpreduce_or_nxv2i8(i8 signext %s, <vscale x 2 x i8> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_or_nxv2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
@@ -238,7 +238,7 @@
 define signext i8 @vpreduce_xor_nxv2i8(i8 signext %s, <vscale x 2 x i8> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_xor_nxv2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
@@ -253,7 +253,7 @@
 define signext i8 @vpreduce_add_nxv4i8(i8 signext %s, <vscale x 4 x i8> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_add_nxv4i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
@@ -269,7 +269,7 @@
 ; CHECK-LABEL: vpreduce_umax_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andi a0, a0, 255
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
 ; CHECK-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
@@ -284,7 +284,7 @@
 define signext i8 @vpreduce_smax_nxv4i8(i8 signext %s, <vscale x 4 x i8> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_smax_nxv4i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
@@ -300,7 +300,7 @@
 ; CHECK-LABEL: vpreduce_umin_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andi a0, a0, 255
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
 ; CHECK-NEXT:    vredminu.vs v9, v8, v9, v0.t
@@ -315,7 +315,7 @@
 define signext i8 @vpreduce_smin_nxv4i8(i8 signext %s, <vscale x 4 x i8> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_smin_nxv4i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
@@ -330,7 +330,7 @@
 define signext i8 @vpreduce_and_nxv4i8(i8 signext %s, <vscale x 4 x i8> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_and_nxv4i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
@@ -345,7 +345,7 @@
 define signext i8 @vpreduce_or_nxv4i8(i8 signext %s, <vscale x 4 x i8> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_or_nxv4i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
@@ -360,7 +360,7 @@
 define signext i8 @vpreduce_xor_nxv4i8(i8 signext %s, <vscale x 4 x i8> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_xor_nxv4i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
@@ -375,7 +375,7 @@
 define signext i16 @vpreduce_add_nxv1i16(i16 signext %s, <vscale x 1 x i16> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_add_nxv1i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
@@ -393,7 +393,7 @@
 ; RV32-NEXT:    lui a2, 16
 ; RV32-NEXT:    addi a2, a2, -1
 ; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV32-NEXT:    vmv.v.x v9, a0
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
 ; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
@@ -405,7 +405,7 @@
 ; RV64-NEXT:    lui a2, 16
 ; RV64-NEXT:    addiw a2, a2, -1
 ; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v9, a0
 ; RV64-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
 ; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
@@ -420,7 +420,7 @@
 define signext i16 @vpreduce_smax_nxv1i16(i16 signext %s, <vscale x 1 x i16> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_smax_nxv1i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
@@ -438,7 +438,7 @@
 ; RV32-NEXT:    lui a2, 16
 ; RV32-NEXT:    addi a2, a2, -1
 ; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV32-NEXT:    vmv.v.x v9, a0
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
 ; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
@@ -450,7 +450,7 @@
 ; RV64-NEXT:    lui a2, 16
 ; RV64-NEXT:    addiw a2, a2, -1
 ; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v9, a0
 ; RV64-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
 ; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
@@ -465,7 +465,7 @@
 define signext i16 @vpreduce_smin_nxv1i16(i16 signext %s, <vscale x 1 x i16> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_smin_nxv1i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
@@ -480,7 +480,7 @@
 define signext i16 @vpreduce_and_nxv1i16(i16 signext %s, <vscale x 1 x i16> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_and_nxv1i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
@@ -495,7 +495,7 @@
 define signext i16 @vpreduce_or_nxv1i16(i16 signext %s, <vscale x 1 x i16> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_or_nxv1i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
@@ -510,7 +510,7 @@
 define signext i16 @vpreduce_xor_nxv1i16(i16 signext %s, <vscale x 1 x i16> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_xor_nxv1i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
@@ -525,7 +525,7 @@
 define signext i16 @vpreduce_add_nxv2i16(i16 signext %s, <vscale x 2 x i16> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_add_nxv2i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
@@ -543,7 +543,7 @@
 ; RV32-NEXT:    lui a2, 16
 ; RV32-NEXT:    addi a2, a2, -1
 ; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV32-NEXT:    vmv.v.x v9, a0
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
 ; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
@@ -555,7 +555,7 @@
 ; RV64-NEXT:    lui a2, 16
 ; RV64-NEXT:    addiw a2, a2, -1
 ; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v9, a0
 ; RV64-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
 ; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
@@ -570,7 +570,7 @@
 define signext i16 @vpreduce_smax_nxv2i16(i16 signext %s, <vscale x 2 x i16> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_smax_nxv2i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
@@ -588,7 +588,7 @@
 ; RV32-NEXT:    lui a2, 16
 ; RV32-NEXT:    addi a2, a2, -1
 ; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV32-NEXT:    vmv.v.x v9, a0
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
 ; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
@@ -600,7 +600,7 @@
 ; RV64-NEXT:    lui a2, 16
 ; RV64-NEXT:    addiw a2, a2, -1
 ; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v9, a0
 ; RV64-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
 ; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
@@ -615,7 +615,7 @@
 define signext i16 @vpreduce_smin_nxv2i16(i16 signext %s, <vscale x 2 x i16> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_smin_nxv2i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
@@ -630,7 +630,7 @@
 define signext i16 @vpreduce_and_nxv2i16(i16 signext %s, <vscale x 2 x i16> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_and_nxv2i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
@@ -645,7 +645,7 @@
 define signext i16 @vpreduce_or_nxv2i16(i16 signext %s, <vscale x 2 x i16> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_or_nxv2i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
@@ -660,7 +660,7 @@
 define signext i16 @vpreduce_xor_nxv2i16(i16 signext %s, <vscale x 2 x i16> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_xor_nxv2i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
@@ -675,7 +675,7 @@
 define signext i16 @vpreduce_add_nxv4i16(i16 signext %s, <vscale x 4 x i16> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_add_nxv4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
@@ -693,7 +693,7 @@
 ; RV32-NEXT:    lui a2, 16
 ; RV32-NEXT:    addi a2, a2, -1
 ; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV32-NEXT:    vmv.v.x v9, a0
 ; RV32-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
 ; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
@@ -705,7 +705,7 @@
 ; RV64-NEXT:    lui a2, 16
 ; RV64-NEXT:    addiw a2, a2, -1
 ; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v9, a0
 ; RV64-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
 ; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
@@ -720,7 +720,7 @@
 define signext i16 @vpreduce_smax_nxv4i16(i16 signext %s, <vscale x 4 x i16> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_smax_nxv4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
@@ -738,7 +738,7 @@
 ; RV32-NEXT:    lui a2, 16
 ; RV32-NEXT:    addi a2, a2, -1
 ; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV32-NEXT:    vmv.v.x v9, a0
 ; RV32-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
 ; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
@@ -750,7 +750,7 @@
 ; RV64-NEXT:    lui a2, 16
 ; RV64-NEXT:    addiw a2, a2, -1
 ; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v9, a0
 ; RV64-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
 ; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
@@ -765,7 +765,7 @@
 define signext i16 @vpreduce_smin_nxv4i16(i16 signext %s, <vscale x 4 x i16> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_smin_nxv4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
@@ -780,7 +780,7 @@
 define signext i16 @vpreduce_and_nxv4i16(i16 signext %s, <vscale x 4 x i16> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_and_nxv4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
@@ -795,7 +795,7 @@
 define signext i16 @vpreduce_or_nxv4i16(i16 signext %s, <vscale x 4 x i16> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_or_nxv4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
@@ -810,7 +810,7 @@
 define signext i16 @vpreduce_xor_nxv4i16(i16 signext %s, <vscale x 4 x i16> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_xor_nxv4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
@@ -825,7 +825,7 @@
 define signext i32 @vpreduce_add_nxv1i32(i32 signext %s, <vscale x 1 x i32> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_add_nxv1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
@@ -840,7 +840,7 @@
 define signext i32 @vpreduce_umax_nxv1i32(i32 signext %s, <vscale x 1 x i32> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpreduce_umax_nxv1i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; RV32-NEXT:    vmv.v.x v9, a0
 ; RV32-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
 ; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
@@ -851,7 +851,7 @@
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    srli a0, a0, 32
-; RV64-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v9, a0
 ; RV64-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
 ; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
@@ -866,7 +866,7 @@
 define signext i32 @vpreduce_smax_nxv1i32(i32 signext %s, <vscale x 1 x i32> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_smax_nxv1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
@@ -881,7 +881,7 @@
 define signext i32 @vpreduce_umin_nxv1i32(i32 signext %s, <vscale x 1 x i32> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpreduce_umin_nxv1i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; RV32-NEXT:    vmv.v.x v9, a0
 ; RV32-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
 ; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
@@ -892,7 +892,7 @@
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    srli a0, a0, 32
-; RV64-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v9, a0
 ; RV64-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
 ; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
@@ -907,7 +907,7 @@
 define signext i32 @vpreduce_smin_nxv1i32(i32 signext %s, <vscale x 1 x i32> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_smin_nxv1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
@@ -922,7 +922,7 @@
 define signext i32 @vpreduce_and_nxv1i32(i32 signext %s, <vscale x 1 x i32> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_and_nxv1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
@@ -937,7 +937,7 @@
 define signext i32 @vpreduce_or_nxv1i32(i32 signext %s, <vscale x 1 x i32> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_or_nxv1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
@@ -952,7 +952,7 @@
 define signext i32 @vpreduce_xor_nxv1i32(i32 signext %s, <vscale x 1 x i32> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_xor_nxv1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
@@ -967,7 +967,7 @@
 define signext i32 @vpreduce_add_nxv2i32(i32 signext %s, <vscale x 2 x i32> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_add_nxv2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
 ; CHECK-NEXT:    vredsum.vs v9, v8, v9, v0.t
@@ -982,7 +982,7 @@
 define signext i32 @vpreduce_umax_nxv2i32(i32 signext %s, <vscale x 2 x i32> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpreduce_umax_nxv2i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; RV32-NEXT:    vmv.v.x v9, a0
 ; RV32-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
 ; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
@@ -993,7 +993,7 @@
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    srli a0, a0, 32
-; RV64-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v9, a0
 ; RV64-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
 ; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
@@ -1008,7 +1008,7 @@
 define signext i32 @vpreduce_smax_nxv2i32(i32 signext %s, <vscale x 2 x i32> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_smax_nxv2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
 ; CHECK-NEXT:    vredmax.vs v9, v8, v9, v0.t
@@ -1023,7 +1023,7 @@
 define signext i32 @vpreduce_umin_nxv2i32(i32 signext %s, <vscale x 2 x i32> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpreduce_umin_nxv2i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; RV32-NEXT:    vmv.v.x v9, a0
 ; RV32-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
 ; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
@@ -1034,7 +1034,7 @@
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    srli a0, a0, 32
-; RV64-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v9, a0
 ; RV64-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
 ; RV64-NEXT:    vredminu.vs v9, v8, v9, v0.t
@@ -1049,7 +1049,7 @@
 define signext i32 @vpreduce_smin_nxv2i32(i32 signext %s, <vscale x 2 x i32> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_smin_nxv2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
 ; CHECK-NEXT:    vredmin.vs v9, v8, v9, v0.t
@@ -1064,7 +1064,7 @@
 define signext i32 @vpreduce_and_nxv2i32(i32 signext %s, <vscale x 2 x i32> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_and_nxv2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
 ; CHECK-NEXT:    vredand.vs v9, v8, v9, v0.t
@@ -1079,7 +1079,7 @@
 define signext i32 @vpreduce_or_nxv2i32(i32 signext %s, <vscale x 2 x i32> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_or_nxv2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
 ; CHECK-NEXT:    vredor.vs v9, v8, v9, v0.t
@@ -1094,7 +1094,7 @@
 define signext i32 @vpreduce_xor_nxv2i32(i32 signext %s, <vscale x 2 x i32> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_xor_nxv2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
 ; CHECK-NEXT:    vredxor.vs v9, v8, v9, v0.t
@@ -1109,7 +1109,7 @@
 define signext i32 @vpreduce_add_nxv4i32(i32 signext %s, <vscale x 4 x i32> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_add_nxv4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v10, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
 ; CHECK-NEXT:    vredsum.vs v10, v8, v10, v0.t
@@ -1124,7 +1124,7 @@
 define signext i32 @vpreduce_umax_nxv4i32(i32 signext %s, <vscale x 4 x i32> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpreduce_umax_nxv4i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; RV32-NEXT:    vmv.v.x v10, a0
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
 ; RV32-NEXT:    vredmaxu.vs v10, v8, v10, v0.t
@@ -1135,7 +1135,7 @@
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    srli a0, a0, 32
-; RV64-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v10, a0
 ; RV64-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
 ; RV64-NEXT:    vredmaxu.vs v10, v8, v10, v0.t
@@ -1150,7 +1150,7 @@
 define signext i32 @vpreduce_smax_nxv4i32(i32 signext %s, <vscale x 4 x i32> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_smax_nxv4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v10, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
 ; CHECK-NEXT:    vredmax.vs v10, v8, v10, v0.t
@@ -1165,7 +1165,7 @@
 define signext i32 @vpreduce_umin_nxv4i32(i32 signext %s, <vscale x 4 x i32> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpreduce_umin_nxv4i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; RV32-NEXT:    vmv.v.x v10, a0
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
 ; RV32-NEXT:    vredminu.vs v10, v8, v10, v0.t
@@ -1176,7 +1176,7 @@
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    srli a0, a0, 32
-; RV64-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v10, a0
 ; RV64-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
 ; RV64-NEXT:    vredminu.vs v10, v8, v10, v0.t
@@ -1191,7 +1191,7 @@
 define signext i32 @vpreduce_smin_nxv4i32(i32 signext %s, <vscale x 4 x i32> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_smin_nxv4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v10, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
 ; CHECK-NEXT:    vredmin.vs v10, v8, v10, v0.t
@@ -1206,7 +1206,7 @@
 define signext i32 @vpreduce_and_nxv4i32(i32 signext %s, <vscale x 4 x i32> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_and_nxv4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v10, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
 ; CHECK-NEXT:    vredand.vs v10, v8, v10, v0.t
@@ -1221,7 +1221,7 @@
 define signext i32 @vpreduce_or_nxv4i32(i32 signext %s, <vscale x 4 x i32> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_or_nxv4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v10, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
 ; CHECK-NEXT:    vredor.vs v10, v8, v10, v0.t
@@ -1236,7 +1236,7 @@
 define signext i32 @vpreduce_xor_nxv4i32(i32 signext %s, <vscale x 4 x i32> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpreduce_xor_nxv4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v10, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
 ; CHECK-NEXT:    vredxor.vs v10, v8, v10, v0.t
@@ -1255,7 +1255,7 @@
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    sw a1, 12(sp)
 ; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
@@ -1270,7 +1270,7 @@
 ;
 ; RV64-LABEL: vpreduce_add_nxv1i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a2, zero, e64, m1, ta, mu
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v9, a0
 ; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
 ; RV64-NEXT:    vredsum.vs v9, v8, v9, v0.t
@@ -1289,7 +1289,7 @@
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    sw a1, 12(sp)
 ; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
@@ -1304,7 +1304,7 @@
 ;
 ; RV64-LABEL: vpreduce_umax_nxv1i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a2, zero, e64, m1, ta, mu
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v9, a0
 ; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
 ; RV64-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
@@ -1323,7 +1323,7 @@
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    sw a1, 12(sp)
 ; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vsetvli zero, a2,
e64, m1, tu, mu @@ -1338,7 +1338,7 @@ ; ; RV64-LABEL: vpreduce_smax_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredmax.vs v9, v8, v9, v0.t @@ -1357,7 +1357,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu @@ -1372,7 +1372,7 @@ ; ; RV64-LABEL: vpreduce_umin_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t @@ -1391,7 +1391,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu @@ -1406,7 +1406,7 @@ ; ; RV64-LABEL: vpreduce_smin_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredmin.vs v9, v8, v9, v0.t @@ -1425,7 +1425,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu @@ -1440,7 +1440,7 @@ ; ; RV64-LABEL: vpreduce_and_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredand.vs v9, v8, v9, v0.t @@ -1459,7 +1459,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu @@ -1474,7 +1474,7 @@ ; ; RV64-LABEL: vpreduce_or_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredor.vs v9, v8, v9, v0.t @@ -1493,7 +1493,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu @@ -1508,7 +1508,7 @@ ; ; RV64-LABEL: vpreduce_xor_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredxor.vs v9, v8, v9, v0.t @@ -1527,7 +1527,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu 
; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu @@ -1542,7 +1542,7 @@ ; ; RV64-LABEL: vpreduce_add_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredsum.vs v10, v8, v10, v0.t @@ -1561,7 +1561,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu @@ -1576,7 +1576,7 @@ ; ; RV64-LABEL: vpreduce_umax_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredmaxu.vs v10, v8, v10, v0.t @@ -1595,7 +1595,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu @@ -1610,7 +1610,7 @@ ; ; RV64-LABEL: vpreduce_smax_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredmax.vs v10, v8, v10, v0.t @@ -1629,7 +1629,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu @@ -1644,7 +1644,7 @@ ; ; RV64-LABEL: vpreduce_umin_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredminu.vs v10, v8, v10, v0.t @@ -1663,7 +1663,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu @@ -1678,7 +1678,7 @@ ; ; RV64-LABEL: vpreduce_smin_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredmin.vs v10, v8, v10, v0.t @@ -1697,7 +1697,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu @@ -1712,7 +1712,7 @@ ; ; RV64-LABEL: vpreduce_and_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredand.vs v10, v8, v10, v0.t @@ -1731,7 +1731,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 
12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu @@ -1746,7 +1746,7 @@ ; ; RV64-LABEL: vpreduce_or_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredor.vs v10, v8, v10, v0.t @@ -1765,7 +1765,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu @@ -1780,7 +1780,7 @@ ; ; RV64-LABEL: vpreduce_xor_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredxor.vs v10, v8, v10, v0.t @@ -1799,7 +1799,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu @@ -1814,7 +1814,7 @@ ; ; RV64-LABEL: vpreduce_add_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v12, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; RV64-NEXT: vredsum.vs v12, v8, v12, v0.t @@ -1833,7 +1833,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu @@ -1848,7 +1848,7 @@ ; ; RV64-LABEL: vpreduce_umax_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v12, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; RV64-NEXT: vredmaxu.vs v12, v8, v12, v0.t @@ -1867,7 +1867,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu @@ -1882,7 +1882,7 @@ ; ; RV64-LABEL: vpreduce_smax_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v12, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; RV64-NEXT: vredmax.vs v12, v8, v12, v0.t @@ -1901,7 +1901,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu @@ -1916,7 +1916,7 @@ ; ; RV64-LABEL: vpreduce_umin_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v12, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; 
RV64-NEXT: vredminu.vs v12, v8, v12, v0.t @@ -1935,7 +1935,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu @@ -1950,7 +1950,7 @@ ; ; RV64-LABEL: vpreduce_smin_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v12, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; RV64-NEXT: vredmin.vs v12, v8, v12, v0.t @@ -1969,7 +1969,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu @@ -1984,7 +1984,7 @@ ; ; RV64-LABEL: vpreduce_and_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v12, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; RV64-NEXT: vredand.vs v12, v8, v12, v0.t @@ -2003,7 +2003,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu @@ -2018,7 +2018,7 @@ ; ; RV64-LABEL: vpreduce_or_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v12, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; RV64-NEXT: vredor.vs v12, v8, v12, v0.t @@ -2037,7 +2037,7 @@ ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu @@ -2052,7 +2052,7 @@ ; ; RV64-LABEL: vpreduce_xor_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vmv.v.x v12, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; RV64-NEXT: vredxor.vs v12, v8, v12, v0.t