diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1516,10 +1516,16 @@
     return;
   }
   case ISD::SPLAT_VECTOR:
+  case RISCVISD::VMV_S_X_VL:
+  case RISCVISD::VFMV_S_F_VL:
   case RISCVISD::VMV_V_X_VL:
   case RISCVISD::VFMV_V_F_VL: {
     // Try to match splat of a scalar load to a strided load with stride of x0.
-    SDValue Src = Node->getOperand(0);
+    bool IsScalarMove = Node->getOpcode() == RISCVISD::VMV_S_X_VL ||
+                        Node->getOpcode() == RISCVISD::VFMV_S_F_VL;
+    if (IsScalarMove && !Node->getOperand(0).isUndef())
+      break;
+    SDValue Src = IsScalarMove ? Node->getOperand(1) : Node->getOperand(0);
     auto *Ld = dyn_cast<LoadSDNode>(Src);
     if (!Ld)
       break;
@@ -1534,6 +1540,8 @@
     SDValue VL;
     if (Node->getOpcode() == ISD::SPLAT_VECTOR)
      VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
+    else if (IsScalarMove)
+      selectVLOp(Node->getOperand(2), VL);
     else
       selectVLOp(Node->getOperand(1), VL);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2215,8 +2215,13 @@
 static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL,
                                 SelectionDAG &DAG,
                                 const RISCVSubtarget &Subtarget) {
-  if (VT.isFloatingPoint())
+  if (VT.isFloatingPoint()) {
+    // If VL is 1, we could use vfmv.s.f.
+    if (isOneConstant(VL))
+      return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT, DAG.getUNDEF(VT),
+                         Scalar, VL);
     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL);
+  }
 
   MVT XLenVT = Subtarget.getXLenVT();
 
@@ -2229,6 +2234,12 @@
   unsigned ExtOpc = isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
   Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
+  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
+  // If VL is 1 and the scalar value won't benefit from an immediate, we could
+  // use vmv.s.x.
+ if (isOneConstant(VL) && (isNullConstant(Scalar) || !Const)) + return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, DAG.getUNDEF(VT), Scalar, + VL); return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL); } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll @@ -152,10 +152,10 @@ define void @buildvec_dominant2_v4f32(<4 x float>* %x, float %f) { ; CHECK-LABEL: buildvec_dominant2_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI6_0) -; CHECK-NEXT: flw ft0, %lo(.LCPI6_0)(a1) ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu -; CHECK-NEXT: vfmv.s.f v8, ft0 +; CHECK-NEXT: lui a1, %hi(.LCPI6_0) +; CHECK-NEXT: addi a1, a1, %lo(.LCPI6_0) +; CHECK-NEXT: vlse32.v v8, (a1), zero ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v9, v8, 1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll @@ -10,7 +10,7 @@ ; CHECK-LABEL: vpreduce_fadd_v2f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 +; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 @@ -23,7 +23,7 @@ ; CHECK-LABEL: vpreduce_ord_fadd_v2f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 +; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 @@ -38,7 +38,7 @@ ; CHECK-LABEL: vpreduce_fadd_v4f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 +; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 @@ -51,7 +51,7 @@ ; CHECK-LABEL: vpreduce_ord_fadd_v4f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 +; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 @@ -66,7 +66,7 @@ ; CHECK-LABEL: vpreduce_fadd_v2f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 +; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 @@ -79,7 +79,7 @@ ; CHECK-LABEL: vpreduce_ord_fadd_v2f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 +; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 @@ -94,7 +94,7 @@ ; CHECK-LABEL: vpreduce_fadd_v4f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 +; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 @@ -107,7 +107,7 @@ ; CHECK-LABEL: vpreduce_ord_fadd_v4f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, 
e32, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 +; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 @@ -122,7 +122,7 @@ ; CHECK-LABEL: vpreduce_fadd_v2f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 +; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 @@ -135,7 +135,7 @@ ; CHECK-LABEL: vpreduce_ord_fadd_v2f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 +; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 @@ -150,7 +150,7 @@ ; CHECK-LABEL: vpreduce_fadd_v4f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v10, fa0 +; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu ; CHECK-NEXT: vfredusum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v10 @@ -163,7 +163,7 @@ ; CHECK-LABEL: vpreduce_ord_fadd_v4f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v10, fa0 +; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu ; CHECK-NEXT: vfredosum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v10 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll @@ -23,7 +23,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 +; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -60,7 +60,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 +; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -97,7 +97,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 +; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -134,7 +134,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 +; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -171,7 +171,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v10, fa0 +; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -210,7 +210,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v12, fa0 +; 
CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v12 ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -249,7 +249,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -293,12 +293,12 @@ ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v24, fa0 +; CHECK-NEXT: vfmv.s.f v24, fa0 ; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu ; CHECK-NEXT: vfredosum.vs v16, v16, v24 ; CHECK-NEXT: vfmv.f.s ft0, v16 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v16, ft0 +; CHECK-NEXT: vfmv.s.f v16, ft0 ; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -329,7 +329,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 +; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -366,7 +366,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 +; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -403,7 +403,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 +; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -440,7 +440,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v10, fa0 +; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -477,7 +477,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v12, fa0 +; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v12 ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -516,7 +516,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -560,12 +560,12 @@ ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v24, fa0 +; CHECK-NEXT: vfmv.s.f v24, fa0 ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; CHECK-NEXT: vfredosum.vs v16, v16, v24 ; CHECK-NEXT: vfmv.f.s ft0, v16 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v16, ft0 +; CHECK-NEXT: vfmv.s.f v16, ft0 ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v16 ; CHECK-NEXT: 
vfmv.f.s fa0, v8 @@ -595,7 +595,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: vfmv.v.f v9, fa0 +; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -631,7 +631,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v9, fa0 +; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -668,7 +668,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v10, fa0 +; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -705,7 +705,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v12, fa0 +; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v12 ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -742,7 +742,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -784,12 +784,12 @@ ; CHECK-NEXT: vle64.v v8, (a1) ; CHECK-NEXT: vle64.v v16, (a0) ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v24, fa0 +; CHECK-NEXT: vfmv.s.f v24, fa0 ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vfredosum.vs v16, v16, v24 ; CHECK-NEXT: vfmv.f.s ft0, v16 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; CHECK-NEXT: vfmv.v.f v16, ft0 +; CHECK-NEXT: vfmv.s.f v16, ft0 ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vfredosum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -1414,8 +1414,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: fmv.w.x ft0, zero ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vfmv.s.f v9, ft0 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s ft0, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll @@ -10,7 +10,7 @@ ; CHECK-LABEL: vpreduce_add_v2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -26,7 +26,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: andi a0, a0, 255 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -41,7 +41,7 @@ ; CHECK-LABEL: vpreduce_smax_v2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 
+; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -57,7 +57,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: andi a0, a0, 255 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -72,7 +72,7 @@ ; CHECK-LABEL: vpreduce_smin_v2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -87,7 +87,7 @@ ; CHECK-LABEL: vpreduce_and_v2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -102,7 +102,7 @@ ; CHECK-LABEL: vpreduce_or_v2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -117,7 +117,7 @@ ; CHECK-LABEL: vpreduce_xor_v2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -132,7 +132,7 @@ ; CHECK-LABEL: vpreduce_add_v4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -148,7 +148,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: andi a0, a0, 255 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -163,7 +163,7 @@ ; CHECK-LABEL: vpreduce_smax_v4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -179,7 +179,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: andi a0, a0, 255 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -194,7 +194,7 @@ ; CHECK-LABEL: vpreduce_smin_v4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -209,7 +209,7 @@ ; CHECK-LABEL: vpreduce_and_v4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -224,7 +224,7 @@ ; CHECK-LABEL: vpreduce_or_v4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: 
vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -239,7 +239,7 @@ ; CHECK-LABEL: vpreduce_xor_v4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -254,7 +254,7 @@ ; CHECK-LABEL: vpreduce_add_v2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -272,7 +272,7 @@ ; RV32-NEXT: addi a2, a2, -1 ; RV32-NEXT: and a0, a0, a2 ; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; RV32-NEXT: vmv.v.x v9, a0 +; RV32-NEXT: vmv.s.x v9, a0 ; RV32-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 @@ -284,7 +284,7 @@ ; RV64-NEXT: addiw a2, a2, -1 ; RV64-NEXT: and a0, a0, a2 ; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -299,7 +299,7 @@ ; CHECK-LABEL: vpreduce_smax_v2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -317,7 +317,7 @@ ; RV32-NEXT: addi a2, a2, -1 ; RV32-NEXT: and a0, a0, a2 ; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; RV32-NEXT: vmv.v.x v9, a0 +; RV32-NEXT: vmv.s.x v9, a0 ; RV32-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 @@ -329,7 +329,7 @@ ; RV64-NEXT: addiw a2, a2, -1 ; RV64-NEXT: and a0, a0, a2 ; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -344,7 +344,7 @@ ; CHECK-LABEL: vpreduce_smin_v2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -359,7 +359,7 @@ ; CHECK-LABEL: vpreduce_and_v2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -374,7 +374,7 @@ ; CHECK-LABEL: vpreduce_or_v2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -389,7 +389,7 @@ ; CHECK-LABEL: vpreduce_xor_v2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -404,7 +404,7 @@ ; CHECK-LABEL: vpreduce_add_v4i16: ; CHECK: # %bb.0: ; 
CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -422,7 +422,7 @@ ; RV32-NEXT: addi a2, a2, -1 ; RV32-NEXT: and a0, a0, a2 ; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; RV32-NEXT: vmv.v.x v9, a0 +; RV32-NEXT: vmv.s.x v9, a0 ; RV32-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 @@ -434,7 +434,7 @@ ; RV64-NEXT: addiw a2, a2, -1 ; RV64-NEXT: and a0, a0, a2 ; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -449,7 +449,7 @@ ; CHECK-LABEL: vpreduce_smax_v4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -467,7 +467,7 @@ ; RV32-NEXT: addi a2, a2, -1 ; RV32-NEXT: and a0, a0, a2 ; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; RV32-NEXT: vmv.v.x v9, a0 +; RV32-NEXT: vmv.s.x v9, a0 ; RV32-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 @@ -479,7 +479,7 @@ ; RV64-NEXT: addiw a2, a2, -1 ; RV64-NEXT: and a0, a0, a2 ; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -494,7 +494,7 @@ ; CHECK-LABEL: vpreduce_smin_v4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -509,7 +509,7 @@ ; CHECK-LABEL: vpreduce_and_v4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -524,7 +524,7 @@ ; CHECK-LABEL: vpreduce_or_v4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -539,7 +539,7 @@ ; CHECK-LABEL: vpreduce_xor_v4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -554,7 +554,7 @@ ; CHECK-LABEL: vpreduce_add_v2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -569,7 +569,7 @@ ; RV32-LABEL: vpreduce_umax_v2i32: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; RV32-NEXT: vmv.v.x v9, a0 +; RV32-NEXT: vmv.s.x v9, a0 ; RV32-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 @@ -580,7 +580,7 @@ ; 
RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: srli a0, a0, 32 ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -595,7 +595,7 @@ ; CHECK-LABEL: vpreduce_smax_v2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -610,7 +610,7 @@ ; RV32-LABEL: vpreduce_umin_v2i32: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; RV32-NEXT: vmv.v.x v9, a0 +; RV32-NEXT: vmv.s.x v9, a0 ; RV32-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 @@ -621,7 +621,7 @@ ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: srli a0, a0, 32 ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -636,7 +636,7 @@ ; CHECK-LABEL: vpreduce_smin_v2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -651,7 +651,7 @@ ; CHECK-LABEL: vpreduce_and_v2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -666,7 +666,7 @@ ; CHECK-LABEL: vpreduce_or_v2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -681,7 +681,7 @@ ; CHECK-LABEL: vpreduce_xor_v2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -696,7 +696,7 @@ ; CHECK-LABEL: vpreduce_add_v4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -711,7 +711,7 @@ ; RV32-LABEL: vpreduce_umax_v4i32: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; RV32-NEXT: vmv.v.x v9, a0 +; RV32-NEXT: vmv.s.x v9, a0 ; RV32-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 @@ -722,7 +722,7 @@ ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: srli a0, a0, 32 ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -737,7 +737,7 @@ ; CHECK-LABEL: vpreduce_smax_v4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; 
CHECK-NEXT: vmv.x.s a0, v9 @@ -752,7 +752,7 @@ ; RV32-LABEL: vpreduce_umin_v4i32: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; RV32-NEXT: vmv.v.x v9, a0 +; RV32-NEXT: vmv.s.x v9, a0 ; RV32-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 @@ -763,7 +763,7 @@ ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: srli a0, a0, 32 ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -778,7 +778,7 @@ ; CHECK-LABEL: vpreduce_smin_v4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -793,7 +793,7 @@ ; CHECK-LABEL: vpreduce_and_v4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -808,7 +808,7 @@ ; CHECK-LABEL: vpreduce_or_v4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -823,7 +823,7 @@ ; CHECK-LABEL: vpreduce_xor_v4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -857,7 +857,7 @@ ; RV64-LABEL: vpreduce_add_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredsum.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -891,7 +891,7 @@ ; RV64-LABEL: vpreduce_umax_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -925,7 +925,7 @@ ; RV64-LABEL: vpreduce_smax_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredmax.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -959,7 +959,7 @@ ; RV64-LABEL: vpreduce_umin_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -993,7 +993,7 @@ ; RV64-LABEL: vpreduce_smin_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredmin.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -1027,7 +1027,7 @@ ; RV64-LABEL: vpreduce_and_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredand.vs v9, v8, v9, v0.t ; 
RV64-NEXT: vmv.x.s a0, v9 @@ -1061,7 +1061,7 @@ ; RV64-LABEL: vpreduce_or_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredor.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -1095,7 +1095,7 @@ ; RV64-LABEL: vpreduce_xor_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredxor.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -1129,7 +1129,7 @@ ; RV64-LABEL: vpreduce_add_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v10, a0 +; RV64-NEXT: vmv.s.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredsum.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 @@ -1163,7 +1163,7 @@ ; RV64-LABEL: vpreduce_umax_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v10, a0 +; RV64-NEXT: vmv.s.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredmaxu.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 @@ -1197,7 +1197,7 @@ ; RV64-LABEL: vpreduce_smax_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v10, a0 +; RV64-NEXT: vmv.s.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredmax.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 @@ -1231,7 +1231,7 @@ ; RV64-LABEL: vpreduce_umin_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v10, a0 +; RV64-NEXT: vmv.s.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredminu.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 @@ -1265,7 +1265,7 @@ ; RV64-LABEL: vpreduce_smin_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v10, a0 +; RV64-NEXT: vmv.s.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredmin.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 @@ -1299,7 +1299,7 @@ ; RV64-LABEL: vpreduce_and_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v10, a0 +; RV64-NEXT: vmv.s.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredand.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 @@ -1333,7 +1333,7 @@ ; RV64-LABEL: vpreduce_or_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v10, a0 +; RV64-NEXT: vmv.s.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredor.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 @@ -1367,7 +1367,7 @@ ; RV64-LABEL: vpreduce_xor_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v10, a0 +; RV64-NEXT: vmv.s.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredxor.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll @@ -24,7 +24,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vredsum.vs v8, 
v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -42,7 +42,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -60,7 +60,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -78,7 +78,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -97,7 +97,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.i v10, 0 +; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -116,7 +116,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.i v12, 0 +; CHECK-NEXT: vmv.s.x v12, zero ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -135,7 +135,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.i v16, 0 +; CHECK-NEXT: vmv.s.x v16, zero ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -157,7 +157,7 @@ ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.i v16, 0 +; CHECK-NEXT: vmv.s.x v16, zero ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -189,7 +189,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -207,7 +207,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -225,7 +225,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -243,7 +243,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.i v10, 0 +; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 @@ 
-262,7 +262,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.i v12, 0 +; CHECK-NEXT: vmv.s.x v12, zero ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -281,7 +281,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.i v16, 0 +; CHECK-NEXT: vmv.s.x v16, zero ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -303,7 +303,7 @@ ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.i v16, 0 +; CHECK-NEXT: vmv.s.x v16, zero ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -335,7 +335,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -353,7 +353,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -371,7 +371,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.i v10, 0 +; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -389,7 +389,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.i v12, 0 +; CHECK-NEXT: vmv.s.x v12, zero ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -408,7 +408,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.i v16, 0 +; CHECK-NEXT: vmv.s.x v16, zero ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -430,7 +430,7 @@ ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.i v16, 0 +; CHECK-NEXT: vmv.s.x v16, zero ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vredsum.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -487,7 +487,7 @@ ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.i v9, 0 +; RV64-NEXT: vmv.s.x v9, zero ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vredsum.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 @@ -520,7 +520,7 @@ ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.i v10, 0 +; RV64-NEXT: vmv.s.x v10, zero ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vredsum.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 @@ -553,7 +553,7 @@ ; RV64-NEXT: 
vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.i v12, 0 +; RV64-NEXT: vmv.s.x v12, zero ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vredsum.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 @@ -586,7 +586,7 @@ ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.i v16, 0 +; RV64-NEXT: vmv.s.x v16, zero ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredsum.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 @@ -623,11 +623,11 @@ ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v16, (a0) -; RV64-NEXT: vadd.vv v8, v8, v16 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.i v16, 0 +; RV64-NEXT: vmv.s.x v24, zero ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu -; RV64-NEXT: vredsum.vs v8, v8, v16 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: vredsum.vs v8, v8, v24 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <32 x i64>, <32 x i64>* %x @@ -676,7 +676,7 @@ ; RV64-NEXT: vadd.vv v8, v8, v0 ; RV64-NEXT: vadd.vv v8, v8, v16 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.i v16, 0 +; RV64-NEXT: vmv.s.x v16, zero ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vredsum.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 @@ -1392,7 +1392,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -1410,7 +1410,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -1428,7 +1428,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -1446,7 +1446,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -1465,7 +1465,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.i v10, 0 +; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -1484,7 +1484,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.i v12, 0 +; CHECK-NEXT: vmv.s.x v12, zero ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -1503,7 +1503,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.i v16, 0 +; CHECK-NEXT: vmv.s.x v16, zero ; CHECK-NEXT: vsetvli 
zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -1525,7 +1525,7 @@ ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu -; CHECK-NEXT: vmv.v.i v16, 0 +; CHECK-NEXT: vmv.s.x v16, zero ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -1557,7 +1557,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -1575,7 +1575,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -1593,7 +1593,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -1611,7 +1611,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.i v10, 0 +; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -1630,7 +1630,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.i v12, 0 +; CHECK-NEXT: vmv.s.x v12, zero ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -1649,7 +1649,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.i v16, 0 +; CHECK-NEXT: vmv.s.x v16, zero ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -1671,7 +1671,7 @@ ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.i v16, 0 +; CHECK-NEXT: vmv.s.x v16, zero ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -1703,7 +1703,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -1721,7 +1721,7 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -1739,7 +1739,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.i v10, 0 +; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vsetivli zero, 8, e32, m2, 
ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v10
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -1757,7 +1757,7 @@
 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT: vle32.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmv.s.x v12, zero
 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v12
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -1776,7 +1776,7 @@
 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT: vle32.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmv.s.x v16, zero
 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v16
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -1798,7 +1798,7 @@
 ; CHECK-NEXT: vle32.v v16, (a0)
 ; CHECK-NEXT: vor.vv v8, v8, v16
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmv.s.x v16, zero
 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v16
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -1855,7 +1855,7 @@
 ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-NEXT: vle64.v v8, (a0)
 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; RV64-NEXT: vmv.v.i v9, 0
+; RV64-NEXT: vmv.s.x v9, zero
 ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-NEXT: vredor.vs v8, v8, v9
 ; RV64-NEXT: vmv.x.s a0, v8
@@ -1888,7 +1888,7 @@
 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
 ; RV64-NEXT: vle64.v v8, (a0)
 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; RV64-NEXT: vmv.v.i v10, 0
+; RV64-NEXT: vmv.s.x v10, zero
 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
 ; RV64-NEXT: vredor.vs v8, v8, v10
 ; RV64-NEXT: vmv.x.s a0, v8
@@ -1921,7 +1921,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT: vle64.v v8, (a0)
 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; RV64-NEXT: vmv.v.i v12, 0
+; RV64-NEXT: vmv.s.x v12, zero
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT: vredor.vs v8, v8, v12
 ; RV64-NEXT: vmv.x.s a0, v8
@@ -1954,7 +1954,7 @@
 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
 ; RV64-NEXT: vle64.v v8, (a0)
 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; RV64-NEXT: vmv.v.i v16, 0
+; RV64-NEXT: vmv.s.x v16, zero
 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
 ; RV64-NEXT: vredor.vs v8, v8, v16
 ; RV64-NEXT: vmv.x.s a0, v8
@@ -1991,11 +1991,11 @@
 ; RV64-NEXT: vle64.v v8, (a0)
 ; RV64-NEXT: addi a0, a0, 128
 ; RV64-NEXT: vle64.v v16, (a0)
-; RV64-NEXT: vor.vv v8, v8, v16
 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; RV64-NEXT: vmv.v.i v16, 0
+; RV64-NEXT: vmv.s.x v24, zero
 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
-; RV64-NEXT: vredor.vs v8, v8, v16
+; RV64-NEXT: vor.vv v8, v8, v16
+; RV64-NEXT: vredor.vs v8, v8, v24
 ; RV64-NEXT: vmv.x.s a0, v8
 ; RV64-NEXT: ret
 %v = load <32 x i64>, <32 x i64>* %x
@@ -2044,7 +2044,7 @@
 ; RV64-NEXT: vor.vv v8, v8, v0
 ; RV64-NEXT: vor.vv v8, v8, v16
 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; RV64-NEXT: vmv.v.i v16, 0
+; RV64-NEXT: vmv.s.x v16, zero
 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
 ; RV64-NEXT: vredor.vs v8, v8, v16
 ; RV64-NEXT: vmv.x.s a0, v8
@@ -2076,7 +2076,7 @@
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -2094,7 +2094,7 @@
 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -2112,7 +2112,7 @@
 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -2130,7 +2130,7 @@
 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -2149,7 +2149,7 @@
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv.s.x v10, zero
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v10
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -2168,7 +2168,7 @@
 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmv.s.x v12, zero
 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v12
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -2187,7 +2187,7 @@
 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmv.s.x v16, zero
 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v16
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -2209,7 +2209,7 @@
 ; CHECK-NEXT: vle8.v v16, (a0)
 ; CHECK-NEXT: vxor.vv v8, v8, v16
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmv.s.x v16, zero
 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v16
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -2241,7 +2241,7 @@
 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT: vle16.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -2259,7 +2259,7 @@
 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT: vle16.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -2277,7 +2277,7 @@
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT: vle16.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -2295,7 +2295,7 @@
 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT: vle16.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv.s.x v10, zero
 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v10
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -2314,7 +2314,7 @@
 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT: vle16.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmv.s.x v12, zero
 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v12
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -2333,7 +2333,7 @@
 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT: vle16.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmv.s.x v16, zero
 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v16
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -2355,7 +2355,7 @@
 ; CHECK-NEXT: vle16.v v16, (a0)
 ; CHECK-NEXT: vxor.vv v8, v8, v16
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmv.s.x v16, zero
 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v16
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -2387,7 +2387,7 @@
 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT: vle32.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -2405,7 +2405,7 @@
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT: vle32.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -2423,7 +2423,7 @@
 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT: vle32.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv.s.x v10, zero
 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v10
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -2441,7 +2441,7 @@
 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT: vle32.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmv.s.x v12, zero
 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v12
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -2460,7 +2460,7 @@
 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT: vle32.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmv.s.x v16, zero
 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v16
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -2482,7 +2482,7 @@
 ; CHECK-NEXT: vle32.v v16, (a0)
 ; CHECK-NEXT: vxor.vv v8, v8, v16
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmv.s.x v16, zero
 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v16
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -2539,7 +2539,7 @@
 ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-NEXT: vle64.v v8, (a0)
 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; RV64-NEXT: vmv.v.i v9, 0
+; RV64-NEXT: vmv.s.x v9, zero
 ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-NEXT: vredxor.vs v8, v8, v9
 ; RV64-NEXT: vmv.x.s a0, v8
@@ -2572,7 +2572,7 @@
 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
 ; RV64-NEXT: vle64.v v8, (a0)
 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; RV64-NEXT: vmv.v.i v10, 0
+; RV64-NEXT: vmv.s.x v10, zero
 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
 ; RV64-NEXT: vredxor.vs v8, v8, v10
 ; RV64-NEXT: vmv.x.s a0, v8
@@ -2605,7 +2605,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT: vle64.v v8, (a0)
 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; RV64-NEXT: vmv.v.i v12, 0
+; RV64-NEXT: vmv.s.x v12, zero
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT: vredxor.vs v8, v8, v12
 ; RV64-NEXT: vmv.x.s a0, v8
@@ -2638,7 +2638,7 @@
 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
 ; RV64-NEXT: vle64.v v8, (a0)
 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; RV64-NEXT: vmv.v.i v16, 0
+; RV64-NEXT: vmv.s.x v16, zero
 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
 ; RV64-NEXT: vredxor.vs v8, v8, v16
 ; RV64-NEXT: vmv.x.s a0, v8
@@ -2675,11 +2675,11 @@
 ; RV64-NEXT: vle64.v v8, (a0)
 ; RV64-NEXT: addi a0, a0, 128
 ; RV64-NEXT: vle64.v v16, (a0)
-; RV64-NEXT: vxor.vv v8, v8, v16
 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; RV64-NEXT: vmv.v.i v16, 0
+; RV64-NEXT: vmv.s.x v24, zero
 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
-; RV64-NEXT: vredxor.vs v8, v8, v16
+; RV64-NEXT: vxor.vv v8, v8, v16
+; RV64-NEXT: vredxor.vs v8, v8, v24
 ; RV64-NEXT: vmv.x.s a0, v8
 ; RV64-NEXT: ret
 %v = load <32 x i64>, <32 x i64>* %x
@@ -2728,7 +2728,7 @@
 ; RV64-NEXT: vxor.vv v8, v8, v0
 ; RV64-NEXT: vxor.vv v8, v8, v16
 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; RV64-NEXT: vmv.v.i v16, 0
+; RV64-NEXT: vmv.s.x v16, zero
 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
 ; RV64-NEXT: vredxor.vs v8, v8, v16
 ; RV64-NEXT: vmv.x.s a0, v8
@@ -5165,7 +5165,7 @@
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -5183,7 +5183,7 @@
 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -5201,7 +5201,7 @@
 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -5219,7 +5219,7 @@
 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -5238,7 +5238,7 @@
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv.s.x v10, zero
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v10
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -5257,7 +5257,7 @@
 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmv.s.x v12, zero
 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v12
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -5276,7 +5276,7 @@
 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmv.s.x v16, zero
 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v16
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -5298,7 +5298,7 @@
 ; CHECK-NEXT: vle8.v v16, (a0)
 ; CHECK-NEXT: vmaxu.vv v8, v8, v16
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmv.s.x v16, zero
 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v16
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -5330,7 +5330,7 @@
 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT: vle16.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -5348,7 +5348,7 @@
 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT: vle16.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -5366,7 +5366,7 @@
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT: vle16.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -5384,7 +5384,7 @@
 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT: vle16.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv.s.x v10, zero
 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v10
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -5403,7 +5403,7 @@
 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT: vle16.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmv.s.x v12, zero
 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v12
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -5422,7 +5422,7 @@
 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT: vle16.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmv.s.x v16, zero
 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v16
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -5444,7 +5444,7 @@
 ; CHECK-NEXT: vle16.v v16, (a0)
 ; CHECK-NEXT: vmaxu.vv v8, v8, v16
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmv.s.x v16, zero
 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v16
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -5476,7 +5476,7 @@
 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT: vle32.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -5494,7 +5494,7 @@
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT: vle32.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -5512,7 +5512,7 @@
 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT: vle32.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv.s.x v10, zero
 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v10
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -5530,7 +5530,7 @@
 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT: vle32.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmv.s.x v12, zero
 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v12
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -5549,7 +5549,7 @@
 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT: vle32.v v8, (a0)
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmv.s.x v16, zero
 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v16
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -5571,7 +5571,7 @@
 ; CHECK-NEXT: vle32.v v16, (a0)
 ; CHECK-NEXT: vmaxu.vv v8, v8, v16
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmv.s.x v16, zero
 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v16
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -5628,7 +5628,7 @@
 ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-NEXT: vle64.v v8, (a0)
 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; RV64-NEXT: vmv.v.i v9, 0
+; RV64-NEXT: vmv.s.x v9, zero
 ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-NEXT: vredmaxu.vs v8, v8, v9
 ; RV64-NEXT: vmv.x.s a0, v8
@@ -5661,7 +5661,7 @@
 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
 ; RV64-NEXT: vle64.v v8, (a0)
 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; RV64-NEXT: vmv.v.i v10, 0
+; RV64-NEXT: vmv.s.x v10, zero
 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
 ; RV64-NEXT: vredmaxu.vs v8, v8, v10
 ; RV64-NEXT: vmv.x.s a0, v8
@@ -5694,7 +5694,7 @@
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT: vle64.v v8, (a0)
 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; RV64-NEXT: vmv.v.i v12, 0
+; RV64-NEXT: vmv.s.x v12, zero
 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT: vredmaxu.vs v8, v8, v12
 ; RV64-NEXT: vmv.x.s a0, v8
@@ -5727,7 +5727,7 @@
 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
 ; RV64-NEXT: vle64.v v8, (a0)
 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; RV64-NEXT: vmv.v.i v16, 0
+; RV64-NEXT: vmv.s.x v16, zero
 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
 ; RV64-NEXT: vredmaxu.vs v8, v8, v16
 ; RV64-NEXT: vmv.x.s a0, v8
@@ -5764,11 +5764,11 @@
 ; RV64-NEXT: vle64.v v8, (a0)
 ; RV64-NEXT: addi a0, a0, 128
 ; RV64-NEXT: vle64.v v16, (a0)
-; RV64-NEXT: vmaxu.vv v8, v8, v16
 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; RV64-NEXT: vmv.v.i v16, 0
+; RV64-NEXT: vmv.s.x v24, zero
 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
-; RV64-NEXT: vredmaxu.vs v8, v8, v16
+; RV64-NEXT: vmaxu.vv v8, v8, v16
+; RV64-NEXT: vredmaxu.vs v8, v8, v24
 ; RV64-NEXT: vmv.x.s a0, v8
 ; RV64-NEXT: ret
 %v = load <32 x i64>, <32 x i64>* %x
@@ -5817,7 +5817,7 @@
 ; RV64-NEXT: vmaxu.vv v8, v8, v0
 ; RV64-NEXT: vmaxu.vv v8, v8, v16
 ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; RV64-NEXT: vmv.v.i v16, 0
+; RV64-NEXT: vmv.s.x v16, zero
 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
 ; RV64-NEXT: vredmaxu.vs v8, v8, v16
 ; RV64-NEXT: vmv.x.s a0, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
@@ -26,7 +26,7 @@
 ; CHECK-LABEL: vreduce_ord_fadd_nxv1f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, fa0
+; CHECK-NEXT: vfmv.s.f v9, fa0
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vfredosum.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -57,7 +57,7 @@
 ; CHECK-LABEL: vreduce_ord_fadd_nxv2f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, fa0
+; CHECK-NEXT: vfmv.s.f v9, fa0
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vfredosum.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -88,7 +88,7 @@
 ; CHECK-LABEL: vreduce_ord_fadd_nxv4f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, fa0
+; CHECK-NEXT: vfmv.s.f v9, fa0
 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vfredosum.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -119,7 +119,7 @@
 ; CHECK-LABEL: vreduce_ord_fadd_nxv1f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, fa0
+; CHECK-NEXT: vfmv.s.f v9, fa0
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vfredosum.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -150,7 +150,7 @@
 ; CHECK-LABEL: vreduce_ord_fadd_nxv2f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, fa0
+; CHECK-NEXT: vfmv.s.f v9, fa0
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vfredosum.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -181,7 +181,7 @@
 ; CHECK-LABEL: vreduce_ord_fadd_nxv4f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v10, fa0
+; CHECK-NEXT: vfmv.s.f v10, fa0
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vfredosum.vs v8, v8, v10
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -212,7 +212,7 @@
 ; CHECK-LABEL: vreduce_ord_fadd_nxv1f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, fa0
+; CHECK-NEXT: vfmv.s.f v9, fa0
 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vfredosum.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -243,7 +243,7 @@
 ; CHECK-LABEL: vreduce_ord_fadd_nxv2f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v10, fa0
+; CHECK-NEXT: vfmv.s.f v10, fa0
 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vfredosum.vs v8, v8, v10
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -274,7 +274,7 @@
 ; CHECK-LABEL: vreduce_ord_fadd_nxv4f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v12, fa0
+; CHECK-NEXT: vfmv.s.f v12, fa0
 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vfredosum.vs v8, v8, v12
 ; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -886,8 +886,9 @@
 define float @vreduce_nsz_fadd_nxv1f32(<vscale x 1 x float> %v, float %s) {
 ; CHECK-LABEL: vreduce_nsz_fadd_nxv1f32:
 ; CHECK: # %bb.0:
+; CHECK-NEXT: fmv.w.x ft0, zero
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vfmv.s.f v9, ft0
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vfredusum.vs v8, v8, v9
 ; CHECK-NEXT: vfmv.f.s ft0, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
@@ -10,7 +10,7 @@
 ; CHECK-LABEL: vpreduce_fadd_nxv1f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, fa0
+; CHECK-NEXT: vfmv.s.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
 ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v9
@@ -23,7 +23,7 @@
 ; CHECK-LABEL: vpreduce_ord_fadd_nxv1f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, fa0
+; CHECK-NEXT: vfmv.s.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
 ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v9
@@ -38,7 +38,7 @@
 ; CHECK-LABEL: vpreduce_fadd_nxv2f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, fa0
+; CHECK-NEXT: vfmv.s.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
 ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v9
@@ -51,7 +51,7 @@
 ; CHECK-LABEL: vpreduce_ord_fadd_nxv2f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, fa0
+; CHECK-NEXT: vfmv.s.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
 ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v9
@@ -66,7 +66,7 @@
 ; CHECK-LABEL: vpreduce_fadd_nxv4f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, fa0
+; CHECK-NEXT: vfmv.s.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
 ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v9
@@ -79,7 +79,7 @@
 ; CHECK-LABEL: vpreduce_ord_fadd_nxv4f16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, fa0
+; CHECK-NEXT: vfmv.s.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
 ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v9
@@ -94,7 +94,7 @@
 ; CHECK-LABEL: vpreduce_fadd_nxv1f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, fa0
+; CHECK-NEXT: vfmv.s.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
 ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v9
@@ -107,7 +107,7 @@
 ; CHECK-LABEL: vpreduce_ord_fadd_nxv1f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, fa0
+; CHECK-NEXT: vfmv.s.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
 ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v9
@@ -122,7 +122,7 @@
 ; CHECK-LABEL: vpreduce_fadd_nxv2f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, fa0
+; CHECK-NEXT: vfmv.s.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
 ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v9
@@ -135,7 +135,7 @@
 ; CHECK-LABEL: vpreduce_ord_fadd_nxv2f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, fa0
+; CHECK-NEXT: vfmv.s.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
 ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v9
@@ -150,7 +150,7 @@
 ; CHECK-LABEL: vpreduce_fadd_nxv4f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v10, fa0
+; CHECK-NEXT: vfmv.s.f v10, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
 ; CHECK-NEXT: vfredusum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v10
@@ -163,7 +163,7 @@
 ; CHECK-LABEL: vpreduce_ord_fadd_nxv4f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v10, fa0
+; CHECK-NEXT: vfmv.s.f v10, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
 ; CHECK-NEXT: vfredosum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v10
@@ -178,7 +178,7 @@
 ; CHECK-LABEL: vpreduce_fadd_nxv1f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, fa0
+; CHECK-NEXT: vfmv.s.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
 ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v9
@@ -191,7 +191,7 @@
 ; CHECK-LABEL: vpreduce_ord_fadd_nxv1f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v9, fa0
+; CHECK-NEXT: vfmv.s.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
 ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v9
@@ -206,7 +206,7 @@
 ; CHECK-LABEL: vpreduce_fadd_nxv2f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v10, fa0
+; CHECK-NEXT: vfmv.s.f v10, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
 ; CHECK-NEXT: vfredusum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v10
@@ -219,7 +219,7 @@
 ; CHECK-LABEL: vpreduce_ord_fadd_nxv2f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v10, fa0
+; CHECK-NEXT: vfmv.s.f v10, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
 ; CHECK-NEXT: vfredosum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v10
@@ -234,7 +234,7 @@
 ; CHECK-LABEL: vpreduce_fadd_nxv4f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v12, fa0
+; CHECK-NEXT: vfmv.s.f v12, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
 ; CHECK-NEXT: vfredusum.vs v12, v8, v12, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v12
@@ -247,7 +247,7 @@
 ; CHECK-LABEL: vpreduce_ord_fadd_nxv4f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.v.f v12, fa0
+; CHECK-NEXT: vfmv.s.f v12, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
 ; CHECK-NEXT: vfredosum.vs v12, v8, v12, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v12
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll
@@ -7,7 +7,7 @@
 ; CHECK-LABEL: vreduce_add_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -22,7 +22,7 @@
 ; CHECK-LABEL: vreduce_umax_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -99,7 +99,7 @@
 ; CHECK-LABEL: vreduce_or_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -114,7 +114,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -129,7 +129,7 @@
 ; CHECK-LABEL: vreduce_add_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -144,7 +144,7 @@
 ; CHECK-LABEL: vreduce_umax_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -221,7 +221,7 @@
 ; CHECK-LABEL: vreduce_or_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -236,7 +236,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -251,7 +251,7 @@
 ; CHECK-LABEL: vreduce_add_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -266,7 +266,7 @@
 ; CHECK-LABEL: vreduce_umax_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -343,7 +343,7 @@
 ; CHECK-LABEL: vreduce_or_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -358,7 +358,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -373,7 +373,7 @@
 ; CHECK-LABEL: vreduce_add_nxv1i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -388,7 +388,7 @@
 ; CHECK-LABEL: vreduce_umax_nxv1i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -466,7 +466,7 @@
 ; CHECK-LABEL: vreduce_or_nxv1i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -481,7 +481,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv1i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -496,7 +496,7 @@
 ; CHECK-LABEL: vreduce_add_nxv2i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -511,7 +511,7 @@
 ; CHECK-LABEL: vreduce_umax_nxv2i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -589,7 +589,7 @@
 ; CHECK-LABEL: vreduce_or_nxv2i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -604,7 +604,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv2i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -619,7 +619,7 @@
 ; CHECK-LABEL: vreduce_add_nxv4i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -634,7 +634,7 @@
 ; CHECK-LABEL: vreduce_umax_nxv4i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -712,7 +712,7 @@
 ; CHECK-LABEL: vreduce_or_nxv4i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -727,7 +727,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv4i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -742,7 +742,7 @@
 ; CHECK-LABEL: vreduce_add_nxv1i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -757,7 +757,7 @@
 ; CHECK-LABEL: vreduce_umax_nxv1i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -835,7 +835,7 @@
 ; CHECK-LABEL: vreduce_or_nxv1i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -850,7 +850,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv1i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -865,7 +865,7 @@
 ; CHECK-LABEL: vreduce_add_nxv2i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -880,7 +880,7 @@
 ; CHECK-LABEL: vreduce_umax_nxv2i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -958,7 +958,7 @@
 ; CHECK-LABEL: vreduce_or_nxv2i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -973,7 +973,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv2i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -988,7 +988,7 @@
 ; CHECK-LABEL: vreduce_add_nxv4i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv.s.x v10, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v10
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -1003,7 +1003,7 @@
 ; CHECK-LABEL: vreduce_umax_nxv4i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv.s.x v10, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v10
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -1081,7 +1081,7 @@
 ; CHECK-LABEL: vreduce_or_nxv4i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv.s.x v10, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v10
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -1096,7 +1096,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv4i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv.s.x v10, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v10
 ; CHECK-NEXT: vmv.x.s a0, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll
@@ -7,7 +7,7 @@
 ; CHECK-LABEL: vreduce_add_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -22,7 +22,7 @@
 ; CHECK-LABEL: vreduce_umax_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -99,7 +99,7 @@
 ; CHECK-LABEL: vreduce_or_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -114,7 +114,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -129,7 +129,7 @@
 ; CHECK-LABEL: vreduce_add_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -144,7 +144,7 @@
 ; CHECK-LABEL: vreduce_umax_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -221,7 +221,7 @@
 ; CHECK-LABEL: vreduce_or_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -236,7 +236,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -251,7 +251,7 @@
 ; CHECK-LABEL: vreduce_add_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -266,7 +266,7 @@
 ; CHECK-LABEL: vreduce_umax_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -343,7 +343,7 @@
 ; CHECK-LABEL: vreduce_or_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -358,7 +358,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -373,7 +373,7 @@
 ; CHECK-LABEL: vreduce_add_nxv1i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -388,7 +388,7 @@
 ; CHECK-LABEL: vreduce_umax_nxv1i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -466,7 +466,7 @@
 ; CHECK-LABEL: vreduce_or_nxv1i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -481,7 +481,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv1i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -496,7 +496,7 @@
 ; CHECK-LABEL: vreduce_add_nxv2i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -511,7 +511,7 @@
 ; CHECK-LABEL: vreduce_umax_nxv2i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -589,7 +589,7 @@
 ; CHECK-LABEL: vreduce_or_nxv2i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -604,7 +604,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv2i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -619,7 +619,7 @@
 ; CHECK-LABEL: vreduce_add_nxv4i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -634,7 +634,7 @@
 ; CHECK-LABEL: vreduce_umax_nxv4i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -712,7 +712,7 @@
 ; CHECK-LABEL: vreduce_or_nxv4i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -727,7 +727,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv4i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -742,7 +742,7 @@
 ; CHECK-LABEL: vreduce_add_nxv1i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -757,7 +757,7 @@
 ; CHECK-LABEL: vreduce_umax_nxv1i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -835,7 +835,7 @@
 ; CHECK-LABEL: vreduce_or_nxv1i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -850,7 +850,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv1i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -865,7 +865,7 @@
 ; CHECK-LABEL: vreduce_add_nxv2i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -880,7 +880,7 @@
 ; CHECK-LABEL: vreduce_umax_nxv2i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -958,7 +958,7 @@
 ; CHECK-LABEL: vreduce_or_nxv2i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -973,7 +973,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv2i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -988,7 +988,7 @@
 ; CHECK-LABEL: vreduce_add_nxv4i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv.s.x v10, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v10
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -1003,7 +1003,7 @@
 ; CHECK-LABEL: vreduce_umax_nxv4i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv.s.x v10, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v10
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -1081,7 +1081,7 @@
 ; CHECK-LABEL: vreduce_or_nxv4i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv.s.x v10, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v10
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -1096,7 +1096,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv4i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv.s.x v10, zero
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v10
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -1111,7 +1111,7 @@
 ; CHECK-LABEL: vreduce_add_nxv1i64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -1126,7 +1126,7 @@
 ; CHECK-LABEL: vreduce_umax_nxv1i64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -1205,7 +1205,7 @@
 ; CHECK-LABEL: vreduce_or_nxv1i64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -1220,7 +1220,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv1i64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmv.s.x v9, zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v9
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -1235,7 +1235,7 @@
 ; CHECK-LABEL: vreduce_add_nxv2i64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv.s.x v10, zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v10
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -1250,7 +1250,7 @@
 ; CHECK-LABEL: vreduce_umax_nxv2i64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv.s.x v10, zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v10
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -1329,7 +1329,7 @@
 ; CHECK-LABEL: vreduce_or_nxv2i64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv.s.x v10, zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v10
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -1344,7 +1344,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv2i64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmv.s.x v10, zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v10
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -1359,7 +1359,7 @@
 ; CHECK-LABEL: vreduce_add_nxv4i64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmv.s.x v12, zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vredsum.vs v8, v8, v12
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -1374,7 +1374,7 @@
 ; CHECK-LABEL: vreduce_umax_nxv4i64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmv.s.x v12, zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vredmaxu.vs v8, v8, v12
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -1453,7 +1453,7 @@
 ; CHECK-LABEL: vreduce_or_nxv4i64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmv.s.x v12, zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vredor.vs v8, v8, v12
 ; CHECK-NEXT: vmv.x.s a0, v8
@@ -1468,7 +1468,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv4i64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmv.s.x v12, zero
 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT: vredxor.vs v8, v8, v12
 ; CHECK-NEXT: vmv.x.s a0, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
@@ -10,7 +10,7 @@
 ; CHECK-LABEL: vpreduce_add_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -26,7 +26,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: andi a0, a0, 255
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -41,7 +41,7 @@
 ; CHECK-LABEL: vpreduce_smax_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -57,7 +57,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: andi a0, a0, 255
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -72,7 +72,7 @@
 ; CHECK-LABEL: vpreduce_smin_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -87,7 +87,7 @@
 ; CHECK-LABEL: vpreduce_and_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -102,7 +102,7 @@
 ; CHECK-LABEL: vpreduce_or_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -117,7 +117,7 @@
 ; CHECK-LABEL: vpreduce_xor_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
 ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -132,7 +132,7 @@
 ; CHECK-LABEL: vpreduce_add_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -148,7 +148,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: andi a0, a0, 255
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -163,7 +163,7 @@
 ; CHECK-LABEL: vpreduce_smax_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -179,7 +179,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: andi a0, a0, 255
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -194,7 +194,7 @@
 ; CHECK-LABEL: vpreduce_smin_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -209,7 +209,7 @@
 ; CHECK-LABEL: vpreduce_and_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -224,7 +224,7 @@
 ; CHECK-LABEL: vpreduce_or_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -239,7 +239,7 @@
 ; CHECK-LABEL: vpreduce_xor_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
 ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -254,7 +254,7 @@
 ; CHECK-LABEL: vpreduce_add_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
 ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -270,7 +270,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: andi a0, a0, 255
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
 ; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -285,7 +285,7 @@
 ; CHECK-LABEL: vpreduce_smax_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
 ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -301,7 +301,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: andi a0, a0, 255
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
 ; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -316,7 +316,7 @@
 ; CHECK-LABEL: vpreduce_smin_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
 ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -331,7 +331,7 @@
 ; CHECK-LABEL: vpreduce_and_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
 ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -346,7 +346,7 @@
 ; CHECK-LABEL: vpreduce_or_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
 ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -361,7 +361,7 @@
 ; CHECK-LABEL: vpreduce_xor_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
 ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -376,7 +376,7 @@
 ; CHECK-LABEL: vpreduce_add_nxv1i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
 ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -394,7 +394,7 @@
 ; RV32-NEXT: addi a2, a2, -1
 ; RV32-NEXT: and a0, a0, a2
 ; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.x v9, a0
+; RV32-NEXT: vmv.s.x v9, a0
 ; RV32-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
 ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t
 ; RV32-NEXT: vmv.x.s a0, v9
@@ -406,7 +406,7 @@
 ; RV64-NEXT: addiw a2, a2, -1
 ; RV64-NEXT: and a0, a0, a2
 ; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.x v9, a0
+; RV64-NEXT: vmv.s.x v9, a0
 ; RV64-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
 ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t
 ; RV64-NEXT: vmv.x.s a0, v9
@@ -421,7 +421,7 @@
 ; CHECK-LABEL: vpreduce_smax_nxv1i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
 ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -439,7 +439,7 @@
 ; RV32-NEXT: addi a2, a2, -1
 ; RV32-NEXT: and a0, a0, a2
 ; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.x v9, a0
+; RV32-NEXT: vmv.s.x v9, a0
 ; RV32-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
 ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t
 ; RV32-NEXT: vmv.x.s a0, v9
@@ -451,7 +451,7 @@
 ; RV64-NEXT: addiw a2, a2, -1
 ; RV64-NEXT: and a0, a0, a2
 ; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.x v9, a0
+; RV64-NEXT: vmv.s.x v9, a0
 ; RV64-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
 ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t
 ; RV64-NEXT: vmv.x.s a0, v9
@@ -466,7 +466,7 @@
 ; CHECK-LABEL: vpreduce_smin_nxv1i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
 ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -481,7 +481,7 @@
 ; CHECK-LABEL: vpreduce_and_nxv1i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
 ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -496,7 +496,7 @@
 ; CHECK-LABEL: vpreduce_or_nxv1i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
 ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -511,7 +511,7 @@
 ; CHECK-LABEL: vpreduce_xor_nxv1i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
 ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -526,7 +526,7 @@
 ; CHECK-LABEL: vpreduce_add_nxv2i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
 ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -544,7 +544,7 @@
 ; RV32-NEXT: addi a2, a2, -1
 ; RV32-NEXT: and a0, a0, a2
 ; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.x v9, a0
+; RV32-NEXT: vmv.s.x v9, a0
 ; RV32-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
 ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t
 ; RV32-NEXT: vmv.x.s a0, v9
@@ -556,7 +556,7 @@
 ; RV64-NEXT: addiw a2, a2, -1
 ; RV64-NEXT: and a0, a0, a2
 ; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.x v9, a0
+; RV64-NEXT: vmv.s.x v9, a0
 ; RV64-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
 ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t
 ; RV64-NEXT: vmv.x.s a0, v9
@@ -571,7 +571,7 @@
 ; CHECK-LABEL: vpreduce_smax_nxv2i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
 ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -589,7 +589,7 @@
 ; RV32-NEXT: addi a2, a2, -1
 ; RV32-NEXT: and a0, a0, a2
 ; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.x v9, a0
+; RV32-NEXT: vmv.s.x v9, a0
 ; RV32-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
 ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t
 ; RV32-NEXT: vmv.x.s a0, v9
@@ -601,7 +601,7 @@
 ; RV64-NEXT: addiw a2, a2, -1
 ; RV64-NEXT: and a0, a0, a2
 ; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.x v9, a0
+; RV64-NEXT: vmv.s.x v9, a0
 ; RV64-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
 ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t
 ; RV64-NEXT: vmv.x.s a0, v9
@@ -616,7 +616,7 @@
 ; CHECK-LABEL: vpreduce_smin_nxv2i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
 ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -631,7 +631,7 @@
 ; CHECK-LABEL: vpreduce_and_nxv2i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
 ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -646,7 +646,7 @@
 ; CHECK-LABEL: vpreduce_or_nxv2i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
 ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -661,7 +661,7 @@
 ; CHECK-LABEL: vpreduce_xor_nxv2i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
 ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -676,7 +676,7 @@
 ; CHECK-LABEL: vpreduce_add_nxv4i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
 ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -694,7 +694,7 @@
 ; RV32-NEXT: addi a2, a2, -1
 ; RV32-NEXT: and a0, a0, a2
 ; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.x v9, a0
+; RV32-NEXT: vmv.s.x v9, a0
 ; RV32-NEXT: vsetvli zero, a1, e16, m1, tu, mu
 ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t
 ; RV32-NEXT: vmv.x.s a0, v9
@@ -706,7 +706,7 @@
 ; RV64-NEXT: addiw a2, a2, -1
 ; RV64-NEXT: and a0, a0, a2
 ; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.x v9, a0
+; RV64-NEXT: vmv.s.x v9, a0
 ; RV64-NEXT: vsetvli zero, a1, e16, m1, tu, mu
 ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t
 ; RV64-NEXT: vmv.x.s a0, v9
@@ -721,7 +721,7 @@
 ; CHECK-LABEL: vpreduce_smax_nxv4i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
 ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@ -739,7 +739,7 @@
 ; RV32-NEXT: addi a2, a2, -1
 ; RV32-NEXT: and a0, a0, a2
 ; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.x v9, a0
+; RV32-NEXT: vmv.s.x v9, a0
 ; RV32-NEXT: vsetvli zero, a1, e16, m1, tu, mu
 ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t
 ; RV32-NEXT: vmv.x.s a0, v9
@@ -751,7 +751,7 @@
 ; RV64-NEXT: addiw a2, a2, -1
 ; RV64-NEXT: and a0, a0, a2
 ; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.x v9, a0
+; RV64-NEXT: vmv.s.x v9, a0
 ; RV64-NEXT: vsetvli zero, a1, e16, m1, tu, mu
 ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t
 ; RV64-NEXT: vmv.x.s a0, v9
@@ -766,7 +766,7 @@
 ; CHECK-LABEL: vpreduce_smin_nxv4i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
 ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vmv.x.s a0, v9
@@
-781,7 +781,7 @@ ; CHECK-LABEL: vpreduce_and_nxv4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -796,7 +796,7 @@ ; CHECK-LABEL: vpreduce_or_nxv4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -811,7 +811,7 @@ ; CHECK-LABEL: vpreduce_xor_nxv4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -826,7 +826,7 @@ ; CHECK-LABEL: vpreduce_add_nxv1i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -841,7 +841,7 @@ ; RV32-LABEL: vpreduce_umax_nxv1i32: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; RV32-NEXT: vmv.v.x v9, a0 +; RV32-NEXT: vmv.s.x v9, a0 ; RV32-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 @@ -852,7 +852,7 @@ ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: srli a0, a0, 32 ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -867,7 +867,7 @@ ; CHECK-LABEL: vpreduce_smax_nxv1i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -882,7 +882,7 @@ ; RV32-LABEL: vpreduce_umin_nxv1i32: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; RV32-NEXT: vmv.v.x v9, a0 +; RV32-NEXT: vmv.s.x v9, a0 ; RV32-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 @@ -893,7 +893,7 @@ ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: srli a0, a0, 32 ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -908,7 +908,7 @@ ; CHECK-LABEL: vpreduce_smin_nxv1i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -923,7 +923,7 @@ ; CHECK-LABEL: vpreduce_and_nxv1i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -938,7 +938,7 @@ ; CHECK-LABEL: vpreduce_or_nxv1i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; 
CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -953,7 +953,7 @@ ; CHECK-LABEL: vpreduce_xor_nxv1i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -968,7 +968,7 @@ ; CHECK-LABEL: vpreduce_add_nxv2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -983,7 +983,7 @@ ; RV32-LABEL: vpreduce_umax_nxv2i32: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; RV32-NEXT: vmv.v.x v9, a0 +; RV32-NEXT: vmv.s.x v9, a0 ; RV32-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 @@ -994,7 +994,7 @@ ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: srli a0, a0, 32 ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -1009,7 +1009,7 @@ ; CHECK-LABEL: vpreduce_smax_nxv2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -1024,7 +1024,7 @@ ; RV32-LABEL: vpreduce_umin_nxv2i32: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; RV32-NEXT: vmv.v.x v9, a0 +; RV32-NEXT: vmv.s.x v9, a0 ; RV32-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 @@ -1035,7 +1035,7 @@ ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: srli a0, a0, 32 ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -1050,7 +1050,7 @@ ; CHECK-LABEL: vpreduce_smin_nxv2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -1065,7 +1065,7 @@ ; CHECK-LABEL: vpreduce_and_nxv2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -1080,7 +1080,7 @@ ; CHECK-LABEL: vpreduce_or_nxv2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -1095,7 +1095,7 @@ ; CHECK-LABEL: vpreduce_xor_nxv2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 @@ -1110,7 +1110,7 @@ ; CHECK-LABEL: vpreduce_add_nxv4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v10, a0 +; 
CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; CHECK-NEXT: vredsum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 @@ -1125,7 +1125,7 @@ ; RV32-LABEL: vpreduce_umax_nxv4i32: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; RV32-NEXT: vmv.v.x v10, a0 +; RV32-NEXT: vmv.s.x v10, a0 ; RV32-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; RV32-NEXT: vredmaxu.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 @@ -1136,7 +1136,7 @@ ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: srli a0, a0, 32 ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; RV64-NEXT: vmv.v.x v10, a0 +; RV64-NEXT: vmv.s.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; RV64-NEXT: vredmaxu.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 @@ -1151,7 +1151,7 @@ ; CHECK-LABEL: vpreduce_smax_nxv4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v10, a0 +; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; CHECK-NEXT: vredmax.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 @@ -1166,7 +1166,7 @@ ; RV32-LABEL: vpreduce_umin_nxv4i32: ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; RV32-NEXT: vmv.v.x v10, a0 +; RV32-NEXT: vmv.s.x v10, a0 ; RV32-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; RV32-NEXT: vredminu.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 @@ -1177,7 +1177,7 @@ ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: srli a0, a0, 32 ; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; RV64-NEXT: vmv.v.x v10, a0 +; RV64-NEXT: vmv.s.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; RV64-NEXT: vredminu.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 @@ -1192,7 +1192,7 @@ ; CHECK-LABEL: vpreduce_smin_nxv4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v10, a0 +; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; CHECK-NEXT: vredmin.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 @@ -1207,7 +1207,7 @@ ; CHECK-LABEL: vpreduce_and_nxv4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v10, a0 +; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; CHECK-NEXT: vredand.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 @@ -1222,7 +1222,7 @@ ; CHECK-LABEL: vpreduce_or_nxv4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v10, a0 +; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; CHECK-NEXT: vredor.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 @@ -1237,7 +1237,7 @@ ; CHECK-LABEL: vpreduce_xor_nxv4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v10, a0 +; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu ; CHECK-NEXT: vredxor.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 @@ -1271,7 +1271,7 @@ ; RV64-LABEL: vpreduce_add_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredsum.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -1305,7 +1305,7 @@ ; RV64-LABEL: vpreduce_umax_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -1339,7 +1339,7 @@ ; RV64-LABEL: 
vpreduce_smax_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredmax.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -1373,7 +1373,7 @@ ; RV64-LABEL: vpreduce_umin_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -1407,7 +1407,7 @@ ; RV64-LABEL: vpreduce_smin_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredmin.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -1441,7 +1441,7 @@ ; RV64-LABEL: vpreduce_and_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredand.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -1475,7 +1475,7 @@ ; RV64-LABEL: vpreduce_or_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredor.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -1509,7 +1509,7 @@ ; RV64-LABEL: vpreduce_xor_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu ; RV64-NEXT: vredxor.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 @@ -1543,7 +1543,7 @@ ; RV64-LABEL: vpreduce_add_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v10, a0 +; RV64-NEXT: vmv.s.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredsum.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 @@ -1577,7 +1577,7 @@ ; RV64-LABEL: vpreduce_umax_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v10, a0 +; RV64-NEXT: vmv.s.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredmaxu.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 @@ -1611,7 +1611,7 @@ ; RV64-LABEL: vpreduce_smax_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v10, a0 +; RV64-NEXT: vmv.s.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredmax.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 @@ -1645,7 +1645,7 @@ ; RV64-LABEL: vpreduce_umin_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v10, a0 +; RV64-NEXT: vmv.s.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredminu.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 @@ -1679,7 +1679,7 @@ ; RV64-LABEL: vpreduce_smin_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v10, a0 +; RV64-NEXT: vmv.s.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredmin.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 @@ -1713,7 +1713,7 @@ ; RV64-LABEL: vpreduce_and_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v10, a0 +; RV64-NEXT: vmv.s.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredand.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 
@@ -1747,7 +1747,7 @@ ; RV64-LABEL: vpreduce_or_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v10, a0 +; RV64-NEXT: vmv.s.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredor.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 @@ -1781,7 +1781,7 @@ ; RV64-LABEL: vpreduce_xor_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v10, a0 +; RV64-NEXT: vmv.s.x v10, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu ; RV64-NEXT: vredxor.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 @@ -1815,7 +1815,7 @@ ; RV64-LABEL: vpreduce_add_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v12, a0 +; RV64-NEXT: vmv.s.x v12, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; RV64-NEXT: vredsum.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 @@ -1849,7 +1849,7 @@ ; RV64-LABEL: vpreduce_umax_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v12, a0 +; RV64-NEXT: vmv.s.x v12, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; RV64-NEXT: vredmaxu.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 @@ -1883,7 +1883,7 @@ ; RV64-LABEL: vpreduce_smax_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v12, a0 +; RV64-NEXT: vmv.s.x v12, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; RV64-NEXT: vredmax.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 @@ -1917,7 +1917,7 @@ ; RV64-LABEL: vpreduce_umin_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v12, a0 +; RV64-NEXT: vmv.s.x v12, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; RV64-NEXT: vredminu.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 @@ -1951,7 +1951,7 @@ ; RV64-LABEL: vpreduce_smin_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v12, a0 +; RV64-NEXT: vmv.s.x v12, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; RV64-NEXT: vredmin.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 @@ -1985,7 +1985,7 @@ ; RV64-LABEL: vpreduce_and_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v12, a0 +; RV64-NEXT: vmv.s.x v12, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; RV64-NEXT: vredand.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 @@ -2019,7 +2019,7 @@ ; RV64-LABEL: vpreduce_or_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v12, a0 +; RV64-NEXT: vmv.s.x v12, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; RV64-NEXT: vredor.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 @@ -2053,7 +2053,7 @@ ; RV64-LABEL: vpreduce_xor_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; RV64-NEXT: vmv.v.x v12, a0 +; RV64-NEXT: vmv.s.x v12, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu ; RV64-NEXT: vredxor.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12