diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp --- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp +++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp @@ -82,6 +82,38 @@ } } +static bool isSplatMoveInstr(const MachineInstr &MI) { + switch (MI.getOpcode()) { + default: + return false; + case RISCV::PseudoVMV_V_X_M1: + case RISCV::PseudoVMV_V_X_M2: + case RISCV::PseudoVMV_V_X_M4: + case RISCV::PseudoVMV_V_X_M8: + case RISCV::PseudoVMV_V_X_MF2: + case RISCV::PseudoVMV_V_X_MF4: + case RISCV::PseudoVMV_V_X_MF8: + case RISCV::PseudoVMV_V_I_M1: + case RISCV::PseudoVMV_V_I_M2: + case RISCV::PseudoVMV_V_I_M4: + case RISCV::PseudoVMV_V_I_M8: + case RISCV::PseudoVMV_V_I_MF2: + case RISCV::PseudoVMV_V_I_MF4: + case RISCV::PseudoVMV_V_I_MF8: + return true; + } +} + +static bool isSplatOfZeroOrMinusOne(const MachineInstr &MI) { + if (!isSplatMoveInstr(MI)) + return false; + + const MachineOperand &SrcMO = MI.getOperand(1); + if (SrcMO.isImm()) + return SrcMO.getImm() == 0 || SrcMO.getImm() == -1; + return SrcMO.isReg() && SrcMO.getReg() == RISCV::X0; +} + /// Get the EEW for a load or store instruction. Return None if MI is not /// a load or store which ignores SEW. static Optional getEEWForLoadStore(const MachineInstr &MI) { @@ -370,6 +402,14 @@ Res.LMUL = false; } + // A splat of 0/-1 is always a splat of 0/-1, regardless of etype. + // TODO: We're currently demanding VL + SEWLMULRatio which is sufficient + // but not necessary. What we really need is VLInBytes. 
+ if (isSplatOfZeroOrMinusOne(MI)) { + Res.SEW = false; + Res.LMUL = false; + } + return Res; } diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll --- a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll +++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll @@ -27,11 +27,9 @@ ; RV32-NEXT: vmsne.vi v0, v11, 0 ; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; RV32-NEXT: vmerge.vvm v8, v9, v8, v0 -; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; RV32-NEXT: vmv.v.i v9, 0 -; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; RV32-NEXT: vmv1r.v v0, v10 ; RV32-NEXT: vmerge.vim v8, v9, 1, v0 ; RV32-NEXT: vmv.x.s a1, v8 @@ -54,11 +52,9 @@ ; RV64-NEXT: vmsne.vi v0, v13, 0 ; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV64-NEXT: vmerge.vvm v8, v10, v8, v0 -; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; RV64-NEXT: vmv.v.i v10, 0 -; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV64-NEXT: vmv.x.s a0, v8 -; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; RV64-NEXT: vmv1r.v v0, v12 ; RV64-NEXT: vmerge.vim v8, v10, 1, v0 ; RV64-NEXT: vmv.x.s a1, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll @@ -1567,7 +1567,7 @@ ; LMULMAX1-NEXT: .cfi_offset ra, -8 ; LMULMAX1-NEXT: li a0, 8 ; LMULMAX1-NEXT: sd a0, 128(sp) -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; LMULMAX1-NEXT: vmv.v.i v8, 0 ; LMULMAX1-NEXT: vse32.v v8, (sp) ; LMULMAX1-NEXT: addi a0, sp, 112 @@ -1584,7 +1584,6 @@ ; LMULMAX1-NEXT: vse32.v v8, (a0) ; LMULMAX1-NEXT: addi a0, sp, 16 ; 
LMULMAX1-NEXT: vse32.v v8, (a0) -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; LMULMAX1-NEXT: vmv.v.i v9, 0 ; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll @@ -9,8 +9,8 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vlm.v v0, (a0) -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret %y = load <2 x i1>, <2 x i1>* %x diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll @@ -41,8 +41,8 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vmfne.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 @@ -66,8 +66,8 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vmfne.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 @@ -91,8 +91,8 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vmflt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: 
vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 @@ -116,8 +116,8 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vmflt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 @@ -205,8 +205,8 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v10, (a1) ; CHECK-NEXT: vmfle.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 @@ -230,8 +230,8 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v10, (a1) ; CHECK-NEXT: vmfle.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 @@ -463,8 +463,8 @@ ; CHECK-NEXT: vmfeq.vv v8, v8, v8 ; CHECK-NEXT: vmfeq.vv v9, v9, v9 ; CHECK-NEXT: vmand.mm v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 @@ -490,8 +490,8 @@ ; CHECK-NEXT: vmfne.vv v8, v8, v8 ; CHECK-NEXT: vmfne.vv v9, v9, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 @@ -546,8 +546,8 @@ ; 
CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfne.vf v0, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 @@ -571,8 +571,8 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfne.vf v0, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 @@ -596,8 +596,8 @@ ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 @@ -621,8 +621,8 @@ ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 @@ -710,8 +710,8 @@ ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfle.vf v0, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 @@ -735,8 +735,8 @@ ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfle.vf v0, 
v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 @@ -969,8 +969,8 @@ ; CHECK-NEXT: vmfeq.vf v9, v9, fa0 ; CHECK-NEXT: vmfeq.vv v8, v8, v8 ; CHECK-NEXT: vmand.mm v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 @@ -997,8 +997,8 @@ ; CHECK-NEXT: vmfne.vf v9, v9, fa0 ; CHECK-NEXT: vmfne.vv v8, v8, v8 ; CHECK-NEXT: vmor.mm v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 @@ -1054,8 +1054,8 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfne.vf v0, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 @@ -1079,8 +1079,8 @@ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfne.vf v0, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 @@ -1104,8 +1104,8 @@ ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmflt.vf v0, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, 
e8, mf8, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 @@ -1129,8 +1129,8 @@ ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmflt.vf v0, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 @@ -1218,8 +1218,8 @@ ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfge.vf v0, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 @@ -1243,8 +1243,8 @@ ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfge.vf v0, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 @@ -1477,8 +1477,8 @@ ; CHECK-NEXT: vmfeq.vf v9, v9, fa0 ; CHECK-NEXT: vmfeq.vv v8, v8, v8 ; CHECK-NEXT: vmand.mm v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 @@ -1505,8 +1505,8 @@ ; CHECK-NEXT: vmfne.vf v9, v9, fa0 ; CHECK-NEXT: vmfne.vv v8, v8, v8 ; CHECK-NEXT: vmor.mm v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: 
vmv.v.i v9, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll @@ -154,8 +154,8 @@ ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v10, v9 ; LMULMAX1-NEXT: vand.vi v9, v10, 1 ; LMULMAX1-NEXT: vmsne.vi v0, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; LMULMAX1-NEXT: vmv.v.i v9, 0 +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 @@ -188,8 +188,8 @@ ; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v10, v9 ; LMULMAX1-NEXT: vand.vi v9, v10, 1 ; LMULMAX1-NEXT: vmsne.vi v0, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; LMULMAX1-NEXT: vmv.v.i v9, 0 +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu ; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 @@ -564,8 +564,8 @@ ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v13, v9 ; LMULMAX1-NEXT: vand.vi v9, v13, 1 ; LMULMAX1-NEXT: vmsne.vi v0, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vmv.v.i v9, 0 +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vmerge.vim v13, v9, 1, v0 ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v12, v13, 2 @@ -620,8 +620,8 @@ ; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v13, v9 ; LMULMAX1-NEXT: vand.vi v9, v13, 1 ; LMULMAX1-NEXT: vmsne.vi v0, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vmv.v.i v9, 0 +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; LMULMAX1-NEXT: vmerge.vim v13, v9, 1, v0 ; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu ; LMULMAX1-NEXT: vslideup.vi v12, v13, 2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll @@ -507,8 +507,8 @@ ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV32-NEXT: vmseq.vi v0, v8, 0 -; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; RV32-NEXT: vmv.v.i v8, 0 +; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; RV32-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmv.v.i v9, 0 @@ -567,8 +567,8 @@ ; RV64-NEXT: addi sp, sp, -16 ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV64-NEXT: vmseq.vi v0, v8, 0 -; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; RV64-NEXT: vmv.v.i v8, 0 +; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; RV64-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmv.v.i v9, 0 @@ -635,8 +635,8 @@ ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vmseq.vi v0, v9, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmv.v.i v9, 0 +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v10, 0