diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -1423,29 +1423,21 @@
     if (Used.VLAny)
       return false;
 
-    // TODO: Requires more care in the mutation...
-    if (isVLPreservingConfig(PrevMI))
-      return false;
-
     // We don't bother to handle the equally zero case here as it's largely
     // uninteresting.
-    if (Used.VLZeroness &&
-        (!isNonZeroAVL(MI.getOperand(1)) ||
-         !isNonZeroAVL(PrevMI.getOperand(1))))
-      return false;
+    if (Used.VLZeroness) {
+      if (isVLPreservingConfig(PrevMI))
+        return false;
+      if (!isNonZeroAVL(MI.getOperand(1)) ||
+          !isNonZeroAVL(PrevMI.getOperand(1)))
+        return false;
+    }
 
     // TODO: Track whether the register is defined between
     // PrevMI and MI.
     if (MI.getOperand(1).isReg() && RISCV::X0 != MI.getOperand(1).getReg())
       return false;
-
-    // TODO: We need to change the result register to allow this rewrite
-    // without the result forming a vl preserving vsetvli which is not
-    // a correct state merge.
-    if (PrevMI.getOperand(0).getReg() == RISCV::X0 &&
-        MI.getOperand(1).isReg())
-      return false;
   }
 
   if (!PrevMI.getOperand(2).isImm() || !MI.getOperand(2).isImm())
     return false;
@@ -1483,6 +1475,8 @@
         continue;
       } else if (canMutatePriorConfig(MI, *NextMI, Used)) {
         if (!isVLPreservingConfig(*NextMI)) {
+          MI.getOperand(0).setReg(NextMI->getOperand(0).getReg());
+          MI.getOperand(0).setIsDead(false);
           if (NextMI->getOperand(1).isImm())
             MI.getOperand(1).ChangeToImmediate(NextMI->getOperand(1).getImm());
           else
@@ -1490,11 +1484,7 @@
           MI.setDesc(NextMI->getDesc());
         }
         MI.getOperand(2).setImm(NextMI->getOperand(2).getImm());
-        // Don't delete a vsetvli if its result might be used.
-        Register NextVRefDef = NextMI->getOperand(0).getReg();
-        if (NextVRefDef == RISCV::X0 ||
-            (NextVRefDef.isVirtual() && MRI->use_nodbg_empty(NextVRefDef)))
-          ToDelete.push_back(NextMI);
+        ToDelete.push_back(NextMI);
         // fallthrough
       }
     }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -1893,12 +1893,11 @@
 ; RV64ZVE32F-NEXT:    andi a2, a1, 1
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB26_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.load
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lh a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
 ; RV64ZVE32F-NEXT:  .LBB26_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
@@ -3441,12 +3440,11 @@
 ; RV64ZVE32F-NEXT:    andi a2, a1, 1
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB41_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.load
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v10, a2
 ; RV64ZVE32F-NEXT:  .LBB41_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
@@ -7824,12 +7822,11 @@
 ; RV64ZVE32F-NEXT:    andi a2, a1, 1
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB67_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.load
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
 ; RV64ZVE32F-NEXT:  .LBB67_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
@@ -9246,12 +9243,11 @@
 ; RV64ZVE32F-NEXT:    andi a2, a1, 1
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB80_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.load
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e32, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vfmv.s.f v10, fa5
 ; RV64ZVE32F-NEXT:  .LBB80_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
@@ -12126,11 +12122,10 @@
 ; RV64ZVE32F-NEXT:    andi a2, a1, 1
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB97_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.load
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 16, e8, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 16, e8, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
 ; RV64ZVE32F-NEXT:  .LBB97_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -1472,11 +1472,10 @@
 ; RV64ZVE32F-NEXT:    andi a2, a1, 1
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB21_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.store
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vse16.v v8, (a2)
 ; RV64ZVE32F-NEXT:  .LBB21_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
@@ -2789,11 +2788,10 @@
 ; RV64ZVE32F-NEXT:    andi a2, a1, 1
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB35_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.store
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v10
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
 ; RV64ZVE32F-NEXT:    vse32.v v8, (a2)
 ; RV64ZVE32F-NEXT:  .LBB35_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
@@ -6731,11 +6729,10 @@
 ; RV64ZVE32F-NEXT:    andi a2, a1, 1
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB61_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.store
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    slli a2, a2, 1
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vse16.v v8, (a2)
 ; RV64ZVE32F-NEXT:  .LBB61_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
@@ -7994,11 +7991,10 @@
 ; RV64ZVE32F-NEXT:    andi a2, a1, 1
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB74_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.store
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v10
 ; RV64ZVE32F-NEXT:    slli a2, a2, 2
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
 ; RV64ZVE32F-NEXT:    vse32.v v8, (a2)
 ; RV64ZVE32F-NEXT:  .LBB74_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
@@ -10646,10 +10642,9 @@
 ; RV64ZVE32F-NEXT:    andi a2, a1, 1
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB91_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.store
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vse8.v v8, (a2)
 ; RV64ZVE32F-NEXT:  .LBB91_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
@@ -10879,10 +10874,9 @@
 ; RV64ZVE32F-NEXT:    andi a2, a1, 1
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB92_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.store
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v10
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
 ; RV64ZVE32F-NEXT:    vse8.v v8, (a2)
 ; RV64ZVE32F-NEXT:  .LBB92_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
@@ -1049,10 +1049,9 @@
 ; RV32-NEXT:    vmv.s.x v9, zero
 ; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; RV32-NEXT:    vwredsum.vs v8, v8, v9
-; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a0, v8
 ; RV32-NEXT:    li a1, 32
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    ret
@@ -1081,10 +1080,9 @@
 ; RV32-NEXT:    vmv.s.x v9, zero
 ; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; RV32-NEXT:    vwredsumu.vs v8, v8, v9
-; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a0, v8
 ; RV32-NEXT:    li a1, 32
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
@@ -182,11 +182,10 @@
 ; RV32-SLOW-NEXT:    vmv1r.v v8, v9
 ; RV32-SLOW-NEXT:    ret
 ; RV32-SLOW-NEXT:  .LBB5_3: # %cond.load
-; RV32-SLOW-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-SLOW-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; RV32-SLOW-NEXT:    vmv.x.s a1, v8
 ; RV32-SLOW-NEXT:    lw a2, 0(a1)
 ; RV32-SLOW-NEXT:    lw a1, 4(a1)
-; RV32-SLOW-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
 ; RV32-SLOW-NEXT:    vslide1down.vx v9, v9, a2
 ; RV32-SLOW-NEXT:    vslide1down.vx v9, v9, a1
 ; RV32-SLOW-NEXT:    andi a0, a0, 2
@@ -218,13 +217,12 @@
 ; RV64-SLOW-NEXT:    vmv1r.v v8, v9
 ; RV64-SLOW-NEXT:    ret
 ; RV64-SLOW-NEXT:  .LBB5_3: # %cond.load
-; RV64-SLOW-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; RV64-SLOW-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
 ; RV64-SLOW-NEXT:    vmv.x.s a1, v8
 ; RV64-SLOW-NEXT:    lwu a2, 4(a1)
 ; RV64-SLOW-NEXT:    lwu a1, 0(a1)
 ; RV64-SLOW-NEXT:    slli a2, a2, 32
 ; RV64-SLOW-NEXT:    or a1, a2, a1
-; RV64-SLOW-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
 ; RV64-SLOW-NEXT:    vmv.s.x v9, a1
 ; RV64-SLOW-NEXT:    andi a0, a0, 2
 ; RV64-SLOW-NEXT:    beqz a0, .LBB5_2
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
@@ -1353,10 +1353,9 @@
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; RV32-NEXT:    vwredsum.vs v9, v8, v9, v0.t
-; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
@@ -1388,10 +1387,9 @@
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; RV32-NEXT:    vwredsum.vs v9, v8, v9, v0.t
-; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    li a1, 32
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll
@@ -1143,9 +1143,8 @@
 define i64 @vreduce_add_nxv1i64(<vscale x 1 x i64> %v) {
 ; RV32-LABEL: vreduce_add_nxv1i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vmv.s.x v9, zero
 ; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT:    vmv.s.x v9, zero
 ; RV32-NEXT:    vredsum.vs v8, v8, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
 ; RV32-NEXT:    li a1, 32
@@ -1172,10 +1171,9 @@
 ; RV32-NEXT:    vmv.s.x v9, zero
 ; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; RV32-NEXT:    vwredsum.vs v8, v8, v9
-; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a0, v8
 ; RV32-NEXT:    li a1, 32
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    ret
@@ -1201,10 +1199,9 @@
 ; RV32-NEXT:    vmv.s.x v9, zero
 ; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; RV32-NEXT:    vwredsumu.vs v8, v8, v9
-; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vmv.x.s a0, v8
 ; RV32-NEXT:    li a1, 32
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    ret
@@ -1372,9 +1369,8 @@
 define i64 @vreduce_xor_nxv1i64(<vscale x 1 x i64> %v) {
 ; RV32-LABEL: vreduce_xor_nxv1i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vmv.s.x v9, zero
 ; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT:    vmv.s.x v9, zero
 ; RV32-NEXT:    vredxor.vs v8, v8, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
 ; RV32-NEXT:    li a1, 32
@@ -1399,9 +1395,8 @@
 define i64 @vreduce_add_nxv2i64(<vscale x 2 x i64> %v) {
 ; RV32-LABEL: vreduce_add_nxv2i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vmv.s.x v10, zero
 ; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV32-NEXT:    vmv.s.x v10, zero
 ; RV32-NEXT:    vredsum.vs v8, v8, v10
 ; RV32-NEXT:    vmv.x.s a0, v8
 ; RV32-NEXT:    li a1, 32
@@ -1626,9 +1621,8 @@
 define i64 @vreduce_xor_nxv2i64(<vscale x 2 x i64> %v) {
 ; RV32-LABEL: vreduce_xor_nxv2i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vmv.s.x v10, zero
 ; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV32-NEXT:    vmv.s.x v10, zero
 ; RV32-NEXT:    vredxor.vs v8, v8, v10
 ; RV32-NEXT:    vmv.x.s a0, v8
 ; RV32-NEXT:    li a1, 32
@@ -1653,9 +1647,8 @@
 define i64 @vreduce_add_nxv4i64(<vscale x 4 x i64> %v) {
 ; RV32-LABEL: vreduce_add_nxv4i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vmv.s.x v12, zero
 ; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT:    vmv.s.x v12, zero
 ; RV32-NEXT:    vredsum.vs v8, v8, v12
 ; RV32-NEXT:    vmv.x.s a0, v8
 ; RV32-NEXT:    li a1, 32
@@ -1880,9 +1873,8 @@
 define i64 @vreduce_xor_nxv4i64(<vscale x 4 x i64> %v) {
 ; RV32-LABEL: vreduce_xor_nxv4i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vmv.s.x v12, zero
 ; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT:    vmv.s.x v12, zero
 ; RV32-NEXT:    vredxor.vs v8, v8, v12
 ; RV32-NEXT:    vmv.x.s a0, v8
 ; RV32-NEXT:    li a1, 32
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -584,9 +584,8 @@
 define i64 @bad_removal(<2 x i64> %arg) {
 ; CHECK-LABEL: bad_removal:
 ; CHECK:       # %bb.0: # %bb
-; CHECK-NEXT:    vsetivli zero, 16, e64, m1, ta, ma
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    vsetivli a1, 16, e64, m1, ta, ma
+; CHECK-NEXT:    vsetivli a0, 16, e64, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a1, v8
 ; CHECK-NEXT:    add a0, a0, a1
 ; CHECK-NEXT:    ret
 bb: