diff --git a/llvm/lib/Target/RISCV/RISCVCleanupVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVCleanupVSETVLI.cpp
--- a/llvm/lib/Target/RISCV/RISCVCleanupVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVCleanupVSETVLI.cpp
@@ -52,6 +52,56 @@ INITIALIZE_PASS(RISCVCleanupVSETVLI, DEBUG_TYPE,
                 RISCV_CLEANUP_VSETVLI_NAME, false, false)
 
+static bool isRedundantVSETVLI(MachineInstr &MI, MachineInstr *PrevVSETVLI) {
+  // If we don't have a previous VSET{I}VLI or the VL output isn't dead, we
+  // can't remove this VSETVLI.
+  if (!PrevVSETVLI || !MI.getOperand(0).isDead())
+    return false;
+
+  // Does this VSET{I}VLI use the same VTYPE immediate?
+  int64_t PrevVTYPEImm = PrevVSETVLI->getOperand(2).getImm();
+  int64_t VTYPEImm = MI.getOperand(2).getImm();
+  if (PrevVTYPEImm != VTYPEImm)
+    return false;
+
+  if (MI.getOpcode() == RISCV::PseudoVSETIVLI) {
+    // If the previous opcode wasn't vsetivli we can't compare them.
+    if (PrevVSETVLI->getOpcode() != RISCV::PseudoVSETIVLI)
+      return false;
+
+    // For VSETIVLI, we can just compare the immediates.
+    return PrevVSETVLI->getOperand(1).getImm() == MI.getOperand(1).getImm();
+  }
+
+  assert(MI.getOpcode() == RISCV::PseudoVSETVLI);
+  Register AVLReg = MI.getOperand(1).getReg();
+
+  // If this VSETVLI isn't changing VL, it is redundant.
+  if (AVLReg == RISCV::X0 && MI.getOperand(0).getReg() == RISCV::X0)
+    return true;
+
+  // If the previous opcode isn't vsetvli we can't do any more comparison.
+  if (PrevVSETVLI->getOpcode() != RISCV::PseudoVSETVLI)
+    return false;
+
+  // Does this VSETVLI use the same AVL register?
+  if (AVLReg != PrevVSETVLI->getOperand(1).getReg())
+    return false;
+
+  // If the AVLReg is X0 we must be setting VL to VLMAX. Keeping VL unchanged
+  // was handled above.
+  if (AVLReg == RISCV::X0) {
+    // This instruction is setting VL to VLMAX, which is redundant if the
+    // previous VSETVLI was also setting VL to VLMAX. It is not redundant if
+    // the previous one set VL to any other value or left VL unchanged.
+    Register PrevOutVL = PrevVSETVLI->getOperand(0).getReg();
+    return PrevOutVL != RISCV::X0;
+  }
+
+  // This vsetvli is redundant.
+  return true;
+}
+
 bool RISCVCleanupVSETVLI::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
   bool Changed = false;
   MachineInstr *PrevVSETVLI = nullptr;
@@ -70,62 +120,14 @@
       continue;
     }
 
-    // If we don't have a previous VSET{I}VLI or the VL output isn't dead, we
-    // can't remove this VSETVLI.
-    if (!PrevVSETVLI || !MI.getOperand(0).isDead()) {
-      PrevVSETVLI = &MI;
-      continue;
-    }
-
-    // If a previous "set vl" instruction opcode is different from this one, we
-    // can't differentiate the AVL values.
-    if (PrevVSETVLI->getOpcode() != MI.getOpcode()) {
-      PrevVSETVLI = &MI;
-      continue;
-    }
-
-    // The remaining two cases are
-    // 1. PrevVSETVLI = PseudoVSETVLI
-    //    MI = PseudoVSETVLI
-    //
-    // 2. PrevVSETVLI = PseudoVSETIVLI
-    //    MI = PseudoVSETIVLI
-    Register AVLReg;
-    bool SameAVL = false;
-    if (MI.getOpcode() == RISCV::PseudoVSETVLI) {
-      AVLReg = MI.getOperand(1).getReg();
-      SameAVL = PrevVSETVLI->getOperand(1).getReg() == AVLReg;
-    } else { // RISCV::PseudoVSETIVLI
-      SameAVL =
-          PrevVSETVLI->getOperand(1).getImm() == MI.getOperand(1).getImm();
-    }
-    int64_t PrevVTYPEImm = PrevVSETVLI->getOperand(2).getImm();
-    int64_t VTYPEImm = MI.getOperand(2).getImm();
-
-    // Does this VSET{I}VLI use the same AVL register/value and VTYPE immediate?
-    if (!SameAVL || PrevVTYPEImm != VTYPEImm) {
+    if (isRedundantVSETVLI(MI, PrevVSETVLI)) {
+      // This VSETVLI is redundant, remove it.
+      MI.eraseFromParent();
+      Changed = true;
+    } else {
+      // Otherwise, remember this VSET{I}VLI for the next iteration.
       PrevVSETVLI = &MI;
-      continue;
     }
-
-    // If the AVLReg is X0 we need to look at the output VL of both VSETVLIs.
-    if ((MI.getOpcode() == RISCV::PseudoVSETVLI) && (AVLReg == RISCV::X0)) {
-      assert((PrevVSETVLI->getOpcode() == RISCV::PseudoVSETVLI) &&
-             "Unexpected vsetvli opcode.");
-      Register PrevOutVL = PrevVSETVLI->getOperand(0).getReg();
-      Register OutVL = MI.getOperand(0).getReg();
-      // We can't remove if the previous VSETVLI left VL unchanged and the
-      // current instruction is setting it to VLMAX. Without knowing the VL
-      // before the previous instruction we don't know if this is a change.
-      if (PrevOutVL == RISCV::X0 && OutVL != RISCV::X0) {
-        PrevVSETVLI = &MI;
-        continue;
-      }
-    }
-
-    // This VSETVLI is redundant, remove it.
-    MI.eraseFromParent();
-    Changed = true;
   }
 
   return Changed;
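(Note, not part of the patch: a minimal worked example of the rule above, reconstructed from the test updates that follow. Given a preceding vset{i}vli with the same VTYPE immediate, the x0,x0 form of vsetvli changes neither VL nor VTYPE, so when its VL result is dead the pass can erase it:

  vsetivli a0, 1, e16,mf4,ta,mu        # defines VL=1 and VTYPE
  vslidedown.vi v25, v8, 2
  vsetvli zero, zero, e16,mf4,ta,mu    # VL unchanged, same VTYPE -> erased
  vfmv.f.s fa0, v25

By contrast, a PseudoVSETVLI whose AVL is x0 but whose destination is a live register sets VL to VLMAX; isRedundantVSETVLI only treats it as redundant when the previous vsetvli also set VLMAX, i.e. also had AVL x0 and a non-x0 output. If the previous one merely left VL unchanged, the pass cannot prove the resulting VL matches and keeps the instruction.)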
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll
@@ -17,7 +17,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,mf4,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -29,7 +28,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,mf4,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -51,7 +49,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,mf2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -63,7 +60,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,mf2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -85,7 +81,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -97,7 +92,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -119,7 +113,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v26, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -131,7 +124,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v26, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -153,7 +145,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vi v28, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -165,7 +156,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vx v28, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -187,7 +177,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -199,7 +188,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -221,7 +209,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e32,mf2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -233,7 +220,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e32,mf2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -255,7 +241,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e32,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -267,7 +252,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e32,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -289,7 +273,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e32,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v26, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -301,7 +284,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e32,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v26, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -323,7 +305,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e32,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vi v28, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -335,7 +316,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e32,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vx v28, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -357,7 +337,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e32,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -369,7 +348,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e32,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -391,7 +369,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -403,7 +380,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -425,7 +401,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e64,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v26, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -437,7 +412,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e64,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v26, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -459,7 +433,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e64,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vi v28, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -471,7 +444,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e64,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vx v28, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -493,7 +465,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e64,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -505,7 +476,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e64,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll
@@ -17,7 +17,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,mf4,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -29,7 +28,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,mf4,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -51,7 +49,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,mf2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -63,7 +60,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,mf2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -85,7 +81,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -97,7 +92,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -119,7 +113,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v26, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -131,7 +124,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v26, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -153,7 +145,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vi v28, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -165,7 +156,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vx v28, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -187,7 +177,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -199,7 +188,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -221,7 +209,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e32,mf2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -233,7 +220,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e32,mf2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -255,7 +241,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e32,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -267,7 +252,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e32,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -289,7 +273,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e32,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v26, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -301,7 +284,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e32,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v26, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -323,7 +305,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e32,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vi v28, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -335,7 +316,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e32,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vx v28, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -357,7 +337,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e32,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -369,7 +348,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e32,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -391,7 +369,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -403,7 +380,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -425,7 +401,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e64,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v26, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -437,7 +412,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e64,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v26, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -459,7 +433,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e64,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vi v28, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -471,7 +444,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e64,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vx v28, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -493,7 +465,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e64,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -505,7 +476,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e64,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT: vfmv.f.s fa0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
@@ -17,7 +17,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e8,mf8,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -29,7 +28,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e8,mf8,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -51,7 +49,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e8,mf4,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -63,7 +60,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e8,mf4,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -85,7 +81,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e8,mf2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -97,7 +92,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e8,mf2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -119,7 +113,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e8,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -131,7 +124,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e8,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -153,7 +145,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e8,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v26, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -165,7 +156,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e8,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v26, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -187,7 +177,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e8,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vi v28, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -199,7 +188,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e8,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vx v28, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -221,7 +209,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e8,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e8,m8,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -233,7 +220,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e8,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e8,m8,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -255,7 +241,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,mf4,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -267,7 +252,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,mf4,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -289,7 +273,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,mf2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -301,7 +284,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,mf2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -323,7 +305,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -335,7 +316,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -357,7 +337,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v26, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -369,7 +348,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v26, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -391,7 +369,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vi v28, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -403,7 +380,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vx v28, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -425,7 +401,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -437,7 +412,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -459,7 +433,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e32,mf2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -471,7 +444,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e32,mf2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -493,7 +465,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e32,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -505,7 +476,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e32,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -527,7 +497,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e32,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v26, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -539,7 +508,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e32,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v26, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -561,7 +529,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e32,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vi v28, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -573,7 +540,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e32,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vx v28, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -595,7 +561,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e32,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -607,7 +572,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e32,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -620,7 +584,6 @@
 ; CHECK-NEXT: addi a0, zero, 32
 ; CHECK-NEXT: vsetivli a1, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
@@ -633,12 +596,9 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -650,12 +610,9 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -668,7 +625,6 @@
 ; CHECK-NEXT: addi a0, zero, 32
 ; CHECK-NEXT: vsetivli a1, 1, e64,m2,ta,mu
 ; CHECK-NEXT: vsrl.vx v26, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v26
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
@@ -681,12 +637,9 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e64,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v26, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v26
 ; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsetivli a2, 1, e64,m2,ta,mu
 ; CHECK-NEXT: vsrl.vx v26, v26, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -698,12 +651,9 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e64,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v26, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v26
 ; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsetivli a2, 1, e64,m2,ta,mu
 ; CHECK-NEXT: vsrl.vx v26, v26, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -716,7 +666,6 @@
 ; CHECK-NEXT: addi a0, zero, 32
 ; CHECK-NEXT: vsetivli a1, 1, e64,m4,ta,mu
 ; CHECK-NEXT: vsrl.vx v28, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v28
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
@@ -729,12 +678,9 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e64,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vi v28, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v28
 ; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsetivli a2, 1, e64,m4,ta,mu
 ; CHECK-NEXT: vsrl.vx v28, v28, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -746,12 +692,9 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e64,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vx v28, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v28
 ; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsetivli a2, 1, e64,m4,ta,mu
 ; CHECK-NEXT: vsrl.vx v28, v28, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -764,7 +707,6 @@
 ; CHECK-NEXT: addi a0, zero, 32
 ; CHECK-NEXT: vsetivli a1, 1, e64,m8,ta,mu
 ; CHECK-NEXT: vsrl.vx v16, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v16
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
@@ -777,12 +719,9 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e64,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsetivli a2, 1, e64,m8,ta,mu
 ; CHECK-NEXT: vsrl.vx v8, v8, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -794,12 +733,9 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e64,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsetivli a2, 1, e64,m8,ta,mu
 ; CHECK-NEXT: vsrl.vx v8, v8, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
@@ -17,7 +17,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e8,mf8,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -29,7 +28,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e8,mf8,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -51,7 +49,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e8,mf4,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -63,7 +60,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e8,mf4,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -85,7 +81,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e8,mf2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -97,7 +92,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e8,mf2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -119,7 +113,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e8,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -131,7 +124,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e8,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -153,7 +145,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e8,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v26, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -165,7 +156,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e8,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v26, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -187,7 +177,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e8,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vi v28, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -199,7 +188,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e8,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vx v28, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -221,7 +209,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e8,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e8,m8,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -233,7 +220,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e8,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e8,m8,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -255,7 +241,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,mf4,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -267,7 +252,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,mf4,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -289,7 +273,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,mf2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -301,7 +284,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,mf2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -323,7 +305,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -335,7 +316,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -357,7 +337,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v26, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -369,7 +348,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v26, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -391,7 +369,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vi v28, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -403,7 +380,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vx v28, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -425,7 +401,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e16,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -437,7 +412,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e16,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -459,7 +433,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e32,mf2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -471,7 +444,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e32,mf2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -493,7 +465,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e32,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -505,7 +476,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e32,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -527,7 +497,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e32,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v26, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -539,7 +508,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e32,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v26, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -561,7 +529,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e32,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vi v28, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -573,7 +540,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e32,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vx v28, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -595,7 +561,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e32,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -607,7 +572,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e32,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e32,m8,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -629,7 +593,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vi v25, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -641,7 +604,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vslidedown.vx v25, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -663,7 +625,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e64,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vi v26, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -675,7 +636,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e64,m2,ta,mu
 ; CHECK-NEXT: vslidedown.vx v26, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v26
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -697,7 +657,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e64,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vi v28, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -709,7 +668,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e64,m4,ta,mu
 ; CHECK-NEXT: vslidedown.vx v28, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v28
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
@@ -731,7 +689,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a0, 1, e64,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 2
@@ -743,7 +700,6 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a1, 1, e64,m8,ta,mu
 ; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: ret
   %r = extractelement %v, i32 %idx
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
@@ -9,7 +9,6 @@
 ; RV32-NEXT: vle8.v v25, (a0)
 ; RV32-NEXT: vsetivli a0, 1, e8,m1,ta,mu
 ; RV32-NEXT: vslidedown.vi v25, v25, 7
-; RV32-NEXT: vsetvli zero, zero, e8,m1,ta,mu
 ; RV32-NEXT: vmv.x.s a0, v25
 ; RV32-NEXT: ret
 ;
@@ -19,7 +18,6 @@
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetivli a1, 16, e8,m1,ta,mu
 ; RV64-NEXT: vle8.v v25, (a0)
 ; RV64-NEXT: vsetivli a0, 1, e8,m1,ta,mu
 ; RV64-NEXT: vslidedown.vi v25, v25, 7
-; RV64-NEXT: vsetvli zero, zero, e8,m1,ta,mu
 ; RV64-NEXT: vmv.x.s a0, v25
 ; RV64-NEXT: ret
   %a = load <16 x i8>, <16 x i8>* %x
@@ -34,7 +32,6 @@
 ; RV32-NEXT: vle16.v v25, (a0)
 ; RV32-NEXT: vsetivli a0, 1, e16,m1,ta,mu
 ; RV32-NEXT: vslidedown.vi v25, v25, 7
-; RV32-NEXT: vsetvli zero, zero, e16,m1,ta,mu
 ; RV32-NEXT: vmv.x.s a0, v25
 ; RV32-NEXT: ret
 ;
@@ -44,7 +41,6 @@
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetivli a1, 8, e16,m1,ta,mu
 ; RV64-NEXT: vle16.v v25, (a0)
 ; RV64-NEXT: vsetivli a0, 1, e16,m1,ta,mu
 ; RV64-NEXT: vslidedown.vi v25, v25, 7
-; RV64-NEXT: vsetvli zero, zero, e16,m1,ta,mu
 ; RV64-NEXT: vmv.x.s a0, v25
 ; RV64-NEXT: ret
   %a = load <8 x i16>, <8 x i16>* %x
@@ -59,7 +55,6 @@
 ; RV32-NEXT: vle32.v v25, (a0)
 ; RV32-NEXT: vsetivli a0, 1, e32,m1,ta,mu
 ; RV32-NEXT: vslidedown.vi v25, v25, 2
-; RV32-NEXT: vsetvli zero, zero, e32,m1,ta,mu
 ; RV32-NEXT: vmv.x.s a0, v25
 ; RV32-NEXT: ret
 ;
@@ -69,7 +64,6 @@
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetivli a1, 4, e32,m1,ta,mu
 ; RV64-NEXT: vle32.v v25, (a0)
 ; RV64-NEXT: vsetivli a0, 1, e32,m1,ta,mu
 ; RV64-NEXT: vslidedown.vi v25, v25, 2
-; RV64-NEXT: vsetvli zero, zero, e32,m1,ta,mu
 ; RV64-NEXT: vmv.x.s a0, v25
 ; RV64-NEXT: ret
   %a = load <4 x i32>, <4 x i32>* %x
@@ -82,12 +76,10 @@
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu
 ; RV32-NEXT: vle64.v v25, (a0)
-; RV32-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; RV32-NEXT: vmv.x.s a0, v25
 ; RV32-NEXT: addi a1, zero, 32
 ; RV32-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; RV32-NEXT: vsrl.vx v25, v25, a1
-; RV32-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; RV32-NEXT: vmv.x.s a1, v25
 ; RV32-NEXT: ret
 ;
@@ -95,7 +87,6 @@
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetivli a1, 2, e64,m1,ta,mu
 ; RV64-NEXT: vle64.v v25, (a0)
-; RV64-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; RV64-NEXT: vmv.x.s a0, v25
 ; RV64-NEXT: ret
   %a = load <2 x i64>, <2 x i64>* %x
@@ -111,7 +102,6 @@
 ; RV32-NEXT: vle8.v v26, (a0)
 ; RV32-NEXT: vsetivli a0, 1, e8,m2,ta,mu
 ; RV32-NEXT: vslidedown.vi v26, v26, 7
-; RV32-NEXT: vsetvli zero, zero, e8,m2,ta,mu
 ; RV32-NEXT: vmv.x.s a0, v26
 ; RV32-NEXT: ret
 ;
@@ -122,7 +112,6 @@
 ; RV64-NEXT: vle8.v v26, (a0)
 ; RV64-NEXT: vsetivli a0, 1, e8,m2,ta,mu
 ; RV64-NEXT: vslidedown.vi v26, v26, 7
-; RV64-NEXT: vsetvli zero, zero, e8,m2,ta,mu
 ; RV64-NEXT: vmv.x.s a0, v26
 ; RV64-NEXT: ret
   %a = load <32 x i8>, <32 x i8>* %x
@@ -137,7 +126,6 @@
 ; RV32-NEXT: vle16.v v26, (a0)
 ; RV32-NEXT: vsetivli a0, 1, e16,m2,ta,mu
 ; RV32-NEXT: vslidedown.vi v26, v26, 7
-; RV32-NEXT: vsetvli zero, zero, e16,m2,ta,mu
 ; RV32-NEXT: vmv.x.s a0, v26
 ; RV32-NEXT: ret
 ;
@@ -147,7 +135,6 @@
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetivli a1, 16, e16,m2,ta,mu
 ; RV64-NEXT: vle16.v v26, (a0)
 ; RV64-NEXT: vsetivli a0, 1, e16,m2,ta,mu
 ; RV64-NEXT: vslidedown.vi v26, v26, 7
-; RV64-NEXT: vsetvli zero, zero, e16,m2,ta,mu
 ; RV64-NEXT: vmv.x.s a0, v26
 ; RV64-NEXT: ret
   %a = load <16 x i16>, <16 x i16>* %x
@@ -162,7 +149,6 @@
 ; RV32-NEXT: vle32.v v26, (a0)
 ; RV32-NEXT: vsetivli a0, 1, e32,m2,ta,mu
 ; RV32-NEXT: vslidedown.vi v26, v26, 6
-; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT: vmv.x.s a0, v26
 ; RV32-NEXT: ret
 ;
@@ -172,7 +158,6 @@
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetivli a1, 8, e32,m2,ta,mu
 ; RV64-NEXT: vle32.v v26, (a0)
 ; RV64-NEXT: vsetivli a0, 1, e32,m2,ta,mu
 ; RV64-NEXT: vslidedown.vi v26, v26, 6
-; RV64-NEXT: vsetvli zero, zero, e32,m2,ta,mu
 ; RV64-NEXT: vmv.x.s a0, v26
 ; RV64-NEXT: ret
   %a = load <8 x i32>, <8 x i32>* %x
@@ -187,12 +172,9 @@
 ; RV32-NEXT: vle64.v v26, (a0)
 ; RV32-NEXT: vsetivli a0, 1, e64,m2,ta,mu
 ; RV32-NEXT: vslidedown.vi v26, v26, 3
-; RV32-NEXT: vsetvli zero, zero, e64,m2,ta,mu
 ; RV32-NEXT: vmv.x.s a0, v26
 ; RV32-NEXT: addi a1, zero, 32
-; RV32-NEXT: vsetivli a2, 1, e64,m2,ta,mu
 ; RV32-NEXT: vsrl.vx v26, v26, a1
-; RV32-NEXT: vsetvli zero, zero, e64,m2,ta,mu
 ; RV32-NEXT: vmv.x.s a1, v26
 ; RV32-NEXT: ret
 ;
@@ -202,7 +184,6 @@
 ; RV64-NEXT: vle64.v v26, (a0)
 ; RV64-NEXT: vsetivli a0, 1, e64,m2,ta,mu
 ; RV64-NEXT: vslidedown.vi v26, v26, 3
-; RV64-NEXT: vsetvli zero, zero, e64,m2,ta,mu
 ; RV64-NEXT: vmv.x.s a0, v26
 ; RV64-NEXT: ret
   %a = load <4 x i64>, <4 x i64>* %x
@@ -221,11 +202,8 @@
 ; RV32-NEXT: vle32.v v26, (a0)
 ; RV32-NEXT: vsetivli a0, 1, e32,m2,ta,mu
 ; RV32-NEXT: vslidedown.vi v28, v26, 4
-; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT: vmv.x.s a0, v28
-; RV32-NEXT: vsetivli a1, 1, e32,m2,ta,mu
 ; RV32-NEXT: vslidedown.vi v26, v26, 5
-; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT: vmv.x.s a1, v26
 ; RV32-NEXT: ret
 ;
@@ -235,7 +213,6 @@
 ; RV64-NEXT: vle64.v v26, (a0)
 ; RV64-NEXT: vsetivli a0, 1, e64,m2,ta,mu
 ; RV64-NEXT: vslidedown.vi v26, v26, 2
-; RV64-NEXT: vsetvli zero, zero, e64,m2,ta,mu
 ; RV64-NEXT: vmv.x.s a0, v26
 ; RV64-NEXT: ret
   %a = load <3 x i64>, <3 x i64>* %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
@@ -21,44 +21,30 @@
 ; RV32-NEXT: sw a1, 64(sp)
 ; RV32-NEXT: vsetivli a1, 8, e32,m2,ta,mu
 ; RV32-NEXT: vle32.v v26, (a0)
-; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT: vmv.x.s a1, v26
 ; RV32-NEXT: sw a1, 0(sp)
-; RV32-NEXT: vsetivli a1, 8, e32,m2,ta,mu
 ; RV32-NEXT: addi a1, sp, 32
 ; RV32-NEXT: vle32.v v28, (a1)
-; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT: vmv.x.s a1, v28
 ; RV32-NEXT: sw a1, 28(sp)
-; RV32-NEXT: vsetivli a1, 8, e32,m2,ta,mu
 ; RV32-NEXT: addi a1, sp, 64
 ; RV32-NEXT: vle32.v v28, (a1)
-; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT: vmv.x.s a1, v28
 ; RV32-NEXT: sw a1, 24(sp)
 ; RV32-NEXT: vsetivli a1, 1, e32,m2,ta,mu
 ; RV32-NEXT: vslidedown.vi v28, v26, 5
-; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT: vmv.x.s a1, v28
 ; RV32-NEXT: sw a1, 20(sp)
-; RV32-NEXT: vsetivli a1, 1, e32,m2,ta,mu
 ; RV32-NEXT: vslidedown.vi v28, v26, 4
-; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT: vmv.x.s a1, v28
 ; RV32-NEXT: sw a1, 16(sp)
-; RV32-NEXT: vsetivli a1, 1, e32,m2,ta,mu
 ; RV32-NEXT: vslidedown.vi v28, v26, 3
-; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT: vmv.x.s a1, v28
 ; RV32-NEXT: sw a1, 12(sp)
-; RV32-NEXT: vsetivli a1, 1, e32,m2,ta,mu
 ; RV32-NEXT: vslidedown.vi v28, v26, 2
-; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT: vmv.x.s a1, v28
 ; RV32-NEXT: sw a1, 8(sp)
-; RV32-NEXT: vsetivli a1, 1, e32,m2,ta,mu
 ; RV32-NEXT: vslidedown.vi v26, v26, 1
-; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
 ; RV32-NEXT: vmv.x.s a1, v26
 ; RV32-NEXT: sw a1, 4(sp)
 ; RV32-NEXT: vsetivli a1, 8, e32,m2,ta,mu
@@ -84,23 +70,17 @@
 ; RV64-NEXT: vsetivli a2, 4, e64,m2,ta,mu
 ; RV64-NEXT: vle64.v v26, (a0)
 ; RV64-NEXT: sd a1, 32(sp)
-; RV64-NEXT: vsetvli zero, zero, e64,m2,ta,mu
 ; RV64-NEXT: vmv.x.s a1, v26
 ; RV64-NEXT: sd a1, 0(sp)
-; RV64-NEXT: vsetivli a1, 4, e64,m2,ta,mu
 ; RV64-NEXT: addi a1, sp, 32
 ; RV64-NEXT: vle64.v v28, (a1)
-; RV64-NEXT: vsetvli zero, zero, e64,m2,ta,mu
 ; RV64-NEXT: vmv.x.s a1, v28
 ; RV64-NEXT: sd a1, 24(sp)
 ; RV64-NEXT: vsetivli a1, 1, e64,m2,ta,mu
 ; RV64-NEXT: vslidedown.vi v28, v26, 2
-; RV64-NEXT: vsetvli zero, zero, e64,m2,ta,mu
 ; RV64-NEXT: vmv.x.s a1, v28
 ; RV64-NEXT: sd a1, 16(sp)
-; RV64-NEXT: vsetivli a1, 1, e64,m2,ta,mu
 ; RV64-NEXT: vslidedown.vi v26, v26, 1
-; RV64-NEXT: vsetvli zero, zero, e64,m2,ta,mu
 ; RV64-NEXT: vmv.x.s a1, v26
 ; RV64-NEXT: sd a1, 8(sp)
 ; RV64-NEXT: vsetivli a1, 4, e64,m2,ta,mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll
@@ -1157,7 +1157,6 @@
 ; CHECK-NEXT: addi a1, zero, 32
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.add.nxv1i64( %v)
@@ -1176,7 +1175,6 @@
 ; CHECK-NEXT: addi a1, zero, 32
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.umax.nxv1i64( %v)
@@ -1201,7 +1199,6 @@
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.smax.nxv1i64( %v)
@@ -1220,7 +1217,6 @@
 ; CHECK-NEXT: addi a1, zero, 32
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.umin.nxv1i64( %v)
@@ -1246,7 +1242,6 @@
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.smin.nxv1i64( %v)
@@ -1265,7 +1260,6 @@
 ; CHECK-NEXT: addi a1, zero, 32
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.and.nxv1i64( %v)
@@ -1284,7 +1278,6 @@
 ; CHECK-NEXT: addi a1, zero, 32
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.or.nxv1i64( %v)
@@ -1303,7 +1296,6 @@
 ; CHECK-NEXT: addi a1, zero, 32
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.xor.nxv1i64( %v)
@@ -1324,7 +1316,6 @@
 ; CHECK-NEXT: addi a1, zero, 32
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.add.nxv2i64( %v)
@@ -1345,7 +1336,6 @@
 ; CHECK-NEXT: addi a1, zero, 32
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.umax.nxv2i64( %v)
@@ -1372,7 +1362,6 @@
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.smax.nxv2i64( %v)
@@ -1393,7 +1382,6 @@
 ; CHECK-NEXT: addi a1, zero, 32
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.umin.nxv2i64( %v)
@@ -1421,7 +1409,6 @@
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.smin.nxv2i64( %v)
@@ -1442,7 +1429,6 @@
 ; CHECK-NEXT: addi a1, zero, 32
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.and.nxv2i64( %v)
@@ -1463,7 +1449,6 @@
 ; CHECK-NEXT: addi a1, zero, 32
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.or.nxv2i64( %v)
@@ -1484,7 +1469,6 @@
 ; CHECK-NEXT: addi a1, zero, 32
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.xor.nxv2i64( %v)
@@ -1505,7 +1489,6 @@
 ; CHECK-NEXT: addi a1, zero, 32
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.add.nxv4i64( %v)
@@ -1526,7 +1509,6 @@
 ; CHECK-NEXT: addi a1, zero, 32
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.umax.nxv4i64( %v)
@@ -1553,7 +1535,6 @@
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.smax.nxv4i64( %v)
@@ -1574,7 +1555,6 @@
 ; CHECK-NEXT: addi a1, zero, 32
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.umin.nxv4i64( %v)
@@ -1602,7 +1582,6 @@
 ; CHECK-NEXT: vmv.x.s a0, v25
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.smin.nxv4i64( %v)
@@ -1623,7 +1602,6 @@
 ; CHECK-NEXT: addi a1, zero, 32
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.and.nxv4i64( %v)
@@ -1644,7 +1622,6 @@
 ; CHECK-NEXT: addi a1, zero, 32
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.or.nxv4i64( %v)
@@ -1665,7 +1642,6 @@
 ; CHECK-NEXT: addi a1, zero, 32
 ; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
 ; CHECK-NEXT: vsrl.vx v25, v25, a1
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
 ; CHECK-NEXT: vmv.x.s a1, v25
 ; CHECK-NEXT: ret
   %red = call i64 @llvm.vector.reduce.xor.nxv4i64( %v)
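(Note, not part of the patch: the i64-on-RV32 sequences above exercise both removal paths together. Reconstructed and abbreviated from the reduction tests, the before-state is:

  vsetivli a0, 1, e64,m1,ta,mu        # VL=1 for the scalar moves
  vmv.x.s a0, v25                     # low 32 bits
  addi a1, zero, 32
  vsetivli a2, 1, e64,m1,ta,mu        # erased: same immediate AVL and VTYPE, dead VL output
  vsrl.vx v25, v25, a1
  vsetvli zero, zero, e64,m1,ta,mu    # erased: VL unchanged, same VTYPE
  vmv.x.s a1, v25                     # high 32 bits

The second vsetivli is removed by the PseudoVSETIVLI path of isRedundantVSETVLI, which compares the AVL immediates and VTYPE directly; the zero,zero vsetvli is removed by the unchanged-VL path. The result, visible throughout the updated CHECK lines, is a single vset{i}vli per VL/VTYPE region.)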