diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -431,11 +431,22 @@
   Register AVLReg = Info.getAVLReg();
   if (AVLReg == RISCV::NoRegister) {
-    BuildMI(MBB, MI, DL, TII->get(RISCV::PseudoVSETVLI))
+    // We can only use x0, x0 if there's no chance of the vtype change causing
+    // the previous vl to become invalid.
+    if (PrevInfo.isValid() && !PrevInfo.isUnknown() &&
+        Info.hasSameVLMAX(PrevInfo)) {
+      BuildMI(MBB, MI, DL, TII->get(RISCV::PseudoVSETVLI))
+          .addReg(RISCV::X0, RegState::Define | RegState::Dead)
+          .addReg(RISCV::X0, RegState::Kill)
+          .addImm(Info.encodeVTYPE())
+          .addReg(RISCV::VL, RegState::Implicit);
+      return;
+    }
+    // Otherwise use an AVL of 0 to avoid depending on previous vl.
+    BuildMI(MBB, MI, DL, TII->get(RISCV::PseudoVSETIVLI))
         .addReg(RISCV::X0, RegState::Define | RegState::Dead)
-        .addReg(RISCV::X0, RegState::Kill)
-        .addImm(Info.encodeVTYPE())
-        .addReg(RISCV::VL, RegState::Implicit);
+        .addImm(0)
+        .addImm(Info.encodeVTYPE());
     return;
   }
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv32.ll @@ -5,7 +5,7 @@ define half @extractelt_nxv1f16_0( %v) { ; CHECK-LABEL: extractelt_nxv1f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -37,7 +37,7 @@ define half @extractelt_nxv2f16_0( %v) { ; CHECK-LABEL: extractelt_nxv2f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -69,7 +69,7 @@ define half @extractelt_nxv4f16_0( %v) { ; CHECK-LABEL: extractelt_nxv4f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -101,7 +101,7 @@ define half @extractelt_nxv8f16_0( %v) { ; CHECK-LABEL: extractelt_nxv8f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -133,7 +133,7 @@ define half @extractelt_nxv16f16_0( %v) { ; CHECK-LABEL: extractelt_nxv16f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -165,7 +165,7 @@ define half @extractelt_nxv32f16_0( %v) { ; CHECK-LABEL: extractelt_nxv32f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -197,7 +197,7 @@ define float @extractelt_nxv1f32_0( %v) { ; CHECK-LABEL: extractelt_nxv1f32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -229,7 +229,7 @@ define float @extractelt_nxv2f32_0( %v) { ; CHECK-LABEL: extractelt_nxv2f32_0: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -261,7 +261,7 @@ define float @extractelt_nxv4f32_0( %v) { ; CHECK-LABEL: extractelt_nxv4f32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -293,7 +293,7 @@ define float @extractelt_nxv8f32_0( %v) { ; CHECK-LABEL: extractelt_nxv8f32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -325,7 +325,7 @@ define float @extractelt_nxv16f32_0( %v) { ; CHECK-LABEL: extractelt_nxv16f32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -357,7 +357,7 @@ define double @extractelt_nxv1f64_0( %v) { ; CHECK-LABEL: extractelt_nxv1f64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -389,7 +389,7 @@ define double @extractelt_nxv2f64_0( %v) { ; CHECK-LABEL: extractelt_nxv2f64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -421,7 +421,7 @@ define double @extractelt_nxv4f64_0( %v) { ; CHECK-LABEL: extractelt_nxv4f64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m4, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -453,7 +453,7 @@ define double @extractelt_nxv8f64_0( %v) { ; CHECK-LABEL: extractelt_nxv8f64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp-rv64.ll @@ -5,7 +5,7 @@ define half @extractelt_nxv1f16_0( %v) { ; CHECK-LABEL: extractelt_nxv1f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -37,7 +37,7 @@ define half @extractelt_nxv2f16_0( %v) { ; CHECK-LABEL: extractelt_nxv2f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -69,7 +69,7 @@ define half @extractelt_nxv4f16_0( %v) { ; CHECK-LABEL: extractelt_nxv4f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -101,7 +101,7 @@ define half @extractelt_nxv8f16_0( %v) { ; CHECK-LABEL: extractelt_nxv8f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetivli 
zero, 0, e16, m2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -133,7 +133,7 @@ define half @extractelt_nxv16f16_0( %v) { ; CHECK-LABEL: extractelt_nxv16f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -165,7 +165,7 @@ define half @extractelt_nxv32f16_0( %v) { ; CHECK-LABEL: extractelt_nxv32f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -197,7 +197,7 @@ define float @extractelt_nxv1f32_0( %v) { ; CHECK-LABEL: extractelt_nxv1f32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -229,7 +229,7 @@ define float @extractelt_nxv2f32_0( %v) { ; CHECK-LABEL: extractelt_nxv2f32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -261,7 +261,7 @@ define float @extractelt_nxv4f32_0( %v) { ; CHECK-LABEL: extractelt_nxv4f32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -293,7 +293,7 @@ define float @extractelt_nxv8f32_0( %v) { ; CHECK-LABEL: extractelt_nxv8f32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -325,7 +325,7 @@ define float @extractelt_nxv16f32_0( %v) { ; CHECK-LABEL: extractelt_nxv16f32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -357,7 +357,7 @@ define double @extractelt_nxv1f64_0( %v) { ; CHECK-LABEL: extractelt_nxv1f64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -389,7 +389,7 @@ define double @extractelt_nxv2f64_0( %v) { ; CHECK-LABEL: extractelt_nxv2f64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -421,7 +421,7 @@ define double @extractelt_nxv4f64_0( %v) { ; CHECK-LABEL: extractelt_nxv4f64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m4, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -453,7 +453,7 @@ define double @extractelt_nxv8f64_0( %v) { ; CHECK-LABEL: extractelt_nxv8f64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll @@ -5,7 +5,7 @@ define signext i8 @extractelt_nxv1i8_0( %v) { ; CHECK-LABEL: extractelt_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -37,7 +37,7 @@ define signext i8 @extractelt_nxv2i8_0( %v) { ; CHECK-LABEL: extractelt_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -69,7 +69,7 @@ define signext i8 @extractelt_nxv4i8_0( %v) { ; CHECK-LABEL: extractelt_nxv4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -101,7 +101,7 @@ define signext i8 @extractelt_nxv8i8_0( %v) { ; CHECK-LABEL: extractelt_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m1, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -133,7 +133,7 @@ define signext i8 @extractelt_nxv16i8_0( %v) { ; CHECK-LABEL: extractelt_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -165,7 +165,7 @@ define signext i8 @extractelt_nxv32i8_0( %v) { ; CHECK-LABEL: extractelt_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -197,7 +197,7 @@ define signext i8 @extractelt_nxv64i8_0( %v) { ; CHECK-LABEL: extractelt_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -229,7 +229,7 @@ define signext i16 @extractelt_nxv1i16_0( %v) { ; CHECK-LABEL: extractelt_nxv1i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -261,7 +261,7 @@ define signext i16 @extractelt_nxv2i16_0( %v) { ; CHECK-LABEL: extractelt_nxv2i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -293,7 +293,7 @@ define signext i16 @extractelt_nxv4i16_0( %v) { ; CHECK-LABEL: extractelt_nxv4i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -325,7 +325,7 @@ define signext i16 @extractelt_nxv8i16_0( %v) { ; CHECK-LABEL: extractelt_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -357,7 +357,7 @@ define signext i16 @extractelt_nxv16i16_0( %v) { ; CHECK-LABEL: extractelt_nxv16i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; 
CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -389,7 +389,7 @@ define signext i16 @extractelt_nxv32i16_0( %v) { ; CHECK-LABEL: extractelt_nxv32i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -421,7 +421,7 @@ define i32 @extractelt_nxv1i32_0( %v) { ; CHECK-LABEL: extractelt_nxv1i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -453,7 +453,7 @@ define i32 @extractelt_nxv2i32_0( %v) { ; CHECK-LABEL: extractelt_nxv2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -485,7 +485,7 @@ define i32 @extractelt_nxv4i32_0( %v) { ; CHECK-LABEL: extractelt_nxv4i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -517,7 +517,7 @@ define i32 @extractelt_nxv8i32_0( %v) { ; CHECK-LABEL: extractelt_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -549,7 +549,7 @@ define i32 @extractelt_nxv16i32_0( %v) { ; CHECK-LABEL: extractelt_nxv16i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll @@ -5,7 +5,7 @@ define signext i8 @extractelt_nxv1i8_0( %v) { ; CHECK-LABEL: extractelt_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -37,7 +37,7 @@ define signext i8 @extractelt_nxv2i8_0( %v) { ; CHECK-LABEL: extractelt_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -69,7 +69,7 @@ define signext i8 @extractelt_nxv4i8_0( %v) { ; CHECK-LABEL: extractelt_nxv4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -101,7 +101,7 @@ define signext i8 @extractelt_nxv8i8_0( %v) { ; CHECK-LABEL: extractelt_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m1, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -133,7 +133,7 @@ define signext i8 @extractelt_nxv16i8_0( %v) { ; CHECK-LABEL: extractelt_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ 
-165,7 +165,7 @@ define signext i8 @extractelt_nxv32i8_0( %v) { ; CHECK-LABEL: extractelt_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -197,7 +197,7 @@ define signext i8 @extractelt_nxv64i8_0( %v) { ; CHECK-LABEL: extractelt_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -229,7 +229,7 @@ define signext i16 @extractelt_nxv1i16_0( %v) { ; CHECK-LABEL: extractelt_nxv1i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -261,7 +261,7 @@ define signext i16 @extractelt_nxv2i16_0( %v) { ; CHECK-LABEL: extractelt_nxv2i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -293,7 +293,7 @@ define signext i16 @extractelt_nxv4i16_0( %v) { ; CHECK-LABEL: extractelt_nxv4i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -325,7 +325,7 @@ define signext i16 @extractelt_nxv8i16_0( %v) { ; CHECK-LABEL: extractelt_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -357,7 +357,7 @@ define signext i16 @extractelt_nxv16i16_0( %v) { ; CHECK-LABEL: extractelt_nxv16i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -389,7 +389,7 @@ define signext i16 @extractelt_nxv32i16_0( %v) { ; CHECK-LABEL: extractelt_nxv32i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -421,7 +421,7 @@ define signext i32 @extractelt_nxv1i32_0( %v) { ; CHECK-LABEL: extractelt_nxv1i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -453,7 +453,7 @@ define signext i32 @extractelt_nxv2i32_0( %v) { ; CHECK-LABEL: extractelt_nxv2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -485,7 +485,7 @@ define signext i32 @extractelt_nxv4i32_0( %v) { ; CHECK-LABEL: extractelt_nxv4i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -517,7 +517,7 @@ define signext i32 @extractelt_nxv8i32_0( %v) { ; CHECK-LABEL: extractelt_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; 
CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -549,7 +549,7 @@ define signext i32 @extractelt_nxv16i32_0( %v) { ; CHECK-LABEL: extractelt_nxv16i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -581,7 +581,7 @@ define i64 @extractelt_nxv1i64_0( %v) { ; CHECK-LABEL: extractelt_nxv1i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -613,7 +613,7 @@ define i64 @extractelt_nxv2i64_0( %v) { ; CHECK-LABEL: extractelt_nxv2i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -645,7 +645,7 @@ define i64 @extractelt_nxv4i64_0( %v) { ; CHECK-LABEL: extractelt_nxv4i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -677,7 +677,7 @@ define i64 @extractelt_nxv8i64_0( %v) { ; CHECK-LABEL: extractelt_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll @@ -17,7 +17,7 @@ define i8 @bitcast_v1i8_i8(<1 x i8> %a) { ; CHECK-LABEL: bitcast_v1i8_i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %b = bitcast <1 x i8> %a to i8 @@ -27,7 +27,7 @@ define i16 @bitcast_v2i8_i16(<2 x i8> %a) { ; CHECK-LABEL: bitcast_v2i8_i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %b = bitcast <2 x i8> %a to i16 @@ -37,7 +37,7 @@ define i16 @bitcast_v1i16_i16(<1 x i16> %a) { ; CHECK-LABEL: bitcast_v1i16_i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %b = bitcast <1 x i16> %a to i16 @@ -47,7 +47,7 @@ define i32 @bitcast_v4i8_i32(<4 x i8> %a) { ; CHECK-LABEL: bitcast_v4i8_i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %b = bitcast <4 x i8> %a to i32 @@ -57,7 +57,7 @@ define i32 @bitcast_v2i16_i32(<2 x i16> %a) { ; CHECK-LABEL: bitcast_v2i16_i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %b = bitcast <2 x i16> %a to i32 @@ -67,7 +67,7 @@ define i32 @bitcast_v1i32_i32(<1 x i32> %a) { ; CHECK-LABEL: bitcast_v1i32_i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %b = bitcast <1 x i32> %a to i32 @@ -86,7 +86,7 @@ ; ; 
RV64-LABEL: bitcast_v8i8_i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %b = bitcast <8 x i8> %a to i64 @@ -105,7 +105,7 @@ ; ; RV64-LABEL: bitcast_v4i16_i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %b = bitcast <4 x i16> %a to i64 @@ -124,7 +124,7 @@ ; ; RV64-LABEL: bitcast_v2i32_i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %b = bitcast <2 x i32> %a to i64 @@ -143,7 +143,7 @@ ; ; RV64-LABEL: bitcast_v1i64_i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %b = bitcast <1 x i64> %a to i64 @@ -153,7 +153,7 @@ define half @bitcast_v2i8_f16(<2 x i8> %a) { ; CHECK-LABEL: bitcast_v2i8_f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %b = bitcast <2 x i8> %a to half @@ -163,7 +163,7 @@ define half @bitcast_v1i16_f16(<1 x i16> %a) { ; CHECK-LABEL: bitcast_v1i16_f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %b = bitcast <1 x i16> %a to half @@ -173,7 +173,7 @@ define float @bitcast_v4i8_f32(<4 x i8> %a) { ; CHECK-LABEL: bitcast_v4i8_f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %b = bitcast <4 x i8> %a to float @@ -183,7 +183,7 @@ define float @bitcast_v2i16_f32(<2 x i16> %a) { ; CHECK-LABEL: bitcast_v2i16_f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %b = bitcast <2 x i16> %a to float @@ -193,7 +193,7 @@ define float @bitcast_v1i32_f32(<1 x i32> %a) { ; CHECK-LABEL: bitcast_v1i32_f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %b = bitcast <1 x i32> %a to float @@ -212,7 +212,7 @@ ; ; RV64-LABEL: bitcast_v8i8_f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %b = bitcast <8 x i8> %a to double @@ -231,7 +231,7 @@ ; ; RV64-LABEL: bitcast_v4i16_f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %b = bitcast <4 x i16> %a to double @@ -250,7 +250,7 @@ ; ; RV64-LABEL: bitcast_v2i32_f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %b = bitcast <2 x i32> %a to double @@ -269,7 +269,7 @@ ; ; RV64-LABEL: bitcast_v1i64_f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %b = bitcast <1 x i64> %a to double diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll @@ -5,7 +5,7 @@ define i16 @bitcast_v1f16_i16(<1 x half> %a) { ; CHECK-LABEL: bitcast_v1f16_i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %b = bitcast <1 x half> %a to i16 @@ -15,7 +15,7 @@ define half @bitcast_v1f16_f16(<1 x half> %a) { ; CHECK-LABEL: bitcast_v1f16_f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fmv.x.h a0, ft0 ; CHECK-NEXT: ret @@ -26,7 +26,7 @@ define i32 @bitcast_v2f16_i32(<2 x half> %a) { ; CHECK-LABEL: bitcast_v2f16_i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %b = bitcast <2 x half> %a to i32 @@ -36,7 +36,7 @@ define i32 @bitcast_v1f32_i32(<1 x float> %a) { ; CHECK-LABEL: bitcast_v1f32_i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %b = bitcast <1 x float> %a to i32 @@ -46,13 +46,13 @@ define float @bitcast_v2f16_f32(<2 x half> %a) { ; RV32-FP-LABEL: bitcast_v2f16_f32: ; RV32-FP: # %bb.0: -; RV32-FP-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV32-FP-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; RV32-FP-NEXT: vmv.x.s a0, v8 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_v2f16_f32: ; RV64-FP: # %bb.0: -; RV64-FP-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV64-FP-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; RV64-FP-NEXT: vfmv.f.s ft0, v8 ; RV64-FP-NEXT: fmv.x.w a0, ft0 ; RV64-FP-NEXT: ret @@ -63,13 +63,13 @@ define float @bitcast_v1f32_f32(<1 x float> %a) { ; RV32-FP-LABEL: bitcast_v1f32_f32: ; RV32-FP: # %bb.0: -; RV32-FP-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV32-FP-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; RV32-FP-NEXT: vmv.x.s a0, v8 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_v1f32_f32: ; RV64-FP: # %bb.0: -; RV64-FP-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV64-FP-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; RV64-FP-NEXT: vfmv.f.s ft0, v8 ; RV64-FP-NEXT: fmv.x.w a0, ft0 ; RV64-FP-NEXT: ret @@ -89,7 +89,7 @@ ; ; RV64-FP-LABEL: bitcast_v4f16_i64: ; RV64-FP: # %bb.0: -; RV64-FP-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-FP-NEXT: vmv.x.s a0, v8 ; RV64-FP-NEXT: ret %b = bitcast <4 x half> %a to i64 @@ -108,7 +108,7 @@ ; ; RV64-FP-LABEL: bitcast_v2f32_i64: ; RV64-FP: # %bb.0: -; RV64-FP-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-FP-NEXT: vmv.x.s a0, v8 ; RV64-FP-NEXT: ret %b = bitcast <2 x float> %a to i64 @@ -127,7 +127,7 @@ ; ; RV64-FP-LABEL: bitcast_v1f64_i64: ; RV64-FP: # %bb.0: -; RV64-FP-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-FP-NEXT: vmv.x.s a0, v8 ; RV64-FP-NEXT: ret %b = bitcast <1 x double> %a to i64 @@ -139,7 +139,7 @@ ; RV32-FP: # %bb.0: ; RV32-FP-NEXT: addi sp, sp, -16 ; RV32-FP-NEXT: .cfi_def_cfa_offset 16 -; RV32-FP-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV32-FP-NEXT: vfmv.f.s ft0, v8 ; RV32-FP-NEXT: fsd ft0, 8(sp) ; RV32-FP-NEXT: lw a0, 8(sp) 
@@ -149,7 +149,7 @@ ; ; RV64-FP-LABEL: bitcast_v4f16_f64: ; RV64-FP: # %bb.0: -; RV64-FP-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-FP-NEXT: vmv.x.s a0, v8 ; RV64-FP-NEXT: ret %b = bitcast <4 x half> %a to double @@ -161,7 +161,7 @@ ; RV32-FP: # %bb.0: ; RV32-FP-NEXT: addi sp, sp, -16 ; RV32-FP-NEXT: .cfi_def_cfa_offset 16 -; RV32-FP-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV32-FP-NEXT: vfmv.f.s ft0, v8 ; RV32-FP-NEXT: fsd ft0, 8(sp) ; RV32-FP-NEXT: lw a0, 8(sp) @@ -171,7 +171,7 @@ ; ; RV64-FP-LABEL: bitcast_v2f32_f64: ; RV64-FP: # %bb.0: -; RV64-FP-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-FP-NEXT: vmv.x.s a0, v8 ; RV64-FP-NEXT: ret %b = bitcast <2 x float> %a to double @@ -183,7 +183,7 @@ ; RV32-FP: # %bb.0: ; RV32-FP-NEXT: addi sp, sp, -16 ; RV32-FP-NEXT: .cfi_def_cfa_offset 16 -; RV32-FP-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV32-FP-NEXT: vfmv.f.s ft0, v8 ; RV32-FP-NEXT: fsd ft0, 8(sp) ; RV32-FP-NEXT: lw a0, 8(sp) @@ -193,7 +193,7 @@ ; ; RV64-FP-LABEL: bitcast_v1f64_f64: ; RV64-FP: # %bb.0: -; RV64-FP-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-FP-NEXT: vmv.x.s a0, v8 ; RV64-FP-NEXT: ret %b = bitcast <1 x double> %a to double diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll @@ -37,7 +37,7 @@ ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: addi sp, sp, -32 ; LMULMAX1-NEXT: .cfi_def_cfa_offset 32 -; LMULMAX1-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 0, e32, m2, ta, mu ; LMULMAX1-NEXT: vfmv.f.s ft0, v10 ; LMULMAX1-NEXT: fsw ft0, 24(sp) ; LMULMAX1-NEXT: vfmv.f.s ft0, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll @@ -321,7 +321,7 @@ define <4 x i8> @interleave_shuffles(<4 x i8> %x) { ; CHECK-LABEL: interleave_shuffles: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: vrgather.vi v25, v8, 1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll @@ -90,7 +90,7 @@ ; RV32-NEXT: andi a1, a0, 1 ; RV32-NEXT: beqz a1, .LBB4_2 ; RV32-NEXT: # %bb.1: # %cond.load -; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: lb a2, 1(a1) ; RV32-NEXT: lbu a1, 0(a1) @@ -137,7 +137,7 @@ ; RV64-NEXT: andi a1, a0, 1 ; RV64-NEXT: beqz a1, .LBB4_2 ; RV64-NEXT: # %bb.1: # %cond.load -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-NEXT: vmv.x.s a1, v8 ; RV64-NEXT: lb a2, 1(a1) ; RV64-NEXT: lbu a1, 0(a1) @@ -237,7 +237,7 @@ ; RV64-NEXT: andi a1, a0, 1 ; RV64-NEXT: beqz a1, .LBB5_2 ; RV64-NEXT: # %bb.1: # %cond.load -; 
RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; RV64-NEXT: vmv.x.s a1, v8 ; RV64-NEXT: lwu a2, 4(a1) ; RV64-NEXT: lwu a1, 0(a1) @@ -302,7 +302,7 @@ ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; RV32-NEXT: .LBB6_5: # %cond.store -; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 0, e16, mf2, ta, mu ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu ; RV32-NEXT: vmv.x.s a2, v9 @@ -379,7 +379,7 @@ ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret ; RV64-NEXT: .LBB6_5: # %cond.store -; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 0, e16, mf2, ta, mu ; RV64-NEXT: vmv.x.s a1, v8 ; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV64-NEXT: vmv.x.s a2, v10 @@ -456,7 +456,7 @@ ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; RV32-NEXT: .LBB7_3: # %cond.store -; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: vmv.x.s a2, v9 ; RV32-NEXT: sh a1, 0(a2) @@ -501,7 +501,7 @@ ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret ; RV64-NEXT: .LBB7_3: # %cond.store -; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; RV64-NEXT: vmv.x.s a1, v8 ; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu ; RV64-NEXT: vmv.x.s a2, v9 @@ -683,7 +683,7 @@ ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; RV32-NEXT: .LBB9_3: # %cond.store -; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; RV32-NEXT: vmv.x.s a2, v8 ; RV32-NEXT: sh a2, 0(a0) ; RV32-NEXT: srli a2, a2, 16 @@ -726,7 +726,7 @@ ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret ; RV64-NEXT: .LBB9_3: # %cond.store -; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; RV64-NEXT: vmv.x.s a2, v8 ; RV64-NEXT: sh a2, 0(a0) ; RV64-NEXT: srli a2, a2, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll @@ -7,7 +7,7 @@ define half @intrinsic_vfmv.f.s_s_nxv1f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -20,7 +20,7 @@ define half @intrinsic_vfmv.f.s_s_nxv2f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define half @intrinsic_vfmv.f.s_s_nxv4f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -46,7 +46,7 @@ define half @intrinsic_vfmv.f.s_s_nxv8f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define half @intrinsic_vfmv.f.s_s_nxv16f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e16, m4, 
ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -72,7 +72,7 @@ define half @intrinsic_vfmv.f.s_s_nxv32f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -85,7 +85,7 @@ define float @intrinsic_vfmv.f.s_s_nxv1f32( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -98,7 +98,7 @@ define float @intrinsic_vfmv.f.s_s_nxv2f32( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -111,7 +111,7 @@ define float @intrinsic_vfmv.f.s_s_nxv4f32( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -124,7 +124,7 @@ define float @intrinsic_vfmv.f.s_s_nxv8f32( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -137,7 +137,7 @@ define float @intrinsic_vfmv.f.s_s_nxv16f32( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define double @intrinsic_vfmv.f.s_s_nxv1f64( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -163,7 +163,7 @@ define double @intrinsic_vfmv.f.s_s_nxv2f64( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m2, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -176,7 +176,7 @@ define double @intrinsic_vfmv.f.s_s_nxv4f64( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m4, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -189,7 +189,7 @@ define double @intrinsic_vfmv.f.s_s_nxv8f64( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, mu ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll @@ -6,7 +6,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv1i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -19,7 +19,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv2i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -32,7 +32,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv4i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -45,7 +45,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv8i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m1, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv16i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -71,7 +71,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv32i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -84,7 +84,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv64i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -97,7 +97,7 @@ define signext i16 @intrinsic_vmv.x.s_s_nxv1i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -110,7 +110,7 @@ define signext i16 @intrinsic_vmv.x.s_s_nxv2i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -123,7 +123,7 @@ define signext i16 @intrinsic_vmv.x.s_s_nxv4i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -136,7 +136,7 @@ define signext i16 @intrinsic_vmv.x.s_s_nxv8i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -149,7 +149,7 @@ define signext i16 @intrinsic_vmv.x.s_s_nxv16i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -162,7 +162,7 @@ define signext i16 
@intrinsic_vmv.x.s_s_nxv32i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -175,7 +175,7 @@ define i32 @intrinsic_vmv.x.s_s_nxv1i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -188,7 +188,7 @@ define i32 @intrinsic_vmv.x.s_s_nxv2i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -201,7 +201,7 @@ define i32 @intrinsic_vmv.x.s_s_nxv4i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -214,7 +214,7 @@ define i32 @intrinsic_vmv.x.s_s_nxv8i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -227,7 +227,7 @@ define i32 @intrinsic_vmv.x.s_s_nxv16i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv64.ll @@ -6,7 +6,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv1i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -19,7 +19,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv2i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -32,7 +32,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv4i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -45,7 +45,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv8i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m1, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv16i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -71,7 +71,7 @@ define signext i8 
@intrinsic_vmv.x.s_s_nxv32i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -84,7 +84,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv64i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -97,7 +97,7 @@ define signext i16 @intrinsic_vmv.x.s_s_nxv1i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -110,7 +110,7 @@ define signext i16 @intrinsic_vmv.x.s_s_nxv2i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -123,7 +123,7 @@ define signext i16 @intrinsic_vmv.x.s_s_nxv4i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -136,7 +136,7 @@ define signext i16 @intrinsic_vmv.x.s_s_nxv8i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -149,7 +149,7 @@ define signext i16 @intrinsic_vmv.x.s_s_nxv16i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -162,7 +162,7 @@ define signext i16 @intrinsic_vmv.x.s_s_nxv32i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -175,7 +175,7 @@ define signext i32 @intrinsic_vmv.x.s_s_nxv1i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -188,7 +188,7 @@ define signext i32 @intrinsic_vmv.x.s_s_nxv2i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -201,7 +201,7 @@ define signext i32 @intrinsic_vmv.x.s_s_nxv4i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -214,7 +214,7 @@ define signext i32 @intrinsic_vmv.x.s_s_nxv8i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, 
e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -227,7 +227,7 @@ define signext i32 @intrinsic_vmv.x.s_s_nxv16i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define i64 @intrinsic_vmv.x.s_s_nxv1i64( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -253,7 +253,7 @@ define i64 @intrinsic_vmv.x.s_s_nxv2i64( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m2, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -266,7 +266,7 @@ define i64 @intrinsic_vmv.x.s_s_nxv4i64( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m4, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -279,7 +279,7 @@ define i64 @intrinsic_vmv.x.s_s_nxv8i64( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, mu ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir @@ -224,7 +224,7 @@ ; CHECK-LABEL: name: vmv_x_s ; CHECK: liveins: $v8 ; CHECK: [[COPY:%[0-9]+]]:vr = COPY $v8 - ; CHECK: dead $x0 = PseudoVSETVLI killed $x0, 88, implicit-def $vl, implicit-def $vtype, implicit $vl + ; CHECK: dead $x0 = PseudoVSETIVLI 0, 88, implicit-def $vl, implicit-def $vtype ; CHECK: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 [[COPY]], 6, implicit $vtype ; CHECK: $x10 = COPY [[PseudoVMV_X_S_M1_]] ; CHECK: PseudoRET implicit $x10 diff --git a/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll b/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll --- a/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll +++ b/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll @@ -15,7 +15,7 @@ ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fsh ft0, 14(sp) # 2-byte Folded Spill ; CHECK-NEXT: #APP @@ -36,7 +36,7 @@ ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fsw ft0, 12(sp) # 4-byte Folded Spill ; CHECK-NEXT: #APP @@ -57,7 +57,7 @@ ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fsd ft0, 8(sp) # 8-byte Folded Spill ; CHECK-NEXT: #APP diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll 
b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll --- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll +++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll @@ -727,7 +727,7 @@ ; RV32MV-NEXT: vmsne.vv v0, v26, v30 ; RV32MV-NEXT: vmv.v.i v26, 0 ; RV32MV-NEXT: vmerge.vim v26, v26, -1, v0 -; RV32MV-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; RV32MV-NEXT: vsetivli zero, 0, e32, m2, ta, mu ; RV32MV-NEXT: vmv.x.s a0, v26 ; RV32MV-NEXT: sw a0, 0(s1) ; RV32MV-NEXT: vsetivli zero, 1, e32, m2, ta, mu
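
Note on the RISCVInsertVSETVLI.cpp change at the top of this patch: when the instruction needing a new vtype has no AVL operand, the pass now emits `vsetivli zero, 0, <vtype>` unless the known predecessor state has the same VLMAX, in which case the vl-preserving `vsetvli zero, zero, <vtype>` form is still used. The sketch below is a minimal standalone C++ model of that decision rule; it is not LLVM code, and `VTypeState`, `sameVLMAX`, and `chooseVSET` are hypothetical names used only for illustration of the simplified condition (a known predecessor state with matching VLMAX).

```cpp
// Standalone sketch of the AVL-selection rule, not LLVM code.
// VTypeState is a hypothetical, simplified stand-in for the pass's
// per-block vsetvli state; only the fields needed for this choice appear.
#include <cstdio>
#include <optional>
#include <string>

struct VTypeState {
  unsigned SEW;    // element width in bits: 8, 16, 32, 64
  int LMULNum;     // LMUL numerator, e.g. m2 -> 2/1
  int LMULDen;     // LMUL denominator, e.g. mf2 -> 1/2
  // VLMAX = VLEN * LMUL / SEW, so two states share VLMAX exactly when
  // their LMUL/SEW ratios are equal (VLEN cancels out).
  bool sameVLMAX(const VTypeState &O) const {
    return static_cast<long>(LMULNum) * O.LMULDen * O.SEW ==
           static_cast<long>(O.LMULNum) * LMULDen * SEW;
  }
};

// Decide which vset* spelling to emit for an instruction that has no AVL
// operand (it only needs vtype, e.g. the vmv.x.s / vfmv.f.s cases above).
std::string chooseVSET(const std::optional<VTypeState> &Prev,
                       const VTypeState &New) {
  if (Prev && Prev->sameVLMAX(New))
    // VLMAX is unchanged, so keeping the current vl is safe.
    return "vsetvli zero, zero, <vtype>";
  // Predecessor state unknown or VLMAX changes: the old vl could become
  // invalid, so set vl to a known value without reading the old vl.
  return "vsetivli zero, 0, <vtype>";
}

int main() {
  VTypeState PrevE32M1{32, 1, 1};  // e32, m1
  VTypeState NewE16MF2{16, 1, 2};  // e16, mf2 -> same VLMAX as e32, m1
  VTypeState NewE64M2{64, 2, 1};   // e64, m2

  std::printf("%s\n", chooseVSET(PrevE32M1, NewE16MF2).c_str());
  std::printf("%s\n", chooseVSET(std::nullopt, NewE64M2).c_str());
  return 0;
}
```

The trade-off made here is that an immediate AVL of 0 never reads the old vl, so it stays valid even when the predecessor vsetvli state is unknown (for example at function entry), at the cost of setting vl to 0, which the vmv.x.s/vfmv.f.s patterns in the updated tests do not need.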