diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -301,69 +301,69 @@
     Opc = RISCV::FSGNJ_D;
     IsScalableVector = false;
   } else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
-    Opc = RISCV::PseudoVMV1R_V;
+    Opc = RISCV::VMV1R_V;
     LMul = RISCVII::LMUL_1;
   } else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
-    Opc = RISCV::PseudoVMV2R_V;
+    Opc = RISCV::VMV2R_V;
     LMul = RISCVII::LMUL_2;
   } else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
-    Opc = RISCV::PseudoVMV4R_V;
+    Opc = RISCV::VMV4R_V;
     LMul = RISCVII::LMUL_4;
   } else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
-    Opc = RISCV::PseudoVMV8R_V;
+    Opc = RISCV::VMV8R_V;
     LMul = RISCVII::LMUL_8;
   } else if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) {
-    Opc = RISCV::PseudoVMV1R_V;
+    Opc = RISCV::VMV1R_V;
     SubRegIdx = RISCV::sub_vrm1_0;
     NF = 2;
     LMul = RISCVII::LMUL_1;
   } else if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) {
-    Opc = RISCV::PseudoVMV2R_V;
+    Opc = RISCV::VMV2R_V;
     SubRegIdx = RISCV::sub_vrm2_0;
     NF = 2;
     LMul = RISCVII::LMUL_2;
   } else if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) {
-    Opc = RISCV::PseudoVMV4R_V;
+    Opc = RISCV::VMV4R_V;
     SubRegIdx = RISCV::sub_vrm4_0;
     NF = 2;
     LMul = RISCVII::LMUL_4;
   } else if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) {
-    Opc = RISCV::PseudoVMV1R_V;
+    Opc = RISCV::VMV1R_V;
     SubRegIdx = RISCV::sub_vrm1_0;
     NF = 3;
     LMul = RISCVII::LMUL_1;
   } else if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) {
-    Opc = RISCV::PseudoVMV2R_V;
+    Opc = RISCV::VMV2R_V;
     SubRegIdx = RISCV::sub_vrm2_0;
     NF = 3;
     LMul = RISCVII::LMUL_2;
   } else if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) {
-    Opc = RISCV::PseudoVMV1R_V;
+    Opc = RISCV::VMV1R_V;
     SubRegIdx = RISCV::sub_vrm1_0;
     NF = 4;
     LMul = RISCVII::LMUL_1;
   } else if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) {
-    Opc = RISCV::PseudoVMV2R_V;
+    Opc = RISCV::VMV2R_V;
     SubRegIdx = RISCV::sub_vrm2_0;
     NF = 4;
     LMul = RISCVII::LMUL_2;
   } else if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) {
-    Opc = RISCV::PseudoVMV1R_V;
+    Opc = RISCV::VMV1R_V;
     SubRegIdx = RISCV::sub_vrm1_0;
     NF = 5;
     LMul = RISCVII::LMUL_1;
   } else if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) {
-    Opc = RISCV::PseudoVMV1R_V;
+    Opc = RISCV::VMV1R_V;
     SubRegIdx = RISCV::sub_vrm1_0;
     NF = 6;
     LMul = RISCVII::LMUL_1;
   } else if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) {
-    Opc = RISCV::PseudoVMV1R_V;
+    Opc = RISCV::VMV1R_V;
     SubRegIdx = RISCV::sub_vrm1_0;
     NF = 7;
     LMul = RISCVII::LMUL_1;
   } else if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) {
-    Opc = RISCV::PseudoVMV1R_V;
+    Opc = RISCV::VMV1R_V;
     SubRegIdx = RISCV::sub_vrm1_0;
     NF = 8;
     LMul = RISCVII::LMUL_1;
@@ -488,13 +488,13 @@
     Opcode = RISCV::FSD;
     IsScalableVector = false;
   } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
-    Opcode = RISCV::PseudoVSPILL_M1;
+    Opcode = RISCV::VS1R_V;
   } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
-    Opcode = RISCV::PseudoVSPILL_M2;
+    Opcode = RISCV::VS2R_V;
   } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
-    Opcode = RISCV::PseudoVSPILL_M4;
+    Opcode = RISCV::VS4R_V;
   } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
-    Opcode = RISCV::PseudoVSPILL_M8;
+    Opcode = RISCV::VS8R_V;
   } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
     Opcode = RISCV::PseudoVSPILL2_M1;
   else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
@@ -571,13 +571,13 @@
     Opcode = RISCV::FLD;
     IsScalableVector = false;
   } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
-    Opcode = RISCV::PseudoVRELOAD_M1;
+    Opcode = RISCV::VL1RE8_V;
   } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
-    Opcode = RISCV::PseudoVRELOAD_M2;
+    Opcode = RISCV::VL2RE8_V;
   } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
-    Opcode = RISCV::PseudoVRELOAD_M4;
+    Opcode = RISCV::VL4RE8_V;
   } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
-    Opcode = RISCV::PseudoVRELOAD_M8;
+    Opcode = RISCV::VL8RE8_V;
   } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
     Opcode = RISCV::PseudoVRELOAD2_M1;
   else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -5007,16 +5007,6 @@
 //===----------------------------------------------------------------------===//
 // Pseudo Instructions for CodeGen
 //===----------------------------------------------------------------------===//
-let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
-  def PseudoVMV1R_V : VPseudo,
-                      VMVRSched<1>;
-  def PseudoVMV2R_V : VPseudo,
-                      VMVRSched<2>;
-  def PseudoVMV4R_V : VPseudo,
-                      VMVRSched<4>;
-  def PseudoVMV8R_V : VPseudo,
-                      VMVRSched<8>;
-}
 
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1 in {
   def PseudoReadVLENB : Pseudo<(outs GPR:$rd), (ins),
@@ -5028,20 +5018,6 @@
     Uses = [VL] in
 def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins), []>;
 
-let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1 in {
-  def PseudoVSPILL_M1 : VPseudo;
-  def PseudoVSPILL_M2 : VPseudo;
-  def PseudoVSPILL_M4 : VPseudo;
-  def PseudoVSPILL_M8 : VPseudo;
-}
-
-let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 1 in {
-  def PseudoVRELOAD_M1 : VPseudo;
-  def PseudoVRELOAD_M2 : VPseudo;
-  def PseudoVRELOAD_M4 : VPseudo;
-  def PseudoVRELOAD_M8 : VPseudo;
-}
-
 foreach lmul = MxList in {
   foreach nf = NFSet.L in {
     defvar vreg = SegRegClass.RC;
diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
--- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
+++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
@@ -64,7 +64,7 @@
 ; CHECK-NEXT:    slli a1, a1, 4
 ; CHECK-NEXT:    add a1, sp, a1
 ; CHECK-NEXT:    addi a1, a1, 16
-; CHECK-NEXT:    vl8re8.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
 ; CHECK-NEXT:    vfwsub.wv v16, v8, v24
 ; CHECK-NEXT:    addi a1, sp, 16
 ; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -75,9 +75,9 @@
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    vl8re8.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vl8re8.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT:    vfdiv.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    csrr a0, vlenb
@@ -135,7 +135,7 @@
 ; SUBREGLIVENESS-NEXT:    add a1, a1, a2
 ; SUBREGLIVENESS-NEXT:    vl4r.v v24, (a1) # Unknown-size Folded Reload
 ; SUBREGLIVENESS-NEXT:    addi a1, sp, 16
-; SUBREGLIVENESS-NEXT:    vl8re8.v v24, (a1) # Unknown-size Folded Reload
+; SUBREGLIVENESS-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
 ; SUBREGLIVENESS-NEXT:    vfwsub.wv v8, v24, v20
 ; SUBREGLIVENESS-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
 ; SUBREGLIVENESS-NEXT:    vssubu.vv v16, v16, v8, v0.t
diff 
--git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll @@ -1392,7 +1392,7 @@ ; RV32-NEXT: vand.vv v16, v16, v24 ; RV32-NEXT: vor.vv v16, v16, v0 ; RV32-NEXT: addi a5, sp, 16 -; RV32-NEXT: vl8re8.v v0, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v0, (a5) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v16, v16, v0 ; RV32-NEXT: addi a5, sp, 16 ; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill @@ -1408,7 +1408,7 @@ ; RV32-NEXT: vlse64.v v16, (a3), zero ; RV32-NEXT: vor.vv v8, v0, v8 ; RV32-NEXT: addi a0, sp, 16 -; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v8, v8, v24 ; RV32-NEXT: vsrl.vi v24, v8, 4 ; RV32-NEXT: vand.vv v24, v24, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll @@ -528,7 +528,7 @@ ; RV32-NEXT: vand.vv v16, v16, v24 ; RV32-NEXT: vor.vv v16, v16, v0 ; RV32-NEXT: addi a4, sp, 16 -; RV32-NEXT: vl8re8.v v0, (a4) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v0, (a4) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v16, v16, v0 ; RV32-NEXT: addi a4, sp, 16 ; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill @@ -543,7 +543,7 @@ ; RV32-NEXT: vor.vv v8, v8, v24 ; RV32-NEXT: vor.vv v8, v16, v8 ; RV32-NEXT: addi a0, sp, 16 -; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll @@ -1081,10 +1081,10 @@ ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v16, v24, v16, v0.t ; RV32-NEXT: addi a0, sp, 16 -; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v16, v24, v16, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 @@ -1104,17 +1104,17 @@ ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vand.vv v8, v8, v16, v0.t ; RV32-NEXT: vor.vv v8, v8, v24, v0.t ; RV32-NEXT: addi a0, sp, 16 -; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v8, v8, v16, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v8, v16, v8, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a1, 24 @@ -1151,7 +1151,7 @@ ; RV64-NEXT: vsll.vx v16, v16, a4, v0.t ; RV64-NEXT: vor.vv v16, v24, v16, v0.t ; RV64-NEXT: addi a5, sp, 16 -; RV64-NEXT: vl8re8.v v24, (a5) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v24, (a5) # 
Unknown-size Folded Reload ; RV64-NEXT: vor.vv v16, v16, v24, v0.t ; RV64-NEXT: addi a5, sp, 16 ; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill @@ -1166,7 +1166,7 @@ ; RV64-NEXT: vor.vv v8, v8, v16, v0.t ; RV64-NEXT: vor.vv v8, v8, v24, v0.t ; RV64-NEXT: addi a0, sp, 16 -; RV64-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; RV64-NEXT: vor.vv v8, v16, v8, v0.t ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 @@ -1211,7 +1211,7 @@ ; RV32-NEXT: vsll.vi v24, v24, 8 ; RV32-NEXT: vor.vv v24, v0, v24 ; RV32-NEXT: addi a0, sp, 16 -; RV32-NEXT: vl8re8.v v0, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v24, v0, v24 ; RV32-NEXT: addi a0, sp, 16 ; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill @@ -1226,7 +1226,7 @@ ; RV32-NEXT: vor.vv v8, v16, v8 ; RV32-NEXT: vor.vv v8, v8, v24 ; RV32-NEXT: addi a0, sp, 16 -; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v8, v16, v8 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 @@ -1321,10 +1321,10 @@ ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v16, v24, v16, v0.t ; RV32-NEXT: addi a0, sp, 16 -; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v16, v24, v16, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 @@ -1344,17 +1344,17 @@ ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vand.vv v8, v8, v16, v0.t ; RV32-NEXT: vor.vv v8, v8, v24, v0.t ; RV32-NEXT: addi a0, sp, 16 -; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v8, v8, v16, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v8, v16, v8, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a1, 24 @@ -1391,7 +1391,7 @@ ; RV64-NEXT: vsll.vx v16, v16, a4, v0.t ; RV64-NEXT: vor.vv v16, v24, v16, v0.t ; RV64-NEXT: addi a5, sp, 16 -; RV64-NEXT: vl8re8.v v24, (a5) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload ; RV64-NEXT: vor.vv v16, v16, v24, v0.t ; RV64-NEXT: addi a5, sp, 16 ; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill @@ -1406,7 +1406,7 @@ ; RV64-NEXT: vor.vv v8, v8, v16, v0.t ; RV64-NEXT: vor.vv v8, v8, v24, v0.t ; RV64-NEXT: addi a0, sp, 16 -; RV64-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; RV64-NEXT: vor.vv v8, v16, v8, v0.t ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 @@ -1451,7 +1451,7 @@ ; RV32-NEXT: vsll.vi v24, v24, 8 ; RV32-NEXT: vor.vv v24, v0, v24 ; RV32-NEXT: addi a0, sp, 16 -; RV32-NEXT: vl8re8.v v0, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v24, v0, v24 ; RV32-NEXT: addi a0, sp, 16 ; RV32-NEXT: 
vs8r.v v24, (a0) # Unknown-size Folded Spill @@ -1466,7 +1466,7 @@ ; RV32-NEXT: vor.vv v8, v16, v8 ; RV32-NEXT: vor.vv v8, v8, v24 ; RV32-NEXT: addi a0, sp, 16 -; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v8, v16, v8 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 @@ -1555,12 +1555,12 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vsrl.vi v16, v8, 8, v0.t ; CHECK-NEXT: vsll.vi v8, v8, 8, v0.t ; CHECK-NEXT: vor.vv v8, v8, v16, v0.t ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll @@ -157,21 +157,21 @@ ; CHECK-NEXT: vs8r.v v0, (a7) ; CHECK-NEXT: add a6, a0, a6 ; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vs8r.v v8, (a6) ; CHECK-NEXT: add a5, a0, a5 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vs8r.v v8, (a5) ; CHECK-NEXT: add a4, a0, a4 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vs8r.v v8, (a4) ; CHECK-NEXT: add a0, a0, a3 ; CHECK-NEXT: csrr a1, vlenb @@ -179,7 +179,7 @@ ; CHECK-NEXT: mul a1, a1, a2 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vs8r.v v8, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 5 @@ -261,10 +261,10 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vadd.vv v8, v24, v8 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vadd.vv v8, v8, v24 ; CHECK-NEXT: vadd.vv v24, v0, v16 ; CHECK-NEXT: vadd.vx v16, v8, a4 @@ -411,14 +411,14 @@ ; RV32-NEXT: addi a2, a2, 128 ; RV32-NEXT: li a5, 42 ; RV32-NEXT: addi a3, sp, 128 -; RV32-NEXT: vl8re8.v v8, (a3) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload ; RV32-NEXT: vs8r.v v8, (a1) ; RV32-NEXT: vmv8r.v v8, v0 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 128 -; RV32-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload ; RV32-NEXT: call ext3@plt ; RV32-NEXT: addi sp, s0, -144 ; RV32-NEXT: lw ra, 
140(sp) # 4-byte Folded Reload @@ -479,14 +479,14 @@ ; RV64-NEXT: addi a2, a2, 128 ; RV64-NEXT: li a5, 42 ; RV64-NEXT: addi a3, sp, 128 -; RV64-NEXT: vl8re8.v v8, (a3) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload ; RV64-NEXT: vs8r.v v8, (a1) ; RV64-NEXT: vmv8r.v v8, v0 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 128 -; RV64-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload ; RV64-NEXT: call ext3@plt ; RV64-NEXT: addi sp, s0, -144 ; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll @@ -766,7 +766,7 @@ ; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: addi a2, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload ; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t ; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill @@ -788,7 +788,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir --- a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir +++ b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir @@ -95,7 +95,7 @@ ; CHECK-NEXT: $x10 = ADD $x2, killed $x10 ; CHECK-NEXT: $x10 = ADDI killed $x10, 2047 ; CHECK-NEXT: $x10 = ADDI killed $x10, 161 - ; CHECK-NEXT: PseudoVSPILL_M1 killed renamable $v25, killed $x10 :: (store unknown-size into %stack.1, align 8) + ; CHECK-NEXT: VS1R_V killed renamable $v25, killed $x10 :: (store unknown-size into %stack.1, align 8) ; CHECK-NEXT: renamable $x1 = ADDI $x0, 255 ; CHECK-NEXT: renamable $x5 = nuw ADDI $x2, 384 ; CHECK-NEXT: renamable $x6 = ADDI $x2, 512 @@ -136,7 +136,7 @@ ; CHECK-NEXT: $x10 = ADD $x2, killed $x10 ; CHECK-NEXT: $x10 = ADDI killed $x10, 2047 ; CHECK-NEXT: $x10 = ADDI killed $x10, 161 - ; CHECK-NEXT: renamable $v0 = PseudoVRELOAD_M1 killed $x10 :: (load unknown-size from %stack.1, align 8) + ; CHECK-NEXT: renamable $v0 = VL1RE8_V killed $x10 :: (load unknown-size from %stack.1, align 8) ; CHECK-NEXT: $x10 = LD $x2, 8 :: (load (s64) from %stack.15) ; CHECK-NEXT: renamable $v0 = PseudoVSLIDEDOWN_VX_M1 undef renamable $v0, killed renamable $v0, killed renamable $x13, $noreg, 3 /* e8 */, 1 /* ta, mu */, implicit $vl, implicit $vtype ; CHECK-NEXT: renamable $x13 = PseudoVMV_X_S_M1 killed renamable $v0, 3 /* e8 */, implicit $vl, implicit $vtype @@ -173,7 +173,7 @@ dead renamable $x15 = PseudoVSETIVLI 1, 72, implicit-def $vl, implicit-def $vtype renamable $v25 = PseudoVMV_V_X_M1 killed renamable $x12, $noreg, 4, implicit $vl, implicit $vtype - PseudoVSPILL_M1 killed renamable $v25, %stack.1 :: (store unknown-size into %stack.1, align 8) + VS1R_V killed renamable $v25, %stack.1 :: (store unknown-size into %stack.1, align 8) renamable $x1 = ADDI $x0, 255 renamable $x5 = nuw ADDI %stack.0, 256 renamable $x6 = ADDI %stack.0, 384 @@ -204,7 +204,7 @@ 
renamable $x16 = SUB killed renamable $x13, renamable $x13 dead renamable $x13 = PseudoVSETIVLI 1, 64, implicit-def $vl, implicit-def $vtype renamable $x13 = nsw ADDI renamable $x16, -2 - renamable $v0 = PseudoVRELOAD_M1 %stack.1 :: (load unknown-size from %stack.1, align 8) + renamable $v0 = VL1RE8_V %stack.1 :: (load unknown-size from %stack.1, align 8) renamable $v0 = PseudoVSLIDEDOWN_VX_M1 undef renamable $v0, killed renamable $v0, killed renamable $x13, $noreg, 3, 1, implicit $vl, implicit $vtype renamable $x13 = PseudoVMV_X_S_M1 killed renamable $v0, 3, implicit $vl, implicit $vtype BLT killed renamable $x16, renamable $x27, %bb.2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll @@ -119,7 +119,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfncvt.f.f.w v16, v24, v0.t ; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll @@ -78,7 +78,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t ; CHECK-NEXT: li a0, 128 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma @@ -312,7 +312,7 @@ ; CHECK-NEXT: mul a5, a5, a6 ; CHECK-NEXT: add a5, sp, a5 ; CHECK-NEXT: addi a5, a5, 16 -; CHECK-NEXT: vl8re8.v v16, (a5) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload ; CHECK-NEXT: vnsrl.wi v24, v16, 0, v0.t ; CHECK-NEXT: csrr a5, vlenb ; CHECK-NEXT: li a6, 48 @@ -396,12 +396,12 @@ ; CHECK-NEXT: mul a1, a1, a4 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vslideup.vi v16, v24, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: li a4, 48 @@ -414,12 +414,12 @@ ; CHECK-NEXT: mul a1, a1, a4 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vslideup.vi v16, v24, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: li a4, 40 @@ -428,7 +428,7 @@ ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a1) # 
Unknown-size Folded Reload ; CHECK-NEXT: vslideup.vi v8, v16, 16 ; CHECK-NEXT: addi a1, a7, -16 ; CHECK-NEXT: sltu a4, a7, a1 @@ -441,7 +441,7 @@ ; CHECK-NEXT: mul a1, a1, a4 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vnsrl.wi v24, v16, 0, v0.t ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 4 @@ -458,14 +458,14 @@ ; CHECK-NEXT: slli a1, a1, 5 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vnsrl.wi v24, v16, 0, v0.t ; CHECK-NEXT: vsetvli zero, a3, e32, m8, tu, ma ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vslideup.vi v24, v16, 16 ; CHECK-NEXT: vse32.v v24, (a0) ; CHECK-NEXT: addi a1, a0, 256 @@ -476,7 +476,7 @@ ; CHECK-NEXT: mul a2, a2, a3 ; CHECK-NEXT: add a2, sp, a2 ; CHECK-NEXT: addi a2, a2, 16 -; CHECK-NEXT: vl8re8.v v8, (a2) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload ; CHECK-NEXT: vse32.v v8, (a1) ; CHECK-NEXT: addi a0, a0, 384 ; CHECK-NEXT: csrr a1, vlenb @@ -484,7 +484,7 @@ ; CHECK-NEXT: mul a1, a1, a2 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 56 @@ -526,7 +526,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t ; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll @@ -951,25 +951,25 @@ ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vmv1r.v v0, v1 ; RV32-NEXT: addi a0, sp, 16 -; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a5, 24 ; RV32-NEXT: mul a0, a0, a5 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vand.vv v16, v24, v16, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v16, v16, v24, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v24, v16, v24, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 @@ -992,21 +992,21 @@ ; RV32-NEXT: 
mul a0, a0, a1 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vand.vv v8, v8, v24, v0.t ; RV32-NEXT: vsll.vi v8, v8, 8, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v8, v24, v8, v0.t ; RV32-NEXT: vor.vv v8, v16, v8, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v8, v8, v16, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 5 @@ -1042,7 +1042,7 @@ ; RV64-NEXT: vsll.vx v16, v16, a4, v0.t ; RV64-NEXT: vor.vv v16, v24, v16, v0.t ; RV64-NEXT: addi a5, sp, 16 -; RV64-NEXT: vl8re8.v v24, (a5) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload ; RV64-NEXT: vor.vv v16, v16, v24, v0.t ; RV64-NEXT: addi a5, sp, 16 ; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill @@ -1057,7 +1057,7 @@ ; RV64-NEXT: vor.vv v8, v8, v16, v0.t ; RV64-NEXT: vor.vv v8, v8, v24, v0.t ; RV64-NEXT: addi a0, sp, 16 -; RV64-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; RV64-NEXT: vor.vv v8, v16, v8, v0.t ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 @@ -1105,7 +1105,7 @@ ; RV32-NEXT: vand.vx v0, v0, a0 ; RV32-NEXT: vor.vv v24, v24, v0 ; RV32-NEXT: addi a4, sp, 16 -; RV32-NEXT: vl8re8.v v0, (a4) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v0, (a4) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v24, v24, v0 ; RV32-NEXT: addi a4, sp, 16 ; RV32-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill @@ -1120,7 +1120,7 @@ ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: vor.vv v8, v24, v8 ; RV32-NEXT: addi a0, sp, 16 -; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 @@ -1221,25 +1221,25 @@ ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vmv1r.v v0, v1 ; RV32-NEXT: addi a0, sp, 16 -; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a5, 24 ; RV32-NEXT: mul a0, a0, a5 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vand.vv v16, v24, v16, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v16, v16, v24, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v24, v16, v24, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 @@ -1262,21 +1262,21 @@ ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: add a0, sp, a0 ; 
RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vand.vv v8, v8, v24, v0.t ; RV32-NEXT: vsll.vi v8, v8, 8, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v8, v24, v8, v0.t ; RV32-NEXT: vor.vv v8, v16, v8, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v8, v8, v16, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 5 @@ -1312,7 +1312,7 @@ ; RV64-NEXT: vsll.vx v16, v16, a4, v0.t ; RV64-NEXT: vor.vv v16, v24, v16, v0.t ; RV64-NEXT: addi a5, sp, 16 -; RV64-NEXT: vl8re8.v v24, (a5) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload ; RV64-NEXT: vor.vv v16, v16, v24, v0.t ; RV64-NEXT: addi a5, sp, 16 ; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill @@ -1327,7 +1327,7 @@ ; RV64-NEXT: vor.vv v8, v8, v16, v0.t ; RV64-NEXT: vor.vv v8, v8, v24, v0.t ; RV64-NEXT: addi a0, sp, 16 -; RV64-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; RV64-NEXT: vor.vv v8, v16, v8, v0.t ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 @@ -1375,7 +1375,7 @@ ; RV32-NEXT: vand.vx v0, v0, a0 ; RV32-NEXT: vor.vv v24, v24, v0 ; RV32-NEXT: addi a4, sp, 16 -; RV32-NEXT: vl8re8.v v0, (a4) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v0, (a4) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v24, v24, v0 ; RV32-NEXT: addi a4, sp, 16 ; RV32-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill @@ -1390,7 +1390,7 @@ ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: vor.vv v8, v24, v8 ; RV32-NEXT: addi a0, sp, 16 -; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 @@ -1476,12 +1476,12 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vsrl.vi v16, v8, 8, v0.t ; CHECK-NEXT: vsll.vi v8, v8, 8, v0.t ; CHECK-NEXT: vor.vv v16, v8, v16, v0.t ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll @@ -706,14 +706,14 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfabs.v v24, v16, v0.t ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: 
addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma @@ -767,7 +767,7 @@ ; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t ; CHECK-NEXT: addi a1, a0, -16 ; CHECK-NEXT: sltu a0, a0, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-emergency-slot.mir b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-emergency-slot.mir --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-emergency-slot.mir +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-emergency-slot.mir @@ -49,7 +49,7 @@ SD $x10, %stack.0, 0 SD $x10, %stack.2, 0 dead renamable $x15 = PseudoVSETIVLI 1, 72, implicit-def $vl, implicit-def $vtype - PseudoVSPILL_M1 killed renamable $v25, %stack.1 :: (store unknown-size into %stack.1, align 8) + VS1R_V killed renamable $v25, %stack.1 :: (store unknown-size into %stack.1, align 8) ; This is here just to make all the eligible registers live at this point. ; This way when we replace the frame index %stack.1 with its actual address ; we have to allocate a virtual register to compute it. diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll @@ -706,14 +706,14 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfabs.v v24, v16, v0.t ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma @@ -767,7 +767,7 @@ ; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t ; CHECK-NEXT: addi a1, a0, -16 ; CHECK-NEXT: sltu a0, a0, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll @@ -290,12 +290,12 @@ ; RV32-V128-NEXT: slli a0, a0, 3 ; RV32-V128-NEXT: add a0, sp, a0 ; RV32-V128-NEXT: addi a0, a0, 16 -; RV32-V128-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV32-V128-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV32-V128-NEXT: vrgather.vv v8, v16, v24, v0.t ; RV32-V128-NEXT: vmv.v.v v24, v8 ; RV32-V128-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-V128-NEXT: addi a0, sp, 16 -; RV32-V128-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; 
RV32-V128-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; RV32-V128-NEXT: vwaddu.vv v0, v8, v16 ; RV32-V128-NEXT: li a0, -1 ; RV32-V128-NEXT: vwmaccu.vx v0, a0, v16 @@ -341,12 +341,12 @@ ; RV64-V128-NEXT: slli a0, a0, 3 ; RV64-V128-NEXT: add a0, sp, a0 ; RV64-V128-NEXT: addi a0, a0, 16 -; RV64-V128-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV64-V128-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV64-V128-NEXT: vrgather.vv v8, v16, v24, v0.t ; RV64-V128-NEXT: vmv.v.v v24, v8 ; RV64-V128-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-V128-NEXT: addi a0, sp, 16 -; RV64-V128-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; RV64-V128-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; RV64-V128-NEXT: vwaddu.vv v0, v8, v16 ; RV64-V128-NEXT: li a0, -1 ; RV64-V128-NEXT: vwmaccu.vx v0, a0, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll @@ -396,12 +396,12 @@ ; RV32-V128-NEXT: slli a0, a0, 3 ; RV32-V128-NEXT: add a0, sp, a0 ; RV32-V128-NEXT: addi a0, a0, 16 -; RV32-V128-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV32-V128-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV32-V128-NEXT: vrgather.vv v8, v16, v24, v0.t ; RV32-V128-NEXT: vmv.v.v v24, v8 ; RV32-V128-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-V128-NEXT: addi a0, sp, 16 -; RV32-V128-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; RV32-V128-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; RV32-V128-NEXT: vwaddu.vv v0, v8, v16 ; RV32-V128-NEXT: li a0, -1 ; RV32-V128-NEXT: vwmaccu.vx v0, a0, v16 @@ -447,12 +447,12 @@ ; RV64-V128-NEXT: slli a0, a0, 3 ; RV64-V128-NEXT: add a0, sp, a0 ; RV64-V128-NEXT: addi a0, a0, 16 -; RV64-V128-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV64-V128-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV64-V128-NEXT: vrgather.vv v8, v16, v24, v0.t ; RV64-V128-NEXT: vmv.v.v v24, v8 ; RV64-V128-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-V128-NEXT: addi a0, sp, 16 -; RV64-V128-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; RV64-V128-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; RV64-V128-NEXT: vwaddu.vv v0, v8, v16 ; RV64-V128-NEXT: li a0, -1 ; RV64-V128-NEXT: vwmaccu.vx v0, a0, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll @@ -387,13 +387,13 @@ ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vmfeq.vf v8, v16, ft0 ; RV32-NEXT: vse64.v v24, (a1), v0.t ; RV32-NEXT: addi a0, a1, 128 ; RV32-NEXT: vmv1r.v v0, v8 ; RV32-NEXT: addi a1, sp, 16 -; RV32-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload ; RV32-NEXT: vse64.v v8, (a0), v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 @@ -427,13 +427,13 @@ ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 16 -; RV64-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; RV64-NEXT: vmfeq.vf v8, v16, 
ft0 ; RV64-NEXT: vse64.v v24, (a1), v0.t ; RV64-NEXT: addi a0, a1, 128 ; RV64-NEXT: vmv1r.v v0, v8 ; RV64-NEXT: addi a1, sp, 16 -; RV64-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vse64.v v8, (a0), v0.t ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 @@ -495,13 +495,13 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmfeq.vf v8, v16, ft0 ; CHECK-NEXT: vse32.v v24, (a1), v0.t ; CHECK-NEXT: addi a0, a1, 128 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 @@ -544,13 +544,13 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmfeq.vf v8, v16, ft0 ; CHECK-NEXT: vse16.v v24, (a1), v0.t ; CHECK-NEXT: addi a0, a1, 128 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll @@ -426,13 +426,13 @@ ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vmseq.vv v0, v16, v8 ; RV32-NEXT: addi a0, a1, 128 ; RV32-NEXT: vse64.v v24, (a0), v0.t ; RV32-NEXT: vmv1r.v v0, v1 ; RV32-NEXT: addi a0, sp, 16 -; RV32-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vse64.v v8, (a1), v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 @@ -465,13 +465,13 @@ ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 16 -; RV64-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; RV64-NEXT: vmseq.vi v8, v16, 0 ; RV64-NEXT: vse64.v v24, (a1), v0.t ; RV64-NEXT: addi a0, a1, 128 ; RV64-NEXT: vmv1r.v v0, v8 ; RV64-NEXT: addi a1, sp, 16 -; RV64-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vse64.v v8, (a0), v0.t ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 @@ -549,13 +549,13 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmseq.vi v8, v16, 0 ; CHECK-NEXT: vse32.v v24, (a1), v0.t ; CHECK-NEXT: addi a0, a1, 128 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded 
Reload ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 @@ -615,13 +615,13 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmseq.vi v8, v16, 0 ; CHECK-NEXT: vse16.v v24, (a1), v0.t ; CHECK-NEXT: addi a0, a1, 128 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 @@ -663,13 +663,13 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmseq.vi v8, v16, 0 ; CHECK-NEXT: vse8.v v24, (a1), v0.t ; CHECK-NEXT: addi a0, a1, 128 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vse8.v v8, (a0), v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll @@ -1620,12 +1620,12 @@ ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vwadd.vv v0, v24, v8 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 @@ -1633,14 +1633,14 @@ ; RV32-NEXT: addi a0, a0, 16 ; RV32-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill ; RV32-NEXT: addi a0, sp, 16 -; RV32-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vwadd.vv v0, v8, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vadd.vv v8, v0, v8 ; RV32-NEXT: vmv.s.x v16, zero ; RV32-NEXT: vredsum.vs v8, v8, v16 @@ -1689,12 +1689,12 @@ ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 16 -; RV64-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 16 -; RV64-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; RV64-NEXT: vwadd.vv v0, v24, v8 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 @@ -1702,14 +1702,14 @@ ; RV64-NEXT: addi a0, a0, 16 ; RV64-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill ; RV64-NEXT: addi a0, sp, 16 -; RV64-NEXT: vl8re8.v v8, (a0) # 
Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; RV64-NEXT: vwadd.vv v0, v8, v16 ; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 16 -; RV64-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; RV64-NEXT: vadd.vv v8, v0, v8 ; RV64-NEXT: vmv.s.x v16, zero ; RV64-NEXT: vredsum.vs v8, v8, v16 @@ -1761,12 +1761,12 @@ ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vwaddu.vv v0, v24, v8 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 @@ -1774,14 +1774,14 @@ ; RV32-NEXT: addi a0, a0, 16 ; RV32-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill ; RV32-NEXT: addi a0, sp, 16 -; RV32-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vwaddu.vv v0, v8, v16 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vadd.vv v8, v0, v8 ; RV32-NEXT: vmv.s.x v16, zero ; RV32-NEXT: vredsum.vs v8, v8, v16 @@ -1830,12 +1830,12 @@ ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 16 -; RV64-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 16 -; RV64-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; RV64-NEXT: vwaddu.vv v0, v24, v8 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 @@ -1843,14 +1843,14 @@ ; RV64-NEXT: addi a0, a0, 16 ; RV64-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill ; RV64-NEXT: addi a0, sp, 16 -; RV64-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; RV64-NEXT: vwaddu.vv v0, v8, v16 ; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 16 -; RV64-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; RV64-NEXT: vadd.vv v8, v0, v8 ; RV64-NEXT: vmv.s.x v16, zero ; RV64-NEXT: vredsum.vs v8, v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll @@ -639,7 +639,7 @@ ; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t ; CHECK-NEXT: addi a1, a0, 
-16 ; CHECK-NEXT: sltu a0, a0, a1 @@ -698,7 +698,7 @@ ; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t ; CHECK-NEXT: addi a1, a0, -16 ; CHECK-NEXT: sltu a0, a0, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll @@ -706,14 +706,14 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfabs.v v24, v16, v0.t ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma @@ -767,7 +767,7 @@ ; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t ; CHECK-NEXT: addi a1, a0, -16 ; CHECK-NEXT: sltu a0, a0, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll @@ -706,14 +706,14 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfabs.v v24, v16, v0.t ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma @@ -767,7 +767,7 @@ ; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t ; CHECK-NEXT: addi a1, a0, -16 ; CHECK-NEXT: sltu a0, a0, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll @@ -706,14 +706,14 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded 
Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfabs.v v24, v16, v0.t ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmflt.vf v1, v24, ft0, v0.t ; CHECK-NEXT: fsrmi a0, 1 ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma @@ -767,7 +767,7 @@ ; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t ; CHECK-NEXT: addi a1, a0, -16 ; CHECK-NEXT: sltu a0, a0, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll @@ -580,9 +580,9 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmfeq.vv v16, v8, v24, v0.t ; CHECK-NEXT: vsetivli zero, 16, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vi v16, v1, 8 @@ -1191,9 +1191,9 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmfeq.vv v16, v8, v24, v0.t ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v16, v1, 2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll @@ -664,9 +664,9 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmseq.vv v16, v8, v24, v0.t ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: vmv1r.v v8, v1 @@ -1354,9 +1354,9 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmseq.vv v16, v8, v24, v0.t ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v16, v1, 4 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll @@ -358,9 +358,9 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfsgnj.vv v8, v8, v24, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll @@ -698,7 +698,7 @@ ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -716,20 +716,20 @@ ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 5 ; CHECK-NEXT: add sp, sp, a0 @@ -779,7 +779,7 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfmadd.vv v0, v8, v24 ; CHECK-NEXT: addi a0, a4, -16 ; CHECK-NEXT: sltu a1, a4, a0 @@ -790,9 +790,9 @@ ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfmadd.vv v24, v16, v8 ; CHECK-NEXT: vmv8r.v v8, v0 ; CHECK-NEXT: vmv.v.v v16, v24 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll @@ -358,9 +358,9 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 
-; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfmax.vv v8, v8, v24, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll @@ -358,9 +358,9 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfmin.vv v8, v8, v24, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll @@ -698,7 +698,7 @@ ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -716,20 +716,20 @@ ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 5 ; CHECK-NEXT: add sp, sp, a0 @@ -779,7 +779,7 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfmadd.vv v0, v8, v24 ; CHECK-NEXT: addi a0, a4, -16 ; CHECK-NEXT: sltu a1, a4, a0 @@ -790,9 +790,9 @@ ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfmadd.vv v24, v16, v8 ; CHECK-NEXT: vmv8r.v v8, v0 ; CHECK-NEXT: vmv.v.v v16, v24 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll @@ -107,7 +107,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwadd.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfwadd.vv v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -207,7 +207,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vfwadd.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfwadd.vv v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll @@ -107,7 +107,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwmul.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfwmul.vv v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -207,7 +207,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vfwmul.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfwmul.vv v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll @@ -107,7 +107,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwsub.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfwsub.vv v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -207,7 +207,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vfwsub.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfwsub.vv v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll @@ -1086,7 +1086,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0 ; CHECK-NEXT: bltu a2, a0, .LBB79_2 ; CHECK-NEXT: # %bb.1: @@ -1098,7 +1098,7 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmerge.vvm v8, v8, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; 
CHECK-NEXT: slli a0, a0, 4 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll @@ -1745,7 +1745,7 @@ ; RV64-NEXT: vslidedown.vi v0, v0, 2 ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV64-NEXT: addi a0, sp, 16 -; RV64-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 @@ -1828,7 +1828,7 @@ ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a1, 10 @@ -1914,7 +1914,7 @@ ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a1, 10 @@ -2001,7 +2001,7 @@ ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a1, 10 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll @@ -181,7 +181,7 @@ ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: vmv8r.v v16, v24 ; CHECK-NEXT: csrr a0, vlenb @@ -228,7 +228,7 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmerge.vvm v24, v24, v16, v0 ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v9 @@ -236,9 +236,9 @@ ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: vmv8r.v v16, v24 ; CHECK-NEXT: csrr a0, vlenb @@ -437,9 +437,9 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 @@ -605,9 +605,9 @@ ; CHECK-NEXT: slli 
a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll @@ -265,7 +265,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwadd.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vwadd.vv v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -300,7 +300,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwadd.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vwadd.vv v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -334,7 +334,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vwadd.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vwadd.vv v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll @@ -265,7 +265,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwaddu.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vwaddu.vv v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -300,7 +300,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwaddu.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vwaddu.vv v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -334,7 +334,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vwaddu.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vwaddu.vv v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll @@ -291,7 +291,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwmul.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vwmul.vv v16, v24, v0 ; 
CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -328,7 +328,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwmul.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vwmul.vv v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -364,7 +364,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vwmul.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vwmul.vv v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll @@ -283,7 +283,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwmulsu.vv v8, v24, v16 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vwmulsu.vv v16, v0, v24 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -320,7 +320,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwmulsu.vv v8, v24, v16 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vwmulsu.vv v16, v0, v24 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -356,7 +356,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vwmulsu.vv v8, v24, v16 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vwmulsu.vv v16, v0, v24 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll @@ -267,7 +267,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwmulu.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vwmulu.vv v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -304,7 +304,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwmulu.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vwmulu.vv v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -340,7 +340,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vwmulu.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vwmulu.vv v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll @@ -265,7 +265,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwsub.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vwsub.vv v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -300,7 +300,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwsub.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vwsub.vv v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -334,7 +334,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vwsub.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vwsub.vv v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll @@ -265,7 +265,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwsubu.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vwsubu.vv v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -300,7 +300,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwsubu.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vwsubu.vv v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -334,7 +334,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vwsubu.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vwsubu.vv v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll @@ -766,7 +766,7 @@ ; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: addi a2, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload ; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t ; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill @@ -788,7 +788,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir b/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir --- a/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir +++ 
b/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir @@ -80,7 +80,7 @@ bb.0: liveins: $x1, $x5, $x6, $x7, $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17, $x28, $x29, $x30, $x31, $v25 - PseudoVSPILL_M1 killed renamable $v25, %stack.1 :: (store unknown-size into %stack.1, align 8) + VS1R_V killed renamable $v25, %stack.1 :: (store unknown-size into %stack.1, align 8) ; This is here just to make all the eligible registers live at this point. ; This way when we replace the frame index %stack.1 with its actual address ; we have to allocate two virtual registers to compute it. diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll @@ -1293,7 +1293,7 @@ ; RV64-NEXT: vslidedown.vx v0, v0, a1 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: addi a1, sp, 16 -; RV64-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vluxei64.v v8, (zero), v16, v0.t ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add a0, a2, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll @@ -1827,7 +1827,7 @@ ; RV64-NEXT: vslidedown.vx v0, v0, a0 ; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; RV64-NEXT: addi a0, sp, 16 -; RV64-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll b/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll --- a/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll @@ -43,7 +43,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (s2) ; CHECK-NEXT: addi a0, s1, 160 -; CHECK-NEXT: vl2re8.v v10, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfadd.vv v8, v10, v8 ; CHECK-NEXT: vse32.v v8, (s2) ; CHECK-NEXT: addi sp, s0, -192 diff --git a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll @@ -700,7 +700,7 @@ ; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: addi a2, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload ; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t ; CHECK-NEXT: bltu a0, a1, .LBB32_2 ; CHECK-NEXT: # %bb.1: diff --git a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll @@ -766,7 +766,7 @@ ; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: addi a2, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload ; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t ; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill @@ -788,7 +788,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; 
CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll @@ -766,7 +766,7 @@ ; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: addi a2, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload ; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t ; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill @@ -788,7 +788,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll @@ -766,7 +766,7 @@ ; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: addi a2, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload ; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t ; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill @@ -788,7 +788,7 @@ ; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector.ll --- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector.ll @@ -102,7 +102,7 @@ ; SPILL-O0-NEXT: #APP ; SPILL-O0-NEXT: #NO_APP ; SPILL-O0-NEXT: addi a0, sp, 16 -; SPILL-O0-NEXT: vl2re8.v v8, (a0) # Unknown-size Folded Reload +; SPILL-O0-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload ; SPILL-O0-NEXT: csrr a0, vlenb ; SPILL-O0-NEXT: slli a0, a0, 1 ; SPILL-O0-NEXT: add sp, sp, a0 @@ -120,7 +120,7 @@ ; SPILL-O2-NEXT: #APP ; SPILL-O2-NEXT: #NO_APP ; SPILL-O2-NEXT: addi a0, sp, 16 -; SPILL-O2-NEXT: vl2re8.v v8, (a0) # Unknown-size Folded Reload +; SPILL-O2-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload ; SPILL-O2-NEXT: csrr a0, vlenb ; SPILL-O2-NEXT: slli a0, a0, 1 ; SPILL-O2-NEXT: add sp, sp, a0 @@ -145,7 +145,7 @@ ; SPILL-O0-NEXT: #APP ; SPILL-O0-NEXT: #NO_APP ; SPILL-O0-NEXT: addi a0, sp, 16 -; SPILL-O0-NEXT: vl4re8.v v8, (a0) # Unknown-size Folded Reload +; SPILL-O0-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload ; SPILL-O0-NEXT: csrr a0, vlenb ; SPILL-O0-NEXT: slli a0, a0, 2 ; SPILL-O0-NEXT: add sp, sp, a0 @@ -163,7 +163,7 @@ ; SPILL-O2-NEXT: #APP ; SPILL-O2-NEXT: #NO_APP ; SPILL-O2-NEXT: addi a0, sp, 16 -; SPILL-O2-NEXT: vl4re8.v 
v8, (a0) # Unknown-size Folded Reload +; SPILL-O2-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload ; SPILL-O2-NEXT: csrr a0, vlenb ; SPILL-O2-NEXT: slli a0, a0, 2 ; SPILL-O2-NEXT: add sp, sp, a0 @@ -188,7 +188,7 @@ ; SPILL-O0-NEXT: #APP ; SPILL-O0-NEXT: #NO_APP ; SPILL-O0-NEXT: addi a0, sp, 16 -; SPILL-O0-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; SPILL-O0-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; SPILL-O0-NEXT: csrr a0, vlenb ; SPILL-O0-NEXT: slli a0, a0, 3 ; SPILL-O0-NEXT: add sp, sp, a0 @@ -206,7 +206,7 @@ ; SPILL-O2-NEXT: #APP ; SPILL-O2-NEXT: #NO_APP ; SPILL-O2-NEXT: addi a0, sp, 16 -; SPILL-O2-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; SPILL-O2-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; SPILL-O2-NEXT: csrr a0, vlenb ; SPILL-O2-NEXT: slli a0, a0, 3 ; SPILL-O2-NEXT: add sp, sp, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll --- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll @@ -131,7 +131,7 @@ ; SPILL-O0-NEXT: #APP ; SPILL-O0-NEXT: #NO_APP ; SPILL-O0-NEXT: addi a0, sp, 16 -; SPILL-O0-NEXT: vl2re8.v v8, (a0) # Unknown-size Folded Reload +; SPILL-O0-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload ; SPILL-O0-NEXT: csrr a0, vlenb ; SPILL-O0-NEXT: slli a0, a0, 1 ; SPILL-O0-NEXT: add sp, sp, a0 @@ -189,7 +189,7 @@ ; SPILL-O0-NEXT: #APP ; SPILL-O0-NEXT: #NO_APP ; SPILL-O0-NEXT: addi a0, sp, 16 -; SPILL-O0-NEXT: vl4re8.v v8, (a0) # Unknown-size Folded Reload +; SPILL-O0-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload ; SPILL-O0-NEXT: csrr a0, vlenb ; SPILL-O0-NEXT: slli a0, a0, 2 ; SPILL-O0-NEXT: add sp, sp, a0 @@ -247,7 +247,7 @@ ; SPILL-O0-NEXT: #APP ; SPILL-O0-NEXT: #NO_APP ; SPILL-O0-NEXT: addi a0, sp, 16 -; SPILL-O0-NEXT: vl2re8.v v8, (a0) # Unknown-size Folded Reload +; SPILL-O0-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload ; SPILL-O0-NEXT: csrr a0, vlenb ; SPILL-O0-NEXT: slli a0, a0, 1 ; SPILL-O0-NEXT: add sp, sp, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector.ll --- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector.ll @@ -59,7 +59,7 @@ ; SPILL-O0-NEXT: #APP ; SPILL-O0-NEXT: #NO_APP ; SPILL-O0-NEXT: addi a0, sp, 16 -; SPILL-O0-NEXT: vl2re8.v v8, (a0) # Unknown-size Folded Reload +; SPILL-O0-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload ; SPILL-O0-NEXT: csrr a0, vlenb ; SPILL-O0-NEXT: slli a0, a0, 1 ; SPILL-O0-NEXT: add sp, sp, a0 @@ -77,7 +77,7 @@ ; SPILL-O2-NEXT: #APP ; SPILL-O2-NEXT: #NO_APP ; SPILL-O2-NEXT: addi a0, sp, 16 -; SPILL-O2-NEXT: vl2re8.v v8, (a0) # Unknown-size Folded Reload +; SPILL-O2-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload ; SPILL-O2-NEXT: csrr a0, vlenb ; SPILL-O2-NEXT: slli a0, a0, 1 ; SPILL-O2-NEXT: add sp, sp, a0 @@ -102,7 +102,7 @@ ; SPILL-O0-NEXT: #APP ; SPILL-O0-NEXT: #NO_APP ; SPILL-O0-NEXT: addi a0, sp, 16 -; SPILL-O0-NEXT: vl4re8.v v8, (a0) # Unknown-size Folded Reload +; SPILL-O0-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload ; SPILL-O0-NEXT: csrr a0, vlenb ; SPILL-O0-NEXT: slli a0, a0, 2 ; SPILL-O0-NEXT: add sp, sp, a0 @@ -120,7 +120,7 @@ ; SPILL-O2-NEXT: #APP ; SPILL-O2-NEXT: #NO_APP ; SPILL-O2-NEXT: addi a0, sp, 16 -; SPILL-O2-NEXT: vl4re8.v v8, (a0) # Unknown-size Folded Reload +; SPILL-O2-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload ; SPILL-O2-NEXT: csrr a0, vlenb ; SPILL-O2-NEXT: slli a0, a0, 2 ; SPILL-O2-NEXT: add sp, 
sp, a0 @@ -145,7 +145,7 @@ ; SPILL-O0-NEXT: #APP ; SPILL-O0-NEXT: #NO_APP ; SPILL-O0-NEXT: addi a0, sp, 16 -; SPILL-O0-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; SPILL-O0-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; SPILL-O0-NEXT: csrr a0, vlenb ; SPILL-O0-NEXT: slli a0, a0, 3 ; SPILL-O0-NEXT: add sp, sp, a0 @@ -163,7 +163,7 @@ ; SPILL-O2-NEXT: #APP ; SPILL-O2-NEXT: #NO_APP ; SPILL-O2-NEXT: addi a0, sp, 16 -; SPILL-O2-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; SPILL-O2-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; SPILL-O2-NEXT: csrr a0, vlenb ; SPILL-O2-NEXT: slli a0, a0, 3 ; SPILL-O2-NEXT: add sp, sp, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll --- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll @@ -131,7 +131,7 @@ ; SPILL-O0-NEXT: #APP ; SPILL-O0-NEXT: #NO_APP ; SPILL-O0-NEXT: addi a0, sp, 16 -; SPILL-O0-NEXT: vl2re8.v v8, (a0) # Unknown-size Folded Reload +; SPILL-O0-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload ; SPILL-O0-NEXT: csrr a0, vlenb ; SPILL-O0-NEXT: slli a0, a0, 1 ; SPILL-O0-NEXT: add sp, sp, a0 @@ -189,7 +189,7 @@ ; SPILL-O0-NEXT: #APP ; SPILL-O0-NEXT: #NO_APP ; SPILL-O0-NEXT: addi a0, sp, 16 -; SPILL-O0-NEXT: vl4re8.v v8, (a0) # Unknown-size Folded Reload +; SPILL-O0-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload ; SPILL-O0-NEXT: csrr a0, vlenb ; SPILL-O0-NEXT: slli a0, a0, 2 ; SPILL-O0-NEXT: add sp, sp, a0 @@ -247,7 +247,7 @@ ; SPILL-O0-NEXT: #APP ; SPILL-O0-NEXT: #NO_APP ; SPILL-O0-NEXT: addi a0, sp, 16 -; SPILL-O0-NEXT: vl2re8.v v8, (a0) # Unknown-size Folded Reload +; SPILL-O0-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload ; SPILL-O0-NEXT: csrr a0, vlenb ; SPILL-O0-NEXT: slli a0, a0, 1 ; SPILL-O0-NEXT: add sp, sp, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll @@ -1122,9 +1122,9 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmfeq.vv v16, v8, v24, v0.t ; CHECK-NEXT: add a0, a1, a1 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma @@ -2275,7 +2275,7 @@ ; CHECK-NEXT: slli a2, a2, 5 ; CHECK-NEXT: add a2, sp, a2 ; CHECK-NEXT: addi a2, a2, 16 -; CHECK-NEXT: vl8re8.v v16, (a2) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload ; CHECK-NEXT: vmfeq.vv v2, v16, v8, v0.t ; CHECK-NEXT: bltu a7, a3, .LBB171_4 ; CHECK-NEXT: # %bb.3: @@ -2312,12 +2312,12 @@ ; CHECK-NEXT: mul a0, a0, a2 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmfeq.vv v18, v24, v8, v0.t ; CHECK-NEXT: add a0, a1, a1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma @@ -2337,9 +2337,9 @@ ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: add 
a2, sp, a2 ; CHECK-NEXT: addi a2, a2, 16 -; CHECK-NEXT: vl8re8.v v8, (a2) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload ; CHECK-NEXT: addi a2, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload ; CHECK-NEXT: vmfeq.vv v16, v24, v8, v0.t ; CHECK-NEXT: add a2, a4, a1 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, tu, ma @@ -2354,13 +2354,13 @@ ; CHECK-NEXT: slli a0, a0, 5 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a2, 24 ; CHECK-NEXT: mul a0, a0, a2 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmfeq.vv v16, v8, v24, v0.t ; CHECK-NEXT: slli a0, a1, 1 ; CHECK-NEXT: add a0, a0, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll @@ -1195,9 +1195,9 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmseq.vv v16, v8, v24, v0.t ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: vmv1r.v v8, v1 @@ -2419,9 +2419,9 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmseq.vv v16, v8, v24, v0.t ; CHECK-NEXT: add a0, a1, a1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma diff --git a/llvm/test/CodeGen/RISCV/rvv/stack-coloring-scalablevec.mir b/llvm/test/CodeGen/RISCV/rvv/stack-coloring-scalablevec.mir --- a/llvm/test/CodeGen/RISCV/rvv/stack-coloring-scalablevec.mir +++ b/llvm/test/CodeGen/RISCV/rvv/stack-coloring-scalablevec.mir @@ -28,12 +28,12 @@ liveins: $v8, $v10, $x10, $x11 LIFETIME_START %stack.0 - PseudoVSPILL_M1 killed renamable $v8, %stack.0 :: (store 16 into %stack.0, align 16) - renamable $v8 = PseudoVRELOAD_M1 killed $x10 :: (load 16 from %stack.0, align 16) + VS1R_V killed renamable $v8, %stack.0 :: (store 16 into %stack.0, align 16) + renamable $v8 = VL1RE8_V killed $x10 :: (load 16 from %stack.0, align 16) LIFETIME_END %stack.0 LIFETIME_START %stack.1 - PseudoVSPILL_M2 killed renamable $v10m2, %stack.1 :: (store unknown-size into %stack.1, align 16) - renamable $v10m2 = PseudoVRELOAD_M2 killed $x11 :: (load unknown-size from %stack.1, align 16) + VS2R_V killed renamable $v10m2, %stack.1 :: (store unknown-size into %stack.1, align 16) + renamable $v10m2 = VL2RE8_V killed $x11 :: (load unknown-size from %stack.1, align 16) LIFETIME_END %stack.1 PseudoRET ... 
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll --- a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll @@ -771,7 +771,7 @@ ; CHECK-RV32-NEXT: add a1, a1, a3 ; CHECK-RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-RV32-NEXT: addi a0, sp, 16 -; CHECK-RV32-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-RV32-NEXT: vsse64.v v8, (a1), a2, v0.t ; CHECK-RV32-NEXT: csrr a0, vlenb ; CHECK-RV32-NEXT: slli a0, a0, 3 @@ -832,7 +832,7 @@ ; CHECK-RV64-NEXT: add a1, a1, a3 ; CHECK-RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-RV64-NEXT: addi a0, sp, 16 -; CHECK-RV64-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-RV64-NEXT: vsse64.v v8, (a1), a2, v0.t ; CHECK-RV64-NEXT: csrr a0, vlenb ; CHECK-RV64-NEXT: slli a0, a0, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll @@ -1249,12 +1249,12 @@ ; CHECK-NEXT: mul a0, a0, a2 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfmadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a2, 24 @@ -1272,21 +1272,21 @@ ; CHECK-NEXT: slli a0, a0, 5 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 24 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 40 ; CHECK-NEXT: mul a0, a0, a1 @@ -1336,7 +1336,7 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfmadd.vv v16, v8, v24 ; CHECK-NEXT: bltu a4, a1, .LBB93_2 ; CHECK-NEXT: # %bb.1: @@ -1347,9 +1347,9 @@ ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfmadd.vv v0, v24, v8 ; CHECK-NEXT: vmv.v.v v8, v0 ; 
CHECK-NEXT: csrr a0, vlenb diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll @@ -1249,12 +1249,12 @@ ; CHECK-NEXT: mul a0, a0, a2 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfmadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a2, 24 @@ -1272,21 +1272,21 @@ ; CHECK-NEXT: slli a0, a0, 5 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 24 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 40 ; CHECK-NEXT: mul a0, a0, a1 @@ -1336,7 +1336,7 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfmadd.vv v16, v8, v24 ; CHECK-NEXT: bltu a4, a1, .LBB93_2 ; CHECK-NEXT: # %bb.1: @@ -1347,9 +1347,9 @@ ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfmadd.vv v0, v24, v8 ; CHECK-NEXT: vmv.v.v v8, v0 ; CHECK-NEXT: csrr a0, vlenb diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll @@ -341,7 +341,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v16, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll @@ -341,7 +341,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded 
Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v16, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll @@ -113,7 +113,7 @@ ; CHECK-NEXT: and a2, a3, a2 ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: addi a2, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a2) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload ; CHECK-NEXT: vfncvt.f.f.w v20, v24, v0.t ; CHECK-NEXT: bltu a0, a1, .LBB7_2 ; CHECK-NEXT: # %bb.1: @@ -188,7 +188,7 @@ ; CHECK-NEXT: vsetvli zero, a4, e32, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v25 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfncvt.f.f.w v16, v8, v0.t ; CHECK-NEXT: bltu a2, a3, .LBB8_4 ; CHECK-NEXT: # %bb.3: @@ -204,7 +204,7 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfncvt.f.f.w v12, v24, v0.t ; CHECK-NEXT: bltu a2, a1, .LBB8_6 ; CHECK-NEXT: # %bb.5: @@ -216,7 +216,7 @@ ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfncvt.f.f.w v8, v24, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 24 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir --- a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir @@ -14,7 +14,7 @@ ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype - ; CHECK-NEXT: $v12m2 = PseudoVMV2R_V $v28m2 + ; CHECK-NEXT: $v12m2 = VMV2R_V $v28m2 $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype $v12m2 = COPY $v28m2 @@ -65,7 +65,7 @@ ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: $v28m4 = VL4RE32_V $x16 - ; CHECK-NEXT: $v12m4 = PseudoVMV4R_V $v28m4 + ; CHECK-NEXT: $v12m4 = VMV4R_V $v28m4 $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype $v28m4 = VL4RE32_V $x16 $v12m4 = COPY $v28m4 @@ -83,7 +83,7 @@ ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: $v28m4 = PseudoVMV_V_I_M4 0, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v4m4, $x0 = PseudoVLE32FF_V_M4 $x16, $noreg, 5 /* e32 */, implicit-def $vl - ; CHECK-NEXT: $v12m4 = PseudoVMV4R_V $v28m4 + ; CHECK-NEXT: $v12m4 = VMV4R_V $v28m4 $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype $v28m4 = PseudoVMV_V_I_M4 0, $noreg, 5, implicit $vl, implicit $vtype $v4m4,$x0 = PseudoVLE32FF_V_M4 $x16, $noreg, 5, implicit-def $vl @@ -106,7 +106,7 @@ ; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 $x18, $noreg, 4 /* e16 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 
$x0, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: $v4m4 = PseudoVLE32_V_M4 killed $x18, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype - ; CHECK-NEXT: $v12m4 = PseudoVMV4R_V $v28m4 + ; CHECK-NEXT: $v12m4 = VMV4R_V $v28m4 $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype $x15 = PseudoVSETVLI $x17, 73, implicit-def $vl, implicit-def $vtype @@ -156,7 +156,7 @@ ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 73 /* e16, m2, ta, mu */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 $x18, $noreg, 4 /* e16 */, implicit $vl, implicit $vtype - ; CHECK-NEXT: $v12m4 = PseudoVMV4R_V $v28m4 + ; CHECK-NEXT: $v12m4 = VMV4R_V $v28m4 $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype $x0 = PseudoVSETVLIX0 $x0, 73, implicit-def $vl, implicit-def $vtype @@ -177,7 +177,7 @@ ; CHECK-NEXT: $v26m2 = PseudoVLE16_V_M2 killed $x16, $noreg, 4 /* e16 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v8m2 = PseudoVLE16_V_M2 killed $x17, $noreg, 4 /* e16 */, implicit $vl, implicit $vtype ; CHECK-NEXT: early-clobber $v28m4 = PseudoVWADD_VV_M2 $v26m2, $v8m2, $noreg, 4 /* e16 */, implicit $vl, implicit $vtype - ; CHECK-NEXT: $v12m2 = PseudoVMV2R_V $v28m2 + ; CHECK-NEXT: $v12m2 = VMV2R_V $v28m2 $x15 = PseudoVSETIVLI 4, 73, implicit-def $vl, implicit-def $vtype $v26m2 = PseudoVLE16_V_M2 killed $x16, $noreg, 4, implicit $vl, implicit $vtype $v8m2 = PseudoVLE16_V_M2 killed $x17, $noreg, 4, implicit $vl, implicit $vtype @@ -198,7 +198,7 @@ ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 74 /* e16, m4, ta, mu */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v12m4 = PseudoVMV4R_V $v28m4 + ; CHECK-NEXT: $v12m4 = VMV4R_V $v28m4 $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype $x0 = PseudoVSETVLIX0 $x0, 74, implicit-def $vl, implicit-def $vtype @@ -215,14 +215,14 @@ ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: $x11 = PseudoVSETIVLI 1, 64 /* e8, m1, ta, mu */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: $v8 = PseudoVWREDSUM_VS_M1 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 3 /* e8 */, implicit $vl, implicit $vtype - ; CHECK-NEXT: $v26 = PseudoVMV1R_V killed $v8 + ; CHECK-NEXT: $v26 = VMV1R_V killed $v8 ; CHECK-NEXT: $x10 = PseudoVSETVLI killed renamable $x10, 75 /* e16, m8, ta, mu */, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v8m8 = PseudoVRELOAD_M8 killed $x10 + ; CHECK-NEXT: $v8m8 = VL8RE8_V killed $x10 $x11 = PseudoVSETIVLI 1, 64, implicit-def $vl, implicit-def $vtype $v8 = PseudoVWREDSUM_VS_M1 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 3, implicit $vl, implicit $vtype $v26 = COPY killed renamable $v8 $x10 = PseudoVSETVLI killed renamable $x10, 75, implicit-def $vl, implicit-def $vtype - $v8m8 = PseudoVRELOAD_M8 killed $x10 + $v8m8 = VL8RE8_V killed $x10 ... 
--- name: copy_zvlsseg_reg @@ -236,7 +236,7 @@ ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: $v8_v9 = PseudoVLSEG2E32_V_M1 killed $x16, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype - ; CHECK-NEXT: $v10 = PseudoVMV1R_V $v8 + ; CHECK-NEXT: $v10 = VMV1R_V $v8 $x15 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype $v8_v9 = PseudoVLSEG2E32_V_M1 killed $x16, $noreg, 5, implicit $vl, implicit $vtype $v10 = COPY $v8 @@ -271,7 +271,7 @@ ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 87 /* e32, mf2, ta, mu */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: $v28 = PseudoVLE32_V_MF2 killed $x16, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype - ; CHECK-NEXT: $v12 = PseudoVMV1R_V $v28 + ; CHECK-NEXT: $v12 = VMV1R_V $v28 $x15 = PseudoVSETVLI $x14, 87, implicit-def $vl, implicit-def $vtype $v28 = PseudoVLE32_V_MF2 killed $x16, $noreg, 5, implicit $vl, implicit $vtype $v12 = COPY $v28 @@ -290,14 +290,14 @@ ; CHECK-NEXT: $v8_v9_v10_v11_v12_v13_v14_v15 = PseudoVLSEG8E32_V_M1 killed $x12, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $x0 = PseudoVSETIVLI 10, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: $v15 = PseudoVLE32_V_M1 killed $x16, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype, implicit killed $v8_v9_v10_v11_v12_v13_v14_v15, implicit-def $v8_v9_v10_v11_v12_v13_v14_v15 - ; CHECK-NEXT: $v24 = PseudoVMV1R_V killed $v8 - ; CHECK-NEXT: $v25 = PseudoVMV1R_V killed $v9 - ; CHECK-NEXT: $v26 = PseudoVMV1R_V killed $v10 - ; CHECK-NEXT: $v27 = PseudoVMV1R_V killed $v11 - ; CHECK-NEXT: $v28 = PseudoVMV1R_V killed $v12 - ; CHECK-NEXT: $v29 = PseudoVMV1R_V killed $v13 - ; CHECK-NEXT: $v30 = PseudoVMV1R_V killed $v14 - ; CHECK-NEXT: $v31 = PseudoVMV1R_V killed $v15 + ; CHECK-NEXT: $v24 = VMV1R_V killed $v8 + ; CHECK-NEXT: $v25 = VMV1R_V killed $v9 + ; CHECK-NEXT: $v26 = VMV1R_V killed $v10 + ; CHECK-NEXT: $v27 = VMV1R_V killed $v11 + ; CHECK-NEXT: $v28 = VMV1R_V killed $v12 + ; CHECK-NEXT: $v29 = VMV1R_V killed $v13 + ; CHECK-NEXT: $v30 = VMV1R_V killed $v14 + ; CHECK-NEXT: $v31 = VMV1R_V killed $v15 $x0 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype $v8_v9_v10_v11_v12_v13_v14_v15 = PseudoVLSEG8E32_V_M1 killed $x12, $noreg, 5, implicit $vl, implicit $vtype $x0 = PseudoVSETIVLI 10, 80, implicit-def $vl, implicit-def $vtype diff --git a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll @@ -400,7 +400,7 @@ ; CHECK-NEXT: vsetvli zero, a3, e8, m8, tu, ma ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmerge.vvm v8, v8, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll @@ -2083,7 +2083,7 @@ ; RV64-NEXT: vslidedown.vx v0, v0, a1 ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV64-NEXT: addi a0, sp, 16 -; RV64-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v16, (zero), v8, v0.t ; RV64-NEXT: csrr a0, vlenb 
; RV64-NEXT: slli a0, a0, 3 @@ -2161,9 +2161,9 @@ ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: addi a1, sp, 16 -; RV64-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 @@ -2242,9 +2242,9 @@ ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: addi a1, sp, 16 -; RV64-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 @@ -2324,9 +2324,9 @@ ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 16 -; RV64-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: addi a1, sp, 16 -; RV64-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload +; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 diff --git a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll --- a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll @@ -451,7 +451,7 @@ ; CHECK-NEXT: add a1, a1, a3 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vse64.v v8, (a1), v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll @@ -413,7 +413,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmseq.vi v24, v16, 0 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmseq.vi v0, v16, 0 ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 @@ -422,7 +422,7 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll @@ -386,9 +386,9 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 
4 @@ -443,9 +443,9 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 @@ -729,9 +729,9 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll @@ -333,7 +333,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfncvt.f.x.w v8, v16, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll @@ -333,7 +333,7 @@ ; CHECK-NEXT: vsetvli zero, a4, e32, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v25 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t ; CHECK-NEXT: bltu a2, a3, .LBB17_4 ; CHECK-NEXT: # %bb.3: @@ -349,7 +349,7 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vnsrl.wi v12, v24, 0, v0.t ; CHECK-NEXT: bltu a2, a1, .LBB17_6 ; CHECK-NEXT: # %bb.5: @@ -361,7 +361,7 @@ ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vnsrl.wi v8, v24, 0, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 24 diff --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll @@ -333,7 +333,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfncvt.f.xu.w v8, v16, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir b/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir --- a/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir +++ b/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir @@ 
-171,7 +171,7 @@ ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 69 /* e8, mf8, ta, mu */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: renamable $v8 = PseudoVMV_V_I_MF8 0, 2, 3 /* e8 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $x10 = ADDI $x2, 32 - ; CHECK-NEXT: PseudoVSPILL_M1 killed renamable $v8, killed $x10 :: (store unknown-size into %stack.1, align 8) + ; CHECK-NEXT: VS1R_V killed renamable $v8, killed $x10 :: (store unknown-size into %stack.1, align 8) ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1.while.cond: ; CHECK-NEXT: successors: %bb.2(0x30000000), %bb.1(0x50000000) @@ -186,7 +186,7 @@ ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 69 /* e8, mf8, ta, mu */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: $x10 = ADDI $x2, 32 - ; CHECK-NEXT: renamable $v8 = PseudoVRELOAD_M1 killed $x10 :: (load unknown-size from %stack.1, align 8) + ; CHECK-NEXT: renamable $v8 = VL1RE8_V killed $x10 :: (load unknown-size from %stack.1, align 8) ; CHECK-NEXT: PseudoVSE8_V_MF8 killed renamable $v8, renamable $x8, 2, 3 /* e8 */, implicit $vl, implicit $vtype :: (store (s16) into %ir.0, align 1) ; CHECK-NEXT: $x10 = COPY renamable $x9 ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) @fprintf, csr_ilp32d_lp64d, implicit-def dead $x1, implicit killed $x10, implicit-def $x2, implicit-def dead $x10 @@ -201,7 +201,7 @@ SD killed renamable $x16, %fixed-stack.1, 0 :: (store (s64) into %fixed-stack.1, align 16) dead $x0 = PseudoVSETIVLI 2, 69, implicit-def $vl, implicit-def $vtype renamable $v8 = PseudoVMV_V_I_MF8 0, 2, 3, implicit $vl, implicit $vtype - PseudoVSPILL_M1 killed renamable $v8, %stack.1 :: (store unknown-size into %stack.1, align 8) + VS1R_V killed renamable $v8, %stack.1 :: (store unknown-size into %stack.1, align 8) bb.1.while.cond: successors: %bb.2(0x30000000), %bb.1(0x50000000) @@ -215,7 +215,7 @@ liveins: $x8, $x9 dead $x0 = PseudoVSETIVLI 2, 69, implicit-def $vl, implicit-def $vtype - renamable $v8 = PseudoVRELOAD_M1 %stack.1 :: (load unknown-size from %stack.1, align 8) + renamable $v8 = VL1RE8_V %stack.1 :: (load unknown-size from %stack.1, align 8) PseudoVSE8_V_MF8 killed renamable $v8, renamable $x8, 2, 3, implicit $vl, implicit $vtype :: (store (s16) into %ir.0, align 1) ADJCALLSTACKDOWN 0, 0, implicit-def dead $x2, implicit $x2 $x10 = COPY renamable $x9 diff --git a/llvm/test/CodeGen/RISCV/rvv/wrong-stack-slot-rv32.mir b/llvm/test/CodeGen/RISCV/rvv/wrong-stack-slot-rv32.mir --- a/llvm/test/CodeGen/RISCV/rvv/wrong-stack-slot-rv32.mir +++ b/llvm/test/CodeGen/RISCV/rvv/wrong-stack-slot-rv32.mir @@ -69,7 +69,7 @@ $x25 = COPY $x10 SW renamable $x25, %stack.0, 0 :: (store (s32) into %stack.0) - PseudoVSPILL_M2 renamable $v30m2, %stack.1 :: (store unknown-size into %stack.1, align 8) + VS2R_V renamable $v30m2, %stack.1 :: (store unknown-size into %stack.1, align 8) PseudoRET ... @@ -88,7 +88,7 @@ $x25 = COPY $x10 SW renamable $x25, %stack.0, 0 :: (store (s32) into %stack.0) - PseudoVSPILL_M2 renamable $v30m2, %stack.1 :: (store unknown-size into %stack.1, align 8) + VS2R_V renamable $v30m2, %stack.1 :: (store unknown-size into %stack.1, align 8) PseudoRET ... 
diff --git a/llvm/test/CodeGen/RISCV/rvv/wrong-stack-slot-rv64.mir b/llvm/test/CodeGen/RISCV/rvv/wrong-stack-slot-rv64.mir --- a/llvm/test/CodeGen/RISCV/rvv/wrong-stack-slot-rv64.mir +++ b/llvm/test/CodeGen/RISCV/rvv/wrong-stack-slot-rv64.mir @@ -45,7 +45,7 @@ $x25 = COPY $x10 SD renamable $x25, %stack.0, 0 :: (store (s64) into %stack.0) - PseudoVSPILL_M2 renamable $v30m2, %stack.1 :: (store unknown-size into %stack.1, align 8) + VS2R_V renamable $v30m2, %stack.1 :: (store unknown-size into %stack.1, align 8) PseudoRET ... diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir --- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir +++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-copy.mir @@ -7,30 +7,30 @@ body: | bb.0: ; CHECK-LABEL: name: copy_zvlsseg_N2 - ; CHECK: $v2 = PseudoVMV1R_V $v4 - ; CHECK-NEXT: $v3 = PseudoVMV1R_V $v5 - ; CHECK-NEXT: $v3 = PseudoVMV1R_V $v4 - ; CHECK-NEXT: $v4 = PseudoVMV1R_V $v5 - ; CHECK-NEXT: $v6 = PseudoVMV1R_V $v5 - ; CHECK-NEXT: $v5 = PseudoVMV1R_V $v4 - ; CHECK-NEXT: $v6 = PseudoVMV1R_V $v4 - ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v5 - ; CHECK-NEXT: $v0m2 = PseudoVMV2R_V $v4m2 - ; CHECK-NEXT: $v2m2 = PseudoVMV2R_V $v6m2 - ; CHECK-NEXT: $v2m2 = PseudoVMV2R_V $v4m2 - ; CHECK-NEXT: $v4m2 = PseudoVMV2R_V $v6m2 - ; CHECK-NEXT: $v8m2 = PseudoVMV2R_V $v6m2 - ; CHECK-NEXT: $v6m2 = PseudoVMV2R_V $v4m2 - ; CHECK-NEXT: $v8m2 = PseudoVMV2R_V $v4m2 - ; CHECK-NEXT: $v10m2 = PseudoVMV2R_V $v6m2 - ; CHECK-NEXT: $v0m4 = PseudoVMV4R_V $v8m4 - ; CHECK-NEXT: $v4m4 = PseudoVMV4R_V $v12m4 - ; CHECK-NEXT: $v4m4 = PseudoVMV4R_V $v8m4 - ; CHECK-NEXT: $v8m4 = PseudoVMV4R_V $v12m4 - ; CHECK-NEXT: $v16m4 = PseudoVMV4R_V $v12m4 - ; CHECK-NEXT: $v12m4 = PseudoVMV4R_V $v8m4 - ; CHECK-NEXT: $v16m4 = PseudoVMV4R_V $v8m4 - ; CHECK-NEXT: $v20m4 = PseudoVMV4R_V $v12m4 + ; CHECK: $v2 = VMV1R_V $v4 + ; CHECK-NEXT: $v3 = VMV1R_V $v5 + ; CHECK-NEXT: $v3 = VMV1R_V $v4 + ; CHECK-NEXT: $v4 = VMV1R_V $v5 + ; CHECK-NEXT: $v6 = VMV1R_V $v5 + ; CHECK-NEXT: $v5 = VMV1R_V $v4 + ; CHECK-NEXT: $v6 = VMV1R_V $v4 + ; CHECK-NEXT: $v7 = VMV1R_V $v5 + ; CHECK-NEXT: $v0m2 = VMV2R_V $v4m2 + ; CHECK-NEXT: $v2m2 = VMV2R_V $v6m2 + ; CHECK-NEXT: $v2m2 = VMV2R_V $v4m2 + ; CHECK-NEXT: $v4m2 = VMV2R_V $v6m2 + ; CHECK-NEXT: $v8m2 = VMV2R_V $v6m2 + ; CHECK-NEXT: $v6m2 = VMV2R_V $v4m2 + ; CHECK-NEXT: $v8m2 = VMV2R_V $v4m2 + ; CHECK-NEXT: $v10m2 = VMV2R_V $v6m2 + ; CHECK-NEXT: $v0m4 = VMV4R_V $v8m4 + ; CHECK-NEXT: $v4m4 = VMV4R_V $v12m4 + ; CHECK-NEXT: $v4m4 = VMV4R_V $v8m4 + ; CHECK-NEXT: $v8m4 = VMV4R_V $v12m4 + ; CHECK-NEXT: $v16m4 = VMV4R_V $v12m4 + ; CHECK-NEXT: $v12m4 = VMV4R_V $v8m4 + ; CHECK-NEXT: $v16m4 = VMV4R_V $v8m4 + ; CHECK-NEXT: $v20m4 = VMV4R_V $v12m4 $v2_v3 = COPY $v4_v5 $v3_v4 = COPY $v4_v5 $v5_v6 = COPY $v4_v5 @@ -51,33 +51,33 @@ body: | bb.0: ; CHECK-LABEL: name: copy_zvlsseg_N3 - ; CHECK: $v2 = PseudoVMV1R_V $v5 - ; CHECK-NEXT: $v3 = PseudoVMV1R_V $v6 - ; CHECK-NEXT: $v4 = PseudoVMV1R_V $v7 - ; CHECK-NEXT: $v3 = PseudoVMV1R_V $v5 - ; CHECK-NEXT: $v4 = PseudoVMV1R_V $v6 - ; CHECK-NEXT: $v5 = PseudoVMV1R_V $v7 - ; CHECK-NEXT: $v4 = PseudoVMV1R_V $v5 - ; CHECK-NEXT: $v5 = PseudoVMV1R_V $v6 - ; CHECK-NEXT: $v6 = PseudoVMV1R_V $v7 - ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v7 - ; CHECK-NEXT: $v8 = PseudoVMV1R_V $v6 - ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v5 - ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v5 - ; CHECK-NEXT: $v10 = PseudoVMV1R_V $v6 - ; CHECK-NEXT: $v11 = PseudoVMV1R_V $v7 - ; CHECK-NEXT: $v0m2 = PseudoVMV2R_V $v6m2 - ; CHECK-NEXT: $v2m2 = PseudoVMV2R_V $v8m2 - ; CHECK-NEXT: 
$v4m2 = PseudoVMV2R_V $v10m2 - ; CHECK-NEXT: $v2m2 = PseudoVMV2R_V $v6m2 - ; CHECK-NEXT: $v4m2 = PseudoVMV2R_V $v8m2 - ; CHECK-NEXT: $v6m2 = PseudoVMV2R_V $v10m2 - ; CHECK-NEXT: $v14m2 = PseudoVMV2R_V $v10m2 - ; CHECK-NEXT: $v12m2 = PseudoVMV2R_V $v8m2 - ; CHECK-NEXT: $v10m2 = PseudoVMV2R_V $v6m2 - ; CHECK-NEXT: $v12m2 = PseudoVMV2R_V $v6m2 - ; CHECK-NEXT: $v14m2 = PseudoVMV2R_V $v8m2 - ; CHECK-NEXT: $v16m2 = PseudoVMV2R_V $v10m2 + ; CHECK: $v2 = VMV1R_V $v5 + ; CHECK-NEXT: $v3 = VMV1R_V $v6 + ; CHECK-NEXT: $v4 = VMV1R_V $v7 + ; CHECK-NEXT: $v3 = VMV1R_V $v5 + ; CHECK-NEXT: $v4 = VMV1R_V $v6 + ; CHECK-NEXT: $v5 = VMV1R_V $v7 + ; CHECK-NEXT: $v4 = VMV1R_V $v5 + ; CHECK-NEXT: $v5 = VMV1R_V $v6 + ; CHECK-NEXT: $v6 = VMV1R_V $v7 + ; CHECK-NEXT: $v9 = VMV1R_V $v7 + ; CHECK-NEXT: $v8 = VMV1R_V $v6 + ; CHECK-NEXT: $v7 = VMV1R_V $v5 + ; CHECK-NEXT: $v9 = VMV1R_V $v5 + ; CHECK-NEXT: $v10 = VMV1R_V $v6 + ; CHECK-NEXT: $v11 = VMV1R_V $v7 + ; CHECK-NEXT: $v0m2 = VMV2R_V $v6m2 + ; CHECK-NEXT: $v2m2 = VMV2R_V $v8m2 + ; CHECK-NEXT: $v4m2 = VMV2R_V $v10m2 + ; CHECK-NEXT: $v2m2 = VMV2R_V $v6m2 + ; CHECK-NEXT: $v4m2 = VMV2R_V $v8m2 + ; CHECK-NEXT: $v6m2 = VMV2R_V $v10m2 + ; CHECK-NEXT: $v14m2 = VMV2R_V $v10m2 + ; CHECK-NEXT: $v12m2 = VMV2R_V $v8m2 + ; CHECK-NEXT: $v10m2 = VMV2R_V $v6m2 + ; CHECK-NEXT: $v12m2 = VMV2R_V $v6m2 + ; CHECK-NEXT: $v14m2 = VMV2R_V $v8m2 + ; CHECK-NEXT: $v16m2 = VMV2R_V $v10m2 $v2_v3_v4 = COPY $v5_v6_v7 $v3_v4_v5 = COPY $v5_v6_v7 $v4_v5_v6 = COPY $v5_v6_v7 @@ -94,38 +94,38 @@ body: | bb.0: ; CHECK-LABEL: name: copy_zvlsseg_N4 - ; CHECK: $v6 = PseudoVMV1R_V $v10 - ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v11 - ; CHECK-NEXT: $v8 = PseudoVMV1R_V $v12 - ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v13 - ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v10 - ; CHECK-NEXT: $v8 = PseudoVMV1R_V $v11 - ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v12 - ; CHECK-NEXT: $v10 = PseudoVMV1R_V $v13 - ; CHECK-NEXT: $v16 = PseudoVMV1R_V $v13 - ; CHECK-NEXT: $v15 = PseudoVMV1R_V $v12 - ; CHECK-NEXT: $v14 = PseudoVMV1R_V $v11 - ; CHECK-NEXT: $v13 = PseudoVMV1R_V $v10 - ; CHECK-NEXT: $v14 = PseudoVMV1R_V $v10 - ; CHECK-NEXT: $v15 = PseudoVMV1R_V $v11 - ; CHECK-NEXT: $v16 = PseudoVMV1R_V $v12 - ; CHECK-NEXT: $v17 = PseudoVMV1R_V $v13 - ; CHECK-NEXT: $v2m2 = PseudoVMV2R_V $v10m2 - ; CHECK-NEXT: $v4m2 = PseudoVMV2R_V $v12m2 - ; CHECK-NEXT: $v6m2 = PseudoVMV2R_V $v14m2 - ; CHECK-NEXT: $v8m2 = PseudoVMV2R_V $v16m2 - ; CHECK-NEXT: $v4m2 = PseudoVMV2R_V $v10m2 - ; CHECK-NEXT: $v6m2 = PseudoVMV2R_V $v12m2 - ; CHECK-NEXT: $v8m2 = PseudoVMV2R_V $v14m2 - ; CHECK-NEXT: $v10m2 = PseudoVMV2R_V $v16m2 - ; CHECK-NEXT: $v22m2 = PseudoVMV2R_V $v16m2 - ; CHECK-NEXT: $v20m2 = PseudoVMV2R_V $v14m2 - ; CHECK-NEXT: $v18m2 = PseudoVMV2R_V $v12m2 - ; CHECK-NEXT: $v16m2 = PseudoVMV2R_V $v10m2 - ; CHECK-NEXT: $v18m2 = PseudoVMV2R_V $v10m2 - ; CHECK-NEXT: $v20m2 = PseudoVMV2R_V $v12m2 - ; CHECK-NEXT: $v22m2 = PseudoVMV2R_V $v14m2 - ; CHECK-NEXT: $v24m2 = PseudoVMV2R_V $v16m2 + ; CHECK: $v6 = VMV1R_V $v10 + ; CHECK-NEXT: $v7 = VMV1R_V $v11 + ; CHECK-NEXT: $v8 = VMV1R_V $v12 + ; CHECK-NEXT: $v9 = VMV1R_V $v13 + ; CHECK-NEXT: $v7 = VMV1R_V $v10 + ; CHECK-NEXT: $v8 = VMV1R_V $v11 + ; CHECK-NEXT: $v9 = VMV1R_V $v12 + ; CHECK-NEXT: $v10 = VMV1R_V $v13 + ; CHECK-NEXT: $v16 = VMV1R_V $v13 + ; CHECK-NEXT: $v15 = VMV1R_V $v12 + ; CHECK-NEXT: $v14 = VMV1R_V $v11 + ; CHECK-NEXT: $v13 = VMV1R_V $v10 + ; CHECK-NEXT: $v14 = VMV1R_V $v10 + ; CHECK-NEXT: $v15 = VMV1R_V $v11 + ; CHECK-NEXT: $v16 = VMV1R_V $v12 + ; CHECK-NEXT: $v17 = VMV1R_V $v13 + ; CHECK-NEXT: $v2m2 = VMV2R_V $v10m2 + ; 
CHECK-NEXT: $v4m2 = VMV2R_V $v12m2 + ; CHECK-NEXT: $v6m2 = VMV2R_V $v14m2 + ; CHECK-NEXT: $v8m2 = VMV2R_V $v16m2 + ; CHECK-NEXT: $v4m2 = VMV2R_V $v10m2 + ; CHECK-NEXT: $v6m2 = VMV2R_V $v12m2 + ; CHECK-NEXT: $v8m2 = VMV2R_V $v14m2 + ; CHECK-NEXT: $v10m2 = VMV2R_V $v16m2 + ; CHECK-NEXT: $v22m2 = VMV2R_V $v16m2 + ; CHECK-NEXT: $v20m2 = VMV2R_V $v14m2 + ; CHECK-NEXT: $v18m2 = VMV2R_V $v12m2 + ; CHECK-NEXT: $v16m2 = VMV2R_V $v10m2 + ; CHECK-NEXT: $v18m2 = VMV2R_V $v10m2 + ; CHECK-NEXT: $v20m2 = VMV2R_V $v12m2 + ; CHECK-NEXT: $v22m2 = VMV2R_V $v14m2 + ; CHECK-NEXT: $v24m2 = VMV2R_V $v16m2 $v6_v7_v8_v9 = COPY $v10_v11_v12_v13 $v7_v8_v9_v10 = COPY $v10_v11_v12_v13 $v13_v14_v15_v16 = COPY $v10_v11_v12_v13 @@ -141,26 +141,26 @@ body: | bb.0: ; CHECK-LABEL: name: copy_zvlsseg_N5 - ; CHECK: $v5 = PseudoVMV1R_V $v10 - ; CHECK-NEXT: $v6 = PseudoVMV1R_V $v11 - ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v12 - ; CHECK-NEXT: $v8 = PseudoVMV1R_V $v13 - ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v14 - ; CHECK-NEXT: $v6 = PseudoVMV1R_V $v10 - ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v11 - ; CHECK-NEXT: $v8 = PseudoVMV1R_V $v12 - ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v13 - ; CHECK-NEXT: $v10 = PseudoVMV1R_V $v14 - ; CHECK-NEXT: $v18 = PseudoVMV1R_V $v14 - ; CHECK-NEXT: $v17 = PseudoVMV1R_V $v13 - ; CHECK-NEXT: $v16 = PseudoVMV1R_V $v12 - ; CHECK-NEXT: $v15 = PseudoVMV1R_V $v11 - ; CHECK-NEXT: $v14 = PseudoVMV1R_V $v10 - ; CHECK-NEXT: $v15 = PseudoVMV1R_V $v10 - ; CHECK-NEXT: $v16 = PseudoVMV1R_V $v11 - ; CHECK-NEXT: $v17 = PseudoVMV1R_V $v12 - ; CHECK-NEXT: $v18 = PseudoVMV1R_V $v13 - ; CHECK-NEXT: $v19 = PseudoVMV1R_V $v14 + ; CHECK: $v5 = VMV1R_V $v10 + ; CHECK-NEXT: $v6 = VMV1R_V $v11 + ; CHECK-NEXT: $v7 = VMV1R_V $v12 + ; CHECK-NEXT: $v8 = VMV1R_V $v13 + ; CHECK-NEXT: $v9 = VMV1R_V $v14 + ; CHECK-NEXT: $v6 = VMV1R_V $v10 + ; CHECK-NEXT: $v7 = VMV1R_V $v11 + ; CHECK-NEXT: $v8 = VMV1R_V $v12 + ; CHECK-NEXT: $v9 = VMV1R_V $v13 + ; CHECK-NEXT: $v10 = VMV1R_V $v14 + ; CHECK-NEXT: $v18 = VMV1R_V $v14 + ; CHECK-NEXT: $v17 = VMV1R_V $v13 + ; CHECK-NEXT: $v16 = VMV1R_V $v12 + ; CHECK-NEXT: $v15 = VMV1R_V $v11 + ; CHECK-NEXT: $v14 = VMV1R_V $v10 + ; CHECK-NEXT: $v15 = VMV1R_V $v10 + ; CHECK-NEXT: $v16 = VMV1R_V $v11 + ; CHECK-NEXT: $v17 = VMV1R_V $v12 + ; CHECK-NEXT: $v18 = VMV1R_V $v13 + ; CHECK-NEXT: $v19 = VMV1R_V $v14 $v5_v6_v7_v8_v9 = COPY $v10_v11_v12_v13_v14 $v6_v7_v8_v9_v10 = COPY $v10_v11_v12_v13_v14 $v14_v15_v16_v17_v18 = COPY $v10_v11_v12_v13_v14 @@ -171,30 +171,30 @@ body: | bb.0: ; CHECK-LABEL: name: copy_zvlsseg_N6 - ; CHECK: $v4 = PseudoVMV1R_V $v10 - ; CHECK-NEXT: $v5 = PseudoVMV1R_V $v11 - ; CHECK-NEXT: $v6 = PseudoVMV1R_V $v12 - ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v13 - ; CHECK-NEXT: $v8 = PseudoVMV1R_V $v14 - ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v15 - ; CHECK-NEXT: $v5 = PseudoVMV1R_V $v10 - ; CHECK-NEXT: $v6 = PseudoVMV1R_V $v11 - ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v12 - ; CHECK-NEXT: $v8 = PseudoVMV1R_V $v13 - ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v14 - ; CHECK-NEXT: $v10 = PseudoVMV1R_V $v15 - ; CHECK-NEXT: $v20 = PseudoVMV1R_V $v15 - ; CHECK-NEXT: $v19 = PseudoVMV1R_V $v14 - ; CHECK-NEXT: $v18 = PseudoVMV1R_V $v13 - ; CHECK-NEXT: $v17 = PseudoVMV1R_V $v12 - ; CHECK-NEXT: $v16 = PseudoVMV1R_V $v11 - ; CHECK-NEXT: $v15 = PseudoVMV1R_V $v10 - ; CHECK-NEXT: $v16 = PseudoVMV1R_V $v10 - ; CHECK-NEXT: $v17 = PseudoVMV1R_V $v11 - ; CHECK-NEXT: $v18 = PseudoVMV1R_V $v12 - ; CHECK-NEXT: $v19 = PseudoVMV1R_V $v13 - ; CHECK-NEXT: $v20 = PseudoVMV1R_V $v14 - ; CHECK-NEXT: $v21 = PseudoVMV1R_V $v15 + ; CHECK: $v4 = VMV1R_V $v10 + ; 
CHECK-NEXT: $v5 = VMV1R_V $v11 + ; CHECK-NEXT: $v6 = VMV1R_V $v12 + ; CHECK-NEXT: $v7 = VMV1R_V $v13 + ; CHECK-NEXT: $v8 = VMV1R_V $v14 + ; CHECK-NEXT: $v9 = VMV1R_V $v15 + ; CHECK-NEXT: $v5 = VMV1R_V $v10 + ; CHECK-NEXT: $v6 = VMV1R_V $v11 + ; CHECK-NEXT: $v7 = VMV1R_V $v12 + ; CHECK-NEXT: $v8 = VMV1R_V $v13 + ; CHECK-NEXT: $v9 = VMV1R_V $v14 + ; CHECK-NEXT: $v10 = VMV1R_V $v15 + ; CHECK-NEXT: $v20 = VMV1R_V $v15 + ; CHECK-NEXT: $v19 = VMV1R_V $v14 + ; CHECK-NEXT: $v18 = VMV1R_V $v13 + ; CHECK-NEXT: $v17 = VMV1R_V $v12 + ; CHECK-NEXT: $v16 = VMV1R_V $v11 + ; CHECK-NEXT: $v15 = VMV1R_V $v10 + ; CHECK-NEXT: $v16 = VMV1R_V $v10 + ; CHECK-NEXT: $v17 = VMV1R_V $v11 + ; CHECK-NEXT: $v18 = VMV1R_V $v12 + ; CHECK-NEXT: $v19 = VMV1R_V $v13 + ; CHECK-NEXT: $v20 = VMV1R_V $v14 + ; CHECK-NEXT: $v21 = VMV1R_V $v15 $v4_v5_v6_v7_v8_v9 = COPY $v10_v11_v12_v13_v14_v15 $v5_v6_v7_v8_v9_v10 = COPY $v10_v11_v12_v13_v14_v15 $v15_v16_v17_v18_v19_v20 = COPY $v10_v11_v12_v13_v14_v15 @@ -205,34 +205,34 @@ body: | bb.0: ; CHECK-LABEL: name: copy_zvlsseg_N7 - ; CHECK: $v3 = PseudoVMV1R_V $v10 - ; CHECK-NEXT: $v4 = PseudoVMV1R_V $v11 - ; CHECK-NEXT: $v5 = PseudoVMV1R_V $v12 - ; CHECK-NEXT: $v6 = PseudoVMV1R_V $v13 - ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v14 - ; CHECK-NEXT: $v8 = PseudoVMV1R_V $v15 - ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v16 - ; CHECK-NEXT: $v4 = PseudoVMV1R_V $v10 - ; CHECK-NEXT: $v5 = PseudoVMV1R_V $v11 - ; CHECK-NEXT: $v6 = PseudoVMV1R_V $v12 - ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v13 - ; CHECK-NEXT: $v8 = PseudoVMV1R_V $v14 - ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v15 - ; CHECK-NEXT: $v10 = PseudoVMV1R_V $v16 - ; CHECK-NEXT: $v22 = PseudoVMV1R_V $v16 - ; CHECK-NEXT: $v21 = PseudoVMV1R_V $v15 - ; CHECK-NEXT: $v20 = PseudoVMV1R_V $v14 - ; CHECK-NEXT: $v19 = PseudoVMV1R_V $v13 - ; CHECK-NEXT: $v18 = PseudoVMV1R_V $v12 - ; CHECK-NEXT: $v17 = PseudoVMV1R_V $v11 - ; CHECK-NEXT: $v16 = PseudoVMV1R_V $v10 - ; CHECK-NEXT: $v17 = PseudoVMV1R_V $v10 - ; CHECK-NEXT: $v18 = PseudoVMV1R_V $v11 - ; CHECK-NEXT: $v19 = PseudoVMV1R_V $v12 - ; CHECK-NEXT: $v20 = PseudoVMV1R_V $v13 - ; CHECK-NEXT: $v21 = PseudoVMV1R_V $v14 - ; CHECK-NEXT: $v22 = PseudoVMV1R_V $v15 - ; CHECK-NEXT: $v23 = PseudoVMV1R_V $v16 + ; CHECK: $v3 = VMV1R_V $v10 + ; CHECK-NEXT: $v4 = VMV1R_V $v11 + ; CHECK-NEXT: $v5 = VMV1R_V $v12 + ; CHECK-NEXT: $v6 = VMV1R_V $v13 + ; CHECK-NEXT: $v7 = VMV1R_V $v14 + ; CHECK-NEXT: $v8 = VMV1R_V $v15 + ; CHECK-NEXT: $v9 = VMV1R_V $v16 + ; CHECK-NEXT: $v4 = VMV1R_V $v10 + ; CHECK-NEXT: $v5 = VMV1R_V $v11 + ; CHECK-NEXT: $v6 = VMV1R_V $v12 + ; CHECK-NEXT: $v7 = VMV1R_V $v13 + ; CHECK-NEXT: $v8 = VMV1R_V $v14 + ; CHECK-NEXT: $v9 = VMV1R_V $v15 + ; CHECK-NEXT: $v10 = VMV1R_V $v16 + ; CHECK-NEXT: $v22 = VMV1R_V $v16 + ; CHECK-NEXT: $v21 = VMV1R_V $v15 + ; CHECK-NEXT: $v20 = VMV1R_V $v14 + ; CHECK-NEXT: $v19 = VMV1R_V $v13 + ; CHECK-NEXT: $v18 = VMV1R_V $v12 + ; CHECK-NEXT: $v17 = VMV1R_V $v11 + ; CHECK-NEXT: $v16 = VMV1R_V $v10 + ; CHECK-NEXT: $v17 = VMV1R_V $v10 + ; CHECK-NEXT: $v18 = VMV1R_V $v11 + ; CHECK-NEXT: $v19 = VMV1R_V $v12 + ; CHECK-NEXT: $v20 = VMV1R_V $v13 + ; CHECK-NEXT: $v21 = VMV1R_V $v14 + ; CHECK-NEXT: $v22 = VMV1R_V $v15 + ; CHECK-NEXT: $v23 = VMV1R_V $v16 $v3_v4_v5_v6_v7_v8_v9 = COPY $v10_v11_v12_v13_v14_v15_v16 $v4_v5_v6_v7_v8_v9_v10 = COPY $v10_v11_v12_v13_v14_v15_v16 $v16_v17_v18_v19_v20_v21_v22 = COPY $v10_v11_v12_v13_v14_v15_v16 @@ -243,38 +243,38 @@ body: | bb.0: ; CHECK-LABEL: name: copy_zvlsseg_N8 - ; CHECK: $v2 = PseudoVMV1R_V $v10 - ; CHECK-NEXT: $v3 = PseudoVMV1R_V $v11 - ; CHECK-NEXT: $v4 = 
PseudoVMV1R_V $v12 - ; CHECK-NEXT: $v5 = PseudoVMV1R_V $v13 - ; CHECK-NEXT: $v6 = PseudoVMV1R_V $v14 - ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v15 - ; CHECK-NEXT: $v8 = PseudoVMV1R_V $v16 - ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v17 - ; CHECK-NEXT: $v3 = PseudoVMV1R_V $v10 - ; CHECK-NEXT: $v4 = PseudoVMV1R_V $v11 - ; CHECK-NEXT: $v5 = PseudoVMV1R_V $v12 - ; CHECK-NEXT: $v6 = PseudoVMV1R_V $v13 - ; CHECK-NEXT: $v7 = PseudoVMV1R_V $v14 - ; CHECK-NEXT: $v8 = PseudoVMV1R_V $v15 - ; CHECK-NEXT: $v9 = PseudoVMV1R_V $v16 - ; CHECK-NEXT: $v10 = PseudoVMV1R_V $v17 - ; CHECK-NEXT: $v24 = PseudoVMV1R_V $v17 - ; CHECK-NEXT: $v23 = PseudoVMV1R_V $v16 - ; CHECK-NEXT: $v22 = PseudoVMV1R_V $v15 - ; CHECK-NEXT: $v21 = PseudoVMV1R_V $v14 - ; CHECK-NEXT: $v20 = PseudoVMV1R_V $v13 - ; CHECK-NEXT: $v19 = PseudoVMV1R_V $v12 - ; CHECK-NEXT: $v18 = PseudoVMV1R_V $v11 - ; CHECK-NEXT: $v17 = PseudoVMV1R_V $v10 - ; CHECK-NEXT: $v18 = PseudoVMV1R_V $v10 - ; CHECK-NEXT: $v19 = PseudoVMV1R_V $v11 - ; CHECK-NEXT: $v20 = PseudoVMV1R_V $v12 - ; CHECK-NEXT: $v21 = PseudoVMV1R_V $v13 - ; CHECK-NEXT: $v22 = PseudoVMV1R_V $v14 - ; CHECK-NEXT: $v23 = PseudoVMV1R_V $v15 - ; CHECK-NEXT: $v24 = PseudoVMV1R_V $v16 - ; CHECK-NEXT: $v25 = PseudoVMV1R_V $v17 + ; CHECK: $v2 = VMV1R_V $v10 + ; CHECK-NEXT: $v3 = VMV1R_V $v11 + ; CHECK-NEXT: $v4 = VMV1R_V $v12 + ; CHECK-NEXT: $v5 = VMV1R_V $v13 + ; CHECK-NEXT: $v6 = VMV1R_V $v14 + ; CHECK-NEXT: $v7 = VMV1R_V $v15 + ; CHECK-NEXT: $v8 = VMV1R_V $v16 + ; CHECK-NEXT: $v9 = VMV1R_V $v17 + ; CHECK-NEXT: $v3 = VMV1R_V $v10 + ; CHECK-NEXT: $v4 = VMV1R_V $v11 + ; CHECK-NEXT: $v5 = VMV1R_V $v12 + ; CHECK-NEXT: $v6 = VMV1R_V $v13 + ; CHECK-NEXT: $v7 = VMV1R_V $v14 + ; CHECK-NEXT: $v8 = VMV1R_V $v15 + ; CHECK-NEXT: $v9 = VMV1R_V $v16 + ; CHECK-NEXT: $v10 = VMV1R_V $v17 + ; CHECK-NEXT: $v24 = VMV1R_V $v17 + ; CHECK-NEXT: $v23 = VMV1R_V $v16 + ; CHECK-NEXT: $v22 = VMV1R_V $v15 + ; CHECK-NEXT: $v21 = VMV1R_V $v14 + ; CHECK-NEXT: $v20 = VMV1R_V $v13 + ; CHECK-NEXT: $v19 = VMV1R_V $v12 + ; CHECK-NEXT: $v18 = VMV1R_V $v11 + ; CHECK-NEXT: $v17 = VMV1R_V $v10 + ; CHECK-NEXT: $v18 = VMV1R_V $v10 + ; CHECK-NEXT: $v19 = VMV1R_V $v11 + ; CHECK-NEXT: $v20 = VMV1R_V $v12 + ; CHECK-NEXT: $v21 = VMV1R_V $v13 + ; CHECK-NEXT: $v22 = VMV1R_V $v14 + ; CHECK-NEXT: $v23 = VMV1R_V $v15 + ; CHECK-NEXT: $v24 = VMV1R_V $v16 + ; CHECK-NEXT: $v25 = VMV1R_V $v17 $v2_v3_v4_v5_v6_v7_v8_v9 = COPY $v10_v11_v12_v13_v14_v15_v16_v17 $v3_v4_v5_v6_v7_v8_v9_v10 = COPY $v10_v11_v12_v13_v14_v15_v16_v17 $v17_v18_v19_v20_v21_v22_v23_v24 = COPY $v10_v11_v12_v13_v14_v15_v16_v17
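
Note on the zvlsseg-copy.mir expectations above: the order of the expanded whole-register moves is not arbitrary. When the destination tuple overlaps the source and starts at a higher-numbered register, the copies are emitted back to front so that no source register is overwritten before it has been read; otherwise they are emitted front to back. The standalone C++ sketch below is illustrative only (it is not the in-tree copyPhysReg code, expandSegmentCopy is a hypothetical helper, and only the LMUL=1 / VMV1R_V case from this test is modeled); it reproduces the ordering the CHECK lines expect.

// Sketch only: prints the NF x VMV1R_V expansion for a segment-register
// tuple copy at LMUL=1, in the same order as the zvlsseg-copy.mir CHECK lines.
#include <cstdio>

// DstBase/SrcBase are the indices of the first registers v<DstBase>/v<SrcBase>
// of the destination and source tuples; NF is the number of fields (2..8).
static void expandSegmentCopy(unsigned DstBase, unsigned SrcBase, unsigned NF) {
  // Copy forward unless the destination base falls inside the source range;
  // in that case copy backward so each source register is read before it can
  // be clobbered by an earlier move.
  bool Forward = DstBase < SrcBase || DstBase >= SrcBase + NF;
  for (unsigned I = 0; I != NF; ++I) {
    unsigned Idx = Forward ? I : NF - 1 - I;
    std::printf("$v%u = VMV1R_V $v%u\n", DstBase + Idx, SrcBase + Idx);
  }
}

int main() {
  // copy_zvlsseg_N8: $v17_..._v24 = COPY $v10_..._v17 overlaps its source and
  // starts higher, so it expands as $v24 = VMV1R_V $v17 ... $v17 = VMV1R_V $v10.
  expandSegmentCopy(17, 10, 8);
  // copy_zvlsseg_N8: $v2_..._v9 = COPY $v10_..._v17 is disjoint and expands
  // front to back, starting with $v2 = VMV1R_V $v10.
  expandSegmentCopy(2, 10, 8);
  return 0;
}

Comparing the printed lines against the corresponding CHECK-NEXT lines is a quick sanity check when regenerating this test.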