diff --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
--- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -50,8 +50,6 @@
   bool expandVSetVL(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
   bool expandVMSET_VMCLR(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI, unsigned Opcode);
-  bool expandVSPILL(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
-  bool expandVRELOAD(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
 };
 
 char RISCVExpandPseudo::ID = 0;
@@ -108,30 +106,6 @@
   case RISCV::PseudoVMSET_M_B64:
     // vmset.m vd => vmxnor.mm vd, vd, vd
     return expandVMSET_VMCLR(MBB, MBBI, RISCV::VMXNOR_MM);
-  case RISCV::PseudoVSPILL2_M1:
-  case RISCV::PseudoVSPILL2_M2:
-  case RISCV::PseudoVSPILL2_M4:
-  case RISCV::PseudoVSPILL3_M1:
-  case RISCV::PseudoVSPILL3_M2:
-  case RISCV::PseudoVSPILL4_M1:
-  case RISCV::PseudoVSPILL4_M2:
-  case RISCV::PseudoVSPILL5_M1:
-  case RISCV::PseudoVSPILL6_M1:
-  case RISCV::PseudoVSPILL7_M1:
-  case RISCV::PseudoVSPILL8_M1:
-    return expandVSPILL(MBB, MBBI);
-  case RISCV::PseudoVRELOAD2_M1:
-  case RISCV::PseudoVRELOAD2_M2:
-  case RISCV::PseudoVRELOAD2_M4:
-  case RISCV::PseudoVRELOAD3_M1:
-  case RISCV::PseudoVRELOAD3_M2:
-  case RISCV::PseudoVRELOAD4_M1:
-  case RISCV::PseudoVRELOAD4_M2:
-  case RISCV::PseudoVRELOAD5_M1:
-  case RISCV::PseudoVRELOAD6_M1:
-  case RISCV::PseudoVRELOAD7_M1:
-  case RISCV::PseudoVRELOAD8_M1:
-    return expandVRELOAD(MBB, MBBI);
   }
 
   return false;
@@ -234,101 +208,6 @@
   return true;
 }
 
-bool RISCVExpandPseudo::expandVSPILL(MachineBasicBlock &MBB,
-                                     MachineBasicBlock::iterator MBBI) {
-  const TargetRegisterInfo *TRI =
-      MBB.getParent()->getSubtarget().getRegisterInfo();
-  DebugLoc DL = MBBI->getDebugLoc();
-  Register SrcReg = MBBI->getOperand(0).getReg();
-  Register Base = MBBI->getOperand(1).getReg();
-  Register VL = MBBI->getOperand(2).getReg();
-  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(MBBI->getOpcode());
-  if (!ZvlssegInfo)
-    return false;
-  unsigned NF = ZvlssegInfo->first;
-  unsigned LMUL = ZvlssegInfo->second;
-  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
-  unsigned Opcode = RISCV::VS1R_V;
-  unsigned SubRegIdx = RISCV::sub_vrm1_0;
-  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
-                "Unexpected subreg numbering");
-  if (LMUL == 2) {
-    Opcode = RISCV::VS2R_V;
-    SubRegIdx = RISCV::sub_vrm2_0;
-    static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
-                  "Unexpected subreg numbering");
-  } else if (LMUL == 4) {
-    Opcode = RISCV::VS4R_V;
-    SubRegIdx = RISCV::sub_vrm4_0;
-    static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
-                  "Unexpected subreg numbering");
-  } else
-    assert(LMUL == 1 && "LMUL must be 1, 2, or 4.");
-
-  for (unsigned I = 0; I < NF; ++I) {
-    // Adding implicit-use of super register to describe we are using part of
-    // super register, that prevents machine verifier complaining when part of
-    // subreg is undef, see comment in MachineVerifier::checkLiveness for more
-    // detail.
-    BuildMI(MBB, MBBI, DL, TII->get(Opcode))
-        .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I))
-        .addReg(Base)
-        .addMemOperand(*(MBBI->memoperands_begin()))
-        .addReg(SrcReg, RegState::Implicit);
-    if (I != NF - 1)
-      BuildMI(MBB, MBBI, DL, TII->get(RISCV::ADD), Base)
-          .addReg(Base)
-          .addReg(VL);
-  }
-  MBBI->eraseFromParent();
-  return true;
-}
-
-bool RISCVExpandPseudo::expandVRELOAD(MachineBasicBlock &MBB,
-                                      MachineBasicBlock::iterator MBBI) {
-  const TargetRegisterInfo *TRI =
-      MBB.getParent()->getSubtarget().getRegisterInfo();
-  DebugLoc DL = MBBI->getDebugLoc();
-  Register DestReg = MBBI->getOperand(0).getReg();
-  Register Base = MBBI->getOperand(1).getReg();
-  Register VL = MBBI->getOperand(2).getReg();
-  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(MBBI->getOpcode());
-  if (!ZvlssegInfo)
-    return false;
-  unsigned NF = ZvlssegInfo->first;
-  unsigned LMUL = ZvlssegInfo->second;
-  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
-  unsigned Opcode = RISCV::VL1RE8_V;
-  unsigned SubRegIdx = RISCV::sub_vrm1_0;
-  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
-                "Unexpected subreg numbering");
-  if (LMUL == 2) {
-    Opcode = RISCV::VL2RE8_V;
-    SubRegIdx = RISCV::sub_vrm2_0;
-    static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
-                  "Unexpected subreg numbering");
-  } else if (LMUL == 4) {
-    Opcode = RISCV::VL4RE8_V;
-    SubRegIdx = RISCV::sub_vrm4_0;
-    static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
-                  "Unexpected subreg numbering");
-  } else
-    assert(LMUL == 1 && "LMUL must be 1, 2, or 4.");
-
-  for (unsigned I = 0; I < NF; ++I) {
-    BuildMI(MBB, MBBI, DL, TII->get(Opcode),
-            TRI->getSubReg(DestReg, SubRegIdx + I))
-        .addReg(Base)
-        .addMemOperand(*(MBBI->memoperands_begin()));
-    if (I != NF - 1)
-      BuildMI(MBB, MBBI, DL, TII->get(RISCV::ADD), Base)
-          .addReg(Base)
-          .addReg(VL);
-  }
-  MBBI->eraseFromParent();
-  return true;
-}
-
 class RISCVPreRAExpandPseudo : public MachineFunctionPass {
 public:
   const RISCVInstrInfo *TII;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -464,7 +464,6 @@
 
   unsigned Opcode;
   bool IsScalableVector = true;
-  bool IsZvlsseg = true;
   if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
     Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
              RISCV::SW : RISCV::SD;
@@ -480,16 +479,12 @@
     IsScalableVector = false;
   } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
     Opcode = RISCV::PseudoVSPILL_M1;
-    IsZvlsseg = false;
   } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
     Opcode = RISCV::PseudoVSPILL_M2;
-    IsZvlsseg = false;
   } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
     Opcode = RISCV::PseudoVSPILL_M4;
-    IsZvlsseg = false;
   } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
     Opcode = RISCV::PseudoVSPILL_M8;
-    IsZvlsseg = false;
   } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
     Opcode = RISCV::PseudoVSPILL2_M1;
   else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
@@ -521,16 +516,10 @@
         MemoryLocation::UnknownSize, MFI.getObjectAlign(FI));
 
     MFI.setStackID(FI, TargetStackID::ScalableVector);
-    auto MIB = BuildMI(MBB, I, DL, get(Opcode))
-                   .addReg(SrcReg, getKillRegState(IsKill))
-                   .addFrameIndex(FI)
-                   .addMemOperand(MMO);
-    if (IsZvlsseg) {
-      // For spilling/reloading Zvlsseg registers, append the dummy field for
-      // the scaled vector length. The argument will be used when expanding
-      // these pseudo instructions.
-      MIB.addReg(RISCV::X0);
-    }
+    BuildMI(MBB, I, DL, get(Opcode))
+        .addReg(SrcReg, getKillRegState(IsKill))
+        .addFrameIndex(FI)
+        .addMemOperand(MMO);
   } else {
     MachineMemOperand *MMO = MF->getMachineMemOperand(
         MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
@@ -558,7 +547,6 @@
 
   unsigned Opcode;
   bool IsScalableVector = true;
-  bool IsZvlsseg = true;
   if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
     Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
              RISCV::LW : RISCV::LD;
@@ -574,16 +562,12 @@
     IsScalableVector = false;
   } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
     Opcode = RISCV::PseudoVRELOAD_M1;
-    IsZvlsseg = false;
   } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
     Opcode = RISCV::PseudoVRELOAD_M2;
-    IsZvlsseg = false;
   } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
     Opcode = RISCV::PseudoVRELOAD_M4;
-    IsZvlsseg = false;
   } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
     Opcode = RISCV::PseudoVRELOAD_M8;
-    IsZvlsseg = false;
   } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
     Opcode = RISCV::PseudoVRELOAD2_M1;
   else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
@@ -615,15 +599,9 @@
         MemoryLocation::UnknownSize, MFI.getObjectAlign(FI));
 
     MFI.setStackID(FI, TargetStackID::ScalableVector);
-    auto MIB = BuildMI(MBB, I, DL, get(Opcode), DstReg)
-                   .addFrameIndex(FI)
-                   .addMemOperand(MMO);
-    if (IsZvlsseg) {
-      // For spilling/reloading Zvlsseg registers, append the dummy field for
-      // the scaled vector length. The argument will be used when expanding
-      // these pseudo instructions.
-      MIB.addReg(RISCV::X0);
-    }
+    BuildMI(MBB, I, DL, get(Opcode), DstReg)
+        .addFrameIndex(FI)
+        .addMemOperand(MMO);
   } else {
     MachineMemOperand *MMO = MF->getMachineMemOperand(
         MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -5072,12 +5072,12 @@
   let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1,
       Size = !mul(4, !sub(!mul(nf, 2), 1)) in {
     def "PseudoVSPILL" # nf # "_" # lmul.MX :
-      Pseudo<(outs), (ins vreg:$rs1, GPR:$rs2, GPR:$vlenb), []>;
+      Pseudo<(outs), (ins vreg:$rs1, GPR:$rs2), []>;
   }
   let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 1,
       Size = !mul(4, !sub(!mul(nf, 2), 1)) in {
     def "PseudoVRELOAD" # nf # "_" # lmul.MX :
-      Pseudo<(outs vreg:$rs1), (ins GPR:$rs2, GPR:$vlenb), []>;
+      Pseudo<(outs vreg:$rs1), (ins GPR:$rs2), []>;
   }
  }
 }
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
@@ -51,6 +51,9 @@
                            unsigned FIOperandNum,
                            RegScavenger *RS = nullptr) const override;
 
+  void lowerVSPILL(MachineBasicBlock::iterator II) const;
+  void lowerVRELOAD(MachineBasicBlock::iterator II) const;
+
   Register getFrameRegister(const MachineFunction &MF) const override;
 
   bool requiresRegisterScavenging(const MachineFunction &MF) const override {
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -247,6 +247,138 @@
       .setMIFlag(Flag);
 }
 
+// Split a VSPILLx_Mx pseudo into multiple whole register stores separated by
+// LMUL*VLENB bytes.
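+//
+// As an illustrative sketch only (register assignments here are hypothetical;
+// the real base and stride registers come from frame lowering and the
+// scavenger), a PseudoVSPILL3_M2 with the slot address in a0 lowers to
+// roughly:
+//   csrr t0, vlenb
+//   slli t0, t0, 1      # stride = LMUL * VLENB = 2 * VLENB
+//   vs2r.v v2, (a0)
+//   add a0, a0, t0
+//   vs2r.v v4, (a0)
+//   add a0, a0, t0
+//   vs2r.v v6, (a0)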
+void RISCVRegisterInfo::lowerVSPILL(MachineBasicBlock::iterator II) const {
+  DebugLoc DL = II->getDebugLoc();
+  MachineBasicBlock &MBB = *II->getParent();
+  MachineFunction &MF = *MBB.getParent();
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
+  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+
+  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
+  unsigned NF = ZvlssegInfo->first;
+  unsigned LMUL = ZvlssegInfo->second;
+  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
+  unsigned Opcode, SubRegIdx;
+  switch (LMUL) {
+  default:
+    llvm_unreachable("LMUL must be 1, 2, or 4.");
+  case 1:
+    Opcode = RISCV::VS1R_V;
+    SubRegIdx = RISCV::sub_vrm1_0;
+    break;
+  case 2:
+    Opcode = RISCV::VS2R_V;
+    SubRegIdx = RISCV::sub_vrm2_0;
+    break;
+  case 4:
+    Opcode = RISCV::VS4R_V;
+    SubRegIdx = RISCV::sub_vrm4_0;
+    break;
+  }
+  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
+                "Unexpected subreg numbering");
+  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
+                "Unexpected subreg numbering");
+  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
+                "Unexpected subreg numbering");
+
+  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+  BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
+  uint32_t ShiftAmount = Log2_32(LMUL);
+  if (ShiftAmount != 0)
+    BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
+        .addReg(VL)
+        .addImm(ShiftAmount);
+
+  Register SrcReg = II->getOperand(0).getReg();
+  Register Base = II->getOperand(1).getReg();
+  bool IsBaseKill = II->getOperand(1).isKill();
+  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+  for (unsigned I = 0; I < NF; ++I) {
+    // Add an implicit use of the super register to describe that we are using
+    // part of it; this keeps the machine verifier from complaining when part
+    // of the subreg is undef. See the comment in
+    // MachineVerifier::checkLiveness for more detail.
+    BuildMI(MBB, II, DL, TII->get(Opcode))
+        .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I))
+        .addReg(Base, getKillRegState(I == NF - 1))
+        .addMemOperand(*(II->memoperands_begin()))
+        .addReg(SrcReg, RegState::Implicit);
+    if (I != NF - 1)
+      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
+          .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
+          .addReg(VL, getKillRegState(I == NF - 2));
+    Base = NewBase;
+  }
+  II->eraseFromParent();
+}
+
+// Split a VRELOADx_Mx pseudo into multiple whole register loads separated by
+// LMUL*VLENB bytes.
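+//
+// Mirroring the spill sketch above (hypothetical registers again), a
+// PseudoVRELOAD3_M2 with the slot address in a0 lowers to roughly:
+//   csrr t0, vlenb
+//   slli t0, t0, 1      # stride = LMUL * VLENB = 2 * VLENB
+//   vl2re8.v v2, (a0)
+//   add a0, a0, t0
+//   vl2re8.v v4, (a0)
+//   add a0, a0, t0
+//   vl2re8.v v6, (a0)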
+void RISCVRegisterInfo::lowerVRELOAD(MachineBasicBlock::iterator II) const {
+  DebugLoc DL = II->getDebugLoc();
+  MachineBasicBlock &MBB = *II->getParent();
+  MachineFunction &MF = *MBB.getParent();
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
+  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+
+  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
+  unsigned NF = ZvlssegInfo->first;
+  unsigned LMUL = ZvlssegInfo->second;
+  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
+  unsigned Opcode, SubRegIdx;
+  switch (LMUL) {
+  default:
+    llvm_unreachable("LMUL must be 1, 2, or 4.");
+  case 1:
+    Opcode = RISCV::VL1RE8_V;
+    SubRegIdx = RISCV::sub_vrm1_0;
+    break;
+  case 2:
+    Opcode = RISCV::VL2RE8_V;
+    SubRegIdx = RISCV::sub_vrm2_0;
+    break;
+  case 4:
+    Opcode = RISCV::VL4RE8_V;
+    SubRegIdx = RISCV::sub_vrm4_0;
+    break;
+  }
+  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
+                "Unexpected subreg numbering");
+  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
+                "Unexpected subreg numbering");
+  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
+                "Unexpected subreg numbering");
+
+  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+  BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
+  uint32_t ShiftAmount = Log2_32(LMUL);
+  if (ShiftAmount != 0)
+    BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
+        .addReg(VL)
+        .addImm(ShiftAmount);
+
+  Register DestReg = II->getOperand(0).getReg();
+  Register Base = II->getOperand(1).getReg();
+  bool IsBaseKill = II->getOperand(1).isKill();
+  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+  for (unsigned I = 0; I < NF; ++I) {
+    BuildMI(MBB, II, DL, TII->get(Opcode),
+            TRI->getSubReg(DestReg, SubRegIdx + I))
+        .addReg(Base, getKillRegState(I == NF - 1))
+        .addMemOperand(*(II->memoperands_begin()));
+    if (I != NF - 1)
+      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
+          .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
+          .addReg(VL, getKillRegState(I == NF - 2));
+    Base = NewBase;
+  }
+  II->eraseFromParent();
+}
 
 bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                             int SPAdj, unsigned FIOperandNum,
@@ -257,7 +389,6 @@
   MachineFunction &MF = *MI.getParent()->getParent();
   MachineRegisterInfo &MRI = MF.getRegInfo();
   const RISCVSubtarget &ST = MF.getSubtarget();
-  const RISCVInstrInfo *TII = ST.getInstrInfo();
   DebugLoc DL = MI.getDebugLoc();
 
   int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
@@ -331,21 +462,39 @@
     return true;
   }
 
-  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(MI.getOpcode());
-  if (ZvlssegInfo) {
-    MachineBasicBlock &MBB = *MI.getParent();
-    Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
-    BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
-    uint32_t ShiftAmount = Log2_32(ZvlssegInfo->second);
-    if (ShiftAmount != 0)
-      BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
-          .addReg(VL)
-          .addImm(ShiftAmount);
-    // The last argument of pseudo spilling opcode for zvlsseg is the length of
-    // one element of zvlsseg types. For example, for vint32m2x2_t, it will be
-    // the length of vint32m2_t.
-    MI.getOperand(FIOperandNum + 1).ChangeToRegister(VL, /*isDef=*/false);
+  // Handle spill/fill of synthetic register classes for segment operations to
+  // ensure correctness in the edge case one gets spilled.
+  // There are many possible optimizations here, but given the extreme rarity
+  // of such spills, we prefer simplicity of implementation for now.
+  switch (MI.getOpcode()) {
+  case RISCV::PseudoVSPILL2_M1:
+  case RISCV::PseudoVSPILL2_M2:
+  case RISCV::PseudoVSPILL2_M4:
+  case RISCV::PseudoVSPILL3_M1:
+  case RISCV::PseudoVSPILL3_M2:
+  case RISCV::PseudoVSPILL4_M1:
+  case RISCV::PseudoVSPILL4_M2:
+  case RISCV::PseudoVSPILL5_M1:
+  case RISCV::PseudoVSPILL6_M1:
+  case RISCV::PseudoVSPILL7_M1:
+  case RISCV::PseudoVSPILL8_M1:
+    lowerVSPILL(II);
+    return true;
+  case RISCV::PseudoVRELOAD2_M1:
+  case RISCV::PseudoVRELOAD2_M2:
+  case RISCV::PseudoVRELOAD2_M4:
+  case RISCV::PseudoVRELOAD3_M1:
+  case RISCV::PseudoVRELOAD3_M2:
+  case RISCV::PseudoVRELOAD4_M1:
+  case RISCV::PseudoVRELOAD4_M2:
+  case RISCV::PseudoVRELOAD5_M1:
+  case RISCV::PseudoVRELOAD6_M1:
+  case RISCV::PseudoVRELOAD7_M1:
+  case RISCV::PseudoVRELOAD8_M1:
+    lowerVRELOAD(II);
+    return true;
   }
+
   return false;
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/undef-subreg-range.mir b/llvm/test/CodeGen/RISCV/rvv/undef-subreg-range.mir
deleted file mode 100644
--- a/llvm/test/CodeGen/RISCV/rvv/undef-subreg-range.mir
+++ /dev/null
@@ -1,32 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc %s -O2 -mtriple riscv64 -riscv-enable-subreg-liveness \
-# RUN:     -verify-machineinstrs -run-pass=riscv-expand-pseudo -o - 2>&1 \
-# RUN:     | FileCheck %s
---- |
-  define void @foo() #0 {
-  entry:
-    ret void
-  }
-...
----
-name: foo
-alignment: 4
-tracksRegLiveness: true
-fixedStack: []
-stack:
-  - { id: 0, name: '', type: spill-slot, offset: 0, size: 32, alignment: 8,
-      stack-id: scalable-vector, callee-saved-register: '', callee-saved-restored: true }
-body: |
-  bb.0.entry:
-    liveins: $v8m2, $x10, $x11
-    ; CHECK-LABEL: name: foo
-    ; CHECK: liveins: $v8m2, $x10, $x11
-    ; CHECK-NEXT: {{ $}}
-    ; CHECK-NEXT: VS2R_V $v8m2, $x10, implicit $v8m2_v10m2 :: (store unknown-size into %stack.0, align 8)
-    ; CHECK-NEXT: $x10 = ADD $x10, $x11
-    ; CHECK-NEXT: VS2R_V $v10m2, $x10, implicit $v8m2_v10m2 :: (store unknown-size into %stack.0, align 8)
-    ; CHECK-NEXT: PseudoRET
-    PseudoVSPILL2_M2 killed $v8m2_v10m2, killed $x10, killed $x11 :: (store unknown-size into %stack.0, align 8)
-    PseudoRET
-
-...
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
--- a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
@@ -185,29 +185,6 @@
     $v12m2 = COPY $v28m2
 ...
 ---
-name: copy_from_reload
-tracksRegLiveness: true
-body: |
-  bb.0:
-    liveins: $x14, $x16, $x17
-    ; 73 = e16,m2
-    ; CHECK-LABEL: name: copy_from_reload
-    ; CHECK: liveins: $x14, $x16, $x17
-    ; CHECK-NEXT: {{ $}}
-    ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 73 /* e16, m2, ta, mu */, implicit-def $vl, implicit-def $vtype
-    ; CHECK-NEXT: $v2m2 = PseudoVLE16_V_M2 killed $x16, $noreg, 4 /* e16 */, implicit $vl, implicit $vtype
-    ; CHECK-NEXT: $x12 = PseudoReadVLENB
-    ; CHECK-NEXT: $x12 = SLLI $x12, 1
-    ; CHECK-NEXT: $v2m2_v4m2 = PseudoVRELOAD2_M2 killed $x17, killed $x12
-    ; CHECK-NEXT: $v12m2 = PseudoVMV2R_V $v2m2
-    $x15 = PseudoVSETVLI $x14, 73, implicit-def $vl, implicit-def $vtype
-    $v2m2 = PseudoVLE16_V_M2 killed $x16, $noreg, 4, implicit $vl, implicit $vtype
-    $x12 = PseudoReadVLENB
-    $x12 = SLLI $x12, 1
-    $v2m2_v4m2 = PseudoVRELOAD2_M2 killed $x17, killed $x12
-    $v12m2 = COPY $v2m2
-...
----
 name: copy_with_different_vlmax
 tracksRegLiveness: true
 body: |
diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
--- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
@@ -31,10 +31,34 @@
     ; CHECK-NEXT: $v0_v1_v2_v3_v4_v5_v6 = PseudoVLSEG7E64_V_M1 renamable $x10, $noreg, 6 /* e64 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $x11 = ADDI $x2, 16
     ; CHECK-NEXT: $x12 = PseudoReadVLENB
-    ; CHECK-NEXT: PseudoVSPILL7_M1 killed renamable $v0_v1_v2_v3_v4_v5_v6, killed $x11, killed $x12
+    ; CHECK-NEXT: VS1R_V $v0, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store unknown-size into %stack.0, align 8)
+    ; CHECK-NEXT: $x11 = ADD killed $x11, $x12
+    ; CHECK-NEXT: VS1R_V $v1, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store unknown-size into %stack.0, align 8)
+    ; CHECK-NEXT: $x11 = ADD killed $x11, $x12
+    ; CHECK-NEXT: VS1R_V $v2, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store unknown-size into %stack.0, align 8)
+    ; CHECK-NEXT: $x11 = ADD killed $x11, $x12
+    ; CHECK-NEXT: VS1R_V $v3, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store unknown-size into %stack.0, align 8)
+    ; CHECK-NEXT: $x11 = ADD killed $x11, $x12
+    ; CHECK-NEXT: VS1R_V $v4, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store unknown-size into %stack.0, align 8)
+    ; CHECK-NEXT: $x11 = ADD killed $x11, $x12
+    ; CHECK-NEXT: VS1R_V $v5, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store unknown-size into %stack.0, align 8)
+    ; CHECK-NEXT: $x11 = ADD killed $x11, killed $x12
+    ; CHECK-NEXT: VS1R_V $v6, killed $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store unknown-size into %stack.0, align 8)
     ; CHECK-NEXT: $x11 = ADDI $x2, 16
     ; CHECK-NEXT: $x12 = PseudoReadVLENB
-    ; CHECK-NEXT: dead renamable $v7_v8_v9_v10_v11_v12_v13 = PseudoVRELOAD7_M1 killed $x11, killed $x12, implicit-def $v8
+    ; CHECK-NEXT: $v7 = VL1RE8_V $x11 :: (load unknown-size from %stack.0, align 8)
+    ; CHECK-NEXT: $x11 = ADD killed $x11, $x12
+    ; CHECK-NEXT: $v8 = VL1RE8_V $x11 :: (load unknown-size from %stack.0, align 8)
+    ; CHECK-NEXT: $x11 = ADD killed $x11, $x12
+    ; CHECK-NEXT: $v9 = VL1RE8_V $x11 :: (load unknown-size from %stack.0, align 8)
+    ; CHECK-NEXT: $x11 = ADD killed $x11, $x12
+    ; CHECK-NEXT: $v10 = VL1RE8_V $x11 :: (load unknown-size from %stack.0, align 8)
+    ; CHECK-NEXT: $x11 = ADD killed $x11, $x12
+    ; CHECK-NEXT: $v11 = VL1RE8_V $x11 :: (load unknown-size from %stack.0, align 8)
+    ; CHECK-NEXT: $x11 = ADD killed $x11, $x12
+    ; CHECK-NEXT: $v12 = VL1RE8_V $x11 :: (load unknown-size from %stack.0, align 8)
+    ; CHECK-NEXT: $x11 = ADD killed $x11, killed $x12
+    ; CHECK-NEXT: $v13 = VL1RE8_V killed $x11 :: (load unknown-size from %stack.0, align 8)
     ; CHECK-NEXT: VS1R_V killed $v8, killed renamable $x10
     ; CHECK-NEXT: $x10 = frame-destroy PseudoReadVLENB
     ; CHECK-NEXT: $x10 = frame-destroy SLLI killed $x10, 3
@@ -44,8 +68,8 @@
     %0:gpr = COPY $x10
     %1:gprnox0 = COPY $x11
     $v0_v1_v2_v3_v4_v5_v6 = PseudoVLSEG7E64_V_M1 %0, %1, 6
-    PseudoVSPILL7_M1 killed renamable $v0_v1_v2_v3_v4_v5_v6, %stack.0, $x0
-    renamable $v7_v8_v9_v10_v11_v12_v13 = PseudoVRELOAD7_M1 %stack.0, $x0
+    PseudoVSPILL7_M1 killed renamable $v0_v1_v2_v3_v4_v5_v6, %stack.0 :: (store unknown-size into %stack.0, align 8)
+    renamable $v7_v8_v9_v10_v11_v12_v13 = PseudoVRELOAD7_M1 %stack.0 :: (load unknown-size from %stack.0, align 8)
     VS1R_V killed $v8, %0:gpr
     PseudoRET
 ...
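Reviewer note (a sketch, not part of the patch): the deleted undef-subreg-range.mir covered the NF=2/LMUL=2 case at riscv-expand-pseudo time. After this change the equivalent expansion happens during frame-index elimination and, assuming the slot sits at sp+16 and $x12 is the register picked for the stride (both hypothetical here), looks roughly like:

  $x11 = ADDI $x2, 16
  $x12 = PseudoReadVLENB
  $x12 = SLLI $x12, 1
  VS2R_V $v8m2, $x11, implicit $v8m2_v10m2 :: (store unknown-size into %stack.0, align 8)
  $x11 = ADD killed $x11, killed $x12
  VS2R_V $v10m2, killed $x11, implicit $v8m2_v10m2 :: (store unknown-size into %stack.0, align 8)

This matches the per-subregister store/ADD pattern the updated zvlsseg-spill.mir checks exercise for the NF=7/LMUL=1 case above.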