diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -95,6 +95,10 @@
     cl::desc("Do not align and prefetch loops"), cl::init(false));
 
+static cl::opt<bool> VGPRReserveforSGPRSpill(
+    "amdgpu-reserve-vgpr-for-sgpr-spill",
+    cl::desc("Allocates one VGPR for future SGPR Spill"), cl::init(true));
+
 static bool hasFP32Denormals(const MachineFunction &MF) {
   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
   return Info->getMode().allFP32Denormals();
@@ -10853,6 +10857,16 @@
   }
 
   TargetLoweringBase::finalizeLowering(MF);
+
+  // Allocate a VGPR for future SGPR Spill if
+  // "amdgpu-reserve-vgpr-for-sgpr-spill" option is used
+  if (VGPRReserveforSGPRSpill && !Info->VGPRReservedForSGPRSpill &&
+      !Info->isEntryFunction()) {
+    int FI = MF.getFrameInfo().CreateStackObject(4, 4, false, nullptr,
+                                                 TargetStackID::SGPRSpill);
+    Info->allocateSGPRSpillToVGPR(MF, FI, true);
+    MF.getFrameInfo().RemoveStackObject(FI);
+  }
 }
 
 void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
diff --git a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
--- a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
@@ -231,6 +231,54 @@
   return false;
 }
 
+static ArrayRef<MCPhysReg> getAllVGPR32(const GCNSubtarget &ST,
+                                        const MachineFunction &MF) {
+  return makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), ST.getMaxNumVGPRs(MF));
+}
+
+// Find lowest available VGPR and use it as VGPR reserved for SGPR spills.
+static bool lowerShiftReservedVGPR(MachineFunction &MF,
+                                   const GCNSubtarget &ST) {
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
+  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
+  Register LowestAvailableVGPR = AMDGPU::NoRegister, ReservedVGPR;
+  ArrayRef<MCPhysReg> AllVGPR32s = getAllVGPR32(ST, MF);
+  for (MCPhysReg Reg : AllVGPR32s) {
+    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
+      LowestAvailableVGPR = Reg;
+      break;
+    }
+  }
+
+  if (LowestAvailableVGPR != AMDGPU::NoRegister) {
+    ReservedVGPR = FuncInfo->VGPRReservedForSGPRSpill;
+    const MCPhysReg *CSRegs = MF.getRegInfo().getCalleeSavedRegs();
+    int i = 0;
+    for (auto &Reg : FuncInfo->getSGPRSpillVGPRs()) {
+      if (Reg.VGPR == ReservedVGPR) {
+        for (MachineBasicBlock &MBB : MF) {
+          if (MBB.isLiveIn(ReservedVGPR)) {
+            MBB.removeLiveIn(ReservedVGPR);
+          }
+          MBB.addLiveIn(LowestAvailableVGPR);
+          MBB.sortUniqueLiveIns();
+        }
+        Optional<int> FI;
+        if (FuncInfo->isCalleeSavedReg(CSRegs, LowestAvailableVGPR))
+          FI = FrameInfo.CreateSpillStackObject(4, Align(4));
+
+        FuncInfo->setSGPRSpillVGPRs(LowestAvailableVGPR, FI, i);
+        break;
+      }
+      ++i;
+    }
+    return true;
+  }
+
+  return false;
+}
+
 bool SILowerSGPRSpills::runOnMachineFunction(MachineFunction &MF) {
   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
   TII = ST.getInstrInfo();
@@ -270,6 +318,9 @@
   //
   // This operates under the assumption that only other SGPR spills are users
   // of the frame index.
+
+  lowerShiftReservedVGPR(MF, ST);
+
   for (MachineBasicBlock &MBB : MF) {
     MachineBasicBlock::iterator Next;
     for (auto I = MBB.begin(), E = MBB.end(); I != E; I = Next) {
@@ -318,6 +369,8 @@
     }
 
     MadeChange = true;
+  } else if (FuncInfo->VGPRReservedForSGPRSpill) {
+    FuncInfo->resetSGPRSpillVGPRs(FuncInfo->VGPRReservedForSGPRSpill, MF);
   }
 
   SaveBlocks.clear();
diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
@@ -485,6 +485,9 @@
   Register SGPRForFPSaveRestoreCopy;
   Optional<int> FramePointerSaveIndex;
 
+  Register VGPRReservedForSGPRSpill;
+  bool isCalleeSavedReg(const MCPhysReg *CSRegs, MCPhysReg Reg);
+
 public:
   SIMachineFunctionInfo(const MachineFunction &MF);
 
@@ -500,6 +503,13 @@
     return SpillVGPRs;
   }
 
+  void setSGPRSpillVGPRs(Register NewVGPR, Optional<int> newFI, int Index) {
+    SpillVGPRs[Index].VGPR = NewVGPR;
+    SpillVGPRs[Index].FI = newFI;
+  }
+
+  bool resetSGPRSpillVGPRs(Register ReservedVGPR, MachineFunction &MF);
+
   ArrayRef<MCPhysReg> getAGPRSpillVGPRs() const {
     return SpillAGPR;
   }
@@ -516,7 +526,8 @@
 
   bool haveFreeLanesForSGPRSpill(const MachineFunction &MF,
                                  unsigned NumLane) const;
-  bool allocateSGPRSpillToVGPR(MachineFunction &MF, int FI);
+  bool allocateSGPRSpillToVGPR(MachineFunction &MF, int FI,
+                               bool ReserveHighestVGPR = false);
   bool allocateVGPRSpillToAGPR(MachineFunction &MF, int FI, bool isAGPRtoVGPR);
   void removeDeadFrameIndices(MachineFrameInfo &MFI);
 
diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -261,7 +261,8 @@
   return ArgInfo.ImplicitBufferPtr.getRegister();
 }
 
-static bool isCalleeSavedReg(const MCPhysReg *CSRegs, MCPhysReg Reg) {
+bool SIMachineFunctionInfo::isCalleeSavedReg(const MCPhysReg *CSRegs,
+                                             MCPhysReg Reg) {
   for (unsigned I = 0; CSRegs[I]; ++I) {
     if (CSRegs[I] == Reg)
       return true;
@@ -281,9 +282,10 @@
   return NumVGPRSpillLanes + NumNeed <= WaveSize * SpillVGPRs.size();
 }
 
-/// Reserve a slice of a VGPR to support spilling for FrameIndex \p FI.
-bool SIMachineFunctionInfo::allocateSGPRSpillToVGPR(MachineFunction &MF,
-                                                    int FI) {
+/// Reserve a slice of a lowest unused VGPR to support spilling for FrameIndex
+/// \p FI. If \p ReserveHighestVGPR = true, reserve highest unused VGPR.
+bool SIMachineFunctionInfo::allocateSGPRSpillToVGPR(MachineFunction &MF, int FI,
+                                                    bool ReserveHighestVGPR) {
   std::vector<SpilledReg> &SpillLanes = SGPRToVGPRSpills[FI];
 
   // This has already been allocated.
@@ -295,6 +297,7 @@
   MachineFrameInfo &FrameInfo = MF.getFrameInfo();
   MachineRegisterInfo &MRI = MF.getRegInfo();
   unsigned WaveSize = ST.getWavefrontSize();
+  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
 
   unsigned Size = FrameInfo.getObjectSize(FI);
   assert(Size >= 4 && Size <= 64 && "invalid sgpr spill size");
@@ -311,7 +314,8 @@
     unsigned VGPRIndex = (NumVGPRSpillLanes % WaveSize);
 
     if (VGPRIndex == 0) {
-      LaneVGPR = TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass, MF);
+      LaneVGPR = TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass, MF,
+                                         ReserveHighestVGPR);
       if (LaneVGPR == AMDGPU::NoRegister) {
         // We have no VGPRs left for spilling SGPRs. Reset because we will not
         // partially spill the SGPR to VGPRs.
@@ -322,20 +326,22 @@
 
       Optional<int> CSRSpillFI;
       if ((FrameInfo.hasCalls() || !isEntryFunction()) && CSRegs &&
-          isCalleeSavedReg(CSRegs, LaneVGPR)) {
+          isCalleeSavedReg(CSRegs, LaneVGPR) && !ReserveHighestVGPR) {
         CSRSpillFI = FrameInfo.CreateSpillStackObject(4, Align(4));
       }
 
+      if (ReserveHighestVGPR)
+        FuncInfo->VGPRReservedForSGPRSpill = LaneVGPR;
       SpillVGPRs.push_back(SGPRSpillVGPRCSR(LaneVGPR, CSRSpillFI));
 
       // Add this register as live-in to all blocks to avoid machine verifer
       // complaining about use of an undefined physical register.
-      for (MachineBasicBlock &BB : MF)
-        BB.addLiveIn(LaneVGPR);
+      if (!ReserveHighestVGPR)
+        for (MachineBasicBlock &BB : MF)
+          BB.addLiveIn(LaneVGPR);
     } else {
       LaneVGPR = SpillVGPRs.back().VGPR;
     }
-
     SpillLanes.push_back(SpilledReg(LaneVGPR, VGPRIndex));
   }
 
@@ -533,3 +539,20 @@
   WaveLimiter = YamlMFI.WaveLimiter;
   return false;
 }
+
+bool SIMachineFunctionInfo::resetSGPRSpillVGPRs(Register ReservedVGPR,
+                                                MachineFunction &MF) {
+  for (auto *i = SpillVGPRs.begin(); i < SpillVGPRs.end(); i++) {
+    if (i->VGPR == ReservedVGPR) {
+      SpillVGPRs.erase(i);
+
+      for (MachineBasicBlock &MBB : MF) {
+        if (MBB.isLiveIn(ReservedVGPR))
+          MBB.removeLiveIn(ReservedVGPR);
+        MBB.sortUniqueLiveIns();
+      }
+      return true;
+    }
+  }
+  return false;
+}
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
@@ -196,7 +196,8 @@
 
   MCRegister findUnusedRegister(const MachineRegisterInfo &MRI,
                                 const TargetRegisterClass *RC,
-                                const MachineFunction &MF) const;
+                                const MachineFunction &MF,
+                                bool ReserveHighestVGPR = false) const;
 
   const TargetRegisterClass *getRegClassForReg(const MachineRegisterInfo &MRI,
                                                Register Reg) const;
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -320,6 +320,9 @@
   for (MCPhysReg Reg : MFI->getVGPRSpillAGPRs())
     reserveRegisterTuples(Reserved, Reg);
 
+  for (auto SSpill : MFI->getSGPRSpillVGPRs())
+    reserveRegisterTuples(Reserved, SSpill.VGPR);
+
   return Reserved;
 }
 
@@ -1529,17 +1532,23 @@
   return getCommonSubClass(DefRC, SrcRC) != nullptr;
 }
 
-/// Returns a register that is not used at any point in the function.
+/// Returns a lowest register that is not used at any point in the function.
 /// If all registers are used, then this function will return
-// AMDGPU::NoRegister.
-MCRegister
-SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
-                                   const TargetRegisterClass *RC,
-                                   const MachineFunction &MF) const {
-
-  for (MCRegister Reg : *RC)
-    if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
-      return Reg;
+/// AMDGPU::NoRegister. If \p ReserveHighestVGPR = true, then return
+/// highest unused register.
+MCRegister SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
                                              const TargetRegisterClass *RC,
                                              const MachineFunction &MF,
+                                              bool ReserveHighestVGPR) const {
+  if (ReserveHighestVGPR) {
+    for (MCRegister Reg : reverse(*RC))
+      if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
+        return Reg;
+  } else {
+    for (MCRegister Reg : *RC)
+      if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
+        return Reg;
+  }
 
   return MCRegister();
 }
diff --git a/llvm/test/CodeGen/AMDGPU/reserve-vgpr-for-sgpr-spill.ll b/llvm/test/CodeGen/AMDGPU/reserve-vgpr-for-sgpr-spill.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/reserve-vgpr-for-sgpr-spill.ll
@@ -0,0 +1,20 @@
+; RUN: llc -mtriple amdgcn-amd-amdhsa -mcpu=gfx803 -O0 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
+
+declare i32 @child_func(i32 %0, [255 x i32] %b) #0
+; GCN-LABEL: {{^}}parent_func:
+; GCN: v_writelane_b32 v32, s34, 4
+; GCN: v_writelane_b32 v32, s30, 2
+; GCN: v_writelane_b32 v32, s31, 3
+; GCN: v_readlane_b32 s6, v32, 2
+; GCN: v_readlane_b32 s7, v32, 3
+; GCN: v_readlane_b32 s34, v32, 4
+; GCN: ; NumVgprs: 256
+define fastcc i32 @parent_func(i32 %0, i32 %1, [255 x i32] %b) #1 {
+entry:
+  %ret0 = tail call fastcc i32 @child_func(i32 %0, [255 x i32] %b)
+  %res0 = add i32 %ret0, %0
+  ret i32 %res0
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind }