diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -95,6 +95,10 @@
     cl::desc("Do not align and prefetch loops"), cl::init(false));
 
+static cl::opt<bool> VGPRReserveforSGPRSpill(
+    "amdgpu-reserve-vgpr-for-sgpr-spill",
+    cl::desc("Allocates one VGPR for future SGPR Spill"), cl::init(true));
+
 static bool hasFP32Denormals(const MachineFunction &MF) {
   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
   return Info->getMode().allFP32Denormals();
@@ -10755,6 +10759,16 @@
   }
 
   TargetLoweringBase::finalizeLowering(MF);
+
+  // Allocate a VGPR for future SGPR spills when the
+  // "amdgpu-reserve-vgpr-for-sgpr-spill" option is enabled. The dummy stack
+  // object exists only long enough to drive the lane allocation.
+  if (VGPRReserveforSGPRSpill) {
+    int FI = MF.getFrameInfo().CreateStackObject(4, Align(4), false, nullptr,
+                                                 TargetStackID::SGPRSpill);
+    Info->ReservedVGPRforSGPRIndex = FI;
+    Info->allocateSGPRSpillToHighestVGPR(MF, FI);
+    MF.getFrameInfo().RemoveStackObject(FI);
+  }
 }
 
 void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
diff --git a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
--- a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
@@ -267,6 +267,19 @@
   //
   // This operates under the assumption that only other SGPR spills are users
   // of the frame index.
+
+  // Retarget the reserved spill VGPR to the lowest-numbered VGPR the function
+  // does not otherwise use, so the final spill register is the lowest one
+  // left unused rather than the highest one reserved earlier.
+  MCPhysReg LowestFreeVGPRReg = AMDGPU::NoRegister;
+  for (MCPhysReg Reg : AMDGPU::VGPR_32RegClass.getRegisters()) {
+    if (!MRI.isPhysRegUsed(Reg)) {
+      LowestFreeVGPRReg = Reg;
+      break;
+    }
+  }
+
+  FuncInfo->setSGPRSpillVGPRs(LowestFreeVGPRReg, 0);
+
   for (MachineBasicBlock &MBB : MF) {
     MachineBasicBlock::iterator Next;
     for (auto I = MBB.begin(), E = MBB.end(); I != E; I = Next) {
diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
@@ -485,6 +485,9 @@
   Register SGPRForFPSaveRestoreCopy;
   Optional<int> FramePointerSaveIndex;
 
+  Register VGPRReservedForSGPRSpill;
+  Optional<int> ReservedVGPRforSGPRIndex;
+
 public:
   SIMachineFunctionInfo(const MachineFunction &MF);
 
@@ -500,6 +503,10 @@
     return SpillVGPRs;
   }
 
+  void setSGPRSpillVGPRs(MCPhysReg LaneVGPR, int Index) {
+    SpillVGPRs[Index].VGPR = Register(LaneVGPR);
+  }
+
   ArrayRef<MCPhysReg> getAGPRSpillVGPRs() const {
     return SpillAGPR;
   }
@@ -517,6 +524,7 @@
   bool haveFreeLanesForSGPRSpill(const MachineFunction &MF,
                                  unsigned NumLane) const;
   bool allocateSGPRSpillToVGPR(MachineFunction &MF, int FI);
+  bool allocateSGPRSpillToHighestVGPR(MachineFunction &MF, int FI);
   bool allocateVGPRSpillToAGPR(MachineFunction &MF, int FI, bool isAGPRtoVGPR);
   void removeDeadFrameIndices(MachineFrameInfo &MFI);
diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -342,6 +342,70 @@
   return true;
 }
 
+/// Reserve a slice of a VGPR to support spilling for FrameIndex \p FI.
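+/// Unlike allocateSGPRSpillToVGPR, which grabs the first unused VGPR, this
+/// always takes lanes from the highest VGPR in the class, so one predictable
+/// register is set aside up front; SILowerSGPRSpills may later retarget the
+/// reservation to the lowest VGPR left unused after register allocation.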
+bool SIMachineFunctionInfo::allocateSGPRSpillToHighestVGPR(MachineFunction &MF,
+                                                           int FI) {
+  std::vector<SpilledReg> &SpillLanes = SGPRToVGPRSpills[FI];
+
+  // This has already been allocated.
+  if (!SpillLanes.empty())
+    return true;
+
+  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
+  const SIRegisterInfo *TRI = ST.getRegisterInfo();
+  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  unsigned WaveSize = ST.getWavefrontSize();
+
+  unsigned Size = FrameInfo.getObjectSize(FI);
+  assert(Size >= 4 && Size <= 64 && "invalid sgpr spill size");
+  assert(TRI->spillSGPRToVGPR() && "not spilling SGPRs to VGPRs");
+
+  int NumLanes = Size / 4;
+
+  const MCPhysReg *CSRegs = MRI.getCalleeSavedRegs();
+
+  // Make sure to handle the case where a wide SGPR spill may span between two
+  // VGPRs.
+  for (int I = 0; I < NumLanes; ++I, ++NumVGPRSpillLanes) {
+    Register LaneVGPR;
+    unsigned VGPRIndex = (NumVGPRSpillLanes % WaveSize);
+
+    if (VGPRIndex == 0) {
+      // Always pick the highest VGPR in the class so the reserved register
+      // does not depend on how many low VGPRs the function happens to use.
+      LaneVGPR = AMDGPU::VGPR_32RegClass.getRegisters().back();
+
+      if (LaneVGPR == AMDGPU::NoRegister) {
+        // We have no VGPRs left for spilling SGPRs. Reset because we will not
+        // partially spill the SGPR to VGPRs.
+        SGPRToVGPRSpills.erase(FI);
+        NumVGPRSpillLanes -= I;
+        return false;
+      }
+
+      Optional<int> CSRSpillFI;
+      if ((FrameInfo.hasCalls() || !isEntryFunction()) && CSRegs &&
+          isCalleeSavedReg(CSRegs, LaneVGPR)) {
+        CSRSpillFI = FrameInfo.CreateSpillStackObject(4, Align(4));
+      }
+
+      SpillVGPRs.push_back(SGPRSpillVGPRCSR(LaneVGPR, CSRSpillFI));
+      VGPRReservedForSGPRSpill = LaneVGPR;
+
+      // Add this register as live-in to all blocks to avoid machine verifier
+      // complaining about use of an undefined physical register.
+      for (MachineBasicBlock &BB : MF)
+        BB.addLiveIn(LaneVGPR);
+    } else {
+      LaneVGPR = SpillVGPRs.back().VGPR;
+    }
+
+    SpillLanes.push_back(SpilledReg(LaneVGPR, VGPRIndex));
+  }
+
+  return true;
+}
+
 /// Reserve AGPRs or VGPRs to support spilling for FrameIndex \p FI.
 /// Either AGPR is spilled to VGPR to vice versa.
 /// Returns true if a \p FI can be eliminated completely.
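[Editor's note, not part of the patch] The lane bookkeeping above packs one
32-bit SGPR per VGPR lane, so a single reserved VGPR holds a full wavefront's
worth of spill slots before a second VGPR would be needed. A minimal
standalone sketch of that mapping (the names here are illustrative, not LLVM
API):

    #include <cstdio>

    // Mirrors the VGPRIndex = NumVGPRSpillLanes % WaveSize arithmetic in
    // allocateSGPRSpillToHighestVGPR: each VGPR supplies WaveSize lanes.
    struct SpillSlot {
      unsigned VGPRNumber; // which reserved VGPR, counting from the first
      unsigned Lane;       // lane within that VGPR (VGPRIndex in the patch)
    };

    static SpillSlot slotForSpillLane(unsigned NumVGPRSpillLanes,
                                      unsigned WaveSize) {
      return {NumVGPRSpillLanes / WaveSize, NumVGPRSpillLanes % WaveSize};
    }

    int main() {
      // On a wave64 target, spill lanes 0..63 share the first reserved
      // VGPR; lane 64 is where a second VGPR would be required.
      const unsigned Lanes[] = {0, 63, 64, 65};
      for (unsigned L : Lanes) {
        SpillSlot S = slotForSpillLane(L, 64);
        std::printf("spill lane %3u -> reserved VGPR #%u, lane %2u\n", L,
                    S.VGPRNumber, S.Lane);
      }
      return 0;
    }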
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -308,6 +308,9 @@
   for (MCPhysReg Reg : MFI->getVGPRSpillAGPRs())
     reserveRegisterTuples(Reserved, Reg);
 
+  for (auto SSpill : MFI->getSGPRSpillVGPRs())
+    reserveRegisterTuples(Reserved, SSpill.VGPR);
+
   return Reserved;
 }
 
diff --git a/llvm/test/CodeGen/AMDGPU/reserve-vgpr-for-sgpr-spill.ll b/llvm/test/CodeGen/AMDGPU/reserve-vgpr-for-sgpr-spill.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/reserve-vgpr-for-sgpr-spill.ll
@@ -0,0 +1,21 @@
+; RUN: llc -mtriple amdgcn-amd-amdhsa -mcpu=gfx803 -O0 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
+
+declare i32 @child_func(i32, [255 x i32]) #0
+
+; GCN-LABEL: {{^}}parent_func:
+; GCN: v_writelane_b32 v32, s34, 4
+; GCN: v_writelane_b32 v32, s30, 2
+; GCN: v_writelane_b32 v32, s31, 3
+; GCN: v_readlane_b32 s6, v32, 2
+; GCN: v_readlane_b32 s7, v32, 3
+; GCN: v_readlane_b32 s34, v32, 4
+; GCN: ; NumVgprs: 256
+define fastcc i32 @parent_func(i32 %0, i32 %1, [255 x i32] %b) #1 {
+entry:
+  %ret0 = tail call fastcc i32 @child_func(i32 %0, [255 x i32] %b)
+  %res0 = add i32 %ret0, %0
+  ret i32 %res0
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind }
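[Editor's note, not part of the patch] The new cl::opt defaults to on, so the
reservation is active unless explicitly disabled; a build that wants the old
behavior can turn it off on the llc command line, e.g.:

    llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx803 \
        -amdgpu-reserve-vgpr-for-sgpr-spill=0 input.ll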