Index: llvm/lib/Target/AMDGPU/SIISelLowering.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -100,6 +100,11 @@
   cl::desc("Do not align and prefetch loops"),
   cl::init(false));
 
+static cl::opt<bool> VGPRReserveforSGPRSpill(
+    "amdgpu-reserve-vgpr-for-sgpr-spill",
+    cl::desc("Allocates one VGPR for future SGPR Spill"),
+    cl::init(false));
+
 static unsigned findFirstFreeSGPR(CCState &CCInfo) {
   unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
   for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
@@ -10745,6 +10750,7 @@
   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
   const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
 
+
   if (Info->isEntryFunction()) {
     // Callable functions have fixed registers used for stack access.
     reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
@@ -10790,6 +10796,15 @@
   }
 
   TargetLoweringBase::finalizeLowering(MF);
+
+  // Allocate a VGPR for a future SGPR spill if the
+  // "amdgpu-reserve-vgpr-for-sgpr-spill" option is used.
+  if (VGPRReserveforSGPRSpill) {
+    int FI = MF.getFrameInfo().CreateStackObject(4, 4, false, nullptr, TargetStackID::SGPRSpill);
+    Info->allocateSGPRSpillToVGPR(MF, FI);
+    MF.getFrameInfo().RemoveStackObject(FI);
+  }
+
 }
 
 void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
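
Note: the trick in finalizeLowering is to create a dummy stack object with TargetStackID::SGPRSpill so that allocateSGPRSpillToVGPR picks a VGPR for it, then immediately remove the object; per the patch's comment, the intent is that the VGPR remains set aside for later SGPR spills without leaving a real stack slot behind. Since the flag is registered via cl::opt, it is visible on the llc command line; a minimal usage sketch, where the input file kernel.ll, the triple, and the -mcpu value are placeholder assumptions rather than part of the patch:

    llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -amdgpu-reserve-vgpr-for-sgpr-spill kernel.ll -o kernel.s

Because the option defaults to cl::init(false), passing the flag is what enables the reservation.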