diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -687,6 +687,7 @@
   MachineFunction *MF = MI->getParent()->getParent();
   const SIInstrInfo *TII = ST.getInstrInfo();
   const MachineFrameInfo &MFI = MF->getFrameInfo();
+  const SIMachineFunctionInfo *FuncInfo = MF->getInfo<SIMachineFunctionInfo>();
 
   const MCInstrDesc &Desc = TII->get(LoadStoreOp);
   const DebugLoc &DL = MI->getDebugLoc();
@@ -725,22 +726,26 @@
       SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0, false);
 
     if (!SOffset) {
-      if (!ScratchOffsetReg) {
-        report_fatal_error("could not scavenge SGPR to spill in entry function");
-      }
       // There are no free SGPRs, and since we are in the process of spilling
       // VGPRs too. Since we need a VGPR in order to spill SGPRs (this is true
       // on SI/CI and on VI it is true until we implement spilling using scalar
       // stores), we have no way to free up an SGPR. Our solution here is to
-      // add the offset directly to the ScratchOffset register, and then
-      // subtract the offset after the spill to return ScratchOffset to it's
-      // original value.
-      SOffset = ScratchOffsetReg;
+      // add the offset directly to the ScratchOffset or StackPtrOffset
+      // register, and then subtract the offset after the spill to return the
+      // register to its original value.
+      if (ScratchOffsetReg) {
+        SOffset = ScratchOffsetReg;
+      } else {
+        SOffset = FuncInfo->getStackPtrOffsetReg();
+      }
       ScratchOffsetRegDelta = Offset;
     } else {
       Scavenged = true;
     }
 
+    if (!SOffset)
+      report_fatal_error("could not scavenge SGPR to spill in entry function");
+
     if (ScratchOffsetReg == AMDGPU::NoRegister) {
       BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), SOffset)
           .addImm(Offset);
@@ -811,8 +816,8 @@
 
   if (ScratchOffsetRegDelta != 0) {
     // Subtract the offset we added to the ScratchOffset register.
-    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), ScratchOffsetReg)
-        .addReg(ScratchOffsetReg)
+    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), SOffset)
+        .addReg(SOffset)
         .addImm(ScratchOffsetRegDelta);
   }
 }
diff --git a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
--- a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
+++ b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
@@ -34,7 +34,33 @@
   ret void
 }
 
+; Just test that it compiles successfully.
+; CHECK-LABEL: test_limited_sgpr
+define amdgpu_kernel void @test_limited_sgpr(<1280 x i32> addrspace(1)* %out, <1280 x i32> addrspace(1)* %in) #0 {
+entry:
+  %lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  %tid = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)
+
+  %aptr = getelementptr <1280 x i32>, <1280 x i32> addrspace(1)* %in, i32 %tid
+  %a = load <1280 x i32>, <1280 x i32> addrspace(1)* %aptr
+
+; mark most VGPR registers as used to increase register pressure
+  call void asm sideeffect "", "~{v4},~{v8},~{v12},~{v16},~{v20},~{v24},~{v28},~{v32}" ()
+  call void asm sideeffect "", "~{v36},~{v40},~{v44},~{v48},~{v52},~{v56},~{v60},~{v64}" ()
+  call void asm sideeffect "", "~{v68},~{v72},~{v76},~{v80},~{v84},~{v88},~{v92},~{v96}" ()
+  call void asm sideeffect "", "~{v100},~{v104},~{v108},~{v112},~{v116},~{v120},~{v124},~{v128}" ()
+  call void asm sideeffect "", "~{v132},~{v136},~{v140},~{v144},~{v148},~{v152},~{v156},~{v160}" ()
+  call void asm sideeffect "", "~{v164},~{v168},~{v172},~{v176},~{v180},~{v184},~{v188},~{v192}" ()
+  call void asm sideeffect "", "~{v196},~{v200},~{v204},~{v208},~{v212},~{v216},~{v220},~{v224}" ()
+
+  %outptr = getelementptr <1280 x i32>, <1280 x i32> addrspace(1)* %out, i32 %tid
+  store <1280 x i32> %a, <1280 x i32> addrspace(1)* %outptr
+
+  ret void
+}
+
 declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #1
 declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32) #1
 
+attributes #0 = { "amdgpu-num-sgpr"="30" }
 attributes #1 = { nounwind readnone }
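
The comment in the first hunk describes the fallback as a borrow-and-restore scheme: when no free SGPR can be scavenged, the spill offset is added directly to the ScratchOffset or StackPtrOffset register and subtracted again after the spill, so the borrowed register ends up with its original value and no extra register is needed to save it. A minimal standalone sketch of that pattern follows; it is not LLVM code, and BaseReg, accessScratch, and spillWithBorrowedBase are hypothetical stand-ins.

#include <cassert>
#include <cstdint>

// Stands in for the borrowed SGPR (ScratchOffset or StackPtrOffset).
static uint32_t BaseReg = 0x1000;
// Stands in for the buffer spill/reload that uses the adjusted base.
static void accessScratch(uint32_t Addr) { (void)Addr; }

void spillWithBorrowedBase(uint32_t Offset) {
  uint32_t Before = BaseReg;  // kept only for the assert below
  BaseReg += Offset;          // fold the offset into the live base (S_ADD_U32 in the patch)
  accessScratch(BaseReg);     // perform the scratch access against the adjusted base
  BaseReg -= Offset;          // undo the adjustment afterwards (S_SUB_U32 in the patch)
  assert(BaseReg == Before && "borrowed register must be restored");
}

int main() {
  spillWithBorrowedBase(0x400);  // borrow the base for one access, then restore it
  return 0;
}

The point of the trailing subtraction is that the arithmetic itself undoes the adjustment, which is why the fix can reuse an always-live register instead of requiring a scavenged one.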