diff --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -458,6 +458,9 @@
   assert(ScratchWaveOffsetReg);
 
   if (MF.getFrameInfo().hasCalls()) {
+    // Note: Spilling code in SIRegisterInfo.cpp assumes that if !hasCalls() it
+    // can overwrite StackPtrOffsetReg - updates are required there if changes
+    // are made here to that assumption.
     Register SPReg = MFI->getStackPtrOffsetReg();
     assert(SPReg != AMDGPU::SP_REG);
     BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), SPReg)
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -738,6 +738,7 @@
   bool Scavenged = false;
   MCRegister SOffset = ScratchOffsetReg;
+  bool UninitStackPtrOffset = false;
 
   const unsigned EltSize = 4;
   const TargetRegisterClass *RC = getRegClassForReg(MF->getRegInfo(), ValueReg);
@@ -776,8 +777,13 @@
       // add the offset directly to the ScratchOffset or StackPtrOffset
       // register, and then subtract the offset after the spill to return the
       // register to it's original value.
-      if (!ScratchOffsetReg)
+      // In the case where StackPtrOffset is not initialized/otherwise used
+      // (hasCalls is false), we can just use the register directly with no
+      // adjustment required.
+      if (!ScratchOffsetReg) {
         ScratchOffsetReg = FuncInfo->getStackPtrOffsetReg();
+        UninitStackPtrOffset = !MFI.hasCalls();
+      }
       SOffset = ScratchOffsetReg;
       ScratchOffsetRegDelta = Offset;
     } else {
@@ -787,7 +793,7 @@
     if (!SOffset)
       report_fatal_error("could not scavenge SGPR to spill in entry function");
 
-    if (ScratchOffsetReg == AMDGPU::NoRegister) {
+    if (ScratchOffsetReg == AMDGPU::NoRegister || UninitStackPtrOffset) {
       BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), SOffset)
           .addImm(Offset);
     } else {
@@ -855,7 +861,7 @@
       MIB.addReg(ValueReg, RegState::Implicit | SrcDstRegState);
   }
 
-  if (ScratchOffsetRegDelta != 0) {
+  if (!UninitStackPtrOffset && ScratchOffsetRegDelta != 0) {
     // Subtract the offset we added to the ScratchOffset register.
     BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), SOffset)
         .addReg(SOffset)
diff --git a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
--- a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
+++ b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
@@ -35,9 +35,8 @@
 }
 
 ; CHECK-LABEL: test_limited_sgpr
-; GFX6: s_add_u32 s32, s32, 0x[[OFFSET:[0-9]+]]
+; GFX6: s_mov_b32 s32, 0x{{[0-9]+}}
 ; GFX6-NEXT: buffer_load_dword v{{[0-9]+}}, off, s[{{[0-9:]+}}], s32
-; GFX6-NEXT: s_sub_u32 s32, s32, 0x[[OFFSET:[0-9]+]]
 ; GFX6: NumSgprs: 48
 ; GFX6: ScratchSize: 8624
 define amdgpu_kernel void @test_limited_sgpr(<64 x i32> addrspace(1)* %out, <64 x i32> addrspace(1)* %in) #0 {
@@ -91,8 +90,69 @@
   ret void
 }
 
+; CHECK-LABEL: test_limited_sgpr_with_call
+; GFX6: s_add_u32 s32, s32, 0x[[OFFSET:[0-9]+]]
+; GFX6-NEXT: buffer_load_dword v{{[0-9]+}}, off, s[{{[0-9:]+}}], s32
+; GFX6-NEXT: s_sub_u32 s32, s32, 0x[[OFFSET]]
+; GFX6: NumSgprs: 48
+; GFX6: ScratchSize: 24992
+define amdgpu_kernel void @test_limited_sgpr_with_call(<64 x i32> addrspace(1)* %out, i32 addrspace(1)* %call_out, <64 x i32> addrspace(1)* %in) #0 {
+entry:
+  %lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  %tid = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)
+
+; allocate enough scratch to go beyond 2^12 addressing
+  %scratch = alloca <1280 x i32>, align 8, addrspace(5)
+
+; load VGPR data
+  %aptr = getelementptr <64 x i32>, <64 x i32> addrspace(1)* %in, i32 %tid
+  %a = load <64 x i32>, <64 x i32> addrspace(1)* %aptr
+
+; make sure scratch is used
+  %x = extractelement <64 x i32> %a, i32 0
+  %sptr0 = getelementptr <1280 x i32>, <1280 x i32> addrspace(5)* %scratch, i32 %x, i32 0
+  store i32 1, i32 addrspace(5)* %sptr0
+
+; fill up SGPRs
+  %sgpr0 = call <8 x i32> asm sideeffect "; def $0", "=s" ()
+  %sgpr1 = call <8 x i32> asm sideeffect "; def $0", "=s" ()
+  %sgpr2 = call <8 x i32> asm sideeffect "; def $0", "=s" ()
+  %sgpr3 = call <8 x i32> asm sideeffect "; def $0", "=s" ()
+  %sgpr4 = call <4 x i32> asm sideeffect "; def $0", "=s" ()
+  %sgpr5 = call <2 x i32> asm sideeffect "; def $0", "=s" ()
+  %sgpr6 = call <2 x i32> asm sideeffect "; def $0", "=s" ()
+  %sgpr7 = call i32 asm sideeffect "; def $0", "=s" ()
+
+  %cmp = icmp eq i32 %x, 0
+  br i1 %cmp, label %bb0, label %ret
+
+bb0:
+; create SGPR pressure
+  call void asm sideeffect "; use $0,$1,$2,$3,$4,$5,$6,$7", "s,s,s,s,s,s,s,s"(<8 x i32> %sgpr0, <8 x i32> %sgpr1, <8 x i32> %sgpr2, <8 x i32> %sgpr3, <4 x i32> %sgpr4, <2 x i32> %sgpr5, <2 x i32> %sgpr6, i32 %sgpr7)
+
+; mark most VGPR registers as used to increase register pressure
+  call void asm sideeffect "", "~{v4},~{v8},~{v12},~{v16},~{v20},~{v24},~{v28},~{v32}" ()
+  call void asm sideeffect "", "~{v36},~{v40},~{v44},~{v48},~{v52},~{v56},~{v60},~{v64}" ()
+  call void asm sideeffect "", "~{v68},~{v72},~{v76},~{v80},~{v84},~{v88},~{v92},~{v96}" ()
+  call void asm sideeffect "", "~{v100},~{v104},~{v108},~{v112},~{v116},~{v120},~{v124},~{v128}" ()
+  call void asm sideeffect "", "~{v132},~{v136},~{v140},~{v144},~{v148},~{v152},~{v156},~{v160}" ()
+  call void asm sideeffect "", "~{v164},~{v168},~{v172},~{v176},~{v180},~{v184},~{v188},~{v192}" ()
+  call void asm sideeffect "", "~{v196},~{v200},~{v204},~{v208},~{v212},~{v216},~{v220},~{v224}" ()
+  br label %ret
+
+ret:
+  %outptr = getelementptr <64 x i32>, <64 x i32> addrspace(1)* %out, i32 %tid
+  store <64 x i32> %a, <64 x i32> addrspace(1)* %outptr
+
+  %call_tmp = call i32 @a_func(i32 1)
+  store volatile i32 %call_tmp, i32 addrspace(1)* %call_out
+
+  ret void
+}
+
 declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #1
 declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32) #1
+declare i32 @a_func(i32) #1
 
 attributes #0 = { "amdgpu-waves-per-eu"="10,10" }
 attributes #1 = { nounwind readnone }
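
For reference, a sketch of the two spill sequences the CHECK lines above encode. The 0x2000 offset and the v0/s[0:3] register choices are illustrative placeholders, not values taken from the tests:

; Entry function with calls (test_limited_sgpr_with_call): s32 holds the live
; stack pointer, so the large spill offset is added to it and subtracted back
; after the scratch access.
;   s_add_u32  s32, s32, 0x2000
;   buffer_load_dword v0, off, s[0:3], s32
;   s_sub_u32  s32, s32, 0x2000
;
; Entry function without calls (test_limited_sgpr): the prologue never
; initializes s32, so the spill code may clobber it outright and skip the
; restore, saving one instruction per spill or reload.
;   s_mov_b32  s32, 0x2000
;   buffer_load_dword v0, off, s[0:3], s32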