diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -687,6 +687,7 @@
   MachineFunction *MF = MI->getParent()->getParent();
   const SIInstrInfo *TII = ST.getInstrInfo();
   const MachineFrameInfo &MFI = MF->getFrameInfo();
+  const SIMachineFunctionInfo *FuncInfo = MF->getInfo<SIMachineFunctionInfo>();
 
   const MCInstrDesc &Desc = TII->get(LoadStoreOp);
   const DebugLoc &DL = MI->getDebugLoc();
@@ -725,22 +726,24 @@
     SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0, false);
 
   if (!SOffset) {
-    if (!ScratchOffsetReg) {
-      report_fatal_error("could not scavenge SGPR to spill in entry function");
-    }
     // There are no free SGPRs, and since we are in the process of spilling
     // VGPRs too. Since we need a VGPR in order to spill SGPRs (this is true
     // on SI/CI and on VI it is true until we implement spilling using scalar
     // stores), we have no way to free up an SGPR. Our solution here is to
-    // add the offset directly to the ScratchOffset register, and then
-    // subtract the offset after the spill to return ScratchOffset to it's
-    // original value.
+    // add the offset directly to the ScratchOffset or StackPtrOffset
+    // register, and then subtract the offset after the spill to return the
+    // register to its original value.
+    if (!ScratchOffsetReg)
+      ScratchOffsetReg = FuncInfo->getStackPtrOffsetReg();
     SOffset = ScratchOffsetReg;
     ScratchOffsetRegDelta = Offset;
   } else {
     Scavenged = true;
   }
 
+  if (!SOffset)
+    report_fatal_error("could not scavenge SGPR to spill in entry function");
+
   if (ScratchOffsetReg == AMDGPU::NoRegister) {
     BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), SOffset)
         .addImm(Offset);
@@ -811,8 +814,8 @@
 
   if (ScratchOffsetRegDelta != 0) {
     // Subtract the offset we added to the ScratchOffset register.
-    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), ScratchOffsetReg)
-        .addReg(ScratchOffsetReg)
+    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), SOffset)
+        .addReg(SOffset)
         .addImm(ScratchOffsetRegDelta);
   }
 }
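For reviewers less familiar with this code path, here is a minimal standalone sketch of the control flow the patch establishes. This is a hypothetical model, not the SIRegisterInfo code itself: `SpillContext`, `emitSpill`, and the register names are invented for illustration, and the real code builds MachineInstrs via BuildMI rather than printing text.

```cpp
#include <cstdint>
#include <cstdio>
#include <optional>
#include <string>

// Hypothetical model of the patched fallback logic. The names are mine;
// only the decision order mirrors the patch:
//   1. try to scavenge a free SGPR to hold the scratch offset,
//   2. otherwise borrow a live register (the scratch offset register when
//      one exists, else the stack pointer register), add the offset to it,
//      and subtract the offset again after the memory access.
struct SpillContext {
  std::optional<std::string> FreeSgpr;         // result of scavenging
  std::optional<std::string> ScratchOffsetReg; // may be absent
  std::string StackPtrReg = "s32";             // fallback added by the patch
};

void emitSpill(const SpillContext &Ctx, uint64_t Offset) {
  std::optional<std::string> SOffset = Ctx.FreeSgpr;
  uint64_t Delta = 0; // amount to subtract after the access

  if (!SOffset) {
    // No free SGPR: fold the offset into a borrowed register. Before the
    // patch this path hit report_fatal_error whenever ScratchOffsetReg was
    // absent; now it falls back to the stack pointer register instead.
    SOffset = Ctx.ScratchOffsetReg ? *Ctx.ScratchOffsetReg : Ctx.StackPtrReg;
    Delta = Offset;
    std::printf("s_add_u32 %s, %s, 0x%llx\n", SOffset->c_str(),
                SOffset->c_str(), (unsigned long long)Offset);
  }

  std::printf("buffer_store_dword v0, off, s[24:27], %s\n", SOffset->c_str());

  if (Delta != 0) {
    // Restore the borrowed register to its original value. The patch also
    // fixes this epilogue to subtract from SOffset, the register the offset
    // was actually added to, rather than naming ScratchOffsetReg directly.
    std::printf("s_sub_u32 %s, %s, 0x%llx\n", SOffset->c_str(),
                SOffset->c_str(), (unsigned long long)Delta);
  }
}

int main() {
  SpillContext Ctx; // nothing scavenged, no scratch offset register
  emitSpill(Ctx, 0x81400); // mirrors the GFX6 checks in the new test below
}
```

Running this prints the same s_add_u32 / buffer_store_dword / s_sub_u32 sequence around the spill that the new test case checks for.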
diff --git a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
--- a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
+++ b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -mcpu=verde -enable-misched=0 -post-RA-scheduler=0 < %s | FileCheck %s
-; RUN: llc -regalloc=basic -march=amdgcn -mcpu=tonga -enable-misched=0 -post-RA-scheduler=0 < %s | FileCheck %s
-;
+; RUN: llc -march=amdgcn -mcpu=verde -enable-misched=0 -post-RA-scheduler=0 -amdgpu-spill-sgpr-to-vgpr=0 < %s | FileCheck -check-prefixes=CHECK,GFX6 %s
+; RUN: llc -regalloc=basic -march=amdgcn -mcpu=tonga -enable-misched=0 -post-RA-scheduler=0 -amdgpu-spill-sgpr-to-vgpr=0 < %s | FileCheck -check-prefixes=CHECK,GFX7 %s
+;
 ; There is something about Tonga that causes this test to spend a lot of time
 ; in the default register allocator.
@@ -34,7 +34,57 @@
   ret void
 }
 
+; CHECK-LABEL: test_limited_sgpr
+; GFX6: s_add_u32 s32, s32, 0x81400
+; GFX6: buffer_store_dword v{{[0-9]+}}, off, s[24:27], s32
+; GFX6: s_sub_u32 s32, s32, 0x81400
+; GFX6: NumSgprs: 35
+; GFX6: ScratchSize: 8336
+define amdgpu_kernel void @test_limited_sgpr(<64 x i32> addrspace(1)* %out, <64 x i32> addrspace(1)* %in) #0 {
+entry:
+  %lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  %tid = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)
+
+; allocate enough scratch to go beyond 2^12 addressing
+  %scratch = alloca <1280 x i32>, align 8, addrspace(5)
+
+; load VGPR data
+  %aptr = getelementptr <64 x i32>, <64 x i32> addrspace(1)* %in, i32 %tid
+  %a = load <64 x i32>, <64 x i32> addrspace(1)* %aptr
+
+; make sure scratch is used
+  %x = extractelement <64 x i32> %a, i32 0
+  %sptr0 = getelementptr <1280 x i32>, <1280 x i32> addrspace(5)* %scratch, i32 %x, i32 0
+  store i32 1, i32 addrspace(5)* %sptr0
+
+  %wide.sgpr0 = call <16 x i32> asm sideeffect "; def $0", "=s" () #0
+  %wide.sgpr1 = call <16 x i32> asm sideeffect "; def $0", "=s" () #0
+  %cmp = icmp eq i32 %x, 0
+  br i1 %cmp, label %bb0, label %ret
+
+bb0:
+  call void asm sideeffect "; use $0", "s"(<16 x i32> %wide.sgpr0) #0
+  call void asm sideeffect "; use $0", "s"(<16 x i32> %wide.sgpr1) #0
+
+; mark most VGPR registers as used to increase register pressure
+  call void asm sideeffect "", "~{v4},~{v8},~{v12},~{v16},~{v20},~{v24},~{v28},~{v32}" ()
+  call void asm sideeffect "", "~{v36},~{v40},~{v44},~{v48},~{v52},~{v56},~{v60},~{v64}" ()
+  call void asm sideeffect "", "~{v68},~{v72},~{v76},~{v80},~{v84},~{v88},~{v92},~{v96}" ()
+  call void asm sideeffect "", "~{v100},~{v104},~{v108},~{v112},~{v116},~{v120},~{v124},~{v128}" ()
+  call void asm sideeffect "", "~{v132},~{v136},~{v140},~{v144},~{v148},~{v152},~{v156},~{v160}" ()
+  call void asm sideeffect "", "~{v164},~{v168},~{v172},~{v176},~{v180},~{v184},~{v188},~{v192}" ()
+  call void asm sideeffect "", "~{v196},~{v200},~{v204},~{v208},~{v212},~{v216},~{v220},~{v224}" ()
+  br label %ret
+
+ret:
+  %outptr = getelementptr <64 x i32>, <64 x i32> addrspace(1)* %out, i32 %tid
+  store <64 x i32> %a, <64 x i32> addrspace(1)* %outptr
+
+  ret void
+}
+
 declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #1
 declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32) #1
 
+attributes #0 = { "amdgpu-num-sgpr"="30" }
 attributes #1 = { nounwind readnone }
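As a quick sanity check on the test's constants: MUBUF scratch instructions encode a 12-bit unsigned immediate offset, which is the limit the "beyond 2^12 addressing" comment refers to, so the alloca alone already pushes later spill slots out of immediate range and onto the SGPR-offset path this patch fixes. A small hypothetical check (the names here are mine, not from the test):

```cpp
#include <cassert>

int main() {
  // %scratch = alloca <1280 x i32>: 1280 elements of 4 bytes each.
  const unsigned AllocaBytes = 1280 * 4; // 5120 bytes
  // MUBUF encodes a 12-bit unsigned immediate offset, i.e. at most
  // 4095 bytes; anything at 2^12 or beyond needs an SGPR offset.
  const unsigned ImmOffsetLimit = 1u << 12; // 4096
  assert(AllocaBytes > ImmOffsetLimit); // spill slots placed past the
                                        // alloca cannot use the immediate
  return 0;
}
```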