Index: lib/Target/AMDGPU/SIRegisterInfo.cpp
===================================================================
--- lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -234,6 +234,7 @@
   bool IsLoad = TII->get(LoadStoreOp).mayLoad();
 
   bool RanOutOfSGPRs = false;
+  bool Scavenged = false;
   unsigned SOffset = ScratchOffset;
 
   unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
@@ -244,6 +245,8 @@
     if (SOffset == AMDGPU::NoRegister) {
       RanOutOfSGPRs = true;
       SOffset = AMDGPU::SGPR0;
+    } else {
+      Scavenged = true;
     }
     BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
             .addReg(ScratchOffset)
@@ -259,10 +262,14 @@
       getPhysRegSubReg(Value, &AMDGPU::VGPR_32RegClass, i) :
       Value;
 
+    unsigned SOffsetRegState = 0;
+    if (i + 1 == e && Scavenged)
+      SOffsetRegState |= RegState::Kill;
+
     BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
       .addReg(SubReg, getDefRegState(IsLoad))
       .addReg(ScratchRsrcReg)
-      .addReg(SOffset)
+      .addReg(SOffset, SOffsetRegState)
       .addImm(Offset)
       .addImm(0) // glc
       .addImm(0) // slc
Index: test/CodeGen/AMDGPU/spill-scavenge-offset.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/spill-scavenge-offset.ll
@@ -0,0 +1,32 @@
+; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck %s
+
+; When the offset of VGPR spills into scratch space gets too large, an
+; additional SGPR is used to calculate the scratch load/store address.
+; Make sure that this mechanism works even when many spills happen.
+
+; Just test that it compiles successfully.
+; CHECK-LABEL: test
+define void @test(<1280 x i32> addrspace(1)* %out, <1280 x i32> addrspace(1)* %in,
+                  <96 x i32> addrspace(1)* %sdata_out, <96 x i32> %sdata_in) {
+entry:
+  %tid = call i32 @llvm.SI.tid() nounwind readnone
+
+  %aptr = getelementptr <1280 x i32>, <1280 x i32> addrspace(1)* %in, i32 %tid
+  %a = load <1280 x i32>, <1280 x i32> addrspace(1)* %aptr
+
+; Mark most VGPR registers as used to increase register pressure.
+  call void asm sideeffect "", "~{VGPR4},~{VGPR8},~{VGPR12},~{VGPR16},~{VGPR20},~{VGPR24},~{VGPR28},~{VGPR32}" ()
+  call void asm sideeffect "", "~{VGPR36},~{VGPR40},~{VGPR44},~{VGPR48},~{VGPR52},~{VGPR56},~{VGPR60},~{VGPR64}" ()
+  call void asm sideeffect "", "~{VGPR68},~{VGPR72},~{VGPR76},~{VGPR80},~{VGPR84},~{VGPR88},~{VGPR92},~{VGPR96}" ()
+  call void asm sideeffect "", "~{VGPR100},~{VGPR104},~{VGPR108},~{VGPR112},~{VGPR116},~{VGPR120},~{VGPR124},~{VGPR128}" ()
+  call void asm sideeffect "", "~{VGPR132},~{VGPR136},~{VGPR140},~{VGPR144},~{VGPR148},~{VGPR152},~{VGPR156},~{VGPR160}" ()
+  call void asm sideeffect "", "~{VGPR164},~{VGPR168},~{VGPR172},~{VGPR176},~{VGPR180},~{VGPR184},~{VGPR188},~{VGPR192}" ()
+  call void asm sideeffect "", "~{VGPR196},~{VGPR200},~{VGPR204},~{VGPR208},~{VGPR212},~{VGPR216},~{VGPR220},~{VGPR224}" ()
+
+  %outptr = getelementptr <1280 x i32>, <1280 x i32> addrspace(1)* %out, i32 %tid
+  store <1280 x i32> %a, <1280 x i32> addrspace(1)* %outptr
+
+  ret void
+}
+
+declare i32 @llvm.SI.tid() nounwind readnone
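
Note on the last SIRegisterInfo.cpp hunk: without the Kill flag, a scavenged
SOffset appears live after the spill sequence, so the scavenger cannot hand
the same SGPR out again for the next sequence. The C++ below is a minimal
standalone sketch of that decision, not LLVM code; every name in it
(SubRegState, emitSubRegAccess, the register and count values) is a
hypothetical stand-in chosen for illustration.

    // Standalone sketch, NOT the LLVM API: models the kill-flag decision
    // made in the patch above for a scavenged scratch-offset SGPR.
    #include <cstdio>

    enum SubRegState { RS_None = 0, RS_Kill = 1 };

    // Hypothetical stand-in for emitting one scratch load/store per
    // subregister of the spilled value.
    static void emitSubRegAccess(unsigned SubReg, unsigned SOffset,
                                 unsigned State) {
      std::printf("  buffer_op v%u, s%u%s\n", SubReg, SOffset,
                  (State & RS_Kill) ? " <kill>" : "");
    }

    int main() {
      const unsigned NumSubRegs = 4; // e.g. spilling a 128-bit register tuple
      const unsigned SOffset = 8;    // hypothetical scavenged SGPR number
      const bool Scavenged = true;   // SOffset came from the scavenger

      for (unsigned i = 0, e = NumSubRegs; i != e; ++i) {
        unsigned State = RS_None;
        // Mirror of the patch: only the final subregister access may kill
        // SOffset, and only when it was scavenged. Earlier accesses still
        // read it, and a non-scavenged ScratchOffset is a live input that
        // must remain live after the spill sequence.
        if (i + 1 == e && Scavenged)
          State |= RS_Kill;
        emitSubRegAccess(i, SOffset, State);
      }
      return 0;
    }

Running the sketch prints one access per subregister, with "<kill>" only on
the last one, which is the operand state the patch encodes via
RegState::Kill.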