diff --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -105,7 +105,7 @@
   int NewFI = FrameInfo.CreateStackObject(4, Align(4), true, nullptr,
                                           TargetStackID::SGPRSpill);
 
-  if (MFI->allocateSGPRSpillToVGPR(MF, NewFI)) {
+  if (TRI->spillSGPRToVGPR() && MFI->allocateSGPRSpillToVGPR(MF, NewFI)) {
     // 3: There's no free lane to spill, and no free register to save FP/BP,
     // so we're forced to spill another VGPR to use for the spill.
     FrameIndex = NewFI;
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -11487,8 +11487,9 @@
   // Allocate a VGPR for future SGPR Spill if
   // "amdgpu-reserve-vgpr-for-sgpr-spill" option is used
   // FIXME: We won't need this hack if we split SGPR allocation from VGPR
-  if (VGPRReserveforSGPRSpill && !Info->VGPRReservedForSGPRSpill &&
-      !Info->isEntryFunction() && MF.getFrameInfo().hasStackObjects())
+  if (VGPRReserveforSGPRSpill && TRI->spillSGPRToVGPR() &&
+      !Info->VGPRReservedForSGPRSpill && !Info->isEntryFunction() &&
+      MF.getFrameInfo().hasStackObjects())
     Info->reserveVGPRforSGPRSpills(MF);
 }
 
diff --git a/llvm/test/CodeGen/AMDGPU/frame-setup-without-sgpr-to-vgpr-spills.ll b/llvm/test/CodeGen/AMDGPU/frame-setup-without-sgpr-to-vgpr-spills.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/frame-setup-without-sgpr-to-vgpr-spills.ll
@@ -0,0 +1,31 @@
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -amdgpu-spill-sgpr-to-vgpr=true < %s | FileCheck -check-prefixes=GCN,SPILL-TO-VGPR %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -amdgpu-spill-sgpr-to-vgpr=false < %s | FileCheck -check-prefixes=GCN,NO-SPILL-TO-VGPR %s
+
+; Check frame setup where SGPR spills to VGPRs are disabled or enabled.
+
+declare hidden void @external_void_func_void() #0
+
+; GCN-LABEL: {{^}}callee_with_stack_and_call:
+; SPILL-TO-VGPR: buffer_store_dword v40, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; SPILL-TO-VGPR: v_writelane_b32 v40, s33, 2
+; NO-SPILL-TO-VGPR: v_mov_b32_e32 v0, s33
+; NO-SPILL-TO-VGPR: buffer_store_dword v0, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+
+; GCN: s_swappc_b64 s[30:31], s[4:5]
+
+; SPILL-TO-VGPR: v_readlane_b32 s4, v40, 0
+; SPILL-TO-VGPR: v_readlane_b32 s5, v40, 1
+; NO-SPILL-TO-VGPR: v_readlane_b32 s4, v1, 0
+; NO-SPILL-TO-VGPR: v_readlane_b32 s5, v1, 1
+
+; SPILL-TO-VGPR: v_readlane_b32 s33, v40, 2
+; NO-SPILL-TO-VGPR: buffer_load_dword v0, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; NO-SPILL-TO-VGPR: v_readfirstlane_b32 s33, v0
+define void @callee_with_stack_and_call() #0 {
+  %alloca = alloca i32, addrspace(5)
+  store volatile i32 0, i32 addrspace(5)* %alloca
+  call void @external_void_func_void()
+  ret void
+}
+
+attributes #0 = { nounwind }