Index: llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
+++ llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -145,9 +145,20 @@
     KernargSegmentPtr = true;
 
   if (ST.hasFlatAddressSpace() && isEntryFunction() && isAmdHsaOrMesa) {
+    auto hasNonSpillStackObjects = [&]() {
+      // Avoid expensive checking if there are no stack objects.
+      if (!HasStackObjects)
+        return false;
+      for (auto OI = FrameInfo.getObjectIndexBegin(),
+                OE = FrameInfo.getObjectIndexEnd(); OI != OE; ++OI)
+        if (!FrameInfo.isSpillSlotObjectIndex(OI))
+          return true;
+      // All stack objects are spill slots.
+      return false;
+    };
     // TODO: This could be refined a lot. The attribute is a poor way of
     // detecting calls that may require it before argument lowering.
-    if (HasStackObjects || F.hasFnAttribute("amdgpu-flat-scratch"))
+    if (hasNonSpillStackObjects() || F.hasFnAttribute("amdgpu-flat-scratch"))
       FlatScratchInit = true;
   }
Index: llvm/trunk/test/CodeGen/AMDGPU/undefined-physreg-sgpr-spill.mir
===================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/undefined-physreg-sgpr-spill.mir
+++ llvm/trunk/test/CodeGen/AMDGPU/undefined-physreg-sgpr-spill.mir
@@ -41,7 +41,7 @@
 body:             |
   bb.0:
     successors: %bb.1, %bb.2
-    liveins: $vgpr0, $sgpr4_sgpr5
+    liveins: $vgpr0, $sgpr4_sgpr5, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr13
 
     $vgpr1_vgpr2 = COPY killed $sgpr4_sgpr5, implicit $exec
     $vgpr1 = GLOBAL_LOAD_UBYTE killed $vgpr1_vgpr2, 0, 0, 0, 0, implicit $exec :: (non-temporal dereferenceable invariant load 1 from `i1 addrspace(4)* undef`)
@@ -66,6 +66,7 @@
 
   bb.2:
     successors:
+    liveins: $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr13
 
     $sgpr0_sgpr1 = SI_SPILL_S64_RESTORE %stack.0, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $sgpr13, implicit-def dead $m0 :: (load 8 from %stack.0, align 4, addrspace 5)
     $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
@@ -105,7 +106,7 @@
 body:             |
   bb.0:
     successors: %bb.1, %bb.2
-    liveins: $vgpr0, $sgpr4_sgpr5
+    liveins: $vgpr0, $sgpr4_sgpr5, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr13
 
     $vgpr1_vgpr2 = COPY killed $sgpr4_sgpr5, implicit $exec
     $vgpr1 = GLOBAL_LOAD_UBYTE killed $vgpr1_vgpr2, 0, 0, 0, 0, implicit $exec :: (non-temporal dereferenceable invariant load 1 from `i1 addrspace(4)* undef`)
@@ -130,6 +131,7 @@
 
   bb.2:
     successors:
+    liveins: $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr13
 
     $sgpr0_sgpr1 = SI_SPILL_S64_RESTORE %stack.0, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $sgpr13, implicit-def dead $m0 :: (load 8 from %stack.0, align 4, addrspace 5)
     $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
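
For illustration only (not part of the patch): a minimal standalone sketch of
the same spill-slot scan, written as a free function against LLVM's
MachineFrameInfo API. getObjectIndexBegin(), getObjectIndexEnd(), and
isSpillSlotObjectIndex() are real MachineFrameInfo calls; the function name
and the explicit HasStackObjects parameter are hypothetical stand-ins for the
members the patch's lambda captures from SIMachineFunctionInfo.

// Illustrative sketch, not the patch itself: the lambda's logic as a free
// function. The name and parameters are hypothetical; in the patch this is
// a lambda capturing SIMachineFunctionInfo members.
#include "llvm/CodeGen/MachineFrameInfo.h"

static bool hasNonSpillStackObjects(const llvm::MachineFrameInfo &FrameInfo,
                                    bool HasStackObjects) {
  // Fast path: skip the scan entirely when there are no stack objects.
  if (!HasStackObjects)
    return false;
  // Frame indices run from getObjectIndexBegin() (negative for fixed
  // objects) up to getObjectIndexEnd().
  for (int OI = FrameInfo.getObjectIndexBegin(),
           OE = FrameInfo.getObjectIndexEnd();
       OI != OE; ++OI)
    if (!FrameInfo.isSpillSlotObjectIndex(OI))
      return true; // Found a non-spill stack object, e.g. a user alloca.
  // Every stack object is a spill slot; on this path the patch does not
  // enable FlatScratchInit.
  return false;
}

The HasStackObjects fast path mirrors the patch's own comment: the per-object
loop only runs when the frame actually has objects, so the common no-stack
case stays cheap.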