Index: lib/Target/AMDGPU/SIRegisterInfo.cpp =================================================================== --- lib/Target/AMDGPU/SIRegisterInfo.cpp +++ lib/Target/AMDGPU/SIRegisterInfo.cpp @@ -584,7 +584,7 @@ // We don't have access to the register scavenger if this function is called // during PEI::scavengeFrameVirtualRegs(). if (RS) - SOffset = RS->FindUnusedReg(&AMDGPU::SGPR_32RegClass); + SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0, false); if (SOffset == AMDGPU::NoRegister) { // There are no free SGPRs, and since we are in the process of spilling Index: test/CodeGen/AMDGPU/pei-reg-scavenger-position.mir =================================================================== --- /dev/null +++ test/CodeGen/AMDGPU/pei-reg-scavenger-position.mir @@ -0,0 +1,37 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -run-pass=prologepilog %s -o - | FileCheck %s + +# The wrong form of scavengeRegister was used, so it wasn't accounting +# for the iterator passed to eliminateFrameIndex. It was instead using +# the current iterator in the scavenger, which was not yet set if the +# spill was the first instruction in the block. + +--- +name: scavenge_register_position +tracksRegLiveness: true + +# Force a frame offset larger than the immediate field by giving the +# stack object a large alignment, so eliminateFrameIndex must scavenge +# an SGPR to hold the folded offset.
+stack: + - { id: 0, type: default, offset: 4096, size: 4, alignment: 8192 } + +machineFunctionInfo: + isEntryFunction: true +body: | + ; CHECK-LABEL: name: scavenge_register_position + ; CHECK: bb.0: + ; CHECK: successors: %bb.1(0x80000000) + ; CHECK: $sgpr6 = S_ADD_U32 $sgpr5, 524288, implicit-def $scc + ; CHECK: $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, killed $sgpr6, 0, 0, 0, 0, implicit $exec :: (load 4 from %stack.0, align 8192, addrspace 5) + ; CHECK: S_BRANCH %bb.1 + ; CHECK: bb.1: + ; CHECK: $sgpr6 = S_ADD_U32 $sgpr5, 524288, implicit-def $scc + ; CHECK: $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, killed $sgpr6, 0, 0, 0, 0, implicit $exec :: (load 4 from %stack.0, align 8192, addrspace 5) + ; CHECK: S_ENDPGM 0, implicit $vgpr0 + bb.0: + $vgpr0 = SI_SPILL_V32_RESTORE %stack.0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr5, 0, implicit $exec :: (load 4 from %stack.0, addrspace 5) + S_BRANCH %bb.1 + + bb.1: + $vgpr0 = SI_SPILL_V32_RESTORE %stack.0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr5, 0, implicit $exec :: (load 4 from %stack.0, addrspace 5) + S_ENDPGM 0, implicit $vgpr0 +...