Index: lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -323,6 +323,8 @@
   }
 }
 
+// FIXME: Returns uniform if there's no source value information. This is
+// probably wrong.
 static bool isInstrUniformNonExtLoadAlign4(const MachineInstr &MI) {
   if (!MI.hasOneMemOperand())
     return false;
@@ -1047,8 +1049,13 @@
   SmallVector<Register, 1> SrcRegs(OpdMapper.getVRegs(1));
 
   // If the pointer is an SGPR, we have nothing to do.
-  if (SrcRegs.empty())
-    return false;
+  if (SrcRegs.empty()) {
+    Register PtrReg = MI.getOperand(1).getReg();
+    const RegisterBank *PtrBank = getRegBank(PtrReg, MRI, *TRI);
+    if (PtrBank == &AMDGPU::SGPRRegBank)
+      return false;
+    SrcRegs.push_back(PtrReg);
+  }
 
   assert(LoadSize % MaxNonSmrdLoadSize == 0);
 
@@ -2025,7 +2032,7 @@
 
   const MachineFunction &MF = *MI.getParent()->getParent();
   const MachineRegisterInfo &MRI = MF.getRegInfo();
-  SmallVector<const ValueMapping*, 8> OpdsMapping(MI.getNumOperands());
+  SmallVector<const ValueMapping*, 8> OpdsMapping(2);
   unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
   LLT LoadTy = MRI.getType(MI.getOperand(0).getReg());
   Register PtrReg = MI.getOperand(1).getReg();
@@ -2036,7 +2043,10 @@
   const ValueMapping *ValMapping;
   const ValueMapping *PtrMapping;
 
-  if ((AS != AMDGPUAS::LOCAL_ADDRESS && AS != AMDGPUAS::REGION_ADDRESS &&
+  const RegisterBank *PtrBank = getRegBank(PtrReg, MRI, *TRI);
+
+  if (PtrBank == &AMDGPU::SGPRRegBank &&
+      (AS != AMDGPUAS::LOCAL_ADDRESS && AS != AMDGPUAS::REGION_ADDRESS &&
        AS != AMDGPUAS::PRIVATE_ADDRESS) &&
       isInstrUniformNonExtLoadAlign4(MI)) {
     // We have a uniform instruction so we want to use an SMRD load
Index: test/CodeGen/AMDGPU/GlobalISel/legalize-extract.mir
===================================================================
--- test/CodeGen/AMDGPU/GlobalISel/legalize-extract.mir
+++ test/CodeGen/AMDGPU/GlobalISel/legalize-extract.mir
@@ -937,6 +937,7 @@
 ...
 ---
+
 name: extract_s16_v2s16_offset0
 body: |
   bb.0:
@@ -1113,3 +1114,18 @@
     %2:_(s32) = G_ANYEXT %1
     $vgpr0 = COPY %2
 ...
+
+name: extract_s16_s64_offset18
+
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+    ; CHECK-LABEL: name: extract_s16_s64_offset18
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[EXTRACT:%[0-9]+]]:_(s16) = G_EXTRACT [[COPY]](s64), 18
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[EXTRACT]](s16)
+    ; CHECK: $vgpr0 = COPY [[ANYEXT]](s32)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s16) = G_EXTRACT %0, 18
+    %2:_(s32) = G_ANYEXT %1
+    $vgpr0 = COPY %2
Index: test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir
===================================================================
--- test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir
+++ test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir
@@ -70,6 +70,7 @@
     define amdgpu_kernel void @load_constant_i32_uniform_align1() {ret void}
     define amdgpu_kernel void @load_private_uniform_sgpr_i32() {ret void}
     define amdgpu_kernel void @load_constant_v8i32_vgpr_crash() { ret void }
+    define amdgpu_kernel void @load_constant_v8i32_vgpr_crash_loop_phi() { ret void }
 
     declare i32 @llvm.amdgcn.workitem.id.x() #0
     attributes #0 = { nounwind readnone }
@@ -657,12 +658,43 @@
 ---
 name: load_constant_v8i32_vgpr_crash
 legalized: true
+tracksRegLiveness: true
 
 body: |
   bb.0:
     liveins: $vgpr0_vgpr1
-    %0:_(p4) = COPY $vgpr0_vgpr1
-    %1:_(<8 x s32>) = G_LOAD %0 :: (load 32, addrspace 4)
+    ; CHECK-LABEL: name: load_constant_v8i32_vgpr_crash
+    ; CHECK: %0:vgpr(p4) = COPY $vgpr0_vgpr1
+    ; CHECK: vgpr(<4 x s32>) = G_LOAD %0(p4)
+    ; CHECK: vgpr(<4 x s32>) = G_LOAD
+    ; CHECK: G_CONCAT_VECTORS
+    %0:_(p4) = COPY $vgpr0_vgpr1
+    %1:_(<8 x s32>) = G_LOAD %0 :: (load 32, addrspace 4)
+...
+
+---
+name: load_constant_v8i32_vgpr_crash_loop_phi
+legalized: true
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
+
+    ; CHECK-LABEL: name: load_constant_v8i32_vgpr_crash_loop_phi
+    ; CHECK: G_PHI
+    ; CHECK: vgpr(<4 x s32>) = G_LOAD
+    ; CHECK: vgpr(<4 x s32>) = G_LOAD
+    ; CHECK: G_CONCAT_VECTORS
+
+    %0:_(p4) = COPY $sgpr0_sgpr1
+    %1:_(p4) = COPY $sgpr2_sgpr3
+    G_BR %bb.1
+
+  bb.1:
+    %2:_(p4) = G_PHI %0, %bb.0, %4, %bb.1
+    %3:_(<8 x s32>) = G_LOAD %2 :: (load 32, addrspace 4)
+    %4:_(p4) = COPY %1
+    G_BR %bb.1
 ...