diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -845,6 +845,18 @@
               TII->get(AMDGPU::V_READFIRSTLANE_B32), TmpReg)
           .add(MI.getOperand(1));
       MI.getOperand(1).setReg(TmpReg);
+    } else {
+      MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
+      if (DefMI && DefMI->isMoveImmediate()) {
+        MachineOperand SrcConst = DefMI->getOperand(1);
+        assert(SrcConst.isImm() && "Operand should be immediate");
+        Register TmpReg =
+            MRI->createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
+        BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
+                TII->get(AMDGPU::S_MOV_B32), TmpReg)
+            .addImm(SrcConst.getImm());
+        MI.getOperand(1).setReg(TmpReg);
+      }
     }
     return true;
   }
diff --git a/llvm/test/CodeGen/AMDGPU/unknown_call.ll b/llvm/test/CodeGen/AMDGPU/unknown_call.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/unknown_call.ll
@@ -0,0 +1,61 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O0 -mcpu=gfx1030 < %s | FileCheck %s
+
+target triple = "amdgcn-amd-amdhsa"
+
+; Unknown functions are conservatively passed all implicit parameters
+declare void @unknown_call()
+; Use the same constant as a sgpr parameter (for the kernel id) and for a vector operation
+define protected amdgpu_kernel void @kern(ptr %addr) !llvm.amdgcn.lds.kernel.id !0 {
+; CHECK-LABEL: kern:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_mov_b32 s32, 0
+; CHECK-NEXT:    s_add_u32 s12, s12, s17
+; CHECK-NEXT:    s_addc_u32 s13, s13, 0
+; CHECK-NEXT:    s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s12
+; CHECK-NEXT:    s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13
+; CHECK-NEXT:    s_add_u32 s0, s0, s17
+; CHECK-NEXT:    s_addc_u32 s1, s1, 0
+; CHECK-NEXT:    v_writelane_b32 v40, s16, 0
+; CHECK-NEXT:    s_mov_b32 s13, s15
+; CHECK-NEXT:    s_mov_b32 s12, s14
+; CHECK-NEXT:    v_readlane_b32 s14, v40, 0
+; CHECK-NEXT:    s_mov_b64 s[16:17], s[8:9]
+; CHECK-NEXT:    s_load_dwordx2 s[8:9], s[16:17], 0x0
+; CHECK-NEXT:    v_mov_b32_e32 v5, 42
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_mov_b32_e32 v3, s8
+; CHECK-NEXT:    v_mov_b32_e32 v4, s9
+; CHECK-NEXT:    flat_store_dword v[3:4], v5
+; CHECK-NEXT:    s_mov_b64 s[18:19], 8
+; CHECK-NEXT:    s_mov_b32 s8, s16
+; CHECK-NEXT:    s_mov_b32 s9, s17
+; CHECK-NEXT:    s_mov_b32 s16, s18
+; CHECK-NEXT:    s_mov_b32 s15, s19
+; CHECK-NEXT:    s_add_u32 s8, s8, s16
+; CHECK-NEXT:    s_addc_u32 s15, s9, s15
+; CHECK-NEXT:    ; kill: def $sgpr8 killed $sgpr8 def $sgpr8_sgpr9
+; CHECK-NEXT:    s_mov_b32 s9, s15
+; CHECK-NEXT:    s_getpc_b64 s[16:17]
+; CHECK-NEXT:    s_add_u32 s16, s16, unknown_call@gotpcrel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s17, s17, unknown_call@gotpcrel32@hi+12
+; CHECK-NEXT:    s_load_dwordx2 s[16:17], s[16:17], 0x0
+; CHECK-NEXT:    s_mov_b64 s[22:23], s[2:3]
+; CHECK-NEXT:    s_mov_b64 s[20:21], s[0:1]
+; CHECK-NEXT:    s_mov_b32 s15, 20
+; CHECK-NEXT:    v_lshlrev_b32_e64 v2, s15, v2
+; CHECK-NEXT:    s_mov_b32 s15, 10
+; CHECK-NEXT:    v_lshlrev_b32_e64 v1, s15, v1
+; CHECK-NEXT:    v_or3_b32 v31, v0, v1, v2
+; CHECK-NEXT:    s_mov_b32 s15, 42
+; CHECK-NEXT:    s_mov_b64 s[0:1], s[20:21]
+; CHECK-NEXT:    s_mov_b64 s[2:3], s[22:23]
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[16:17]
+; CHECK-NEXT:    s_endpgm
+  store i32 42, ptr %addr
+  call fastcc void @unknown_call()
+  ret void
+}
+
+!0 = !{i32 42}