diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -11877,6 +11877,12 @@
   return isa<FrameIndexSDNode>(Op);
 }
 
+static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
+                              uint64_t Val) {
+  SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
+  return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
+}
+
 /// Legalize target independent instructions (e.g. INSERT_SUBREG)
 /// with frame index operands.
 /// LLVM assumes that inputs are to these instructions are registers.
@@ -11905,6 +11911,30 @@
       DAG.RemoveDeadNode(Node);
       return ToResultReg.getNode();
     }
+
+    if (SrcVal.getValueType() == MVT::i32 && DestReg->getReg().isPhysical()) {
+      // CopyToReg may be writing a constant to a sgpr as part of a calling
+      // convention. If that constant is selected to a vgpr then we later need
+      // to copy it into a sgpr. Instead, special case the copying-to-sgpr here to
+      // force the instantiation into a sgpr independent of what lowering might
+      // happen to other uses of that constant node.
+      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SrcVal)) {
+        MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
+        const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
+
+        if (TRI->isSGPRReg(MRI, DestReg->getReg())) {
+          uint64_t Value = C->getZExtValue();
+          SDLoc DL(Node);
+          // Fourth argument to CopyToReg (glue) can be missing
+          SmallVector<SDValue> Ops;
+          for (unsigned I = 0; I < Node->getNumOperands(); I++) {
+            Ops.push_back((I == 2) ? buildSMovImm32(DAG, DL, Value)
+                                   : Node->getOperand(I));
+          }
+          return DAG.UpdateNodeOperands(Node, Ops);
+        }
+      }
+    }
   }
 
   SmallVector<SDValue, 8> Ops;
@@ -12151,12 +12181,6 @@
   }
 }
 
-static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
-                              uint64_t Val) {
-  SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
-  return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
-}
-
 MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
                                                 const SDLoc &DL,
                                                 SDValue Ptr) const {
diff --git a/llvm/test/CodeGen/AMDGPU/CopyToReg-into-sgpr-into-MOV_B32.ll b/llvm/test/CodeGen/AMDGPU/CopyToReg-into-sgpr-into-MOV_B32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/CopyToReg-into-sgpr-into-MOV_B32.ll
@@ -0,0 +1,62 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O0 -mcpu=gfx1030 < %s | FileCheck %s
+
+target triple = "amdgcn-amd-amdhsa"
+
+; Unknown functions are conservatively passed all implicit parameters
+declare void @unknown_call()
+
+; Use the same constant as a sgpr parameter (for the kernel id) and for a vector operation
+define protected amdgpu_kernel void @kern(ptr %addr) !llvm.amdgcn.lds.kernel.id !0 {
+; CHECK-LABEL: kern:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_mov_b32 s32, 0
+; CHECK-NEXT:    s_add_u32 s12, s12, s17
+; CHECK-NEXT:    s_addc_u32 s13, s13, 0
+; CHECK-NEXT:    s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s12
+; CHECK-NEXT:    s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13
+; CHECK-NEXT:    s_add_u32 s0, s0, s17
+; CHECK-NEXT:    s_addc_u32 s1, s1, 0
+; CHECK-NEXT:    v_writelane_b32 v40, s16, 0
+; CHECK-NEXT:    s_mov_b32 s13, s15
+; CHECK-NEXT:    s_mov_b32 s12, s14
+; CHECK-NEXT:    v_readlane_b32 s14, v40, 0
+; CHECK-NEXT:    s_mov_b64 s[16:17], s[8:9]
+; CHECK-NEXT:    s_load_dwordx2 s[8:9], s[16:17], 0x0
+; CHECK-NEXT:    v_mov_b32_e32 v5, 42
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_mov_b32_e32 v3, s8
+; CHECK-NEXT:    v_mov_b32_e32 v4, s9
+; CHECK-NEXT:    flat_store_dword v[3:4], v5
+; CHECK-NEXT:    s_mov_b64 s[18:19], 8
+; CHECK-NEXT:    s_mov_b32 s8, s16
+; CHECK-NEXT:    s_mov_b32 s9, s17
+; CHECK-NEXT:    s_mov_b32 s16, s18
+; CHECK-NEXT:    s_mov_b32 s15, s19
+; CHECK-NEXT:    s_add_u32 s8, s8, s16
+; CHECK-NEXT:    s_addc_u32 s15, s9, s15
+; CHECK-NEXT:    ; kill: def $sgpr8 killed $sgpr8 def $sgpr8_sgpr9
+; CHECK-NEXT:    s_mov_b32 s9, s15
+; CHECK-NEXT:    s_getpc_b64 s[16:17]
+; CHECK-NEXT:    s_add_u32 s16, s16, unknown_call@gotpcrel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s17, s17, unknown_call@gotpcrel32@hi+12
+; CHECK-NEXT:    s_load_dwordx2 s[16:17], s[16:17], 0x0
+; CHECK-NEXT:    s_mov_b32 s15, 42
+; CHECK-NEXT:    s_mov_b64 s[22:23], s[2:3]
+; CHECK-NEXT:    s_mov_b64 s[20:21], s[0:1]
+; CHECK-NEXT:    s_mov_b32 s18, 20
+; CHECK-NEXT:    v_lshlrev_b32_e64 v2, s18, v2
+; CHECK-NEXT:    s_mov_b32 s18, 10
+; CHECK-NEXT:    v_lshlrev_b32_e64 v1, s18, v1
+; CHECK-NEXT:    v_or3_b32 v31, v0, v1, v2
+; CHECK-NEXT:    s_mov_b64 s[0:1], s[20:21]
+; CHECK-NEXT:    s_mov_b64 s[2:3], s[22:23]
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[16:17]
+; CHECK-NEXT:    s_endpgm
+  store i32 42, ptr %addr
+  call fastcc void @unknown_call()
+  ret void
+}
+
+!0 = !{i32 42}