Index: lib/Target/AMDGPU/SIFormMemoryClauses.cpp
===================================================================
--- lib/Target/AMDGPU/SIFormMemoryClauses.cpp
+++ lib/Target/AMDGPU/SIFormMemoryClauses.cpp
@@ -119,6 +119,17 @@
     return false;
   if (!IsVMEMClause && !isSMEMClauseInst(MI))
     return false;
+  // If this is a load instruction where the result has been coalesced with an operand, then we cannot clause it.
+  for (const MachineOperand &ResMO : MI.defs()) {
+    unsigned ResReg = ResMO.getReg();
+    for (const MachineOperand &MO : MI.uses()) {
+      if (!MO.isReg() || MO.isDef())
+        continue;
+      if (MO.getReg() == ResReg)
+        return false;
+    }
+    break; // Only check the first def.
+  }
   return true;
 }
 
Index: test/CodeGen/AMDGPU/smem-no-clause-coalesced.mir
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/smem-no-clause-coalesced.mir
@@ -0,0 +1,45 @@
+# RUN: llc -march=amdgcn -mcpu=gfx902 -o - %s -run-pass si-form-memory-clauses -verify-machineinstrs | FileCheck -check-prefixes=CHECK,XNACK %s
+
+# The SIFormMemoryClauses pass must not form a clause (indicated by BUNDLE)
+# from the two adjacent smem instructions, because the first one has its
+# result coalesced with an operand.
+
+# CHECK-LABEL: body:
+# XNACK-NOT: BUNDLE
+
+---
+name: _amdgpu_cs_main
+alignment: 0
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+failedISel: false
+tracksRegLiveness: true
+hasWinCFI: false
+fixedStack: []
+stack: []
+constants: []
+body: |
+  bb.0:
+    liveins: $sgpr2, $sgpr3, $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1
+
+    %18:vgpr_32 = COPY $vgpr1
+    %12:sgpr_32 = COPY $sgpr12
+    %3:sgpr_32 = COPY $sgpr3
+    undef %36.sub0:sgpr_128 = COPY $sgpr2
+    %17:vgpr_32 = COPY $vgpr0
+    %14:sgpr_32 = COPY $sgpr14
+    %13:sgpr_32 = COPY $sgpr13
+    %22:sreg_64_xexec = S_GETPC_B64
+    %22.sub0:sreg_64_xexec = COPY %12
+    %36.sub1:sgpr_128 = S_AND_B32 %3, 65535, implicit-def dead $scc
+    %36.sub3:sgpr_128 = S_MOV_B32 151468
+    %36.sub2:sgpr_128 = S_MOV_B32 -1
+    %22.sub0:sreg_64_xexec = S_LOAD_DWORD_IMM %22, 48, 0 :: (load 4 from `i8 addrspace(4)* undef`, addrspace 4)
+    %37:sreg_64_xexec = S_BUFFER_LOAD_DWORDX2_IMM %36, 640, 0 :: (dereferenceable invariant load 8)
+    undef %169.sub0:vreg_128 = V_LSHL_ADD_U32 %13, 4, %17, implicit $exec
+    %169.sub1:vreg_128 = V_LSHL_ADD_U32 %14, 4, %18, implicit $exec
+    S_ENDPGM
+
+...