Index: lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h
===================================================================
--- lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h
+++ lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h
@@ -19,6 +19,7 @@
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineModuleInfoImpls.h"
 #include "llvm/IR/LLVMContext.h"
+#include "llvm/Support/ErrorOr.h"

 namespace llvm {

@@ -35,6 +36,24 @@
   /// \brief Wavefront synchronization scope ID.
   SyncScope::ID WavefrontSSID;

+  /// \returns \p SSID's position in the total ordering of synchronization
+  /// scopes such that a wider scope has a higher value than a narrower scope,
+  /// or error if \p SSID is an unknown synchronization scope.
+  ErrorOr<int> getSSIDWidth(SyncScope::ID SSID) const {
+    if (SSID == SyncScope::SingleThread)
+      return 0;
+    else if (SSID == getWavefrontSSID())
+      return 1;
+    else if (SSID == getWorkgroupSSID())
+      return 2;
+    else if (SSID == getAgentSSID())
+      return 3;
+    else if (SSID == SyncScope::System)
+      return 4;
+
+    return std::error_code(-1, std::generic_category());
+  }
+
 public:
   AMDGPUMachineModuleInfo(const MachineModuleInfo &MMI);

@@ -50,6 +69,20 @@
   SyncScope::ID getWavefrontSSID() const {
     return WavefrontSSID;
   }
+
+  /// \returns True if \p SSID is wider than \p Other, false if not, or error
+  /// if \p SSID and/or \p Other is an unknown synchronization scope.
+  ErrorOr<bool> isSSIDWiderThan(SyncScope::ID SSID, SyncScope::ID Other) const {
+    auto SSIDWidth = getSSIDWidth(SSID);
+    if (!SSIDWidth)
+      return SSIDWidth.getError();
+
+    auto OtherWidth = getSSIDWidth(Other);
+    if (!OtherWidth)
+      return OtherWidth.getError();
+
+    return SSIDWidth.get() > OtherWidth.get();
+  }
 };

 } // end namespace llvm
Index: lib/Target/AMDGPU/SIMemoryLegalizer.cpp
===================================================================
--- lib/Target/AMDGPU/SIMemoryLegalizer.cpp
+++ lib/Target/AMDGPU/SIMemoryLegalizer.cpp
@@ -60,6 +60,11 @@
           AtomicOrdering FailureOrdering)
       : SSID(SSID), Ordering(Ordering), FailureOrdering(FailureOrdering) {}

+  /// \returns Info constructed from \p MI.
+  static Optional<SIMemOpInfo> constructFromMI(
+      const AMDGPUMachineModuleInfo &MMI,
+      const MachineBasicBlock::iterator &MI);
+
 public:
   /// \returns Synchronization scope ID of the machine instruction used to
   /// create this SIMemOpInfo.
@@ -85,29 +90,36 @@
   /// \returns Load info if \p MI is a load operation, "None" otherwise.
   static Optional<SIMemOpInfo> getLoadInfo(
+      const AMDGPUMachineModuleInfo &MMI,
       const MachineBasicBlock::iterator &MI);
   /// \returns Store info if \p MI is a store operation, "None" otherwise.
   static Optional<SIMemOpInfo> getStoreInfo(
+      const AMDGPUMachineModuleInfo &MMI,
       const MachineBasicBlock::iterator &MI);
   /// \returns Atomic fence info if \p MI is an atomic fence operation,
   /// "None" otherwise.
   static Optional<SIMemOpInfo> getAtomicFenceInfo(
+      const AMDGPUMachineModuleInfo &MMI,
       const MachineBasicBlock::iterator &MI);
   /// \returns Atomic cmpxchg info if \p MI is an atomic cmpxchg operation,
   /// "None" otherwise.
   static Optional<SIMemOpInfo> getAtomicCmpxchgInfo(
+      const AMDGPUMachineModuleInfo &MMI,
       const MachineBasicBlock::iterator &MI);
   /// \returns Atomic rmw info if \p MI is an atomic rmw operation,
   /// "None" otherwise.
   static Optional<SIMemOpInfo> getAtomicRmwInfo(
+      const AMDGPUMachineModuleInfo &MMI,
+      const MachineBasicBlock::iterator &MI);
+
+  /// \brief Reports unknown synchronization scope used in \p MI to LLVM
+  /// context.
+  static void reportUnknownSyncScope(
       const MachineBasicBlock::iterator &MI);
 };

 class SIMemoryLegalizer final : public MachineFunctionPass {
 private:
-  /// \brief LLVM context.
-  LLVMContext *CTX = nullptr;
-
   /// \brief Machine module info.
   const AMDGPUMachineModuleInfo *MMI = nullptr;

@@ -140,10 +152,6 @@
   /// function. Returns true if current function is modified, false otherwise.
   bool removeAtomicPseudoMIs();

-  /// \brief Reports unknown synchronization scope used in \p MI to LLVM
-  /// context.
-  void reportUnknownSynchScope(const MachineBasicBlock::iterator &MI);
-
   /// \brief Expands load operation \p MI. Returns true if instructions are
   /// added/deleted or \p MI is modified, false otherwise.
   bool expandLoad(const SIMemOpInfo &MOI,
@@ -185,37 +193,73 @@
 } // end namespace anonymous

 /* static */
+Optional<SIMemOpInfo> SIMemOpInfo::constructFromMI(
+    const AMDGPUMachineModuleInfo &MMI,
+    const MachineBasicBlock::iterator &MI) {
+  assert(MI->getNumMemOperands() > 0);
+
+  SyncScope::ID SSID = SyncScope::SingleThread;
+  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
+  AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic;
+
+  // Validator should check whether or not MMOs cover the entire set of
+  // locations accessed by the memory instruction.
+  for (const auto &MMO : MI->memoperands()) {
+    auto ErrorOrSSID = MMI.isSSIDWiderThan(SSID, MMO->getSyncScopeID());
+    if (!ErrorOrSSID) {
+      reportUnknownSyncScope(MI);
+      return None;
+    }
+
+    SSID = ErrorOrSSID.get() ?
+        SSID : MMO->getSyncScopeID();
+    Ordering = isStrongerThan(Ordering, MMO->getOrdering()) ?
+        Ordering : MMO->getOrdering();
+    FailureOrdering = isStrongerThan(FailureOrdering,
+                                     MMO->getFailureOrdering()) ?
+        FailureOrdering : MMO->getFailureOrdering();
+  }
+
+  return SIMemOpInfo(SSID, Ordering, FailureOrdering);
+}
+
+/* static */
 Optional<SIMemOpInfo> SIMemOpInfo::getLoadInfo(
+    const AMDGPUMachineModuleInfo &MMI,
     const MachineBasicBlock::iterator &MI) {
   assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

   if (!(MI->mayLoad() && !MI->mayStore()))
     return None;
-  if (!MI->hasOneMemOperand())
+
+  // Be conservative if there are no memory operands.
+  if (MI->getNumMemOperands() == 0)
     return SIMemOpInfo(SyncScope::System,
                        AtomicOrdering::SequentiallyConsistent);

-  const MachineMemOperand *MMO = *MI->memoperands_begin();
-  return SIMemOpInfo(MMO->getSyncScopeID(), MMO->getOrdering());
+  return SIMemOpInfo::constructFromMI(MMI, MI);
 }

 /* static */
 Optional<SIMemOpInfo> SIMemOpInfo::getStoreInfo(
+    const AMDGPUMachineModuleInfo &MMI,
     const MachineBasicBlock::iterator &MI) {
   assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

   if (!(!MI->mayLoad() && MI->mayStore()))
     return None;
-  if (!MI->hasOneMemOperand())
+
+  // Be conservative if there are no memory operands.
+  if (MI->getNumMemOperands() == 0)
     return SIMemOpInfo(SyncScope::System,
                        AtomicOrdering::SequentiallyConsistent);

-  const MachineMemOperand *MMO = *MI->memoperands_begin();
-  return SIMemOpInfo(MMO->getSyncScopeID(), MMO->getOrdering());
+  return SIMemOpInfo::constructFromMI(MMI, MI);
 }

 /* static */
 Optional<SIMemOpInfo> SIMemOpInfo::getAtomicFenceInfo(
+    const AMDGPUMachineModuleInfo &MMI,
     const MachineBasicBlock::iterator &MI) {
   assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

@@ -231,34 +275,46 @@

 /* static */
 Optional<SIMemOpInfo> SIMemOpInfo::getAtomicCmpxchgInfo(
+    const AMDGPUMachineModuleInfo &MMI,
     const MachineBasicBlock::iterator &MI) {
   assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

   if (!(MI->mayLoad() && MI->mayStore()))
     return None;
-  if (!MI->hasOneMemOperand())
+
+  // Be conservative if there are no memory operands.
+  if (MI->getNumMemOperands() == 0)
     return SIMemOpInfo(SyncScope::System,
                        AtomicOrdering::SequentiallyConsistent,
                        AtomicOrdering::SequentiallyConsistent);

-  const MachineMemOperand *MMO = *MI->memoperands_begin();
-  return SIMemOpInfo(MMO->getSyncScopeID(), MMO->getOrdering(),
-                     MMO->getFailureOrdering());
+  return SIMemOpInfo::constructFromMI(MMI, MI);
 }

 /* static */
 Optional<SIMemOpInfo> SIMemOpInfo::getAtomicRmwInfo(
+    const AMDGPUMachineModuleInfo &MMI,
     const MachineBasicBlock::iterator &MI) {
   assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

   if (!(MI->mayLoad() && MI->mayStore()))
     return None;
-  if (!MI->hasOneMemOperand())
+
+  // Be conservative if there are no memory operands.
+  if (MI->getNumMemOperands() == 0)
     return SIMemOpInfo(SyncScope::System,
                        AtomicOrdering::SequentiallyConsistent);

-  const MachineMemOperand *MMO = *MI->memoperands_begin();
-  return SIMemOpInfo(MMO->getSyncScopeID(), MMO->getOrdering());
+  return SIMemOpInfo::constructFromMI(MMI, MI);
+}
+
+/* static */
+void SIMemOpInfo::reportUnknownSyncScope(
+    const MachineBasicBlock::iterator &MI) {
+  DiagnosticInfoUnsupported Diag(*MI->getParent()->getParent()->getFunction(),
+                                 "Unsupported synchronization scope");
+  LLVMContext *CTX = &MI->getParent()->getParent()->getFunction()->getContext();
+  CTX->diagnose(Diag);
 }

 bool SIMemoryLegalizer::insertBufferWbinvl1Vol(MachineBasicBlock::iterator &MI,
@@ -317,13 +373,6 @@
   return true;
 }

-void SIMemoryLegalizer::reportUnknownSynchScope(
-    const MachineBasicBlock::iterator &MI) {
-  DiagnosticInfoUnsupported Diag(*MI->getParent()->getParent()->getFunction(),
-                                 "Unsupported synchronization scope");
-  CTX->diagnose(Diag);
-}
-
 bool SIMemoryLegalizer::expandLoad(const SIMemOpInfo &MOI,
                                    MachineBasicBlock::iterator &MI) {
   assert(MI->mayLoad() && !MI->mayStore());
@@ -352,8 +401,7 @@
       MOI.getSSID() == MMI->getWavefrontSSID()) {
     return Changed;
   } else {
-    reportUnknownSynchScope(MI);
-    return Changed;
+    llvm_unreachable("Unsupported synchronization scope");
   }
 }

@@
-379,8 +427,7 @@
       MOI.getSSID() == MMI->getWavefrontSSID()) {
     return Changed;
   } else {
-    reportUnknownSynchScope(MI);
-    return Changed;
+    llvm_unreachable("Unsupported synchronization scope");
   }
 }

@@ -415,8 +462,7 @@
     AtomicPseudoMIs.push_back(MI);
     return Changed;
   } else {
-    reportUnknownSynchScope(MI);
-    return Changed;
+    SIMemOpInfo::reportUnknownSyncScope(MI);
   }
 }

@@ -454,8 +500,7 @@
     Changed |= setGLC(MI);
     return Changed;
   } else {
-    reportUnknownSynchScope(MI);
-    return Changed;
+    llvm_unreachable("Unsupported synchronization scope");
   }
 }

@@ -490,8 +535,7 @@
     Changed |= setGLC(MI);
     return Changed;
   } else {
-    reportUnknownSynchScope(MI);
-    return Changed;
+    llvm_unreachable("Unsupported synchronization scope");
   }
 }

@@ -503,7 +547,6 @@
   const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
   const IsaInfo::IsaVersion IV = IsaInfo::getIsaVersion(ST.getFeatureBits());

-  CTX = &MF.getFunction()->getContext();
   MMI = &MF.getMMI().getObjFileInfo<AMDGPUMachineModuleInfo>();

   TII = ST.getInstrInfo();
@@ -517,15 +560,15 @@
       if (!(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic))
         continue;

-      if (const auto &MOI = SIMemOpInfo::getLoadInfo(MI))
+      if (const auto &MOI = SIMemOpInfo::getLoadInfo(*MMI, MI))
         Changed |= expandLoad(MOI.getValue(), MI);
-      else if (const auto &MOI = SIMemOpInfo::getStoreInfo(MI))
+      else if (const auto &MOI = SIMemOpInfo::getStoreInfo(*MMI, MI))
         Changed |= expandStore(MOI.getValue(), MI);
-      else if (const auto &MOI = SIMemOpInfo::getAtomicFenceInfo(MI))
+      else if (const auto &MOI = SIMemOpInfo::getAtomicFenceInfo(*MMI, MI))
         Changed |= expandAtomicFence(MOI.getValue(), MI);
-      else if (const auto &MOI = SIMemOpInfo::getAtomicCmpxchgInfo(MI))
+      else if (const auto &MOI = SIMemOpInfo::getAtomicCmpxchgInfo(*MMI, MI))
         Changed |= expandAtomicCmpxchg(MOI.getValue(), MI);
-      else if (const auto &MOI = SIMemOpInfo::getAtomicRmwInfo(MI))
+      else if (const auto &MOI = SIMemOpInfo::getAtomicRmwInfo(*MMI, MI))
         Changed |= expandAtomicRmw(MOI.getValue(), MI);
     }
   }
Index:
test/CodeGen/MIR/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir =================================================================== --- /dev/null +++ test/CodeGen/MIR/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir @@ -0,0 +1,163 @@ +# RUN: llc -march=amdgcn -mcpu=gfx803 -run-pass si-memory-legalizer %s -o - | FileCheck %s + +--- | + ; ModuleID = 'memory-legalizer-multiple-mem-operands.ll' + source_filename = "memory-legalizer-multiple-mem-operands.ll" + target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64" + + define amdgpu_kernel void @multiple_mem_operands(i32 addrspace(1)* %out, i32 %cond, i32 %if_offset, i32 %else_offset) #0 { + entry: + %scratch0 = alloca [8192 x i32] + %scratch1 = alloca [8192 x i32] + %scratchptr01 = bitcast [8192 x i32]* %scratch0 to i32* + store i32 1, i32* %scratchptr01 + %scratchptr12 = bitcast [8192 x i32]* %scratch1 to i32* + store i32 2, i32* %scratchptr12 + %cmp = icmp eq i32 %cond, 0 + br i1 %cmp, label %if, label %else, !structurizecfg.uniform !0, !amdgpu.uniform !0 + + if: ; preds = %entry + %if_ptr = getelementptr [8192 x i32], [8192 x i32]* %scratch0, i32 0, i32 %if_offset, !amdgpu.uniform !0 + %if_value = load atomic i32, i32* %if_ptr syncscope("workgroup") seq_cst, align 4 + br label %done, !structurizecfg.uniform !0 + + else: ; preds = %entry + %else_ptr = getelementptr [8192 x i32], [8192 x i32]* %scratch1, i32 0, i32 %else_offset, !amdgpu.uniform !0 + %else_value = load atomic i32, i32* %else_ptr syncscope("agent") unordered, align 4 + br label %done, !structurizecfg.uniform !0 + + done: ; preds = %else, %if + %value = phi i32 [ %if_value, %if ], [ %else_value, %else ] + store i32 %value, i32 addrspace(1)* %out + ret void + } + + ; Function Attrs: convergent nounwind + declare { i1, i64 } @llvm.amdgcn.if(i1) #1 + + ; Function Attrs: convergent nounwind + declare { i1, i64 } 
@llvm.amdgcn.else(i64) #1 + + ; Function Attrs: convergent nounwind readnone + declare i64 @llvm.amdgcn.break(i64) #2 + + ; Function Attrs: convergent nounwind readnone + declare i64 @llvm.amdgcn.if.break(i1, i64) #2 + + ; Function Attrs: convergent nounwind readnone + declare i64 @llvm.amdgcn.else.break(i64, i64) #2 + + ; Function Attrs: convergent nounwind + declare i1 @llvm.amdgcn.loop(i64) #1 + + ; Function Attrs: convergent nounwind + declare void @llvm.amdgcn.end.cf(i64) #1 + + attributes #0 = { "target-cpu"="gfx803" } + attributes #1 = { convergent nounwind } + attributes #2 = { convergent nounwind readnone } + + !0 = !{} + +... +--- + +# CHECK-LABEL: name: multiple_mem_operands + +# CHECK-LABEL: bb.3.done: +# CHECK: S_WAITCNT 3952 +# CHECK-NEXT: BUFFER_LOAD_DWORD_OFFEN +# CHECK-NEXT: S_WAITCNT 3952 +# CHECK-NEXT: BUFFER_WBINVL1_VOL + +name: multiple_mem_operands +alignment: 0 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +registers: +liveins: + - { reg: '%sgpr0_sgpr1', virtual-reg: '' } + - { reg: '%sgpr3', virtual-reg: '' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 65540 + offsetAdjustment: 0 + maxAlignment: 4 + adjustsStack: false + hasCalls: false + stackProtector: '' + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false + savePoint: '' + restorePoint: '' +fixedStack: + - { id: 0, type: default, offset: 0, size: 4, alignment: 4, stack-id: 0, + isImmutable: false, isAliased: false, callee-saved-register: '' } +stack: + - { id: 0, name: scratch0, type: default, offset: 4, size: 32768, alignment: 4, + stack-id: 0, callee-saved-register: '', local-offset: 0, di-variable: '', + di-expression: '', di-location: '' } + - { id: 1, name: scratch1, type: default, offset: 32772, size: 32768, + alignment: 4, stack-id: 0, callee-saved-register: '', local-offset: 
32768, + di-variable: '', di-expression: '', di-location: '' } +constants: +body: | + bb.0.entry: + successors: %bb.1.if(0x30000000), %bb.2.else(0x50000000) + liveins: %sgpr0_sgpr1, %sgpr3 + + %sgpr2 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`) + %sgpr8 = S_MOV_B32 $SCRATCH_RSRC_DWORD0, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11 + %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM %sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) + %sgpr9 = S_MOV_B32 $SCRATCH_RSRC_DWORD1, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11 + %sgpr10 = S_MOV_B32 4294967295, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11 + %sgpr11 = S_MOV_B32 15204352, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11 + %vgpr0 = V_MOV_B32_e32 1, implicit %exec + BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 4, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr01) + S_WAITCNT 127 + S_CMP_LG_U32 killed %sgpr2, 0, implicit-def %scc + S_WAITCNT 3855 + %vgpr0 = V_MOV_B32_e32 2, implicit %exec + %vgpr1 = V_MOV_B32_e32 32772, implicit %exec + BUFFER_STORE_DWORD_OFFEN killed %vgpr0, killed %vgpr1, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr12) + S_CBRANCH_SCC0 %bb.1.if, implicit killed %scc + + bb.2.else: + successors: %bb.3.done(0x80000000) + liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11 + + %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`) + S_WAITCNT 3855 + %vgpr0 = V_MOV_B32_e32 32772, implicit %exec + S_BRANCH %bb.3.done + + bb.1.if: + successors: %bb.3.done(0x80000000) + liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11 + + %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`) + S_WAITCNT 3855 + %vgpr0 = V_MOV_B32_e32 4, implicit 
%exec + + bb.3.done: + liveins: %sgpr3, %sgpr4_sgpr5, %sgpr8_sgpr9_sgpr10_sgpr11, %vgpr0, %sgpr0 + + S_WAITCNT 127 + %sgpr0 = S_LSHL_B32 killed %sgpr0, 2, implicit-def dead %scc + %vgpr0 = V_ADD_I32_e32 killed %sgpr0, killed %vgpr0, implicit-def dead %vcc, implicit %exec + %vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed %vgpr0, killed %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (load syncscope("agent") unordered 4 from %ir.else_ptr), (load syncscope("workgroup") seq_cst 4 from %ir.if_ptr) + %vgpr1 = V_MOV_B32_e32 %sgpr4, implicit %exec, implicit-def %vgpr1_vgpr2, implicit %sgpr4_sgpr5 + %vgpr2 = V_MOV_B32_e32 killed %sgpr5, implicit %exec, implicit %sgpr4_sgpr5, implicit %exec + S_WAITCNT 3952 + FLAT_STORE_DWORD killed %vgpr1_vgpr2, killed %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.out) + S_ENDPGM + +...