diff --git a/llvm/lib/Target/AMDGPU/AMDGPUReleaseVGPRs.cpp b/llvm/lib/Target/AMDGPU/AMDGPUReleaseVGPRs.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUReleaseVGPRs.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUReleaseVGPRs.cpp
@@ -16,6 +16,8 @@
 #include "GCNSubtarget.h"
 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
 #include "SIDefines.h"
+#include "llvm/ADT/PostOrderIterator.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/CodeGen/MachineOperand.h"
+#include <optional>
@@ -29,9 +31,6 @@
 public:
   static char ID;
 
-  const SIInstrInfo *SII;
-  const SIRegisterInfo *TRI;
-
   AMDGPUReleaseVGPRs() : MachineFunctionPass(ID) {}
 
   void getAnalysisUsage(AnalysisUsage &AU) const override {
@@ -39,47 +38,74 @@
     MachineFunctionPass::getAnalysisUsage(AU);
   }
 
-  // Used to cache the result of isLastInstructionVMEMStore for each block
-  using BlockVMEMStoreType = DenseMap<MachineBasicBlock *, bool>;
-  BlockVMEMStoreType BlockVMEMStore;
-
-  // Return true if the last instruction referencing a vgpr in this MBB
-  // is a VMEM store, otherwise return false.
-  // Visit previous basic blocks to find this last instruction if needed.
-  // Because this pass is late in the pipeline, it is expected that the
+  // Track whether the last instruction referencing a vgpr in an MBB is a VMEM
+  // store. Because this pass is late in the pipeline, it is expected that the
   // last vgpr use will likely be one of vmem store, ds, exp.
   // Loads and others vgpr operations would have been
   // deleted by this point, except for complex control flow involving loops.
   // This is why we are just testing the type of instructions rather
   // than the operands.
-  bool isLastVGPRUseVMEMStore(MachineBasicBlock &MBB) {
-    // Use the cache to break infinite loop and save some time. Initialize to
-    // false in case we have a cycle.
-    BlockVMEMStoreType::iterator It;
-    bool Inserted;
-    std::tie(It, Inserted) = BlockVMEMStore.insert({&MBB, false});
-    bool &CacheEntry = It->second;
-    if (!Inserted)
-      return CacheEntry;
-
-    for (auto &MI : reverse(MBB.instrs())) {
-      // If it's a VMEM store, a vgpr will be used, return true.
-      if ((SIInstrInfo::isVMEM(MI) || SIInstrInfo::isFLAT(MI)) && MI.mayStore())
-        return CacheEntry = true;
-
-      // If it's referencing a VGPR but is not a VMEM store, return false.
-      if (SIInstrInfo::isDS(MI) || SIInstrInfo::isEXP(MI) ||
-          SIInstrInfo::isVMEM(MI) || SIInstrInfo::isFLAT(MI) ||
-          SIInstrInfo::isVALU(MI))
-        return CacheEntry = false;
+  class LastVGPRUseIsVMEMStore {
+    BitVector BlockVMEMStore;
+
+    // Return true/false when the last instruction referencing a vgpr in the
+    // block is/is not a VMEM store, and std::nullopt when no instruction in
+    // the block references a vgpr at all.
+    static std::optional<bool>
+    lastBlockVGPRUseIsVMEMStore(MachineBasicBlock const &MBB) {
+      for (auto &MI : reverse(MBB.instrs())) {
+        // If it's a VMEM store, a vgpr will be used, return true.
+        if ((SIInstrInfo::isVMEM(MI) || SIInstrInfo::isFLAT(MI)) &&
+            MI.mayStore())
+          return true;
+
+        // If it's referencing a VGPR but is not a VMEM store, return false.
+        if (SIInstrInfo::isDS(MI) || SIInstrInfo::isEXP(MI) ||
+            SIInstrInfo::isVMEM(MI) || SIInstrInfo::isFLAT(MI) ||
+            SIInstrInfo::isVALU(MI))
+          return false;
+      }
+      // Otherwise no instruction in the block references a vgpr: the
+      // predecessors decide.
+      return std::nullopt;
     }
-    // Recursive call into parent blocks. Look into predecessors if there is no
-    // vgpr used in this block.
-    return CacheEntry = llvm::any_of(MBB.predecessors(),
-                                     [this](MachineBasicBlock *Parent) {
-                                       return isLastVGPRUseVMEMStore(*Parent);
-                                     });
-  }
+
+  public:
+    LastVGPRUseIsVMEMStore(MachineFunction const &MF) {
+      BlockVMEMStore.resize(MF.getNumBlockIDs());
+      // Blocks whose answer is already decided by their own instructions.
+      BitVector Decided(MF.getNumBlockIDs());
+      for (auto &MBB : MF) {
+        if (std::optional<bool> LastUse = lastBlockVGPRUseIsVMEMStore(MBB)) {
+          BlockVMEMStore[MBB.getNumber()] = *LastUse;
+          Decided[MBB.getNumber()] = true;
+        }
+      }
 
-  bool runOnMachineBasicBlock(MachineBasicBlock &MBB) {
+      // Visit the blocks in reverse post-order so that, across forward
+      // edges, predecessors are answered before their successors. A block
+      // reached through a back edge reads the conservative default (false),
+      // which is how the previous recursive implementation broke cycles.
+      SmallVector<MachineBasicBlock const *> PostOrder(post_order(&MF));
+      for (auto *MBB : reverse(PostOrder)) {
+        if (Decided[MBB->getNumber()])
+          continue;
+        // No vgpr use in this block itself: look into the predecessors.
+        BlockVMEMStore[MBB->getNumber()] = any_of(
+            MBB->predecessors(), [this](MachineBasicBlock const *Pred) {
+              return isLastVGPRUseVMEMStore(*Pred);
+            });
+      }
+    }
+
+    // Return true if the last instruction referencing a vgpr in this MBB
+    // is a VMEM store, otherwise return false.
+    bool isLastVGPRUseVMEMStore(MachineBasicBlock const &MBB) const {
+      return BlockVMEMStore[MBB.getNumber()];
+    }
+  };
+
+  static bool
+  runOnMachineBasicBlock(MachineBasicBlock &MBB, const SIInstrInfo *SII,
+                         LastVGPRUseIsVMEMStore const &BlockVMEMStore) {
     bool Changed = false;
@@ -93,7 +119,7 @@
       // If the last instruction using a VGPR in the block is a VMEM store,
       // release VGPRs. The VGPRs release will be placed just before ending
       // the program
-      if (isLastVGPRUseVMEMStore(MBB)) {
+      if (BlockVMEMStore.isLastVGPRUseVMEMStore(MBB)) {
         BuildMI(MBB, MI, DebugLoc(), SII->get(AMDGPU::S_SENDMSG))
             .addImm(AMDGPU::SendMsg::ID_DEALLOC_VGPRS_GFX11Plus);
         Changed = true;
@@ -117,16 +143,14 @@
     LLVM_DEBUG(dbgs() << "AMDGPUReleaseVGPRs running on " << MF.getName()
                       << "\n");
 
-    SII = ST.getInstrInfo();
-    TRI = ST.getRegisterInfo();
+    const SIInstrInfo *SII = ST.getInstrInfo();
+    LastVGPRUseIsVMEMStore BlockVMEMStore(MF);
 
     bool Changed = false;
     for (auto &MBB : MF) {
-      Changed |= runOnMachineBasicBlock(MBB);
+      Changed |= runOnMachineBasicBlock(MBB, SII, BlockVMEMStore);
     }
 
-    BlockVMEMStore.clear();
-
     return Changed;
   }
 };
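
For reviewers who want to poke at the traversal outside of LLVM, below is a small
self-contained model of the scheme the constructor uses: compute a per-block
answer first, then fill in the undecided blocks in reverse post-order from their
predecessors. Block, LocalUse, and the example CFG are invented for
illustration; nothing here is LLVM API, only the dataflow sketch mirrors the
patch. Any C++11 compiler should build it.

// Standalone model of the reverse post-order propagation above.
#include <cstdio>
#include <functional>
#include <vector>

// Per-block result of the bottom-up scan: the last vgpr use is a VMEM
// store, some other vgpr use, or the block touches no vgpr at all.
enum class LocalUse { None, Store, Other };

struct Block {
  LocalUse Use;
  std::vector<int> Preds, Succs; // CFG edges, by block index
};

int main() {
  // Tiny CFG with a loop: 0 -> 1, 1 -> 2, 2 -> 1, 1 -> 3 (exit).
  // Block 0 ends with a VMEM store, block 2 ends with some other vgpr use,
  // and blocks 1 and 3 do not reference a vgpr at all.
  std::vector<Block> Blocks = {
      {LocalUse::Store, {}, {1}},
      {LocalUse::None, {0, 2}, {2, 3}},
      {LocalUse::Other, {1}, {1}},
      {LocalUse::None, {1}, {}},
  };

  // Post-order DFS from the entry block, as post_order(&MF) does.
  std::vector<int> PostOrder;
  std::vector<bool> Visited(Blocks.size(), false);
  std::function<void(int)> DFS = [&](int N) {
    Visited[N] = true;
    for (int S : Blocks[N].Succs)
      if (!Visited[S])
        DFS(S);
    PostOrder.push_back(N);
  };
  DFS(0);

  // Reverse post-order visits a block after every predecessor reachable
  // over forward edges; back edges (2 -> 1 here) read the conservative
  // default of false, like the BitVector's zero-initialized bits.
  std::vector<bool> LastUseIsVMEMStore(Blocks.size(), false);
  for (auto It = PostOrder.rbegin(); It != PostOrder.rend(); ++It) {
    Block &B = Blocks[*It];
    if (B.Use != LocalUse::None) {
      LastUseIsVMEMStore[*It] = B.Use == LocalUse::Store;
      continue; // the block's own vgpr use decides the answer
    }
    bool Flag = false;
    for (int P : B.Preds)
      Flag = Flag || LastUseIsVMEMStore[P];
    LastUseIsVMEMStore[*It] = Flag;
  }

  for (unsigned N = 0; N != Blocks.size(); ++N)
    std::printf("block %u: %s\n", N, LastUseIsVMEMStore[N] ? "true" : "false");
}

It prints true for blocks 0, 1, and 3 and false for block 2: block 2's own
non-store vgpr use decides its answer and never consults the store upstream,
while the exit block 3 inherits true from block 0 through block 1. The loop's
back edge simply reads the zero-initialized default, the same conservative
role the old code's "initialize the cache to false" played.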