diff --git a/llvm/lib/Target/AMDGPU/AMDGPUReleaseVGPRs.cpp b/llvm/lib/Target/AMDGPU/AMDGPUReleaseVGPRs.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUReleaseVGPRs.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUReleaseVGPRs.cpp
@@ -16,7 +16,7 @@
 #include "GCNSubtarget.h"
 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
 #include "SIDefines.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/DepthFirstIterator.h"
 #include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/CodeGen/MachineOperand.h"
 using namespace llvm;
@@ -29,9 +29,6 @@
 public:
   static char ID;
 
-  const SIInstrInfo *SII;
-  const SIRegisterInfo *TRI;
-
   AMDGPUReleaseVGPRs() : MachineFunctionPass(ID) {}
 
   void getAnalysisUsage(AnalysisUsage &AU) const override {
@@ -39,50 +36,69 @@
     MachineFunctionPass::getAnalysisUsage(AU);
   }
 
-  // Used to cache the result of isLastInstructionVMEMStore for each block
-  using BlockVMEMStoreType = DenseMap<MachineBasicBlock *, bool>;
-  BlockVMEMStoreType BlockVMEMStore;
-
-  // Return true if the last instruction referencing a vgpr in this MBB
-  // is a VMEM store, otherwise return false.
-  // Visit previous basic blocks to find this last instruction if needed.
-  // Because this pass is late in the pipeline, it is expected that the
+  // Track if the last instruction referencing a vgpr in a MBB is a VMEM
+  // store. Because this pass is late in the pipeline, it is expected that the
   // last vgpr use will likely be one of vmem store, ds, exp.
   // Loads and others vgpr operations would have been
   // deleted by this point, except for complex control flow involving loops.
   // This is why we are just testing the type of instructions rather
   // than the operands.
-  bool isLastVGPRUseVMEMStore(MachineBasicBlock &MBB) {
-    // Use the cache to break infinite loop and save some time. Initialize to
-    // false in case we have a cycle.
-    BlockVMEMStoreType::iterator It;
-    bool Inserted;
-    std::tie(It, Inserted) = BlockVMEMStore.insert({&MBB, false});
-    bool &CacheEntry = It->second;
-    if (!Inserted)
-      return CacheEntry;
-
-    for (auto &MI : reverse(MBB.instrs())) {
-      // If it's a VMEM store, a vgpr will be used, return true.
-      if ((SIInstrInfo::isVMEM(MI) || SIInstrInfo::isFLAT(MI)) && MI.mayStore())
-        return CacheEntry = true;
-
-      // If it's referencing a VGPR but is not a VMEM store, return false.
-      if (SIInstrInfo::isDS(MI) || SIInstrInfo::isEXP(MI) ||
-          SIInstrInfo::isVMEM(MI) || SIInstrInfo::isFLAT(MI) ||
-          SIInstrInfo::isVALU(MI))
-        return CacheEntry = false;
+  class LastVGPRUseIsVMEMStore {
+    BitVector BlockVMEMStore;
+
+    static Optional<bool> lastVGPRUseIsStore(MachineBasicBlock const &MBB) {
+      for (auto &MI : reverse(MBB.instrs())) {
+        // If it's a VMEM store, a VGPR will be used, return true.
+        if ((SIInstrInfo::isVMEM(MI) || SIInstrInfo::isFLAT(MI)) &&
+            MI.mayStore())
+          return true;
+
+        // If it's referencing a VGPR but is not a VMEM store, return false.
+        if (SIInstrInfo::isDS(MI) || SIInstrInfo::isEXP(MI) ||
+            SIInstrInfo::isVMEM(MI) || SIInstrInfo::isFLAT(MI) ||
+            SIInstrInfo::isVALU(MI))
+          return false;
+      }
+      // Wait until the values are propagated from the predecessors
+      return None;
     }
 
-    // Recursive call into parent blocks. Look into predecessors if there is no
-    // vgpr used in this block.
-    return CacheEntry = llvm::any_of(MBB.predecessors(),
-                                     [this](MachineBasicBlock *Parent) {
-                                       return isLastVGPRUseVMEMStore(*Parent);
-                                     });
-  }
+  public:
+    LastVGPRUseIsVMEMStore(MachineFunction const &MF)
+        : BlockVMEMStore(MF.getNumBlockIDs()) {
+
+      df_iterator_default_set<MachineBasicBlock const *> Visited;
+      SmallVector<MachineBasicBlock const *> EndWithVMEMStoreBlocks;
+
+      for (auto const &MBB : MF) {
+        auto LastUseIsStore = lastVGPRUseIsStore(MBB);
+        if (!LastUseIsStore.has_value())
+          continue;
+
+        if (*LastUseIsStore) {
+          EndWithVMEMStoreBlocks.push_back(&MBB);
+        } else {
+          Visited.insert(&MBB);
+        }
+      }
+
+      for (MachineBasicBlock const *MBB : EndWithVMEMStoreBlocks) {
+        for (MachineBasicBlock const *Succ : depth_first_ext(MBB, Visited)) {
+          BlockVMEMStore[Succ->getNumber()] = true;
+        }
+      }
+    }
+
+    // Return true if the last instruction referencing a vgpr in this MBB
+    // is a VMEM store, otherwise return false.
+    bool isLastVGPRUseVMEMStore(MachineBasicBlock const &MBB) const {
+      return BlockVMEMStore[MBB.getNumber()];
+    }
+  };
 
-  bool runOnMachineBasicBlock(MachineBasicBlock &MBB) {
+  static bool
+  runOnMachineBasicBlock(MachineBasicBlock &MBB, const SIInstrInfo *SII,
+                         LastVGPRUseIsVMEMStore const &BlockVMEMStore) {
 
     bool Changed = false;
 
@@ -93,7 +109,7 @@
       // If the last instruction using a VGPR in the block is a VMEM store,
       // release VGPRs. The VGPRs release will be placed just before ending
      // the program
-      if (isLastVGPRUseVMEMStore(MBB)) {
+      if (BlockVMEMStore.isLastVGPRUseVMEMStore(MBB)) {
         BuildMI(MBB, MI, DebugLoc(), SII->get(AMDGPU::S_SENDMSG))
             .addImm(AMDGPU::SendMsg::ID_DEALLOC_VGPRS_GFX11Plus);
         Changed = true;
@@ -117,16 +133,14 @@
     LLVM_DEBUG(dbgs() << "AMDGPUReleaseVGPRs running on " << MF.getName()
                       << "\n");
 
-    SII = ST.getInstrInfo();
-    TRI = ST.getRegisterInfo();
+    const SIInstrInfo *SII = ST.getInstrInfo();
+    LastVGPRUseIsVMEMStore BlockVMEMStore(MF);
 
     bool Changed = false;
     for (auto &MBB : MF) {
-      Changed |= runOnMachineBasicBlock(MBB);
+      Changed |= runOnMachineBasicBlock(MBB, SII, BlockVMEMStore);
     }
 
-    BlockVMEMStore.clear();
-
     return Changed;
  }
 };
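Note: the new LastVGPRUseIsVMEMStore constructor replaces the old recursive, memoized walk over predecessors with a single forward propagation. Blocks whose last VGPR use is a VMEM store become DFS roots; blocks whose last VGPR use is anything else are pre-inserted into Visited so the flag cannot pass through them; depth_first_ext then sets the bit for every block it reaches. Below is a minimal standalone sketch of that scheme in plain C++, with no LLVM dependencies. The toy Block struct, the example CFG, and all names here are hypothetical stand-ins for MachineBasicBlock, the Visited set, depth_first_ext, and the BitVector, not the pass's actual code.

#include <cstdio>
#include <set>
#include <vector>

// Hypothetical stand-in for MachineBasicBlock: an ID plus successor edges.
struct Block {
  int Number;
  std::vector<Block *> Succs;
};

int main() {
  // Toy CFG: B0 -> B1, B1 -> B2, B1 -> B3.
  Block B0{0, {}}, B1{1, {}}, B2{2, {}}, B3{3, {}};
  B0.Succs = {&B1};
  B1.Succs = {&B2, &B3};

  // Pretend B0's last VGPR use is a VMEM store (a DFS root) and B3's last
  // VGPR use is something else (pre-seeded as visited, so it blocks
  // propagation), mirroring EndWithVMEMStoreBlocks and Visited above.
  std::vector<Block *> Roots = {&B0};
  std::set<Block *> Visited = {&B3};

  // Stand-in for the BitVector: one flag per block number.
  std::vector<bool> BlockVMEMStore(4, false);

  // Iterative DFS playing the role of depth_first_ext(MBB, Visited): mark
  // every block reachable from a root without crossing a blocked block.
  for (Block *Root : Roots) {
    std::vector<Block *> Stack = {Root};
    while (!Stack.empty()) {
      Block *B = Stack.back();
      Stack.pop_back();
      if (!Visited.insert(B).second)
        continue; // already settled, or blocked by a non-store block
      BlockVMEMStore[B->Number] = true;
      for (Block *S : B->Succs)
        Stack.push_back(S);
    }
  }

  // Expected: B0, B1, B2 have the flag set; B3 does not.
  for (int I = 0; I < 4; ++I)
    std::printf("B%d: %d\n", I, (int)BlockVMEMStore[I]);
  return 0;
}

Seeding Visited with the "blocking" blocks before the traversal is what lets one linear-time pass replace the old per-block recursion, which could re-walk the same predecessor chains once per query.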