diff --git a/llvm/lib/Target/AMDGPU/AMDGPUReleaseVGPRs.cpp b/llvm/lib/Target/AMDGPU/AMDGPUReleaseVGPRs.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUReleaseVGPRs.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUReleaseVGPRs.cpp
@@ -57,7 +57,7 @@
     // false in case we have a cycle.
     BlockVMEMStoreType::iterator It;
     bool Inserted;
-    std::tie(It, Inserted) = BlockVMEMStore.insert({&MBB, false});
+    std::tie(It, Inserted) = BlockVMEMStore.try_emplace(&MBB, false);
     bool &CacheEntry = It->second;
     if (!Inserted)
      return CacheEntry;
@@ -76,6 +76,8 @@

     // Recursive call into parent blocks. Look into predecessors if there is no
     // vgpr used in this block.
+    // The iterator is not invalidated by the recursive calls since we grew the
+    // dictionary to hold all the entries in advance.
     return CacheEntry =
         llvm::any_of(MBB.predecessors(), [this](MachineBasicBlock *Parent) {
           return isLastVGPRUseVMEMStore(*Parent);
@@ -120,6 +122,9 @@
    SII = ST.getInstrInfo();
    TRI = ST.getRegisterInfo();

+    // We grow BlockVMEMStore to prevent the invalidation of its iterators.
+    BlockVMEMStore.grow(MF.size());
+
    bool Changed = false;
    for (auto &MBB : MF) {
      Changed |= runOnMachineBasicBlock(MBB);