diff --git a/llvm/include/llvm/Analysis/MemorySSA.h b/llvm/include/llvm/Analysis/MemorySSA.h
--- a/llvm/include/llvm/Analysis/MemorySSA.h
+++ b/llvm/include/llvm/Analysis/MemorySSA.h
@@ -346,9 +346,9 @@
     setOperand(0, DMA);
   }
 
-  /// The defining access of a MemoryUses are always optimized if queried from
-  /// outside MSSA construction itself. This result is only useful inside
-  /// the MSSA implementation.
+  /// Whether the MemoryUse is optimized. If ensureOptimizedUses() was called,
+  /// uses will usually be optimized, but this is not guaranteed (e.g. due to
+  /// invalidation and optimization limits.)
   bool isOptimized() const {
     return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
   }
@@ -801,6 +801,13 @@
   /// about the beginning or end of a block.
   enum InsertionPlace { Beginning, End, BeforeTerminator };
 
+  /// By default, uses are *not* optimized during MemorySSA construction.
+  /// Calling this method will attempt to optimize all MemoryUses, if this has
+  /// not happened yet for this MemorySSA instance. This should be done if you
+  /// plan to query the clobbering access for most uses, or if you walk the
+  /// def-use chain of uses.
+  void ensureOptimizedUses();
+
 protected:
   // Used by Memory SSA dumpers and wrapper pass
   friend class MemorySSAPrinterLegacyPass;
@@ -903,6 +910,7 @@
   std::unique_ptr<CachingWalker<AliasAnalysis>> Walker;
   std::unique_ptr<SkipSelfWalker<AliasAnalysis>> SkipWalker;
   unsigned NextID = 0;
+  bool IsOptimized = false;
 };
 
 /// Enables verification of MemorySSA.
diff --git a/llvm/lib/Analysis/MemorySSA.cpp b/llvm/lib/Analysis/MemorySSA.cpp
--- a/llvm/lib/Analysis/MemorySSA.cpp
+++ b/llvm/lib/Analysis/MemorySSA.cpp
@@ -1397,6 +1397,9 @@
       continue;
     }
+    if (MU->isOptimized())
+      continue;
+
     if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
       MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
       continue;
     }
@@ -1585,10 +1588,6 @@
   SmallPtrSet<BasicBlock *, 16> Visited;
   renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
 
-  ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BAA, DT);
-  CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
-  OptimizeUses(this, &WalkerLocal, &BAA, DT).optimizeUses();
-
   // Mark the uses in unreachable blocks as live on entry, so that they go
   // somewhere.
   for (auto &BB : F)
@@ -2178,6 +2177,17 @@
   return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
 }
 
+void MemorySSA::ensureOptimizedUses() {
+  if (IsOptimized)
+    return;
+
+  BatchAAResults BatchAA(*AA);
+  ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BatchAA, DT);
+  CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
+  OptimizeUses(this, &WalkerLocal, &BatchAA, DT).optimizeUses();
+  IsOptimized = true;
+}
+
 void MemoryAccess::print(raw_ostream &OS) const {
   switch (getValueID()) {
   case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
@@ -2350,6 +2360,7 @@
 
 bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
   auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
+  MSSA.ensureOptimizedUses();
   if (DotCFGMSSA != "") {
     DOTFuncMSSAInfo CFGInfo(F, MSSA);
     WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
@@ -2382,6 +2393,7 @@
 PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
                                             FunctionAnalysisManager &AM) {
   auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
+  MSSA.ensureOptimizedUses();
   if (DotCFGMSSA != "") {
     DOTFuncMSSAInfo CFGInfo(F, MSSA);
     WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -1933,6 +1933,7 @@
                                 const LoopInfo &LI) {
   bool MadeChange = false;
 
+  MSSA.ensureOptimizedUses();
   DSEState State(F, AA, MSSA, DT, PDT, TLI, LI);
   // For each store:
   for (unsigned I = 0; I < State.MemDefs.size(); I++) {
diff --git a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
--- a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -598,7 +598,10 @@
            const TargetTransformInfo &TTI, DominatorTree &DT,
            AssumptionCache &AC, MemorySSA *MSSA)
       : TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA),
-        MSSAUpdater(std::make_unique<MemorySSAUpdater>(MSSA)) {}
+        MSSAUpdater(std::make_unique<MemorySSAUpdater>(MSSA)) {
+    if (MSSA)
+      MSSA->ensureOptimizedUses();
+  }
 
   bool run();
 
diff --git a/llvm/lib/Transforms/Scalar/GVNHoist.cpp b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
--- a/llvm/lib/Transforms/Scalar/GVNHoist.cpp
+++ b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
@@ -259,7 +259,9 @@
   GVNHoist(DominatorTree *DT, PostDominatorTree *PDT, AliasAnalysis *AA,
            MemoryDependenceResults *MD, MemorySSA *MSSA)
       : DT(DT), PDT(PDT), AA(AA), MD(MD), MSSA(MSSA),
-        MSSAUpdater(std::make_unique<MemorySSAUpdater>(MSSA)) {}
+        MSSAUpdater(std::make_unique<MemorySSAUpdater>(MSSA)) {
+    MSSA->ensureOptimizedUses();
+  }
 
   bool run(Function &F);
 
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -374,6 +374,7 @@
   bool Changed = false;
 
   assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
+  MSSA->ensureOptimizedUses();
 
   // If this loop has metadata indicating that LICM is not to be performed then
   // just exit.
diff --git a/llvm/unittests/Analysis/MemorySSATest.cpp b/llvm/unittests/Analysis/MemorySSATest.cpp
--- a/llvm/unittests/Analysis/MemorySSATest.cpp
+++ b/llvm/unittests/Analysis/MemorySSATest.cpp
@@ -1026,6 +1026,7 @@
 
   setupAnalyses();
   MemorySSA &MSSA = *Analyses->MSSA;
+  MSSA.ensureOptimizedUses();
 
   unsigned I = 0;
   for (LoadInst *V : {LA1, LA2}) {
@@ -1119,6 +1120,7 @@
 
   setupAnalyses();
   MemorySSA &MSSA = *Analyses->MSSA;
+  MSSA.ensureOptimizedUses();
 
   unsigned I = 0;
   for (LoadInst *V : {LA1, LB1}) {