diff --git a/llvm/include/llvm/Analysis/MemorySSA.h b/llvm/include/llvm/Analysis/MemorySSA.h --- a/llvm/include/llvm/Analysis/MemorySSA.h +++ b/llvm/include/llvm/Analysis/MemorySSA.h @@ -794,6 +794,8 @@ /// def-use chain of uses. void ensureOptimizedUses(); + AliasAnalysis &getAA() { return *AA; } + protected: // Used by Memory SSA dumpers and wrapper pass friend class MemorySSAPrinterLegacyPass; @@ -840,12 +842,12 @@ bool CreationMustSucceed = true); private: - template <class AliasAnalysisType> class ClobberWalkerBase; - template <class AliasAnalysisType> class CachingWalker; - template <class AliasAnalysisType> class SkipSelfWalker; + class ClobberWalkerBase; + class CachingWalker; + class SkipSelfWalker; class OptimizeUses; - CachingWalker<AliasAnalysis> *getWalkerImpl(); + CachingWalker *getWalkerImpl(); void buildMemorySSA(BatchAAResults &BAA); void prepareForMoveTo(MemoryAccess *, BasicBlock *); @@ -892,9 +894,9 @@ mutable DenseMap<const BasicBlock *, unsigned> BlockNumbering; // Memory SSA building info - std::unique_ptr<ClobberWalkerBase<AliasAnalysis>> WalkerBase; - std::unique_ptr<CachingWalker<AliasAnalysis>> Walker; - std::unique_ptr<SkipSelfWalker<AliasAnalysis>> SkipWalker; + std::unique_ptr<ClobberWalkerBase> WalkerBase; + std::unique_ptr<CachingWalker> Walker; + std::unique_ptr<SkipSelfWalker> SkipWalker; unsigned NextID = 0; bool IsOptimized = false; }; @@ -1041,15 +1043,17 @@ /// /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef /// in the if (a) branch. - MemoryAccess *getClobberingMemoryAccess(const Instruction *I) { + MemoryAccess *getClobberingMemoryAccess(const Instruction *I, + BatchAAResults &AA) { MemoryAccess *MA = MSSA->getMemoryAccess(I); assert(MA && "Handed an instruction that MemorySSA doesn't recognize?"); - return getClobberingMemoryAccess(MA); + return getClobberingMemoryAccess(MA, AA); } /// Does the same thing as getClobberingMemoryAccess(const Instruction *I), /// but takes a MemoryAccess instead of an Instruction. 
- virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0; + virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, + BatchAAResults &AA) = 0; /// Given a potentially clobbering memory access and a new location, /// calling this will give you the nearest dominating clobbering MemoryAccess @@ -1063,7 +1067,24 @@ /// will return that MemoryDef, whereas the above would return the clobber /// starting from the use side of the memory def. virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, - const MemoryLocation &) = 0; + const MemoryLocation &, + BatchAAResults &AA) = 0; + + MemoryAccess *getClobberingMemoryAccess(const Instruction *I) { + BatchAAResults BAA(MSSA->getAA()); + return getClobberingMemoryAccess(I, BAA); + } + + MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) { + BatchAAResults BAA(MSSA->getAA()); + return getClobberingMemoryAccess(MA, BAA); + } + + MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, + const MemoryLocation &Loc) { + BatchAAResults BAA(MSSA->getAA()); + return getClobberingMemoryAccess(MA, Loc, BAA); + } /// Given a memory access, invalidate anything this walker knows about /// that access. @@ -1086,9 +1107,11 @@ // getClobberingMemoryAccess. 
using MemorySSAWalker::getClobberingMemoryAccess; - MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override; MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, - const MemoryLocation &) override; + BatchAAResults &) override; + MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, + const MemoryLocation &, + BatchAAResults &) override; }; using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>; diff --git a/llvm/lib/Analysis/MemorySSA.cpp b/llvm/lib/Analysis/MemorySSA.cpp --- a/llvm/lib/Analysis/MemorySSA.cpp +++ b/llvm/lib/Analysis/MemorySSA.cpp @@ -125,10 +125,11 @@ class MemorySSAWalkerAnnotatedWriter : public AssemblyAnnotationWriter { MemorySSA *MSSA; MemorySSAWalker *Walker; + BatchAAResults BAA; public: MemorySSAWalkerAnnotatedWriter(MemorySSA *M) - : MSSA(M), Walker(M->getWalker()) {} + : MSSA(M), Walker(M->getWalker()), BAA(M->getAA()) {} void emitBasicBlockStartAnnot(const BasicBlock *BB, formatted_raw_ostream &OS) override { @@ -139,7 +140,7 @@ void emitInstructionAnnot(const Instruction *I, formatted_raw_ostream &OS) override { if (MemoryAccess *MA = MSSA->getMemoryAccess(I)) { - MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(MA); + MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(MA, BAA); OS << "; " << *MA; if (Clobber) { OS << " - clobbered by "; @@ -372,8 +373,7 @@ } // end anonymous namespace -template <typename AliasAnalysisType> -static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA, +static bool isUseTriviallyOptimizableToLiveOnEntry(BatchAAResults &AA, const Instruction *I) { // If the memory can't be changed, then loads of the memory can't be // clobbered. @@ -398,11 +398,10 @@ /// \param AA The AliasAnalysis we used for our search. /// \param AllowImpreciseClobber Always false, unless we do relaxed verify. 
-template <typename AliasAnalysisType> LLVM_ATTRIBUTE_UNUSED static void checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt, const MemoryLocation &StartLoc, const MemorySSA &MSSA, - const UpwardsMemoryQuery &Query, AliasAnalysisType &AA, + const UpwardsMemoryQuery &Query, BatchAAResults &AA, bool AllowImpreciseClobber = false) { assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?"); @@ -493,7 +492,7 @@ /// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up /// in one class. -template <class AliasAnalysisType> class ClobberWalker { +class ClobberWalker { /// Save a few bytes by using unsigned instead of size_t. using ListIndex = unsigned; @@ -517,8 +516,8 @@ }; const MemorySSA &MSSA; - AliasAnalysisType &AA; DominatorTree &DT; + BatchAAResults *AA; UpwardsMemoryQuery *Query; unsigned *UpwardWalkLimit; @@ -584,7 +583,7 @@ if (!--*UpwardWalkLimit) return {Current, true}; - if (instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA)) + if (instructionClobbersQuery(MD, Desc.Loc, Query->Inst, *AA)) return {MD, true}; } } @@ -928,14 +927,14 @@ } public: - ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT) - : MSSA(MSSA), AA(AA), DT(DT) {} + ClobberWalker(const MemorySSA &MSSA, DominatorTree &DT) + : MSSA(MSSA), DT(DT) {} - AliasAnalysisType *getAA() { return &AA; } /// Finds the nearest clobber for the given query, optimizing phis if /// possible. - MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q, - unsigned &UpWalkLimit) { + MemoryAccess *findClobber(BatchAAResults &BAA, MemoryAccess *Start, + UpwardsMemoryQuery &Q, unsigned &UpWalkLimit) { + AA = &BAA; Query = &Q; UpwardWalkLimit = &UpWalkLimit; // Starting limit must be > 0. 
@@ -965,7 +964,7 @@ #ifdef EXPENSIVE_CHECKS if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0) - checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA); + checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, BAA); #endif return Result; } @@ -991,63 +990,65 @@ namespace llvm { -template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase { - ClobberWalker<AliasAnalysisType> Walker; +class MemorySSA::ClobberWalkerBase { + ClobberWalker Walker; MemorySSA *MSSA; public: - ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D) - : Walker(*M, *A, *D), MSSA(M) {} + ClobberWalkerBase(MemorySSA *M, DominatorTree *D) : Walker(*M, *D), MSSA(M) {} MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, const MemoryLocation &, - unsigned &); + BatchAAResults &, unsigned &); // Third argument (bool), defines whether the clobber search should skip the // original queried access. If true, there will be a follow-up query searching // for a clobber access past "self". Note that the Optimized access is not // updated if a new clobber is found by this SkipSelf search. If this // additional query becomes heavily used we may decide to cache the result. // Walker instantiations will decide how to set the SkipSelf bool. - MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, unsigned &, bool, + MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, BatchAAResults &, + unsigned &, bool, bool UseInvariantGroup = true); }; /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no /// longer does caching on its own, but the name has been retained for the /// moment. 
-template <class AliasAnalysisType> class MemorySSA::CachingWalker final : public MemorySSAWalker { - ClobberWalkerBase<AliasAnalysisType> *Walker; + ClobberWalkerBase *Walker; public: - CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W) + CachingWalker(MemorySSA *M, ClobberWalkerBase *W) : MemorySSAWalker(M), Walker(W) {} ~CachingWalker() override = default; using MemorySSAWalker::getClobberingMemoryAccess; - MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) { - return Walker->getClobberingMemoryAccessBase(MA, UWL, false); + MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA, + unsigned &UWL) { + return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false); } MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, const MemoryLocation &Loc, - unsigned &UWL) { - return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL); + BatchAAResults &BAA, unsigned &UWL) { + return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL); } // This method is not accessible outside of this file. 
- MemoryAccess *getClobberingMemoryAccessWithoutInvariantGroup(MemoryAccess *MA, - unsigned &UWL) { - return Walker->getClobberingMemoryAccessBase(MA, UWL, false, false); + MemoryAccess *getClobberingMemoryAccessWithoutInvariantGroup( + MemoryAccess *MA, BatchAAResults &BAA, unsigned &UWL) { + return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false, false); } - MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override { + MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, + BatchAAResults &BAA) override { unsigned UpwardWalkLimit = MaxCheckLimit; - return getClobberingMemoryAccess(MA, UpwardWalkLimit); + return getClobberingMemoryAccess(MA, BAA, UpwardWalkLimit); } MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, - const MemoryLocation &Loc) override { + const MemoryLocation &Loc, + BatchAAResults &BAA) override { unsigned UpwardWalkLimit = MaxCheckLimit; - return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit); + return getClobberingMemoryAccess(MA, Loc, BAA, UpwardWalkLimit); } void invalidateInfo(MemoryAccess *MA) override { @@ -1056,34 +1057,36 @@ } }; -template <class AliasAnalysisType> class MemorySSA::SkipSelfWalker final : public MemorySSAWalker { - ClobberWalkerBase<AliasAnalysisType> *Walker; + ClobberWalkerBase *Walker; public: - SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W) + SkipSelfWalker(MemorySSA *M, ClobberWalkerBase *W) : MemorySSAWalker(M), Walker(W) {} ~SkipSelfWalker() override = default; using MemorySSAWalker::getClobberingMemoryAccess; - MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) { - return Walker->getClobberingMemoryAccessBase(MA, UWL, true); + MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA, + unsigned &UWL) { + return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, true); } MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, const MemoryLocation &Loc, - unsigned &UWL) { - return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL); + 
return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL); } - MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override { + MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, + BatchAAResults &BAA) override { unsigned UpwardWalkLimit = MaxCheckLimit; - return getClobberingMemoryAccess(MA, UpwardWalkLimit); + return getClobberingMemoryAccess(MA, BAA, UpwardWalkLimit); } MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, - const MemoryLocation &Loc) override { + const MemoryLocation &Loc, + BatchAAResults &BAA) override { unsigned UpwardWalkLimit = MaxCheckLimit; - return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit); + return getClobberingMemoryAccess(MA, Loc, BAA, UpwardWalkLimit); } void invalidateInfo(MemoryAccess *MA) override { @@ -1284,8 +1287,8 @@ /// which is walking bottom-up. class MemorySSA::OptimizeUses { public: - OptimizeUses(MemorySSA *MSSA, CachingWalker<BatchAAResults> *Walker, - BatchAAResults *BAA, DominatorTree *DT) + OptimizeUses(MemorySSA *MSSA, CachingWalker *Walker, BatchAAResults *BAA, + DominatorTree *DT) : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {} void optimizeUses(); @@ -1313,7 +1316,7 @@ DenseMap<MemoryLocOrCall, MemlocStackInfo> &); MemorySSA *MSSA; - CachingWalker<BatchAAResults> *Walker; + CachingWalker *Walker; BatchAAResults *AA; DominatorTree *DT; }; @@ -1441,7 +1444,7 @@ // support updates, so don't use it to optimize uses. MemoryAccess *Result = Walker->getClobberingMemoryAccessWithoutInvariantGroup( - MU, UpwardWalkLimit); + MU, *AA, UpwardWalkLimit); // We are guaranteed to find it or something is wrong. 
while (VersionStack[UpperBound] != Result) { assert(UpperBound != 0); @@ -1556,16 +1559,14 @@ MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); } -MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() { +MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() { if (Walker) return Walker.get(); if (!WalkerBase) - WalkerBase = - std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT); + WalkerBase = std::make_unique<ClobberWalkerBase>(this, DT); - Walker = - std::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get()); + Walker = std::make_unique<CachingWalker>(this, WalkerBase.get()); return Walker.get(); } @@ -1574,11 +1575,9 @@ return SkipWalker.get(); if (!WalkerBase) - WalkerBase = - std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT); + WalkerBase = std::make_unique<ClobberWalkerBase>(this, DT); - SkipWalker = - std::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get()); + SkipWalker = std::make_unique<SkipSelfWalker>(this, WalkerBase.get()); return SkipWalker.get(); } @@ -2148,8 +2147,8 @@ return; BatchAAResults BatchAA(*AA); - ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BatchAA, DT); - CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase); + ClobberWalkerBase WalkerBase(this, DT); + CachingWalker WalkerLocal(this, &WalkerBase); OptimizeUses(this, &WalkerLocal, &BatchAA, DT).optimizeUses(); IsOptimized = true; } @@ -2418,11 +2417,9 @@ /// the MemoryAccess that actually clobbers Loc. /// /// \returns our clobbering memory access -template <typename AliasAnalysisType> -MemoryAccess * -MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase( +MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase( MemoryAccess *StartingAccess, const MemoryLocation &Loc, - unsigned &UpwardWalkLimit) { + BatchAAResults &BAA, unsigned &UpwardWalkLimit) { assert(!isa<MemoryUse>(StartingAccess) && "Use cannot be defining access"); Instruction *I = nullptr; @@ -2448,7 +2445,7 @@ // handed something we already believe is the clobbering access. // We never set SkipSelf to true in Q in this method. 
MemoryAccess *Clobber = - Walker.findClobber(StartingAccess, Q, UpwardWalkLimit); + Walker.findClobber(BAA, StartingAccess, Q, UpwardWalkLimit); LLVM_DEBUG({ dbgs() << "Clobber starting at access " << *StartingAccess << "\n"; if (I) @@ -2517,11 +2514,9 @@ return MostDominatingInstruction == &I ? nullptr : MostDominatingInstruction; } -template <typename AliasAnalysisType> -MemoryAccess * -MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase( - MemoryAccess *MA, unsigned &UpwardWalkLimit, bool SkipSelf, - bool UseInvariantGroup) { +MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase( + MemoryAccess *MA, BatchAAResults &BAA, unsigned &UpwardWalkLimit, + bool SkipSelf, bool UseInvariantGroup) { auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA); // If this is a MemoryPhi, we can't do anything. if (!StartingAccess) @@ -2560,7 +2555,7 @@ UpwardsMemoryQuery Q(I, StartingAccess); - if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) { + if (isUseTriviallyOptimizableToLiveOnEntry(BAA, I)) { MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef(); StartingAccess->setOptimized(LiveOnEntry); return LiveOnEntry; @@ -2578,7 +2573,8 @@ return DefiningAccess; } - OptimizedAccess = Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit); + OptimizedAccess = + Walker.findClobber(BAA, DefiningAccess, Q, UpwardWalkLimit); StartingAccess->setOptimized(OptimizedAccess); } else OptimizedAccess = StartingAccess->getOptimized(); @@ -2593,7 +2589,7 @@ isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) { assert(isa<MemoryDef>(Q.OriginalAccess)); Q.SkipSelfAccess = true; - Result = Walker.findClobber(OptimizedAccess, Q, UpwardWalkLimit); + Result = Walker.findClobber(BAA, OptimizedAccess, Q, UpwardWalkLimit); } else Result = OptimizedAccess; @@ -2604,14 +2600,15 @@ } MemoryAccess * -DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) { +DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA, + BatchAAResults &) { if (auto *Use = dyn_cast<MemoryUseOrDef>(MA)) return Use->getDefiningAccess(); return MA; } 
MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess( - MemoryAccess *StartingAccess, const MemoryLocation &) { + MemoryAccess *StartingAccess, const MemoryLocation &, BatchAAResults &) { if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess)) return Use->getDefiningAccess(); return StartingAccess;