Index: llvm/trunk/include/llvm/Analysis/MemorySSA.h
===================================================================
--- llvm/trunk/include/llvm/Analysis/MemorySSA.h
+++ llvm/trunk/include/llvm/Analysis/MemorySSA.h
@@ -828,6 +828,7 @@
                                        const MemoryUseOrDef *Template = nullptr);
 
 private:
+  class ClobberWalkerBase;
   class CachingWalker;
   class OptimizeUses;
 
@@ -882,6 +883,7 @@
   mutable DenseMap<const BasicBlock *, unsigned long> BlockNumbering;
 
   // Memory SSA building info
+  std::unique_ptr<ClobberWalkerBase> WalkerBase;
   std::unique_ptr<CachingWalker> Walker;
   unsigned NextID;
 };
Index: llvm/trunk/lib/Analysis/MemorySSA.cpp
===================================================================
--- llvm/trunk/lib/Analysis/MemorySSA.cpp
+++ llvm/trunk/lib/Analysis/MemorySSA.cpp
@@ -946,28 +946,51 @@
 
 namespace llvm {
 
+class MemorySSA::ClobberWalkerBase {
+  ClobberWalker Walker;
+  MemorySSA *MSSA;
+
+public:
+  ClobberWalkerBase(MemorySSA *M, AliasAnalysis *A, DominatorTree *D)
+      : Walker(*M, *A, *D), MSSA(M) {}
+
+  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
+                                              const MemoryLocation &);
+  // Second argument (bool), defines whether the clobber search should skip the
+  // original queried access. If true, there will be a follow-up query searching
+  // for a clobber access past "self". Note that the Optimized access is not
+  // updated if a new clobber is found by this SkipSelf search. If this
+  // additional query becomes heavily used we may decide to cache the result.
+  // Walker instantiations will decide how to set the SkipSelf bool.
+  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, bool);
+  void verify(const MemorySSA *MSSA) { Walker.verify(MSSA); }
+};
+
 /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
 /// longer does caching on its own, but the name has been retained for the
 /// moment.
 class MemorySSA::CachingWalker final : public MemorySSAWalker {
-  ClobberWalker Walker;
-
-  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &);
+  ClobberWalkerBase *Walker;
 
 public:
-  CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
+  CachingWalker(MemorySSA *M, ClobberWalkerBase *W)
+      : MemorySSAWalker(M), Walker(W) {}
   ~CachingWalker() override = default;
 
   using MemorySSAWalker::getClobberingMemoryAccess;
 
-  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
-  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
-                                          const MemoryLocation &) override;
-  void invalidateInfo(MemoryAccess *) override;
+  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override;
+  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
+                                          const MemoryLocation &Loc) override;
+
+  void invalidateInfo(MemoryAccess *MA) override {
+    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
+      MUD->resetOptimized();
+  }
 
   void verify(const MemorySSA *MSSA) override {
     MemorySSAWalker::verify(MSSA);
-    Walker.verify(MSSA);
+    Walker->verify(MSSA);
   }
 };
 
@@ -1437,7 +1460,10 @@
   if (Walker)
     return Walker.get();
 
-  Walker = llvm::make_unique<CachingWalker>(this, AA, DT);
+  if (!WalkerBase)
+    WalkerBase = llvm::make_unique<ClobberWalkerBase>(this, AA, DT);
+
+  Walker = llvm::make_unique<CachingWalker>(this, WalkerBase.get());
   return Walker.get();
 }
 
@@ -2142,25 +2168,11 @@
 
 MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
 
-MemorySSA::CachingWalker::CachingWalker(MemorySSA *M, AliasAnalysis *A,
-                                        DominatorTree *D)
-    : MemorySSAWalker(M), Walker(*M, *A, *D) {}
-
-void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) {
-  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
-    MUD->resetOptimized();
-}
-
-/// Walk the use-def chains starting at \p MA and find
+/// Walk the use-def chains starting at \p StartingAccess and find
 /// the MemoryAccess that actually clobbers Loc.
 ///
 /// \returns our clobbering memory access
-MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
-    MemoryAccess *StartingAccess, UpwardsMemoryQuery &Q) {
-  return Walker.findClobber(StartingAccess, Q);
-}
-
-MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
+MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
     MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
   if (isa<MemoryPhi>(StartingAccess))
     return StartingAccess;
@@ -2184,11 +2196,12 @@
 
   // Unlike the other function, do not walk to the def of a def, because we are
   // handed something we already believe is the clobbering access.
+  // We never set SkipSelf to true in Q in this method.
   MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
                                      ? StartingUseOrDef->getDefiningAccess()
                                      : StartingUseOrDef;
 
-  MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q);
+  MemoryAccess *Clobber = Walker.findClobber(DefiningAccess, Q);
   LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
   LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
   LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
@@ -2197,17 +2210,23 @@
 }
 
 MemoryAccess *
-MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
+MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(MemoryAccess *MA,
+                                                            bool SkipSelf) {
   auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
   // If this is a MemoryPhi, we can't do anything.
   if (!StartingAccess)
     return MA;
 
+  bool IsOptimized = false;
+
   // If this is an already optimized use or def, return the optimized result.
   // Note: Currently, we store the optimized def result in a separate field,
   // since we can't use the defining access.
-  if (StartingAccess->isOptimized())
-    return StartingAccess->getOptimized();
+  if (StartingAccess->isOptimized()) {
+    if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
+      return StartingAccess->getOptimized();
+    IsOptimized = true;
+  }
 
   const Instruction *I = StartingAccess->getMemoryInst();
   // We can't sanely do anything with a fence, since they conservatively clobber
@@ -2225,33 +2244,60 @@
     return LiveOnEntry;
   }
 
-  // Start with the thing we already think clobbers this location
-  MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
+  MemoryAccess *OptimizedAccess;
+  if (!IsOptimized) {
+    // Start with the thing we already think clobbers this location
+    MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
+
+    // At this point, DefiningAccess may be the live on entry def.
+    // If it is, we will not get a better result.
+    if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
+      StartingAccess->setOptimized(DefiningAccess);
+      StartingAccess->setOptimizedAccessType(None);
+      return DefiningAccess;
+    }
+
+    OptimizedAccess = Walker.findClobber(DefiningAccess, Q);
+    StartingAccess->setOptimized(OptimizedAccess);
+    if (MSSA->isLiveOnEntryDef(OptimizedAccess))
+      StartingAccess->setOptimizedAccessType(None);
+    else if (Q.AR == MustAlias)
+      StartingAccess->setOptimizedAccessType(MustAlias);
+  } else
+    OptimizedAccess = StartingAccess->getOptimized();
 
-  // At this point, DefiningAccess may be the live on entry def.
-  // If it is, we will not get a better result.
-  if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
-    StartingAccess->setOptimized(DefiningAccess);
-    StartingAccess->setOptimizedAccessType(None);
-    return DefiningAccess;
-  }
-
-  MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q);
   LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
-  LLVM_DEBUG(dbgs() << *DefiningAccess << "\n");
-  LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
-  LLVM_DEBUG(dbgs() << *Result << "\n");
+  LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
+  LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
+  LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");
+
+  MemoryAccess *Result;
+  if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
+      isa<MemoryDef>(StartingAccess)) {
+    assert(isa<MemoryDef>(Q.OriginalAccess));
+    Q.SkipSelfAccess = true;
+    Result = Walker.findClobber(OptimizedAccess, Q);
+  } else
+    Result = OptimizedAccess;
 
-  StartingAccess->setOptimized(Result);
-  if (MSSA->isLiveOnEntryDef(Result))
-    StartingAccess->setOptimizedAccessType(None);
-  else if (Q.AR == MustAlias)
-    StartingAccess->setOptimizedAccessType(MustAlias);
+  LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
+  LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");
 
   return Result;
 }
 
 MemoryAccess *
+MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
+  return Walker->getClobberingMemoryAccessBase(MA, false);
+}
+
+MemoryAccess *
+MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA,
+                                                    const MemoryLocation &Loc) {
+  return Walker->getClobberingMemoryAccessBase(MA, Loc);
+}
+
+MemoryAccess *
 DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
   if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
     return Use->getDefiningAccess();