Index: lib/Analysis/MemorySSA.cpp
===================================================================
--- lib/Analysis/MemorySSA.cpp
+++ lib/Analysis/MemorySSA.cpp
@@ -539,9 +539,11 @@
   ///
   /// This does not test for whether StopAt is a clobber
   UpwardsWalkResult
-  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
+  walkToPhiOrClobber(DefPath &Desc, unsigned &UpwardWalkLimit,
+                     const MemoryAccess *StopAt = nullptr,
                      const MemoryAccess *SkipStopAt = nullptr) const {
     assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
+    assert(UpwardWalkLimit > 0 && "Need a positive walk limit");
 
     for (MemoryAccess *Current : def_chain(Desc.Last)) {
       Desc.Last = Current;
@@ -551,6 +553,10 @@
       if (auto *MD = dyn_cast<MemoryDef>(Current)) {
         if (MSSA.isLiveOnEntryDef(MD))
           return {MD, true, MustAlias};
+
+        if (--UpwardWalkLimit == 0)
+          return {Current, true, MayAlias};
+
         ClobberAlias CA =
             instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
         if (CA.IsClobber)
@@ -589,11 +595,10 @@
   ///
   /// If this returns None, NewPaused is a vector of searches that terminated
   /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
-  Optional<TerminatedPath>
-  getBlockingAccess(const MemoryAccess *StopWhere,
-                    SmallVectorImpl<ListIndex> &PausedSearches,
-                    SmallVectorImpl<ListIndex> &NewPaused,
-                    SmallVectorImpl<TerminatedPath> &Terminated) {
+  Optional<TerminatedPath> getBlockingAccess(
+      const MemoryAccess *StopWhere, SmallVectorImpl<ListIndex> &PausedSearches,
+      SmallVectorImpl<ListIndex> &NewPaused,
+      SmallVectorImpl<TerminatedPath> &Terminated, unsigned &UpwardWalkLimit) {
     assert(!PausedSearches.empty() && "No searches to continue?");
 
     // BFS vs DFS really doesn't make a difference here, so just do a DFS with
@@ -629,10 +634,12 @@
         SkipStopWhere = Query->OriginalAccess;
       }
 
-      UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere,
+      UpwardsWalkResult Res = walkToPhiOrClobber(Node, UpwardWalkLimit,
+                                                 /*StopAt=*/StopWhere,
                                                  /*SkipStopAt=*/SkipStopWhere);
       if (Res.IsKnownClobber) {
         assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);
+
         // If this wasn't a cache hit, we hit a clobber when walking. That's a
         // failure.
         TerminatedPath Term{Res.Result, PathIndex};
@@ -731,7 +738,8 @@
   /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
   /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
   OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
-                             const MemoryLocation &Loc) {
+                             const MemoryLocation &Loc,
+                             unsigned &UpwardWalkLimit) {
     assert(Paths.empty() && VisitedPhis.empty() &&
            "Reset the optimization state.");
 
@@ -774,8 +782,9 @@
       // FIXME: This is broken, because the Blocker may be reported to be
       // liveOnEntry, and we'll happily wait for that to disappear (read: never)
       // For the moment, this is fine, since we do nothing with blocker info.
-      if (Optional<TerminatedPath> Blocker = getBlockingAccess(
-              Target, PausedSearches, NewPaused, TerminatedPaths)) {
+      if (Optional<TerminatedPath> Blocker =
+              getBlockingAccess(Target, PausedSearches, NewPaused,
+                                TerminatedPaths, UpwardWalkLimit)) {
 
         // Find the node we started at. We can't search based on N->Last, since
         // we may have gone around a loop with a different MemoryLocation.
@@ -828,7 +837,8 @@
     MemoryAccess *DefChainEnd = nullptr;
     SmallVector<TerminatedPath, 4> Clobbers;
     for (ListIndex Paused : NewPaused) {
-      UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
+      UpwardsWalkResult WR =
+          walkToPhiOrClobber(Paths[Paused], UpwardWalkLimit);
       if (WR.IsKnownClobber)
         Clobbers.push_back({WR.Result, Paused});
       else
@@ -896,7 +906,8 @@
   AliasAnalysisType *getAA() { return &AA; }
   /// Finds the nearest clobber for the given query, optimizing phis if
   /// possible.
-  MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q) {
+  MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q,
+                            unsigned UpwardWalkLimit) {
     Query = &Q;
     MemoryAccess *Current = Start;
@@ -908,21 +919,23 @@
     DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
     // Fast path for the overly-common case (no crazy phi optimization
    // necessary)
-    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
+    UpwardsWalkResult WalkResult =
+        walkToPhiOrClobber(FirstDesc, UpwardWalkLimit);
     MemoryAccess *Result;
     if (WalkResult.IsKnownClobber) {
       Result = WalkResult.Result;
       Q.AR = WalkResult.AR;
     } else {
-      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
-                                          Current, Q.StartingLoc);
+      OptznResult OptRes =
+          tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last), Current,
+                         Q.StartingLoc, UpwardWalkLimit);
       verifyOptResult(OptRes);
       resetPhiOptznState();
       Result = OptRes.PrimaryClobber.Clobber;
     }
 
 #ifdef EXPENSIVE_CHECKS
-    if (!Q.SkipSelfAccess)
+    if (!Q.SkipSelfAccess && UpwardWalkLimit > 0)
       checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
 #endif
     return Result;
@@ -960,14 +973,15 @@
       : Walker(*M, *A, *D), MSSA(M) {}
 
   MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
-                                              const MemoryLocation &);
+                                              const MemoryLocation &,
+                                              unsigned &);
   // Second argument (bool), defines whether the clobber search should skip the
   // original queried access. If true, there will be a follow-up query searching
   // for a clobber access past "self". Note that the Optimized access is not
  // updated if a new clobber is found by this SkipSelf search. If this
  // additional query becomes heavily used we may decide to cache the result.
  // Walker instantiations will decide how to set the SkipSelf bool.
-  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, bool);
+  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, unsigned &, bool);
 
   void verify(const MemorySSA *MSSA) { Walker.verify(MSSA); }
 };
@@ -985,12 +999,23 @@
 
   using MemorySSAWalker::getClobberingMemoryAccess;
 
+  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
+    return Walker->getClobberingMemoryAccessBase(MA, UWL, false);
+  }
+  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
+                                          const MemoryLocation &Loc,
+                                          unsigned &UWL) {
+    return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
+  }
+
   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
-    return Walker->getClobberingMemoryAccessBase(MA, false);
+    unsigned UpwardWalkLimit = MaxCheckLimit;
+    return getClobberingMemoryAccess(MA, UpwardWalkLimit);
   }
   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                           const MemoryLocation &Loc) override {
-    return Walker->getClobberingMemoryAccessBase(MA, Loc);
+    unsigned UpwardWalkLimit = MaxCheckLimit;
+    return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
   }
 
   void invalidateInfo(MemoryAccess *MA) override {
@@ -1015,12 +1040,23 @@
 
   using MemorySSAWalker::getClobberingMemoryAccess;
 
+  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
+    return Walker->getClobberingMemoryAccessBase(MA, UWL, true);
+  }
+  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
+                                          const MemoryLocation &Loc,
+                                          unsigned &UWL) {
+    return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
+  }
+
   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
-    return Walker->getClobberingMemoryAccessBase(MA, true);
+    unsigned UpwardWalkLimit = MaxCheckLimit;
+    return getClobberingMemoryAccess(MA, UpwardWalkLimit);
   }
   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                           const MemoryLocation &Loc) override {
-    return Walker->getClobberingMemoryAccessBase(MA, Loc);
+    unsigned UpwardWalkLimit = MaxCheckLimit;
+    return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
   }
 
   void invalidateInfo(MemoryAccess *MA) override {
@@ -1210,8 +1246,8 @@
 /// which is walking bottom-up.
 class MemorySSA::OptimizeUses {
 public:
-  OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, BatchAAResults *BAA,
-               DominatorTree *DT)
+  OptimizeUses(MemorySSA *MSSA, CachingWalker<BatchAAResults> *Walker,
+               BatchAAResults *BAA, DominatorTree *DT)
       : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}
 
   void optimizeUses();
@@ -1240,7 +1276,7 @@
                            DenseMap<MemoryLocOrCall, MemlocStackInfo> &);
 
   MemorySSA *MSSA;
-  MemorySSAWalker *Walker;
+  CachingWalker<BatchAAResults> *Walker;
   BatchAAResults *AA;
   DominatorTree *DT;
 };
@@ -1358,11 +1394,12 @@
       continue;
     }
 
     bool FoundClobberResult = false;
+    unsigned UpwardWalkLimit = MaxCheckLimit;
     while (UpperBound > LocInfo.LowerBound) {
       if (isa<MemoryPhi>(VersionStack[UpperBound])) {
         // For phis, use the walker, see where we ended up, go there
-        Instruction *UseInst = MU->getMemoryInst();
-        MemoryAccess *Result = Walker->getClobberingMemoryAccess(UseInst);
+        MemoryAccess *Result =
+            Walker->getClobberingMemoryAccess(MU, UpwardWalkLimit);
         // We are guaranteed to find it or something is wrong
         while (VersionStack[UpperBound] != Result) {
           assert(UpperBound != 0);
@@ -2216,7 +2253,8 @@
 template <typename AliasAnalysisType>
 MemoryAccess *
 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
-    MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
+    MemoryAccess *StartingAccess, const MemoryLocation &Loc,
+    unsigned &UpwardWalkLimit) {
   if (isa<MemoryPhi>(StartingAccess))
     return StartingAccess;
 
@@ -2244,7 +2282,8 @@
                                      ? StartingUseOrDef->getDefiningAccess()
                                      : StartingUseOrDef;
 
-  MemoryAccess *Clobber = Walker.findClobber(DefiningAccess, Q);
+  MemoryAccess *Clobber =
+      Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
   LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
   LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
   LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
@@ -2255,7 +2294,7 @@
 template <typename AliasAnalysisType>
 MemoryAccess *
 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
-    MemoryAccess *MA, bool SkipSelf) {
+    MemoryAccess *MA, unsigned &UpwardWalkLimit, bool SkipSelf) {
   auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
   // If this is a MemoryPhi, we can't do anything.
   if (!StartingAccess)
     return MA;
@@ -2301,7 +2340,7 @@
     return DefiningAccess;
   }
 
-  OptimizedAccess = Walker.findClobber(DefiningAccess, Q);
+  OptimizedAccess = Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
   StartingAccess->setOptimized(OptimizedAccess);
   if (MSSA->isLiveOnEntryDef(OptimizedAccess))
     StartingAccess->setOptimizedAccessType(None);
@@ -2317,10 +2356,10 @@
   MemoryAccess *Result;
 
   if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
-      isa<MemoryDef>(StartingAccess)) {
+      isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
     assert(isa<MemoryDef>(Q.OriginalAccess));
     Q.SkipSelfAccess = true;
-    Result = Walker.findClobber(OptimizedAccess, Q);
+    Result = Walker.findClobber(OptimizedAccess, Q, UpwardWalkLimit);
   } else
     Result = OptimizedAccess;