Index: include/llvm/Analysis/IVUsers.h =================================================================== --- include/llvm/Analysis/IVUsers.h +++ include/llvm/Analysis/IVUsers.h @@ -193,7 +193,7 @@ public: typedef IVUsers Result; - IVUsers run(Loop &L, LoopAnalysisManager &AM); + IVUsers run(Loop &L, LoopAnalysisManager &AM, LPMAnalysisResults &AR); }; /// Printer pass for the \c IVUsers for a loop. @@ -202,7 +202,8 @@ public: explicit IVUsersPrinterPass(raw_ostream &OS) : OS(OS) {} - PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM); + PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &UR); }; } Index: include/llvm/Analysis/LoopAccessAnalysis.h =================================================================== --- include/llvm/Analysis/LoopAccessAnalysis.h +++ include/llvm/Analysis/LoopAccessAnalysis.h @@ -753,8 +753,8 @@ public: typedef LoopAccessInfo Result; - Result run(Loop &, LoopAnalysisManager &); - static StringRef name() { return "LoopAccessAnalysis"; } + + Result run(Loop &L, LoopAnalysisManager &AM, LPMAnalysisResults &AR); }; /// \brief Printer pass for the \c LoopAccessInfo results. @@ -764,7 +764,8 @@ public: explicit LoopAccessInfoPrinterPass(raw_ostream &OS) : OS(OS) {} - PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM); + PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &UR); }; inline Instruction *MemoryDepChecker::Dependence::getSource( Index: include/llvm/Analysis/LoopInfo.h =================================================================== --- include/llvm/Analysis/LoopInfo.h +++ include/llvm/Analysis/LoopInfo.h @@ -853,17 +853,8 @@ void getAnalysisUsage(AnalysisUsage &AU) const override; }; -/// \brief Pass for printing a loop's contents as LLVM's text IR assembly. -class PrintLoopPass : public PassInfoMixin { - raw_ostream &OS; - std::string Banner; - -public: - PrintLoopPass(); - PrintLoopPass(raw_ostream &OS, const std::string &Banner = ""); - - PreservedAnalyses run(Loop &L, AnalysisManager &); -}; +/// Method to print a loop's contents as LLVM's text IR assembly. +void printLoop(Loop &L, raw_ostream &OS, const std::string &Banner = ""); } // End llvm namespace Index: include/llvm/Analysis/LoopPassManager.h =================================================================== --- include/llvm/Analysis/LoopPassManager.h +++ include/llvm/Analysis/LoopPassManager.h @@ -8,63 +8,323 @@ //===----------------------------------------------------------------------===// /// \file /// -/// This header provides classes for managing passes over loops in LLVM IR. +/// This header provides classes for managing a pipeline of passes over loops +/// in LLVM IR. +/// +/// The primary loop pass pipeline is managed in a very particular way to +/// provide a set of core guarantees: +/// 1) Loops are, where possible in simplified form. +/// 2) Loops are *always* in LCSSA form. +/// 3) A collection of Loop-specific analysis results are available: +/// - LoopInfo +/// - DominatorTree +/// - ScalarEvolution +/// - AAManager +/// 4) All loop passes preserve #1 (where possible), #2, and #3. +/// 5) Loop passes run over each loop in the loop nest from the inner most to +/// the outer most. Specifically, all inner loops are processed before +/// passes run over outer loops. When running the pipeline across an inner +/// loop creates new inner loops, those are added and processed in this +/// order as well. 
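To make the interface described above concrete, here is a minimal sketch of a conforming loop pass. The pass name and its internal logic are hypothetical and not part of this patch; only the run signature and the LPMAnalysisResults / LPMUpdateResult arguments come from the header below.

// Hypothetical example only: a no-frills loop pass written against the
// proposed interface.
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/IR/PassManager.h"

namespace {
class ExampleLoopPass : public llvm::PassInfoMixin<ExampleLoopPass> {
public:
  llvm::PreservedAnalyses run(llvm::Loop &L, llvm::LoopAnalysisManager &AM,
                              llvm::LPMAnalysisResults &AR,
                              llvm::LPMUpdateResult &UR) {
    // The common function-level analyses arrive pre-computed in AR instead of
    // being fetched (and possibly missing) from a cached function proxy.
    if (!AR.SE.hasLoopInvariantBackedgeTakenCount(&L))
      return llvm::PreservedAnalyses::all();

    // ... transform the loop here, keeping AR.LI / AR.DT / AR.SE up to date,
    // and report structural changes (deleted or newly added loops) via UR ...

    return llvm::getLoopPassPreservedAnalyses();
  }
};
} // end anonymous namespace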
+///
+/// This process is designed to facilitate transformations which simplify,
+/// reduce, and remove loops. For passes which are more oriented towards
+/// optimizing loops, especially optimizing loop *nests* instead of single
+/// loops in isolation, this framework is less interesting.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_LOOPPASSMANAGER_H
#define LLVM_ANALYSIS_LOOPPASSMANAGER_H
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/PriorityWorklist.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/BasicAliasAnalysis.h"
+#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/PassManager.h"
namespace llvm {
-extern template class PassManager;
-/// \brief The loop pass manager.
-///
-/// See the documentation for the PassManager template for details. It runs a
-/// sequency of loop passes over each loop that the manager is run over. This
-/// typedef serves as a convenient way to refer to this construct.
-typedef PassManager LoopPassManager;
+// Forward declarations of the update-tracking and analysis-result-tracking
+// structures used in the API of loop passes that work within this
+// infrastructure.
+class LPMUpdateResult;
+struct LPMAnalysisResults;
+
+/// Extern template declaration for the analysis set for this IR unit.
+extern template class AllAnalysesOn;
-extern template class AnalysisManager;
/// \brief The loop analysis manager.
///
/// See the documentation for the AnalysisManager template for detail
/// documentation. This typedef serves as a convenient way to refer to this
/// construct in the adaptors and proxies used to integrate this into the larger
/// pass manager infrastructure.
-typedef AnalysisManager LoopAnalysisManager;
+typedef AnalysisManager LoopAnalysisManager;
+
+// Explicit specialization and instantiation declarations for the pass manager.
+// See the comments on the definition of the specialization for details on how
+// it differs from the primary template.
+template <>
+PreservedAnalyses
+PassManager::run(Loop &InitialL, LoopAnalysisManager &AM,
+                 LPMAnalysisResults &AnalysisResults,
+                 LPMUpdateResult &UR);
+extern template class PassManager;
+
+/// \brief The loop pass manager.
+///
+/// See the documentation for the PassManager template for details. It runs
+/// a sequence of loop passes over each loop that the manager is run over. This
+/// typedef serves as a convenient way to refer to this construct.
+typedef PassManager
+    LoopPassManager;
+
+/// A partial specialization of the require analysis template pass.
+template
+struct RequireAnalysisPass
+    : PassInfoMixin<
+          RequireAnalysisPass> {
+  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+                        LPMAnalysisResults &AR, LPMUpdateResult &) {
+    (void)AM.template getResult(L, AR);
+    return PreservedAnalyses::all();
+  }
+};
+
+/// An alias template to easily name a require analysis loop pass.
+template
+using RequireAnalysisLoopPass =
+    RequireAnalysisPass;
/// A proxy from a \c LoopAnalysisManager to a \c Function.
typedef InnerAnalysisManagerProxy LoopAnalysisManagerFunctionProxy;
-/// Specialization of the invalidate method for the \c
-/// LoopAnalysisManagerFunctionProxy's result.
+/// We need a specialized result for the \c LoopAnalysisManagerFunctionProxy so
+/// it can have access to the loop info in order to walk all the loops when
+/// invalidating things.
+template <> class LoopAnalysisManagerFunctionProxy::Result {
+public:
+  explicit Result(LoopAnalysisManager &InnerAM, LoopInfo &LI)
+      : InnerAM(&InnerAM), LI(&LI) {}
+  Result(Result &&Arg) : InnerAM(std::move(Arg.InnerAM)), LI(Arg.LI) {
+    // We have to null out the analysis manager in the moved-from state
+    // because we are taking ownership of the responsibility to clear the
+    // analysis state.
+    Arg.InnerAM = nullptr;
+  }
+  Result &operator=(Result &&RHS) {
+    InnerAM = RHS.InnerAM;
+    LI = RHS.LI;
+    // We have to null out the analysis manager in the moved-from state
+    // because we are taking ownership of the responsibility to clear the
+    // analysis state.
+    RHS.InnerAM = nullptr;
+    return *this;
+  }
+  ~Result() {
+    // InnerAM is cleared in a moved-from state where there is nothing to do.
+    if (!InnerAM)
+      return;
+
+    // Clear out the analysis manager if we're being destroyed -- it means we
+    // didn't even see an invalidate call when we got invalidated.
+    InnerAM->clear();
+  }
+
+  /// \brief Accessor for the analysis manager.
+  LoopAnalysisManager &getManager() { return *InnerAM; }
+
+  /// \brief Handler for invalidation of the function.
+  ///
+  /// If the proxy analysis itself is preserved, then we assume that the set of
+  /// loops in the function hasn't changed. Thus any pointers to loops in the
+  /// LoopAnalysisManager are still valid, and we don't need to call \c clear
+  /// on the LoopAnalysisManager.
+  ///
+  /// Regardless of whether this analysis is marked as preserved, all of the
+  /// analyses in the \c LoopAnalysisManager are potentially invalidated based
+  /// on the set of preserved analyses.
+  bool invalidate(Function &F, const PreservedAnalyses &PA,
+                  FunctionAnalysisManager::Invalidator &Inv);
+
+private:
+  LoopAnalysisManager *InnerAM;
+  LoopInfo *LI;
+};
+
+/// Provide a specialized run method for the \c LoopAnalysisManagerFunctionProxy
+/// so it can pass the loop info to the result.
template <>
-bool LoopAnalysisManagerFunctionProxy::Result::invalidate(
-    Function &F, const PreservedAnalyses &PA,
-    FunctionAnalysisManager::Invalidator &Inv);
+LoopAnalysisManagerFunctionProxy::Result
+LoopAnalysisManagerFunctionProxy::run(Function &F, FunctionAnalysisManager &AM);
// Ensure the \c LoopAnalysisManagerFunctionProxy is provided as an extern
// template.
extern template class InnerAnalysisManagerProxy;
-extern template class OuterAnalysisManagerProxy;
+extern template class OuterAnalysisManagerProxy;
/// A proxy from a \c FunctionAnalysisManager to a \c Loop.
-typedef OuterAnalysisManagerProxy
+typedef OuterAnalysisManagerProxy
    FunctionAnalysisManagerLoopProxy;
/// Returns the minimum set of Analyses that all loop passes must preserve.
PreservedAnalyses getLoopPassPreservedAnalyses();
+namespace internal {
+/// Helper to implement correct appending of loops onto a worklist.
+///
+/// The worklist is a LIFO data structure and we want the observed order to be
+/// postorder so we append in RPO.
+template +inline void appendLoopsInRPO(RangeT &&Loops, + SmallPriorityWorklist &Worklist) { + // We want a reverse postorder traveral of the loop nest to feed into the + // worklist because we pop off of its back and want to observe the loops + // in postorder. However, because this is a tree, that is equivalent to + // a preorder traversal. We use an internal worklist to build up the preorder + // traversal without recursion. + SmallVector PreOrderLoops, PreOrderWorklist; + + // We walk the initial sequence of loops in reverse because we generally want + // to visit defs before uses between unrelated loop nests and the worklist is + // LIFO. + for (Loop *RootL : reverse(Loops)) { + assert(PreOrderLoops.empty() && "Must start with an empty preorder walk."); + assert(PreOrderWorklist.empty() && + "Must start with an empty preorder walk worklist."); + PreOrderWorklist.push_back(RootL); + do { + Loop *L = PreOrderWorklist.pop_back_val(); + PreOrderWorklist.append(L->begin(), L->end()); + PreOrderLoops.push_back(L); + } while (!PreOrderWorklist.empty()); + + // Now move the postorder sequence into a priority worklist. + Worklist.insert(std::move(PreOrderLoops)); + PreOrderLoops.clear(); + } +} +} + +/// The adaptor from a function pass to a loop pass directly computes a set of +/// analyses that are especially useful to loop passes and makes them available +/// in the API. Loop passes are also expected to update all of these so that +/// they remain correct across the entire loop pipeline. +struct LPMAnalysisResults { + AAResults &AA; + AssumptionCache &AC; + DominatorTree &DT; + LoopInfo &LI; + ScalarEvolution &SE; + TargetLibraryInfo &TLI; + TargetTransformInfo &TTI; +}; + +template class FunctionToLoopPassAdaptor; + +/// This class provides an interface for update the loop pass manager based on +/// mutations to the loop nest. +/// +/// Loop passes which modify the loop nest should ensure they use one of these +/// APIs to update LPM infrastructure. +/// +/// Note that this class cannot be directly constructed, and is instead +/// provided as an argument to each loop pass. +class LPMUpdateResult { +public: + /// This can be queried by loop passes which run other loop passes (like pass + /// managers) to know whether the loop needs to be skipped due to updates to + /// the loop nest. + /// + /// Once this returns true, it is important to not access the loop object + /// directly + /// as it may no longer be in a valid state. + bool skipCurrentLoop() const { return SkipCurrentLoop; } + + /// Loop passes should use this method to indicate they have deleted a loop + /// from the nest. + /// + /// Note that this loop must either be the current loop or a subloop of the + /// current loop. This routine must be called prior to removing the loop from + /// the loop nest. + /// + /// If this is called for the current loop, in addition to clearing any + /// state, this routine will mark that the current loop should be skipped by + /// the rest of the pass management infrastructure. + void markLoopAsDeleted(Loop &L) { + LAM.clear(L); + assert(CurrentL->contains(&L) && "Cannot delete a loop outside of the " + "subloop tree currently being processed."); + if (&L == CurrentL) + SkipCurrentLoop = true; + } + + /// Loop passes should use this method to indicate they have added new child + /// loops of the current loop. + /// + /// The \p NewChildLoops must contain only the immediate children. Any nested + /// loops within them will be visited in postorder as usual for the loop pass + /// manager. 
+ void addChildLoops(ArrayRef NewChildLoops) { + // Insert ourselves back into the worklist first as this loop should be + // revisited after all the children have been processed. + Worklist.insert(CurrentL); + + internal::appendLoopsInRPO(NewChildLoops, Worklist); + + // Also skip further processing of the current loop, it will be revisited + // after all of its newly added children are accounted for. + SkipCurrentLoop = true; + } + + /// Loop passes should use this method to indicate they have added new + /// sibling loops to the current loop. + /// + /// The \p NewSibLoops must only contain the immediate sibling loops. Any + /// nested loops within them will be visited in postorder as usual for the + /// loop pass manager. + void addSiblingLoops(ArrayRef NewSibLoops) { + internal::appendLoopsInRPO(NewSibLoops, Worklist); + + // No need to skip the current loop or revisit it as sibling loops + // shouldn't impact anything. + } + +private: + template friend class llvm::FunctionToLoopPassAdaptor; + + SmallPriorityWorklist &Worklist; + + LoopAnalysisManager &LAM; + + Loop *CurrentL; + + bool SkipCurrentLoop; + + LPMUpdateResult(SmallPriorityWorklist &Worklist, + LoopAnalysisManager &LAM) + : Worklist(Worklist), LAM(LAM) {} +}; + /// \brief Adaptor that maps from a function to its loops. /// /// Designed to allow composition of a LoopPass(Manager) and a @@ -87,42 +347,58 @@ // Get the loop structure for this function LoopInfo &LI = AM.getResult(F); - // Also precompute all of the function analyses used by loop passes. - // FIXME: These should be handed into the loop passes when the loop pass - // management layer is reworked to follow the design of CGSCC. - (void)AM.getResult(F); - (void)AM.getResult(F); - (void)AM.getResult(F); - (void)AM.getResult(F); + // If there are no loops, there is nothing to do here. + if (LI.empty()) + return PreservedAnalyses::all(); + + // Get the analysis results needed by loop passes. + LPMAnalysisResults LAR = {AM.getResult(F), + AM.getResult(F), + AM.getResult(F), + AM.getResult(F), + AM.getResult(F), + AM.getResult(F), + AM.getResult(F)}; PreservedAnalyses PA = PreservedAnalyses::all(); - // We want to visit the loops in reverse post-order. We'll build the stack - // of loops to visit in Loops by first walking the loops in pre-order. - SmallVector Loops; - SmallVector WorkList(LI.begin(), LI.end()); - while (!WorkList.empty()) { - Loop *L = WorkList.pop_back_val(); - WorkList.insert(WorkList.end(), L->begin(), L->end()); - Loops.push_back(L); - } - - // Now pop each element off of the stack to visit the loops in reverse - // post-order. - for (auto *L : reverse(Loops)) { - PreservedAnalyses PassPA = Pass.run(*L, LAM); + // A postorder worklist of loops to process. + SmallPriorityWorklist Worklist; + + // Setup the update result struct to let passes control our walk of the + // loops. + LPMUpdateResult UR(Worklist, LAM); + + // Add the loop nests in the reverse order of LoopInfo. For some reason, + // they are stored in RPO w.r.t. the control flow graph in LoopInfo. For + // the purpose of unrolling, loop deletion, and LICM, we largely want to + // work forward across the CFG so that we visit defs before uses and can + // propagate simplifications from one loop nest into the next. + // FIXME: Consider changing the order in LoopInfo. + internal::appendLoopsInRPO(reverse(LI), Worklist); + + do { + Loop *L = Worklist.pop_back_val(); + + // Reset the update structure for this loop. 
+ UR.CurrentL = L; + UR.SkipCurrentLoop = false; + + PreservedAnalyses PassPA = Pass.run(*L, LAM, LAR, UR); // FIXME: We should verify the set of analyses relevant to Loop passes // are preserved. - // We know that the loop pass couldn't have invalidated any other loop's - // analyses (that's the contract of a loop pass), so directly handle the - // loop analysis manager's invalidation here. - LAM.invalidate(*L, PassPA); + // If the loop hasn't been deleted, we need to handle invalidation here. + if (!UR.skipCurrentLoop()) + // We know that the loop pass couldn't have invalidated any other + // loop's analyses (that's the contract of a loop pass), so directly + // handle the loop analysis manager's invalidation here. + LAM.invalidate(*L, PassPA); // Then intersect the preserved set so that invalidation of module // analyses will eventually occur when the module pass completes. PA.intersect(std::move(PassPA)); - } + } while (!Worklist.empty()); // By definition we preserve the proxy. We also preserve all analyses on // Loops. This precludes *any* invalidation of loop analyses by the proxy, @@ -130,6 +406,19 @@ // loop analysis manager incrementally above. PA.preserveSet>(); PA.preserve(); + // We also preserve the set of analyses queried up-front and preserved + // throughout the run. This avoids each individual loop pass having to mark + // this. + PA.preserve(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + // FIXME: What we really want to do here is preserve an AA category, but + // that concept doesn't exist yet. + PA.preserve(); + PA.preserve(); + PA.preserve(); + PA.preserve(); return PA; } @@ -144,6 +433,19 @@ createFunctionToLoopPassAdaptor(LoopPassT Pass) { return FunctionToLoopPassAdaptor(std::move(Pass)); } + +/// \brief Pass for printing a loop's contents as LLVM's text IR assembly. +class PrintLoopPass : public PassInfoMixin { + raw_ostream &OS; + std::string Banner; + +public: + PrintLoopPass(); + PrintLoopPass(raw_ostream &OS, const std::string &Banner = ""); + + PreservedAnalyses run(Loop &L, LoopAnalysisManager &, LPMAnalysisResults &, + LPMUpdateResult &); +}; } #endif // LLVM_ANALYSIS_LOOPPASSMANAGER_H Index: include/llvm/Transforms/Scalar/IndVarSimplify.h =================================================================== --- include/llvm/Transforms/Scalar/IndVarSimplify.h +++ include/llvm/Transforms/Scalar/IndVarSimplify.h @@ -23,7 +23,8 @@ class IndVarSimplifyPass : public PassInfoMixin { public: - PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM); + PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &UR); }; } Index: include/llvm/Transforms/Scalar/LICM.h =================================================================== --- include/llvm/Transforms/Scalar/LICM.h +++ include/llvm/Transforms/Scalar/LICM.h @@ -42,7 +42,8 @@ /// Performs Loop Invariant Code Motion Pass. 
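As a companion to the update API introduced above, here is a hedged sketch of how a transformation that removes the current loop would talk to LPMUpdateResult. The pass and its dead-loop test are hypothetical; the point is only the ordering around markLoopAsDeleted.

// Hypothetical example only: reporting a deleted loop through the update API.
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/IR/PassManager.h"

namespace {
class ExampleDeleteLoopPass
    : public llvm::PassInfoMixin<ExampleDeleteLoopPass> {
public:
  llvm::PreservedAnalyses run(llvm::Loop &L, llvm::LoopAnalysisManager &,
                              llvm::LPMAnalysisResults &AR,
                              llvm::LPMUpdateResult &UR) {
    // Placeholder predicate so the sketch stays self-contained; a real pass
    // would prove deadness using AR.SE / AR.DT.
    bool LoopIsProvablyDead = false;
    if (!LoopIsProvablyDead)
      return llvm::PreservedAnalyses::all();

    // Clear cached analyses and mark the current loop as skipped *before*
    // unlinking it from AR.LI and tearing down its blocks (elided here).
    UR.markLoopAsDeleted(L);
    return llvm::getLoopPassPreservedAnalyses();
  }
};
} // end anonymous namespace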
class LICMPass : public PassInfoMixin { public: - PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM); + PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &UR); }; } // end namespace llvm Index: include/llvm/Transforms/Scalar/LoopDeletion.h =================================================================== --- include/llvm/Transforms/Scalar/LoopDeletion.h +++ include/llvm/Transforms/Scalar/LoopDeletion.h @@ -24,7 +24,8 @@ class LoopDeletionPass : public PassInfoMixin { public: LoopDeletionPass() {} - PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM); + PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &UR); bool runImpl(Loop *L, DominatorTree &DT, ScalarEvolution &SE, LoopInfo &loopInfo); Index: include/llvm/Transforms/Scalar/LoopIdiomRecognize.h =================================================================== --- include/llvm/Transforms/Scalar/LoopIdiomRecognize.h +++ include/llvm/Transforms/Scalar/LoopIdiomRecognize.h @@ -25,7 +25,8 @@ /// Performs Loop Idiom Recognize Pass. class LoopIdiomRecognizePass : public PassInfoMixin { public: - PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM); + PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &UR); }; } // end namespace llvm Index: include/llvm/Transforms/Scalar/LoopInstSimplify.h =================================================================== --- include/llvm/Transforms/Scalar/LoopInstSimplify.h +++ include/llvm/Transforms/Scalar/LoopInstSimplify.h @@ -23,7 +23,8 @@ /// Performs Loop Inst Simplify Pass. class LoopInstSimplifyPass : public PassInfoMixin { public: - PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM); + PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &UR); }; } // end namespace llvm Index: include/llvm/Transforms/Scalar/LoopRotation.h =================================================================== --- include/llvm/Transforms/Scalar/LoopRotation.h +++ include/llvm/Transforms/Scalar/LoopRotation.h @@ -24,7 +24,8 @@ class LoopRotatePass : public PassInfoMixin { public: LoopRotatePass(bool EnableHeaderDuplication = true); - PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM); + PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &UR); private: const bool EnableHeaderDuplication; Index: include/llvm/Transforms/Scalar/LoopSimplifyCFG.h =================================================================== --- include/llvm/Transforms/Scalar/LoopSimplifyCFG.h +++ include/llvm/Transforms/Scalar/LoopSimplifyCFG.h @@ -26,7 +26,8 @@ /// Performs basic CFG simplifications to assist other loop passes. class LoopSimplifyCFGPass : public PassInfoMixin { public: - PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM); + PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &UR); }; } // end namespace llvm Index: include/llvm/Transforms/Scalar/LoopStrengthReduce.h =================================================================== --- include/llvm/Transforms/Scalar/LoopStrengthReduce.h +++ include/llvm/Transforms/Scalar/LoopStrengthReduce.h @@ -31,7 +31,8 @@ /// Performs Loop Strength Reduce Pass. 
class LoopStrengthReducePass : public PassInfoMixin { public: - PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM); + PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &UR); }; } // end namespace llvm Index: include/llvm/Transforms/Scalar/LoopUnrollPass.h =================================================================== --- include/llvm/Transforms/Scalar/LoopUnrollPass.h +++ include/llvm/Transforms/Scalar/LoopUnrollPass.h @@ -23,7 +23,8 @@ Optional ProvidedRuntime; Optional ProvidedUpperBound; - PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM); + PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &UR); }; } // end namespace llvm Index: lib/Analysis/IVUsers.cpp =================================================================== --- lib/Analysis/IVUsers.cpp +++ lib/Analysis/IVUsers.cpp @@ -36,19 +36,15 @@ AnalysisKey IVUsersAnalysis::Key; -IVUsers IVUsersAnalysis::run(Loop &L, LoopAnalysisManager &AM) { - const auto &FAM = - AM.getResult(L).getManager(); - Function *F = L.getHeader()->getParent(); - - return IVUsers(&L, FAM.getCachedResult(*F), - FAM.getCachedResult(*F), - FAM.getCachedResult(*F), - FAM.getCachedResult(*F)); +IVUsers IVUsersAnalysis::run(Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR) { + return IVUsers(&L, &AR.AC, &AR.LI, &AR.DT, &AR.SE); } -PreservedAnalyses IVUsersPrinterPass::run(Loop &L, LoopAnalysisManager &AM) { - AM.getResult(L).print(OS); +PreservedAnalyses IVUsersPrinterPass::run(Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, + LPMUpdateResult &UR) { + AM.getResult(L, AR).print(OS); return PreservedAnalyses::all(); } Index: lib/Analysis/LoopAccessAnalysis.cpp =================================================================== --- lib/Analysis/LoopAccessAnalysis.cpp +++ lib/Analysis/LoopAccessAnalysis.cpp @@ -2120,31 +2120,17 @@ AnalysisKey LoopAccessAnalysis::Key; -LoopAccessInfo LoopAccessAnalysis::run(Loop &L, LoopAnalysisManager &AM) { - const FunctionAnalysisManager &FAM = - AM.getResult(L).getManager(); - Function &F = *L.getHeader()->getParent(); - auto *SE = FAM.getCachedResult(F); - auto *TLI = FAM.getCachedResult(F); - auto *AA = FAM.getCachedResult(F); - auto *DT = FAM.getCachedResult(F); - auto *LI = FAM.getCachedResult(F); - if (!SE) - report_fatal_error( - "ScalarEvolution must have been cached at a higher level"); - if (!AA) - report_fatal_error("AliasAnalysis must have been cached at a higher level"); - if (!DT) - report_fatal_error("DominatorTree must have been cached at a higher level"); - if (!LI) - report_fatal_error("LoopInfo must have been cached at a higher level"); - return LoopAccessInfo(&L, SE, TLI, AA, DT, LI); +LoopAccessInfo LoopAccessAnalysis::run(Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR) { + return LoopAccessInfo(&L, &AR.SE, &AR.TLI, &AR.AA, &AR.DT, &AR.LI); } PreservedAnalyses LoopAccessInfoPrinterPass::run(Loop &L, - LoopAnalysisManager &AM) { + LoopAnalysisManager &AM, + LPMAnalysisResults &AR, + LPMUpdateResult &UR) { Function &F = *L.getHeader()->getParent(); - auto &LAI = AM.getResult(L); + auto &LAI = AM.getResult(L, AR); OS << "Loop access info in function '" << F.getName() << "':\n"; OS.indent(2) << L.getHeader()->getName() << ":\n"; LAI.print(OS, 4); Index: lib/Analysis/LoopInfo.cpp =================================================================== --- lib/Analysis/LoopInfo.cpp +++ lib/Analysis/LoopInfo.cpp @@ -689,18 +689,13 @@ return PreservedAnalyses::all(); } 
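The IVUsers and LoopAccessInfo changes that follow show the new analysis-side signature; a minimal sketch of a custom loop analysis written against it might look as follows. The analysis name and its result are hypothetical and only illustrate using the bundled results in LPMAnalysisResults.

// Hypothetical example only: a loop analysis using LPMAnalysisResults rather
// than reaching through the function analysis manager proxy.
#include "llvm/Analysis/LoopPassManager.h"

namespace {
class ExampleTripCountAnalysis
    : public llvm::AnalysisInfoMixin<ExampleTripCountAnalysis> {
  friend llvm::AnalysisInfoMixin<ExampleTripCountAnalysis>;
  static llvm::AnalysisKey Key;

public:
  struct Result {
    unsigned SmallConstantTripCount;
  };

  Result run(llvm::Loop &L, llvm::LoopAnalysisManager &,
             llvm::LPMAnalysisResults &AR) {
    // ScalarEvolution arrives already computed; no getCachedResult dance.
    return {AR.SE.getSmallConstantTripCount(&L)};
  }
};

llvm::AnalysisKey ExampleTripCountAnalysis::Key;
} // end anonymous namespace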
-PrintLoopPass::PrintLoopPass() : OS(dbgs()) {}
-PrintLoopPass::PrintLoopPass(raw_ostream &OS, const std::string &Banner)
-    : OS(OS), Banner(Banner) {}
-
-PreservedAnalyses PrintLoopPass::run(Loop &L, AnalysisManager &) {
+void llvm::printLoop(Loop &L, raw_ostream &OS, const std::string &Banner) {
  OS << Banner;
  for (auto *Block : L.blocks())
    if (Block)
      Block->print(OS);
    else
      OS << "Printing block";
-  return PreservedAnalyses::all();
}
//===----------------------------------------------------------------------===//
Index: lib/Analysis/LoopPass.cpp
===================================================================
--- lib/Analysis/LoopPass.cpp
+++ lib/Analysis/LoopPass.cpp
@@ -32,13 +32,14 @@
/// PrintLoopPass - Print a Function corresponding to a Loop.
///
class PrintLoopPassWrapper : public LoopPass {
-  PrintLoopPass P;
+  raw_ostream &OS;
+  std::string Banner;
public:
  static char ID;
-  PrintLoopPassWrapper() : LoopPass(ID) {}
+  PrintLoopPassWrapper() : LoopPass(ID), OS(dbgs()) {}
  PrintLoopPassWrapper(raw_ostream &OS, const std::string &Banner)
-      : LoopPass(ID), P(OS, Banner) {}
+      : LoopPass(ID), OS(OS), Banner(Banner) {}
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
@@ -49,8 +50,7 @@
                        [](BasicBlock *BB) { return BB; });
    if (BBI != L->blocks().end() &&
        isFunctionInPrintList((*BBI)->getParent()->getName())) {
-      LoopAnalysisManager DummyLAM;
-      P.run(*L, DummyLAM);
+      printLoop(*L, OS, Banner);
    }
    return false;
  }
Index: lib/Analysis/LoopPassManager.cpp
===================================================================
--- lib/Analysis/LoopPassManager.cpp
+++ lib/Analysis/LoopPassManager.cpp
@@ -20,34 +20,191 @@
// Explicit template instantiations and specialization defininitions for core
// template typedefs.
namespace llvm {
-template class PassManager;
-template class AnalysisManager;
+template class AllAnalysesOn;
+template class AnalysisManager;
+template class PassManager;
template class InnerAnalysisManagerProxy;
-template class OuterAnalysisManagerProxy;
+template class OuterAnalysisManagerProxy;
+/// Explicitly specialize the pass manager run method to handle loop nest
+/// updates.
template <>
+PreservedAnalyses PassManager::run(Loop &L,
+                                   LoopAnalysisManager &AM,
+                                   LPMAnalysisResults &AR,
+                                   LPMUpdateResult &UR) {
+  PreservedAnalyses PA = PreservedAnalyses::all();
+
+  if (DebugLogging)
+    dbgs() << "Starting Loop pass manager run.\n";
+
+  for (auto &Pass : Passes) {
+    if (DebugLogging)
+      dbgs() << "Running pass: " << Pass->name() << " on " << L;
+
+    PreservedAnalyses PassPA = Pass->run(L, AM, AR, UR);
+
+    // If the loop was deleted, abort the run and return to the outer walk.
+    if (UR.skipCurrentLoop()) {
+      PA.intersect(std::move(PassPA));
+      break;
+    }
+
+    // Update the analysis manager as each pass runs and potentially
+    // invalidates analyses.
+    AM.invalidate(L, PassPA);
+
+    // Finally, we intersect the final preserved analyses to compute the
+    // aggregate preserved set for this pass manager.
+    PA.intersect(std::move(PassPA));
+
+    // FIXME: Historically, the pass managers all called the LLVM context's
+    // yield function here. We don't have a generic way to acquire the
+    // context and it isn't yet clear what the right pattern is for yielding
+    // in the new pass manager so it is currently omitted.
+    // ...getContext().yield();
+  }
+
+  // Invalidation was handled after each pass in the above loop for the current
+  // loop. Therefore, the remaining analysis results in the AnalysisManager are
+  // preserved.
We mark this with a set so that we don't need to inspect each + // one individually. + PA.preserveSet>(); + + if (DebugLogging) + dbgs() << "Finished Loop pass manager run.\n"; + + return PA; +} + bool LoopAnalysisManagerFunctionProxy::Result::invalidate( Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv) { - // If this proxy isn't marked as preserved, the set of Function objects in - // the module may have changed. We therefore can't call - // InnerAM->invalidate(), because any pointers to Functions it has may be - // stale. + // First compute the sequence of IR units covered by this proxy. We will want + // to visit this in postorder, but because this is a tree structure we can + // build a preorder sequence and walk it in reverse to do this. + SmallVector PreOrderLoops, PreOrderWorklist; + // Note that we want to walk the roots in reverse order because we will end + // up reversing the preorder sequence. However, it happens that the loop nest + // roots are in reverse order within the LoopInfo object so we just walk + // forward here. + // FIXME: If we change the order of LoopInfo we will want to add a reverse + // here. + for (Loop *RootL : *LI) { + assert(PreOrderWorklist.empty() && + "Must start with an empty preorder walk worklist."); + PreOrderWorklist.push_back(RootL); + do { + Loop *L = PreOrderWorklist.pop_back_val(); + PreOrderWorklist.append(L->begin(), L->end()); + PreOrderLoops.push_back(L); + } while (!PreOrderWorklist.empty()); + } + + // If this proxy or the loop info is going to be invalidated, we also need + // to clear all the keys coming from that analysis. We also completely blow + // away the loop analyses if any of the "bundled" analyses provided by the + // loop pass manager go away so that loop analyses can freely use these + // without worrying about declaring dependencies on them etc. + // FIXME: It isn't clear if this is the right tradeoff. We could instead make + // loop analyses declare any dependencies on these and use the more general + // invalidation logic below to act on that. auto PAC = PA.getChecker(); - if (!PAC.preserved() && !PAC.preservedSet>()) - InnerAM->clear(); + if (!(PAC.preserved() || PAC.preservedSet>()) || + Inv.invalidate(F, PA) || + Inv.invalidate(F, PA) || + Inv.invalidate(F, PA) || + Inv.invalidate(F, PA) || + Inv.invalidate(F, PA)) { + // Note that the LoopInfo may be stale at this point, however the loop + // objects themselves remain the only viable keys that could be in the + // analysis manager's cache. So we just walk the keys and forcibly clear + // those results. Note that the order doesn't matter here as this will just + // directly destroy the results without calling methods on them. + for (Loop *L : PreOrderLoops) + InnerAM->clear(*L); + + // We also need to null out the inner AM. The primary purpose is to avoid + // clearing the *entire* analysis manager when the result is destroyed. + // Because we received an invalidation event we were able to walk the + // loops, we don't need this last-ditch invalidation approach and would + // like to preserve the integrity of other cached entries in the analysis + // manager. This also should catch any attempt to use the analysis manager + // via this proxy prior to it being rebuilt. + // FIXME: This last property isn't very nice. Most analyses try to remain + // valid during invalidation. Maybe we should add an `IsClean` flag? + InnerAM = nullptr; + + // New return true to indicate this *is* invalid and a fresh proxy result + // needs to be built. 
+ return true; + } + + // Directly check if the relevant set is preserved so we can short circuit + // invalidating loops. + bool AreLoopAnalysesPreserved = + PA.allAnalysesInSetPreserved>(); + + // Since we have a valid LoopInfo we can actually leave the cached results in + // the analysis manager associated with the Loop keys, but we need to + // propagate any necessary invalidation logic into them. We'd like to + // invalidate things in roughly the same order as they were put into the + // cache and so we walk the preorder list in reverse to form a valid + // postorder. + for (Loop *L : reverse(PreOrderLoops)) { + Optional InnerPA; + + // Check to see whether the preserved set needs to be adjusted based on + // function-level analysis invalidation triggering deferred invalidation + // for this loop. + if (auto *OuterProxy = + InnerAM->getCachedResult(*L)) + for (const auto &OuterInvalidationPair : + OuterProxy->getOuterInvalidations()) { + AnalysisKey *OuterAnalysisID = OuterInvalidationPair.first; + const auto &InnerAnalysisIDs = OuterInvalidationPair.second; + if (Inv.invalidate(OuterAnalysisID, F, PA)) { + if (!InnerPA) + InnerPA = PA; + for (AnalysisKey *InnerAnalysisID : InnerAnalysisIDs) + InnerPA->abandon(InnerAnalysisID); + } + } + + // Check if we needed a custom PA set. If so we'll need to run the inner + // invalidation. + if (InnerPA) { + InnerAM->invalidate(*L, *InnerPA); + continue; + } - // FIXME: Proper suppor for invalidation isn't yet implemented for the LPM. + // Otherwise we only need to do invalidation if the original PA set didn't + // preserve all Loop analyses. + if (!AreLoopAnalysesPreserved) + InnerAM->invalidate(*L, PA); + } // Return false to indicate that this result is still a valid proxy. return false; } + +template <> +LoopAnalysisManagerFunctionProxy::Result +LoopAnalysisManagerFunctionProxy::run(Function &F, + FunctionAnalysisManager &AM) { + return Result(*InnerAM, AM.getResult(F)); +} } PreservedAnalyses llvm::getLoopPassPreservedAnalyses() { PreservedAnalyses PA; + PA.preserve(); PA.preserve(); PA.preserve(); + PA.preserve(); PA.preserve(); // TODO: What we really want to do here is preserve an AA category, but that // concept doesn't exist yet. @@ -57,3 +214,13 @@ PA.preserve(); return PA; } + +PrintLoopPass::PrintLoopPass() : OS(dbgs()) {} +PrintLoopPass::PrintLoopPass(raw_ostream &OS, const std::string &Banner) + : OS(OS), Banner(Banner) {} + +PreservedAnalyses PrintLoopPass::run(Loop &L, LoopAnalysisManager &, + LPMAnalysisResults &, LPMUpdateResult &) { + printLoop(L, OS, Banner); + return PreservedAnalyses::all(); +} Index: lib/Passes/PassBuilder.cpp =================================================================== --- lib/Passes/PassBuilder.cpp +++ lib/Passes/PassBuilder.cpp @@ -38,6 +38,7 @@ #include "llvm/Analysis/LazyValueInfo.h" #include "llvm/Analysis/LoopAccessAnalysis.h" #include "llvm/Analysis/LoopInfo.h" +#include "llvm/Analysis/LoopPassManager.h" #include "llvm/Analysis/MemoryDependenceAnalysis.h" #include "llvm/Analysis/ModuleSummaryAnalysis.h" #include "llvm/Analysis/OptimizationDiagnosticInfo.h" @@ -220,7 +221,8 @@ /// \brief No-op loop pass which does nothing. 
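Since PassBuilder now wires loop passes through the new adaptor, the following sketch shows how the pieces compose when building a pipeline by hand. The function and variable names are illustrative and not part of the patch; the types and factory functions are the ones declared in LoopPassManager.h.

// Hypothetical example only: a function pipeline that runs a small loop
// pipeline over every loop in postorder.
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/raw_ostream.h"

static llvm::FunctionPassManager makeExampleLoopPipeline(bool DebugLogging) {
  llvm::LoopPassManager LPM(DebugLogging);
  // Any pass with the new run(Loop &, LoopAnalysisManager &,
  // LPMAnalysisResults &, LPMUpdateResult &) signature can be added here.
  LPM.addPass(llvm::PrintLoopPass(llvm::errs(), "; loop pipeline example"));

  llvm::FunctionPassManager FPM(DebugLogging);
  // The adaptor computes the LPMAnalysisResults bundle once per function and
  // drives the postorder loop worklist described earlier.
  FPM.addPass(llvm::createFunctionToLoopPassAdaptor(std::move(LPM)));
  return FPM;
}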
struct NoOpLoopPass { - PreservedAnalyses run(Loop &L, LoopAnalysisManager &) { + PreservedAnalyses run(Loop &L, LoopAnalysisManager &, LPMAnalysisResults &, + LPMUpdateResult &) { return PreservedAnalyses::all(); } static StringRef name() { return "NoOpLoopPass"; } @@ -233,7 +235,9 @@ public: struct Result {}; - Result run(Loop &, LoopAnalysisManager &) { return Result(); } + Result run(Loop &, LoopAnalysisManager &, LPMAnalysisResults &) { + return Result(); + } static StringRef name() { return "NoOpLoopAnalysis"; } }; @@ -1018,8 +1022,10 @@ } #define LOOP_ANALYSIS(NAME, CREATE_PASS) \ if (Name == "require<" NAME ">") { \ - LPM.addPass(RequireAnalysisPass< \ - std::remove_reference::type, Loop>()); \ + LPM.addPass( \ + RequireAnalysisPass< \ + std::remove_reference::type, Loop, \ + LoopAnalysisManager, LPMAnalysisResults &, LPMUpdateResult &>()); \ return true; \ } \ if (Name == "invalidate<" NAME ">") { \ Index: lib/Transforms/Scalar/IndVarSimplify.cpp =================================================================== --- lib/Transforms/Scalar/IndVarSimplify.cpp +++ lib/Transforms/Scalar/IndVarSimplify.cpp @@ -2482,23 +2482,13 @@ return Changed; } -PreservedAnalyses IndVarSimplifyPass::run(Loop &L, LoopAnalysisManager &AM) { - auto &FAM = AM.getResult(L).getManager(); +PreservedAnalyses IndVarSimplifyPass::run(Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, + LPMUpdateResult &UR) { Function *F = L.getHeader()->getParent(); const DataLayout &DL = F->getParent()->getDataLayout(); - auto *LI = FAM.getCachedResult(*F); - auto *SE = FAM.getCachedResult(*F); - auto *DT = FAM.getCachedResult(*F); - - assert((LI && SE && DT) && - "Analyses required for indvarsimplify not available!"); - - // Optional analyses. - auto *TTI = FAM.getCachedResult(*F); - auto *TLI = FAM.getCachedResult(*F); - - IndVarSimplify IVS(LI, SE, DT, DL, TLI, TTI); + IndVarSimplify IVS(&AR.LI, &AR.SE, &AR.DT, DL, &AR.TLI, &AR.TTI); if (!IVS.run(&L)) return PreservedAnalyses::all(); Index: lib/Transforms/Scalar/LICM.cpp =================================================================== --- lib/Transforms/Scalar/LICM.cpp +++ lib/Transforms/Scalar/LICM.cpp @@ -176,21 +176,11 @@ }; } -PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM) { - const auto &FAM = - AM.getResult(L).getManager(); - Function *F = L.getHeader()->getParent(); - - auto *AA = FAM.getCachedResult(*F); - auto *LI = FAM.getCachedResult(*F); - auto *DT = FAM.getCachedResult(*F); - auto *TLI = FAM.getCachedResult(*F); - auto *SE = FAM.getCachedResult(*F); - assert((AA && LI && DT && TLI && SE) && "Analyses for LICM not available"); - +PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &UR) { LoopInvariantCodeMotion LICM; - if (!LICM.runOnLoop(&L, AA, LI, DT, TLI, SE, true)) + if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, &AR.TLI, &AR.SE, true)) return PreservedAnalyses::all(); // FIXME: There is no setPreservesCFG in the new PM. 
When that becomes Index: lib/Transforms/Scalar/LoopDeletion.cpp =================================================================== --- lib/Transforms/Scalar/LoopDeletion.cpp +++ lib/Transforms/Scalar/LoopDeletion.cpp @@ -215,15 +215,10 @@ return Changed; } -PreservedAnalyses LoopDeletionPass::run(Loop &L, LoopAnalysisManager &AM) { - auto &FAM = AM.getResult(L).getManager(); - Function *F = L.getHeader()->getParent(); - - auto &DT = *FAM.getCachedResult(*F); - auto &SE = *FAM.getCachedResult(*F); - auto &LI = *FAM.getCachedResult(*F); - - bool Changed = runImpl(&L, DT, SE, LI); +PreservedAnalyses LoopDeletionPass::run(Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, + LPMUpdateResult &UR) { + bool Changed = runImpl(&L, AR.DT, AR.SE, AR.LI); if (!Changed) return PreservedAnalyses::all(); Index: lib/Transforms/Scalar/LoopDistribute.cpp =================================================================== --- lib/Transforms/Scalar/LoopDistribute.cpp +++ lib/Transforms/Scalar/LoopDistribute.cpp @@ -946,10 +946,18 @@ auto &SE = AM.getResult(F); auto &ORE = AM.getResult(F); + // We don't directly need these analyses but they're required for loop + // analyses so provide them below. + auto &AA = AM.getResult(F); + auto &AC = AM.getResult(F); + auto &TTI = AM.getResult(F); + auto &TLI = AM.getResult(F); + auto &LAM = AM.getResult(F).getManager(); std::function GetLAA = [&](Loop &L) -> const LoopAccessInfo & { - return LAM.getResult(L); + LPMAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI}; + return LAM.getResult(L, AR); }; bool Changed = runImpl(F, &LI, &DT, &SE, &ORE, GetLAA); Index: lib/Transforms/Scalar/LoopIdiomRecognize.cpp =================================================================== --- lib/Transforms/Scalar/LoopIdiomRecognize.cpp +++ lib/Transforms/Scalar/LoopIdiomRecognize.cpp @@ -186,24 +186,12 @@ }; } // End anonymous namespace. -PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L, - LoopAnalysisManager &AM) { - const auto &FAM = - AM.getResult(L).getManager(); - Function *F = L.getHeader()->getParent(); - - // Use getCachedResult because Loop pass cannot trigger a function analysis. - auto *AA = FAM.getCachedResult(*F); - auto *DT = FAM.getCachedResult(*F); - auto *LI = FAM.getCachedResult(*F); - auto *SE = FAM.getCachedResult(*F); - auto *TLI = FAM.getCachedResult(*F); - const auto *TTI = FAM.getCachedResult(*F); +PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, + LPMUpdateResult &UR) { const auto *DL = &L.getHeader()->getModule()->getDataLayout(); - assert((AA && DT && LI && SE && TLI && TTI && DL) && - "Analyses for Loop Idiom Recognition not available"); - LoopIdiomRecognize LIR(AA, DT, LI, SE, TLI, TTI, DL); + LoopIdiomRecognize LIR(&AR.AA, &AR.DT, &AR.LI, &AR.SE, &AR.TLI, &AR.TTI, DL); if (!LIR.runOnLoop(&L)) return PreservedAnalyses::all(); Index: lib/Transforms/Scalar/LoopInstSimplify.cpp =================================================================== --- lib/Transforms/Scalar/LoopInstSimplify.cpp +++ lib/Transforms/Scalar/LoopInstSimplify.cpp @@ -183,20 +183,10 @@ }; } -PreservedAnalyses LoopInstSimplifyPass::run(Loop &L, - LoopAnalysisManager &AM) { - const auto &FAM = - AM.getResult(L).getManager(); - Function *F = L.getHeader()->getParent(); - - // Use getCachedResult because Loop pass cannot trigger a function analysis. 
- auto *DT = FAM.getCachedResult(*F); - auto *LI = FAM.getCachedResult(*F); - auto *AC = FAM.getCachedResult(*F); - const auto *TLI = FAM.getCachedResult(*F); - assert((LI && AC && TLI) && "Analyses for Loop Inst Simplify not available"); - - if (!SimplifyLoopInst(&L, DT, LI, AC, TLI)) +PreservedAnalyses LoopInstSimplifyPass::run(Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, + LPMUpdateResult &UR) { + if (!SimplifyLoopInst(&L, &AR.DT, &AR.LI, &AR.AC, &AR.TLI)) return PreservedAnalyses::all(); return getLoopPassPreservedAnalyses(); Index: lib/Transforms/Scalar/LoopRotation.cpp =================================================================== --- lib/Transforms/Scalar/LoopRotation.cpp +++ lib/Transforms/Scalar/LoopRotation.cpp @@ -625,20 +625,11 @@ LoopRotatePass::LoopRotatePass(bool EnableHeaderDuplication) : EnableHeaderDuplication(EnableHeaderDuplication) {} -PreservedAnalyses LoopRotatePass::run(Loop &L, LoopAnalysisManager &AM) { - auto &FAM = AM.getResult(L).getManager(); - Function *F = L.getHeader()->getParent(); - - auto *LI = FAM.getCachedResult(*F); - const auto *TTI = FAM.getCachedResult(*F); - auto *AC = FAM.getCachedResult(*F); - assert((LI && TTI && AC) && "Analyses for loop rotation not available"); - - // Optional analyses. - auto *DT = FAM.getCachedResult(*F); - auto *SE = FAM.getCachedResult(*F); +PreservedAnalyses LoopRotatePass::run(Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, + LPMUpdateResult &UR) { int Threshold = EnableHeaderDuplication ? DefaultRotationThreshold : 0; - LoopRotate LR(Threshold, LI, TTI, AC, DT, SE); + LoopRotate LR(Threshold, &AR.LI, &AR.TTI, &AR.AC, &AR.DT, &AR.SE); bool Changed = LR.processLoop(&L); if (!Changed) Index: lib/Transforms/Scalar/LoopSimplifyCFG.cpp =================================================================== --- lib/Transforms/Scalar/LoopSimplifyCFG.cpp +++ lib/Transforms/Scalar/LoopSimplifyCFG.cpp @@ -64,16 +64,10 @@ return Changed; } -PreservedAnalyses LoopSimplifyCFGPass::run(Loop &L, LoopAnalysisManager &AM) { - const auto &FAM = - AM.getResult(L).getManager(); - Function *F = L.getHeader()->getParent(); - - auto *LI = FAM.getCachedResult(*F); - auto *DT = FAM.getCachedResult(*F); - assert((LI && DT) && "Analyses for LoopSimplifyCFG not available"); - - if (!simplifyLoopCFG(L, *DT, *LI)) +PreservedAnalyses LoopSimplifyCFGPass::run(Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, + LPMUpdateResult &UR) { + if (!simplifyLoopCFG(L, AR.DT, AR.LI)) return PreservedAnalyses::all(); return getLoopPassPreservedAnalyses(); } Index: lib/Transforms/Scalar/LoopStrengthReduce.cpp =================================================================== --- lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -5052,21 +5052,11 @@ return ReduceLoopStrength(L, IU, SE, DT, LI, TTI); } -PreservedAnalyses LoopStrengthReducePass::run(Loop &L, - LoopAnalysisManager &AM) { - const auto &FAM = - AM.getResult(L).getManager(); - Function *F = L.getHeader()->getParent(); - - auto &IU = AM.getResult(L); - auto *SE = FAM.getCachedResult(*F); - auto *DT = FAM.getCachedResult(*F); - auto *LI = FAM.getCachedResult(*F); - auto *TTI = FAM.getCachedResult(*F); - assert((SE && DT && LI && TTI) && - "Analyses for Loop Strength Reduce not available"); - - if (!ReduceLoopStrength(&L, IU, *SE, *DT, *LI, *TTI)) +PreservedAnalyses LoopStrengthReducePass::run(Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, + LPMUpdateResult &UR) { + if (!ReduceLoopStrength(&L, AM.getResult(L, 
AR), AR.SE, + AR.DT, AR.LI, AR.TTI)) return PreservedAnalyses::all(); return getLoopPassPreservedAnalyses(); Index: lib/Transforms/Scalar/LoopUnrollPass.cpp =================================================================== --- lib/Transforms/Scalar/LoopUnrollPass.cpp +++ lib/Transforms/Scalar/LoopUnrollPass.cpp @@ -1111,41 +1111,23 @@ return llvm::createLoopUnrollPass(-1, -1, 0, 0, 0); } -PreservedAnalyses LoopUnrollPass::run(Loop &L, LoopAnalysisManager &AM) { +PreservedAnalyses LoopUnrollPass::run(Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, + LPMUpdateResult &UR) { const auto &FAM = - AM.getResult(L).getManager(); + AM.getResult(L, AR).getManager(); Function *F = L.getHeader()->getParent(); - - DominatorTree *DT = FAM.getCachedResult(*F); - LoopInfo *LI = FAM.getCachedResult(*F); - ScalarEvolution *SE = FAM.getCachedResult(*F); - auto *TTI = FAM.getCachedResult(*F); - auto *AC = FAM.getCachedResult(*F); auto *ORE = FAM.getCachedResult(*F); - if (!DT) - report_fatal_error( - "LoopUnrollPass: DominatorTreeAnalysis not cached at a higher level"); - if (!LI) - report_fatal_error( - "LoopUnrollPass: LoopAnalysis not cached at a higher level"); - if (!SE) - report_fatal_error( - "LoopUnrollPass: ScalarEvolutionAnalysis not cached at a higher level"); - if (!TTI) - report_fatal_error( - "LoopUnrollPass: TargetIRAnalysis not cached at a higher level"); - if (!AC) - report_fatal_error( - "LoopUnrollPass: AssumptionAnalysis not cached at a higher level"); + // FIXME: This should probably be optional rather than required. if (!ORE) report_fatal_error("LoopUnrollPass: OptimizationRemarkEmitterAnalysis not " "cached at a higher level"); - bool Changed = - tryToUnrollLoop(&L, *DT, LI, SE, *TTI, *AC, *ORE, /*PreserveLCSSA*/ true, - ProvidedCount, ProvidedThreshold, ProvidedAllowPartial, - ProvidedRuntime, ProvidedUpperBound); + bool Changed = tryToUnrollLoop(&L, AR.DT, &AR.LI, &AR.SE, AR.TTI, AR.AC, *ORE, + /*PreserveLCSSA*/ true, ProvidedCount, + ProvidedThreshold, ProvidedAllowPartial, + ProvidedRuntime, ProvidedUpperBound); if (!Changed) return PreservedAnalyses::all(); Index: lib/Transforms/Vectorize/LoopVectorize.cpp =================================================================== --- lib/Transforms/Vectorize/LoopVectorize.cpp +++ lib/Transforms/Vectorize/LoopVectorize.cpp @@ -7660,7 +7660,7 @@ auto &TTI = AM.getResult(F); auto &DT = AM.getResult(F); auto &BFI = AM.getResult(F); - auto *TLI = AM.getCachedResult(F); + auto &TLI = AM.getResult(F); auto &AA = AM.getResult(F); auto &AC = AM.getResult(F); auto &DB = AM.getResult(F); @@ -7669,10 +7669,11 @@ auto &LAM = AM.getResult(F).getManager(); std::function GetLAA = [&](Loop &L) -> const LoopAccessInfo & { - return LAM.getResult(L); + LPMAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI}; + return LAM.getResult(L, AR); }; bool Changed = - runImpl(F, SE, LI, TTI, DT, BFI, TLI, DB, AA, AC, GetLAA, ORE); + runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE); if (!Changed) return PreservedAnalyses::all(); PreservedAnalyses PA; Index: test/Other/loop-pass-ordering.ll =================================================================== --- test/Other/loop-pass-ordering.ll +++ test/Other/loop-pass-ordering.ll @@ -8,11 +8,12 @@ ; / \ \ ; loop.0.0 loop.0.1 loop.1.0 ; -; CHECK: Running pass: NoOpLoopPass on loop.1.0 -; CHECK: Running pass: NoOpLoopPass on loop.1 -; CHECK: Running pass: NoOpLoopPass on loop.0.0 -; CHECK: Running pass: NoOpLoopPass on loop.0.1 -; CHECK: Running pass: NoOpLoopPass on loop.0 +; CHECK: Running 
pass: NoOpLoopPass on Loop at depth 2 containing: %loop.0.0 +; CHECK: Running pass: NoOpLoopPass on Loop at depth 2 containing: %loop.0.1 +; CHECK: Running pass: NoOpLoopPass on Loop at depth 1 containing: %loop.0 +; CHECK: Running pass: NoOpLoopPass on Loop at depth 2 containing: %loop.1.0 +; CHECK: Running pass: NoOpLoopPass on Loop at depth 1 containing: %loop.1 + define void @f() { entry: br label %loop.0 Index: test/Other/new-pass-manager.ll =================================================================== --- test/Other/new-pass-manager.ll +++ test/Other/new-pass-manager.ll @@ -433,12 +433,12 @@ ; CHECK-O: Running pass: TailCallElimPass ; CHECK-O: Running pass: SimplifyCFGPass ; CHECK-O: Running pass: ReassociatePass -; CHECK-O: Starting llvm::Loop pass manager run. -; CHECK-O: Finished llvm::Loop pass manager run. +; CHECK-O: Starting Loop pass manager run. +; CHECK-O: Finished Loop pass manager run. ; CHECK-O: Running pass: SimplifyCFGPass ; CHECK-O: Running pass: InstCombinePass -; CHECK-O: Starting llvm::Loop pass manager run. -; CHECK-O: Finished llvm::Loop pass manager run. +; CHECK-O: Starting Loop pass manager run. +; CHECK-O: Finished Loop pass manager run. ; CHECK-O: Running pass: MemCpyOptPass ; CHECK-O: Running pass: SCCPPass ; CHECK-O: Running pass: BDCEPass @@ -544,20 +544,21 @@ ; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: DominatorTreeAnalysis ; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: AAManager ; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: TargetLibraryAnalysis -; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: ScalarEvolutionAnalysis ; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: AssumptionAnalysis -; CHECK-REPEAT-LOOP-PASS-NEXT: Starting llvm::Loop pass manager run +; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: ScalarEvolutionAnalysis +; CHECK-REPEAT-LOOP-PASS-NEXT: Running analysis: TargetIRAnalysis +; CHECK-REPEAT-LOOP-PASS-NEXT: Starting Loop pass manager run ; CHECK-REPEAT-LOOP-PASS-NEXT: Running pass: RepeatedPass -; CHECK-REPEAT-LOOP-PASS-NEXT: Starting llvm::Loop pass manager run +; CHECK-REPEAT-LOOP-PASS-NEXT: Starting Loop pass manager run ; CHECK-REPEAT-LOOP-PASS-NEXT: Running pass: NoOpLoopPass -; CHECK-REPEAT-LOOP-PASS-NEXT: Finished llvm::Loop pass manager run -; CHECK-REPEAT-LOOP-PASS-NEXT: Starting llvm::Loop pass manager run +; CHECK-REPEAT-LOOP-PASS-NEXT: Finished Loop pass manager run +; CHECK-REPEAT-LOOP-PASS-NEXT: Starting Loop pass manager run ; CHECK-REPEAT-LOOP-PASS-NEXT: Running pass: NoOpLoopPass -; CHECK-REPEAT-LOOP-PASS-NEXT: Finished llvm::Loop pass manager run -; CHECK-REPEAT-LOOP-PASS-NEXT: Starting llvm::Loop pass manager run +; CHECK-REPEAT-LOOP-PASS-NEXT: Finished Loop pass manager run +; CHECK-REPEAT-LOOP-PASS-NEXT: Starting Loop pass manager run ; CHECK-REPEAT-LOOP-PASS-NEXT: Running pass: NoOpLoopPass -; CHECK-REPEAT-LOOP-PASS-NEXT: Finished llvm::Loop pass manager run -; CHECK-REPEAT-LOOP-PASS-NEXT: Finished llvm::Loop pass manager run +; CHECK-REPEAT-LOOP-PASS-NEXT: Finished Loop pass manager run +; CHECK-REPEAT-LOOP-PASS-NEXT: Finished Loop pass manager run ; CHECK-REPEAT-LOOP-PASS-NEXT: Finished llvm::Function pass manager run ; CHECK-REPEAT-LOOP-PASS-NEXT: Finished llvm::Module pass manager run Index: test/Other/pass-pipeline-parsing.ll =================================================================== --- test/Other/pass-pipeline-parsing.ll +++ test/Other/pass-pipeline-parsing.ll @@ -144,10 +144,10 @@ ; CHECK-TWO-NOOP-LOOP: Running pass: ModuleToFunctionPassAdaptor ; CHECK-TWO-NOOP-LOOP: Starting 
llvm::Function pass manager run ; CHECK-TWO-NOOP-LOOP: Running pass: FunctionToLoopPassAdaptor -; CHECK-TWO-NOOP-LOOP: Starting llvm::Loop pass manager run +; CHECK-TWO-NOOP-LOOP: Starting Loop pass manager run ; CHECK-TWO-NOOP-LOOP: Running pass: NoOpLoopPass ; CHECK-TWO-NOOP-LOOP: Running pass: NoOpLoopPass -; CHECK-TWO-NOOP-LOOP: Finished llvm::Loop pass manager run +; CHECK-TWO-NOOP-LOOP: Finished Loop pass manager run ; CHECK-TWO-NOOP-LOOP: Finished llvm::Function pass manager run ; CHECK-TWO-NOOP-LOOP: Finished llvm::Module pass manager run @@ -167,9 +167,9 @@ ; CHECK-NESTED-FP-LP: Running pass: ModuleToFunctionPassAdaptor ; CHECK-NESTED-FP-LP: Starting llvm::Function pass manager run ; CHECK-NESTED-FP-LP: Running pass: FunctionToLoopPassAdaptor -; CHECK-NESTED-FP-LP: Starting llvm::Loop pass manager run +; CHECK-NESTED-FP-LP: Starting Loop pass manager run ; CHECK-NESTED-FP-LP: Running pass: NoOpLoopPass -; CHECK-NESTED-FP-LP: Finished llvm::Loop pass manager run +; CHECK-NESTED-FP-LP: Finished Loop pass manager run ; CHECK-NESTED-FP-LP: Finished llvm::Function pass manager run ; CHECK-NESTED-FP-LP: Finished llvm::Module pass manager run Index: unittests/Analysis/LoopPassManagerTest.cpp =================================================================== --- unittests/Analysis/LoopPassManagerTest.cpp +++ unittests/Analysis/LoopPassManagerTest.cpp @@ -12,6 +12,7 @@ #include "llvm/Analysis/LoopPassManager.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/TargetLibraryInfo.h" +#include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/AsmParser/Parser.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" @@ -19,84 +20,169 @@ #include "llvm/IR/Module.h" #include "llvm/IR/PassManager.h" #include "llvm/Support/SourceMgr.h" +#include "gmock/gmock.h" #include "gtest/gtest.h" using namespace llvm; namespace { -class TestLoopAnalysis : public AnalysisInfoMixin { - friend AnalysisInfoMixin; - static AnalysisKey Key; - - int &Runs; +using testing::DoDefault; +using testing::Return; +using testing::Expectation; +using testing::Invoke; +using testing::InvokeWithoutArgs; +using testing::_; +template , + typename... ExtraArgTs> +class MockAnalysisHandleTemplateBase { public: - struct Result { - Result(int Count) : BlockCount(Count) {} - int BlockCount; - }; + class Analysis : public AnalysisInfoMixin { + friend AnalysisInfoMixin; + friend MockAnalysisHandleTemplateBase; + static AnalysisKey Key; + + DerivedT *Handle; + + Analysis(DerivedT &Handle) : Handle(&Handle) {} + + public: + class Result { + friend MockAnalysisHandleTemplateBase; + + DerivedT *Handle; - TestLoopAnalysis(int &Runs) : Runs(Runs) {} + Result(DerivedT &Handle) : Handle(&Handle) {} - /// \brief Run the analysis pass over the loop and return a result. - Result run(Loop &L, LoopAnalysisManager &AM) { - ++Runs; - int Count = 0; + public: + // Forward invalidation events to the mock handle. + bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA, + typename AnalysisManagerT::Invalidator &Inv) { + return Handle->invalidate(IR, PA, Inv); + } + }; - for (auto I = L.block_begin(), E = L.block_end(); I != E; ++I) - ++Count; - return Result(Count); + Result run(IRUnitT &IR, AnalysisManagerT &AM, ExtraArgTs... 
ExtraArgs) { + return Handle->run(IR, AM, ExtraArgs...); + } + }; + + Analysis getAnalysis() { return Analysis(static_cast(*this)); } + typename Analysis::Result getResult() { + return typename Analysis::Result(static_cast(*this)); + } + +protected: + /// The base provides a method the derived class constructor can use to set + /// up default actions. We can't do this directly because our constructor is + /// run before the derived object is constructed. + void setDefaults() { + ON_CALL(static_cast(*this), + run(_, _, testing::Matcher(_)...)) + .WillByDefault(Return(this->getResult())); + ON_CALL(static_cast(*this), invalidate(_, _, _)) + .WillByDefault(Invoke([](IRUnitT &, const PreservedAnalyses &PA, + typename AnalysisManagerT::Invalidator &Inv) { + auto PAC = PA.getChecker(); + return !PAC.preserved() && + !PAC.template preservedSet>(); + })); } }; -AnalysisKey TestLoopAnalysis::Key; +template +AnalysisKey MockAnalysisHandleTemplateBase::Analysis::Key; -class TestLoopPass { - std::vector &VisitedLoops; - int &AnalyzedBlockCount; - bool OnlyUseCachedResults; +template (-1)> +struct MockLoopAnalysisHandleTemplate + : MockAnalysisHandleTemplateBase, Loop, + LoopAnalysisManager, + LPMAnalysisResults &> { + typedef typename MockLoopAnalysisHandleTemplate::Analysis Analysis; + MOCK_METHOD3_T(run, typename Analysis::Result(Loop &, LoopAnalysisManager &, + LPMAnalysisResults &)); + + MOCK_METHOD3_T(invalidate, bool(Loop &, const PreservedAnalyses &, + LoopAnalysisManager::Invalidator &)); + + MockLoopAnalysisHandleTemplate() { this->setDefaults(); } +}; + +typedef MockLoopAnalysisHandleTemplate<> MockLoopAnalysisHandle; + +struct MockFunctionAnalysisHandle + : MockAnalysisHandleTemplateBase { + MOCK_METHOD2(run, Analysis::Result(Function &, FunctionAnalysisManager &)); + + MOCK_METHOD3(invalidate, bool(Function &, const PreservedAnalyses &, + FunctionAnalysisManager::Invalidator &)); + + MockFunctionAnalysisHandle() { setDefaults(); } +}; + +template , + typename... ExtraArgTs> +class MockPassHandleBase { public: - TestLoopPass(std::vector &VisitedLoops, int &AnalyzedBlockCount, - bool OnlyUseCachedResults = false) - : VisitedLoops(VisitedLoops), AnalyzedBlockCount(AnalyzedBlockCount), - OnlyUseCachedResults(OnlyUseCachedResults) {} - - PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM) { - VisitedLoops.push_back(L.getName()); - - if (OnlyUseCachedResults) { - // Hack to force the use of the cached interface. - if (auto *AR = AM.getCachedResult(L)) - AnalyzedBlockCount += AR->BlockCount; - } else { - // Typical path just runs the analysis as needed. - auto &AR = AM.getResult(L); - AnalyzedBlockCount += AR.BlockCount; + class Pass : public PassInfoMixin { + friend MockPassHandleBase; + + DerivedT *Handle; + + Pass(DerivedT &Handle) : Handle(&Handle) {} + + public: + PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM, + ExtraArgTs... ExtraArgs) { + return Handle->run(IR, AM, ExtraArgs...); } + }; - return PreservedAnalyses::all(); + Pass getPass() { return Pass(static_cast(*this)); } + +protected: + /// The base provides a method the derived class constructor can use to set + /// up default actions. We can't do this directly because our constructor is + /// run before the derived object is constructed. 
+ void setDefaults() { + ON_CALL(static_cast(*this), + run(_, _, testing::Matcher(_)...)) + .WillByDefault(Return(PreservedAnalyses::all())); } +}; - static StringRef name() { return "TestLoopPass"; } +struct MockLoopPassHandle + : MockPassHandleBase { + MOCK_METHOD4(run, PreservedAnalyses(Loop &, LoopAnalysisManager &, + LPMAnalysisResults &, LPMUpdateResult &)); + MockLoopPassHandle() { setDefaults(); } }; -// A test loop pass that invalidates the analysis for loops with the given name. -class TestLoopInvalidatingPass { - StringRef Name; +struct MockFunctionPassHandle + : MockPassHandleBase { + MOCK_METHOD2(run, PreservedAnalyses(Function &, FunctionAnalysisManager &)); -public: - TestLoopInvalidatingPass(StringRef LoopName) : Name(LoopName) {} + MockFunctionPassHandle() { setDefaults(); } +}; - PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM) { - return L.getName() == Name ? getLoopPassPreservedAnalyses() - : PreservedAnalyses::all(); - } +struct MockModulePassHandle : MockPassHandleBase { + MOCK_METHOD2(run, PreservedAnalyses(Module &, ModuleAnalysisManager &)); - static StringRef name() { return "TestLoopInvalidatingPass"; } + MockModulePassHandle() { setDefaults(); } }; +MATCHER_P(HasName, Name, "") { + *result_listener << "has name '" << arg.getName().str() << "'"; + return Name == arg.getName(); +} + std::unique_ptr parseIR(LLVMContext &C, const char *IR) { SMDiagnostic Err; return parseAssemblyString(IR, Err, C); @@ -107,6 +193,23 @@ LLVMContext Context; std::unique_ptr M; + LoopAnalysisManager LAM; + FunctionAnalysisManager FAM; + ModuleAnalysisManager MAM; + + MockLoopAnalysisHandle MLAHandle; + MockLoopPassHandle MLPHandle; + MockFunctionPassHandle MFPHandle; + MockModulePassHandle MMPHandle; + + static PreservedAnalyses getLoopAnalysisResult(Loop &L, + LoopAnalysisManager &AM, + LPMAnalysisResults &AR, + LPMUpdateResult &) { + (void)AM.getResult(L, AR); + return PreservedAnalyses::all(); + }; + public: LoopPassManagerTest() : M(parseIR(Context, "define void @f() {\n" @@ -129,81 +232,1164 @@ " br i1 undef, label %loop.g.0, label %end\n" "end:\n" " ret void\n" - "}\n")) {} -}; + "}\n")), + LAM(true), FAM(true), MAM(true) { + // Register an analysis from the mock handle. + LAM.registerPass([&] { return MLAHandle.getAnalysis(); }); -#define EXPECT_N_ELEMENTS_EQ(N, EXPECTED, ACTUAL) \ - do { \ - EXPECT_EQ(N##UL, ACTUAL.size()); \ - for (int I = 0; I < N; ++I) \ - EXPECT_TRUE(EXPECTED[I] == ACTUAL[I]) << "Element " << I << " is " \ - << ACTUAL[I] << ". Expected " \ - << EXPECTED[I] << "."; \ - } while (0) + // We need DominatorTreeAnalysis for LoopAnalysis. + FAM.registerPass([&] { return DominatorTreeAnalysis(); }); + FAM.registerPass([&] { return LoopAnalysis(); }); + // We also allow loop passes to assume a set of other analyses and so need + // those. + FAM.registerPass([&] { return AAManager(); }); + FAM.registerPass([&] { return AssumptionAnalysis(); }); + FAM.registerPass([&] { return ScalarEvolutionAnalysis(); }); + FAM.registerPass([&] { return TargetLibraryAnalysis(); }); + FAM.registerPass([&] { return TargetIRAnalysis(); }); -TEST_F(LoopPassManagerTest, Basic) { - LoopAnalysisManager LAM(true); - int LoopAnalysisRuns = 0; - LAM.registerPass([&] { return TestLoopAnalysis(LoopAnalysisRuns); }); - - FunctionAnalysisManager FAM(true); - // We need DominatorTreeAnalysis for LoopAnalysis. 
- FAM.registerPass([&] { return DominatorTreeAnalysis(); }); - FAM.registerPass([&] { return LoopAnalysis(); }); - // We also allow loop passes to assume a set of other analyses and so need - // those. - FAM.registerPass([&] { return AAManager(); }); - FAM.registerPass([&] { return TargetLibraryAnalysis(); }); - FAM.registerPass([&] { return ScalarEvolutionAnalysis(); }); - FAM.registerPass([&] { return AssumptionAnalysis(); }); - FAM.registerPass([&] { return LoopAnalysisManagerFunctionProxy(LAM); }); - LAM.registerPass([&] { return FunctionAnalysisManagerLoopProxy(FAM); }); - - ModuleAnalysisManager MAM(true); - MAM.registerPass([&] { return FunctionAnalysisManagerModuleProxy(FAM); }); - FAM.registerPass([&] { return ModuleAnalysisManagerFunctionProxy(MAM); }); + // Cross register proxies. + LAM.registerPass([&] { return FunctionAnalysisManagerLoopProxy(FAM); }); + FAM.registerPass([&] { return LoopAnalysisManagerFunctionProxy(LAM); }); + FAM.registerPass([&] { return ModuleAnalysisManagerFunctionProxy(MAM); }); + MAM.registerPass([&] { return FunctionAnalysisManagerModuleProxy(FAM); }); + } +}; +TEST_F(LoopPassManagerTest, Basic) { ModulePassManager MPM(true); - FunctionPassManager FPM(true); + ::testing::InSequence MakeExpectationsSequenced; - // Visit all of the loops. - std::vector VisitedLoops1; - int AnalyzedBlockCount1 = 0; + // First we just visit all the loops in all the functions and get the + // analysis result for it. This will run the analysis a total of four times, + // once for each loop. + EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.g.0"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _)).Times(1); { - LoopPassManager LPM; - LPM.addPass(TestLoopPass(VisitedLoops1, AnalyzedBlockCount1)); - + LoopPassManager LPM(true); + LPM.addPass(MLPHandle.getPass()); + FunctionPassManager FPM(true); FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM))); + MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); } - // Only use cached analyses. - std::vector VisitedLoops2; - int AnalyzedBlockCount2 = 0; + // Next we run two passes over the loops. The first one invalidates the + // analyses for one loop, the second ones try to get the analysis results. + // This should force only one analysis to re-run within the loop PM, but will + // also invalidate everything after the loop pass manager finishes. 
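  // For orientation before the next round of expectations: the interface that
  // MLPHandle's mock run method stands in for is just a four-argument run
  // method on an ordinary pass type. A minimal sketch follows; the pass name
  // and the explicit template argument are illustrative reconstructions (the
  // angle-bracket contents are elided in this rendering of the patch), not
  // code taken from the patch itself.
  struct SketchLoopPass : PassInfoMixin<SketchLoopPass> {
    PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                          LPMAnalysisResults &AR, LPMUpdateResult &) {
      // Asking for a result either hits the loop analysis cache or re-runs
      // the analysis, exactly like the getLoopAnalysisResult helper above.
      (void)AM.getResult<MockLoopAnalysisHandle::Analysis>(L, AR);
      // Returning all() keeps every cached analysis alive; returning
      // PreservedAnalyses::none() instead is what "invalidating the analyses
      // for one loop" means for the expectations that follow.
      return PreservedAnalyses::all();
    }
  };
  // The expectations below exercise exactly these two behaviors through the
  // mocks, with loop.0.1 being the loop whose pass run returns none().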
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) + .WillOnce(DoDefault()) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) + .WillOnce(InvokeWithoutArgs([] { return PreservedAnalyses::none(); })) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) + .WillOnce(DoDefault()) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLPHandle, run(HasName("loop.g.0"), _, _, _)) + .WillOnce(DoDefault()) + .WillOnce(Invoke(getLoopAnalysisResult)); { - LoopPassManager LPM; - LPM.addPass(TestLoopInvalidatingPass("loop.g.0")); - LPM.addPass(TestLoopPass(VisitedLoops2, AnalyzedBlockCount2, - /*OnlyUseCachedResults=*/true)); - + LoopPassManager LPM(true); + LPM.addPass(MLPHandle.getPass()); + LPM.addPass(MLPHandle.getPass()); + FunctionPassManager FPM(true); FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM))); + MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); } + MPM.run(*M, MAM); +} + +TEST_F(LoopPassManagerTest, FunctionPassInvalidationOfLoopAnalyses) { + ModulePassManager MPM(true); + FunctionPassManager FPM(true); + // We process each function completely in sequence. + ::testing::Sequence FSequence, GSequence; + + // First, force the analysis result to be computed for each loop. + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)) + .InSequence(FSequence) + .WillOnce(DoDefault()); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)) + .InSequence(FSequence) + .WillOnce(DoDefault()); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)) + .InSequence(FSequence) + .WillOnce(DoDefault()); + EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _)) + .InSequence(GSequence) + .WillOnce(DoDefault()); + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + // No need to re-run if we require again from a fresh loop pass manager. + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + // For 'f', preserve most things but not the specific loop analyses. + EXPECT_CALL(MFPHandle, run(HasName("f"), _)) + .InSequence(FSequence) + .WillOnce(Return(getLoopPassPreservedAnalyses())); + EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.0"), _, _)) + .InSequence(FSequence) + .WillOnce(DoDefault()); + // On one loop, skip the invalidation (as though we did an internal update). + EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.1"), _, _)) + .InSequence(FSequence) + .WillOnce(Return(false)); + EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0"), _, _)) + .InSequence(FSequence) + .WillOnce(DoDefault()); + // Now two loops still have to be recomputed. + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)) + .InSequence(FSequence) + .WillOnce(DoDefault()); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)) + .InSequence(FSequence) + .WillOnce(DoDefault()); + // Preserve things in the second function to ensure invalidation remains + // isolated to one function. + EXPECT_CALL(MFPHandle, run(HasName("g"), _)) + .InSequence(GSequence) + .WillOnce(DoDefault()); + FPM.addPass(MFPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + EXPECT_CALL(MFPHandle, run(HasName("f"), _)) + .InSequence(FSequence) + .WillOnce(DoDefault()); + // For 'g', fail to preserve anything causing the loops themselves to be + // cleared. 
We don't get an invalidation event here as the loop is gone, but + // we should still have to recompute the analysis. + EXPECT_CALL(MFPHandle, run(HasName("g"), _)) + .InSequence(GSequence) + .WillOnce(Return(PreservedAnalyses::none())); + EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _)) + .InSequence(GSequence) + .WillOnce(DoDefault()); + FPM.addPass(MFPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); + + // Verify with a separate function pass run that we didn't mess upp 'f's + // cache. No analysis runs should be necessary here. + MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass()))); + + MPM.run(*M, MAM); +} + +TEST_F(LoopPassManagerTest, ModulePassInvalidationOfLoopAnalyses) { + ModulePassManager MPM(true); + ::testing::InSequence MakeExpectationsSequenced; + + // First, force the analysis result to be computed for each loop. + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _)).Times(1); + MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass()))); + + // Walking all the way out and all the way back in doesn't re-run the + // analysis. + MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass()))); + + // But a module pass that doesn't preserve the loop analyses themselves + // invalidates all the way down and forces recomputing. + EXPECT_CALL(MMPHandle, run(_, _)).WillOnce(InvokeWithoutArgs([] { + auto PA = getLoopPassPreservedAnalyses(); + PA.preserve(); + return PA; + })); + // All the loop analyses from both functions get invalidated before we + // recompute anything. + EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.0"), _, _)).Times(1); + // On one loop, again skip the invalidation (as though we did an internal + // update). + EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.1"), _, _)) + .WillOnce(Return(false)); + EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, invalidate(HasName("loop.g.0"), _, _)).Times(1); + // Now all but one of the loops gets re-analyzed. + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _)).Times(1); + MPM.addPass(MMPHandle.getPass()); + MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass()))); + + // Verify that the cached values persist. + MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass()))); + + // Now we fail to preserve the loop analysis and observe that the loop + // analyses are cleared (so no invalidation event) as the loops themselves + // are no longer valid. 
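  // The preserve<> calls inside the MMPHandle lambdas in this test lost their
  // template arguments in this rendering. A plausible reconstruction, based
  // only on the surrounding comments and offered as an assumption rather than
  // as the patch's own code, is that the explicitly preserved analysis is the
  // proxy that keeps the function-level caches alive while the loop analyses
  // are allowed to die:
  //
  //   EXPECT_CALL(MMPHandle, run(_, _)).WillOnce(InvokeWithoutArgs([] {
  //     auto PA = PreservedAnalyses::none();
  //     // Assumed argument: keep the module->function proxy so the function
  //     // analyses are not cleared wholesale; LoopAnalysis itself is *not*
  //     // preserved, so the loops (and all cached loop analyses) go away.
  //     PA.preserve<FunctionAnalysisManagerModuleProxy>();
  //     return PA;
  //   }));
  //
  // The expectations that follow encode the observable consequence: no
  // invalidate() callbacks, just fresh runs of the loop analysis on each loop.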
+ EXPECT_CALL(MMPHandle, run(_, _)).WillOnce(InvokeWithoutArgs([] { + auto PA = PreservedAnalyses::none(); + PA.preserve(); + return PA; + })); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _)).Times(1); + MPM.addPass(MMPHandle.getPass()); + MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass()))); + + // Verify that the cached values persist. + MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass()))); + + // Next, check that even if we preserve everything within the function itelf, + // if the function's module pass proxy isn't preserved and the potential set + // of functions changes, the clear reaches the loop analyses as well. This + // will again trigger re-runs but not invalidation events. + EXPECT_CALL(MMPHandle, run(_, _)).WillOnce(InvokeWithoutArgs([] { + auto PA = PreservedAnalyses::none(); + PA.preserveSet>(); + PA.preserveSet>(); + return PA; + })); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _)).Times(1); + MPM.addPass(MMPHandle.getPass()); + MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass()))); + + MPM.run(*M, MAM); +} + +// Test that if any of the bundled analyses provided in the LPM's signature +// become invalid, the analysis proxy itself becomes invalid and we clear all +// loop analysis results. +TEST_F(LoopPassManagerTest, InvalidationOfBundledAnalyses) { + ModulePassManager MPM(true); + FunctionPassManager FPM(true); + ::testing::InSequence MakeExpectationsSequenced; + + // First, force the analysis result to be computed for each loop. + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)).Times(1); + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + // No need to re-run if we require again from a fresh loop pass manager. + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + // Preserving everything but the loop analyses themselves results in + // invalidation and running. + EXPECT_CALL(MFPHandle, run(HasName("f"), _)) + .WillOnce(Return(getLoopPassPreservedAnalyses())); + EXPECT_CALL(MLAHandle, invalidate(_, _, _)).Times(3); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)).Times(1); + FPM.addPass(MFPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + // The rest don't invalidate, only trigger re-runs because we clear the cache + // completely. + + EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] { + auto PA = PreservedAnalyses::none(); + // Not preserving `AAManager`. 
+ PA.preserve(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + return PA; + })); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)).Times(1); + FPM.addPass(MFPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] { + auto PA = PreservedAnalyses::none(); + PA.preserve(); + // Not preserving `AssumptionAnalysis`. + PA.preserve(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + return PA; + })); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)).Times(1); + FPM.addPass(MFPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] { + auto PA = PreservedAnalyses::none(); + PA.preserve(); + PA.preserve(); + // Not preserving `DominatorTreeAnalysis`. + PA.preserve(); + PA.preserve(); + PA.preserve(); + return PA; + })); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)).Times(1); + FPM.addPass(MFPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] { + auto PA = PreservedAnalyses::none(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + // Not preserving the `LoopAnalysis`. + PA.preserve(); + PA.preserve(); + return PA; + })); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)).Times(1); + FPM.addPass(MFPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] { + auto PA = PreservedAnalyses::none(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + // Not preserving the `LoopAnalysisManagerFunctionProxy`. + PA.preserve(); + return PA; + })); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)).Times(1); + FPM.addPass(MFPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] { + auto PA = PreservedAnalyses::none(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + // Not preserving `ScalarEvolutionAnalysis`. + return PA; + })); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)).Times(1); + FPM.addPass(MFPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + // After all the churn on 'f', we'll compute the loop analysis results for + // 'g' once with a requires pass and then run our mock pass over g a bunch + // but just get cached results each time. 
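  // Each of the MFPHandle lambdas above preserves five of the six analyses
  // exercised here and deliberately drops one; the `// Not preserving ...`
  // comments name the dropped analysis, but the preserve<> template arguments
  // are elided in this rendering. Reconstructed against those comments (an
  // assumption, not copied from the patch), the first lambda, which drops
  // AAManager, reads:
  //
  //   EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] {
  //     auto PA = PreservedAnalyses::none();
  //     // Not preserving `AAManager`.
  //     PA.preserve<AssumptionAnalysis>();
  //     PA.preserve<DominatorTreeAnalysis>();
  //     PA.preserve<LoopAnalysis>();
  //     PA.preserve<LoopAnalysisManagerFunctionProxy>();
  //     PA.preserve<ScalarEvolutionAnalysis>();
  //     return PA;
  //   }));
  //
  // The remaining lambdas rotate which of the six is omitted; in every case
  // the loop analysis results are cleared and have to be recomputed, which is
  // what the repeated MLAHandle run expectations verify. (The 'g' expectations
  // just below are unaffected by any of this churn on 'f'.)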
+ EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _)).Times(1); + EXPECT_CALL(MFPHandle, run(HasName("g"), _)).Times(7); + + MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); + MPM.run(*M, MAM); +} + +TEST_F(LoopPassManagerTest, IndirectInvalidation) { + // We need two distinct analysis types and handles. + enum { A, B }; + MockLoopAnalysisHandleTemplate MLAHandleA; + MockLoopAnalysisHandleTemplate MLAHandleB; + LAM.registerPass([&] { return MLAHandleA.getAnalysis(); }); + LAM.registerPass([&] { return MLAHandleB.getAnalysis(); }); + typedef decltype(MLAHandleA)::Analysis AnalysisA; + typedef decltype(MLAHandleB)::Analysis AnalysisB; + + // Setup our 'A' analysis to depend on our 'B' analysis. For testing purposes + // we just need to trigger getting the 'B' analysis results in 'A's run + // method and check if 'B' gets invalidate in 'A's invalidate method. + ON_CALL(MLAHandleA, run(_, _, _)) + .WillByDefault( + Invoke([&](Loop &L, LoopAnalysisManager &AM, LPMAnalysisResults &AR) { + (void)AM.getResult(L, AR); + return MLAHandleA.getResult(); + })); + ON_CALL(MLAHandleA, invalidate(_, _, _)) + .WillByDefault(Invoke([](Loop &L, const PreservedAnalyses &PA, + LoopAnalysisManager::Invalidator &Inv) { + auto PAC = PA.getChecker(); + return !(PAC.preserved() || PAC.preservedSet>()) || + Inv.invalidate(L, PA); + })); + + ::testing::InSequence MakeExpectationsSequenced; + + // Compute the analyses across all of 'f' first. + EXPECT_CALL(MLAHandleA, run(HasName("loop.0.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandleB, run(HasName("loop.0.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandleA, run(HasName("loop.0.1"), _, _)).Times(1); + EXPECT_CALL(MLAHandleB, run(HasName("loop.0.1"), _, _)).Times(1); + EXPECT_CALL(MLAHandleA, run(HasName("loop.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandleB, run(HasName("loop.0"), _, _)).Times(1); + + // Now we invalidate 'B' (but not 'A') for one of the loops and preserve + // everything for the rest. This in turn triggers that one loop to recompute + // both 'B' *and* 'A' if indirect invalidation is working. + EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) + .WillOnce(InvokeWithoutArgs([] { + auto PA = getLoopPassPreservedAnalyses(); + // Specifically preserve 'A'so that it would survive if it didn't + // depend on 'B'. + PA.preserve(); + return PA; + })); + // It happens that 'B' is invalidated first. That shouldn't matter though and + // we should still call 'A"s invalidation. + EXPECT_CALL(MLAHandleB, invalidate(HasName("loop.0.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandleA, invalidate(HasName("loop.0.0"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) + .WillOnce(Invoke([](Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &) { + (void)AM.getResult(L, AR); + return PreservedAnalyses::all(); + })); + EXPECT_CALL(MLAHandleA, run(HasName("loop.0.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandleB, run(HasName("loop.0.0"), _, _)).Times(1); + // The rest of the loops should run and get cached results. 
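  // For reference, the ON_CALL(MLAHandleA, invalidate(...)) default action set
  // up at the top of this test is the standard idiom for an analysis whose
  // result depends on another analysis. Its template arguments were lost in
  // this rendering, so this is a reconstruction using the test-local AnalysisA
  // and AnalysisB typedefs rather than code copied from the patch:
  //
  //   ON_CALL(MLAHandleA, invalidate(_, _, _))
  //       .WillByDefault(Invoke([](Loop &L, const PreservedAnalyses &PA,
  //                                LoopAnalysisManager::Invalidator &Inv) {
  //         auto PAC = PA.getChecker<AnalysisA>();
  //         // Invalid if 'A' itself is not preserved, or if its dependency
  //         // 'B' has been invalidated (Inv.invalidate recurses into 'B').
  //         return !(PAC.preserved() ||
  //                  PAC.preservedSet<AllAnalysesOn<Loop>>()) ||
  //                Inv.invalidate<AnalysisB>(L, PA);
  //       }));
  //
  // That recursion is why preserving 'A' was not enough for loop.0.0 above
  // once 'B' had been dropped, while the loops below keep their cached results.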
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke([](Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &) { + (void)AM.getResult(L, AR); + return PreservedAnalyses::all(); + })); + EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke([](Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &) { + (void)AM.getResult(L, AR); + return PreservedAnalyses::all(); + })); + + // The run over 'g' should be boring with us just computing the analyses once + // up front and then running loop passes and getting cached results. + EXPECT_CALL(MLAHandleA, run(HasName("loop.g.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandleB, run(HasName("loop.g.0"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.g.0"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke([](Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &) { + (void)AM.getResult(L, AR); + return PreservedAnalyses::all(); + })); + + // Build the pipeline and run it. + ModulePassManager MPM(true); + FunctionPassManager FPM(true); + FPM.addPass( + createFunctionToLoopPassAdaptor(RequireAnalysisLoopPass())); + LoopPassManager LPM(true); + LPM.addPass(MLPHandle.getPass()); + LPM.addPass(MLPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM))); + MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); + MPM.run(*M, MAM); +} + +TEST_F(LoopPassManagerTest, IndirectOuterPassInvalidation) { + typedef decltype(MLAHandle)::Analysis LoopAnalysis; + + MockFunctionAnalysisHandle MFAHandle; + FAM.registerPass([&] { return MFAHandle.getAnalysis(); }); + typedef decltype(MFAHandle)::Analysis FunctionAnalysis; + + // Setup the loop analysis to depend on both the function and module analysis + // by default. + ON_CALL(MLAHandle, run(_, _, _)) + .WillByDefault( + Invoke([&](Loop &L, LoopAnalysisManager &AM, LPMAnalysisResults &AR) { + auto &FAMP = AM.getResult(L, AR); + auto &FAM = FAMP.getManager(); + Function &F = *L.getHeader()->getParent(); + if (auto *FA = FAM.getCachedResult(F)) + FAMP.registerOuterAnalysisInvalidation(); + return MLAHandle.getResult(); + })); + + ::testing::InSequence MakeExpectationsSequenced; + + // Compute the analyses across all of 'f' first. + EXPECT_CALL(MFPHandle, run(HasName("f"), _)) + .WillOnce(Invoke([](Function &F, FunctionAnalysisManager &AM) { + // Force the computing of the function analysis so it is available in + // this function. + (void)AM.getResult(F); + return PreservedAnalyses::all(); + })); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)).Times(1); + + // Now invalidate the function analysis but preserving the loop analyses + // which should trigger immediate invalidation of the loop analyses despite + // being preserved. + EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] { + auto PA = getLoopPassPreservedAnalyses(); + PA.preserveSet>(); + return PA; + })); + EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.1"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0"), _, _)).Times(1); + + // And re-running a requires pass recomputes them. 
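  // The key line in the ON_CALL(MLAHandle, run(...)) default action above is
  // the registerOuterAnalysisInvalidation() call, whose template arguments are
  // elided in this rendering. Reconstructed with the FunctionAnalysis and
  // LoopAnalysis typedefs local to this test (an assumption about the elided
  // arguments), the body reads roughly:
  //
  //   auto &FAMP = AM.getResult<FunctionAnalysisManagerLoopProxy>(L, AR);
  //   auto &FAM = FAMP.getManager();
  //   Function &F = *L.getHeader()->getParent();
  //   if (auto *FA = FAM.getCachedResult<FunctionAnalysis>(F))
  //     // Record that whenever FunctionAnalysis is invalidated, this loop's
  //     // LoopAnalysis result must be invalidated too, even if a pass claims
  //     // to preserve all loop analyses.
  //     FAMP.registerOuterAnalysisInvalidation<FunctionAnalysis, LoopAnalysis>();
  //
  // That registration is why the seemingly preserving function pass above
  // still triggers the three invalidate() calls, and why the requires pass
  // below has to recompute the results.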
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)).Times(1); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)).Times(1); + + // When we run over 'g' we don't populate the cache with the function + // analysis. + EXPECT_CALL(MFPHandle, run(HasName("g"), _)) + .WillOnce(Return(PreservedAnalyses::all())); + EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _)).Times(1); + + // Which means that no extra invalidation occurs and cached values are used. + EXPECT_CALL(MFPHandle, run(HasName("g"), _)).WillOnce(InvokeWithoutArgs([] { + auto PA = getLoopPassPreservedAnalyses(); + PA.preserveSet>(); + return PA; + })); + + // Build the pipeline and run it. + ModulePassManager MPM(true); + FunctionPassManager FPM(true); + FPM.addPass(MFPHandle.getPass()); + FPM.addPass( + createFunctionToLoopPassAdaptor(RequireAnalysisLoopPass())); + FPM.addPass(MFPHandle.getPass()); + FPM.addPass( + createFunctionToLoopPassAdaptor(RequireAnalysisLoopPass())); MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); MPM.run(*M, MAM); +} + +TEST_F(LoopPassManagerTest, LoopChildInsertion) { + // Super boring module with three loops in a single loop nest. + M = parseIR(Context, "define void @f() {\n" + "entry:\n" + " br label %loop.0\n" + "loop.0:\n" + " br i1 undef, label %loop.0.0, label %end\n" + "loop.0.0:\n" + " br i1 undef, label %loop.0.0, label %loop.0.1\n" + "loop.0.1:\n" + " br i1 undef, label %loop.0.1, label %loop.0.2\n" + "loop.0.2:\n" + " br i1 undef, label %loop.0.2, label %loop.0\n" + "end:\n" + " ret void\n" + "}\n"); + + // Build up variables referring into the IR so we can rewrite it below + // easily. + Function &F = *M->begin(); + ASSERT_THAT(F, HasName("f")); + auto BBI = F.begin(); + BasicBlock &EntryBB = *BBI++; + ASSERT_THAT(EntryBB, HasName("entry")); + BasicBlock &Loop0BB = *BBI++; + ASSERT_THAT(Loop0BB, HasName("loop.0")); + BasicBlock &Loop00BB = *BBI++; + ASSERT_THAT(Loop00BB, HasName("loop.0.0")); + BasicBlock &Loop01BB = *BBI++; + ASSERT_THAT(Loop01BB, HasName("loop.0.1")); + BasicBlock &Loop02BB = *BBI++; + ASSERT_THAT(Loop02BB, HasName("loop.0.2")); + BasicBlock &EndBB = *BBI++; + ASSERT_THAT(EndBB, HasName("end")); + ASSERT_THAT(BBI, F.end()); + + // Build the pass managers and register our pipeline. We build a single loop + // pass pipeline consisting of three mock pass runs over each loop. After + // this we run both domtree and loop verification passes to make sure that + // the IR remained valid during our mutations. + ModulePassManager MPM(true); + FunctionPassManager FPM(true); + LoopPassManager LPM(true); + LPM.addPass(MLPHandle.getPass()); + LPM.addPass(MLPHandle.getPass()); + LPM.addPass(MLPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM))); + FPM.addPass(DominatorTreeVerifierPass()); + FPM.addPass(LoopVerifierPass()); + MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); + + // All the visit orders are deterministic so we use simple fully order + // expectations. + ::testing::InSequence MakeExpectationsSequenced; + + // We run loop passes three times over each of the loops. 
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke(getLoopAnalysisResult)); + + EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)).Times(1); - StringRef ExpectedLoops[] = {"loop.0.0", "loop.0.1", "loop.0", "loop.g.0"}; + // When running over the middle loop, the second run inserts two new child + // loops, inserting them and itself into the worklist. + BasicBlock *NewLoop010BB; + EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) + .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &UR) { + auto *NewLoop = new Loop(); + L.addChildLoop(NewLoop); + NewLoop010BB = BasicBlock::Create(Context, "loop.0.1.0", &F, &Loop02BB); + BranchInst::Create(&Loop01BB, NewLoop010BB, + UndefValue::get(Type::getInt1Ty(Context)), + NewLoop010BB); + Loop01BB.getTerminator()->replaceUsesOfWith(&Loop01BB, NewLoop010BB); + AR.DT.addNewBlock(NewLoop010BB, &Loop01BB); + NewLoop->addBasicBlockToLoop(NewLoop010BB, AR.LI); + UR.addChildLoops({NewLoop}); + return PreservedAnalyses::all(); + })); - // Validate the counters and order of loops visited. - // loop.0 has 3 blocks whereas loop.0.0, loop.0.1, and loop.g.0 each have 1. - EXPECT_N_ELEMENTS_EQ(4, ExpectedLoops, VisitedLoops1); - EXPECT_EQ(6, AnalyzedBlockCount1); + // We should immediately drop down to fully visit the new inner loop. + EXPECT_CALL(MLPHandle, run(HasName("loop.0.1.0"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1.0"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.1.0"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke(getLoopAnalysisResult)); - EXPECT_N_ELEMENTS_EQ(4, ExpectedLoops, VisitedLoops2); - // The block from loop.g.0 won't be counted, since it wasn't cached. - EXPECT_EQ(5, AnalyzedBlockCount2); + // After visiting the inner loop, we should re-visit the second loop + // reflecting its new loop nest structure. + EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); - // The first LPM runs the loop analysis for all four loops, the second uses - // cached results for everything. - EXPECT_EQ(4, LoopAnalysisRuns); + // The second run over the middle loop after we've visited the new child, we + // add another child to check that we can repeatedly add children, and add + // children to a loop that already has children. + BasicBlock *NewLoop011BB; + EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) + .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &UR) { + auto *NewLoop = new Loop(); + L.addChildLoop(NewLoop); + NewLoop011BB = BasicBlock::Create(Context, "loop.0.1.1", &F, &Loop02BB); + BranchInst::Create(&Loop01BB, NewLoop011BB, + UndefValue::get(Type::getInt1Ty(Context)), + NewLoop011BB); + NewLoop010BB->getTerminator()->replaceUsesOfWith(&Loop01BB, + NewLoop011BB); + AR.DT.addNewBlock(NewLoop011BB, NewLoop010BB); + NewLoop->addBasicBlockToLoop(NewLoop011BB, AR.LI); + UR.addChildLoops({NewLoop}); + return PreservedAnalyses::all(); + })); + + // Again, we should immediately drop down to visit the new, unvisited child + // loop. We don't need to revisit the other child though. 
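  // Distilling the mutation above, a transform that introduces a loop nested
  // inside the loop currently being processed has three obligations under
  // this interface (outline only; NewHeaderBB and PredBB stand for whatever
  // blocks the transform actually created):
  //
  //   Loop *NewLoop = new Loop();
  //   L.addChildLoop(NewLoop);                          // 1) keep LoopInfo right
  //   NewLoop->addBasicBlockToLoop(NewHeaderBB, AR.LI);
  //   AR.DT.addNewBlock(NewHeaderBB, PredBB);           // 2) keep the DomTree right
  //   UR.addChildLoops({NewLoop});                      // 3) tell the driver
  //
  // Step 3 is what makes the adaptor drop down and run the whole pipeline over
  // the new child first, then requeue the current loop, which is exactly the
  // visit order the expectations below check.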
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.1.1"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1.1"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.1.1"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke(getLoopAnalysisResult)); + + // And now we should pop back up to the second loop and do a full pipeline of + // three passes on its current form. + EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) + .Times(3) + .WillRepeatedly(Invoke(getLoopAnalysisResult)); + + EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.2"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke(getLoopAnalysisResult)); + + EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke(getLoopAnalysisResult)); + + // Now that all the expected actions are registered, run the pipeline over + // our module. All of our expectations are verified when the test finishes. + MPM.run(*M, MAM); +} + +TEST_F(LoopPassManagerTest, LoopPeerInsertion) { + // Super boring module with two loop nests and loop nest with two child + // loops. + M = parseIR(Context, "define void @f() {\n" + "entry:\n" + " br label %loop.0\n" + "loop.0:\n" + " br i1 undef, label %loop.0.0, label %loop.2\n" + "loop.0.0:\n" + " br i1 undef, label %loop.0.0, label %loop.0.2\n" + "loop.0.2:\n" + " br i1 undef, label %loop.0.2, label %loop.0\n" + "loop.2:\n" + " br i1 undef, label %loop.2, label %end\n" + "end:\n" + " ret void\n" + "}\n"); + + // Build up variables referring into the IR so we can rewrite it below + // easily. + Function &F = *M->begin(); + ASSERT_THAT(F, HasName("f")); + auto BBI = F.begin(); + BasicBlock &EntryBB = *BBI++; + ASSERT_THAT(EntryBB, HasName("entry")); + BasicBlock &Loop0BB = *BBI++; + ASSERT_THAT(Loop0BB, HasName("loop.0")); + BasicBlock &Loop00BB = *BBI++; + ASSERT_THAT(Loop00BB, HasName("loop.0.0")); + BasicBlock &Loop02BB = *BBI++; + ASSERT_THAT(Loop02BB, HasName("loop.0.2")); + BasicBlock &Loop2BB = *BBI++; + ASSERT_THAT(Loop2BB, HasName("loop.2")); + BasicBlock &EndBB = *BBI++; + ASSERT_THAT(EndBB, HasName("end")); + ASSERT_THAT(BBI, F.end()); + Constant *Undefi1 = UndefValue::get(Type::getInt1Ty(Context)); + + // Build the pass managers and register our pipeline. We build a single loop + // pass pipeline consisting of three mock pass runs over each loop. After + // this we run both domtree and loop verification passes to make sure that + // the IR remained valid during our mutations. + ModulePassManager MPM(true); + FunctionPassManager FPM(true); + LoopPassManager LPM(true); + LPM.addPass(MLPHandle.getPass()); + LPM.addPass(MLPHandle.getPass()); + LPM.addPass(MLPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM))); + FPM.addPass(DominatorTreeVerifierPass()); + FPM.addPass(LoopVerifierPass()); + MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); + + // All the visit orders are deterministic so we use simple fully order + // expectations. + ::testing::InSequence MakeExpectationsSequenced; + + // We run loop passes three times over each of the loops. 
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)).Times(1); + + // On the second run, we insert a sibling loop. + BasicBlock *NewLoop01BB; + EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) + .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &UR) { + auto *NewLoop = new Loop(); + L.getParentLoop()->addChildLoop(NewLoop); + NewLoop01BB = BasicBlock::Create(Context, "loop.0.1", &F, &Loop02BB); + BranchInst::Create(&Loop02BB, NewLoop01BB, Undefi1, NewLoop01BB); + Loop00BB.getTerminator()->replaceUsesOfWith(&Loop02BB, NewLoop01BB); + auto *NewDTNode = AR.DT.addNewBlock(NewLoop01BB, &Loop00BB); + AR.DT.changeImmediateDominator(AR.DT[&Loop02BB], NewDTNode); + NewLoop->addBasicBlockToLoop(NewLoop01BB, AR.LI); + UR.addSiblingLoops({NewLoop}); + return PreservedAnalyses::all(); + })); + // We finish processing this loop as sibling loops don't perturb the + // postorder walk. + EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + + // We visit the inserted sibling next. + EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke(getLoopAnalysisResult)); + + EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.2"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + // Next, on the third pass run on last inner loop we add more new siblings, + // more than one, and one with nested child loops. By doing this at the end + // we make sure that edge case works well. 
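  // In contrast to UR.addChildLoops above, reporting loops through
  // UR.addSiblingLoops does not interrupt the current loop's pipeline: a
  // sibling sits at the same nesting level as the current loop, so it is
  // appended to the worklist and visited only after the current loop finishes.
  // In outline (names illustrative):
  //
  //   Loop *NewSibling = new Loop();
  //   L.getParentLoop()->addChildLoop(NewSibling);   // same level as L
  //   NewSibling->addBasicBlockToLoop(NewHeaderBB, AR.LI);
  //   UR.addSiblingLoops({NewSibling});              // appended, not descended into
  //
  // Note in the mutation below that only the two new sibling loops are
  // reported; the loop nested inside one of them (loop.0.4.0) is never passed
  // to addSiblingLoops and is instead picked up by the worklist's postorder
  // insertion, which is why it is visited before its parent loop.0.4.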
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _)) + .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &UR) { + Loop *NewLoops[] = {new Loop(), new Loop(), new Loop()}; + L.getParentLoop()->addChildLoop(NewLoops[0]); + L.getParentLoop()->addChildLoop(NewLoops[1]); + NewLoops[1]->addChildLoop(NewLoops[2]); + auto *NewLoop03BB = + BasicBlock::Create(Context, "loop.0.3", &F, &Loop2BB); + auto *NewLoop04BB = + BasicBlock::Create(Context, "loop.0.4", &F, &Loop2BB); + auto *NewLoop040BB = + BasicBlock::Create(Context, "loop.0.4.0", &F, &Loop2BB); + Loop02BB.getTerminator()->replaceUsesOfWith(&Loop0BB, NewLoop03BB); + BranchInst::Create(NewLoop04BB, NewLoop03BB, Undefi1, NewLoop03BB); + BranchInst::Create(&Loop0BB, NewLoop040BB, Undefi1, NewLoop04BB); + BranchInst::Create(NewLoop04BB, NewLoop040BB, Undefi1, NewLoop040BB); + AR.DT.addNewBlock(NewLoop03BB, &Loop02BB); + AR.DT.addNewBlock(NewLoop04BB, NewLoop03BB); + AR.DT.addNewBlock(NewLoop040BB, NewLoop04BB); + NewLoops[0]->addBasicBlockToLoop(NewLoop03BB, AR.LI); + NewLoops[1]->addBasicBlockToLoop(NewLoop04BB, AR.LI); + NewLoops[2]->addBasicBlockToLoop(NewLoop040BB, AR.LI); + UR.addSiblingLoops({NewLoops[0], NewLoops[1]}); + return PreservedAnalyses::all(); + })); + + EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.3"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke(getLoopAnalysisResult)); + + // Note that we need to visit the inner loop of this added sibling before the + // sibling itself! + EXPECT_CALL(MLPHandle, run(HasName("loop.0.4.0"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.4.0"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.4.0"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke(getLoopAnalysisResult)); + + EXPECT_CALL(MLPHandle, run(HasName("loop.0.4"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.4"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.4"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke(getLoopAnalysisResult)); + + // And only now do we visit the outer loop of the nest. + EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)).Times(1); + // On the second pass, we add sibling loops to the outer-most iteration. 
+ EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) + .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &UR) { + auto *NewLoop = new Loop(); + AR.LI.addTopLevelLoop(NewLoop); + auto *NewLoop1BB = BasicBlock::Create(Context, "loop.1", &F, &Loop2BB); + BranchInst::Create(&Loop2BB, NewLoop1BB, Undefi1, NewLoop1BB); + Loop0BB.getTerminator()->replaceUsesOfWith(&Loop2BB, NewLoop1BB); + auto *NewDTNode = AR.DT.addNewBlock(NewLoop1BB, &Loop0BB); + AR.DT.changeImmediateDominator(AR.DT[&Loop2BB], NewDTNode); + NewLoop->addBasicBlockToLoop(NewLoop1BB, AR.LI); + UR.addSiblingLoops({NewLoop}); + return PreservedAnalyses::all(); + })); + EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + + EXPECT_CALL(MLPHandle, run(HasName("loop.1"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.1"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.1"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke(getLoopAnalysisResult)); + + EXPECT_CALL(MLPHandle, run(HasName("loop.2"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.2"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.2"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke(getLoopAnalysisResult)); + + // Now that all the expected actions are registered, run the pipeline over + // our module. All of our expectations are verified when the test finishes. + MPM.run(*M, MAM); +} + +TEST_F(LoopPassManagerTest, LoopDeletion) { + // Build a module with a single loop nest that contains one outer loop with + // three subloops, and one of those with its own subloop. We will + // incrementally delete all of these to test different deletion scenarios. + M = parseIR(Context, "define void @f() {\n" + "entry:\n" + " br label %loop.0\n" + "loop.0:\n" + " br i1 undef, label %loop.0.0, label %end\n" + "loop.0.0:\n" + " br i1 undef, label %loop.0.0, label %loop.0.1\n" + "loop.0.1:\n" + " br i1 undef, label %loop.0.1, label %loop.0.2\n" + "loop.0.2:\n" + " br i1 undef, label %loop.0.2.0, label %loop.0\n" + "loop.0.2.0:\n" + " br i1 undef, label %loop.0.2.0, label %loop.0.2\n" + "end:\n" + " ret void\n" + "}\n"); + + // Build up variables referring into the IR so we can rewrite it below + // easily. + Function &F = *M->begin(); + ASSERT_THAT(F, HasName("f")); + auto BBI = F.begin(); + BasicBlock &EntryBB = *BBI++; + ASSERT_THAT(EntryBB, HasName("entry")); + BasicBlock &Loop0BB = *BBI++; + ASSERT_THAT(Loop0BB, HasName("loop.0")); + BasicBlock &Loop00BB = *BBI++; + ASSERT_THAT(Loop00BB, HasName("loop.0.0")); + BasicBlock &Loop01BB = *BBI++; + ASSERT_THAT(Loop01BB, HasName("loop.0.1")); + BasicBlock &Loop02BB = *BBI++; + ASSERT_THAT(Loop02BB, HasName("loop.0.2")); + BasicBlock &Loop020BB = *BBI++; + ASSERT_THAT(Loop020BB, HasName("loop.0.2.0")); + BasicBlock &EndBB = *BBI++; + ASSERT_THAT(EndBB, HasName("end")); + ASSERT_THAT(BBI, F.end()); + Constant *Undefi1 = UndefValue::get(Type::getInt1Ty(Context)); + + // Helper to do the actual deletion of a loop. We directly encode this here + // to isolate ourselves from the rest of LLVM and for simplicity. Here we can + // egregiously cheat based on knowledge of the test case. For example, we + // have no PHI nodes and there is always a single i-dom. 
+ auto DeleteLoopBlocks = [](Loop &L, BasicBlock &IDomBB, + LPMAnalysisResults &AR) { + for (BasicBlock *LoopBB : L.blocks()) { + SmallVector ChildNodes(AR.DT[LoopBB]->begin(), + AR.DT[LoopBB]->end()); + for (DomTreeNode *ChildNode : ChildNodes) + AR.DT.changeImmediateDominator(ChildNode, AR.DT[&IDomBB]); + AR.DT.eraseNode(LoopBB); + LoopBB->dropAllReferences(); + } + for (BasicBlock *LoopBB : L.blocks()) + LoopBB->eraseFromParent(); + SmallVector LoopBBs(L.block_begin(), L.block_end()); + for (auto *LoopBB : LoopBBs) + AR.LI.removeBlock(LoopBB); + }; + + // Build up the pass managers. + ModulePassManager MPM(true); + FunctionPassManager FPM(true); + // We run several loop pass pipelines across the loop nest, but they all take + // the same form of three mock pass runs in a loop pipeline followed by + // domtree and loop verification. We use a lambda to stamp this out each + // time. + auto AddLoopPipelineAndVerificationPasses = [&] { + LoopPassManager LPM(true); + LPM.addPass(MLPHandle.getPass()); + LPM.addPass(MLPHandle.getPass()); + LPM.addPass(MLPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM))); + FPM.addPass(DominatorTreeVerifierPass()); + FPM.addPass(LoopVerifierPass()); + }; + + // All the visit orders are deterministic so we use simple fully order + // expectations. + ::testing::InSequence MakeExpectationsSequenced; + + // We run the loop pipeline with three passes over each of the loops. When + // running over the middle loop, the second pass in the pipeline deletes it. + // This should prevent the third pass from visiting it but otherwise leave + // the process unimpacted. + AddLoopPipelineAndVerificationPasses(); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke(getLoopAnalysisResult)); + + EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) + .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &UR) { + AR.SE.forgetLoop(&L); + Loop00BB.getTerminator()->replaceUsesOfWith(&Loop01BB, &Loop02BB); + DeleteLoopBlocks(L, Loop00BB, AR); + UR.markLoopAsDeleted(L); + AR.LI.markAsRemoved(&L); + return PreservedAnalyses::all(); + })); + + EXPECT_CALL(MLPHandle, run(HasName("loop.0.2.0"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.2.0"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.2.0"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke(getLoopAnalysisResult)); + + EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.2"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke(getLoopAnalysisResult)); + + EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke(getLoopAnalysisResult)); + + // Run the loop pipeline again. 
This time we delete the last loop which + // contains a nested loop within it, and we re-use that loop object to insert + // a new loop into the nest. This makes sure that we don't reuse cached + // analysis results for loop objects when removed just because their pointers + // match, and that we can handle nested loop deletion. + AddLoopPipelineAndVerificationPasses(); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) + .Times(3) + .WillRepeatedly(Invoke(getLoopAnalysisResult)); + + EXPECT_CALL(MLPHandle, run(HasName("loop.0.2.0"), _, _, _)) + .Times(3) + .WillRepeatedly(Invoke(getLoopAnalysisResult)); + + EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + BasicBlock *NewLoop03BB; + EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _)) + .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &UR) { + // Delete the inner loop first. + AR.SE.forgetLoop(*L.begin()); + Loop02BB.getTerminator()->replaceUsesOfWith(&Loop020BB, &Loop02BB); + DeleteLoopBlocks(**L.begin(), Loop02BB, AR); + // Remove and save the child loop object to re-use later. + UR.markLoopAsDeleted(**L.begin()); + auto *OldL = L.removeChildLoop(L.begin()); + + auto *ParentL = L.getParentLoop(); + AR.SE.forgetLoop(&L); + Loop00BB.getTerminator()->replaceUsesOfWith(&Loop02BB, &Loop0BB); + DeleteLoopBlocks(L, Loop00BB, AR); + UR.markLoopAsDeleted(L); + AR.LI.markAsRemoved(&L); + + // Now insert a new sibling loop re-using a loop pointer. + ParentL->addChildLoop(OldL); + NewLoop03BB = BasicBlock::Create(Context, "loop.0.3", &F, &EndBB); + BranchInst::Create(&Loop0BB, NewLoop03BB, Undefi1, NewLoop03BB); + Loop00BB.getTerminator()->replaceUsesOfWith(&Loop0BB, NewLoop03BB); + AR.DT.addNewBlock(NewLoop03BB, &Loop00BB); + OldL->addBasicBlockToLoop(NewLoop03BB, AR.LI); + UR.addSiblingLoops({OldL}); + return PreservedAnalyses::all(); + })); + + // We should visit the newly inserted sibling to the just deleted loop first + // here as it is still a postorder constraint prior to visiting the outer + // loop. It is important that this computes a fresh analysis result rather + // than using a cached result due to the same loop object acting as a key. + EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.3"), _, _)).Times(1); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke(getLoopAnalysisResult)); + + EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) + .Times(3) + .WillRepeatedly(Invoke(getLoopAnalysisResult)); + + // In the final loop pipeline run we delete every loop including the last + // loop of the nest. We do this again in the second pass in the pipeline and + // as a consequence we never make it to three runs on any loop. We also cover + // deleting multiple loops in a single pipeline, deleting the first loop and + // deleting the (last) top level loop. 
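  // The deletion lambdas in this test all follow the same contract, which is
  // what a real loop-deletion transform would have to do under this interface
  // (outline only; DeleteLoopBlocks is the test-local helper defined above):
  //
  //   AR.SE.forgetLoop(&L);            // drop SCEV state for the doomed loop
  //   /* rewrite branches around L */  // keep the CFG consistent
  //   DeleteLoopBlocks(L, IDomBB, AR); // erase blocks, fix up the DomTree
  //   UR.markLoopAsDeleted(L);         // stop the pipeline/worklist for L
  //   AR.LI.markAsRemoved(&L);         // let LoopInfo reclaim the Loop object
  //
  // markLoopAsDeleted is the piece that prevents the third mock pass in each
  // pipeline from ever running on a deleted loop, which the expectations below
  // rely on.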
+ AddLoopPipelineAndVerificationPasses(); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) + .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &UR) { + AR.SE.forgetLoop(&L); + Loop0BB.getTerminator()->replaceUsesOfWith(&Loop00BB, NewLoop03BB); + DeleteLoopBlocks(L, Loop0BB, AR); + UR.markLoopAsDeleted(L); + AR.LI.markAsRemoved(&L); + return PreservedAnalyses::all(); + })); + + EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _)) + .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &UR) { + AR.SE.forgetLoop(&L); + Loop0BB.getTerminator()->replaceUsesOfWith(NewLoop03BB, &Loop0BB); + DeleteLoopBlocks(L, Loop0BB, AR); + UR.markLoopAsDeleted(L); + AR.LI.markAsRemoved(&L); + return PreservedAnalyses::all(); + })); + + EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) + .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM, + LPMAnalysisResults &AR, LPMUpdateResult &UR) { + AR.SE.forgetLoop(&L); + EntryBB.getTerminator()->replaceUsesOfWith(&Loop0BB, &EndBB); + DeleteLoopBlocks(L, EntryBB, AR); + UR.markLoopAsDeleted(L); + AR.LI.markAsRemoved(&L); + return PreservedAnalyses::all(); + })); + + // Add the function pass pipeline now that it is fully built up and run it + // over the module's one function. + MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); + MPM.run(*M, MAM); } }
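// Closing illustration (not part of the patch): stripped of the gmock
// machinery, standing up this loop pipeline by hand mirrors the test fixture
// above. SomeLoopAnalysis is a placeholder for any registered loop analysis,
// and the RequireAnalysisLoopPass template argument is shown explicitly only
// because the angle-bracket contents are elided throughout this rendering.
//
//   LoopAnalysisManager LAM(/*DebugLogging*/ true);
//   FunctionAnalysisManager FAM(true);
//   ModuleAnalysisManager MAM(true);
//   LAM.registerPass([&] { return SomeLoopAnalysis(); });
//   FAM.registerPass([&] { return DominatorTreeAnalysis(); });
//   FAM.registerPass([&] { return LoopAnalysis(); });
//   FAM.registerPass([&] { return AAManager(); });
//   FAM.registerPass([&] { return AssumptionAnalysis(); });
//   FAM.registerPass([&] { return ScalarEvolutionAnalysis(); });
//   FAM.registerPass([&] { return TargetLibraryAnalysis(); });
//   FAM.registerPass([&] { return TargetIRAnalysis(); });
//   LAM.registerPass([&] { return FunctionAnalysisManagerLoopProxy(FAM); });
//   FAM.registerPass([&] { return LoopAnalysisManagerFunctionProxy(LAM); });
//   FAM.registerPass([&] { return ModuleAnalysisManagerFunctionProxy(MAM); });
//   MAM.registerPass([&] { return FunctionAnalysisManagerModuleProxy(FAM); });
//
//   LoopPassManager LPM(true);
//   LPM.addPass(RequireAnalysisLoopPass<SomeLoopAnalysis>());
//   ModulePassManager MPM(true);
//   MPM.addPass(createModuleToFunctionPassAdaptor(
//       createFunctionToLoopPassAdaptor(std::move(LPM))));
//   MPM.run(M, MAM);  // M is the Module to process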