Index: llvm/trunk/include/llvm/Analysis/IVUsers.h
===================================================================
--- llvm/trunk/include/llvm/Analysis/IVUsers.h
+++ llvm/trunk/include/llvm/Analysis/IVUsers.h
@@ -15,8 +15,8 @@
 #ifndef LLVM_ANALYSIS_IVUSERS_H
 #define LLVM_ANALYSIS_IVUSERS_H
 
+#include "llvm/Analysis/LoopAnalysisManager.h"
 #include "llvm/Analysis/LoopPass.h"
-#include "llvm/Analysis/LoopPassManager.h"
 #include "llvm/Analysis/ScalarEvolutionNormalization.h"
 #include "llvm/IR/ValueHandle.h"
 
@@ -197,15 +197,6 @@
               LoopStandardAnalysisResults &AR);
 };
 
-/// Printer pass for the \c IVUsers for a loop.
-class IVUsersPrinterPass : public PassInfoMixin<IVUsersPrinterPass> {
-  raw_ostream &OS;
-
-public:
-  explicit IVUsersPrinterPass(raw_ostream &OS) : OS(OS) {}
-  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
-                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
-};
 }
 
 #endif
Index: llvm/trunk/include/llvm/Analysis/LoopAccessAnalysis.h
===================================================================
--- llvm/trunk/include/llvm/Analysis/LoopAccessAnalysis.h
+++ llvm/trunk/include/llvm/Analysis/LoopAccessAnalysis.h
@@ -20,7 +20,7 @@
 #include "llvm/ADT/SetVector.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/AliasSetTracker.h"
-#include "llvm/Analysis/LoopPassManager.h"
+#include "llvm/Analysis/LoopAnalysisManager.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
 #include "llvm/IR/DiagnosticInfo.h"
 #include "llvm/IR/ValueHandle.h"
@@ -757,17 +757,6 @@
   Result run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR);
 };
 
-/// \brief Printer pass for the \c LoopAccessInfo results.
-class LoopAccessInfoPrinterPass
-    : public PassInfoMixin<LoopAccessInfoPrinterPass> {
-  raw_ostream &OS;
-
-public:
-  explicit LoopAccessInfoPrinterPass(raw_ostream &OS) : OS(OS) {}
-  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
-                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
-};
-
 inline Instruction *MemoryDepChecker::Dependence::getSource(
     const LoopAccessInfo &LAI) const {
   return LAI.getDepChecker().getMemoryInstructions()[Source];
Index: llvm/trunk/include/llvm/Analysis/LoopAnalysisManager.h
===================================================================
--- llvm/trunk/include/llvm/Analysis/LoopAnalysisManager.h
+++ llvm/trunk/include/llvm/Analysis/LoopAnalysisManager.h
@@ -0,0 +1,155 @@
+//===- LoopAnalysisManager.h - Loop analysis management ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This header provides classes for managing per-loop analyses. These are
+/// typically used as part of a loop pass pipeline over the loop nests of
+/// a function.
+///
+/// Loop analyses are allowed to make some simplifying assumptions:
+/// 1) Loops are, where possible, in simplified form.
+/// 2) Loops are *always* in LCSSA form.
+/// 3) A collection of analysis results are available:
+///    - LoopInfo
+///    - DominatorTree
+///    - ScalarEvolution
+///    - AAManager
+///
+/// The primary mechanism to provide these invariants is the loop pass manager,
+/// but they can also be manually provided in order to reason about a loop from
+/// outside of a dedicated pass manager.
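[Editorial aside, not part of the patch.] As a brief illustration of that last point, the sketch below shows how the standard results might be provided by hand in order to query a loop analysis outside of any loop pass manager. It assumes AA, AC, DT, LI, SE, TLI, TTI, and the loop L have already been obtained for the enclosing function, and it uses IVUsersAnalysis only as an example of an analysis registered with the loop analysis manager.

  // Editorial sketch -- manual use of the loop analysis manager.
  LoopAnalysisManager LAM(/*DebugLogging=*/false);
  LAM.registerPass([] { return IVUsersAnalysis(); });

  // Bundle the function-level results loop analyses are allowed to assume.
  LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI};

  // Any registered loop analysis can now be computed for a loop that is in
  // simplified and LCSSA form.
  IVUsers &IVU = LAM.getResult<IVUsersAnalysis>(L, AR);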
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LOOPANALYSISMANAGER_H
+#define LLVM_ANALYSIS_LOOPANALYSISMANAGER_H
+
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/PriorityWorklist.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/BasicAliasAnalysis.h"
+#include "llvm/Analysis/GlobalsModRef.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// The adaptor from a function pass to a loop pass computes these analyses and
+/// makes them available to the loop passes "for free". Each loop pass is
+/// expected to update these analyses if necessary to ensure they're valid
+/// after it runs.
+struct LoopStandardAnalysisResults {
+  AAResults &AA;
+  AssumptionCache &AC;
+  DominatorTree &DT;
+  LoopInfo &LI;
+  ScalarEvolution &SE;
+  TargetLibraryInfo &TLI;
+  TargetTransformInfo &TTI;
+};
+
+/// Extern template declaration for the analysis set for this IR unit.
+extern template class AllAnalysesOn<Loop>;
+
+extern template class AnalysisManager<Loop, LoopStandardAnalysisResults &>;
+/// \brief The loop analysis manager.
+///
+/// See the documentation for the AnalysisManager template for detailed
+/// documentation. This typedef serves as a convenient way to refer to this
+/// construct in the adaptors and proxies used to integrate this into the
+/// larger pass manager infrastructure.
+typedef AnalysisManager<Loop, LoopStandardAnalysisResults &>
+    LoopAnalysisManager;
+
+/// A proxy from a \c LoopAnalysisManager to a \c Function.
+typedef InnerAnalysisManagerProxy<LoopAnalysisManager, Function>
+    LoopAnalysisManagerFunctionProxy;
+
+/// A specialized result for the \c LoopAnalysisManagerFunctionProxy which
+/// retains a \c LoopInfo reference.
+///
+/// This allows it to collect loop objects for which analysis results may be
+/// cached in the \c LoopAnalysisManager.
+template <> class LoopAnalysisManagerFunctionProxy::Result {
+public:
+  explicit Result(LoopAnalysisManager &InnerAM, LoopInfo &LI)
+      : InnerAM(&InnerAM), LI(&LI) {}
+  Result(Result &&Arg) : InnerAM(std::move(Arg.InnerAM)), LI(Arg.LI) {
+    // We have to null out the analysis manager in the moved-from state
+    // because we are taking ownership of the responsibility to clear the
+    // analysis state.
+    Arg.InnerAM = nullptr;
+  }
+  Result &operator=(Result &&RHS) {
+    InnerAM = RHS.InnerAM;
+    LI = RHS.LI;
+    // We have to null out the analysis manager in the moved-from state
+    // because we are taking ownership of the responsibility to clear the
+    // analysis state.
+    RHS.InnerAM = nullptr;
+    return *this;
+  }
+  ~Result() {
+    // InnerAM is cleared in a moved-from state where there is nothing to do.
+    if (!InnerAM)
+      return;
+
+    // Clear out the analysis manager if we're being destroyed -- it means we
+    // didn't even see an invalidate call when we got invalidated.
+    InnerAM->clear();
+  }
+
+  /// Accessor for the analysis manager.
+  LoopAnalysisManager &getManager() { return *InnerAM; }
+
+  /// Handler for invalidation of the proxy for a particular function.
+  ///
+  /// If the proxy, \c LoopInfo, and associated analyses are preserved, this
+  /// will merely forward the invalidation event to any cached loop analysis
+  /// results for loops within this function.
+  ///
+  /// If the necessary loop infrastructure is not preserved, this will forcibly
+  /// clear all of the cached analysis results that are keyed on the \c
+  /// LoopInfo for this function.
+  bool invalidate(Function &F, const PreservedAnalyses &PA,
+                  FunctionAnalysisManager::Invalidator &Inv);
+
+private:
+  LoopAnalysisManager *InnerAM;
+  LoopInfo *LI;
+};
+
+/// Provide a specialized run method for the \c LoopAnalysisManagerFunctionProxy
+/// so it can pass the \c LoopInfo to the result.
+template <>
+LoopAnalysisManagerFunctionProxy::Result
+LoopAnalysisManagerFunctionProxy::run(Function &F, FunctionAnalysisManager &AM);
+
+// Ensure the \c LoopAnalysisManagerFunctionProxy is provided as an extern
+// template.
+extern template class InnerAnalysisManagerProxy<LoopAnalysisManager, Function>;
+
+extern template class OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop,
+                                                LoopStandardAnalysisResults &>;
+/// A proxy from a \c FunctionAnalysisManager to a \c Loop.
+typedef OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop,
+                                  LoopStandardAnalysisResults &>
+    FunctionAnalysisManagerLoopProxy;
+
+/// Returns the minimum set of Analyses that all loop passes must preserve.
+PreservedAnalyses getLoopPassPreservedAnalyses();
+}
+
+#endif // LLVM_ANALYSIS_LOOPANALYSISMANAGER_H
Index: llvm/trunk/include/llvm/Analysis/LoopPassManager.h
===================================================================
--- llvm/trunk/include/llvm/Analysis/LoopPassManager.h
+++ llvm/trunk/include/llvm/Analysis/LoopPassManager.h
@@ -1,471 +0,0 @@
-//===- LoopPassManager.h - Loop pass management -----------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// This header provides classes for managing a pipeline of passes over loops
-/// in LLVM IR.
-///
-/// The primary loop pass pipeline is managed in a very particular way to
-/// provide a set of core guarantees:
-/// 1) Loops are, where possible, in simplified form.
-/// 2) Loops are *always* in LCSSA form.
-/// 3) A collection of Loop-specific analysis results are available:
-///    - LoopInfo
-///    - DominatorTree
-///    - ScalarEvolution
-///    - AAManager
-/// 4) All loop passes preserve #1 (where possible), #2, and #3.
-/// 5) Loop passes run over each loop in the loop nest from the innermost to
-///    the outermost. Specifically, all inner loops are processed before
-///    passes run over outer loops. When running the pipeline across an inner
-///    loop creates new inner loops, those are added and processed in this
-///    order as well.
-///
-/// This process is designed to facilitate transformations which simplify,
-/// reduce, and remove loops. For passes which are more oriented towards
-/// optimizing loops, especially optimizing loop *nests* instead of single
-/// loops in isolation, this framework is less interesting.
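[Editorial aside, not part of the patch.] The guarantees listed in the (now deleted) header text above are what a loop pass relies on; a pass written against this interface has roughly the shape sketched here. MyLoopPass and its tryToSimplify helper are hypothetical, while the run signature and getLoopPassPreservedAnalyses() follow the declarations in the new headers.

  // Editorial sketch -- the shape of a loop pass under this infrastructure.
  struct MyLoopPass : PassInfoMixin<MyLoopPass> {
    PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                          LoopStandardAnalysisResults &AR, LPMUpdater &U) {
      bool Changed = tryToSimplify(L, AR.SE); // hypothetical helper
      if (!Changed)
        return PreservedAnalyses::all();
      // A pass that changed the IR reports only the set every loop pass is
      // required to maintain.
      return getLoopPassPreservedAnalyses();
    }
  };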
-/// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_ANALYSIS_LOOPPASSMANAGER_H -#define LLVM_ANALYSIS_LOOPPASSMANAGER_H - -#include "llvm/ADT/PostOrderIterator.h" -#include "llvm/ADT/PriorityWorklist.h" -#include "llvm/ADT/STLExtras.h" -#include "llvm/Analysis/AliasAnalysis.h" -#include "llvm/Analysis/BasicAliasAnalysis.h" -#include "llvm/Analysis/GlobalsModRef.h" -#include "llvm/Analysis/LoopInfo.h" -#include "llvm/Analysis/ScalarEvolution.h" -#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h" -#include "llvm/Analysis/TargetLibraryInfo.h" -#include "llvm/Analysis/TargetTransformInfo.h" -#include "llvm/IR/Dominators.h" -#include "llvm/IR/PassManager.h" - -namespace llvm { - -// Forward declarations of a update tracking and analysis result tracking -// structures used in the API of loop passes that work within this -// infrastructure. -class LPMUpdater; -struct LoopStandardAnalysisResults; - -/// Extern template declaration for the analysis set for this IR unit. -extern template class AllAnalysesOn; - -extern template class AnalysisManager; -/// \brief The loop analysis manager. -/// -/// See the documentation for the AnalysisManager template for detail -/// documentation. This typedef serves as a convenient way to refer to this -/// construct in the adaptors and proxies used to integrate this into the larger -/// pass manager infrastructure. -typedef AnalysisManager - LoopAnalysisManager; - -// Explicit specialization and instantiation declarations for the pass manager. -// See the comments on the definition of the specialization for details on how -// it differs from the primary template. -template <> -PreservedAnalyses -PassManager::run(Loop &InitialL, LoopAnalysisManager &AM, - LoopStandardAnalysisResults &AnalysisResults, - LPMUpdater &U); -extern template class PassManager; - -/// \brief The Loop pass manager. -/// -/// See the documentation for the PassManager template for details. It runs -/// a sequence of Loop passes over each Loop that the manager is run over. This -/// typedef serves as a convenient way to refer to this construct. -typedef PassManager - LoopPassManager; - -/// A partial specialization of the require analysis template pass to forward -/// the extra parameters from a transformation's run method to the -/// AnalysisManager's getResult. -template -struct RequireAnalysisPass - : PassInfoMixin< - RequireAnalysisPass> { - PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, - LoopStandardAnalysisResults &AR, LPMUpdater &) { - (void)AM.template getResult(L, AR); - return PreservedAnalyses::all(); - } -}; - -/// An alias template to easily name a require analysis loop pass. -template -using RequireAnalysisLoopPass = - RequireAnalysisPass; - -/// A proxy from a \c LoopAnalysisManager to a \c Function. -typedef InnerAnalysisManagerProxy - LoopAnalysisManagerFunctionProxy; - -/// A specialized result for the \c LoopAnalysisManagerFunctionProxy which -/// retains a \c LoopInfo reference. -/// -/// This allows it to collect loop objects for which analysis results may be -/// cached in the \c LoopAnalysisManager. -template <> class LoopAnalysisManagerFunctionProxy::Result { -public: - explicit Result(LoopAnalysisManager &InnerAM, LoopInfo &LI) - : InnerAM(&InnerAM), LI(&LI) {} - Result(Result &&Arg) : InnerAM(std::move(Arg.InnerAM)), LI(Arg.LI) { - // We have to null out the analysis manager in the moved-from state - // because we are taking ownership of the responsibilty to clear the - // analysis state. 
- Arg.InnerAM = nullptr; - } - Result &operator=(Result &&RHS) { - InnerAM = RHS.InnerAM; - LI = RHS.LI; - // We have to null out the analysis manager in the moved-from state - // because we are taking ownership of the responsibilty to clear the - // analysis state. - RHS.InnerAM = nullptr; - return *this; - } - ~Result() { - // InnerAM is cleared in a moved from state where there is nothing to do. - if (!InnerAM) - return; - - // Clear out the analysis manager if we're being destroyed -- it means we - // didn't even see an invalidate call when we got invalidated. - InnerAM->clear(); - } - - /// Accessor for the analysis manager. - LoopAnalysisManager &getManager() { return *InnerAM; } - - /// Handler for invalidation of the proxy for a particular function. - /// - /// If the proxy, \c LoopInfo, and associated analyses are preserved, this - /// will merely forward the invalidation event to any cached loop analysis - /// results for loops within this function. - /// - /// If the necessary loop infrastructure is not preserved, this will forcibly - /// clear all of the cached analysis results that are keyed on the \c - /// LoopInfo for this function. - bool invalidate(Function &F, const PreservedAnalyses &PA, - FunctionAnalysisManager::Invalidator &Inv); - -private: - LoopAnalysisManager *InnerAM; - LoopInfo *LI; -}; - -/// Provide a specialized run method for the \c LoopAnalysisManagerFunctionProxy -/// so it can pass the \c LoopInfo to the result. -template <> -LoopAnalysisManagerFunctionProxy::Result -LoopAnalysisManagerFunctionProxy::run(Function &F, FunctionAnalysisManager &AM); - -// Ensure the \c LoopAnalysisManagerFunctionProxy is provided as an extern -// template. -extern template class InnerAnalysisManagerProxy; - -extern template class OuterAnalysisManagerProxy; -/// A proxy from a \c FunctionAnalysisManager to a \c Loop. -typedef OuterAnalysisManagerProxy - FunctionAnalysisManagerLoopProxy; - -/// Returns the minimum set of Analyses that all loop passes must preserve. -PreservedAnalyses getLoopPassPreservedAnalyses(); - -namespace internal { -/// Helper to implement appending of loops onto a worklist. -/// -/// We want to process loops in postorder, but the worklist is a LIFO data -/// structure, so we append to it in *reverse* postorder. -/// -/// For trees, a preorder traversal is a viable reverse postorder, so we -/// actually append using a preorder walk algorithm. -template -inline void appendLoopsToWorklist(RangeT &&Loops, - SmallPriorityWorklist &Worklist) { - // We use an internal worklist to build up the preorder traversal without - // recursion. - SmallVector PreOrderLoops, PreOrderWorklist; - - // We walk the initial sequence of loops in reverse because we generally want - // to visit defs before uses and the worklist is LIFO. - for (Loop *RootL : reverse(Loops)) { - assert(PreOrderLoops.empty() && "Must start with an empty preorder walk."); - assert(PreOrderWorklist.empty() && - "Must start with an empty preorder walk worklist."); - PreOrderWorklist.push_back(RootL); - do { - Loop *L = PreOrderWorklist.pop_back_val(); - PreOrderWorklist.append(L->begin(), L->end()); - PreOrderLoops.push_back(L); - } while (!PreOrderWorklist.empty()); - - Worklist.insert(std::move(PreOrderLoops)); - PreOrderLoops.clear(); - } -} -} - -/// The adaptor from a function pass to a loop pass directly computes -/// a standard set of analyses that are especially useful to loop passes and -/// makes them available in the API. 
Loop passes are also expected to update -/// all of these so that they remain correct across the entire loop pipeline. -struct LoopStandardAnalysisResults { - AAResults &AA; - AssumptionCache &AC; - DominatorTree &DT; - LoopInfo &LI; - ScalarEvolution &SE; - TargetLibraryInfo &TLI; - TargetTransformInfo &TTI; -}; - -template class FunctionToLoopPassAdaptor; - -/// This class provides an interface for updating the loop pass manager based -/// on mutations to the loop nest. -/// -/// A reference to an instance of this class is passed as an argument to each -/// Loop pass, and Loop passes should use it to update LPM infrastructure if -/// they modify the loop nest structure. -class LPMUpdater { -public: - /// This can be queried by loop passes which run other loop passes (like pass - /// managers) to know whether the loop needs to be skipped due to updates to - /// the loop nest. - /// - /// If this returns true, the loop object may have been deleted, so passes - /// should take care not to touch the object. - bool skipCurrentLoop() const { return SkipCurrentLoop; } - - /// Loop passes should use this method to indicate they have deleted a loop - /// from the nest. - /// - /// Note that this loop must either be the current loop or a subloop of the - /// current loop. This routine must be called prior to removing the loop from - /// the loop nest. - /// - /// If this is called for the current loop, in addition to clearing any - /// state, this routine will mark that the current loop should be skipped by - /// the rest of the pass management infrastructure. - void markLoopAsDeleted(Loop &L) { - LAM.clear(L); - assert(CurrentL->contains(&L) && "Cannot delete a loop outside of the " - "subloop tree currently being processed."); - if (&L == CurrentL) - SkipCurrentLoop = true; - } - - /// Loop passes should use this method to indicate they have added new child - /// loops of the current loop. - /// - /// \p NewChildLoops must contain only the immediate children. Any nested - /// loops within them will be visited in postorder as usual for the loop pass - /// manager. - void addChildLoops(ArrayRef NewChildLoops) { - // Insert ourselves back into the worklist first, as this loop should be - // revisited after all the children have been processed. - Worklist.insert(CurrentL); - -#ifndef NDEBUG - for (Loop *NewL : NewChildLoops) - assert(NewL->getParentLoop() == CurrentL && "All of the new loops must " - "be immediate children of " - "the current loop!"); -#endif - - internal::appendLoopsToWorklist(NewChildLoops, Worklist); - - // Also skip further processing of the current loop--it will be revisited - // after all of its newly added children are accounted for. - SkipCurrentLoop = true; - } - - /// Loop passes should use this method to indicate they have added new - /// sibling loops to the current loop. - /// - /// \p NewSibLoops must only contain the immediate sibling loops. Any nested - /// loops within them will be visited in postorder as usual for the loop pass - /// manager. - void addSiblingLoops(ArrayRef NewSibLoops) { -#ifndef NDEBUG - for (Loop *NewL : NewSibLoops) - assert(NewL->getParentLoop() == ParentL && - "All of the new loops must be siblings of the current loop!"); -#endif - - internal::appendLoopsToWorklist(NewSibLoops, Worklist); - - // No need to skip the current loop or revisit it, as sibling loops - // shouldn't impact anything. - } - -private: - template friend class llvm::FunctionToLoopPassAdaptor; - - /// The \c FunctionToLoopPassAdaptor's worklist of loops to process. 
- SmallPriorityWorklist &Worklist; - - /// The analysis manager for use in the current loop nest. - LoopAnalysisManager &LAM; - - Loop *CurrentL; - bool SkipCurrentLoop; - -#ifndef NDEBUG - // In debug builds we also track the parent loop to implement asserts even in - // the face of loop deletion. - Loop *ParentL; -#endif - - LPMUpdater(SmallPriorityWorklist &Worklist, - LoopAnalysisManager &LAM) - : Worklist(Worklist), LAM(LAM) {} -}; - -/// \brief Adaptor that maps from a function to its loops. -/// -/// Designed to allow composition of a LoopPass(Manager) and a -/// FunctionPassManager. Note that if this pass is constructed with a \c -/// FunctionAnalysisManager it will run the \c LoopAnalysisManagerFunctionProxy -/// analysis prior to running the loop passes over the function to enable a \c -/// LoopAnalysisManager to be used within this run safely. -template -class FunctionToLoopPassAdaptor - : public PassInfoMixin> { -public: - explicit FunctionToLoopPassAdaptor(LoopPassT Pass) - : Pass(std::move(Pass)) {} - - /// \brief Runs the loop passes across every loop in the function. - PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) { - // Setup the loop analysis manager from its proxy. - LoopAnalysisManager &LAM = - AM.getResult(F).getManager(); - // Get the loop structure for this function - LoopInfo &LI = AM.getResult(F); - - // If there are no loops, there is nothing to do here. - if (LI.empty()) - return PreservedAnalyses::all(); - - // Get the analysis results needed by loop passes. - LoopStandardAnalysisResults LAR = {AM.getResult(F), - AM.getResult(F), - AM.getResult(F), - AM.getResult(F), - AM.getResult(F), - AM.getResult(F), - AM.getResult(F)}; - - PreservedAnalyses PA = PreservedAnalyses::all(); - - // A postorder worklist of loops to process. - SmallPriorityWorklist Worklist; - - // Register the worklist and loop analysis manager so that loop passes can - // update them when they mutate the loop nest structure. - LPMUpdater Updater(Worklist, LAM); - - // Add the loop nests in the reverse order of LoopInfo. For some reason, - // they are stored in RPO w.r.t. the control flow graph in LoopInfo. For - // the purpose of unrolling, loop deletion, and LICM, we largely want to - // work forward across the CFG so that we visit defs before uses and can - // propagate simplifications from one loop nest into the next. - // FIXME: Consider changing the order in LoopInfo. - internal::appendLoopsToWorklist(reverse(LI), Worklist); - - do { - Loop *L = Worklist.pop_back_val(); - - // Reset the update structure for this loop. - Updater.CurrentL = L; - Updater.SkipCurrentLoop = false; -#ifndef NDEBUG - Updater.ParentL = L->getParentLoop(); -#endif - - PreservedAnalyses PassPA = Pass.run(*L, LAM, LAR, Updater); - // FIXME: We should verify the set of analyses relevant to Loop passes - // are preserved. - - // If the loop hasn't been deleted, we need to handle invalidation here. - if (!Updater.skipCurrentLoop()) - // We know that the loop pass couldn't have invalidated any other - // loop's analyses (that's the contract of a loop pass), so directly - // handle the loop analysis manager's invalidation here. - LAM.invalidate(*L, PassPA); - - // Then intersect the preserved set so that invalidation of module - // analyses will eventually occur when the module pass completes. - PA.intersect(std::move(PassPA)); - } while (!Worklist.empty()); - - // By definition we preserve the proxy. We also preserve all analyses on - // Loops. 
This precludes *any* invalidation of loop analyses by the proxy, - // but that's OK because we've taken care to invalidate analyses in the - // loop analysis manager incrementally above. - PA.preserveSet>(); - PA.preserve(); - // We also preserve the set of standard analyses. - PA.preserve(); - PA.preserve(); - PA.preserve(); - PA.preserve(); - // FIXME: What we really want to do here is preserve an AA category, but - // that concept doesn't exist yet. - PA.preserve(); - PA.preserve(); - PA.preserve(); - PA.preserve(); - return PA; - } - -private: - LoopPassT Pass; -}; - -/// \brief A function to deduce a loop pass type and wrap it in the templated -/// adaptor. -template -FunctionToLoopPassAdaptor -createFunctionToLoopPassAdaptor(LoopPassT Pass) { - return FunctionToLoopPassAdaptor(std::move(Pass)); -} - -/// \brief Pass for printing a loop's contents as textual IR. -class PrintLoopPass : public PassInfoMixin { - raw_ostream &OS; - std::string Banner; - -public: - PrintLoopPass(); - PrintLoopPass(raw_ostream &OS, const std::string &Banner = ""); - - PreservedAnalyses run(Loop &L, LoopAnalysisManager &, - LoopStandardAnalysisResults &, LPMUpdater &); -}; -} - -#endif // LLVM_ANALYSIS_LOOPPASSMANAGER_H Index: llvm/trunk/include/llvm/Passes/PassBuilder.h =================================================================== --- llvm/trunk/include/llvm/Passes/PassBuilder.h +++ llvm/trunk/include/llvm/Passes/PassBuilder.h @@ -18,8 +18,8 @@ #include "llvm/ADT/Optional.h" #include "llvm/Analysis/CGSCCPassManager.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/IR/PassManager.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" #include namespace llvm { Index: llvm/trunk/include/llvm/Transforms/Scalar/IVUsersPrinter.h =================================================================== --- llvm/trunk/include/llvm/Transforms/Scalar/IVUsersPrinter.h +++ llvm/trunk/include/llvm/Transforms/Scalar/IVUsersPrinter.h @@ -0,0 +1,30 @@ +//===- IVUsersPrinter.h - Induction Variable Users Printing -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_SCALAR_IVUSERSPRINTER_H +#define LLVM_TRANSFORMS_SCALAR_IVUSERSPRINTER_H + +#include "llvm/Analysis/IVUsers.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" + +namespace llvm { + +/// Printer pass for the \c IVUsers for a loop. 
+class IVUsersPrinterPass : public PassInfoMixin { + raw_ostream &OS; + +public: + explicit IVUsersPrinterPass(raw_ostream &OS) : OS(OS) {} + PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, + LoopStandardAnalysisResults &AR, LPMUpdater &U); +}; +} + +#endif Index: llvm/trunk/include/llvm/Transforms/Scalar/IndVarSimplify.h =================================================================== --- llvm/trunk/include/llvm/Transforms/Scalar/IndVarSimplify.h +++ llvm/trunk/include/llvm/Transforms/Scalar/IndVarSimplify.h @@ -16,8 +16,8 @@ #define LLVM_TRANSFORMS_SCALAR_INDVARSIMPLIFY_H #include "llvm/Analysis/LoopInfo.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/IR/PassManager.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" namespace llvm { Index: llvm/trunk/include/llvm/Transforms/Scalar/LICM.h =================================================================== --- llvm/trunk/include/llvm/Transforms/Scalar/LICM.h +++ llvm/trunk/include/llvm/Transforms/Scalar/LICM.h @@ -34,8 +34,8 @@ #define LLVM_TRANSFORMS_SCALAR_LICM_H #include "llvm/Analysis/LoopInfo.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/IR/PassManager.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" namespace llvm { Index: llvm/trunk/include/llvm/Transforms/Scalar/LoopAccessAnalysisPrinter.h =================================================================== --- llvm/trunk/include/llvm/Transforms/Scalar/LoopAccessAnalysisPrinter.h +++ llvm/trunk/include/llvm/Transforms/Scalar/LoopAccessAnalysisPrinter.h @@ -0,0 +1,31 @@ +//===- llvm/Analysis/LoopAccessAnalysisPrinter.h ----------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_SCALAR_LOOPACCESSANALYSISPRINTER_H +#define LLVM_TRANSFORMS_SCALAR_LOOPACCESSANALYSISPRINTER_H + +#include "llvm/Support/raw_ostream.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" + +namespace llvm { + +/// \brief Printer pass for the \c LoopAccessInfo results. 
+class LoopAccessInfoPrinterPass + : public PassInfoMixin { + raw_ostream &OS; + +public: + explicit LoopAccessInfoPrinterPass(raw_ostream &OS) : OS(OS) {} + PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, + LoopStandardAnalysisResults &AR, LPMUpdater &U); +}; + +} // End llvm namespace + +#endif Index: llvm/trunk/include/llvm/Transforms/Scalar/LoopDeletion.h =================================================================== --- llvm/trunk/include/llvm/Transforms/Scalar/LoopDeletion.h +++ llvm/trunk/include/llvm/Transforms/Scalar/LoopDeletion.h @@ -15,9 +15,9 @@ #define LLVM_TRANSFORMS_SCALAR_LOOPDELETION_H #include "llvm/Analysis/LoopInfo.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/IR/PassManager.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" namespace llvm { Index: llvm/trunk/include/llvm/Transforms/Scalar/LoopIdiomRecognize.h =================================================================== --- llvm/trunk/include/llvm/Transforms/Scalar/LoopIdiomRecognize.h +++ llvm/trunk/include/llvm/Transforms/Scalar/LoopIdiomRecognize.h @@ -17,8 +17,8 @@ #define LLVM_TRANSFORMS_SCALAR_LOOPIDIOMRECOGNIZE_H #include "llvm/Analysis/LoopInfo.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/IR/PassManager.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" namespace llvm { Index: llvm/trunk/include/llvm/Transforms/Scalar/LoopInstSimplify.h =================================================================== --- llvm/trunk/include/llvm/Transforms/Scalar/LoopInstSimplify.h +++ llvm/trunk/include/llvm/Transforms/Scalar/LoopInstSimplify.h @@ -15,8 +15,8 @@ #define LLVM_TRANSFORMS_SCALAR_LOOPINSTSIMPLIFY_H #include "llvm/Analysis/LoopInfo.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/IR/PassManager.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" namespace llvm { Index: llvm/trunk/include/llvm/Transforms/Scalar/LoopPassManager.h =================================================================== --- llvm/trunk/include/llvm/Transforms/Scalar/LoopPassManager.h +++ llvm/trunk/include/llvm/Transforms/Scalar/LoopPassManager.h @@ -0,0 +1,363 @@ +//===- LoopPassManager.h - Loop pass management -----------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// \file +/// +/// This header provides classes for managing a pipeline of passes over loops +/// in LLVM IR. +/// +/// The primary loop pass pipeline is managed in a very particular way to +/// provide a set of core guarantees: +/// 1) Loops are, where possible, in simplified form. +/// 2) Loops are *always* in LCSSA form. +/// 3) A collection of Loop-specific analysis results are available: +/// - LoopInfo +/// - DominatorTree +/// - ScalarEvolution +/// - AAManager +/// 4) All loop passes preserve #1 (where possible), #2, and #3. +/// 5) Loop passes run over each loop in the loop nest from the innermost to +/// the outermost. Specifically, all inner loops are processed before +/// passes run over outer loops. When running the pipeline across an inner +/// loop creates new inner loops, those are added and processed in this +/// order as well. +/// +/// This process is designed to facilitate transformations which simplify, +/// reduce, and remove loops. 
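[Editorial aside, not part of the patch.] As a usage note for the pipeline this header describes, composing a loop pipeline into a function pipeline might look like the sketch below. The specific passes named (LoopRotatePass, LICMPass) are only illustrative and would require their own headers in a real translation unit.

  // Editorial sketch -- building and embedding a loop pass pipeline.
  LoopPassManager LPM(/*DebugLogging=*/false);
  LPM.addPass(LoopRotatePass());
  LPM.addPass(LICMPass());

  FunctionPassManager FPM(/*DebugLogging=*/false);
  FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));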
For passes which are more oriented towards +/// optimizing loops, especially optimizing loop *nests* instead of single +/// loops in isolation, this framework is less interesting. +/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_SCALAR_LOOPPASSMANAGER_H +#define LLVM_TRANSFORMS_SCALAR_LOOPPASSMANAGER_H + +#include "llvm/ADT/PostOrderIterator.h" +#include "llvm/ADT/PriorityWorklist.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/Analysis/AliasAnalysis.h" +#include "llvm/Analysis/BasicAliasAnalysis.h" +#include "llvm/Analysis/GlobalsModRef.h" +#include "llvm/Analysis/LoopAnalysisManager.h" +#include "llvm/Analysis/LoopInfo.h" +#include "llvm/Analysis/ScalarEvolution.h" +#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h" +#include "llvm/Analysis/TargetLibraryInfo.h" +#include "llvm/Analysis/TargetTransformInfo.h" +#include "llvm/IR/Dominators.h" +#include "llvm/IR/PassManager.h" + +namespace llvm { + +// Forward declarations of an update tracking API used in the pass manager. +class LPMUpdater; + +// Explicit specialization and instantiation declarations for the pass manager. +// See the comments on the definition of the specialization for details on how +// it differs from the primary template. +template <> +PreservedAnalyses +PassManager::run(Loop &InitialL, LoopAnalysisManager &AM, + LoopStandardAnalysisResults &AnalysisResults, + LPMUpdater &U); +extern template class PassManager; + +/// \brief The Loop pass manager. +/// +/// See the documentation for the PassManager template for details. It runs +/// a sequence of Loop passes over each Loop that the manager is run over. This +/// typedef serves as a convenient way to refer to this construct. +typedef PassManager + LoopPassManager; + +/// A partial specialization of the require analysis template pass to forward +/// the extra parameters from a transformation's run method to the +/// AnalysisManager's getResult. +template +struct RequireAnalysisPass + : PassInfoMixin< + RequireAnalysisPass> { + PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM, + LoopStandardAnalysisResults &AR, LPMUpdater &) { + (void)AM.template getResult(L, AR); + return PreservedAnalyses::all(); + } +}; + +/// An alias template to easily name a require analysis loop pass. +template +using RequireAnalysisLoopPass = + RequireAnalysisPass; + +namespace internal { +/// Helper to implement appending of loops onto a worklist. +/// +/// We want to process loops in postorder, but the worklist is a LIFO data +/// structure, so we append to it in *reverse* postorder. +/// +/// For trees, a preorder traversal is a viable reverse postorder, so we +/// actually append using a preorder walk algorithm. +template +inline void appendLoopsToWorklist(RangeT &&Loops, + SmallPriorityWorklist &Worklist) { + // We use an internal worklist to build up the preorder traversal without + // recursion. + SmallVector PreOrderLoops, PreOrderWorklist; + + // We walk the initial sequence of loops in reverse because we generally want + // to visit defs before uses and the worklist is LIFO. 
+ for (Loop *RootL : reverse(Loops)) { + assert(PreOrderLoops.empty() && "Must start with an empty preorder walk."); + assert(PreOrderWorklist.empty() && + "Must start with an empty preorder walk worklist."); + PreOrderWorklist.push_back(RootL); + do { + Loop *L = PreOrderWorklist.pop_back_val(); + PreOrderWorklist.append(L->begin(), L->end()); + PreOrderLoops.push_back(L); + } while (!PreOrderWorklist.empty()); + + Worklist.insert(std::move(PreOrderLoops)); + PreOrderLoops.clear(); + } +} +} + +template class FunctionToLoopPassAdaptor; + +/// This class provides an interface for updating the loop pass manager based +/// on mutations to the loop nest. +/// +/// A reference to an instance of this class is passed as an argument to each +/// Loop pass, and Loop passes should use it to update LPM infrastructure if +/// they modify the loop nest structure. +class LPMUpdater { +public: + /// This can be queried by loop passes which run other loop passes (like pass + /// managers) to know whether the loop needs to be skipped due to updates to + /// the loop nest. + /// + /// If this returns true, the loop object may have been deleted, so passes + /// should take care not to touch the object. + bool skipCurrentLoop() const { return SkipCurrentLoop; } + + /// Loop passes should use this method to indicate they have deleted a loop + /// from the nest. + /// + /// Note that this loop must either be the current loop or a subloop of the + /// current loop. This routine must be called prior to removing the loop from + /// the loop nest. + /// + /// If this is called for the current loop, in addition to clearing any + /// state, this routine will mark that the current loop should be skipped by + /// the rest of the pass management infrastructure. + void markLoopAsDeleted(Loop &L) { + LAM.clear(L); + assert(CurrentL->contains(&L) && "Cannot delete a loop outside of the " + "subloop tree currently being processed."); + if (&L == CurrentL) + SkipCurrentLoop = true; + } + + /// Loop passes should use this method to indicate they have added new child + /// loops of the current loop. + /// + /// \p NewChildLoops must contain only the immediate children. Any nested + /// loops within them will be visited in postorder as usual for the loop pass + /// manager. + void addChildLoops(ArrayRef NewChildLoops) { + // Insert ourselves back into the worklist first, as this loop should be + // revisited after all the children have been processed. + Worklist.insert(CurrentL); + +#ifndef NDEBUG + for (Loop *NewL : NewChildLoops) + assert(NewL->getParentLoop() == CurrentL && "All of the new loops must " + "be immediate children of " + "the current loop!"); +#endif + + internal::appendLoopsToWorklist(NewChildLoops, Worklist); + + // Also skip further processing of the current loop--it will be revisited + // after all of its newly added children are accounted for. + SkipCurrentLoop = true; + } + + /// Loop passes should use this method to indicate they have added new + /// sibling loops to the current loop. + /// + /// \p NewSibLoops must only contain the immediate sibling loops. Any nested + /// loops within them will be visited in postorder as usual for the loop pass + /// manager. 
+ void addSiblingLoops(ArrayRef NewSibLoops) { +#ifndef NDEBUG + for (Loop *NewL : NewSibLoops) + assert(NewL->getParentLoop() == ParentL && + "All of the new loops must be siblings of the current loop!"); +#endif + + internal::appendLoopsToWorklist(NewSibLoops, Worklist); + + // No need to skip the current loop or revisit it, as sibling loops + // shouldn't impact anything. + } + +private: + template friend class llvm::FunctionToLoopPassAdaptor; + + /// The \c FunctionToLoopPassAdaptor's worklist of loops to process. + SmallPriorityWorklist &Worklist; + + /// The analysis manager for use in the current loop nest. + LoopAnalysisManager &LAM; + + Loop *CurrentL; + bool SkipCurrentLoop; + +#ifndef NDEBUG + // In debug builds we also track the parent loop to implement asserts even in + // the face of loop deletion. + Loop *ParentL; +#endif + + LPMUpdater(SmallPriorityWorklist &Worklist, + LoopAnalysisManager &LAM) + : Worklist(Worklist), LAM(LAM) {} +}; + +/// \brief Adaptor that maps from a function to its loops. +/// +/// Designed to allow composition of a LoopPass(Manager) and a +/// FunctionPassManager. Note that if this pass is constructed with a \c +/// FunctionAnalysisManager it will run the \c LoopAnalysisManagerFunctionProxy +/// analysis prior to running the loop passes over the function to enable a \c +/// LoopAnalysisManager to be used within this run safely. +template +class FunctionToLoopPassAdaptor + : public PassInfoMixin> { +public: + explicit FunctionToLoopPassAdaptor(LoopPassT Pass) : Pass(std::move(Pass)) {} + + /// \brief Runs the loop passes across every loop in the function. + PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) { + // Setup the loop analysis manager from its proxy. + LoopAnalysisManager &LAM = + AM.getResult(F).getManager(); + // Get the loop structure for this function + LoopInfo &LI = AM.getResult(F); + + // If there are no loops, there is nothing to do here. + if (LI.empty()) + return PreservedAnalyses::all(); + + // Get the analysis results needed by loop passes. + LoopStandardAnalysisResults LAR = {AM.getResult(F), + AM.getResult(F), + AM.getResult(F), + AM.getResult(F), + AM.getResult(F), + AM.getResult(F), + AM.getResult(F)}; + + PreservedAnalyses PA = PreservedAnalyses::all(); + + // A postorder worklist of loops to process. + SmallPriorityWorklist Worklist; + + // Register the worklist and loop analysis manager so that loop passes can + // update them when they mutate the loop nest structure. + LPMUpdater Updater(Worklist, LAM); + + // Add the loop nests in the reverse order of LoopInfo. For some reason, + // they are stored in RPO w.r.t. the control flow graph in LoopInfo. For + // the purpose of unrolling, loop deletion, and LICM, we largely want to + // work forward across the CFG so that we visit defs before uses and can + // propagate simplifications from one loop nest into the next. + // FIXME: Consider changing the order in LoopInfo. + internal::appendLoopsToWorklist(reverse(LI), Worklist); + + do { + Loop *L = Worklist.pop_back_val(); + + // Reset the update structure for this loop. + Updater.CurrentL = L; + Updater.SkipCurrentLoop = false; +#ifndef NDEBUG + Updater.ParentL = L->getParentLoop(); +#endif + + PreservedAnalyses PassPA = Pass.run(*L, LAM, LAR, Updater); + // FIXME: We should verify the set of analyses relevant to Loop passes + // are preserved. + + // If the loop hasn't been deleted, we need to handle invalidation here. 
+ if (!Updater.skipCurrentLoop()) + // We know that the loop pass couldn't have invalidated any other + // loop's analyses (that's the contract of a loop pass), so directly + // handle the loop analysis manager's invalidation here. + LAM.invalidate(*L, PassPA); + + // Then intersect the preserved set so that invalidation of module + // analyses will eventually occur when the module pass completes. + PA.intersect(std::move(PassPA)); + } while (!Worklist.empty()); + + // By definition we preserve the proxy. We also preserve all analyses on + // Loops. This precludes *any* invalidation of loop analyses by the proxy, + // but that's OK because we've taken care to invalidate analyses in the + // loop analysis manager incrementally above. + PA.preserveSet>(); + PA.preserve(); + // We also preserve the set of standard analyses. + PA.preserve(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + // FIXME: What we really want to do here is preserve an AA category, but + // that concept doesn't exist yet. + PA.preserve(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + return PA; + } + +private: + LoopPassT Pass; +}; + +/// \brief A function to deduce a loop pass type and wrap it in the templated +/// adaptor. +template +FunctionToLoopPassAdaptor +createFunctionToLoopPassAdaptor(LoopPassT Pass) { + return FunctionToLoopPassAdaptor(std::move(Pass)); +} + +/// \brief Pass for printing a loop's contents as textual IR. +class PrintLoopPass : public PassInfoMixin { + raw_ostream &OS; + std::string Banner; + +public: + PrintLoopPass(); + PrintLoopPass(raw_ostream &OS, const std::string &Banner = ""); + + PreservedAnalyses run(Loop &L, LoopAnalysisManager &, + LoopStandardAnalysisResults &, LPMUpdater &); +}; +} + +#endif // LLVM_TRANSFORMS_SCALAR_LOOPPASSMANAGER_H Index: llvm/trunk/include/llvm/Transforms/Scalar/LoopRotation.h =================================================================== --- llvm/trunk/include/llvm/Transforms/Scalar/LoopRotation.h +++ llvm/trunk/include/llvm/Transforms/Scalar/LoopRotation.h @@ -15,8 +15,8 @@ #define LLVM_TRANSFORMS_SCALAR_LOOPROTATION_H #include "llvm/Analysis/LoopInfo.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/IR/PassManager.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" namespace llvm { Index: llvm/trunk/include/llvm/Transforms/Scalar/LoopSimplifyCFG.h =================================================================== --- llvm/trunk/include/llvm/Transforms/Scalar/LoopSimplifyCFG.h +++ llvm/trunk/include/llvm/Transforms/Scalar/LoopSimplifyCFG.h @@ -18,8 +18,8 @@ #define LLVM_TRANSFORMS_SCALAR_LOOPSIMPLIFYCFG_H #include "llvm/Analysis/LoopInfo.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/IR/PassManager.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" namespace llvm { Index: llvm/trunk/include/llvm/Transforms/Scalar/LoopStrengthReduce.h =================================================================== --- llvm/trunk/include/llvm/Transforms/Scalar/LoopStrengthReduce.h +++ llvm/trunk/include/llvm/Transforms/Scalar/LoopStrengthReduce.h @@ -23,8 +23,8 @@ #define LLVM_TRANSFORMS_SCALAR_LOOPSTRENGTHREDUCE_H #include "llvm/Analysis/LoopInfo.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/IR/PassManager.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" namespace llvm { Index: llvm/trunk/include/llvm/Transforms/Scalar/LoopUnrollPass.h =================================================================== --- llvm/trunk/include/llvm/Transforms/Scalar/LoopUnrollPass.h +++ 
llvm/trunk/include/llvm/Transforms/Scalar/LoopUnrollPass.h @@ -11,8 +11,8 @@ #define LLVM_TRANSFORMS_SCALAR_LOOPUNROLLPASS_H #include "llvm/Analysis/LoopInfo.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/IR/PassManager.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" namespace llvm { Index: llvm/trunk/include/llvm/Transforms/Vectorize/LoopVectorize.h =================================================================== --- llvm/trunk/include/llvm/Transforms/Vectorize/LoopVectorize.h +++ llvm/trunk/include/llvm/Transforms/Vectorize/LoopVectorize.h @@ -57,12 +57,12 @@ #include "llvm/Analysis/DemandedBits.h" #include "llvm/Analysis/LoopAccessAnalysis.h" #include "llvm/Analysis/LoopInfo.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/Analysis/OptimizationDiagnosticInfo.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/IR/Function.h" #include "llvm/IR/PassManager.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" #include namespace llvm { Index: llvm/trunk/lib/Analysis/CMakeLists.txt =================================================================== --- llvm/trunk/lib/Analysis/CMakeLists.txt +++ llvm/trunk/lib/Analysis/CMakeLists.txt @@ -44,10 +44,10 @@ Lint.cpp Loads.cpp LoopAccessAnalysis.cpp + LoopAnalysisManager.cpp LoopUnrollAnalyzer.cpp LoopInfo.cpp LoopPass.cpp - LoopPassManager.cpp MemDepPrinter.cpp MemDerefPrinter.cpp MemoryBuiltins.cpp Index: llvm/trunk/lib/Analysis/IVUsers.cpp =================================================================== --- llvm/trunk/lib/Analysis/IVUsers.cpp +++ llvm/trunk/lib/Analysis/IVUsers.cpp @@ -16,8 +16,8 @@ #include "llvm/ADT/STLExtras.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/CodeMetrics.h" +#include "llvm/Analysis/LoopAnalysisManager.h" #include "llvm/Analysis/LoopPass.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Constants.h" @@ -41,13 +41,6 @@ return IVUsers(&L, &AR.AC, &AR.LI, &AR.DT, &AR.SE); } -PreservedAnalyses IVUsersPrinterPass::run(Loop &L, LoopAnalysisManager &AM, - LoopStandardAnalysisResults &AR, - LPMUpdater &U) { - AM.getResult(L, AR).print(OS); - return PreservedAnalyses::all(); -} - char IVUsersWrapperPass::ID = 0; INITIALIZE_PASS_BEGIN(IVUsersWrapperPass, "iv-users", "Induction Variable Users", false, true) Index: llvm/trunk/lib/Analysis/LoopAccessAnalysis.cpp =================================================================== --- llvm/trunk/lib/Analysis/LoopAccessAnalysis.cpp +++ llvm/trunk/lib/Analysis/LoopAccessAnalysis.cpp @@ -12,22 +12,22 @@ // //===----------------------------------------------------------------------===// +#include "llvm/Analysis/LoopAccessAnalysis.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DepthFirstIterator.h" #include "llvm/ADT/EquivalenceClasses.h" -#include "llvm/ADT/iterator_range.h" #include "llvm/ADT/PointerIntPair.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" -#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/iterator_range.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/AliasSetTracker.h" -#include "llvm/Analysis/LoopAccessAnalysis.h" +#include "llvm/Analysis/LoopAnalysisManager.h" #include "llvm/Analysis/LoopInfo.h" -#include "llvm/Analysis/LoopPassManager.h" #include 
"llvm/Analysis/MemoryLocation.h" #include "llvm/Analysis/OptimizationDiagnosticInfo.h" #include "llvm/Analysis/ScalarEvolution.h" @@ -44,10 +44,10 @@ #include "llvm/IR/DiagnosticInfo.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" +#include "llvm/IR/IRBuilder.h" #include "llvm/IR/InstrTypes.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" -#include "llvm/IR/IRBuilder.h" #include "llvm/IR/Operator.h" #include "llvm/IR/PassManager.h" #include "llvm/IR/Type.h" @@ -2125,17 +2125,6 @@ return LoopAccessInfo(&L, &AR.SE, &AR.TLI, &AR.AA, &AR.DT, &AR.LI); } -PreservedAnalyses -LoopAccessInfoPrinterPass::run(Loop &L, LoopAnalysisManager &AM, - LoopStandardAnalysisResults &AR, LPMUpdater &) { - Function &F = *L.getHeader()->getParent(); - auto &LAI = AM.getResult(L, AR); - OS << "Loop access info in function '" << F.getName() << "':\n"; - OS.indent(2) << L.getHeader()->getName() << ":\n"; - LAI.print(OS, 4); - return PreservedAnalyses::all(); -} - namespace llvm { Pass *createLAAPass() { Index: llvm/trunk/lib/Analysis/LoopAnalysisManager.cpp =================================================================== --- llvm/trunk/lib/Analysis/LoopAnalysisManager.cpp +++ llvm/trunk/lib/Analysis/LoopAnalysisManager.cpp @@ -0,0 +1,160 @@ +//===- LoopAnalysisManager.cpp - Loop analysis management -----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Analysis/LoopAnalysisManager.h" +#include "llvm/Analysis/BasicAliasAnalysis.h" +#include "llvm/Analysis/GlobalsModRef.h" +#include "llvm/Analysis/LoopInfo.h" +#include "llvm/Analysis/ScalarEvolution.h" +#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h" +#include "llvm/IR/Dominators.h" + +using namespace llvm; + +// Explicit template instantiations and specialization defininitions for core +// template typedefs. +namespace llvm { +template class AllAnalysesOn; +template class AnalysisManager; +template class InnerAnalysisManagerProxy; +template class OuterAnalysisManagerProxy; + +bool LoopAnalysisManagerFunctionProxy::Result::invalidate( + Function &F, const PreservedAnalyses &PA, + FunctionAnalysisManager::Invalidator &Inv) { + // First compute the sequence of IR units covered by this proxy. We will want + // to visit this in postorder, but because this is a tree structure we can do + // this by building a preorder sequence and walking it in reverse. + SmallVector PreOrderLoops, PreOrderWorklist; + // Note that we want to walk the roots in reverse order because we will end + // up reversing the preorder sequence. However, it happens that the loop nest + // roots are in reverse order within the LoopInfo object. So we just walk + // forward here. + // FIXME: If we change the order of LoopInfo we will want to add a reverse + // here. + for (Loop *RootL : *LI) { + assert(PreOrderWorklist.empty() && + "Must start with an empty preorder walk worklist."); + PreOrderWorklist.push_back(RootL); + do { + Loop *L = PreOrderWorklist.pop_back_val(); + PreOrderWorklist.append(L->begin(), L->end()); + PreOrderLoops.push_back(L); + } while (!PreOrderWorklist.empty()); + } + + // If this proxy or the loop info is going to be invalidated, we also need + // to clear all the keys coming from that analysis. 
We also completely blow + // away the loop analyses if any of the standard analyses provided by the + // loop pass manager go away so that loop analyses can freely use these + // without worrying about declaring dependencies on them etc. + // FIXME: It isn't clear if this is the right tradeoff. We could instead make + // loop analyses declare any dependencies on these and use the more general + // invalidation logic below to act on that. + auto PAC = PA.getChecker(); + if (!(PAC.preserved() || PAC.preservedSet>()) || + Inv.invalidate(F, PA) || + Inv.invalidate(F, PA) || + Inv.invalidate(F, PA) || + Inv.invalidate(F, PA) || + Inv.invalidate(F, PA)) { + // Note that the LoopInfo may be stale at this point, however the loop + // objects themselves remain the only viable keys that could be in the + // analysis manager's cache. So we just walk the keys and forcibly clear + // those results. Note that the order doesn't matter here as this will just + // directly destroy the results without calling methods on them. + for (Loop *L : PreOrderLoops) + InnerAM->clear(*L); + + // We also need to null out the inner AM so that when the object gets + // destroyed as invalid we don't try to clear the inner AM again. At that + // point we won't be able to reliably walk the loops for this function and + // only clear results associated with those loops the way we do here. + // FIXME: Making InnerAM null at this point isn't very nice. Most analyses + // try to remain valid during invalidation. Maybe we should add an + // `IsClean` flag? + InnerAM = nullptr; + + // Now return true to indicate this *is* invalid and a fresh proxy result + // needs to be built. This is especially important given the null InnerAM. + return true; + } + + // Directly check if the relevant set is preserved so we can short circuit + // invalidating loops. + bool AreLoopAnalysesPreserved = + PA.allAnalysesInSetPreserved>(); + + // Since we have a valid LoopInfo we can actually leave the cached results in + // the analysis manager associated with the Loop keys, but we need to + // propagate any necessary invalidation logic into them. We'd like to + // invalidate things in roughly the same order as they were put into the + // cache and so we walk the preorder list in reverse to form a valid + // postorder. + for (Loop *L : reverse(PreOrderLoops)) { + Optional InnerPA; + + // Check to see whether the preserved set needs to be adjusted based on + // function-level analysis invalidation triggering deferred invalidation + // for this loop. + if (auto *OuterProxy = + InnerAM->getCachedResult(*L)) + for (const auto &OuterInvalidationPair : + OuterProxy->getOuterInvalidations()) { + AnalysisKey *OuterAnalysisID = OuterInvalidationPair.first; + const auto &InnerAnalysisIDs = OuterInvalidationPair.second; + if (Inv.invalidate(OuterAnalysisID, F, PA)) { + if (!InnerPA) + InnerPA = PA; + for (AnalysisKey *InnerAnalysisID : InnerAnalysisIDs) + InnerPA->abandon(InnerAnalysisID); + } + } + + // Check if we needed a custom PA set. If so we'll need to run the inner + // invalidation. + if (InnerPA) { + InnerAM->invalidate(*L, *InnerPA); + continue; + } + + // Otherwise we only need to do invalidation if the original PA set didn't + // preserve all Loop analyses. + if (!AreLoopAnalysesPreserved) + InnerAM->invalidate(*L, PA); + } + + // Return false to indicate that this result is still a valid proxy. 
+ return false; +} + +template <> +LoopAnalysisManagerFunctionProxy::Result +LoopAnalysisManagerFunctionProxy::run(Function &F, + FunctionAnalysisManager &AM) { + return Result(*InnerAM, AM.getResult(F)); +} +} + +PreservedAnalyses llvm::getLoopPassPreservedAnalyses() { + PreservedAnalyses PA; + PA.preserve(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + // TODO: What we really want to do here is preserve an AA category, but that + // concept doesn't exist yet. + PA.preserve(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + return PA; +} Index: llvm/trunk/lib/Analysis/LoopPass.cpp =================================================================== --- llvm/trunk/lib/Analysis/LoopPass.cpp +++ llvm/trunk/lib/Analysis/LoopPass.cpp @@ -14,7 +14,7 @@ //===----------------------------------------------------------------------===// #include "llvm/Analysis/LoopPass.h" -#include "llvm/Analysis/LoopPassManager.h" +#include "llvm/Analysis/LoopAnalysisManager.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/IRPrintingPasses.h" #include "llvm/IR/LLVMContext.h" Index: llvm/trunk/lib/Analysis/LoopPassManager.cpp =================================================================== --- llvm/trunk/lib/Analysis/LoopPassManager.cpp +++ llvm/trunk/lib/Analysis/LoopPassManager.cpp @@ -1,227 +0,0 @@ -//===- LoopPassManager.cpp - Loop pass management -------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// - -#include "llvm/Analysis/LoopPassManager.h" -#include "llvm/Analysis/BasicAliasAnalysis.h" -#include "llvm/Analysis/GlobalsModRef.h" -#include "llvm/Analysis/LoopInfo.h" -#include "llvm/Analysis/ScalarEvolution.h" -#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h" -#include "llvm/IR/Dominators.h" - -using namespace llvm; - -// Explicit template instantiations and specialization defininitions for core -// template typedefs. -namespace llvm { -template class AllAnalysesOn; -template class AnalysisManager; -template class PassManager; -template class InnerAnalysisManagerProxy; -template class OuterAnalysisManagerProxy; - -/// Explicitly specialize the pass manager's run method to handle loop nest -/// structure updates. -template <> -PreservedAnalyses -PassManager::run(Loop &L, LoopAnalysisManager &AM, - LoopStandardAnalysisResults &AR, LPMUpdater &U) { - PreservedAnalyses PA = PreservedAnalyses::all(); - - if (DebugLogging) - dbgs() << "Starting Loop pass manager run.\n"; - - for (auto &Pass : Passes) { - if (DebugLogging) - dbgs() << "Running pass: " << Pass->name() << " on " << L; - - PreservedAnalyses PassPA = Pass->run(L, AM, AR, U); - - // If the loop was deleted, abort the run and return to the outer walk. - if (U.skipCurrentLoop()) { - PA.intersect(std::move(PassPA)); - break; - } - - // Update the analysis manager as each pass runs and potentially - // invalidates analyses. - AM.invalidate(L, PassPA); - - // Finally, we intersect the final preserved analyses to compute the - // aggregate preserved set for this pass manager. - PA.intersect(std::move(PassPA)); - - // FIXME: Historically, the pass managers all called the LLVM context's - // yield function here. We don't have a generic way to acquire the - // context and it isn't yet clear what the right pattern is for yielding - // in the new pass manager so it is currently omitted. 
- // ...getContext().yield(); - } - - // Invalidation for the current loop should be handled above, and other loop - // analysis results shouldn't be impacted by runs over this loop. Therefore, - // the remaining analysis results in the AnalysisManager are preserved. We - // mark this with a set so that we don't need to inspect each one - // individually. - // FIXME: This isn't correct! This loop and all nested loops' analyses should - // be preserved, but unrolling should invalidate the parent loop's analyses. - PA.preserveSet>(); - - if (DebugLogging) - dbgs() << "Finished Loop pass manager run.\n"; - - return PA; -} - -bool LoopAnalysisManagerFunctionProxy::Result::invalidate( - Function &F, const PreservedAnalyses &PA, - FunctionAnalysisManager::Invalidator &Inv) { - // First compute the sequence of IR units covered by this proxy. We will want - // to visit this in postorder, but because this is a tree structure we can do - // this by building a preorder sequence and walking it in reverse. - SmallVector PreOrderLoops, PreOrderWorklist; - // Note that we want to walk the roots in reverse order because we will end - // up reversing the preorder sequence. However, it happens that the loop nest - // roots are in reverse order within the LoopInfo object. So we just walk - // forward here. - // FIXME: If we change the order of LoopInfo we will want to add a reverse - // here. - for (Loop *RootL : *LI) { - assert(PreOrderWorklist.empty() && - "Must start with an empty preorder walk worklist."); - PreOrderWorklist.push_back(RootL); - do { - Loop *L = PreOrderWorklist.pop_back_val(); - PreOrderWorklist.append(L->begin(), L->end()); - PreOrderLoops.push_back(L); - } while (!PreOrderWorklist.empty()); - } - - // If this proxy or the loop info is going to be invalidated, we also need - // to clear all the keys coming from that analysis. We also completely blow - // away the loop analyses if any of the standard analyses provided by the - // loop pass manager go away so that loop analyses can freely use these - // without worrying about declaring dependencies on them etc. - // FIXME: It isn't clear if this is the right tradeoff. We could instead make - // loop analyses declare any dependencies on these and use the more general - // invalidation logic below to act on that. - auto PAC = PA.getChecker(); - if (!(PAC.preserved() || PAC.preservedSet>()) || - Inv.invalidate(F, PA) || - Inv.invalidate(F, PA) || - Inv.invalidate(F, PA) || - Inv.invalidate(F, PA) || - Inv.invalidate(F, PA)) { - // Note that the LoopInfo may be stale at this point, however the loop - // objects themselves remain the only viable keys that could be in the - // analysis manager's cache. So we just walk the keys and forcibly clear - // those results. Note that the order doesn't matter here as this will just - // directly destroy the results without calling methods on them. - for (Loop *L : PreOrderLoops) - InnerAM->clear(*L); - - // We also need to null out the inner AM so that when the object gets - // destroyed as invalid we don't try to clear the inner AM again. At that - // point we won't be able to reliably walk the loops for this function and - // only clear results associated with those loops the way we do here. - // FIXME: Making InnerAM null at this point isn't very nice. Most analyses - // try to remain valid during invalidation. Maybe we should add an - // `IsClean` flag? - InnerAM = nullptr; - - // Now return true to indicate this *is* invalid and a fresh proxy result - // needs to be built. 
This is especially important given the null InnerAM. - return true; - } - - // Directly check if the relevant set is preserved so we can short circuit - // invalidating loops. - bool AreLoopAnalysesPreserved = - PA.allAnalysesInSetPreserved>(); - - // Since we have a valid LoopInfo we can actually leave the cached results in - // the analysis manager associated with the Loop keys, but we need to - // propagate any necessary invalidation logic into them. We'd like to - // invalidate things in roughly the same order as they were put into the - // cache and so we walk the preorder list in reverse to form a valid - // postorder. - for (Loop *L : reverse(PreOrderLoops)) { - Optional InnerPA; - - // Check to see whether the preserved set needs to be adjusted based on - // function-level analysis invalidation triggering deferred invalidation - // for this loop. - if (auto *OuterProxy = - InnerAM->getCachedResult(*L)) - for (const auto &OuterInvalidationPair : - OuterProxy->getOuterInvalidations()) { - AnalysisKey *OuterAnalysisID = OuterInvalidationPair.first; - const auto &InnerAnalysisIDs = OuterInvalidationPair.second; - if (Inv.invalidate(OuterAnalysisID, F, PA)) { - if (!InnerPA) - InnerPA = PA; - for (AnalysisKey *InnerAnalysisID : InnerAnalysisIDs) - InnerPA->abandon(InnerAnalysisID); - } - } - - // Check if we needed a custom PA set. If so we'll need to run the inner - // invalidation. - if (InnerPA) { - InnerAM->invalidate(*L, *InnerPA); - continue; - } - - // Otherwise we only need to do invalidation if the original PA set didn't - // preserve all Loop analyses. - if (!AreLoopAnalysesPreserved) - InnerAM->invalidate(*L, PA); - } - - // Return false to indicate that this result is still a valid proxy. - return false; -} - -template <> -LoopAnalysisManagerFunctionProxy::Result -LoopAnalysisManagerFunctionProxy::run(Function &F, - FunctionAnalysisManager &AM) { - return Result(*InnerAM, AM.getResult(F)); -} -} - -PreservedAnalyses llvm::getLoopPassPreservedAnalyses() { - PreservedAnalyses PA; - PA.preserve(); - PA.preserve(); - PA.preserve(); - PA.preserve(); - PA.preserve(); - // TODO: What we really want to do here is preserve an AA category, but that - // concept doesn't exist yet. 
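The getLoopPassPreservedAnalyses() helper is what individual loop transforms are expected to return whenever they actually change IR. A hedged sketch of that contract; MyLoopTransformPass is illustrative and not a pass in this patch:

  #include "llvm/Analysis/LoopAnalysisManager.h"
  #include "llvm/Transforms/Scalar/LoopPassManager.h"

  namespace {
  class MyLoopTransformPass
      : public llvm::PassInfoMixin<MyLoopTransformPass> {
  public:
    llvm::PreservedAnalyses run(llvm::Loop &L, llvm::LoopAnalysisManager &AM,
                                llvm::LoopStandardAnalysisResults &AR,
                                llvm::LPMUpdater &U) {
      bool Changed = false;
      // ... rewrite the loop using AR.DT, AR.LI, AR.SE as needed ...
      if (!Changed)
        return llvm::PreservedAnalyses::all();
      // Marks the function-level analyses the loop pipeline relies on (and the
      // proxy) as preserved, so the proxy's invalidate() does not clear every
      // cached loop result after this pass runs.
      return llvm::getLoopPassPreservedAnalyses();
    }
  };
  } // namespace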
- PA.preserve(); - PA.preserve(); - PA.preserve(); - PA.preserve(); - return PA; -} - -PrintLoopPass::PrintLoopPass() : OS(dbgs()) {} -PrintLoopPass::PrintLoopPass(raw_ostream &OS, const std::string &Banner) - : OS(OS), Banner(Banner) {} - -PreservedAnalyses PrintLoopPass::run(Loop &L, LoopAnalysisManager &, - LoopStandardAnalysisResults &, - LPMUpdater &) { - printLoop(L, OS, Banner); - return PreservedAnalyses::all(); -} Index: llvm/trunk/lib/LTO/LTOBackend.cpp =================================================================== --- llvm/trunk/lib/LTO/LTOBackend.cpp +++ llvm/trunk/lib/LTO/LTOBackend.cpp @@ -17,7 +17,6 @@ #include "llvm/LTO/LTOBackend.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/CGSCCPassManager.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/Bitcode/BitcodeReader.h" @@ -36,6 +35,7 @@ #include "llvm/Target/TargetMachine.h" #include "llvm/Transforms/IPO.h" #include "llvm/Transforms/IPO/PassManagerBuilder.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" #include "llvm/Transforms/Utils/FunctionImportUtils.h" #include "llvm/Transforms/Utils/SplitModule.h" Index: llvm/trunk/lib/Passes/PassBuilder.cpp =================================================================== --- llvm/trunk/lib/Passes/PassBuilder.cpp +++ llvm/trunk/lib/Passes/PassBuilder.cpp @@ -38,7 +38,6 @@ #include "llvm/Analysis/LazyValueInfo.h" #include "llvm/Analysis/LoopAccessAnalysis.h" #include "llvm/Analysis/LoopInfo.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/Analysis/MemoryDependenceAnalysis.h" #include "llvm/Analysis/ModuleSummaryAnalysis.h" #include "llvm/Analysis/OptimizationDiagnosticInfo.h" @@ -95,14 +94,17 @@ #include "llvm/Transforms/Scalar/Float2Int.h" #include "llvm/Transforms/Scalar/GVN.h" #include "llvm/Transforms/Scalar/GuardWidening.h" +#include "llvm/Transforms/Scalar/IVUsersPrinter.h" #include "llvm/Transforms/Scalar/IndVarSimplify.h" #include "llvm/Transforms/Scalar/JumpThreading.h" #include "llvm/Transforms/Scalar/LICM.h" +#include "llvm/Transforms/Scalar/LoopAccessAnalysisPrinter.h" #include "llvm/Transforms/Scalar/LoopDataPrefetch.h" #include "llvm/Transforms/Scalar/LoopDeletion.h" #include "llvm/Transforms/Scalar/LoopDistribute.h" #include "llvm/Transforms/Scalar/LoopIdiomRecognize.h" #include "llvm/Transforms/Scalar/LoopInstSimplify.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" #include "llvm/Transforms/Scalar/LoopRotation.h" #include "llvm/Transforms/Scalar/LoopSimplifyCFG.h" #include "llvm/Transforms/Scalar/LoopStrengthReduce.h" Index: llvm/trunk/lib/Transforms/Scalar/CMakeLists.txt =================================================================== --- llvm/trunk/lib/Transforms/Scalar/CMakeLists.txt +++ llvm/trunk/lib/Transforms/Scalar/CMakeLists.txt @@ -13,10 +13,12 @@ GuardWidening.cpp GVN.cpp GVNHoist.cpp + IVUsersPrinter.cpp InductiveRangeCheckElimination.cpp IndVarSimplify.cpp JumpThreading.cpp LICM.cpp + LoopAccessAnalysisPrinter.cpp LoopSink.cpp LoadCombine.cpp LoopDeletion.cpp @@ -26,6 +28,7 @@ LoopInstSimplify.cpp LoopInterchange.cpp LoopLoadElimination.cpp + LoopPassManager.cpp LoopRerollPass.cpp LoopRotation.cpp LoopSimplifyCFG.cpp Index: llvm/trunk/lib/Transforms/Scalar/IVUsersPrinter.cpp =================================================================== --- llvm/trunk/lib/Transforms/Scalar/IVUsersPrinter.cpp +++ llvm/trunk/lib/Transforms/Scalar/IVUsersPrinter.cpp @@ -0,0 +1,22 @@ +//===- IVUsersPrinter.cpp - 
Induction Variable Users Printer ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Transforms/Scalar/IVUsersPrinter.h" +#include "llvm/Analysis/IVUsers.h" +#include "llvm/Support/Debug.h" +using namespace llvm; + +#define DEBUG_TYPE "iv-users" + +PreservedAnalyses IVUsersPrinterPass::run(Loop &L, LoopAnalysisManager &AM, + LoopStandardAnalysisResults &AR, + LPMUpdater &U) { + AM.getResult(L, AR).print(OS); + return PreservedAnalyses::all(); +} Index: llvm/trunk/lib/Transforms/Scalar/IndVarSimplify.cpp =================================================================== --- llvm/trunk/lib/Transforms/Scalar/IndVarSimplify.cpp +++ llvm/trunk/lib/Transforms/Scalar/IndVarSimplify.cpp @@ -25,15 +25,13 @@ //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar/IndVarSimplify.h" -#include "llvm/Transforms/Scalar.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/GlobalsModRef.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/LoopPass.h" -#include "llvm/Analysis/LoopPassManager.h" -#include "llvm/Analysis/ScalarEvolutionExpander.h" #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h" +#include "llvm/Analysis/ScalarEvolutionExpander.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/IR/BasicBlock.h" @@ -49,6 +47,8 @@ #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" +#include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/LoopUtils.h" Index: llvm/trunk/lib/Transforms/Scalar/LICM.cpp =================================================================== --- llvm/trunk/lib/Transforms/Scalar/LICM.cpp +++ llvm/trunk/lib/Transforms/Scalar/LICM.cpp @@ -41,7 +41,6 @@ #include "llvm/Analysis/Loads.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/LoopPass.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/Analysis/MemoryBuiltins.h" #include "llvm/Analysis/OptimizationDiagnosticInfo.h" #include "llvm/Analysis/ScalarEvolution.h" @@ -62,6 +61,7 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/LoopUtils.h" #include "llvm/Transforms/Utils/SSAUpdater.h" Index: llvm/trunk/lib/Transforms/Scalar/LoopAccessAnalysisPrinter.cpp =================================================================== --- llvm/trunk/lib/Transforms/Scalar/LoopAccessAnalysisPrinter.cpp +++ llvm/trunk/lib/Transforms/Scalar/LoopAccessAnalysisPrinter.cpp @@ -0,0 +1,25 @@ +//===- LoopAccessAnalysisPrinter.cpp - Loop Access Analysis Printer --------==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#include "llvm/Transforms/Scalar/LoopAccessAnalysisPrinter.h" +#include "llvm/Analysis/LoopAccessAnalysis.h" +using namespace llvm; + +#define DEBUG_TYPE "loop-accesses" + +PreservedAnalyses +LoopAccessInfoPrinterPass::run(Loop &L, LoopAnalysisManager &AM, + LoopStandardAnalysisResults &AR, LPMUpdater &) { + Function &F = *L.getHeader()->getParent(); + auto &LAI = AM.getResult(L, AR); + OS << "Loop access info in function '" << F.getName() << "':\n"; + OS.indent(2) << L.getHeader()->getName() << ":\n"; + LAI.print(OS, 4); + return PreservedAnalyses::all(); +} Index: llvm/trunk/lib/Transforms/Scalar/LoopDeletion.cpp =================================================================== --- llvm/trunk/lib/Transforms/Scalar/LoopDeletion.cpp +++ llvm/trunk/lib/Transforms/Scalar/LoopDeletion.cpp @@ -19,9 +19,9 @@ #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/GlobalsModRef.h" #include "llvm/Analysis/LoopPass.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/IR/Dominators.h" #include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" #include "llvm/Transforms/Utils/LoopUtils.h" using namespace llvm; Index: llvm/trunk/lib/Transforms/Scalar/LoopDistribute.cpp =================================================================== --- llvm/trunk/lib/Transforms/Scalar/LoopDistribute.cpp +++ llvm/trunk/lib/Transforms/Scalar/LoopDistribute.cpp @@ -31,13 +31,13 @@ #include "llvm/Analysis/GlobalsModRef.h" #include "llvm/Analysis/LoopAccessAnalysis.h" #include "llvm/Analysis/LoopInfo.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/Analysis/OptimizationDiagnosticInfo.h" #include "llvm/IR/DiagnosticInfo.h" #include "llvm/IR/Dominators.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Cloning.h" #include "llvm/Transforms/Utils/LoopUtils.h" Index: llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp =================================================================== --- llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp +++ llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp @@ -46,7 +46,6 @@ #include "llvm/Analysis/GlobalsModRef.h" #include "llvm/Analysis/LoopAccessAnalysis.h" #include "llvm/Analysis/LoopPass.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h" #include "llvm/Analysis/ScalarEvolutionExpander.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h" @@ -61,6 +60,7 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" #include "llvm/Transforms/Utils/BuildLibCalls.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/LoopUtils.h" Index: llvm/trunk/lib/Transforms/Scalar/LoopInstSimplify.cpp =================================================================== --- llvm/trunk/lib/Transforms/Scalar/LoopInstSimplify.cpp +++ llvm/trunk/lib/Transforms/Scalar/LoopInstSimplify.cpp @@ -18,7 +18,6 @@ #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/LoopPass.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/IR/DataLayout.h" @@ -26,6 +25,7 @@ 
#include "llvm/IR/Instructions.h" #include "llvm/Support/Debug.h" #include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/LoopUtils.h" using namespace llvm; Index: llvm/trunk/lib/Transforms/Scalar/LoopPassManager.cpp =================================================================== --- llvm/trunk/lib/Transforms/Scalar/LoopPassManager.cpp +++ llvm/trunk/lib/Transforms/Scalar/LoopPassManager.cpp @@ -0,0 +1,85 @@ +//===- LoopPassManager.cpp - Loop pass management -------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Transforms/Scalar/LoopPassManager.h" +#include "llvm/Analysis/LoopInfo.h" + +using namespace llvm; + +// Explicit template instantiations and specialization defininitions for core +// template typedefs. +namespace llvm { +template class PassManager; + +/// Explicitly specialize the pass manager's run method to handle loop nest +/// structure updates. +template <> +PreservedAnalyses +PassManager::run(Loop &L, LoopAnalysisManager &AM, + LoopStandardAnalysisResults &AR, LPMUpdater &U) { + PreservedAnalyses PA = PreservedAnalyses::all(); + + if (DebugLogging) + dbgs() << "Starting Loop pass manager run.\n"; + + for (auto &Pass : Passes) { + if (DebugLogging) + dbgs() << "Running pass: " << Pass->name() << " on " << L; + + PreservedAnalyses PassPA = Pass->run(L, AM, AR, U); + + // If the loop was deleted, abort the run and return to the outer walk. + if (U.skipCurrentLoop()) { + PA.intersect(std::move(PassPA)); + break; + } + + // Update the analysis manager as each pass runs and potentially + // invalidates analyses. + AM.invalidate(L, PassPA); + + // Finally, we intersect the final preserved analyses to compute the + // aggregate preserved set for this pass manager. + PA.intersect(std::move(PassPA)); + + // FIXME: Historically, the pass managers all called the LLVM context's + // yield function here. We don't have a generic way to acquire the + // context and it isn't yet clear what the right pattern is for yielding + // in the new pass manager so it is currently omitted. + // ...getContext().yield(); + } + + // Invalidation for the current loop should be handled above, and other loop + // analysis results shouldn't be impacted by runs over this loop. Therefore, + // the remaining analysis results in the AnalysisManager are preserved. We + // mark this with a set so that we don't need to inspect each one + // individually. + // FIXME: This isn't correct! This loop and all nested loops' analyses should + // be preserved, but unrolling should invalidate the parent loop's analyses. 
+ PA.preserveSet>(); + + if (DebugLogging) + dbgs() << "Finished Loop pass manager run.\n"; + + return PA; +} +} + +PrintLoopPass::PrintLoopPass() : OS(dbgs()) {} +PrintLoopPass::PrintLoopPass(raw_ostream &OS, const std::string &Banner) + : OS(OS), Banner(Banner) {} + +PreservedAnalyses PrintLoopPass::run(Loop &L, LoopAnalysisManager &, + LoopStandardAnalysisResults &, + LPMUpdater &) { + printLoop(L, OS, Banner); + return PreservedAnalyses::all(); +} Index: llvm/trunk/lib/Transforms/Scalar/LoopRotation.cpp =================================================================== --- llvm/trunk/lib/Transforms/Scalar/LoopRotation.cpp +++ llvm/trunk/lib/Transforms/Scalar/LoopRotation.cpp @@ -14,13 +14,12 @@ #include "llvm/Transforms/Scalar/LoopRotation.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AliasAnalysis.h" -#include "llvm/Analysis/BasicAliasAnalysis.h" #include "llvm/Analysis/AssumptionCache.h" +#include "llvm/Analysis/BasicAliasAnalysis.h" #include "llvm/Analysis/CodeMetrics.h" -#include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/GlobalsModRef.h" +#include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/LoopPass.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h" #include "llvm/Analysis/TargetTransformInfo.h" @@ -34,6 +33,7 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/LoopUtils.h" Index: llvm/trunk/lib/Transforms/Scalar/LoopSimplifyCFG.cpp =================================================================== --- llvm/trunk/lib/Transforms/Scalar/LoopSimplifyCFG.cpp +++ llvm/trunk/lib/Transforms/Scalar/LoopSimplifyCFG.cpp @@ -18,18 +18,18 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AliasAnalysis.h" -#include "llvm/Analysis/BasicAliasAnalysis.h" #include "llvm/Analysis/AssumptionCache.h" +#include "llvm/Analysis/BasicAliasAnalysis.h" #include "llvm/Analysis/DependenceAnalysis.h" #include "llvm/Analysis/GlobalsModRef.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/LoopPass.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/IR/Dominators.h" #include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/LoopUtils.h" using namespace llvm; Index: llvm/trunk/lib/Transforms/Scalar/LoopSink.cpp =================================================================== --- llvm/trunk/lib/Transforms/Scalar/LoopSink.cpp +++ llvm/trunk/lib/Transforms/Scalar/LoopSink.cpp @@ -38,7 +38,6 @@ #include "llvm/Analysis/Loads.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/LoopPass.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h" #include "llvm/IR/Dominators.h" @@ -47,6 +46,7 @@ #include "llvm/IR/Metadata.h" #include "llvm/Support/CommandLine.h" #include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/LoopUtils.h" using 
namespace llvm; Index: llvm/trunk/lib/Transforms/Scalar/LoopStrengthReduce.cpp =================================================================== --- llvm/trunk/lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ llvm/trunk/lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -59,16 +59,15 @@ #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/Hashing.h" #include "llvm/ADT/PointerIntPair.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" -#include "llvm/ADT/STLExtras.h" #include "llvm/Analysis/IVUsers.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/LoopPass.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/ScalarEvolutionExpander.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h" @@ -80,13 +79,13 @@ #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/GlobalValue.h" +#include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" -#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/Module.h" #include "llvm/IR/OperandTraits.h" #include "llvm/IR/Operator.h" -#include "llvm/IR/Module.h" #include "llvm/IR/Type.h" #include "llvm/IR/Value.h" #include "llvm/IR/ValueHandle.h" @@ -99,6 +98,7 @@ #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Local.h" #include Index: llvm/trunk/lib/Transforms/Scalar/LoopUnrollPass.cpp =================================================================== --- llvm/trunk/lib/Transforms/Scalar/LoopUnrollPass.cpp +++ llvm/trunk/lib/Transforms/Scalar/LoopUnrollPass.cpp @@ -19,7 +19,6 @@ #include "llvm/Analysis/GlobalsModRef.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/LoopPass.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/Analysis/LoopUnrollAnalyzer.h" #include "llvm/Analysis/OptimizationDiagnosticInfo.h" #include "llvm/Analysis/ScalarEvolution.h" @@ -33,6 +32,7 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" #include "llvm/Transforms/Utils/LoopUtils.h" #include "llvm/Transforms/Utils/UnrollLoop.h" #include Index: llvm/trunk/tools/opt/NewPMDriver.cpp =================================================================== --- llvm/trunk/tools/opt/NewPMDriver.cpp +++ llvm/trunk/tools/opt/NewPMDriver.cpp @@ -17,7 +17,6 @@ #include "llvm/ADT/StringRef.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/CGSCCPassManager.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/Bitcode/BitcodeWriterPass.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/IRPrintingPasses.h" @@ -30,6 +29,7 @@ #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ToolOutputFile.h" #include "llvm/Target/TargetMachine.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" using namespace llvm; using namespace opt_tool; Index: llvm/trunk/unittests/Analysis/CMakeLists.txt =================================================================== --- llvm/trunk/unittests/Analysis/CMakeLists.txt +++ llvm/trunk/unittests/Analysis/CMakeLists.txt @@ -13,7 +13,6 @@ CFGTest.cpp CGSCCPassManagerTest.cpp 
LazyCallGraphTest.cpp - LoopPassManagerTest.cpp MemoryBuiltinsTest.cpp ScalarEvolutionTest.cpp TBAATest.cpp Index: llvm/trunk/unittests/Analysis/LoopPassManagerTest.cpp =================================================================== --- llvm/trunk/unittests/Analysis/LoopPassManagerTest.cpp +++ llvm/trunk/unittests/Analysis/LoopPassManagerTest.cpp @@ -1,1438 +0,0 @@ -//===- llvm/unittest/Analysis/LoopPassManagerTest.cpp - LPM tests ---------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// - -#include "llvm/Analysis/AliasAnalysis.h" -#include "llvm/Analysis/AssumptionCache.h" -#include "llvm/Analysis/LoopPassManager.h" -#include "llvm/Analysis/ScalarEvolution.h" -#include "llvm/Analysis/TargetLibraryInfo.h" -#include "llvm/Analysis/TargetTransformInfo.h" -#include "llvm/AsmParser/Parser.h" -#include "llvm/IR/Dominators.h" -#include "llvm/IR/Function.h" -#include "llvm/IR/LLVMContext.h" -#include "llvm/IR/Module.h" -#include "llvm/IR/PassManager.h" -#include "llvm/Support/SourceMgr.h" -#include "gmock/gmock.h" -#include "gtest/gtest.h" - -using namespace llvm; - -namespace { - -using testing::DoDefault; -using testing::Return; -using testing::Expectation; -using testing::Invoke; -using testing::InvokeWithoutArgs; -using testing::_; - -template , - typename... ExtraArgTs> -class MockAnalysisHandleBase { -public: - class Analysis : public AnalysisInfoMixin { - friend AnalysisInfoMixin; - friend MockAnalysisHandleBase; - static AnalysisKey Key; - - DerivedT *Handle; - - Analysis(DerivedT &Handle) : Handle(&Handle) {} - - public: - class Result { - friend MockAnalysisHandleBase; - - DerivedT *Handle; - - Result(DerivedT &Handle) : Handle(&Handle) {} - - public: - // Forward invalidation events to the mock handle. - bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA, - typename AnalysisManagerT::Invalidator &Inv) { - return Handle->invalidate(IR, PA, Inv); - } - }; - - Result run(IRUnitT &IR, AnalysisManagerT &AM, ExtraArgTs... ExtraArgs) { - return Handle->run(IR, AM, ExtraArgs...); - } - }; - - Analysis getAnalysis() { return Analysis(static_cast(*this)); } - typename Analysis::Result getResult() { - return typename Analysis::Result(static_cast(*this)); - } - -protected: - // FIXME: MSVC seems unable to handle a lambda argument to Invoke from within - // the template, so we use a boring static function. - static bool invalidateCallback(IRUnitT &IR, const PreservedAnalyses &PA, - typename AnalysisManagerT::Invalidator &Inv) { - auto PAC = PA.template getChecker(); - return !PAC.preserved() && - !PAC.template preservedSet>(); - } - - /// Derived classes should call this in their constructor to set up default - /// mock actions. (We can't do this in our constructor because this has to - /// run after the DerivedT is constructed.) - void setDefaults() { - ON_CALL(static_cast(*this), - run(_, _, testing::Matcher(_)...)) - .WillByDefault(Return(this->getResult())); - ON_CALL(static_cast(*this), invalidate(_, _, _)) - .WillByDefault(Invoke(&invalidateCallback)); - } -}; - -template -AnalysisKey MockAnalysisHandleBase::Analysis::Key; - -/// Mock handle for loop analyses. -/// -/// This is provided as a template accepting an (optional) integer. 
Because -/// analyses are identified and queried by type, this allows constructing -/// multiple handles with distinctly typed nested 'Analysis' types that can be -/// registered and queried. If you want to register multiple loop analysis -/// passes, you'll need to instantiate this type with different values for I. -/// For example: -/// -/// MockLoopAnalysisHandleTemplate<0> h0; -/// MockLoopAnalysisHandleTemplate<1> h1; -/// typedef decltype(h0)::Analysis Analysis0; -/// typedef decltype(h1)::Analysis Analysis1; -template (-1)> -struct MockLoopAnalysisHandleTemplate - : MockAnalysisHandleBase, Loop, - LoopAnalysisManager, - LoopStandardAnalysisResults &> { - typedef typename MockLoopAnalysisHandleTemplate::Analysis Analysis; - - MOCK_METHOD3_T(run, typename Analysis::Result(Loop &, LoopAnalysisManager &, - LoopStandardAnalysisResults &)); - - MOCK_METHOD3_T(invalidate, bool(Loop &, const PreservedAnalyses &, - LoopAnalysisManager::Invalidator &)); - - MockLoopAnalysisHandleTemplate() { this->setDefaults(); } -}; - -typedef MockLoopAnalysisHandleTemplate<> MockLoopAnalysisHandle; - -struct MockFunctionAnalysisHandle - : MockAnalysisHandleBase { - MOCK_METHOD2(run, Analysis::Result(Function &, FunctionAnalysisManager &)); - - MOCK_METHOD3(invalidate, bool(Function &, const PreservedAnalyses &, - FunctionAnalysisManager::Invalidator &)); - - MockFunctionAnalysisHandle() { setDefaults(); } -}; - -template , - typename... ExtraArgTs> -class MockPassHandleBase { -public: - class Pass : public PassInfoMixin { - friend MockPassHandleBase; - - DerivedT *Handle; - - Pass(DerivedT &Handle) : Handle(&Handle) {} - - public: - PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM, - ExtraArgTs... ExtraArgs) { - return Handle->run(IR, AM, ExtraArgs...); - } - }; - - Pass getPass() { return Pass(static_cast(*this)); } - -protected: - /// Derived classes should call this in their constructor to set up default - /// mock actions. (We can't do this in our constructor because this has to - /// run after the DerivedT is constructed.) - void setDefaults() { - ON_CALL(static_cast(*this), - run(_, _, testing::Matcher(_)...)) - .WillByDefault(Return(PreservedAnalyses::all())); - } -}; - -struct MockLoopPassHandle - : MockPassHandleBase { - MOCK_METHOD4(run, - PreservedAnalyses(Loop &, LoopAnalysisManager &, - LoopStandardAnalysisResults &, LPMUpdater &)); - MockLoopPassHandle() { setDefaults(); } -}; - -struct MockFunctionPassHandle - : MockPassHandleBase { - MOCK_METHOD2(run, PreservedAnalyses(Function &, FunctionAnalysisManager &)); - - MockFunctionPassHandle() { setDefaults(); } -}; - -struct MockModulePassHandle : MockPassHandleBase { - MOCK_METHOD2(run, PreservedAnalyses(Module &, ModuleAnalysisManager &)); - - MockModulePassHandle() { setDefaults(); } -}; - -/// Define a custom matcher for objects which support a 'getName' method -/// returning a StringRef. -/// -/// LLVM often has IR objects or analysis objects which expose a StringRef name -/// and in tests it is convenient to match these by name for readability. This -/// matcher supports any type exposing a getName() method of this form. -/// -/// It should be used as: -/// -/// HasName("my_function") -/// -/// No namespace or other qualification is required. -MATCHER_P(HasName, Name, "") { - // The matcher's name and argument are printed in the case of failure, but we - // also want to print out the name of the argument. This uses an implicitly - // avaiable std::ostream, so we have to construct a std::string. 
- *result_listener << "has name '" << arg.getName().str() << "'"; - return Name == arg.getName(); -} - -std::unique_ptr parseIR(LLVMContext &C, const char *IR) { - SMDiagnostic Err; - return parseAssemblyString(IR, Err, C); -} - -class LoopPassManagerTest : public ::testing::Test { -protected: - LLVMContext Context; - std::unique_ptr M; - - LoopAnalysisManager LAM; - FunctionAnalysisManager FAM; - ModuleAnalysisManager MAM; - - MockLoopAnalysisHandle MLAHandle; - MockLoopPassHandle MLPHandle; - MockFunctionPassHandle MFPHandle; - MockModulePassHandle MMPHandle; - - static PreservedAnalyses - getLoopAnalysisResult(Loop &L, LoopAnalysisManager &AM, - LoopStandardAnalysisResults &AR, LPMUpdater &) { - (void)AM.getResult(L, AR); - return PreservedAnalyses::all(); - }; - -public: - LoopPassManagerTest() - : M(parseIR(Context, "define void @f() {\n" - "entry:\n" - " br label %loop.0\n" - "loop.0:\n" - " br i1 undef, label %loop.0.0, label %end\n" - "loop.0.0:\n" - " br i1 undef, label %loop.0.0, label %loop.0.1\n" - "loop.0.1:\n" - " br i1 undef, label %loop.0.1, label %loop.0\n" - "end:\n" - " ret void\n" - "}\n" - "\n" - "define void @g() {\n" - "entry:\n" - " br label %loop.g.0\n" - "loop.g.0:\n" - " br i1 undef, label %loop.g.0, label %end\n" - "end:\n" - " ret void\n" - "}\n")), - LAM(true), FAM(true), MAM(true) { - // Register our mock analysis. - LAM.registerPass([&] { return MLAHandle.getAnalysis(); }); - - // We need DominatorTreeAnalysis for LoopAnalysis. - FAM.registerPass([&] { return DominatorTreeAnalysis(); }); - FAM.registerPass([&] { return LoopAnalysis(); }); - // We also allow loop passes to assume a set of other analyses and so need - // those. - FAM.registerPass([&] { return AAManager(); }); - FAM.registerPass([&] { return AssumptionAnalysis(); }); - FAM.registerPass([&] { return ScalarEvolutionAnalysis(); }); - FAM.registerPass([&] { return TargetLibraryAnalysis(); }); - FAM.registerPass([&] { return TargetIRAnalysis(); }); - - // Cross-register proxies. - LAM.registerPass([&] { return FunctionAnalysisManagerLoopProxy(FAM); }); - FAM.registerPass([&] { return LoopAnalysisManagerFunctionProxy(LAM); }); - FAM.registerPass([&] { return ModuleAnalysisManagerFunctionProxy(MAM); }); - MAM.registerPass([&] { return FunctionAnalysisManagerModuleProxy(FAM); }); - } -}; - -TEST_F(LoopPassManagerTest, Basic) { - ModulePassManager MPM(true); - ::testing::InSequence MakeExpectationsSequenced; - - // First we just visit all the loops in all the functions and get their - // analysis results. This will run the analysis a total of four times, - // once for each loop. - EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.g.0"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _)); - // Wire the loop pass through pass managers into the module pipeline. 
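For comparison with the mock wiring that follows, the same nesting with real loop passes and the relocated header looks roughly like this; the particular passes chosen are illustrative:

  #include "llvm/Transforms/Scalar/IndVarSimplify.h"
  #include "llvm/Transforms/Scalar/LICM.h"
  #include "llvm/Transforms/Scalar/LoopPassManager.h"
  #include "llvm/Transforms/Scalar/LoopRotation.h"

  using namespace llvm;

  FunctionPassManager buildLoopPipeline(bool DebugLogging) {
    LoopPassManager LPM(DebugLogging);
    LPM.addPass(LoopRotatePass());
    LPM.addPass(LICMPass());
    LPM.addPass(IndVarSimplifyPass());

    FunctionPassManager FPM(DebugLogging);
    // The adaptor supplies LoopStandardAnalysisResults and walks the loop nest
    // innermost-first, the same order the expectations just above encode.
    FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));
    return FPM;
  }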
- { - LoopPassManager LPM(true); - LPM.addPass(MLPHandle.getPass()); - FunctionPassManager FPM(true); - FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM))); - MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); - } - - // Next we run two passes over the loops. The first one invalidates the - // analyses for one loop, the second ones try to get the analysis results. - // This should force only one analysis to re-run within the loop PM, but will - // also invalidate everything after the loop pass manager finishes. - EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) - .WillOnce(DoDefault()) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) - .WillOnce(InvokeWithoutArgs([] { return PreservedAnalyses::none(); })) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) - .WillOnce(DoDefault()) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLPHandle, run(HasName("loop.g.0"), _, _, _)) - .WillOnce(DoDefault()) - .WillOnce(Invoke(getLoopAnalysisResult)); - // Wire two loop pass runs into the module pipeline. - { - LoopPassManager LPM(true); - LPM.addPass(MLPHandle.getPass()); - LPM.addPass(MLPHandle.getPass()); - FunctionPassManager FPM(true); - FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM))); - MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); - } - - // And now run the pipeline across the module. - MPM.run(*M, MAM); -} - -TEST_F(LoopPassManagerTest, FunctionPassInvalidationOfLoopAnalyses) { - ModulePassManager MPM(true); - FunctionPassManager FPM(true); - // We process each function completely in sequence. - ::testing::Sequence FSequence, GSequence; - - // First, force the analysis result to be computed for each loop. - EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)) - .InSequence(FSequence) - .WillOnce(DoDefault()); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)) - .InSequence(FSequence) - .WillOnce(DoDefault()); - EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)) - .InSequence(FSequence) - .WillOnce(DoDefault()); - EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _)) - .InSequence(GSequence) - .WillOnce(DoDefault()); - FPM.addPass(createFunctionToLoopPassAdaptor( - RequireAnalysisLoopPass())); - - // No need to re-run if we require again from a fresh loop pass manager. - FPM.addPass(createFunctionToLoopPassAdaptor( - RequireAnalysisLoopPass())); - - // For 'f', preserve most things but not the specific loop analyses. - EXPECT_CALL(MFPHandle, run(HasName("f"), _)) - .InSequence(FSequence) - .WillOnce(Return(getLoopPassPreservedAnalyses())); - EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.0"), _, _)) - .InSequence(FSequence) - .WillOnce(DoDefault()); - // On one loop, skip the invalidation (as though we did an internal update). - EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.1"), _, _)) - .InSequence(FSequence) - .WillOnce(Return(false)); - EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0"), _, _)) - .InSequence(FSequence) - .WillOnce(DoDefault()); - // Now two loops still have to be recomputed. - EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)) - .InSequence(FSequence) - .WillOnce(DoDefault()); - EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)) - .InSequence(FSequence) - .WillOnce(DoDefault()); - // Preserve things in the second function to ensure invalidation remains - // isolated to one function. 
- EXPECT_CALL(MFPHandle, run(HasName("g"), _)) - .InSequence(GSequence) - .WillOnce(DoDefault()); - FPM.addPass(MFPHandle.getPass()); - FPM.addPass(createFunctionToLoopPassAdaptor( - RequireAnalysisLoopPass())); - - EXPECT_CALL(MFPHandle, run(HasName("f"), _)) - .InSequence(FSequence) - .WillOnce(DoDefault()); - // For 'g', fail to preserve anything, causing the loops themselves to be - // cleared. We don't get an invalidation event here as the loop is gone, but - // we should still have to recompute the analysis. - EXPECT_CALL(MFPHandle, run(HasName("g"), _)) - .InSequence(GSequence) - .WillOnce(Return(PreservedAnalyses::none())); - EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _)) - .InSequence(GSequence) - .WillOnce(DoDefault()); - FPM.addPass(MFPHandle.getPass()); - FPM.addPass(createFunctionToLoopPassAdaptor( - RequireAnalysisLoopPass())); - - MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); - - // Verify with a separate function pass run that we didn't mess up 'f's - // cache. No analysis runs should be necessary here. - MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( - RequireAnalysisLoopPass()))); - - MPM.run(*M, MAM); -} - -TEST_F(LoopPassManagerTest, ModulePassInvalidationOfLoopAnalyses) { - ModulePassManager MPM(true); - ::testing::InSequence MakeExpectationsSequenced; - - // First, force the analysis result to be computed for each loop. - EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _)); - MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( - RequireAnalysisLoopPass()))); - - // Walking all the way out and all the way back in doesn't re-run the - // analysis. - MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( - RequireAnalysisLoopPass()))); - - // But a module pass that doesn't preserve the actual mock loop analysis - // invalidates all the way down and forces recomputing. - EXPECT_CALL(MMPHandle, run(_, _)).WillOnce(InvokeWithoutArgs([] { - auto PA = getLoopPassPreservedAnalyses(); - PA.preserve(); - return PA; - })); - // All the loop analyses from both functions get invalidated before we - // recompute anything. - EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.0"), _, _)); - // On one loop, again skip the invalidation (as though we did an internal - // update). - EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.1"), _, _)) - .WillOnce(Return(false)); - EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0"), _, _)); - EXPECT_CALL(MLAHandle, invalidate(HasName("loop.g.0"), _, _)); - // Now all but one of the loops gets re-analyzed. - EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _)); - MPM.addPass(MMPHandle.getPass()); - MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( - RequireAnalysisLoopPass()))); - - // Verify that the cached values persist. - MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( - RequireAnalysisLoopPass()))); - - // Now we fail to preserve the loop analysis and observe that the loop - // analyses are cleared (so no invalidation event) as the loops themselves - // are no longer valid. 
- EXPECT_CALL(MMPHandle, run(_, _)).WillOnce(InvokeWithoutArgs([] { - auto PA = PreservedAnalyses::none(); - PA.preserve(); - return PA; - })); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _)); - MPM.addPass(MMPHandle.getPass()); - MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( - RequireAnalysisLoopPass()))); - - // Verify that the cached values persist. - MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( - RequireAnalysisLoopPass()))); - - // Next, check that even if we preserve everything within the function itelf, - // if the function's module pass proxy isn't preserved and the potential set - // of functions changes, the clear reaches the loop analyses as well. This - // will again trigger re-runs but not invalidation events. - EXPECT_CALL(MMPHandle, run(_, _)).WillOnce(InvokeWithoutArgs([] { - auto PA = PreservedAnalyses::none(); - PA.preserveSet>(); - PA.preserveSet>(); - return PA; - })); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _)); - MPM.addPass(MMPHandle.getPass()); - MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( - RequireAnalysisLoopPass()))); - - MPM.run(*M, MAM); -} - -// Test that if any of the bundled analyses provided in the LPM's signature -// become invalid, the analysis proxy itself becomes invalid and we clear all -// loop analysis results. -TEST_F(LoopPassManagerTest, InvalidationOfBundledAnalyses) { - ModulePassManager MPM(true); - FunctionPassManager FPM(true); - ::testing::InSequence MakeExpectationsSequenced; - - // First, force the analysis result to be computed for each loop. - EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); - FPM.addPass(createFunctionToLoopPassAdaptor( - RequireAnalysisLoopPass())); - - // No need to re-run if we require again from a fresh loop pass manager. - FPM.addPass(createFunctionToLoopPassAdaptor( - RequireAnalysisLoopPass())); - - // Preserving everything but the loop analyses themselves results in - // invalidation and running. - EXPECT_CALL(MFPHandle, run(HasName("f"), _)) - .WillOnce(Return(getLoopPassPreservedAnalyses())); - EXPECT_CALL(MLAHandle, invalidate(_, _, _)).Times(3); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); - FPM.addPass(MFPHandle.getPass()); - FPM.addPass(createFunctionToLoopPassAdaptor( - RequireAnalysisLoopPass())); - - // The rest don't invalidate analyses, they only trigger re-runs because we - // clear the cache completely. - EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] { - auto PA = PreservedAnalyses::none(); - // Not preserving `AAManager`. 
- PA.preserve(); - PA.preserve(); - PA.preserve(); - PA.preserve(); - PA.preserve(); - return PA; - })); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); - FPM.addPass(MFPHandle.getPass()); - FPM.addPass(createFunctionToLoopPassAdaptor( - RequireAnalysisLoopPass())); - - EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] { - auto PA = PreservedAnalyses::none(); - PA.preserve(); - // Not preserving `AssumptionAnalysis`. - PA.preserve(); - PA.preserve(); - PA.preserve(); - PA.preserve(); - return PA; - })); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); - FPM.addPass(MFPHandle.getPass()); - FPM.addPass(createFunctionToLoopPassAdaptor( - RequireAnalysisLoopPass())); - - EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] { - auto PA = PreservedAnalyses::none(); - PA.preserve(); - PA.preserve(); - // Not preserving `DominatorTreeAnalysis`. - PA.preserve(); - PA.preserve(); - PA.preserve(); - return PA; - })); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); - FPM.addPass(MFPHandle.getPass()); - FPM.addPass(createFunctionToLoopPassAdaptor( - RequireAnalysisLoopPass())); - - EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] { - auto PA = PreservedAnalyses::none(); - PA.preserve(); - PA.preserve(); - PA.preserve(); - // Not preserving the `LoopAnalysis`. - PA.preserve(); - PA.preserve(); - return PA; - })); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); - FPM.addPass(MFPHandle.getPass()); - FPM.addPass(createFunctionToLoopPassAdaptor( - RequireAnalysisLoopPass())); - - EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] { - auto PA = PreservedAnalyses::none(); - PA.preserve(); - PA.preserve(); - PA.preserve(); - PA.preserve(); - // Not preserving the `LoopAnalysisManagerFunctionProxy`. - PA.preserve(); - return PA; - })); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); - FPM.addPass(MFPHandle.getPass()); - FPM.addPass(createFunctionToLoopPassAdaptor( - RequireAnalysisLoopPass())); - - EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] { - auto PA = PreservedAnalyses::none(); - PA.preserve(); - PA.preserve(); - PA.preserve(); - PA.preserve(); - PA.preserve(); - // Not preserving `ScalarEvolutionAnalysis`. - return PA; - })); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); - FPM.addPass(MFPHandle.getPass()); - FPM.addPass(createFunctionToLoopPassAdaptor( - RequireAnalysisLoopPass())); - - // After all the churn on 'f', we'll compute the loop analysis results for - // 'g' once with a requires pass and then run our mock pass over g a bunch - // but just get cached results each time. 
- EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _)); - EXPECT_CALL(MFPHandle, run(HasName("g"), _)).Times(7); - - MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); - MPM.run(*M, MAM); -} - -TEST_F(LoopPassManagerTest, IndirectInvalidation) { - // We need two distinct analysis types and handles. - enum { A, B }; - MockLoopAnalysisHandleTemplate MLAHandleA; - MockLoopAnalysisHandleTemplate MLAHandleB; - LAM.registerPass([&] { return MLAHandleA.getAnalysis(); }); - LAM.registerPass([&] { return MLAHandleB.getAnalysis(); }); - typedef decltype(MLAHandleA)::Analysis AnalysisA; - typedef decltype(MLAHandleB)::Analysis AnalysisB; - - // Set up AnalysisA to depend on our AnalysisB. For testing purposes we just - // need to get the AnalysisB results in AnalysisA's run method and check if - // AnalysisB gets invalidated in AnalysisA's invalidate method. - ON_CALL(MLAHandleA, run(_, _, _)) - .WillByDefault(Invoke([&](Loop &L, LoopAnalysisManager &AM, - LoopStandardAnalysisResults &AR) { - (void)AM.getResult(L, AR); - return MLAHandleA.getResult(); - })); - ON_CALL(MLAHandleA, invalidate(_, _, _)) - .WillByDefault(Invoke([](Loop &L, const PreservedAnalyses &PA, - LoopAnalysisManager::Invalidator &Inv) { - auto PAC = PA.getChecker(); - return !(PAC.preserved() || PAC.preservedSet>()) || - Inv.invalidate(L, PA); - })); - - ::testing::InSequence MakeExpectationsSequenced; - - // Compute the analyses across all of 'f' first. - EXPECT_CALL(MLAHandleA, run(HasName("loop.0.0"), _, _)); - EXPECT_CALL(MLAHandleB, run(HasName("loop.0.0"), _, _)); - EXPECT_CALL(MLAHandleA, run(HasName("loop.0.1"), _, _)); - EXPECT_CALL(MLAHandleB, run(HasName("loop.0.1"), _, _)); - EXPECT_CALL(MLAHandleA, run(HasName("loop.0"), _, _)); - EXPECT_CALL(MLAHandleB, run(HasName("loop.0"), _, _)); - - // Now we invalidate AnalysisB (but not AnalysisA) for one of the loops and - // preserve everything for the rest. This in turn triggers that one loop to - // recompute both AnalysisB *and* AnalysisA if indirect invalidation is - // working. - EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) - .WillOnce(InvokeWithoutArgs([] { - auto PA = getLoopPassPreservedAnalyses(); - // Specifically preserve AnalysisA so that it would survive if it - // didn't depend on AnalysisB. - PA.preserve(); - return PA; - })); - // It happens that AnalysisB is invalidated first. That shouldn't matter - // though, and we should still call AnalysisA's invalidation. - EXPECT_CALL(MLAHandleB, invalidate(HasName("loop.0.0"), _, _)); - EXPECT_CALL(MLAHandleA, invalidate(HasName("loop.0.0"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) - .WillOnce(Invoke([](Loop &L, LoopAnalysisManager &AM, - LoopStandardAnalysisResults &AR, LPMUpdater &) { - (void)AM.getResult(L, AR); - return PreservedAnalyses::all(); - })); - EXPECT_CALL(MLAHandleA, run(HasName("loop.0.0"), _, _)); - EXPECT_CALL(MLAHandleB, run(HasName("loop.0.0"), _, _)); - // The rest of the loops should run and get cached results. 
- EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) - .Times(2) - .WillRepeatedly(Invoke([](Loop &L, LoopAnalysisManager &AM, - LoopStandardAnalysisResults &AR, LPMUpdater &) { - (void)AM.getResult(L, AR); - return PreservedAnalyses::all(); - })); - EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) - .Times(2) - .WillRepeatedly(Invoke([](Loop &L, LoopAnalysisManager &AM, - LoopStandardAnalysisResults &AR, LPMUpdater &) { - (void)AM.getResult(L, AR); - return PreservedAnalyses::all(); - })); - - // The run over 'g' should be boring, with us just computing the analyses once - // up front and then running loop passes and getting cached results. - EXPECT_CALL(MLAHandleA, run(HasName("loop.g.0"), _, _)); - EXPECT_CALL(MLAHandleB, run(HasName("loop.g.0"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.g.0"), _, _, _)) - .Times(2) - .WillRepeatedly(Invoke([](Loop &L, LoopAnalysisManager &AM, - LoopStandardAnalysisResults &AR, LPMUpdater &) { - (void)AM.getResult(L, AR); - return PreservedAnalyses::all(); - })); - - // Build the pipeline and run it. - ModulePassManager MPM(true); - FunctionPassManager FPM(true); - FPM.addPass( - createFunctionToLoopPassAdaptor(RequireAnalysisLoopPass())); - LoopPassManager LPM(true); - LPM.addPass(MLPHandle.getPass()); - LPM.addPass(MLPHandle.getPass()); - FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM))); - MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); - MPM.run(*M, MAM); -} - -TEST_F(LoopPassManagerTest, IndirectOuterPassInvalidation) { - typedef decltype(MLAHandle)::Analysis LoopAnalysis; - - MockFunctionAnalysisHandle MFAHandle; - FAM.registerPass([&] { return MFAHandle.getAnalysis(); }); - typedef decltype(MFAHandle)::Analysis FunctionAnalysis; - - // Set up the loop analysis to depend on both the function and module - // analysis. - ON_CALL(MLAHandle, run(_, _, _)) - .WillByDefault(Invoke([&](Loop &L, LoopAnalysisManager &AM, - LoopStandardAnalysisResults &AR) { - auto &FAMP = AM.getResult(L, AR); - auto &FAM = FAMP.getManager(); - Function &F = *L.getHeader()->getParent(); - if (auto *FA = FAM.getCachedResult(F)) - FAMP.registerOuterAnalysisInvalidation(); - return MLAHandle.getResult(); - })); - - ::testing::InSequence MakeExpectationsSequenced; - - // Compute the analyses across all of 'f' first. - EXPECT_CALL(MFPHandle, run(HasName("f"), _)) - .WillOnce(Invoke([](Function &F, FunctionAnalysisManager &AM) { - // Force the computing of the function analysis so it is available in - // this function. - (void)AM.getResult(F); - return PreservedAnalyses::all(); - })); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); - - // Now invalidate the function analysis but preserve the loop analyses. - // This should trigger immediate invalidation of the loop analyses, despite - // the fact that they were preserved. - EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] { - auto PA = getLoopPassPreservedAnalyses(); - PA.preserveSet>(); - return PA; - })); - EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.0"), _, _)); - EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.1"), _, _)); - EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0"), _, _)); - - // And re-running a requires pass recomputes them. 
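Outside of the mocks, the registration exercised above takes roughly the following shape in a real loop analysis; PostDominatorTreeAnalysis stands in for any function-level analysis that is not part of the standard loop results, and the analysis name is hypothetical:

  #include "llvm/Analysis/LoopAnalysisManager.h"
  #include "llvm/Analysis/LoopInfo.h"
  #include "llvm/Analysis/PostDominators.h"
  #include "llvm/IR/PassManager.h"

  namespace {
  class PostDomDependentLoopAnalysis
      : public llvm::AnalysisInfoMixin<PostDomDependentLoopAnalysis> {
    friend llvm::AnalysisInfoMixin<PostDomDependentLoopAnalysis>;
    static llvm::AnalysisKey Key;

  public:
    struct Result {
      bool UsedPostDomTree = false;
    };

    Result run(llvm::Loop &L, llvm::LoopAnalysisManager &AM,
               llvm::LoopStandardAnalysisResults &AR) {
      auto &FAMP =
          AM.getResult<llvm::FunctionAnalysisManagerLoopProxy>(L, AR);
      auto &FAM = FAMP.getManager();
      llvm::Function &F = *L.getHeader()->getParent();
      Result R;
      if (FAM.getCachedResult<llvm::PostDominatorTreeAnalysis>(F)) {
        R.UsedPostDomTree = true;
        // Without this call the proxy has no way to know that invalidating the
        // function's PostDominatorTree must also abandon this loop result.
        FAMP.registerOuterAnalysisInvalidation<
            llvm::PostDominatorTreeAnalysis, PostDomDependentLoopAnalysis>();
      }
      return R;
    }
  };
  llvm::AnalysisKey PostDomDependentLoopAnalysis::Key;
  } // namespace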
- EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); - - // When we run over 'g' we don't populate the cache with the function - // analysis. - EXPECT_CALL(MFPHandle, run(HasName("g"), _)) - .WillOnce(Return(PreservedAnalyses::all())); - EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _)); - - // Which means that no extra invalidation occurs and cached values are used. - EXPECT_CALL(MFPHandle, run(HasName("g"), _)).WillOnce(InvokeWithoutArgs([] { - auto PA = getLoopPassPreservedAnalyses(); - PA.preserveSet>(); - return PA; - })); - - // Build the pipeline and run it. - ModulePassManager MPM(true); - FunctionPassManager FPM(true); - FPM.addPass(MFPHandle.getPass()); - FPM.addPass( - createFunctionToLoopPassAdaptor(RequireAnalysisLoopPass())); - FPM.addPass(MFPHandle.getPass()); - FPM.addPass( - createFunctionToLoopPassAdaptor(RequireAnalysisLoopPass())); - MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); - MPM.run(*M, MAM); -} - -TEST_F(LoopPassManagerTest, LoopChildInsertion) { - // Super boring module with three loops in a single loop nest. - M = parseIR(Context, "define void @f() {\n" - "entry:\n" - " br label %loop.0\n" - "loop.0:\n" - " br i1 undef, label %loop.0.0, label %end\n" - "loop.0.0:\n" - " br i1 undef, label %loop.0.0, label %loop.0.1\n" - "loop.0.1:\n" - " br i1 undef, label %loop.0.1, label %loop.0.2\n" - "loop.0.2:\n" - " br i1 undef, label %loop.0.2, label %loop.0\n" - "end:\n" - " ret void\n" - "}\n"); - - // Build up variables referring into the IR so we can rewrite it below - // easily. - Function &F = *M->begin(); - ASSERT_THAT(F, HasName("f")); - auto BBI = F.begin(); - BasicBlock &EntryBB = *BBI++; - ASSERT_THAT(EntryBB, HasName("entry")); - BasicBlock &Loop0BB = *BBI++; - ASSERT_THAT(Loop0BB, HasName("loop.0")); - BasicBlock &Loop00BB = *BBI++; - ASSERT_THAT(Loop00BB, HasName("loop.0.0")); - BasicBlock &Loop01BB = *BBI++; - ASSERT_THAT(Loop01BB, HasName("loop.0.1")); - BasicBlock &Loop02BB = *BBI++; - ASSERT_THAT(Loop02BB, HasName("loop.0.2")); - BasicBlock &EndBB = *BBI++; - ASSERT_THAT(EndBB, HasName("end")); - ASSERT_THAT(BBI, F.end()); - - // Build the pass managers and register our pipeline. We build a single loop - // pass pipeline consisting of three mock pass runs over each loop. After - // this we run both domtree and loop verification passes to make sure that - // the IR remained valid during our mutations. - ModulePassManager MPM(true); - FunctionPassManager FPM(true); - LoopPassManager LPM(true); - LPM.addPass(MLPHandle.getPass()); - LPM.addPass(MLPHandle.getPass()); - LPM.addPass(MLPHandle.getPass()); - FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM))); - FPM.addPass(DominatorTreeVerifierPass()); - FPM.addPass(LoopVerifierPass()); - MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); - - // All the visit orders are deterministic, so we use simple fully order - // expectations. - ::testing::InSequence MakeExpectationsSequenced; - - // We run loop passes three times over each of the loops. 
- EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) - .Times(2) - .WillRepeatedly(Invoke(getLoopAnalysisResult)); - - EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); - - // When running over the middle loop, the second run inserts two new child - // loops, inserting them and itself into the worklist. - BasicBlock *NewLoop010BB; - EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) - .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM, - LoopStandardAnalysisResults &AR, - LPMUpdater &Updater) { - auto *NewLoop = new Loop(); - L.addChildLoop(NewLoop); - NewLoop010BB = BasicBlock::Create(Context, "loop.0.1.0", &F, &Loop02BB); - BranchInst::Create(&Loop01BB, NewLoop010BB, - UndefValue::get(Type::getInt1Ty(Context)), - NewLoop010BB); - Loop01BB.getTerminator()->replaceUsesOfWith(&Loop01BB, NewLoop010BB); - AR.DT.addNewBlock(NewLoop010BB, &Loop01BB); - NewLoop->addBasicBlockToLoop(NewLoop010BB, AR.LI); - Updater.addChildLoops({NewLoop}); - return PreservedAnalyses::all(); - })); - - // We should immediately drop down to fully visit the new inner loop. - EXPECT_CALL(MLPHandle, run(HasName("loop.0.1.0"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.1.0"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0.1.0"), _, _, _)) - .Times(2) - .WillRepeatedly(Invoke(getLoopAnalysisResult)); - - // After visiting the inner loop, we should re-visit the second loop - // reflecting its new loop nest structure. - EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - - // In the second run over the middle loop after we've visited the new child, - // we add another child to check that we can repeatedly add children, and add - // children to a loop that already has children. - BasicBlock *NewLoop011BB; - EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) - .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM, - LoopStandardAnalysisResults &AR, - LPMUpdater &Updater) { - auto *NewLoop = new Loop(); - L.addChildLoop(NewLoop); - NewLoop011BB = BasicBlock::Create(Context, "loop.0.1.1", &F, &Loop02BB); - BranchInst::Create(&Loop01BB, NewLoop011BB, - UndefValue::get(Type::getInt1Ty(Context)), - NewLoop011BB); - NewLoop010BB->getTerminator()->replaceUsesOfWith(&Loop01BB, - NewLoop011BB); - AR.DT.addNewBlock(NewLoop011BB, NewLoop010BB); - NewLoop->addBasicBlockToLoop(NewLoop011BB, AR.LI); - Updater.addChildLoops({NewLoop}); - return PreservedAnalyses::all(); - })); - - // Again, we should immediately drop down to visit the new, unvisited child - // loop. We don't need to revisit the other child though. - EXPECT_CALL(MLPHandle, run(HasName("loop.0.1.1"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.1.1"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0.1.1"), _, _, _)) - .Times(2) - .WillRepeatedly(Invoke(getLoopAnalysisResult)); - - // And now we should pop back up to the second loop and do a full pipeline of - // three passes on its current form. 
- EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) - .Times(3) - .WillRepeatedly(Invoke(getLoopAnalysisResult)); - - EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.2"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _)) - .Times(2) - .WillRepeatedly(Invoke(getLoopAnalysisResult)); - - EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) - .Times(2) - .WillRepeatedly(Invoke(getLoopAnalysisResult)); - - // Now that all the expected actions are registered, run the pipeline over - // our module. All of our expectations are verified when the test finishes. - MPM.run(*M, MAM); -} - -TEST_F(LoopPassManagerTest, LoopPeerInsertion) { - // Super boring module with two loop nests and loop nest with two child - // loops. - M = parseIR(Context, "define void @f() {\n" - "entry:\n" - " br label %loop.0\n" - "loop.0:\n" - " br i1 undef, label %loop.0.0, label %loop.2\n" - "loop.0.0:\n" - " br i1 undef, label %loop.0.0, label %loop.0.2\n" - "loop.0.2:\n" - " br i1 undef, label %loop.0.2, label %loop.0\n" - "loop.2:\n" - " br i1 undef, label %loop.2, label %end\n" - "end:\n" - " ret void\n" - "}\n"); - - // Build up variables referring into the IR so we can rewrite it below - // easily. - Function &F = *M->begin(); - ASSERT_THAT(F, HasName("f")); - auto BBI = F.begin(); - BasicBlock &EntryBB = *BBI++; - ASSERT_THAT(EntryBB, HasName("entry")); - BasicBlock &Loop0BB = *BBI++; - ASSERT_THAT(Loop0BB, HasName("loop.0")); - BasicBlock &Loop00BB = *BBI++; - ASSERT_THAT(Loop00BB, HasName("loop.0.0")); - BasicBlock &Loop02BB = *BBI++; - ASSERT_THAT(Loop02BB, HasName("loop.0.2")); - BasicBlock &Loop2BB = *BBI++; - ASSERT_THAT(Loop2BB, HasName("loop.2")); - BasicBlock &EndBB = *BBI++; - ASSERT_THAT(EndBB, HasName("end")); - ASSERT_THAT(BBI, F.end()); - Constant *Undefi1 = UndefValue::get(Type::getInt1Ty(Context)); - - // Build the pass managers and register our pipeline. We build a single loop - // pass pipeline consisting of three mock pass runs over each loop. After - // this we run both domtree and loop verification passes to make sure that - // the IR remained valid during our mutations. - ModulePassManager MPM(true); - FunctionPassManager FPM(true); - LoopPassManager LPM(true); - LPM.addPass(MLPHandle.getPass()); - LPM.addPass(MLPHandle.getPass()); - LPM.addPass(MLPHandle.getPass()); - FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM))); - FPM.addPass(DominatorTreeVerifierPass()); - FPM.addPass(LoopVerifierPass()); - MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); - - // All the visit orders are deterministic, so we use simple fully order - // expectations. - ::testing::InSequence MakeExpectationsSequenced; - - // We run loop passes three times over each of the loops. - EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); - - // On the second run, we insert a sibling loop. 
- BasicBlock *NewLoop01BB; - EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) - .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM, - LoopStandardAnalysisResults &AR, - LPMUpdater &Updater) { - auto *NewLoop = new Loop(); - L.getParentLoop()->addChildLoop(NewLoop); - NewLoop01BB = BasicBlock::Create(Context, "loop.0.1", &F, &Loop02BB); - BranchInst::Create(&Loop02BB, NewLoop01BB, Undefi1, NewLoop01BB); - Loop00BB.getTerminator()->replaceUsesOfWith(&Loop02BB, NewLoop01BB); - auto *NewDTNode = AR.DT.addNewBlock(NewLoop01BB, &Loop00BB); - AR.DT.changeImmediateDominator(AR.DT[&Loop02BB], NewDTNode); - NewLoop->addBasicBlockToLoop(NewLoop01BB, AR.LI); - Updater.addSiblingLoops({NewLoop}); - return PreservedAnalyses::all(); - })); - // We finish processing this loop as sibling loops don't perturb the - // postorder walk. - EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - - // We visit the inserted sibling next. - EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) - .Times(2) - .WillRepeatedly(Invoke(getLoopAnalysisResult)); - - EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.2"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - // Next, on the third pass run on the last inner loop we add more new - // siblings, more than one, and one with nested child loops. By doing this at - // the end we make sure that edge case works well. - EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _)) - .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM, - LoopStandardAnalysisResults &AR, - LPMUpdater &Updater) { - Loop *NewLoops[] = {new Loop(), new Loop(), new Loop()}; - L.getParentLoop()->addChildLoop(NewLoops[0]); - L.getParentLoop()->addChildLoop(NewLoops[1]); - NewLoops[1]->addChildLoop(NewLoops[2]); - auto *NewLoop03BB = - BasicBlock::Create(Context, "loop.0.3", &F, &Loop2BB); - auto *NewLoop04BB = - BasicBlock::Create(Context, "loop.0.4", &F, &Loop2BB); - auto *NewLoop040BB = - BasicBlock::Create(Context, "loop.0.4.0", &F, &Loop2BB); - Loop02BB.getTerminator()->replaceUsesOfWith(&Loop0BB, NewLoop03BB); - BranchInst::Create(NewLoop04BB, NewLoop03BB, Undefi1, NewLoop03BB); - BranchInst::Create(&Loop0BB, NewLoop040BB, Undefi1, NewLoop04BB); - BranchInst::Create(NewLoop04BB, NewLoop040BB, Undefi1, NewLoop040BB); - AR.DT.addNewBlock(NewLoop03BB, &Loop02BB); - AR.DT.addNewBlock(NewLoop04BB, NewLoop03BB); - AR.DT.addNewBlock(NewLoop040BB, NewLoop04BB); - NewLoops[0]->addBasicBlockToLoop(NewLoop03BB, AR.LI); - NewLoops[1]->addBasicBlockToLoop(NewLoop04BB, AR.LI); - NewLoops[2]->addBasicBlockToLoop(NewLoop040BB, AR.LI); - Updater.addSiblingLoops({NewLoops[0], NewLoops[1]}); - return PreservedAnalyses::all(); - })); - - EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.3"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _)) - .Times(2) - .WillRepeatedly(Invoke(getLoopAnalysisResult)); - - // Note that we need to visit the inner loop of this added sibling before the - // sibling itself! 
- EXPECT_CALL(MLPHandle, run(HasName("loop.0.4.0"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.4.0"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0.4.0"), _, _, _)) - .Times(2) - .WillRepeatedly(Invoke(getLoopAnalysisResult)); - - EXPECT_CALL(MLPHandle, run(HasName("loop.0.4"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.4"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0.4"), _, _, _)) - .Times(2) - .WillRepeatedly(Invoke(getLoopAnalysisResult)); - - // And only now do we visit the outermost loop of the nest. - EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); - // On the second pass, we add sibling loops which become new top-level loops. - EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) - .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM, - LoopStandardAnalysisResults &AR, - LPMUpdater &Updater) { - auto *NewLoop = new Loop(); - AR.LI.addTopLevelLoop(NewLoop); - auto *NewLoop1BB = BasicBlock::Create(Context, "loop.1", &F, &Loop2BB); - BranchInst::Create(&Loop2BB, NewLoop1BB, Undefi1, NewLoop1BB); - Loop0BB.getTerminator()->replaceUsesOfWith(&Loop2BB, NewLoop1BB); - auto *NewDTNode = AR.DT.addNewBlock(NewLoop1BB, &Loop0BB); - AR.DT.changeImmediateDominator(AR.DT[&Loop2BB], NewDTNode); - NewLoop->addBasicBlockToLoop(NewLoop1BB, AR.LI); - Updater.addSiblingLoops({NewLoop}); - return PreservedAnalyses::all(); - })); - EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - - EXPECT_CALL(MLPHandle, run(HasName("loop.1"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.1"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.1"), _, _, _)) - .Times(2) - .WillRepeatedly(Invoke(getLoopAnalysisResult)); - - EXPECT_CALL(MLPHandle, run(HasName("loop.2"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.2"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.2"), _, _, _)) - .Times(2) - .WillRepeatedly(Invoke(getLoopAnalysisResult)); - - // Now that all the expected actions are registered, run the pipeline over - // our module. All of our expectations are verified when the test finishes. - MPM.run(*M, MAM); -} - -TEST_F(LoopPassManagerTest, LoopDeletion) { - // Build a module with a single loop nest that contains one outer loop with - // three subloops, and one of those with its own subloop. We will - // incrementally delete all of these to test different deletion scenarios. - M = parseIR(Context, "define void @f() {\n" - "entry:\n" - " br label %loop.0\n" - "loop.0:\n" - " br i1 undef, label %loop.0.0, label %end\n" - "loop.0.0:\n" - " br i1 undef, label %loop.0.0, label %loop.0.1\n" - "loop.0.1:\n" - " br i1 undef, label %loop.0.1, label %loop.0.2\n" - "loop.0.2:\n" - " br i1 undef, label %loop.0.2.0, label %loop.0\n" - "loop.0.2.0:\n" - " br i1 undef, label %loop.0.2.0, label %loop.0.2\n" - "end:\n" - " ret void\n" - "}\n"); - - // Build up variables referring into the IR so we can rewrite it below - // easily. 
- Function &F = *M->begin(); - ASSERT_THAT(F, HasName("f")); - auto BBI = F.begin(); - BasicBlock &EntryBB = *BBI++; - ASSERT_THAT(EntryBB, HasName("entry")); - BasicBlock &Loop0BB = *BBI++; - ASSERT_THAT(Loop0BB, HasName("loop.0")); - BasicBlock &Loop00BB = *BBI++; - ASSERT_THAT(Loop00BB, HasName("loop.0.0")); - BasicBlock &Loop01BB = *BBI++; - ASSERT_THAT(Loop01BB, HasName("loop.0.1")); - BasicBlock &Loop02BB = *BBI++; - ASSERT_THAT(Loop02BB, HasName("loop.0.2")); - BasicBlock &Loop020BB = *BBI++; - ASSERT_THAT(Loop020BB, HasName("loop.0.2.0")); - BasicBlock &EndBB = *BBI++; - ASSERT_THAT(EndBB, HasName("end")); - ASSERT_THAT(BBI, F.end()); - Constant *Undefi1 = UndefValue::get(Type::getInt1Ty(Context)); - - // Helper to do the actual deletion of a loop. We directly encode this here - // to isolate ourselves from the rest of LLVM and for simplicity. Here we can - // egregiously cheat based on knowledge of the test case. For example, we - // have no PHI nodes and there is always a single i-dom. - auto DeleteLoopBlocks = [](Loop &L, BasicBlock &IDomBB, - LoopStandardAnalysisResults &AR, - LPMUpdater &Updater) { - for (BasicBlock *LoopBB : L.blocks()) { - SmallVector ChildNodes(AR.DT[LoopBB]->begin(), - AR.DT[LoopBB]->end()); - for (DomTreeNode *ChildNode : ChildNodes) - AR.DT.changeImmediateDominator(ChildNode, AR.DT[&IDomBB]); - AR.DT.eraseNode(LoopBB); - LoopBB->dropAllReferences(); - } - SmallVector LoopBBs(L.block_begin(), L.block_end()); - Updater.markLoopAsDeleted(L); - AR.LI.markAsRemoved(&L); - for (BasicBlock *LoopBB : LoopBBs) - LoopBB->eraseFromParent(); - }; - - // Build up the pass managers. - ModulePassManager MPM(true); - FunctionPassManager FPM(true); - // We run several loop pass pipelines across the loop nest, but they all take - // the same form of three mock pass runs in a loop pipeline followed by - // domtree and loop verification. We use a lambda to stamp this out each - // time. - auto AddLoopPipelineAndVerificationPasses = [&] { - LoopPassManager LPM(true); - LPM.addPass(MLPHandle.getPass()); - LPM.addPass(MLPHandle.getPass()); - LPM.addPass(MLPHandle.getPass()); - FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM))); - FPM.addPass(DominatorTreeVerifierPass()); - FPM.addPass(LoopVerifierPass()); - }; - - // All the visit orders are deterministic so we use simple fully order - // expectations. - ::testing::InSequence MakeExpectationsSequenced; - - // We run the loop pipeline with three passes over each of the loops. When - // running over the middle loop, the second pass in the pipeline deletes it. - // This should prevent the third pass from visiting it but otherwise leave - // the process unimpacted. 
- AddLoopPipelineAndVerificationPasses(); - EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) - .Times(2) - .WillRepeatedly(Invoke(getLoopAnalysisResult)); - - EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) - .WillOnce( - Invoke([&](Loop &L, LoopAnalysisManager &AM, - LoopStandardAnalysisResults &AR, LPMUpdater &Updater) { - AR.SE.forgetLoop(&L); - Loop00BB.getTerminator()->replaceUsesOfWith(&Loop01BB, &Loop02BB); - DeleteLoopBlocks(L, Loop00BB, AR, Updater); - return PreservedAnalyses::all(); - })); - - EXPECT_CALL(MLPHandle, run(HasName("loop.0.2.0"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.2.0"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0.2.0"), _, _, _)) - .Times(2) - .WillRepeatedly(Invoke(getLoopAnalysisResult)); - - EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.2"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _)) - .Times(2) - .WillRepeatedly(Invoke(getLoopAnalysisResult)); - - EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) - .Times(2) - .WillRepeatedly(Invoke(getLoopAnalysisResult)); - - // Run the loop pipeline again. This time we delete the last loop, which - // contains a nested loop within it, and we reuse its inner loop object to - // insert a new loop into the nest. This makes sure that we don't reuse - // cached analysis results for loop objects when removed just because their - // pointers match, and that we can handle nested loop deletion. - AddLoopPipelineAndVerificationPasses(); - EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) - .Times(3) - .WillRepeatedly(Invoke(getLoopAnalysisResult)); - - EXPECT_CALL(MLPHandle, run(HasName("loop.0.2.0"), _, _, _)) - .Times(3) - .WillRepeatedly(Invoke(getLoopAnalysisResult)); - - EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - BasicBlock *NewLoop03BB; - EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _)) - .WillOnce( - Invoke([&](Loop &L, LoopAnalysisManager &AM, - LoopStandardAnalysisResults &AR, LPMUpdater &Updater) { - // Delete the inner loop first. we also do this manually because we - // want to preserve the loop object and reuse it. - AR.SE.forgetLoop(*L.begin()); - Loop02BB.getTerminator()->replaceUsesOfWith(&Loop020BB, &Loop02BB); - assert(std::next((*L.begin())->block_begin()) == - (*L.begin())->block_end() && - "There should only be one block."); - assert(AR.DT[&Loop020BB]->getNumChildren() == 0 && - "Cannot have children in the domtree!"); - AR.DT.eraseNode(&Loop020BB); - Updater.markLoopAsDeleted(**L.begin()); - AR.LI.removeBlock(&Loop020BB); - auto *OldL = L.removeChildLoop(L.begin()); - Loop020BB.eraseFromParent(); - - auto *ParentL = L.getParentLoop(); - AR.SE.forgetLoop(&L); - Loop00BB.getTerminator()->replaceUsesOfWith(&Loop02BB, &Loop0BB); - DeleteLoopBlocks(L, Loop00BB, AR, Updater); - - // Now insert a new sibling loop, reusing a loop pointer. 
- ParentL->addChildLoop(OldL); - NewLoop03BB = BasicBlock::Create(Context, "loop.0.3", &F, &EndBB); - BranchInst::Create(&Loop0BB, NewLoop03BB, Undefi1, NewLoop03BB); - Loop00BB.getTerminator()->replaceUsesOfWith(&Loop0BB, NewLoop03BB); - AR.DT.addNewBlock(NewLoop03BB, &Loop00BB); - OldL->addBasicBlockToLoop(NewLoop03BB, AR.LI); - Updater.addSiblingLoops({OldL}); - return PreservedAnalyses::all(); - })); - - // To respect our inner-to-outer traversal order, we must visit the - // newly-inserted sibling of the loop we just deleted before we visit the - // outer loop. When we do so, this must compute a fresh analysis result, even - // though our new loop has the same pointer value as the loop we deleted. - EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLAHandle, run(HasName("loop.0.3"), _, _)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _)) - .Times(2) - .WillRepeatedly(Invoke(getLoopAnalysisResult)); - - EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) - .Times(3) - .WillRepeatedly(Invoke(getLoopAnalysisResult)); - - // In the final loop pipeline run we delete every loop, including the last - // loop of the nest. We do this again in the second pass in the pipeline, and - // as a consequence we never make it to three runs on any loop. We also cover - // deleting multiple loops in a single pipeline, deleting the first loop and - // deleting the (last) top level loop. - AddLoopPipelineAndVerificationPasses(); - EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) - .WillOnce( - Invoke([&](Loop &L, LoopAnalysisManager &AM, - LoopStandardAnalysisResults &AR, LPMUpdater &Updater) { - AR.SE.forgetLoop(&L); - Loop0BB.getTerminator()->replaceUsesOfWith(&Loop00BB, NewLoop03BB); - DeleteLoopBlocks(L, Loop0BB, AR, Updater); - return PreservedAnalyses::all(); - })); - - EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _)) - .WillOnce( - Invoke([&](Loop &L, LoopAnalysisManager &AM, - LoopStandardAnalysisResults &AR, LPMUpdater &Updater) { - AR.SE.forgetLoop(&L); - Loop0BB.getTerminator()->replaceUsesOfWith(NewLoop03BB, &Loop0BB); - DeleteLoopBlocks(L, Loop0BB, AR, Updater); - return PreservedAnalyses::all(); - })); - - EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) - .WillOnce(Invoke(getLoopAnalysisResult)); - EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) - .WillOnce( - Invoke([&](Loop &L, LoopAnalysisManager &AM, - LoopStandardAnalysisResults &AR, LPMUpdater &Updater) { - AR.SE.forgetLoop(&L); - EntryBB.getTerminator()->replaceUsesOfWith(&Loop0BB, &EndBB); - DeleteLoopBlocks(L, EntryBB, AR, Updater); - return PreservedAnalyses::all(); - })); - - // Add the function pass pipeline now that it is fully built up and run it - // over the module's one function. 
- MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); - MPM.run(*M, MAM); -} -} Index: llvm/trunk/unittests/Transforms/CMakeLists.txt =================================================================== --- llvm/trunk/unittests/Transforms/CMakeLists.txt +++ llvm/trunk/unittests/Transforms/CMakeLists.txt @@ -1,2 +1,3 @@ add_subdirectory(IPO) +add_subdirectory(Scalar) add_subdirectory(Utils) Index: llvm/trunk/unittests/Transforms/Scalar/CMakeLists.txt =================================================================== --- llvm/trunk/unittests/Transforms/Scalar/CMakeLists.txt +++ llvm/trunk/unittests/Transforms/Scalar/CMakeLists.txt @@ -0,0 +1,12 @@ +set(LLVM_LINK_COMPONENTS + Analysis + AsmParser + Core + Support + ScalarOpts + TransformUtils + ) + +add_llvm_unittest(ScalarTests + LoopPassManagerTest.cpp + ) Index: llvm/trunk/unittests/Transforms/Scalar/LoopPassManagerTest.cpp =================================================================== --- llvm/trunk/unittests/Transforms/Scalar/LoopPassManagerTest.cpp +++ llvm/trunk/unittests/Transforms/Scalar/LoopPassManagerTest.cpp @@ -0,0 +1,1438 @@ +//===- llvm/unittest/Analysis/LoopPassManagerTest.cpp - LPM tests ---------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Analysis/AliasAnalysis.h" +#include "llvm/Analysis/AssumptionCache.h" +#include "llvm/Analysis/ScalarEvolution.h" +#include "llvm/Analysis/TargetLibraryInfo.h" +#include "llvm/Analysis/TargetTransformInfo.h" +#include "llvm/AsmParser/Parser.h" +#include "llvm/IR/Dominators.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/PassManager.h" +#include "llvm/Support/SourceMgr.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using namespace llvm; + +namespace { + +using testing::DoDefault; +using testing::Return; +using testing::Expectation; +using testing::Invoke; +using testing::InvokeWithoutArgs; +using testing::_; + +template , + typename... ExtraArgTs> +class MockAnalysisHandleBase { +public: + class Analysis : public AnalysisInfoMixin { + friend AnalysisInfoMixin; + friend MockAnalysisHandleBase; + static AnalysisKey Key; + + DerivedT *Handle; + + Analysis(DerivedT &Handle) : Handle(&Handle) {} + + public: + class Result { + friend MockAnalysisHandleBase; + + DerivedT *Handle; + + Result(DerivedT &Handle) : Handle(&Handle) {} + + public: + // Forward invalidation events to the mock handle. + bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA, + typename AnalysisManagerT::Invalidator &Inv) { + return Handle->invalidate(IR, PA, Inv); + } + }; + + Result run(IRUnitT &IR, AnalysisManagerT &AM, ExtraArgTs... ExtraArgs) { + return Handle->run(IR, AM, ExtraArgs...); + } + }; + + Analysis getAnalysis() { return Analysis(static_cast(*this)); } + typename Analysis::Result getResult() { + return typename Analysis::Result(static_cast(*this)); + } + +protected: + // FIXME: MSVC seems unable to handle a lambda argument to Invoke from within + // the template, so we use a boring static function. 
+ static bool invalidateCallback(IRUnitT &IR, const PreservedAnalyses &PA, + typename AnalysisManagerT::Invalidator &Inv) { + auto PAC = PA.template getChecker(); + return !PAC.preserved() && + !PAC.template preservedSet>(); + } + + /// Derived classes should call this in their constructor to set up default + /// mock actions. (We can't do this in our constructor because this has to + /// run after the DerivedT is constructed.) + void setDefaults() { + ON_CALL(static_cast(*this), + run(_, _, testing::Matcher(_)...)) + .WillByDefault(Return(this->getResult())); + ON_CALL(static_cast(*this), invalidate(_, _, _)) + .WillByDefault(Invoke(&invalidateCallback)); + } +}; + +template +AnalysisKey MockAnalysisHandleBase::Analysis::Key; + +/// Mock handle for loop analyses. +/// +/// This is provided as a template accepting an (optional) integer. Because +/// analyses are identified and queried by type, this allows constructing +/// multiple handles with distinctly typed nested 'Analysis' types that can be +/// registered and queried. If you want to register multiple loop analysis +/// passes, you'll need to instantiate this type with different values for I. +/// For example: +/// +/// MockLoopAnalysisHandleTemplate<0> h0; +/// MockLoopAnalysisHandleTemplate<1> h1; +/// typedef decltype(h0)::Analysis Analysis0; +/// typedef decltype(h1)::Analysis Analysis1; +template (-1)> +struct MockLoopAnalysisHandleTemplate + : MockAnalysisHandleBase, Loop, + LoopAnalysisManager, + LoopStandardAnalysisResults &> { + typedef typename MockLoopAnalysisHandleTemplate::Analysis Analysis; + + MOCK_METHOD3_T(run, typename Analysis::Result(Loop &, LoopAnalysisManager &, + LoopStandardAnalysisResults &)); + + MOCK_METHOD3_T(invalidate, bool(Loop &, const PreservedAnalyses &, + LoopAnalysisManager::Invalidator &)); + + MockLoopAnalysisHandleTemplate() { this->setDefaults(); } +}; + +typedef MockLoopAnalysisHandleTemplate<> MockLoopAnalysisHandle; + +struct MockFunctionAnalysisHandle + : MockAnalysisHandleBase { + MOCK_METHOD2(run, Analysis::Result(Function &, FunctionAnalysisManager &)); + + MOCK_METHOD3(invalidate, bool(Function &, const PreservedAnalyses &, + FunctionAnalysisManager::Invalidator &)); + + MockFunctionAnalysisHandle() { setDefaults(); } +}; + +template , + typename... ExtraArgTs> +class MockPassHandleBase { +public: + class Pass : public PassInfoMixin { + friend MockPassHandleBase; + + DerivedT *Handle; + + Pass(DerivedT &Handle) : Handle(&Handle) {} + + public: + PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM, + ExtraArgTs... ExtraArgs) { + return Handle->run(IR, AM, ExtraArgs...); + } + }; + + Pass getPass() { return Pass(static_cast(*this)); } + +protected: + /// Derived classes should call this in their constructor to set up default + /// mock actions. (We can't do this in our constructor because this has to + /// run after the DerivedT is constructed.) 
+  void setDefaults() {
+    ON_CALL(static_cast<DerivedT &>(*this),
+            run(_, _, testing::Matcher<ExtraArgTs>(_)...))
+        .WillByDefault(Return(PreservedAnalyses::all()));
+  }
+};
+
+struct MockLoopPassHandle
+    : MockPassHandleBase<MockLoopPassHandle, Loop, LoopAnalysisManager,
+                         LoopStandardAnalysisResults &, LPMUpdater &> {
+  MOCK_METHOD4(run,
+               PreservedAnalyses(Loop &, LoopAnalysisManager &,
+                                 LoopStandardAnalysisResults &, LPMUpdater &));
+  MockLoopPassHandle() { setDefaults(); }
+};
+
+struct MockFunctionPassHandle
+    : MockPassHandleBase<MockFunctionPassHandle, Function> {
+  MOCK_METHOD2(run, PreservedAnalyses(Function &, FunctionAnalysisManager &));
+
+  MockFunctionPassHandle() { setDefaults(); }
+};
+
+struct MockModulePassHandle : MockPassHandleBase<MockModulePassHandle, Module> {
+  MOCK_METHOD2(run, PreservedAnalyses(Module &, ModuleAnalysisManager &));
+
+  MockModulePassHandle() { setDefaults(); }
+};
+
+/// Define a custom matcher for objects which support a 'getName' method
+/// returning a StringRef.
+///
+/// LLVM often has IR objects or analysis objects which expose a StringRef name
+/// and in tests it is convenient to match these by name for readability. This
+/// matcher supports any type exposing a getName() method of this form.
+///
+/// It should be used as:
+///
+///   HasName("my_function")
+///
+/// No namespace or other qualification is required.
+MATCHER_P(HasName, Name, "") {
+  // The matcher's name and argument are printed in the case of failure, but we
+  // also want to print out the name of the argument. This uses an implicitly
+  // available std::ostream, so we have to construct a std::string.
+  *result_listener << "has name '" << arg.getName().str() << "'";
+  return Name == arg.getName();
+}
+
+std::unique_ptr<Module> parseIR(LLVMContext &C, const char *IR) {
+  SMDiagnostic Err;
+  return parseAssemblyString(IR, Err, C);
+}
+
+class LoopPassManagerTest : public ::testing::Test {
+protected:
+  LLVMContext Context;
+  std::unique_ptr<Module> M;
+
+  LoopAnalysisManager LAM;
+  FunctionAnalysisManager FAM;
+  ModuleAnalysisManager MAM;
+
+  MockLoopAnalysisHandle MLAHandle;
+  MockLoopPassHandle MLPHandle;
+  MockFunctionPassHandle MFPHandle;
+  MockModulePassHandle MMPHandle;
+
+  static PreservedAnalyses
+  getLoopAnalysisResult(Loop &L, LoopAnalysisManager &AM,
+                        LoopStandardAnalysisResults &AR, LPMUpdater &) {
+    (void)AM.getResult<MockLoopAnalysisHandle::Analysis>(L, AR);
+    return PreservedAnalyses::all();
+  };
+
+public:
+  LoopPassManagerTest()
+      : M(parseIR(Context, "define void @f() {\n"
+                           "entry:\n"
+                           "  br label %loop.0\n"
+                           "loop.0:\n"
+                           "  br i1 undef, label %loop.0.0, label %end\n"
+                           "loop.0.0:\n"
+                           "  br i1 undef, label %loop.0.0, label %loop.0.1\n"
+                           "loop.0.1:\n"
+                           "  br i1 undef, label %loop.0.1, label %loop.0\n"
+                           "end:\n"
+                           "  ret void\n"
+                           "}\n"
+                           "\n"
+                           "define void @g() {\n"
+                           "entry:\n"
+                           "  br label %loop.g.0\n"
+                           "loop.g.0:\n"
+                           "  br i1 undef, label %loop.g.0, label %end\n"
+                           "end:\n"
+                           "  ret void\n"
+                           "}\n")),
+        LAM(true), FAM(true), MAM(true) {
+    // Register our mock analysis.
+    LAM.registerPass([&] { return MLAHandle.getAnalysis(); });
+
+    // We need DominatorTreeAnalysis for LoopAnalysis.
+    FAM.registerPass([&] { return DominatorTreeAnalysis(); });
+    FAM.registerPass([&] { return LoopAnalysis(); });
+    // We also allow loop passes to assume a set of other analyses and so need
+    // those.
+    FAM.registerPass([&] { return AAManager(); });
+    FAM.registerPass([&] { return AssumptionAnalysis(); });
+    FAM.registerPass([&] { return ScalarEvolutionAnalysis(); });
+    FAM.registerPass([&] { return TargetLibraryAnalysis(); });
+    FAM.registerPass([&] { return TargetIRAnalysis(); });
+
+    // Cross-register proxies.
+ LAM.registerPass([&] { return FunctionAnalysisManagerLoopProxy(FAM); }); + FAM.registerPass([&] { return LoopAnalysisManagerFunctionProxy(LAM); }); + FAM.registerPass([&] { return ModuleAnalysisManagerFunctionProxy(MAM); }); + MAM.registerPass([&] { return FunctionAnalysisManagerModuleProxy(FAM); }); + } +}; + +TEST_F(LoopPassManagerTest, Basic) { + ModulePassManager MPM(true); + ::testing::InSequence MakeExpectationsSequenced; + + // First we just visit all the loops in all the functions and get their + // analysis results. This will run the analysis a total of four times, + // once for each loop. + EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); + EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); + EXPECT_CALL(MLPHandle, run(HasName("loop.g.0"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _)); + // Wire the loop pass through pass managers into the module pipeline. + { + LoopPassManager LPM(true); + LPM.addPass(MLPHandle.getPass()); + FunctionPassManager FPM(true); + FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM))); + MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); + } + + // Next we run two passes over the loops. The first one invalidates the + // analyses for one loop, the second ones try to get the analysis results. + // This should force only one analysis to re-run within the loop PM, but will + // also invalidate everything after the loop pass manager finishes. + EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) + .WillOnce(DoDefault()) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) + .WillOnce(InvokeWithoutArgs([] { return PreservedAnalyses::none(); })) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); + EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) + .WillOnce(DoDefault()) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLPHandle, run(HasName("loop.g.0"), _, _, _)) + .WillOnce(DoDefault()) + .WillOnce(Invoke(getLoopAnalysisResult)); + // Wire two loop pass runs into the module pipeline. + { + LoopPassManager LPM(true); + LPM.addPass(MLPHandle.getPass()); + LPM.addPass(MLPHandle.getPass()); + FunctionPassManager FPM(true); + FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM))); + MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); + } + + // And now run the pipeline across the module. + MPM.run(*M, MAM); +} + +TEST_F(LoopPassManagerTest, FunctionPassInvalidationOfLoopAnalyses) { + ModulePassManager MPM(true); + FunctionPassManager FPM(true); + // We process each function completely in sequence. + ::testing::Sequence FSequence, GSequence; + + // First, force the analysis result to be computed for each loop. 
+ EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)) + .InSequence(FSequence) + .WillOnce(DoDefault()); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)) + .InSequence(FSequence) + .WillOnce(DoDefault()); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)) + .InSequence(FSequence) + .WillOnce(DoDefault()); + EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _)) + .InSequence(GSequence) + .WillOnce(DoDefault()); + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + // No need to re-run if we require again from a fresh loop pass manager. + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + // For 'f', preserve most things but not the specific loop analyses. + EXPECT_CALL(MFPHandle, run(HasName("f"), _)) + .InSequence(FSequence) + .WillOnce(Return(getLoopPassPreservedAnalyses())); + EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.0"), _, _)) + .InSequence(FSequence) + .WillOnce(DoDefault()); + // On one loop, skip the invalidation (as though we did an internal update). + EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.1"), _, _)) + .InSequence(FSequence) + .WillOnce(Return(false)); + EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0"), _, _)) + .InSequence(FSequence) + .WillOnce(DoDefault()); + // Now two loops still have to be recomputed. + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)) + .InSequence(FSequence) + .WillOnce(DoDefault()); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)) + .InSequence(FSequence) + .WillOnce(DoDefault()); + // Preserve things in the second function to ensure invalidation remains + // isolated to one function. + EXPECT_CALL(MFPHandle, run(HasName("g"), _)) + .InSequence(GSequence) + .WillOnce(DoDefault()); + FPM.addPass(MFPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + EXPECT_CALL(MFPHandle, run(HasName("f"), _)) + .InSequence(FSequence) + .WillOnce(DoDefault()); + // For 'g', fail to preserve anything, causing the loops themselves to be + // cleared. We don't get an invalidation event here as the loop is gone, but + // we should still have to recompute the analysis. + EXPECT_CALL(MFPHandle, run(HasName("g"), _)) + .InSequence(GSequence) + .WillOnce(Return(PreservedAnalyses::none())); + EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _)) + .InSequence(GSequence) + .WillOnce(DoDefault()); + FPM.addPass(MFPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); + + // Verify with a separate function pass run that we didn't mess up 'f's + // cache. No analysis runs should be necessary here. + MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass()))); + + MPM.run(*M, MAM); +} + +TEST_F(LoopPassManagerTest, ModulePassInvalidationOfLoopAnalyses) { + ModulePassManager MPM(true); + ::testing::InSequence MakeExpectationsSequenced; + + // First, force the analysis result to be computed for each loop. + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); + EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _)); + MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass()))); + + // Walking all the way out and all the way back in doesn't re-run the + // analysis. 
+  MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(
+      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>())));
+
+  // But a module pass that doesn't preserve the actual mock loop analysis
+  // invalidates all the way down and forces recomputing.
+  EXPECT_CALL(MMPHandle, run(_, _)).WillOnce(InvokeWithoutArgs([] {
+    auto PA = getLoopPassPreservedAnalyses();
+    PA.preserve<FunctionAnalysisManagerModuleProxy>();
+    return PA;
+  }));
+  // All the loop analyses from both functions get invalidated before we
+  // recompute anything.
+  EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.0"), _, _));
+  // On one loop, again skip the invalidation (as though we did an internal
+  // update).
+  EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.1"), _, _))
+      .WillOnce(Return(false));
+  EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0"), _, _));
+  EXPECT_CALL(MLAHandle, invalidate(HasName("loop.g.0"), _, _));
+  // Now all but one of the loops gets re-analyzed.
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _));
+  MPM.addPass(MMPHandle.getPass());
+  MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(
+      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>())));
+
+  // Verify that the cached values persist.
+  MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(
+      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>())));
+
+  // Now we fail to preserve the loop analysis and observe that the loop
+  // analyses are cleared (so no invalidation event) as the loops themselves
+  // are no longer valid.
+  EXPECT_CALL(MMPHandle, run(_, _)).WillOnce(InvokeWithoutArgs([] {
+    auto PA = PreservedAnalyses::none();
+    PA.preserve<FunctionAnalysisManagerModuleProxy>();
+    return PA;
+  }));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _));
+  MPM.addPass(MMPHandle.getPass());
+  MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(
+      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>())));
+
+  // Verify that the cached values persist.
+  MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(
+      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>())));
+
+  // Next, check that even if we preserve everything within the function itself,
+  // if the function's module pass proxy isn't preserved and the potential set
+  // of functions changes, the clear reaches the loop analyses as well. This
+  // will again trigger re-runs but not invalidation events.
+  EXPECT_CALL(MMPHandle, run(_, _)).WillOnce(InvokeWithoutArgs([] {
+    auto PA = PreservedAnalyses::none();
+    PA.preserveSet<AllAnalysesOn<Function>>();
+    PA.preserveSet<AllAnalysesOn<Loop>>();
+    return PA;
+  }));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _));
+  MPM.addPass(MMPHandle.getPass());
+  MPM.addPass(createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor(
+      RequireAnalysisLoopPass<MockLoopAnalysisHandle::Analysis>())));
+
+  MPM.run(*M, MAM);
+}
+
+// Test that if any of the bundled analyses provided in the LPM's signature
+// become invalid, the analysis proxy itself becomes invalid and we clear all
+// loop analysis results.
+TEST_F(LoopPassManagerTest, InvalidationOfBundledAnalyses) { + ModulePassManager MPM(true); + FunctionPassManager FPM(true); + ::testing::InSequence MakeExpectationsSequenced; + + // First, force the analysis result to be computed for each loop. + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + // No need to re-run if we require again from a fresh loop pass manager. + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + // Preserving everything but the loop analyses themselves results in + // invalidation and running. + EXPECT_CALL(MFPHandle, run(HasName("f"), _)) + .WillOnce(Return(getLoopPassPreservedAnalyses())); + EXPECT_CALL(MLAHandle, invalidate(_, _, _)).Times(3); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); + FPM.addPass(MFPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + // The rest don't invalidate analyses, they only trigger re-runs because we + // clear the cache completely. + EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] { + auto PA = PreservedAnalyses::none(); + // Not preserving `AAManager`. + PA.preserve(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + return PA; + })); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); + FPM.addPass(MFPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] { + auto PA = PreservedAnalyses::none(); + PA.preserve(); + // Not preserving `AssumptionAnalysis`. + PA.preserve(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + return PA; + })); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); + FPM.addPass(MFPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] { + auto PA = PreservedAnalyses::none(); + PA.preserve(); + PA.preserve(); + // Not preserving `DominatorTreeAnalysis`. + PA.preserve(); + PA.preserve(); + PA.preserve(); + return PA; + })); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); + FPM.addPass(MFPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] { + auto PA = PreservedAnalyses::none(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + // Not preserving the `LoopAnalysis`. 
+ PA.preserve(); + PA.preserve(); + return PA; + })); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); + FPM.addPass(MFPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] { + auto PA = PreservedAnalyses::none(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + // Not preserving the `LoopAnalysisManagerFunctionProxy`. + PA.preserve(); + return PA; + })); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); + FPM.addPass(MFPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] { + auto PA = PreservedAnalyses::none(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + PA.preserve(); + // Not preserving `ScalarEvolutionAnalysis`. + return PA; + })); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _)); + FPM.addPass(MFPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor( + RequireAnalysisLoopPass())); + + // After all the churn on 'f', we'll compute the loop analysis results for + // 'g' once with a requires pass and then run our mock pass over g a bunch + // but just get cached results each time. + EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _)); + EXPECT_CALL(MFPHandle, run(HasName("g"), _)).Times(7); + + MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); + MPM.run(*M, MAM); +} + +TEST_F(LoopPassManagerTest, IndirectInvalidation) { + // We need two distinct analysis types and handles. + enum { A, B }; + MockLoopAnalysisHandleTemplate MLAHandleA; + MockLoopAnalysisHandleTemplate MLAHandleB; + LAM.registerPass([&] { return MLAHandleA.getAnalysis(); }); + LAM.registerPass([&] { return MLAHandleB.getAnalysis(); }); + typedef decltype(MLAHandleA)::Analysis AnalysisA; + typedef decltype(MLAHandleB)::Analysis AnalysisB; + + // Set up AnalysisA to depend on our AnalysisB. For testing purposes we just + // need to get the AnalysisB results in AnalysisA's run method and check if + // AnalysisB gets invalidated in AnalysisA's invalidate method. + ON_CALL(MLAHandleA, run(_, _, _)) + .WillByDefault(Invoke([&](Loop &L, LoopAnalysisManager &AM, + LoopStandardAnalysisResults &AR) { + (void)AM.getResult(L, AR); + return MLAHandleA.getResult(); + })); + ON_CALL(MLAHandleA, invalidate(_, _, _)) + .WillByDefault(Invoke([](Loop &L, const PreservedAnalyses &PA, + LoopAnalysisManager::Invalidator &Inv) { + auto PAC = PA.getChecker(); + return !(PAC.preserved() || PAC.preservedSet>()) || + Inv.invalidate(L, PA); + })); + + ::testing::InSequence MakeExpectationsSequenced; + + // Compute the analyses across all of 'f' first. 
+ EXPECT_CALL(MLAHandleA, run(HasName("loop.0.0"), _, _)); + EXPECT_CALL(MLAHandleB, run(HasName("loop.0.0"), _, _)); + EXPECT_CALL(MLAHandleA, run(HasName("loop.0.1"), _, _)); + EXPECT_CALL(MLAHandleB, run(HasName("loop.0.1"), _, _)); + EXPECT_CALL(MLAHandleA, run(HasName("loop.0"), _, _)); + EXPECT_CALL(MLAHandleB, run(HasName("loop.0"), _, _)); + + // Now we invalidate AnalysisB (but not AnalysisA) for one of the loops and + // preserve everything for the rest. This in turn triggers that one loop to + // recompute both AnalysisB *and* AnalysisA if indirect invalidation is + // working. + EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) + .WillOnce(InvokeWithoutArgs([] { + auto PA = getLoopPassPreservedAnalyses(); + // Specifically preserve AnalysisA so that it would survive if it + // didn't depend on AnalysisB. + PA.preserve(); + return PA; + })); + // It happens that AnalysisB is invalidated first. That shouldn't matter + // though, and we should still call AnalysisA's invalidation. + EXPECT_CALL(MLAHandleB, invalidate(HasName("loop.0.0"), _, _)); + EXPECT_CALL(MLAHandleA, invalidate(HasName("loop.0.0"), _, _)); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) + .WillOnce(Invoke([](Loop &L, LoopAnalysisManager &AM, + LoopStandardAnalysisResults &AR, LPMUpdater &) { + (void)AM.getResult(L, AR); + return PreservedAnalyses::all(); + })); + EXPECT_CALL(MLAHandleA, run(HasName("loop.0.0"), _, _)); + EXPECT_CALL(MLAHandleB, run(HasName("loop.0.0"), _, _)); + // The rest of the loops should run and get cached results. + EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke([](Loop &L, LoopAnalysisManager &AM, + LoopStandardAnalysisResults &AR, LPMUpdater &) { + (void)AM.getResult(L, AR); + return PreservedAnalyses::all(); + })); + EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke([](Loop &L, LoopAnalysisManager &AM, + LoopStandardAnalysisResults &AR, LPMUpdater &) { + (void)AM.getResult(L, AR); + return PreservedAnalyses::all(); + })); + + // The run over 'g' should be boring, with us just computing the analyses once + // up front and then running loop passes and getting cached results. + EXPECT_CALL(MLAHandleA, run(HasName("loop.g.0"), _, _)); + EXPECT_CALL(MLAHandleB, run(HasName("loop.g.0"), _, _)); + EXPECT_CALL(MLPHandle, run(HasName("loop.g.0"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke([](Loop &L, LoopAnalysisManager &AM, + LoopStandardAnalysisResults &AR, LPMUpdater &) { + (void)AM.getResult(L, AR); + return PreservedAnalyses::all(); + })); + + // Build the pipeline and run it. + ModulePassManager MPM(true); + FunctionPassManager FPM(true); + FPM.addPass( + createFunctionToLoopPassAdaptor(RequireAnalysisLoopPass())); + LoopPassManager LPM(true); + LPM.addPass(MLPHandle.getPass()); + LPM.addPass(MLPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM))); + MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); + MPM.run(*M, MAM); +} + +TEST_F(LoopPassManagerTest, IndirectOuterPassInvalidation) { + typedef decltype(MLAHandle)::Analysis LoopAnalysis; + + MockFunctionAnalysisHandle MFAHandle; + FAM.registerPass([&] { return MFAHandle.getAnalysis(); }); + typedef decltype(MFAHandle)::Analysis FunctionAnalysis; + + // Set up the loop analysis to depend on both the function and module + // analysis. 
+  ON_CALL(MLAHandle, run(_, _, _))
+      .WillByDefault(Invoke([&](Loop &L, LoopAnalysisManager &AM,
+                                LoopStandardAnalysisResults &AR) {
+        auto &FAMP = AM.getResult<FunctionAnalysisManagerLoopProxy>(L, AR);
+        auto &FAM = FAMP.getManager();
+        Function &F = *L.getHeader()->getParent();
+        if (auto *FA = FAM.getCachedResult<FunctionAnalysis>(F))
+          FAMP.registerOuterAnalysisInvalidation<FunctionAnalysis,
+                                                 LoopAnalysis>();
+        return MLAHandle.getResult();
+      }));
+
+  ::testing::InSequence MakeExpectationsSequenced;
+
+  // Compute the analyses across all of 'f' first.
+  EXPECT_CALL(MFPHandle, run(HasName("f"), _))
+      .WillOnce(Invoke([](Function &F, FunctionAnalysisManager &AM) {
+        // Force the computing of the function analysis so it is available in
+        // this function.
+        (void)AM.getResult<FunctionAnalysis>(F);
+        return PreservedAnalyses::all();
+      }));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+
+  // Now invalidate the function analysis but preserve the loop analyses.
+  // This should trigger immediate invalidation of the loop analyses, despite
+  // the fact that they were preserved.
+  EXPECT_CALL(MFPHandle, run(HasName("f"), _)).WillOnce(InvokeWithoutArgs([] {
+    auto PA = getLoopPassPreservedAnalyses();
+    PA.preserveSet<AllAnalysesOn<Loop>>();
+    return PA;
+  }));
+  EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.0"), _, _));
+  EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0.1"), _, _));
+  EXPECT_CALL(MLAHandle, invalidate(HasName("loop.0"), _, _));
+
+  // And re-running a requires pass recomputes them.
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+
+  // When we run over 'g' we don't populate the cache with the function
+  // analysis.
+  EXPECT_CALL(MFPHandle, run(HasName("g"), _))
+      .WillOnce(Return(PreservedAnalyses::all()));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.g.0"), _, _));
+
+  // Which means that no extra invalidation occurs and cached values are used.
+  EXPECT_CALL(MFPHandle, run(HasName("g"), _)).WillOnce(InvokeWithoutArgs([] {
+    auto PA = getLoopPassPreservedAnalyses();
+    PA.preserveSet<AllAnalysesOn<Loop>>();
+    return PA;
+  }));
+
+  // Build the pipeline and run it.
+  ModulePassManager MPM(true);
+  FunctionPassManager FPM(true);
+  FPM.addPass(MFPHandle.getPass());
+  FPM.addPass(
+      createFunctionToLoopPassAdaptor(RequireAnalysisLoopPass<LoopAnalysis>()));
+  FPM.addPass(MFPHandle.getPass());
+  FPM.addPass(
+      createFunctionToLoopPassAdaptor(RequireAnalysisLoopPass<LoopAnalysis>()));
+  MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
+  MPM.run(*M, MAM);
+}
+
+TEST_F(LoopPassManagerTest, LoopChildInsertion) {
+  // Super boring module with three loops in a single loop nest.
+  M = parseIR(Context, "define void @f() {\n"
+                       "entry:\n"
+                       "  br label %loop.0\n"
+                       "loop.0:\n"
+                       "  br i1 undef, label %loop.0.0, label %end\n"
+                       "loop.0.0:\n"
+                       "  br i1 undef, label %loop.0.0, label %loop.0.1\n"
+                       "loop.0.1:\n"
+                       "  br i1 undef, label %loop.0.1, label %loop.0.2\n"
+                       "loop.0.2:\n"
+                       "  br i1 undef, label %loop.0.2, label %loop.0\n"
+                       "end:\n"
+                       "  ret void\n"
+                       "}\n");
+
+  // Build up variables referring into the IR so we can rewrite it below
+  // easily.
+ Function &F = *M->begin(); + ASSERT_THAT(F, HasName("f")); + auto BBI = F.begin(); + BasicBlock &EntryBB = *BBI++; + ASSERT_THAT(EntryBB, HasName("entry")); + BasicBlock &Loop0BB = *BBI++; + ASSERT_THAT(Loop0BB, HasName("loop.0")); + BasicBlock &Loop00BB = *BBI++; + ASSERT_THAT(Loop00BB, HasName("loop.0.0")); + BasicBlock &Loop01BB = *BBI++; + ASSERT_THAT(Loop01BB, HasName("loop.0.1")); + BasicBlock &Loop02BB = *BBI++; + ASSERT_THAT(Loop02BB, HasName("loop.0.2")); + BasicBlock &EndBB = *BBI++; + ASSERT_THAT(EndBB, HasName("end")); + ASSERT_THAT(BBI, F.end()); + + // Build the pass managers and register our pipeline. We build a single loop + // pass pipeline consisting of three mock pass runs over each loop. After + // this we run both domtree and loop verification passes to make sure that + // the IR remained valid during our mutations. + ModulePassManager MPM(true); + FunctionPassManager FPM(true); + LoopPassManager LPM(true); + LPM.addPass(MLPHandle.getPass()); + LPM.addPass(MLPHandle.getPass()); + LPM.addPass(MLPHandle.getPass()); + FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM))); + FPM.addPass(DominatorTreeVerifierPass()); + FPM.addPass(LoopVerifierPass()); + MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); + + // All the visit orders are deterministic, so we use simple fully order + // expectations. + ::testing::InSequence MakeExpectationsSequenced; + + // We run loop passes three times over each of the loops. + EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _)); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke(getLoopAnalysisResult)); + + EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _)); + + // When running over the middle loop, the second run inserts two new child + // loops, inserting them and itself into the worklist. + BasicBlock *NewLoop010BB; + EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) + .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM, + LoopStandardAnalysisResults &AR, + LPMUpdater &Updater) { + auto *NewLoop = new Loop(); + L.addChildLoop(NewLoop); + NewLoop010BB = BasicBlock::Create(Context, "loop.0.1.0", &F, &Loop02BB); + BranchInst::Create(&Loop01BB, NewLoop010BB, + UndefValue::get(Type::getInt1Ty(Context)), + NewLoop010BB); + Loop01BB.getTerminator()->replaceUsesOfWith(&Loop01BB, NewLoop010BB); + AR.DT.addNewBlock(NewLoop010BB, &Loop01BB); + NewLoop->addBasicBlockToLoop(NewLoop010BB, AR.LI); + Updater.addChildLoops({NewLoop}); + return PreservedAnalyses::all(); + })); + + // We should immediately drop down to fully visit the new inner loop. + EXPECT_CALL(MLPHandle, run(HasName("loop.0.1.0"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + EXPECT_CALL(MLAHandle, run(HasName("loop.0.1.0"), _, _)); + EXPECT_CALL(MLPHandle, run(HasName("loop.0.1.0"), _, _, _)) + .Times(2) + .WillRepeatedly(Invoke(getLoopAnalysisResult)); + + // After visiting the inner loop, we should re-visit the second loop + // reflecting its new loop nest structure. + EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _)) + .WillOnce(Invoke(getLoopAnalysisResult)); + + // In the second run over the middle loop after we've visited the new child, + // we add another child to check that we can repeatedly add children, and add + // children to a loop that already has children. 
+  BasicBlock *NewLoop011BB;
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
+      .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM,
+                           LoopStandardAnalysisResults &AR,
+                           LPMUpdater &Updater) {
+        auto *NewLoop = new Loop();
+        L.addChildLoop(NewLoop);
+        NewLoop011BB =
+            BasicBlock::Create(Context, "loop.0.1.1", &F, &Loop02BB);
+        BranchInst::Create(&Loop01BB, NewLoop011BB,
+                           UndefValue::get(Type::getInt1Ty(Context)),
+                           NewLoop011BB);
+        NewLoop010BB->getTerminator()->replaceUsesOfWith(&Loop01BB,
+                                                         NewLoop011BB);
+        AR.DT.addNewBlock(NewLoop011BB, NewLoop010BB);
+        NewLoop->addBasicBlockToLoop(NewLoop011BB, AR.LI);
+        Updater.addChildLoops({NewLoop});
+        return PreservedAnalyses::all();
+      }));
+
+  // Again, we should immediately drop down to visit the new, unvisited child
+  // loop. We don't need to revisit the other child though.
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.1.1"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1.1"), _, _));
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.1.1"), _, _, _))
+      .Times(2)
+      .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+  // And now we should pop back up to the second loop and do a full pipeline of
+  // three passes on its current form.
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
+      .Times(3)
+      .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0.2"), _, _));
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
+      .Times(2)
+      .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
+      .Times(2)
+      .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+  // Now that all the expected actions are registered, run the pipeline over
+  // our module. All of our expectations are verified when the test finishes.
+  MPM.run(*M, MAM);
+}
+
+TEST_F(LoopPassManagerTest, LoopPeerInsertion) {
+  // Super boring module with two loop nests, one of which has two child
+  // loops.
+  M = parseIR(Context, "define void @f() {\n"
+                       "entry:\n"
+                       "  br label %loop.0\n"
+                       "loop.0:\n"
+                       "  br i1 undef, label %loop.0.0, label %loop.2\n"
+                       "loop.0.0:\n"
+                       "  br i1 undef, label %loop.0.0, label %loop.0.2\n"
+                       "loop.0.2:\n"
+                       "  br i1 undef, label %loop.0.2, label %loop.0\n"
+                       "loop.2:\n"
+                       "  br i1 undef, label %loop.2, label %end\n"
+                       "end:\n"
+                       "  ret void\n"
+                       "}\n");
+
+  // Build up variables referring into the IR so we can rewrite it below
+  // easily.
+  Function &F = *M->begin();
+  ASSERT_THAT(F, HasName("f"));
+  auto BBI = F.begin();
+  BasicBlock &EntryBB = *BBI++;
+  ASSERT_THAT(EntryBB, HasName("entry"));
+  BasicBlock &Loop0BB = *BBI++;
+  ASSERT_THAT(Loop0BB, HasName("loop.0"));
+  BasicBlock &Loop00BB = *BBI++;
+  ASSERT_THAT(Loop00BB, HasName("loop.0.0"));
+  BasicBlock &Loop02BB = *BBI++;
+  ASSERT_THAT(Loop02BB, HasName("loop.0.2"));
+  BasicBlock &Loop2BB = *BBI++;
+  ASSERT_THAT(Loop2BB, HasName("loop.2"));
+  BasicBlock &EndBB = *BBI++;
+  ASSERT_THAT(EndBB, HasName("end"));
+  ASSERT_THAT(BBI, F.end());
+  Constant *Undefi1 = UndefValue::get(Type::getInt1Ty(Context));
+
+  // Build the pass managers and register our pipeline. We build a single loop
+  // pass pipeline consisting of three mock pass runs over each loop. After
+  // this we run both domtree and loop verification passes to make sure that
+  // the IR remained valid during our mutations.
+  ModulePassManager MPM(true);
+  FunctionPassManager FPM(true);
+  LoopPassManager LPM(true);
+  LPM.addPass(MLPHandle.getPass());
+  LPM.addPass(MLPHandle.getPass());
+  LPM.addPass(MLPHandle.getPass());
+  FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));
+  FPM.addPass(DominatorTreeVerifierPass());
+  FPM.addPass(LoopVerifierPass());
+  MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
+
+  // All the visit orders are deterministic, so we use simple fully ordered
+  // expectations.
+  ::testing::InSequence MakeExpectationsSequenced;
+
+  // We run loop passes three times over each of the loops.
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+
+  // On the second run, we insert a sibling loop.
+  BasicBlock *NewLoop01BB;
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
+      .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM,
+                           LoopStandardAnalysisResults &AR,
+                           LPMUpdater &Updater) {
+        auto *NewLoop = new Loop();
+        L.getParentLoop()->addChildLoop(NewLoop);
+        NewLoop01BB = BasicBlock::Create(Context, "loop.0.1", &F, &Loop02BB);
+        BranchInst::Create(&Loop02BB, NewLoop01BB, Undefi1, NewLoop01BB);
+        Loop00BB.getTerminator()->replaceUsesOfWith(&Loop02BB, NewLoop01BB);
+        auto *NewDTNode = AR.DT.addNewBlock(NewLoop01BB, &Loop00BB);
+        AR.DT.changeImmediateDominator(AR.DT[&Loop02BB], NewDTNode);
+        NewLoop->addBasicBlockToLoop(NewLoop01BB, AR.LI);
+        Updater.addSiblingLoops({NewLoop});
+        return PreservedAnalyses::all();
+      }));
+  // We finish processing this loop as sibling loops don't perturb the
+  // postorder walk.
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+
+  // We visit the inserted sibling next.
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
+      .Times(2)
+      .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0.2"), _, _));
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+  // Next, on the third pass run on the last inner loop we add more new
+  // siblings, more than one, and one with nested child loops. By doing this at
+  // the end we make sure that edge case works well.
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
+      .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM,
+                           LoopStandardAnalysisResults &AR,
+                           LPMUpdater &Updater) {
+        Loop *NewLoops[] = {new Loop(), new Loop(), new Loop()};
+        L.getParentLoop()->addChildLoop(NewLoops[0]);
+        L.getParentLoop()->addChildLoop(NewLoops[1]);
+        NewLoops[1]->addChildLoop(NewLoops[2]);
+        auto *NewLoop03BB =
+            BasicBlock::Create(Context, "loop.0.3", &F, &Loop2BB);
+        auto *NewLoop04BB =
+            BasicBlock::Create(Context, "loop.0.4", &F, &Loop2BB);
+        auto *NewLoop040BB =
+            BasicBlock::Create(Context, "loop.0.4.0", &F, &Loop2BB);
+        Loop02BB.getTerminator()->replaceUsesOfWith(&Loop0BB, NewLoop03BB);
+        BranchInst::Create(NewLoop04BB, NewLoop03BB, Undefi1, NewLoop03BB);
+        BranchInst::Create(&Loop0BB, NewLoop040BB, Undefi1, NewLoop04BB);
+        BranchInst::Create(NewLoop04BB, NewLoop040BB, Undefi1, NewLoop040BB);
+        AR.DT.addNewBlock(NewLoop03BB, &Loop02BB);
+        AR.DT.addNewBlock(NewLoop04BB, NewLoop03BB);
+        AR.DT.addNewBlock(NewLoop040BB, NewLoop04BB);
+        NewLoops[0]->addBasicBlockToLoop(NewLoop03BB, AR.LI);
+        NewLoops[1]->addBasicBlockToLoop(NewLoop04BB, AR.LI);
+        NewLoops[2]->addBasicBlockToLoop(NewLoop040BB, AR.LI);
+        Updater.addSiblingLoops({NewLoops[0], NewLoops[1]});
+        return PreservedAnalyses::all();
+      }));
+
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0.3"), _, _));
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _))
+      .Times(2)
+      .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+  // Note that we need to visit the inner loop of this added sibling before the
+  // sibling itself!
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.4.0"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0.4.0"), _, _));
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.4.0"), _, _, _))
+      .Times(2)
+      .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.4"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0.4"), _, _));
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.4"), _, _, _))
+      .Times(2)
+      .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+  // And only now do we visit the outermost loop of the nest.
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+  // On the second pass, we add sibling loops which become new top-level loops.
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
+      .WillOnce(Invoke([&](Loop &L, LoopAnalysisManager &AM,
+                           LoopStandardAnalysisResults &AR,
+                           LPMUpdater &Updater) {
+        auto *NewLoop = new Loop();
+        AR.LI.addTopLevelLoop(NewLoop);
+        auto *NewLoop1BB = BasicBlock::Create(Context, "loop.1", &F, &Loop2BB);
+        BranchInst::Create(&Loop2BB, NewLoop1BB, Undefi1, NewLoop1BB);
+        Loop0BB.getTerminator()->replaceUsesOfWith(&Loop2BB, NewLoop1BB);
+        auto *NewDTNode = AR.DT.addNewBlock(NewLoop1BB, &Loop0BB);
+        AR.DT.changeImmediateDominator(AR.DT[&Loop2BB], NewDTNode);
+        NewLoop->addBasicBlockToLoop(NewLoop1BB, AR.LI);
+        Updater.addSiblingLoops({NewLoop});
+        return PreservedAnalyses::all();
+      }));
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+
+  EXPECT_CALL(MLPHandle, run(HasName("loop.1"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.1"), _, _));
+  EXPECT_CALL(MLPHandle, run(HasName("loop.1"), _, _, _))
+      .Times(2)
+      .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+  EXPECT_CALL(MLPHandle, run(HasName("loop.2"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.2"), _, _));
+  EXPECT_CALL(MLPHandle, run(HasName("loop.2"), _, _, _))
+      .Times(2)
+      .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+  // Now that all the expected actions are registered, run the pipeline over
+  // our module. All of our expectations are verified when the test finishes.
+  MPM.run(*M, MAM);
+}
+
+TEST_F(LoopPassManagerTest, LoopDeletion) {
+  // Build a module with a single loop nest that contains one outer loop with
+  // three subloops, and one of those with its own subloop. We will
+  // incrementally delete all of these to test different deletion scenarios.
+  M = parseIR(Context, "define void @f() {\n"
+                       "entry:\n"
+                       "  br label %loop.0\n"
+                       "loop.0:\n"
+                       "  br i1 undef, label %loop.0.0, label %end\n"
+                       "loop.0.0:\n"
+                       "  br i1 undef, label %loop.0.0, label %loop.0.1\n"
+                       "loop.0.1:\n"
+                       "  br i1 undef, label %loop.0.1, label %loop.0.2\n"
+                       "loop.0.2:\n"
+                       "  br i1 undef, label %loop.0.2.0, label %loop.0\n"
+                       "loop.0.2.0:\n"
+                       "  br i1 undef, label %loop.0.2.0, label %loop.0.2\n"
+                       "end:\n"
+                       "  ret void\n"
+                       "}\n");
+
+  // Build up variables referring into the IR so we can rewrite it below
+  // easily.
+  Function &F = *M->begin();
+  ASSERT_THAT(F, HasName("f"));
+  auto BBI = F.begin();
+  BasicBlock &EntryBB = *BBI++;
+  ASSERT_THAT(EntryBB, HasName("entry"));
+  BasicBlock &Loop0BB = *BBI++;
+  ASSERT_THAT(Loop0BB, HasName("loop.0"));
+  BasicBlock &Loop00BB = *BBI++;
+  ASSERT_THAT(Loop00BB, HasName("loop.0.0"));
+  BasicBlock &Loop01BB = *BBI++;
+  ASSERT_THAT(Loop01BB, HasName("loop.0.1"));
+  BasicBlock &Loop02BB = *BBI++;
+  ASSERT_THAT(Loop02BB, HasName("loop.0.2"));
+  BasicBlock &Loop020BB = *BBI++;
+  ASSERT_THAT(Loop020BB, HasName("loop.0.2.0"));
+  BasicBlock &EndBB = *BBI++;
+  ASSERT_THAT(EndBB, HasName("end"));
+  ASSERT_THAT(BBI, F.end());
+  Constant *Undefi1 = UndefValue::get(Type::getInt1Ty(Context));
+
+  // Helper to do the actual deletion of a loop. We directly encode this here
+  // to isolate ourselves from the rest of LLVM and for simplicity. Here we can
+  // egregiously cheat based on knowledge of the test case. For example, we
+  // have no PHI nodes and there is always a single i-dom.
+  auto DeleteLoopBlocks = [](Loop &L, BasicBlock &IDomBB,
+                             LoopStandardAnalysisResults &AR,
+                             LPMUpdater &Updater) {
+    for (BasicBlock *LoopBB : L.blocks()) {
+      SmallVector<DomTreeNode *, 4> ChildNodes(AR.DT[LoopBB]->begin(),
+                                               AR.DT[LoopBB]->end());
+      for (DomTreeNode *ChildNode : ChildNodes)
+        AR.DT.changeImmediateDominator(ChildNode, AR.DT[&IDomBB]);
+      AR.DT.eraseNode(LoopBB);
+      LoopBB->dropAllReferences();
+    }
+    SmallVector<BasicBlock *, 4> LoopBBs(L.block_begin(), L.block_end());
+    Updater.markLoopAsDeleted(L);
+    AR.LI.markAsRemoved(&L);
+    for (BasicBlock *LoopBB : LoopBBs)
+      LoopBB->eraseFromParent();
+  };
+
+  // Build up the pass managers.
+  ModulePassManager MPM(true);
+  FunctionPassManager FPM(true);
+  // We run several loop pass pipelines across the loop nest, but they all take
+  // the same form of three mock pass runs in a loop pipeline followed by
+  // domtree and loop verification. We use a lambda to stamp this out each
+  // time.
+  auto AddLoopPipelineAndVerificationPasses = [&] {
+    LoopPassManager LPM(true);
+    LPM.addPass(MLPHandle.getPass());
+    LPM.addPass(MLPHandle.getPass());
+    LPM.addPass(MLPHandle.getPass());
+    FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));
+    FPM.addPass(DominatorTreeVerifierPass());
+    FPM.addPass(LoopVerifierPass());
+  };
+
+  // All the visit orders are deterministic, so we use simple fully ordered
+  // expectations.
+  ::testing::InSequence MakeExpectationsSequenced;
+
+  // We run the loop pipeline with three passes over each of the loops. When
+  // running over the middle loop, the second pass in the pipeline deletes it.
+  // This should prevent the third pass from visiting it but otherwise leave
+  // the process unimpacted.
+  AddLoopPipelineAndVerificationPasses();
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0.0"), _, _));
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
+      .Times(2)
+      .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0.1"), _, _));
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.1"), _, _, _))
+      .WillOnce(
+          Invoke([&](Loop &L, LoopAnalysisManager &AM,
+                     LoopStandardAnalysisResults &AR, LPMUpdater &Updater) {
+            AR.SE.forgetLoop(&L);
+            Loop00BB.getTerminator()->replaceUsesOfWith(&Loop01BB, &Loop02BB);
+            DeleteLoopBlocks(L, Loop00BB, AR, Updater);
+            return PreservedAnalyses::all();
+          }));
+
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.2.0"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0.2.0"), _, _));
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.2.0"), _, _, _))
+      .Times(2)
+      .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0.2"), _, _));
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
+      .Times(2)
+      .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0"), _, _));
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
+      .Times(2)
+      .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+  // Run the loop pipeline again. This time we delete the last loop, which
+  // contains a nested loop within it, and we reuse its inner loop object to
+  // insert a new loop into the nest. This makes sure that we don't reuse
+  // cached analysis results for removed loop objects just because their
+  // pointers match, and that we can handle nested loop deletion.
+  AddLoopPipelineAndVerificationPasses();
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
+      .Times(3)
+      .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.2.0"), _, _, _))
+      .Times(3)
+      .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+  BasicBlock *NewLoop03BB;
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.2"), _, _, _))
+      .WillOnce(
+          Invoke([&](Loop &L, LoopAnalysisManager &AM,
+                     LoopStandardAnalysisResults &AR, LPMUpdater &Updater) {
+            // Delete the inner loop first. We also do this manually because we
+            // want to preserve the loop object and reuse it.
+            AR.SE.forgetLoop(*L.begin());
+            Loop02BB.getTerminator()->replaceUsesOfWith(&Loop020BB, &Loop02BB);
+            assert(std::next((*L.begin())->block_begin()) ==
+                       (*L.begin())->block_end() &&
+                   "There should only be one block.");
+            assert(AR.DT[&Loop020BB]->getNumChildren() == 0 &&
+                   "Cannot have children in the domtree!");
+            AR.DT.eraseNode(&Loop020BB);
+            Updater.markLoopAsDeleted(**L.begin());
+            AR.LI.removeBlock(&Loop020BB);
+            auto *OldL = L.removeChildLoop(L.begin());
+            Loop020BB.eraseFromParent();
+
+            auto *ParentL = L.getParentLoop();
+            AR.SE.forgetLoop(&L);
+            Loop00BB.getTerminator()->replaceUsesOfWith(&Loop02BB, &Loop0BB);
+            DeleteLoopBlocks(L, Loop00BB, AR, Updater);
+
+            // Now insert a new sibling loop, reusing a loop pointer.
+            ParentL->addChildLoop(OldL);
+            NewLoop03BB = BasicBlock::Create(Context, "loop.0.3", &F, &EndBB);
+            BranchInst::Create(&Loop0BB, NewLoop03BB, Undefi1, NewLoop03BB);
+            Loop00BB.getTerminator()->replaceUsesOfWith(&Loop0BB, NewLoop03BB);
+            AR.DT.addNewBlock(NewLoop03BB, &Loop00BB);
+            OldL->addBasicBlockToLoop(NewLoop03BB, AR.LI);
+            Updater.addSiblingLoops({OldL});
+            return PreservedAnalyses::all();
+          }));
+
+  // To respect our inner-to-outer traversal order, we must visit the
+  // newly-inserted sibling of the loop we just deleted before we visit the
+  // outer loop. When we do so, this must compute a fresh analysis result, even
+  // though our new loop has the same pointer value as the loop we deleted.
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+  EXPECT_CALL(MLAHandle, run(HasName("loop.0.3"), _, _));
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _))
+      .Times(2)
+      .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
+      .Times(3)
+      .WillRepeatedly(Invoke(getLoopAnalysisResult));
+
+  // In the final loop pipeline run we delete every loop, including the last
+  // loop of the nest. Each deletion again happens in the second pass of the
+  // pipeline, so we never make it to a third run on any loop. We also cover
+  // deleting multiple loops in a single pipeline, deleting the first loop and
+  // deleting the (last) top-level loop.
+  AddLoopPipelineAndVerificationPasses();
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.0"), _, _, _))
+      .WillOnce(
+          Invoke([&](Loop &L, LoopAnalysisManager &AM,
+                     LoopStandardAnalysisResults &AR, LPMUpdater &Updater) {
+            AR.SE.forgetLoop(&L);
+            Loop0BB.getTerminator()->replaceUsesOfWith(&Loop00BB, NewLoop03BB);
+            DeleteLoopBlocks(L, Loop0BB, AR, Updater);
+            return PreservedAnalyses::all();
+          }));
+
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0.3"), _, _, _))
+      .WillOnce(
+          Invoke([&](Loop &L, LoopAnalysisManager &AM,
+                     LoopStandardAnalysisResults &AR, LPMUpdater &Updater) {
+            AR.SE.forgetLoop(&L);
+            Loop0BB.getTerminator()->replaceUsesOfWith(NewLoop03BB, &Loop0BB);
+            DeleteLoopBlocks(L, Loop0BB, AR, Updater);
+            return PreservedAnalyses::all();
+          }));
+
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
+      .WillOnce(Invoke(getLoopAnalysisResult));
+  EXPECT_CALL(MLPHandle, run(HasName("loop.0"), _, _, _))
+      .WillOnce(
+          Invoke([&](Loop &L, LoopAnalysisManager &AM,
+                     LoopStandardAnalysisResults &AR, LPMUpdater &Updater) {
+            AR.SE.forgetLoop(&L);
+            EntryBB.getTerminator()->replaceUsesOfWith(&Loop0BB, &EndBB);
+            DeleteLoopBlocks(L, EntryBB, AR, Updater);
+            return PreservedAnalyses::all();
+          }));
+
+  // Add the function pass pipeline now that it is fully built up and run it
+  // over the module's one function.
+  MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
+  MPM.run(*M, MAM);
+}
+}
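
Editor's note (not part of the patch): the tests above exercise the new loop pass interface indirectly through mocks, so a minimal sketch of a concrete pass may help orient readers. The sketch below shows the run signature a loop pass implements under this design and how it would use LPMUpdater to report structural changes; the pass name, the DeletedLoop placeholder, and the commented-out analysis request are hypothetical, and the include paths assume the post-patch layout with LoopPassManager.h under Transforms/Scalar.

// Illustrative sketch only; names marked as placeholders are not from the patch.
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"

namespace {
using namespace llvm;

class ExampleLoopPass : public PassInfoMixin<ExampleLoopPass> {
public:
  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &Updater) {
    // The standard analyses (AA, DT, LI, SE, TLI, TTI) arrive precomputed in
    // AR; per-loop analyses are requested from AM with AR forwarded along,
    // e.g.: auto &R = AM.getResult<SomeLoopAnalysis>(L, AR); (hypothetical)

    bool DeletedLoop = false; // Placeholder for a real transformation result.
    if (DeletedLoop) {
      // A pass that deletes L must tell ScalarEvolution and the updater so no
      // later pass in the pipeline runs over the stale loop object.
      AR.SE.forgetLoop(&L);
      Updater.markLoopAsDeleted(L);
      return getLoopPassPreservedAnalyses();
    }

    // Newly created loops would be reported via Updater.addChildLoops({...})
    // or Updater.addSiblingLoops({...}) so the worklist visits them.
    return PreservedAnalyses::all();
  }
};
} // end anonymous namespace

Such a pass would be scheduled the same way the mock passes are above: added to a LoopPassManager that is wrapped with createFunctionToLoopPassAdaptor and nested into the function pipeline.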