diff --git a/llvm/include/llvm/Analysis/MustExecute.h b/llvm/include/llvm/Analysis/MustExecute.h
--- a/llvm/include/llvm/Analysis/MustExecute.h
+++ b/llvm/include/llvm/Analysis/MustExecute.h
@@ -7,16 +7,25 @@
 //===----------------------------------------------------------------------===//
 /// \file
 /// Contains a collection of routines for determining if a given instruction is
-/// guaranteed to execute if a given point in control flow is reached.  The most
+/// guaranteed to execute if a given point in control flow is reached. The most
 /// common example is an instruction within a loop being provably executed if we
 /// branch to the header of its containing loop.
 ///
+/// There are two interfaces available to determine if an instruction is
+/// executed once a given point in the control flow is reached:
+/// 1) A loop-centric one derived from LoopSafetyInfo.
+/// 2) A "must be executed context"-based one implemented in the
+///    MustBeExecutedContextExplorer.
+/// Please refer to the class comments for more information.
+///
 //===----------------------------------------------------------------------===//
 
 #ifndef LLVM_ANALYSIS_MUSTEXECUTE_H
 #define LLVM_ANALYSIS_MUSTEXECUTE_H
 
 #include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SetVector.h"
 #include "llvm/Analysis/EHPersonalities.h"
 #include "llvm/Analysis/InstructionPrecedenceTracking.h"
 #include "llvm/Analysis/LoopInfo.h"
@@ -26,8 +35,13 @@
 
 namespace llvm {
 
+namespace {
+template <typename T> using GetterTy = std::function<T *(const Function &F)>;
+}
+
 class Instruction;
 class DominatorTree;
+class PostDominatorTree;
 class Loop;
 
 /// The common interface for the loop safety information.
@@ -203,6 +217,486 @@
   virtual ~ICFLoopSafetyInfo() {};
 };
 
-}
+/// Forward declaration.
+struct MustBeExecutedContextExplorer;
+
+/// Enum that allows us to spell out the direction.
+enum class ExplorationDirection {
+  BACKWARD = 0,
+  FORWARD = 1,
+};
+
+/// Must be executed iterators visit stretches of instructions that are
+/// guaranteed to be executed together, potentially with other instructions
+/// executed in-between.
+///
+/// Given the following code, and assuming all statements are single
+/// instructions which transfer execution to the successor (see
+/// isGuaranteedToTransferExecutionToSuccessor), there are two possible
+/// outcomes. If we start the iterator at A, B, or E, we will visit only A, B,
+/// and E. If we start at C or D, we will visit all instructions A-E.
+///
+/// \code
+///   A;
+///   B;
+///   if (...) {
+///     C;
+///     D;
+///   }
+///   E;
+/// \endcode
+///
+///
+/// Below is the example extended with instructions F and G. Now we assume F
+/// might not transfer execution to its successor G. As a result we get the
+/// following visit sets:
+///
+/// Start Instruction | Visit Set
+/// A                 | A, B, E, F
+/// B                 | A, B, E, F
+/// C                 | A, B, C, D, E, F
+/// D                 | A, B, C, D, E, F
+/// E                 | A, B, E, F
+/// F                 | A, B, E, F
+/// G                 | A, B, E, F, G
+///
+///
+/// \code
+///   A;
+///   B;
+///   if (...) {
+///     C;
+///     D;
+///   }
+///   E;
+///   F;  // Might not transfer execution to its successor G.
+///   G;
+/// \endcode
+///
+///
+/// A more complex example involving conditionals, loops, break, and continue
+/// is shown below. We again assume all instructions will transfer control to
+/// the successor and we assume we can prove the inner loop to be finite. We
+/// omit non-trivial branch conditions as the exploration is oblivious to them.
+/// Constant branches are assumed to be unconditional in the CFG.
+/// The resulting visit sets are shown in the table below.
+///
+/// \code
+///   A;
+///   while (true) {
+///     B;
+///     if (...)
+///       C;
+///     if (...)
+///       continue;
+///     D;
+///     if (...)
+///       break;
+///     do {
+///       if (...)
+///         continue;
+///       E;
+///     } while (...);
+///     F;
+///   }
+///   G;
+/// \endcode
+///
+/// Start Instruction | Visit Set
+/// A                 | A, B
+/// B                 | A, B
+/// C                 | A, B, C
+/// D                 | A, B, D
+/// E                 | A, B, D, E, F
+/// F                 | A, B, D, F
+/// G                 | A, B, D, G
+///
+///
+/// Note that the examples show optimal visit sets but not necessarily the ones
+/// derived by the explorer depending on the available CFG analyses (see
+/// MustBeExecutedContextExplorer). Also note that, depending on the options,
+/// the visit set can contain instructions from other functions.
+struct MustBeExecutedIterator {
+  /// Type declarations that make this class an input iterator.
+  ///{
+  typedef const Instruction *value_type;
+  typedef std::ptrdiff_t difference_type;
+  typedef const Instruction **pointer;
+  typedef const Instruction *&reference;
+  typedef std::input_iterator_tag iterator_category;
+  ///}
+
+  using ExplorerTy = MustBeExecutedContextExplorer;
+
+  MustBeExecutedIterator(const MustBeExecutedIterator &Other)
+      : Visited(Other.Visited), ForwardCallStack(Other.ForwardCallStack),
+        BackwardCallStack(Other.BackwardCallStack),
+        DelayStack(Other.DelayStack), Explorer(Other.Explorer),
+        CurInst(Other.CurInst), Head(Other.Head), Tail(Other.Tail) {}
+
+  MustBeExecutedIterator(MustBeExecutedIterator &&Other)
+      : Visited(std::move(Other.Visited)),
+        ForwardCallStack(std::move(Other.ForwardCallStack)),
+        BackwardCallStack(std::move(Other.BackwardCallStack)),
+        DelayStack(std::move(Other.DelayStack)), Explorer(Other.Explorer),
+        CurInst(Other.CurInst), Head(Other.Head), Tail(Other.Tail) {}
+
+  MustBeExecutedIterator &operator=(MustBeExecutedIterator &&Other) {
+    if (this != &Other) {
+      std::swap(Visited, Other.Visited);
+      std::swap(ForwardCallStack, Other.ForwardCallStack);
+      std::swap(BackwardCallStack, Other.BackwardCallStack);
+      std::swap(DelayStack, Other.DelayStack);
+      std::swap(CurInst, Other.CurInst);
+      std::swap(Head, Other.Head);
+      std::swap(Tail, Other.Tail);
+    }
+    return *this;
+  }
+
+  ~MustBeExecutedIterator() {
+  }
+
+  /// Pre- and post-increment operators.
+  ///{
+  MustBeExecutedIterator &operator++() {
+    CurInst = advance();
+    return *this;
+  }
+
+  MustBeExecutedIterator operator++(int) {
+    MustBeExecutedIterator tmp(*this);
+    operator++();
+    return tmp;
+  }
+  ///}
+
+  /// Equality and inequality operators. Note that we ignore the history here.
+  ///{
+  bool operator==(const MustBeExecutedIterator &Other) const {
+    return CurInst == Other.CurInst && Head == Other.Head &&
+           Tail == Other.Tail;
+  }
+
+  bool operator!=(const MustBeExecutedIterator &Other) const {
+    return !(*this == Other);
+  }
+  ///}
+
+  /// Return the underlying instruction.
+  const Instruction *&operator*() { return CurInst; }
+  const Instruction *getCurrentInst() const { return CurInst; }
+
+  /// Return true if \p I was encountered by this iterator already.
+  bool count(const Instruction *I) const {
+    return Visited->count({I, ExplorationDirection::FORWARD}) ||
+           Visited->count({I, ExplorationDirection::BACKWARD});
+  }
+
+  /// Call stack value map interface
+  ///{
+  // TODO: allow the user to map values encountered in other functions back to
+  //       the function the iterator was exploring initially.
+  ///}
+
+  /// Configurable print method for debugging purposes.
+  ///
+  /// \param OS            The stream we print to.
+  /// \param PrintInst     If set, the current instruction is printed.
+  /// \param PrintInterval If set, the interval containing the current
+  ///                      instruction is printed.
+  void print(raw_ostream &OS, bool PrintInst, bool PrintInterval) const {
+    if (PrintInst)
+      OS << "[" << *CurInst << "]";
+
+    if (PrintInterval)
+      OS << "[H: " << *Head << "|T: " << *Tail << "]";
+  }
+
+private:
+  using VisitedSetTy =
+      DenseSet<PointerIntPair<const Instruction *, 1, ExplorationDirection>>;
+
+  /// Private constructors.
+  MustBeExecutedIterator(ExplorerTy &Explorer, const Instruction *I);
+
+  MustBeExecutedIterator(MustBeExecutedContextExplorer &Explorer,
+                         const Instruction *I,
+                         std::shared_ptr<VisitedSetTy> Visited);
+
+  /// Reset the iterator to its initial state pointing at \p I.
+  void reset(const Instruction *I);
+
+  /// Reset the iterator to point at \p I, keep cached state.
+  void resetInstruction(const Instruction *I);
+
+  /// Try to advance one of the underlying positions (Head or Tail).
+  ///
+  /// \param PoppedCallStack Flag to indicate if we just popped an instruction
+  ///                        from the call stack such that we do not enter the
+  ///                        same call site again.
+  ///
+  /// \return The next instruction in the must be executed context, or nullptr
+  ///         if none was found.
+  const Instruction *advance(bool PoppedCallStack = false);
+
+  /// A set to track the visited instructions in order to deal with endless
+  /// loops and recursion.
+  std::shared_ptr<VisitedSetTy> Visited;
+
+  /// Call stack used in forward exploration of the call graph.
+  SmallVector<const Instruction *, 8> ForwardCallStack;
+
+  /// Call stack used in backward exploration of the call graph.
+  SmallVector<const Instruction *, 8> BackwardCallStack;
+
+  /// Stack used in forward exploration when multiple successors are known to
+  /// be executed.
+  SetVector<const Instruction *> DelayStack;
+
+  /// A reference to the explorer that created this iterator.
+  ExplorerTy &Explorer;
+
+  /// The instruction we are currently exposing to the user. There is always an
+  /// instruction that we know is executed with the given program point,
+  /// initially the program point itself.
+  const Instruction *CurInst;
+
+  /// Two positions that mark the program points where this iterator will look
+  /// for the next instruction. Note that the current instruction is either the
+  /// one pointed to by Head, Tail, or both.
+  const Instruction *Head, *Tail;
+
+  friend struct MustBeExecutedContextExplorer;
+};
+
+/// A "must be executed context" for a given program point PP is the set of
+/// instructions, before and after PP, that are executed always when PP is
+/// reached. The MustBeExecutedContextExplorer is a lazy and caching interface
+/// to explore "must be executed contexts" in a module.
+///
+/// The explorer exposes "must be executed iterators" that traverse the must be
+/// executed context. There is little information sharing between iterators as
+/// the expected use case involves few iterators for "far apart" instructions.
+/// If that changes, we should consider caching more intermediate results.
+struct MustBeExecutedContextExplorer {
+
+  /// Create a CFG only explorer with the given analyses as support.
+  ///
+  /// \param ExploreInterBlock    Flag to indicate if instructions in blocks
+  ///                             other than the parent of PP should be
+  ///                             explored.
+  /// \param ExploreCFGForward    Flag to indicate if instructions located
+  ///                             after PP in the CFG, e.g., post-dominating
+  ///                             PP, should be explored.
+  /// \param ExploreCFGBackward   Flag to indicate if instructions located
+  ///                             before PP in the CFG, e.g., dominating PP,
+  ///                             should be explored.
+  /// \param ExploreFlowSensitive Flag to indicate if flow-sensitive reasoning
+  ///                             should be performed during exploration.
+  MustBeExecutedContextExplorer(bool ExploreInterBlock, bool ExploreCFGForward,
+                                bool ExploreCFGBackward,
+                                bool ExploreFlowSensitive,
+                                const DominatorTree *DT = nullptr,
+                                const PostDominatorTree *PDT = nullptr,
+                                const LoopInfo *LI = nullptr)
+      : MustBeExecutedContextExplorer(
+            ExploreInterBlock, ExploreCFGForward, ExploreCFGBackward, false,
+            false, ExploreFlowSensitive, [=](const Function &) { return LI; },
+            [=](const Function &) { return DT; },
+            [=](const Function &) { return PDT; }) {}
+
+  /// In the description of the parameters we use PP to denote a program point
+  /// for which the must be executed context is explored.
+  ///
+  /// \param ExploreInterBlock    Flag to indicate if instructions in blocks
+  ///                             other than the parent of PP should be
+  ///                             explored.
+  /// \param ExploreCFGForward    Flag to indicate if instructions located
+  ///                             after PP in the CFG, e.g., post-dominating
+  ///                             PP, should be explored.
+  /// \param ExploreCFGBackward   Flag to indicate if instructions located
+  ///                             before PP in the CFG, e.g., dominating PP,
+  ///                             should be explored.
+  /// \param ExploreCGForward     Flag to indicate if instructions located in
+  ///                             a callee that is reached from PP should be
+  ///                             explored, i.e., if the call graph (CG) is
+  ///                             traversed in forward direction.
+  /// \param ExploreCGBackward    Flag to indicate if instructions located in
+  ///                             a caller that leads to PP should be
+  ///                             explored, i.e., if the call graph (CG) is
+  ///                             traversed in backward direction.
+  /// \param ExploreFlowSensitive Flag to indicate if flow-sensitive reasoning
+  ///                             should be performed during exploration.
+  /// \param LIGetter  Return the LoopInfo analysis for a given function or a
+  ///                  nullptr if it is not available.
+  /// \param DTGetter  Return the DominatorTree analysis for a given function
+  ///                  or a nullptr if it is not available.
+  /// \param PDTGetter Return the PostDominatorTree analysis for a given
+  ///                  function or a nullptr if it is not available.
+  MustBeExecutedContextExplorer(
+      bool ExploreInterBlock, bool ExploreCFGForward, bool ExploreCFGBackward,
+      bool ExploreCGForward, bool ExploreCGBackward, bool ExploreFlowSensitive,
+      GetterTy<const LoopInfo> LIGetter =
+          [](const Function &) { return nullptr; },
+      GetterTy<const DominatorTree> DTGetter =
+          [](const Function &) { return nullptr; },
+      GetterTy<const PostDominatorTree> PDTGetter =
+          [](const Function &) { return nullptr; })
+      : ExploreInterBlock(ExploreInterBlock),
+        ExploreCFGForward(ExploreCFGForward),
+        ExploreCFGBackward(ExploreCFGBackward),
+        ExploreCGForward(ExploreCGForward),
+        ExploreCGBackward(ExploreCGBackward),
+        ExploreFlowSensitive(ExploreFlowSensitive), LIGetter(LIGetter),
+        DTGetter(DTGetter), PDTGetter(PDTGetter), EndIterator(*this, nullptr) {
+    assert((ExploreInterBlock || (!ExploreCGForward && !ExploreCGBackward)) &&
+           "Cannot explore the call graph if inter-block exploration is not "
+           "allowed.");
+  }
+
+  /// Clean up the dynamically allocated iterators.
+  ~MustBeExecutedContextExplorer() {
+    DeleteContainerPointers(Iterators);
+  }
+
+  /// Iterator-based interface. \see MustBeExecutedIterator.
+  ///{
+  using iterator = MustBeExecutedIterator;
+  using const_iterator = const MustBeExecutedIterator;
+
+  /// Return an iterator to explore the context around \p PP.
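+  ///
+  /// For illustration, a minimal use of this interface could look as follows.
+  /// This is only a sketch: `PP` stands for some `const Instruction *` program
+  /// point of the caller's choosing and `visit` for arbitrary user code.
+  /// \code
+  ///   MustBeExecutedContextExplorer Explorer(
+  ///       /* ExploreInterBlock */ true, /* ExploreCFGForward */ true,
+  ///       /* ExploreCFGBackward */ true, /* ExploreFlowSensitive */ false);
+  ///   for (const Instruction *CI : Explorer.range(PP))
+  ///     visit(CI);
+  /// \endcode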
+  iterator &begin(const Instruction *PP) {
+    auto *&It = InstructionIteratorMap[PP];
+    if (!It) {
+      It = new iterator(*this, PP);
+      Iterators.push_back(It);
+    }
+    return *It;
+  }
+
+  /// Return an iterator to explore the cached context around \p PP.
+  const_iterator &begin(const Instruction *PP) const {
+    return *InstructionIteratorMap.lookup(PP);
+  }
+
+  /// Return a universal end iterator.
+  ///{
+  iterator &end() { return EndIterator; }
+  iterator &end(const Instruction *) { return EndIterator; }
+
+  const_iterator &end() const { return EndIterator; }
+  const_iterator &end(const Instruction *) const { return EndIterator; }
+  ///}
+
+  /// Return an iterator range to explore the context around \p PP.
+  llvm::iterator_range<iterator> range(const Instruction *PP) {
+    return llvm::make_range(begin(PP), end(PP));
+  }
+
+  /// Return an iterator range to explore the cached context around \p PP.
+  llvm::iterator_range<const_iterator> range(const Instruction *PP) const {
+    return llvm::make_range(begin(PP), end(PP));
+  }
+  ///}
+
+  /// Update interface
+  ///{
+
+  /// Insert the instruction \p NewI before the instruction \p PosI in the
+  /// cached information.
+  void insertInstructionBefore(const Instruction *NewI,
+                               const Instruction *PosI);
+
+  /// Insert the instruction \p NewI after the instruction \p PosI in the
+  /// cached information. This is not allowed for the last instruction in a
+  /// basic block.
+  void insertInstructionAfter(const Instruction *NewI,
+                              const Instruction *PosI);
+
+  /// Remove instruction \p I from the cached information.
+  void removeInstruction(const Instruction *I);
+
+  ///}
+
+  /// Return the next instruction that is guaranteed to be executed after
+  /// \p PP.
+  ///
+  /// \param It              The iterator that is used to traverse the must be
+  ///                        executed context.
+  /// \param PP              The program point for which the next instruction
+  ///                        that is guaranteed to execute is determined.
+  /// \param PoppedCallStack Flag to indicate if we just popped an instruction
+  ///                        from the call stack such that we do not enter the
+  ///                        same call site again.
+  const Instruction *
+  getMustBeExecutedNextInstruction(MustBeExecutedIterator &It,
+                                   const Instruction *PP, bool PoppedCallStack);
+
+  /// Return the previous instruction that is guaranteed to be executed before
+  /// \p PP.
+  ///
+  /// \param It              The iterator that is used to traverse the must be
+  ///                        executed context.
+  /// \param PP              The program point for which the previous
+  ///                        instruction that is guaranteed to execute is
+  ///                        determined.
+  /// \param PoppedCallStack Flag to indicate if we just popped an instruction
+  ///                        from the call stack such that we do not enter the
+  ///                        same call site again.
+  const Instruction *
+  getMustBeExecutedPrevInstruction(MustBeExecutedIterator &It,
+                                   const Instruction *PP, bool PoppedCallStack);
+
+  /// Find the next join point from \p InitBB in forward direction.
+  const BasicBlock *findForwardJoinPoint(const BasicBlock *InitBB,
+                                         const LoopInfo *LI,
+                                         const PostDominatorTree *PDT);
+
+  /// Find the next join point from \p InitBB in backward direction.
+  const BasicBlock *findBackwardJoinPoint(const BasicBlock *InitBB,
+                                          const LoopInfo *LI,
+                                          const DominatorTree *DT);
+
+  /// Parameters that limit the performed exploration. See the constructor for
+  /// their meaning.
+  ///{
+  const bool ExploreInterBlock;
+  const bool ExploreCFGForward;
+  const bool ExploreCFGBackward;
+  const bool ExploreCGForward;
+  const bool ExploreCGBackward;
+  const bool ExploreFlowSensitive;
+  ///}
+
+private:
+  /// Getters for common CFG analyses: LoopInfo, DominatorTree, and
+  /// PostDominatorTree.
+  ///{
+  GetterTy<const LoopInfo> LIGetter;
+  GetterTy<const DominatorTree> DTGetter;
+  GetterTy<const PostDominatorTree> PDTGetter;
+  ///}
+
+  /// Map to cache isGuaranteedToTransferExecutionToSuccessor results.
+  DenseMap<const BasicBlock *, Optional<bool>> BlockTransferMap;
+
+  /// Map to cache containsIrreducibleCFG results.
+  DenseMap<const Function *, Optional<bool>> IrreducibleControlMap;
+
+  /// Map to cache function exit join points.
+  DenseMap<const Function *, Optional<const Instruction *>>
+      FunctionExitJoinPointMap;
+
+  /// Map from instructions to associated must be executed iterators.
+  DenseMap<const Instruction *, MustBeExecutedIterator *>
+      InstructionIteratorMap;
+
+  /// Collection of all iterators in flight.
+  SmallVector<MustBeExecutedIterator *, 8> Iterators;
+
+  /// A unique end iterator.
+  MustBeExecutedIterator EndIterator;
+};
+
+} // namespace llvm
 
 #endif
diff --git a/llvm/lib/Analysis/MustExecute.cpp b/llvm/lib/Analysis/MustExecute.cpp
--- a/llvm/lib/Analysis/MustExecute.cpp
+++ b/llvm/lib/Analysis/MustExecute.cpp
@@ -7,9 +7,12 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/Analysis/MustExecute.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/Analysis/CFG.h"
 #include "llvm/Analysis/InstructionSimplify.h"
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/Passes.h"
+#include "llvm/Analysis/PostDominators.h"
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/AssemblyAnnotationWriter.h"
 #include "llvm/IR/DataLayout.h"
@@ -19,8 +22,11 @@
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/FormattedStream.h"
 #include "llvm/Support/raw_ostream.h"
+
 using namespace llvm;
 
+#define DEBUG_TYPE "must-execute"
+
 const DenseMap<BasicBlock *, ColorVector> &
 LoopSafetyInfo::getBlockColors() const {
   return BlockColors;
@@ -430,3 +436,693 @@
   return false;
 }
+
+/// Return true if \p L might be an endless loop.
+static bool maybeEndlessLoop(const Loop &L) {
+  if (L.getHeader()->getParent()->hasFnAttribute(Attribute::WillReturn))
+    return false;
+  // TODO: Actually try to prove it is not.
+  // TODO: If maybeEndlessLoop is going to be expensive, cache it.
+  return true;
+}
+
+static bool mayContainIrreducibleControl(const Function &F,
+                                         const LoopInfo *LI) {
+  if (!LI)
+    return true;
+
+  using RPOTraversal = ReversePostOrderTraversal<const Function *>;
+  RPOTraversal FuncRPOT(&F);
+  return containsIrreducibleCFG<const BasicBlock *, const RPOTraversal,
+                                const LoopInfo>(FuncRPOT, *LI);
+}
+
+static void getGuaranteedExecutedBlocks(
+    const Instruction &TI,
+    SmallVectorImpl<const BasicBlock *> &GuaranteedExecutedBlocks,
+    const DominatorTree *DT, const LoopInfo *LI) {
+  const BasicBlock *TIBlock = TI.getParent();
+  bool NoThrow = TIBlock->getParent()->doesNotThrow();
+  const Loop *L = LI ? LI->getLoopFor(TIBlock) : nullptr;
+
+  if (const BasicBlock *SuccBB = getSuccessorInFirstIteration(TI, L, DT, LI))
+    GuaranteedExecutedBlocks.push_back(SuccBB);
+
+  // TODO: This might be better off in findForwardJoinPoint.
+  // TODO: Check no-throw up to this block in the loop, not for the whole
+  //       function.
+  if (DT && L && !maybeEndlessLoop(*L) && NoThrow &&
+      TI.getNumSuccessors() == 2 && L->isLoopLatch(TIBlock)) {
+    // TI is a latch of a finite loop. If it dominates all exiting blocks,
+    // the non-backedge successor has to be taken eventually.
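+    // Illustrative sketch (an assumption for exposition, not taken from the
+    // code below): with
+    //   latch:  br i1 %c, label %header, label %exit
+    // and the latch dominating every exiting block of a finite, nounwind
+    // loop, the only way to leave the loop is through the latch, so the
+    // non-backedge successor %exit is guaranteed to execute eventually.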
+    SmallVector<BasicBlock *, 4> ExitingBlocks;
+    L->getExitingBlocks(ExitingBlocks);
+    bool DominatesAllExitingBlocks =
+        llvm::all_of(ExitingBlocks, [DT, TIBlock](BasicBlock *ExitingBB) {
+          return DT->dominates(TIBlock, ExitingBB);
+        });
+    if (DominatesAllExitingBlocks) {
+      if (TI.getSuccessor(1) == L->getHeader())
+        GuaranteedExecutedBlocks.push_back(TI.getSuccessor(0));
+      else
+        GuaranteedExecutedBlocks.push_back(TI.getSuccessor(1));
+    }
+  }
+}
+
+/// Return an instruction that is always executed before the function \p F is
+/// left, assuming it is left through a return.
+static const Instruction *findFunctionExitJoinPoint(const Function *F,
+                                                    const DominatorTree *DT) {
+  const BasicBlock *JoinBB = nullptr;
+  for (const BasicBlock &BB : *F) {
+    // Skip all but return instructions.
+    if (!isa<ReturnInst>(BB.getTerminator()))
+      continue;
+
+    // The first return instruction found is the initial join point.
+    if (!JoinBB) {
+      JoinBB = &BB;
+      continue;
+    }
+
+    // When we find more return instructions, the nearest common dominator of
+    // all of them is known to be executed prior to all of them.
+    if (DT) {
+      JoinBB = DT->findNearestCommonDominator(JoinBB, &BB);
+      assert(JoinBB && "Assumed a common dominator!");
+      continue;
+    }
+
+    // If we do not have a dominator tree we still know that *at least* the
+    // entry block is executed as we assume the function is left through a
+    // return.
+    // TODO: Improve this by using getMustBeExecutedNextInstruction() from this
+    //       point.
+    return F->getEntryBlock().getTerminator();
+  }
+
+  // If we traversed all the blocks and found the nearest common dominator we
+  // return it. If we did not find a return instruction we return a nullptr,
+  // but we should indicate a problem instead because the function is never
+  // left through a return.
+  return JoinBB ? JoinBB->getTerminator() : nullptr;
+}
+
+/// Lookup \p Key in \p Map and return the result, potentially after
+/// initializing the optional through \p Fn(\p args).
+template <typename K, typename V, typename FnTy, typename... ArgsTy>
+static V getOrCreateCachedOptional(K Key, DenseMap<K, Optional<V>> &Map,
+                                   FnTy &&Fn, ArgsTy &&... args) {
+  Optional<V> &OptVal = Map[Key];
+  if (!OptVal.hasValue())
+    OptVal = Fn(std::forward<ArgsTy>(args)...);
+  return OptVal.getValue();
+}
+
+const BasicBlock *MustBeExecutedContextExplorer::findForwardJoinPoint(
+    const BasicBlock *InitBB, const LoopInfo *LI,
+    const PostDominatorTree *PDT) {
+  LLVM_DEBUG(dbgs() << "\tFind forward join point for " << InitBB->getName()
+                    << (LI ? " [LI]" : "") << (PDT ? " [PDT]" : ""));
+
+  const Function &F = *InitBB->getParent();
+  const Loop *L = LI ? LI->getLoopFor(InitBB) : nullptr;
+  const BasicBlock *HeaderBB = L ? L->getHeader() : InitBB;
+  bool WillReturnAndNoThrow = (F.hasFnAttribute(Attribute::WillReturn) ||
+                               (L && !maybeEndlessLoop(*L))) &&
+                              F.doesNotThrow();
+  LLVM_DEBUG(dbgs() << (L ? " [in loop]" : "")
+                    << (WillReturnAndNoThrow ? " [WillReturn] [NoUnwind]" : "")
+                    << "\n");
+
+  // Determine the adjacent blocks in the given direction but exclude (self)
+  // loops under certain circumstances.
+  SmallVector<const BasicBlock *, 8> Worklist;
+  for (const BasicBlock *SuccBB : successors(InitBB)) {
+    bool IsLatch = SuccBB == HeaderBB;
+    // Loop latches are ignored in forward propagation if the loops cannot be
+    // endless and may not throw: control has to go somewhere.
+    if (!WillReturnAndNoThrow || !IsLatch)
+      Worklist.push_back(SuccBB);
+  }
+  LLVM_DEBUG(dbgs() << "\t\t#Worklist: " << Worklist.size() << "\n");
+
+  // If there are no other adjacent blocks, there is no join point.
+  if (Worklist.empty())
+    return nullptr;
+
+  // If there is one adjacent block, it is the join point.
+  if (Worklist.size() == 1)
+    return Worklist[0];
+
+  // Try to determine a join block through the help of the post-dominance
+  // tree. If no tree was provided, we perform simple pattern matching for one
+  // block conditionals only.
+  const BasicBlock *JoinBB = nullptr;
+  if (PDT)
+    if (const auto *InitNode = PDT->getNode(InitBB))
+      if (const auto *IDomNode = InitNode->getIDom())
+        JoinBB = IDomNode->getBlock();
+
+  if (!JoinBB && Worklist.size() == 2) {
+    const BasicBlock *Succ0 = Worklist[0];
+    const BasicBlock *Succ1 = Worklist[1];
+    const BasicBlock *Succ0UniqueSucc = Succ0->getUniqueSuccessor();
+    const BasicBlock *Succ1UniqueSucc = Succ1->getUniqueSuccessor();
+    if (Succ0 == Succ1UniqueSucc) {
+      // InitBB ->          Succ0 = JoinBB
+      // InitBB -> Succ1 -> Succ0 = JoinBB
+      JoinBB = Succ0;
+    } else if (Succ1 == Succ0UniqueSucc) {
+      // InitBB -> Succ0 -> Succ1 = JoinBB
+      // InitBB ->          Succ1 = JoinBB
+      JoinBB = Succ1;
+    } else if (Succ0UniqueSucc == Succ1UniqueSucc) {
+      // InitBB -> Succ0 -> JoinBB
+      // InitBB -> Succ1 -> JoinBB
+      JoinBB = Succ0UniqueSucc;
+    }
+  }
+
+  if (!JoinBB && L)
+    JoinBB = L->getUniqueExitBlock();
+
+  if (!JoinBB)
+    return nullptr;
+
+  LLVM_DEBUG(dbgs() << "\t\tJoin block candidate: " << JoinBB->getName()
+                    << "\n");
+
+  // In forward direction we check if control will for sure reach JoinBB from
+  // InitBB, thus it can not be "stopped" along the way. Ways to "stop" control
+  // are: infinite loops and instructions that do not necessarily transfer
+  // execution to their successor. To check for them we traverse the CFG from
+  // the adjacent blocks to the JoinBB, looking at all intermediate blocks.
+
+  if (!F.hasFnAttribute(Attribute::WillReturn) || !F.doesNotThrow()) {
+
+    auto BlockTransfersExecutionToSuccessor = [](const BasicBlock *BB) {
+      return isGuaranteedToTransferExecutionToSuccessor(BB);
+    };
+
+    SmallPtrSet<const BasicBlock *, 16> Visited;
+    while (!Worklist.empty()) {
+      const BasicBlock *ToBB = Worklist.pop_back_val();
+      if (ToBB == JoinBB)
+        continue;
+
+      // Make sure all loops in-between are finite.
+      if (!Visited.insert(ToBB).second) {
+        if (!F.hasFnAttribute(Attribute::WillReturn)) {
+          bool MayContainIrreducibleControl = getOrCreateCachedOptional(
+              &F, IrreducibleControlMap, mayContainIrreducibleControl, F, LI);
+          if (MayContainIrreducibleControl)
+            return nullptr;
+
+          const Loop *L = LI->getLoopFor(ToBB);
+          if (L && maybeEndlessLoop(*L))
+            return nullptr;
+        }
+
+        continue;
+      }
+
+      // Make sure the block has no instructions that could stop control
+      // transfer.
+      bool TransfersExecution = getOrCreateCachedOptional(
+          ToBB, BlockTransferMap, BlockTransfersExecutionToSuccessor, ToBB);
+      if (!TransfersExecution)
+        return nullptr;
+
+      for (const BasicBlock *AdjacentBB : successors(ToBB))
+        Worklist.push_back(AdjacentBB);
+    }
+  }
+
+  LLVM_DEBUG(dbgs() << "\tJoin block: " << JoinBB->getName() << "\n");
+  return JoinBB;
+}
+
+const BasicBlock *MustBeExecutedContextExplorer::findBackwardJoinPoint(
+    const BasicBlock *InitBB, const LoopInfo *LI, const DominatorTree *DT) {
+  LLVM_DEBUG(dbgs() << "\tFind backward join point for " << InitBB->getName()
+                    << (LI ? " [LI]" : "") << (DT ? " [DT]" : ""));
+
+  // Try to determine a join block through the help of the dominance tree. If
+  // no tree was provided, we perform simple pattern matching for one block
+  // conditionals only.
+  if (DT) {
+    const auto *InitNode = DT->getNode(InitBB);
+    assert(InitNode && "Expected dominator tree node!");
+    const auto *IDomNode = InitNode->getIDom();
+    assert(IDomNode &&
+           "Expected dominator tree node to have a dominator node!");
+    assert(IDomNode->getBlock() &&
+           "Expected dominator tree node to have a block!");
+    return IDomNode->getBlock();
+  }
+
+  const Function &F = *InitBB->getParent();
+  const Loop *L = LI ? LI->getLoopFor(InitBB) : nullptr;
+  const BasicBlock *HeaderBB = L ? L->getHeader() : nullptr;
+
+  // Determine the predecessor blocks but ignore backedges.
+  SmallVector<const BasicBlock *, 8> Worklist;
+  for (const BasicBlock *PredBB : predecessors(InitBB)) {
+    bool IsBackedge =
+        (PredBB == InitBB) || (HeaderBB == InitBB && L->contains(PredBB));
+    // Loop backedges are ignored in backwards propagation: control has to come
+    // from somewhere.
+    if (!IsBackedge)
+      Worklist.push_back(PredBB);
+  }
+
+  // If there are no other predecessor blocks, there is no join point.
+  if (Worklist.empty())
+    return nullptr;
+
+  // If there is one predecessor block, it is the join point.
+  if (Worklist.size() == 1)
+    return Worklist[0];
+
+  const BasicBlock *JoinBB = nullptr;
+  if (Worklist.size() == 2) {
+    const BasicBlock *Pred0 = Worklist[0];
+    const BasicBlock *Pred1 = Worklist[1];
+    const BasicBlock *Pred0UniquePred = Pred0->getUniquePredecessor();
+    const BasicBlock *Pred1UniquePred = Pred1->getUniquePredecessor();
+    if (Pred0 == Pred1UniquePred) {
+      // InitBB <-          Pred0 = JoinBB
+      // InitBB <- Pred1 <- Pred0 = JoinBB
+      JoinBB = Pred0;
+    } else if (Pred1 == Pred0UniquePred) {
+      // InitBB <- Pred0 <- Pred1 = JoinBB
+      // InitBB <-          Pred1 = JoinBB
+      JoinBB = Pred1;
+    } else if (Pred0UniquePred == Pred1UniquePred) {
+      // InitBB <- Pred0 <- JoinBB
+      // InitBB <- Pred1 <- JoinBB
+      JoinBB = Pred0UniquePred;
+    }
+  }
+
+  if (!JoinBB && L)
+    JoinBB = L->getHeader();
+
+  // In backwards direction there is no need to show termination of previous
+  // instructions. If they do not terminate, the code afterward is dead, making
+  // any information/transformation correct anyway.
+  return JoinBB;
+}
+
+const Instruction *
+MustBeExecutedContextExplorer::getMustBeExecutedNextInstruction(
+    MustBeExecutedIterator &It, const Instruction *PP, bool PoppedCallStack) {
+  if (!PP)
+    return PP;
+  LLVM_DEBUG(dbgs() << "Find next instruction for " << *PP
+                    << (PoppedCallStack ? " [PoppedCallStack]" : "") << "\n");
+
+  // If we explore only inside a given basic block we stop at terminators.
+  if (!ExploreInterBlock && PP->isTerminator()) {
+    LLVM_DEBUG(dbgs() << "\tReached terminator in intra-block mode, done\n");
+    return nullptr;
+  }
+
+  // The function that contains the current position.
+  const Function *PPFunc = PP->getFunction();
+
+  // If we explore the call graph (CG) forward and we see a call site we can
+  // continue with the callee instructions if the callee has an exact
+  // definition. If this is the case, we add the call site to the forward call
+  // stack such that we can return to the position after the call and also
+  // translate values for the user.
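+  // For example (an illustrative sketch, not tied to the code below): with
+  //
+  //   caller() { A; call callee(); B; }    callee() { X; Y; return; }
+  //
+  // and PP being the call, we push the call site onto the forward call stack
+  // and continue at X. Once the return in callee() is reached, the iterator
+  // pops the call site again and resumes with B after the call.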
+  if (ExploreCGForward && !PoppedCallStack) {
+    if (ImmutableCallSite ICS = ImmutableCallSite(PP))
+      if (const Function *F = ICS.getCalledFunction())
+        if (F->hasExactDefinition()) {
+          It.ForwardCallStack.push_back({PP});
+          return &F->getEntryBlock().front();
+        }
+  }
+
+  // At a return instruction we have two options that allow us to continue:
+  //   1) We are in a function that we entered earlier, in this case we can
+  //      simply pop the last call site from the forward call stack and return
+  //      the instruction after the call site. (this happens in the advance
+  //      method!)
+  //   2) We explore the call graph (CG) backwards trying to find a point that
+  //      is known to be executed every time when the current function is.
+  if (isa<ReturnInst>(PP)) {
+    // The advance method will pop the stack.
+    if (!It.ForwardCallStack.empty()) {
+      LLVM_DEBUG(dbgs() << "\tReached return, will pop call stack\n");
+      return nullptr;
+    }
+
+    // Check if backwards call graph exploration is allowed.
+    if (!ExploreCGBackward) {
+      LLVM_DEBUG(
+          dbgs()
+          << "\tReached return, no backward CG exploration allowed, done\n");
+      return nullptr;
+    }
+
+    // We do not know all the callers for non-internal functions.
+    if (!PPFunc->hasInternalLinkage()) {
+      LLVM_DEBUG(dbgs() << "\tReached return, no unique call site (external "
+                           "linkage), done\n");
+      return nullptr;
+    }
+
+    // TODO: We restrict it to a single call site for now but we could allow
+    //       more and find a join point interprocedurally.
+    if (PPFunc->getNumUses() != 1) {
+      LLVM_DEBUG(
+          dbgs()
+          << "\tReached return, no unique call site (multiple uses), done\n");
+      return nullptr;
+    }
+
+    // Make sure the user is a direct call.
+    // TODO: Indirect calls can be fine too.
+    ImmutableCallSite ICS(PPFunc->user_back());
+    if (!ICS || ICS.getCalledFunction() != PPFunc) {
+      LLVM_DEBUG(
+          dbgs()
+          << "\tReached return, no unique call site (indirect call), done\n");
+      return nullptr;
+    }
+
+    // We know we reached the return instruction of the callee so we will
+    // continue at the next instruction after the call.
+    LLVM_DEBUG(dbgs() << "\tReached return, continue after unique call site\n");
+    return ICS.getInstruction()->getNextNode();
+  }
+
+  // If we do not traverse the call graph we check if we can make progress in
+  // the current function. First, check if the instruction is guaranteed to
+  // transfer execution to the successor.
+  bool TransfersExecution = isGuaranteedToTransferExecutionToSuccessor(PP);
+
+  // If this is not a terminator we know that there is a single instruction
+  // after this one that is executed next if control is transferred. If not,
+  // we can try to go back to a call site we entered earlier. If none exists,
+  // we do not know any instruction that has to be executed next.
+  if (!PP->isTerminator()) {
+    const Instruction *NextPP =
+        TransfersExecution ? PP->getNextNode() : nullptr;
+    LLVM_DEBUG(dbgs() << "\tIntermediate instruction "
+                      << (TransfersExecution ? "does" : "does not")
+                      << " transfer control\n");
+    return NextPP;
+  }
+
+  // Finally, we have to handle terminators, trivial ones first.
+  assert(PP->isTerminator() && "Expected a terminator!");
+
+  // A terminator without a successor which is not a return is not handled yet.
+  // We can still try to continue at a known call site on the forward call
+  // stack of the iterator.
+  if (PP->getNumSuccessors() == 0) {
+    LLVM_DEBUG(dbgs() << "\tUnhandled terminator\n");
+    return nullptr;
+  }
+
+  // A terminator with a single successor, we will continue at the beginning of
+  // that one.
+  if (PP->getNumSuccessors() == 1) {
+    LLVM_DEBUG(
+        dbgs() << "\tUnconditional terminator, continue with successor\n");
+    return &PP->getSuccessor(0)->front();
+  }
+
+  // Use flow-sensitive reasoning if allowed, e.g., to fold branches in the
+  // first loop iteration.
+  const LoopInfo *LI = LIGetter(*PPFunc);
+  const DominatorTree *DT = DTGetter(*PPFunc);
+  if (ExploreFlowSensitive) {
+    SmallVector<const BasicBlock *, 4> GuaranteedExecutedBlocks;
+    getGuaranteedExecutedBlocks(*PP, GuaranteedExecutedBlocks, DT, LI);
+    LLVM_DEBUG(dbgs() << "\tFound " << GuaranteedExecutedBlocks.size()
+                      << " blocks that are guaranteed executed as well\n");
+    for (const BasicBlock *GEBB : GuaranteedExecutedBlocks) {
+      LLVM_DEBUG(dbgs() << "\t\t- " << GEBB->getName() << "\n");
+      It.DelayStack.insert(&GEBB->front());
+    }
+  }
+
+  // Multiple successors mean we need to find the join point where control flow
+  // converges again. We use the findForwardJoinPoint helper function with
+  // information about the function and helper analyses, if available.
+  const PostDominatorTree *PDT = PDTGetter(*PPFunc);
+  if (const BasicBlock *JoinBB = findForwardJoinPoint(PP->getParent(), LI, PDT))
+    return &JoinBB->front();
+
+  LLVM_DEBUG(dbgs() << "\tNo join point found\n");
+  return nullptr;
+}
+
+const Instruction *
+MustBeExecutedContextExplorer::getMustBeExecutedPrevInstruction(
+    MustBeExecutedIterator &It, const Instruction *PP, bool PoppedCallStack) {
+  if (!PP)
+    return PP;
+
+  bool IsFirst = !(PP->getPrevNode());
+  LLVM_DEBUG(dbgs() << "Find previous instruction for " << *PP
+                    << (PoppedCallStack ? " [PoppedCallStack]" : "")
+                    << (IsFirst ? " [IsFirst]" : "") << "\n");
+
+  // If we explore only inside a given basic block we stop at the first
+  // instruction.
+  if (!ExploreInterBlock && IsFirst) {
+    LLVM_DEBUG(dbgs() << "\tReached block front in intra-block mode, done\n");
+    return nullptr;
+  }
+
+  // The block and function that contain the current position.
+  const BasicBlock *PPBlock = PP->getParent();
+  const Function *PPFunc = PPBlock->getParent();
+
+  // Ready the dominator tree if available.
+  const DominatorTree *DT = DTGetter(*PPFunc);
+
+  // If we explore the call graph (CG) forward and the current instruction is
+  // a call site we can continue with the callee instructions if the callee
+  // has an exact definition. If this is the case, we add the call site to the
+  // backward call stack such that we can return to the position of the call
+  // and also translate values for the user.
+  if (ExploreCGForward && !PoppedCallStack) {
+    if (ImmutableCallSite ICS = ImmutableCallSite(PP))
+      if (const Function *F = ICS.getCalledFunction())
+        if (F->hasExactDefinition())
+          if (const Instruction *JoinPP =
+                  getOrCreateCachedOptional(F, FunctionExitJoinPointMap,
+                                            findFunctionExitJoinPoint, F, DT)) {
+            It.BackwardCallStack.push_back({PP});
+            return JoinPP;
+          }
+  }
+
+  // At the first instruction in a function we have two options that allow us
+  // to continue:
+  //   1) We are in a function that we entered earlier, in this case we can
+  //      simply pop the last call site from the backward call stack and
+  //      return the instruction before the call site. (this happens in the
+  //      advance method!)
+  //   2) We explore the call graph (CG) backwards trying to find a point that
+  //      is known to be executed every time when the current function is.
+  if (IsFirst && PPBlock == &PPFunc->getEntryBlock()) {
+    // The advance method will pop the stack.
+    if (!It.BackwardCallStack.empty()) {
+      LLVM_DEBUG(
+          dbgs() << "\tReached function beginning, will pop call stack\n");
+      return nullptr;
+    }
+
+    // Check if backwards call graph exploration is allowed.
+    if (!ExploreCGBackward) {
+      LLVM_DEBUG(dbgs() << "\tReached function beginning, no backward CG "
+                           "exploration allowed, done\n");
+      return nullptr;
+    }
+
+    // We do not know all the callers for non-internal functions.
+    if (!PPFunc->hasInternalLinkage()) {
+      LLVM_DEBUG(
+          dbgs()
+          << "\tReached function beginning, no unique call site (external "
+             "linkage), done\n");
+      return nullptr;
+    }
+
+    // TODO: We restrict it to a single call site for now but we could allow
+    //       more and find a join point interprocedurally.
+    if (PPFunc->getNumUses() != 1) {
+      LLVM_DEBUG(dbgs() << "\tReached function beginning, no unique call site "
+                           "(multiple uses), done\n");
+      return nullptr;
+    }
+
+    // Make sure the user is a direct call.
+    // TODO: Indirect calls can be fine too.
+    ImmutableCallSite ICS(PPFunc->user_back());
+    if (!ICS || ICS.getCalledFunction() != PPFunc) {
+      LLVM_DEBUG(dbgs() << "\tReached function beginning, no unique call site "
+                           "(indirect call), done\n");
+      return nullptr;
+    }
+
+    // We know we reached the first instruction of the callee so we must have
+    // executed the instruction before the call.
+    LLVM_DEBUG(
+        dbgs()
+        << "\tReached function beginning, continue with unique call site\n");
+    return ICS.getInstruction();
+  }
+
+  // If we are inside a block we know what instruction was executed before, the
+  // previous one. However, if the previous one is a call site, we can enter the
+  // callee and visit its instructions as well.
+  if (!IsFirst) {
+    const Instruction *PrevPP = PP->getPrevNode();
+    LLVM_DEBUG(
+        dbgs() << "\tIntermediate instruction, continue with previous\n");
+    // We did not enter a callee so we simply return the previous instruction.
+    return PrevPP;
+  }
+
+  // Finally, we have to handle the case where the program point is the first
+  // in a block but not in the function. We use the findBackwardJoinPoint
+  // helper function with information about the function and helper analyses,
+  // if available.
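+  // For instance (illustrative only): if PPBlock is the join block of a
+  // simple diamond, both of its predecessors have the diamond's branch block
+  // as their unique predecessor, so that branch block's terminator is known
+  // to execute before PP.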
+  const LoopInfo *LI = LIGetter(*PPFunc);
+  if (const BasicBlock *JoinBB = findBackwardJoinPoint(PPBlock, LI, DT))
+    return &JoinBB->back();
+
+  LLVM_DEBUG(dbgs() << "\tNo join point found\n");
+  return nullptr;
+}
+
+MustBeExecutedIterator::MustBeExecutedIterator(
+    MustBeExecutedContextExplorer &Explorer, const Instruction *I)
+    : Visited(new VisitedSetTy()), Explorer(Explorer), CurInst(I),
+      Head(nullptr), Tail(nullptr) {
+  reset(I);
+}
+
+void MustBeExecutedIterator::reset(const Instruction *I) {
+  Visited->clear();
+  DelayStack.clear();
+  ForwardCallStack.clear();
+  BackwardCallStack.clear();
+  resetInstruction(I);
+}
+
+void MustBeExecutedIterator::resetInstruction(const Instruction *I) {
+  CurInst = I;
+  Head = Tail = nullptr;
+  Visited->insert({I, ExplorationDirection::FORWARD});
+  Visited->insert({I, ExplorationDirection::BACKWARD});
+  if (Explorer.ExploreCFGForward)
+    Head = I;
+  if (Explorer.ExploreCFGBackward)
+    Tail = I;
+}
+
+const Instruction *MustBeExecutedIterator::advance(bool PoppedCallStack) {
+  assert(CurInst && "Cannot advance an end iterator!");
+  Head =
+      Explorer.getMustBeExecutedNextInstruction(*this, Head, PoppedCallStack);
+  if (Head && Visited->insert({Head, ExplorationDirection::FORWARD}).second)
+    return Head;
+
+  if (!ForwardCallStack.empty()) {
+    Head = ForwardCallStack.pop_back_val();
+    return advance(true);
+  }
+  Head = nullptr;
+
+  Tail =
+      Explorer.getMustBeExecutedPrevInstruction(*this, Tail, PoppedCallStack);
+  if (Tail && Visited->insert({Tail, ExplorationDirection::BACKWARD}).second)
+    return Tail;
+
+  if (!BackwardCallStack.empty()) {
+    Tail = BackwardCallStack.pop_back_val();
+    return advance(true);
+  }
+
+  Tail = nullptr;
+
+  if (!DelayStack.empty()) {
+    const Instruction *DelayedI = DelayStack.pop_back_val();
+    LLVM_DEBUG(dbgs() << "Popped new program point from delay stack: "
+                      << *DelayedI << "\n");
+    resetInstruction(DelayedI);
+    assert(CurInst && "Expected valid instruction after reset!");
+    return CurInst;
+  }
+
+  return nullptr;
+}
+
+void MustBeExecutedContextExplorer::insertInstructionBefore(
+    const Instruction *NewI, const Instruction *PosI) {
+  bool NewTransfersEx = isGuaranteedToTransferExecutionToSuccessor(NewI);
+  const Instruction *PosIPrev = PosI->getPrevNode();
+  for (auto &MapIt : InstructionIteratorMap) {
+    iterator &It = *MapIt.getSecond();
+    if (It.Visited->count({PosIPrev, ExplorationDirection::FORWARD}) &&
+        It.Visited->count({PosI, ExplorationDirection::FORWARD})) {
+      if (!NewTransfersEx) {
+        It.reset(MapIt.getFirst());
+        continue;
+      }
+      It.Visited->insert({NewI, ExplorationDirection::FORWARD});
+    }
+    if (It.Tail != PosI &&
+        It.Visited->count({PosI, ExplorationDirection::BACKWARD})) {
+      It.Visited->insert({NewI, ExplorationDirection::BACKWARD});
+      if (!It.Tail)
+        It.Tail = NewI;
+    }
+  }
+}
+
+void MustBeExecutedContextExplorer::insertInstructionAfter(
+    const Instruction *NewI, const Instruction *PosI) {
+  bool NewTransfersEx = isGuaranteedToTransferExecutionToSuccessor(NewI);
+  bool PosTransfersEx = isGuaranteedToTransferExecutionToSuccessor(PosI);
+  const Instruction *PosINext = PosI->getNextNode();
+  for (auto &MapIt : InstructionIteratorMap) {
+    iterator &It = *MapIt.getSecond();
+    if (PosTransfersEx) {
+      if (It.Head != PosI &&
+          It.Visited->count({PosI, ExplorationDirection::FORWARD})) {
+        if (!NewTransfersEx) {
+          It.reset(MapIt.getFirst());
+          continue;
+        }
+        It.Visited->insert({NewI, ExplorationDirection::FORWARD});
+      }
+    }
+    if (It.Visited->count({PosI, ExplorationDirection::BACKWARD})) {
+      if (!PosINext) {
+        It.reset(MapIt.getFirst());
+        continue;
+      }
+      if (It.Visited->count({PosINext, ExplorationDirection::BACKWARD}))
+        It.Visited->insert({NewI, ExplorationDirection::BACKWARD});
+    }
+  }
+}
+
+void MustBeExecutedContextExplorer::removeInstruction(const Instruction *I) {
+  InstructionIteratorMap.erase(I);
+  BlockTransferMap.erase(I->getParent());
+  FunctionExitJoinPointMap.erase(I->getFunction());
+  for (auto &It : InstructionIteratorMap) {
+    It.getSecond()->Visited->erase({I, ExplorationDirection::FORWARD});
+    It.getSecond()->Visited->erase({I, ExplorationDirection::BACKWARD});
+  }
+}