Index: llvm/include/llvm/InitializePasses.h
===================================================================
--- llvm/include/llvm/InitializePasses.h
+++ llvm/include/llvm/InitializePasses.h
@@ -133,6 +133,7 @@
 void initializeDataFlowSanitizerPass(PassRegistry&);
 void initializeScalarizerPass(PassRegistry&);
 void initializeEarlyCSELegacyPassPass(PassRegistry &);
+void initializeGVNHoistLegacyPassPass(PassRegistry &);
 void initializeEliminateAvailableExternallyPass(PassRegistry&);
 void initializeExpandISelPseudosPass(PassRegistry&);
 void initializeForceFunctionAttrsLegacyPassPass(PassRegistry&);
Index: llvm/include/llvm/LinkAllPasses.h
===================================================================
--- llvm/include/llvm/LinkAllPasses.h
+++ llvm/include/llvm/LinkAllPasses.h
@@ -156,6 +156,7 @@
       (void) llvm::createConstantHoistingPass();
       (void) llvm::createCodeGenPreparePass();
       (void) llvm::createEarlyCSEPass();
+      (void) llvm::createGVNHoistPass();
       (void) llvm::createMergedLoadStoreMotionPass();
       (void) llvm::createGVNPass();
       (void) llvm::createMemCpyOptPass();
Index: llvm/include/llvm/Transforms/Scalar.h
===================================================================
--- llvm/include/llvm/Transforms/Scalar.h
+++ llvm/include/llvm/Transforms/Scalar.h
@@ -328,6 +328,13 @@
 
 //===----------------------------------------------------------------------===//
 //
+// GVNHoist - This pass performs a simple and fast GVN pass over the dominator
+// tree to hoist common expressions from sibling branches.
+//
+FunctionPass *createGVNHoistPass();
+
+//===----------------------------------------------------------------------===//
+//
 // MergedLoadStoreMotion - This pass merges loads and stores in diamonds. Loads
 // are hoisted into the header, while stores sink into the footer.
 //
Index: llvm/include/llvm/Transforms/Scalar/GVN.h
===================================================================
--- llvm/include/llvm/Transforms/Scalar/GVN.h
+++ llvm/include/llvm/Transforms/Scalar/GVN.h
@@ -58,11 +58,7 @@
   AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
   MemoryDependenceResults &getMemDep() const { return *MD; }
 
-private:
-  friend class gvn::GVNLegacyPass;
-
   struct Expression;
-  friend struct DenseMapInfo<Expression>;
 
   /// This class holds the mapping between values and value numbers. It is used
   /// as an efficient mechanism to determine the expression-wise equivalence of
@@ -105,6 +101,10 @@
     void verifyRemoved(const Value *) const;
   };
 
+private:
+  friend class gvn::GVNLegacyPass;
+  friend struct DenseMapInfo<Expression>;
+
   MemoryDependenceResults *MD;
   DominatorTree *DT;
   const TargetLibraryInfo *TLI;
@@ -229,6 +229,13 @@
 /// loads are eliminated by the pass.
 FunctionPass *createGVNPass(bool NoLoads = false);
 
+/// \brief A simple and fast domtree-based GVN pass to hoist common expressions
+/// from sibling branches.
+struct GVNHoistPass : PassInfoMixin<GVNHoistPass> {
+  /// \brief Run the pass over the function.
+  PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
+};
+
 }
 
 #endif
Index: llvm/lib/Passes/PassRegistry.def
===================================================================
--- llvm/lib/Passes/PassRegistry.def
+++ llvm/lib/Passes/PassRegistry.def
@@ -95,6 +95,7 @@
 FUNCTION_PASS("aa-eval", AAEvaluator())
 FUNCTION_PASS("adce", ADCEPass())
 FUNCTION_PASS("early-cse", EarlyCSEPass())
+FUNCTION_PASS("gvn-hoist", GVNHoistPass())
 FUNCTION_PASS("instcombine", InstCombinePass())
 FUNCTION_PASS("invalidate", InvalidateAllAnalysesPass())
 FUNCTION_PASS("no-op-function", NoOpFunctionPass())
Index: llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
===================================================================
--- llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
+++ llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
@@ -210,6 +210,7 @@
   else
     FPM.add(createScalarReplAggregatesPass());
   FPM.add(createEarlyCSEPass());
+  FPM.add(createGVNHoistPass());
   FPM.add(createLowerExpectIntrinsicPass());
 }
Index: llvm/lib/Transforms/Scalar/CMakeLists.txt
===================================================================
--- llvm/lib/Transforms/Scalar/CMakeLists.txt
+++ llvm/lib/Transforms/Scalar/CMakeLists.txt
@@ -11,6 +11,7 @@
   FlattenCFGPass.cpp
   Float2Int.cpp
   GVN.cpp
+  GVNHoist.cpp
   InductiveRangeCheckElimination.cpp
   IndVarSimplify.cpp
   JumpThreading.cpp
Index: llvm/lib/Transforms/Scalar/GVNHoist.cpp
===================================================================
--- /dev/null
+++ llvm/lib/Transforms/Scalar/GVNHoist.cpp
@@ -0,0 +1,623 @@
+//===- GVNHoist.cpp - Hoist scalar and load expressions ------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass hoists expressions from branches to a common dominator. It uses
+// GVN (global value numbering) to discover expressions computing the same
+// values.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Scalar/GVN.h"
+#include "llvm/Transforms/Utils/MemorySSA.h"
+#include <functional>
+#include <map>
+#include <string>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "gvn-hoist"
+
+STATISTIC(NumHoisted, "Number of hoisted instructions");
+STATISTIC(NumRemoved, "Number of instructions removed");
+
+static cl::opt<int>
+    MaxHoistedThreshold("gvn-max-hoisted", cl::Hidden, cl::init(-1),
+                        cl::desc("Max number of instructions to hoist "
+                                 "(default unlimited = -1)"));
+static int HoistedCtr = 0;
+
+namespace {
+
+// Compare instructions by the DFS-in number of their basic block; ties within
+// the same block are broken by the position of the instruction in the block.
+struct SortByDFSIn {
+private:
+  DenseMap<const BasicBlock *, unsigned> &DFSNumber;
+
+public:
+  SortByDFSIn(DenseMap<const BasicBlock *, unsigned> &D) : DFSNumber(D) {}
+
+  bool operator()(const Instruction *A, const Instruction *B) const {
+    assert(A != B);
+    const BasicBlock *BA = A->getParent();
+    const BasicBlock *BB = B->getParent();
+    unsigned NA = DFSNumber[BA];
+    unsigned NB = DFSNumber[BB];
+    if (NA < NB)
+      return true;
+    if (NA == NB) {
+      // Sort them in the order they occur in the same basic block.
+      assert(BA == BB); // FIXME: Remove after testing.
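+      // FIXME: std::distance requires that B be reachable from A by forward
+      // iteration, so this tie-break is only well defined when A occurs
+      // before B in the block, and its cost is linear in their distance;
+      // consider rewriting it as an explicit scan over the block.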
+      BasicBlock::const_iterator AI(A), BI(B);
+      return std::distance(AI, BI) < 0;
+    }
+    return false;
+  }
+};
+
+// Per-basic-block properties that make hoisting across the block unsafe.
+enum SideEffectKind { Unknown, HasEH, MayNotTerminate };
+
+inline SideEffectKind operator|(SideEffectKind a, SideEffectKind b) {
+  return (SideEffectKind)((int)a | (int)b);
+}
+
+inline SideEffectKind &operator|=(SideEffectKind &a, SideEffectKind b) {
+  return (SideEffectKind &)((int &)a |= (int)b);
+}
+
+typedef DenseMap<const BasicBlock *, SideEffectKind> BBDetailsSet;
+
+// This pass hoists common computations across branches sharing a common
+// immediate dominator. The primary goal is to reduce the code size, and in
+// some cases reduce the critical path (by exposing more ILP).
+class GVNHoistLegacyPassImpl {
+public:
+  GVN::ValueTable VN;
+  DominatorTree *DT;
+  AliasAnalysis *AA;
+  MemoryDependenceResults *MD;
+  DenseMap<const BasicBlock *, unsigned> DFSNumber;
+  BBDetailsSet BBDetails;
+  MemorySSA *MSSA;
+  MemorySSAWalker *MSSAW;
+
+  GVNHoistLegacyPassImpl(DominatorTree *Dt, AliasAnalysis *Aa,
+                         MemoryDependenceResults *Md)
+      : DT(Dt), AA(Aa), MD(Md) {}
+
+  // Return true when the terminator of BB may throw or may not return:
+  // executing BB may not transfer control to its successors.
+  bool mayNotTerminate(BasicBlock *BB) {
+    auto B = BBDetails.find(BB);
+    if (B != BBDetails.end())
+      return B->second & SideEffectKind::MayNotTerminate;
+
+    if (BB->getTerminator()->mayThrow() || !BB->getTerminator()->mayReturn()) {
+      BBDetails[BB] |= SideEffectKind::MayNotTerminate;
+      return true;
+    }
+    return false;
+  }
+
+  // Return true when BB is an exception handling block.
+  bool hasEH(BasicBlock *BB) {
+    auto B = BBDetails.find(BB);
+    if (B != BBDetails.end())
+      return B->second & SideEffectKind::HasEH;
+
+    if (BB->isEHPad() || BB->hasAddressTaken()) {
+      BBDetails[BB] |= SideEffectKind::HasEH;
+      return true;
+    }
+    return false;
+  }
+
+  // Return true when there are exception handling blocks on the execution
+  // path.
+  bool hasEH(SmallPtrSetImpl<BasicBlock *> &Paths) {
+    for (BasicBlock *BB : Paths)
+      if (hasEH(BB) || mayNotTerminate(BB))
+        return true;
+    return false;
+  }
+
+  // Return true when all paths from A to the end of the function pass through
+  // either B or C.
+  bool hoistingFromAllPaths(BasicBlock *A, BasicBlock *B, BasicBlock *C) {
+    // We fully copy the WL in order to be able to remove items from it.
+    SmallPtrSet<BasicBlock *, 2> WL;
+    WL.insert(B);
+    WL.insert(C);
+
+    for (auto It = df_begin(A), E = df_end(A); It != E;) {
+      // There exists a path from A to the exit of the function if we are
+      // still iterating in DF traversal and we removed all the blocks from
+      // the work list.
+      if (WL.empty())
+        return false;
+
+      BasicBlock *BB = *It;
+      if (WL.erase(BB)) {
+        // Stop DFS traversal when BB is in the work list.
+        It.skipChildren();
+        continue;
+      }
+
+      // Check for end of function, calls that do not return, etc.
+      if (!isGuaranteedToTransferExecutionToSuccessor(BB->getTerminator()))
+        return false;
+
+      // Increment DFS traversal when not skipping children.
+      ++It;
+    }
+
+    return true;
+  }
+
+  // A multimap from a VN (value number) to all the instructions with that VN.
+  typedef std::multimap<unsigned, Instruction *> VNtoInsns;
+  // Each element of a hoisting list contains the basic block where to hoist
+  // and a list of instructions to be hoisted.
+  typedef SmallVector<std::pair<BasicBlock *, SmallVector<Instruction *, 4>>,
+                      4> HoistingPointList;
+
+  // Initialize Paths with all the basic blocks executed in between A and B.
+  void gatherAllBlocks(SmallPtrSetImpl<BasicBlock *> &Paths, BasicBlock *A,
+                       BasicBlock *B) {
+    assert(DT->dominates(A, B) && "Invalid path");
+
+    // We may need to keep B in the Paths set if we have already added it
+    // to Paths for another expression.
+    bool Keep = Paths.count(B);
+
+    // Record in Paths all basic blocks reachable in depth-first iteration on
+    // the inverse CFG from B to A. These blocks are all the blocks that may
+    // be executed between the execution of A and B. Hoisting an expression
+    // from B into A has to be safe on all execution paths.
+    for (auto I = idf_ext_begin(B, Paths), E = idf_ext_end(B, Paths); I != E;) {
+      if (*I == A)
+        // Stop traversal when reaching A.
+        I.skipChildren();
+      else
+        ++I;
+    }
+
+    // Safety check for B will be handled separately.
+    if (!Keep)
+      Paths.erase(B);
+
+    // Safety check for A will be handled separately.
+    Paths.erase(A);
+  }
+
+  // Return true when there are users of A in one of the BBs of Paths.
+  bool hasMemoryUseOnPaths(MemoryAccess *A,
+                           SmallPtrSetImpl<BasicBlock *> &Paths) {
+    Value::user_iterator UI = A->user_begin();
+    Value::user_iterator UE = A->user_end();
+    BasicBlock *BBA = A->getBlock();
+    for (; UI != UE; ++UI)
+      if (MemoryAccess *UM = dyn_cast<MemoryAccess>(*UI))
+        for (BasicBlock *PBB : Paths) {
+          if (PBB == BBA) {
+            if (MSSA->locallyDominates(UM, A))
+              return true;
+            continue;
+          }
+          if (PBB == UM->getBlock())
+            return true;
+        }
+    return false;
+  }
+
+  // Return true when it is safe to hoist an instruction Insn to NewInsertBB
+  // and move the insertion point from InsertBB to NewInsertBB.
+  bool safeToHoist(BasicBlock *NewInsertBB, BasicBlock *InsertBB,
+                   Instruction *Insn, Instruction *First, bool IsScalar,
+                   bool IsLoad) {
+    if (hasEH(InsertBB))
+      return false;
+
+    BasicBlock *BBI = Insn->getParent();
+    // When InsertBB already contains an instruction to be hoisted, the
+    // expression is needed on all paths.
+
+    // Check that the hoisted expression is needed on all paths: it is unsafe
+    // to hoist loads to a place where there may be a path not loading from
+    // the same address: for instance there may be a branch on which the
+    // address of the load may not be initialized. FIXME: at -Oz we may want
+    // to hoist scalars to a place where they are partially needed.
+    if (BBI != NewInsertBB && !hoistingFromAllPaths(NewInsertBB, InsertBB, BBI))
+      return false;
+
+    // Check for unsafe hoistings due to side effects.
+    SmallPtrSet<BasicBlock *, 4> Paths;
+    gatherAllBlocks(Paths, NewInsertBB, InsertBB);
+    gatherAllBlocks(Paths, NewInsertBB, BBI);
+
+    if (hasEH(Paths))
+      return false;
+
+    if (IsScalar)
+      // Safe to hoist scalars.
+      return true;
+
+    // For loads and stores, we check for dependences on the Memory SSA.
+    MemoryAccess *A =
+        cast<MemoryUseOrDef>(MSSA->getMemoryAccess(Insn))->getDefiningAccess();
+    BasicBlock *BBA = A->getBlock();
+
+    if (DT->properlyDominates(NewInsertBB, BBA))
+      // Cannot move Insn past BBA to NewInsertBB.
+      return false;
+
+    MemoryAccess *B =
+        cast<MemoryUseOrDef>(MSSA->getMemoryAccess(First))->getDefiningAccess();
+    BasicBlock *BBB = B->getBlock();
+
+    if (DT->properlyDominates(NewInsertBB, BBB))
+      // Cannot move First past BBB to NewInsertBB.
+      return false;
+
+    if (!IsLoad) {
+      // Check that we do not move a store past loads.
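+      // If the defining access of the store dominates the insertion point
+      // and a load in between uses that access, hoisting the store would
+      // make the load observe the stored value too early.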
+      if (DT->dominates(BBA, NewInsertBB))
+        if (hasMemoryUseOnPaths(A, Paths))
+          return false;
+
+      if (DT->dominates(BBB, NewInsertBB))
+        if (hasMemoryUseOnPaths(B, Paths))
+          return false;
+    }
+
+    if (DT->properlyDominates(BBA, NewInsertBB) &&
+        DT->properlyDominates(BBB, NewInsertBB))
+      return true;
+
+    BasicBlock *BBF = First->getParent();
+    if (BBI == BBF)
+      return false;
+
+    assert(BBA == NewInsertBB || BBB == NewInsertBB);
+
+    if (BBI != NewInsertBB && BBF != NewInsertBB)
+      return true;
+
+    if (BBI == NewInsertBB) {
+      if (DT->properlyDominates(BBB, NewInsertBB))
+        return true;
+      assert(BBI == BBB);
+      if (MSSA->locallyDominates(MSSA->getMemoryAccess(Insn), B))
+        return false;
+      return true;
+    }
+
+    if (BBF == NewInsertBB) {
+      if (DT->properlyDominates(BBA, NewInsertBB))
+        return true;
+      assert(BBF == BBA);
+      if (MSSA->locallyDominates(MSSA->getMemoryAccess(First), A))
+        return false;
+      return true;
+    }
+
+    // No side effects: it is safe to hoist.
+    return true;
+  }
+
+  // Initialize HPL from Map.
+  void computeInsertionPoints(VNtoInsns &Map, HoistingPointList &HPL,
+                              bool IsScalar, bool IsLoad) {
+    SortByDFSIn Pred(DFSNumber);
+
+    for (auto It = Map.begin(); It != Map.end();
+         It = Map.upper_bound(It->first)) {
+      if (MaxHoistedThreshold != -1 && ++HoistedCtr > MaxHoistedThreshold)
+        return;
+
+      unsigned V = It->first;
+      if (Map.count(V) < 2)
+        continue;
+
+      // Compute the insertion point and the list of expressions to be
+      // hoisted.
+      auto R = Map.equal_range(V);
+      auto First = R.first;
+      auto Last = R.second;
+      SmallVector<Instruction *, 4> InstructionsToHoist;
+      for (; First != Last; ++First) {
+        Instruction *I = First->second;
+
+        BasicBlock *BB = I->getParent();
+        if (hasEH(BB))
+          continue;
+
+        InstructionsToHoist.push_back(I);
+      }
+
+      if (InstructionsToHoist.empty())
+        continue;
+
+      // No need to sort when there are only two instructions.
+      if (InstructionsToHoist.size() > 2)
+        std::sort(InstructionsToHoist.begin(), InstructionsToHoist.end(),
+                  Pred);
+
+      // Create a work list of all the BB of the Insns to be hoisted.
+      SmallPtrSet<BasicBlock *, 4> WL;
+      SmallVectorImpl<Instruction *>::iterator II =
+          InstructionsToHoist.begin();
+      SmallVectorImpl<Instruction *>::iterator Start = II;
+      BasicBlock *InsertBB = (*II)->getParent();
+      WL.insert((*II)->getParent());
+      ++II;
+      for (; II != InstructionsToHoist.end(); ++II) {
+        Instruction *Insn = *II;
+        BasicBlock *BB = Insn->getParent();
+        BasicBlock *NewInsertBB = DT->findNearestCommonDominator(InsertBB, BB);
+        WL.insert(BB);
+        if (safeToHoist(NewInsertBB, InsertBB, Insn, *Start, IsScalar,
+                        IsLoad)) {
+          // Extend InsertBB to NewInsertBB.
+          InsertBB = NewInsertBB;
+          continue;
+        }
+
+        // Not safe to hoist: save the previous work list and start over from
+        // BB.
+        if (std::distance(Start, II) > 1)
+          HPL.push_back(std::make_pair(
+              InsertBB, SmallVector<Instruction *, 4>(Start, II)));
+        else
+          WL.clear();
+
+        // We start over to compute InsertBB from BB.
+        Start = II;
+        InsertBB = BB;
+      }
+
+      // Save the last partition.
+      if (std::distance(Start, II) > 1)
+        HPL.push_back(std::make_pair(
+            InsertBB, SmallVector<Instruction *, 4>(Start, II)));
+    }
+  }
+
+  // Return true when all operands of I are available at insertion point
+  // InsertBB. When limiting the number of hoisted expressions, one could
+  // hoist a load without hoisting its access function. So before hoisting
+  // any expression, make sure that all its operands are available at the
+  // insertion point.
+  bool allOperandsAvailable(Instruction *I, BasicBlock *InsertBB) {
+    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
+      Value *Op = I->getOperand(i);
+      Instruction *Inst = dyn_cast<Instruction>(Op);
+      if (!Inst)
+        continue;
+
+      if (!DT->dominates(Inst->getParent(), InsertBB))
+        return false;
+    }
+
+    return true;
+  }
+
+  bool hoist(HoistingPointList &HPL) {
+    bool Res = false;
+    for (const auto &HP : HPL) {
+      // Find out whether we already have one of the instructions in InsertBB,
+      // in which case we do not have to move it.
+      BasicBlock *InsertBB = HP.first;
+      const SmallVector<Instruction *, 4> &InstructionsToHoist = HP.second;
+      Instruction *Repl = nullptr;
+      for (auto &I : InstructionsToHoist)
+        if (I->getParent() == InsertBB) {
+          if (!Repl) {
+            Repl = I;
+          } else {
+            // There are two instructions in InsertBB to be hoisted in place:
+            // update Repl to be the first one, such that we can rename the
+            // uses of the second based on the first.
+            for (Instruction &I1 : *InsertBB)
+              if (&I1 == Repl) {
+                // Repl was already the first one.
+                break;
+              } else if (&I1 == I) {
+                Repl = I;
+                break;
+              }
+          }
+        }
+
+      if (Repl) {
+        // Repl is already in InsertBB: it remains in place.
+        assert(allOperandsAvailable(Repl, InsertBB) &&
+               "instruction depends on operands that are not available");
+      } else {
+        // When we do not find Repl in InsertBB, select the first in the list
+        // and move it to InsertBB.
+        Repl = InstructionsToHoist.front();
+
+        // We can move Repl in InsertBB only when all operands are available.
+        // The order in which hoistings are done may influence the
+        // availability of operands: for example, a load may not be hoisted
+        // until the gep computing the address of the load is hoisted.
+        if (!allOperandsAvailable(Repl, InsertBB))
+          continue;
+
+        Repl->moveBefore(InsertBB->getTerminator());
+      }
+
+      Res = true;
+      NumHoisted++;
+
+      // Remove and rename all other instructions.
+      for (Instruction *I : InstructionsToHoist)
+        if (I != Repl) {
+          NumRemoved++;
+          I->replaceAllUsesWith(Repl);
+          I->eraseFromParent();
+        }
+    }
+
+    return Res;
+  }
+
+  // Hoist all expressions.
+  bool hoistExpressions(Function &F) {
+    // Record all scalar expressions with the same VN in VNtoScalars, and all
+    // loads with the same VN in VNtoLoads.
+    VNtoInsns VNtoScalars;
+    VNtoInsns VNtoLoads;
+    VNtoInsns VNtoStores;
+    VNtoInsns VNtoCallsScalars;
+    VNtoInsns VNtoCallsLoads;
+    VNtoInsns VNtoCallsStores;
+    for (BasicBlock *BB : depth_first(&F.getEntryBlock())) {
+      for (Instruction &I1 : *BB) {
+        if (LoadInst *Load = dyn_cast<LoadInst>(&I1)) {
+          if (Load->isSimple()) {
+            Value *Ptr = Load->getPointerOperand();
+            unsigned V = VN.lookup_or_add(Ptr);
+            VNtoLoads.insert(std::make_pair(V, Load));
+          }
+          continue;
+        }
+
+        if (StoreInst *Store = dyn_cast<StoreInst>(&I1)) {
+          if (Store->isSimple()) {
+            // Hash the store address and the stored value.
+            std::string VNS;
+            Value *Ptr = Store->getPointerOperand();
+            VNS += std::to_string(VN.lookup_or_add(Ptr));
+            VNS += ",";
+            Value *Val = Store->getValueOperand();
+            VNS += std::to_string(VN.lookup_or_add(Val));
+            VNtoStores.insert(
+                std::make_pair(std::hash<std::string>()(VNS), Store));
+          }
+          continue;
+        }
+
+        if (CallInst *Call = dyn_cast<CallInst>(&I1)) {
+          if (Call->doesNotReturn() || !Call->doesNotThrow())
+            continue;
+
+          // A call that doesNotAccessMemory is handled as a Scalar, a call
+          // that onlyReadsMemory is handled as a Load, and all other calls
+          // are handled as a Store.
+          unsigned V = VN.lookup_or_add(&I1);
+
+          if (Call->doesNotAccessMemory())
+            VNtoCallsScalars.insert(std::make_pair(V, &I1));
+          else if (Call->onlyReadsMemory())
+            VNtoCallsLoads.insert(std::make_pair(V, &I1));
+          else
+            VNtoCallsStores.insert(std::make_pair(V, &I1));
+
+          continue;
+        }
+
+        // Scalar instruction.
+        unsigned V = VN.lookup_or_add(&I1);
+        VNtoScalars.insert(std::make_pair(V, &I1));
+      }
+    }
+
+    unsigned I = 0;
+    for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
+      DFSNumber.insert(std::make_pair(BB, ++I));
+
+    HoistingPointList HPL;
+    computeInsertionPoints(VNtoScalars, HPL, true, false);
+    computeInsertionPoints(VNtoLoads, HPL, false, true);
+    computeInsertionPoints(VNtoStores, HPL, false, false);
+    computeInsertionPoints(VNtoCallsScalars, HPL, true, false);
+    computeInsertionPoints(VNtoCallsLoads, HPL, false, true);
+    computeInsertionPoints(VNtoCallsStores, HPL, false, false);
+    return hoist(HPL);
+  }
+
+  bool run(Function &F) {
+    VN.setDomTree(DT);
+    VN.setAliasAnalysis(AA);
+    VN.setMemDep(MD);
+
+    bool Res = false;
+    // FIXME: use lazy evaluation of VN to avoid the fix-point computation.
+    while (1) {
+      // To address a limitation of the current GVN, we need to rerun the
+      // hoisting after we hoisted loads in order to be able to hoist all
+      // scalars dependent on the hoisted loads. Same for stores.
+      VN.clear();
+
+      // Memory SSA is not updated by the hoisting: recompute it.
+      MemorySSA M(F);
+      MSSA = &M;
+      MSSAW = MSSA->buildMemorySSA(AA, DT);
+
+      if (!hoistExpressions(F))
+        return Res;
+      Res = true;
+      delete MSSAW;
+    }
+
+    return Res;
+  }
+};
+
+class GVNHoistLegacyPass : public FunctionPass {
+public:
+  static char ID;
+
+  GVNHoistLegacyPass() : FunctionPass(ID) {
+    initializeGVNHoistLegacyPassPass(*PassRegistry::getPassRegistry());
+  }
+
+  bool runOnFunction(Function &F) override {
+    if (skipOptnoneFunction(F))
+      return false;
+
+    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+    auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
+    auto &MD = getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
+
+    GVNHoistLegacyPassImpl G(&DT, &AA, &MD);
+    return G.run(F);
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<DominatorTreeWrapperPass>();
+    AU.addRequired<AAResultsWrapperPass>();
+    AU.addRequired<MemoryDependenceWrapperPass>();
+    AU.addPreserved<DominatorTreeWrapperPass>();
+  }
+};
+} // namespace
+
+PreservedAnalyses GVNHoistPass::run(Function &F,
+                                    AnalysisManager<Function> &AM) {
+  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
+  AliasAnalysis &AA = AM.getResult<AAManager>(F);
+  MemoryDependenceResults &MD = AM.getResult<MemoryDependenceAnalysis>(F);
+
+  GVNHoistLegacyPassImpl G(&DT, &AA, &MD);
+  if (!G.run(F))
+    return PreservedAnalyses::all();
+
+  PreservedAnalyses PA;
+  PA.preserve<DominatorTreeAnalysis>();
+  return PA;
+}
+
+char GVNHoistLegacyPass::ID = 0;
+INITIALIZE_PASS_BEGIN(GVNHoistLegacyPass, "gvn-hoist",
+                      "Early GVN Hoisting of Expressions", false, false)
+INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
+INITIALIZE_PASS_END(GVNHoistLegacyPass, "gvn-hoist",
+                    "Early GVN Hoisting of Expressions", false, false)
+
+FunctionPass *llvm::createGVNHoistPass() { return new GVNHoistLegacyPass(); }
Index: llvm/lib/Transforms/Scalar/Scalar.cpp
===================================================================
--- llvm/lib/Transforms/Scalar/Scalar.cpp
+++ llvm/lib/Transforms/Scalar/Scalar.cpp
@@ -43,6 +43,7 @@
   initializeDSEPass(Registry);
   initializeGVNLegacyPassPass(Registry);
   initializeEarlyCSELegacyPassPass(Registry);
+  initializeGVNHoistLegacyPassPass(Registry);
   initializeFlattenCFGPassPass(Registry);
   initializeInductiveRangeCheckEliminationPass(Registry);
   initializeIndVarSimplifyPass(Registry);
@@ -236,6 +237,10 @@
   unwrap(PM)->add(createEarlyCSEPass());
 }
 
+void LLVMAddGVNHoistLegacyPass(LLVMPassManagerRef PM) {
+  unwrap(PM)->add(createGVNHoistPass());
+}
+
 void LLVMAddTypeBasedAliasAnalysisPass(LLVMPassManagerRef PM) {
   unwrap(PM)->add(createTypeBasedAAWrapperPass());
 }
Index: llvm/lib/Transforms/Utils/MemorySSA.cpp
===================================================================
--- llvm/lib/Transforms/Utils/MemorySSA.cpp
+++ llvm/lib/Transforms/Utils/MemorySSA.cpp
@@ -612,6 +612,10 @@
   assert((Dominator->getBlock() == Dominatee->getBlock()) &&
          "Asking for local domination when accesses are in different blocks!");
+
+  if (isLiveOnEntryDef(Dominatee))
+    return false;
+
   // Get the access list for the block
   const AccessListType *AccessList = getBlockAccesses(Dominator->getBlock());
   AccessListType::const_reverse_iterator It(Dominator->getIterator());
Index: llvm/lib/Transforms/Utils/SimplifyCFG.cpp
===================================================================
--- llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -1068,173 +1068,8 @@
   return Changed;
 }
 
-// If we would need to insert a select that uses the value of this invoke
-// (comments in HoistThenElseCodeToIf explain why we would need to do this), we
-// can't hoist the invoke, as there is nowhere to put the select in this case.
-static bool isSafeToHoistInvoke(BasicBlock *BB1, BasicBlock *BB2,
-                                Instruction *I1, Instruction *I2) {
-  for (BasicBlock *Succ : successors(BB1)) {
-    PHINode *PN;
-    for (BasicBlock::iterator BBI = Succ->begin();
-         (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
-      Value *BB1V = PN->getIncomingValueForBlock(BB1);
-      Value *BB2V = PN->getIncomingValueForBlock(BB2);
-      if (BB1V != BB2V && (BB1V == I1 || BB2V == I2)) {
-        return false;
-      }
-    }
-  }
-  return true;
-}
-
 static bool passingValueIsAlwaysUndefined(Value *V, Instruction *I);
 
-/// Given a conditional branch that goes to BB1 and BB2, hoist any common code
-/// in the two blocks up into the branch block. The caller of this function
-/// guarantees that BI's block dominates BB1 and BB2.
-static bool HoistThenElseCodeToIf(BranchInst *BI,
-                                  const TargetTransformInfo &TTI) {
-  // This does very trivial matching, with limited scanning, to find identical
-  // instructions in the two blocks. In particular, we don't want to get into
-  // O(M*N) situations here where M and N are the sizes of BB1 and BB2. As
-  // such, we currently just scan for obviously identical instructions in an
-  // identical order.
-  BasicBlock *BB1 = BI->getSuccessor(0); // The true destination.
-  BasicBlock *BB2 = BI->getSuccessor(1); // The false destination
-
-  BasicBlock::iterator BB1_Itr = BB1->begin();
-  BasicBlock::iterator BB2_Itr = BB2->begin();
-
-  Instruction *I1 = &*BB1_Itr++, *I2 = &*BB2_Itr++;
-  // Skip debug info if it is not identical.
-  DbgInfoIntrinsic *DBI1 = dyn_cast<DbgInfoIntrinsic>(I1);
-  DbgInfoIntrinsic *DBI2 = dyn_cast<DbgInfoIntrinsic>(I2);
-  if (!DBI1 || !DBI2 || !DBI1->isIdenticalToWhenDefined(DBI2)) {
-    while (isa<DbgInfoIntrinsic>(I1))
-      I1 = &*BB1_Itr++;
-    while (isa<DbgInfoIntrinsic>(I2))
-      I2 = &*BB2_Itr++;
-  }
-  if (isa<PHINode>(I1) || !I1->isIdenticalToWhenDefined(I2) ||
-      (isa<InvokeInst>(I1) && !isSafeToHoistInvoke(BB1, BB2, I1, I2)))
-    return false;
-
-  BasicBlock *BIParent = BI->getParent();
-
-  bool Changed = false;
-  do {
-    // If we are hoisting the terminator instruction, don't move one (making a
-    // broken BB), instead clone it, and remove BI.
-    if (isa<TerminatorInst>(I1))
-      goto HoistTerminator;
-
-    if (!TTI.isProfitableToHoist(I1) || !TTI.isProfitableToHoist(I2))
-      return Changed;
-
-    // For a normal instruction, we just move one to right before the branch,
-    // then replace all uses of the other with the first.  Finally, we remove
-    // the now redundant second instruction.
-    BIParent->getInstList().splice(BI->getIterator(), BB1->getInstList(), I1);
-    if (!I2->use_empty())
-      I2->replaceAllUsesWith(I1);
-    I1->intersectOptionalDataWith(I2);
-    unsigned KnownIDs[] = {
-        LLVMContext::MD_tbaa,    LLVMContext::MD_range,
-        LLVMContext::MD_fpmath,  LLVMContext::MD_invariant_load,
-        LLVMContext::MD_nonnull, LLVMContext::MD_invariant_group,
-        LLVMContext::MD_align,   LLVMContext::MD_dereferenceable,
-        LLVMContext::MD_dereferenceable_or_null};
-    combineMetadata(I1, I2, KnownIDs);
-    I2->eraseFromParent();
-    Changed = true;
-
-    I1 = &*BB1_Itr++;
-    I2 = &*BB2_Itr++;
-    // Skip debug info if it is not identical.
-    DbgInfoIntrinsic *DBI1 = dyn_cast<DbgInfoIntrinsic>(I1);
-    DbgInfoIntrinsic *DBI2 = dyn_cast<DbgInfoIntrinsic>(I2);
-    if (!DBI1 || !DBI2 || !DBI1->isIdenticalToWhenDefined(DBI2)) {
-      while (isa<DbgInfoIntrinsic>(I1))
-        I1 = &*BB1_Itr++;
-      while (isa<DbgInfoIntrinsic>(I2))
-        I2 = &*BB2_Itr++;
-    }
-  } while (I1->isIdenticalToWhenDefined(I2));
-
-  return true;
-
-HoistTerminator:
-  // It may not be possible to hoist an invoke.
-  if (isa<InvokeInst>(I1) && !isSafeToHoistInvoke(BB1, BB2, I1, I2))
-    return Changed;
-
-  for (BasicBlock *Succ : successors(BB1)) {
-    PHINode *PN;
-    for (BasicBlock::iterator BBI = Succ->begin();
-         (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
-      Value *BB1V = PN->getIncomingValueForBlock(BB1);
-      Value *BB2V = PN->getIncomingValueForBlock(BB2);
-      if (BB1V == BB2V)
-        continue;
-
-      // Check for passingValueIsAlwaysUndefined here because we would rather
-      // eliminate undefined control flow then converting it to a select.
-      if (passingValueIsAlwaysUndefined(BB1V, PN) ||
-          passingValueIsAlwaysUndefined(BB2V, PN))
-        return Changed;
-
-      if (isa<ConstantExpr>(BB1V) && !isSafeToSpeculativelyExecute(BB1V))
-        return Changed;
-      if (isa<ConstantExpr>(BB2V) && !isSafeToSpeculativelyExecute(BB2V))
-        return Changed;
-    }
-  }
-
-  // Okay, it is safe to hoist the terminator.
-  Instruction *NT = I1->clone();
-  BIParent->getInstList().insert(BI->getIterator(), NT);
-  if (!NT->getType()->isVoidTy()) {
-    I1->replaceAllUsesWith(NT);
-    I2->replaceAllUsesWith(NT);
-    NT->takeName(I1);
-  }
-
-  IRBuilder<true, NoFolder> Builder(NT);
-  // Hoisting one of the terminators from our successor is a great thing.
-  // Unfortunately, the successors of the if/else blocks may have PHI nodes in
-  // them.  If they do, all PHI entries for BB1/BB2 must agree for all PHI
-  // nodes, so we insert select instruction to compute the final result.
-  std::map<std::pair<Value *, Value *>, SelectInst *> InsertedSelects;
-  for (BasicBlock *Succ : successors(BB1)) {
-    PHINode *PN;
-    for (BasicBlock::iterator BBI = Succ->begin();
-         (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
-      Value *BB1V = PN->getIncomingValueForBlock(BB1);
-      Value *BB2V = PN->getIncomingValueForBlock(BB2);
-      if (BB1V == BB2V)
-        continue;
-
-      // These values do not agree.  Insert a select instruction before NT
-      // that determines the right value.
-      SelectInst *&SI = InsertedSelects[std::make_pair(BB1V, BB2V)];
-      if (!SI)
-        SI = cast<SelectInst>(
-            Builder.CreateSelect(BI->getCondition(), BB1V, BB2V,
-                                 BB1V->getName() + "."
-                                     + BB2V->getName()));
-
-      // Make the PHI node use the select for all incoming values for BB1/BB2
-      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
-        if (PN->getIncomingBlock(i) == BB1 || PN->getIncomingBlock(i) == BB2)
-          PN->setIncomingValue(i, SI);
-    }
-  }
-
-  // Update any PHI nodes in our new successors.
-  for (BasicBlock *Succ : successors(BB1))
-    AddPredecessorToBlock(Succ, BIParent, BB1);
-
-  EraseTerminatorInstAndDCECond(BI);
-  return true;
-}
 
 /// Given an unconditional branch that goes to BBEnd,
 /// check whether BBEnd has only two predecessors and the other predecessor
@@ -1595,7 +1430,6 @@
     Value *OrigV = PN->getIncomingValueForBlock(BB);
     Value *ThenV = PN->getIncomingValueForBlock(ThenBB);
 
-    // FIXME: Try to remove some of the duplication with HoistThenElseCodeToIf.
     // Skip PHIs which are trivial.
     if (ThenV == OrigV)
       continue;
@@ -5162,15 +4996,8 @@
   if (FoldBranchToCommonDest(BI, BonusInstThreshold))
     return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true;
 
-  // We have a conditional branch to two blocks that are only reachable
-  // from BI.  We know that the condbr dominates the two blocks, so see if
-  // there is any identical code in the "then" and "else" blocks.  If so, we
-  // can hoist it up to the branching block.
   if (BI->getSuccessor(0)->getSinglePredecessor()) {
-    if (BI->getSuccessor(1)->getSinglePredecessor()) {
-      if (HoistThenElseCodeToIf(BI, TTI))
-        return SimplifyCFG(BB, TTI, BonusInstThreshold, AC) | true;
-    } else {
+    if (!BI->getSuccessor(1)->getSinglePredecessor()) {
       // If Successor #1 has multiple preds, we may be able to conditionally
       // execute Successor #0 if it branches to Successor #1.
       TerminatorInst *Succ0TI = BI->getSuccessor(0)->getTerminator();
Index: llvm/test/Transforms/GVN/hoist.ll
===================================================================
--- /dev/null
+++ llvm/test/Transforms/GVN/hoist.ll
@@ -0,0 +1,651 @@
+; RUN: opt -gvn-hoist -S < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@GlobalVar = internal global float 1.000000e+00
+
+; Check that all scalar expressions are hoisted.
+;
+; CHECK-LABEL: @scalarsHoisting
+; CHECK: fsub
+; CHECK: fmul
+; CHECK: fsub
+; CHECK: fmul
+; CHECK-NOT: fmul
+; CHECK-NOT: fsub
+define float @scalarsHoisting(float %d, float %min, float %max, float %a) {
+entry:
+  %div = fdiv float 1.000000e+00, %d
+  %cmp = fcmp oge float %div, 0.000000e+00
+  br i1 %cmp, label %if.then, label %if.else
+
+if.then:                                          ; preds = %entry
+  %sub = fsub float %min, %a
+  %mul = fmul float %sub, %div
+  %sub1 = fsub float %max, %a
+  %mul2 = fmul float %sub1, %div
+  br label %if.end
+
+if.else:                                          ; preds = %entry
+  %sub3 = fsub float %max, %a
+  %mul4 = fmul float %sub3, %div
+  %sub5 = fsub float %min, %a
+  %mul6 = fmul float %sub5, %div
+  br label %if.end
+
+if.end:                                           ; preds = %if.else, %if.then
+  %tmax.0 = phi float [ %mul2, %if.then ], [ %mul6, %if.else ]
+  %tmin.0 = phi float [ %mul, %if.then ], [ %mul4, %if.else ]
+  %add = fadd float %tmax.0, %tmin.0
+  ret float %add
+}
+
+; Check that all loads and scalars depending on the loads are hoisted.
+; Check that getelementptr computation gets hoisted before the load.
+; +; CHECK-LABEL: @readsAndScalarsHoisting +; CHECK: load +; CHECK: load +; CHECK: load +; CHECK: fsub +; CHECK: fmul +; CHECK: fsub +; CHECK: fmul +; CHECK-NOT: load +; CHECK-NOT: fmul +; CHECK-NOT: fsub +define float @readsAndScalarsHoisting(float %d, float* %min, float* %max, float* %a) { +entry: + %div = fdiv float 1.000000e+00, %d + %cmp = fcmp oge float %div, 0.000000e+00 + br i1 %cmp, label %if.then, label %if.else + +if.then: ; preds = %entry + %A = getelementptr float, float* %min, i32 1 + %0 = load float, float* %A, align 4 + %1 = load float, float* %a, align 4 + %sub = fsub float %0, %1 + %mul = fmul float %sub, %div + %2 = load float, float* %max, align 4 + %sub1 = fsub float %2, %1 + %mul2 = fmul float %sub1, %div + br label %if.end + +if.else: ; preds = %entry + %3 = load float, float* %max, align 4 + %4 = load float, float* %a, align 4 + %sub3 = fsub float %3, %4 + %mul4 = fmul float %sub3, %div + %B = getelementptr float, float* %min, i32 1 + %5 = load float, float* %B, align 4 + %sub5 = fsub float %5, %4 + %mul6 = fmul float %sub5, %div + br label %if.end + +if.end: ; preds = %if.else, %if.then + %tmax.0 = phi float [ %mul2, %if.then ], [ %mul6, %if.else ] + %tmin.0 = phi float [ %mul, %if.then ], [ %mul4, %if.else ] + %add = fadd float %tmax.0, %tmin.0 + ret float %add +} + +; Check that we do not hoist loads after a store: the first two loads will be +; hoisted, and then the third load will not be hoisted. +; +; CHECK-LABEL: @readsAndWrites +; CHECK: load +; CHECK: load +; CHECK: fsub +; CHECK: fmul +; CHECK: store +; CHECK: load +; CHECK: fsub +; CHECK: fmul +; CHECK: load +; CHECK: fsub +; CHECK: fmul +; CHECK-NOT: load +; CHECK-NOT: fmul +; CHECK-NOT: fsub +define float @readsAndWrites(float %d, float* %min, float* %max, float* %a) { +entry: + %div = fdiv float 1.000000e+00, %d + %cmp = fcmp oge float %div, 0.000000e+00 + br i1 %cmp, label %if.then, label %if.else + +if.then: ; preds = %entry + %0 = load float, float* %min, align 4 + %1 = load float, float* %a, align 4 + store float %0, float* @GlobalVar + %sub = fsub float %0, %1 + %mul = fmul float %sub, %div + %2 = load float, float* %max, align 4 + %sub1 = fsub float %2, %1 + %mul2 = fmul float %sub1, %div + br label %if.end + +if.else: ; preds = %entry + %3 = load float, float* %max, align 4 + %4 = load float, float* %a, align 4 + %sub3 = fsub float %3, %4 + %mul4 = fmul float %sub3, %div + %5 = load float, float* %min, align 4 + %sub5 = fsub float %5, %4 + %mul6 = fmul float %sub5, %div + br label %if.end + +if.end: ; preds = %if.else, %if.then + %tmax.0 = phi float [ %mul2, %if.then ], [ %mul6, %if.else ] + %tmin.0 = phi float [ %mul, %if.then ], [ %mul4, %if.else ] + %add = fadd float %tmax.0, %tmin.0 + ret float %add +} + +; Check that we do hoist loads when the store is above the insertion point. 
+; +; CHECK-LABEL: @readsAndWriteAboveInsertPt +; CHECK: load +; CHECK: load +; CHECK: load +; CHECK: fsub +; CHECK: fmul +; CHECK: fsub +; CHECK: fmul +; CHECK-NOT: load +; CHECK-NOT: fmul +; CHECK-NOT: fsub +define float @readsAndWriteAboveInsertPt(float %d, float* %min, float* %max, float* %a) { +entry: + %div = fdiv float 1.000000e+00, %d + store float 0.000000e+00, float* @GlobalVar + %cmp = fcmp oge float %div, 0.000000e+00 + br i1 %cmp, label %if.then, label %if.else + +if.then: ; preds = %entry + %0 = load float, float* %min, align 4 + %1 = load float, float* %a, align 4 + %sub = fsub float %0, %1 + %mul = fmul float %sub, %div + %2 = load float, float* %max, align 4 + %sub1 = fsub float %2, %1 + %mul2 = fmul float %sub1, %div + br label %if.end + +if.else: ; preds = %entry + %3 = load float, float* %max, align 4 + %4 = load float, float* %a, align 4 + %sub3 = fsub float %3, %4 + %mul4 = fmul float %sub3, %div + %5 = load float, float* %min, align 4 + %sub5 = fsub float %5, %4 + %mul6 = fmul float %sub5, %div + br label %if.end + +if.end: ; preds = %if.else, %if.then + %tmax.0 = phi float [ %mul2, %if.then ], [ %mul6, %if.else ] + %tmin.0 = phi float [ %mul, %if.then ], [ %mul4, %if.else ] + %add = fadd float %tmax.0, %tmin.0 + ret float %add +} + +; Check that dependent expressions are hoisted. +; CHECK-LABEL: @dependentScalarsHoisting +; CHECK: fsub +; CHECK: fadd +; CHECK: fdiv +; CHECK: fmul +; CHECK-NOT: fsub +; CHECK-NOT: fadd +; CHECK-NOT: fdiv +; CHECK-NOT: fmul +define float @dependentScalarsHoisting(float %a, float %b, i1 %c) { +entry: + br i1 %c, label %if.then, label %if.else + +if.then: + %d = fsub float %b, %a + %e = fadd float %d, %a + %f = fdiv float %e, %a + %g = fmul float %f, %a + br label %if.end + +if.else: + %h = fsub float %b, %a + %i = fadd float %h, %a + %j = fdiv float %i, %a + %k = fmul float %j, %a + br label %if.end + +if.end: + %r = phi float [ %g, %if.then ], [ %k, %if.else ] + ret float %r +} + +; Check that all independent expressions are hoisted. +; CHECK-LABEL: @independentScalarsHoisting +; CHECK: fadd +; CHECK: fsub +; CHECK: fdiv +; CHECK: fmul +; CHECK-NOT: fsub +; CHECK-NOT: fdiv +; CHECK-NOT: fmul +define float @independentScalarsHoisting(float %a, float %b, i1 %c) { +entry: + br i1 %c, label %if.then, label %if.else + +if.then: + %d = fadd float %b, %a + %e = fsub float %b, %a + %f = fdiv float %b, %a + %g = fmul float %b, %a + br label %if.end + +if.else: + %i = fadd float %b, %a + %h = fsub float %b, %a + %j = fdiv float %b, %a + %k = fmul float %b, %a + br label %if.end + +if.end: + %p = phi float [ %d, %if.then ], [ %i, %if.else ] + %q = phi float [ %e, %if.then ], [ %h, %if.else ] + %r = phi float [ %f, %if.then ], [ %j, %if.else ] + %s = phi float [ %g, %if.then ], [ %k, %if.else ] + %t = fadd float %p, %q + %u = fadd float %r, %s + %v = fadd float %t, %u + ret float %v +} + +; Check that we hoist load and scalar expressions in triangles. 
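+; (A triangle: entry branches both to if.then and directly to if.end, and
+; if.then falls through to if.end; expressions duplicated in if.then and
+; if.end should be hoisted into entry.)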
+; CHECK-LABEL: @triangleHoisting +; CHECK: load +; CHECK: load +; CHECK: load +; CHECK: fsub +; CHECK: fmul +; CHECK: fsub +; CHECK: fmul +; CHECK-NOT: load +; CHECK-NOT: fmul +; CHECK-NOT: fsub +define float @triangleHoisting(float %d, float* %min, float* %max, float* %a) { +entry: + %div = fdiv float 1.000000e+00, %d + %cmp = fcmp oge float %div, 0.000000e+00 + br i1 %cmp, label %if.then, label %if.end + +if.then: ; preds = %entry + %0 = load float, float* %min, align 4 + %1 = load float, float* %a, align 4 + %sub = fsub float %0, %1 + %mul = fmul float %sub, %div + %2 = load float, float* %max, align 4 + %sub1 = fsub float %2, %1 + %mul2 = fmul float %sub1, %div + br label %if.end + +if.end: ; preds = %entry + %p1 = phi float [ %mul2, %if.then ], [ 0.000000e+00, %entry ] + %p2 = phi float [ %mul, %if.then ], [ 0.000000e+00, %entry ] + %3 = load float, float* %max, align 4 + %4 = load float, float* %a, align 4 + %sub3 = fsub float %3, %4 + %mul4 = fmul float %sub3, %div + %5 = load float, float* %min, align 4 + %sub5 = fsub float %5, %4 + %mul6 = fmul float %sub5, %div + + %x = fadd float %p1, %mul6 + %y = fadd float %p2, %mul4 + %z = fadd float %x, %y + ret float %z +} + +; Check that we hoist load and scalar expressions in dominator. +; CHECK-LABEL: @dominatorHoisting +; CHECK: load +; CHECK: load +; CHECK: fsub +; CHECK: fmul +; CHECK: load +; CHECK: fsub +; CHECK: fmul +; CHECK-NOT: load +; CHECK-NOT: fmul +; CHECK-NOT: fsub +define float @dominatorHoisting(float %d, float* %min, float* %max, float* %a) { +entry: + %div = fdiv float 1.000000e+00, %d + %0 = load float, float* %min, align 4 + %1 = load float, float* %a, align 4 + %sub = fsub float %0, %1 + %mul = fmul float %sub, %div + %2 = load float, float* %max, align 4 + %sub1 = fsub float %2, %1 + %mul2 = fmul float %sub1, %div + %cmp = fcmp oge float %div, 0.000000e+00 + br i1 %cmp, label %if.then, label %if.end + +if.then: ; preds = %entry + %3 = load float, float* %max, align 4 + %4 = load float, float* %a, align 4 + %sub3 = fsub float %3, %4 + %mul4 = fmul float %sub3, %div + %5 = load float, float* %min, align 4 + %sub5 = fsub float %5, %4 + %mul6 = fmul float %sub5, %div + br label %if.end + +if.end: ; preds = %entry + %p1 = phi float [ %mul4, %if.then ], [ 0.000000e+00, %entry ] + %p2 = phi float [ %mul6, %if.then ], [ 0.000000e+00, %entry ] + + %x = fadd float %p1, %mul2 + %y = fadd float %p2, %mul + %z = fadd float %x, %y + ret float %z +} + +; Check that we hoist load and scalar expressions in dominator. 
+; CHECK-LABEL: @domHoisting +; CHECK: load +; CHECK: load +; CHECK: fsub +; CHECK: fmul +; CHECK: load +; CHECK: fsub +; CHECK: fmul +; CHECK-NOT: load +; CHECK-NOT: fmul +; CHECK-NOT: fsub +define float @domHoisting(float %d, float* %min, float* %max, float* %a) { +entry: + %div = fdiv float 1.000000e+00, %d + %0 = load float, float* %min, align 4 + %1 = load float, float* %a, align 4 + %sub = fsub float %0, %1 + %mul = fmul float %sub, %div + %2 = load float, float* %max, align 4 + %sub1 = fsub float %2, %1 + %mul2 = fmul float %sub1, %div + %cmp = fcmp oge float %div, 0.000000e+00 + br i1 %cmp, label %if.then, label %if.else + +if.then: + %3 = load float, float* %max, align 4 + %4 = load float, float* %a, align 4 + %sub3 = fsub float %3, %4 + %mul4 = fmul float %sub3, %div + %5 = load float, float* %min, align 4 + %sub5 = fsub float %5, %4 + %mul6 = fmul float %sub5, %div + br label %if.end + +if.else: + %6 = load float, float* %max, align 4 + %7 = load float, float* %a, align 4 + %sub9 = fsub float %6, %7 + %mul10 = fmul float %sub9, %div + %8 = load float, float* %min, align 4 + %sub12 = fsub float %8, %7 + %mul13 = fmul float %sub12, %div + br label %if.end + +if.end: + %p1 = phi float [ %mul4, %if.then ], [ %mul10, %if.else ] + %p2 = phi float [ %mul6, %if.then ], [ %mul13, %if.else ] + + %x = fadd float %p1, %mul2 + %y = fadd float %p2, %mul + %z = fadd float %x, %y + ret float %z +} + +; Check that we do not hoist loads past stores within a same basic block. +; CHECK-LABEL: @noHoistInSingleBBWithStore +; CHECK: load +; CHECK: store +; CHECK: load +; CHECK: store +define i32 @noHoistInSingleBBWithStore() { +entry: + %D = alloca i32, align 4 + %0 = bitcast i32* %D to i8* + %bf = load i8, i8* %0, align 4 + %bf.clear = and i8 %bf, -3 + store i8 %bf.clear, i8* %0, align 4 + %bf1 = load i8, i8* %0, align 4 + %bf.clear1 = and i8 %bf1, 1 + store i8 %bf.clear1, i8* %0, align 4 + ret i32 0 +} + +; Check that we do not hoist loads past calls within a same basic block. +; CHECK-LABEL: @noHoistInSingleBBWithCall +; CHECK: load +; CHECK: call +; CHECK: load +declare void @foo() +define i32 @noHoistInSingleBBWithCall() { +entry: + %D = alloca i32, align 4 + %0 = bitcast i32* %D to i8* + %bf = load i8, i8* %0, align 4 + %bf.clear = and i8 %bf, -3 + call void @foo() + %bf1 = load i8, i8* %0, align 4 + %bf.clear1 = and i8 %bf1, 1 + ret i32 0 +} + +; Check that we do not hoist loads past stores in any branch of a diamond. +; CHECK-LABEL: @noHoistInDiamondWithOneStore1 +; CHECK: fdiv +; CHECK: fcmp +; CHECK: br +define float @noHoistInDiamondWithOneStore1(float %d, float* %min, float* %max, float* %a) { +entry: + %div = fdiv float 1.000000e+00, %d + %cmp = fcmp oge float %div, 0.000000e+00 + br i1 %cmp, label %if.then, label %if.else + +if.then: ; preds = %entry + store float 0.000000e+00, float* @GlobalVar + %0 = load float, float* %min, align 4 + %1 = load float, float* %a, align 4 + %sub = fsub float %0, %1 + %mul = fmul float %sub, %div + %2 = load float, float* %max, align 4 + %sub1 = fsub float %2, %1 + %mul2 = fmul float %sub1, %div + br label %if.end + +if.else: ; preds = %entry + ; There are no side effects on the if.else branch. 
+ %3 = load float, float* %max, align 4 + %4 = load float, float* %a, align 4 + %sub3 = fsub float %3, %4 + %mul4 = fmul float %sub3, %div + %5 = load float, float* %min, align 4 + %sub5 = fsub float %5, %4 + %mul6 = fmul float %sub5, %div + br label %if.end + +if.end: ; preds = %if.else, %if.then + %tmax.0 = phi float [ %mul2, %if.then ], [ %mul6, %if.else ] + %tmin.0 = phi float [ %mul, %if.then ], [ %mul4, %if.else ] + + %6 = load float, float* %max, align 4 + %7 = load float, float* %a, align 4 + %sub6 = fsub float %6, %7 + %mul7 = fmul float %sub6, %div + %8 = load float, float* %min, align 4 + %sub8 = fsub float %8, %7 + %mul9 = fmul float %sub8, %div + + %add = fadd float %tmax.0, %tmin.0 + ret float %add +} + +; Check that we do not hoist loads past a store in any branch of a diamond. +; CHECK-LABEL: @noHoistInDiamondWithOneStore2 +; CHECK: fdiv +; CHECK: fcmp +; CHECK: br +define float @noHoistInDiamondWithOneStore2(float %d, float* %min, float* %max, float* %a) { +entry: + %div = fdiv float 1.000000e+00, %d + %cmp = fcmp oge float %div, 0.000000e+00 + br i1 %cmp, label %if.then, label %if.else + +if.then: ; preds = %entry + ; There are no side effects on the if.then branch. + %0 = load float, float* %min, align 4 + %1 = load float, float* %a, align 4 + %sub = fsub float %0, %1 + %mul = fmul float %sub, %div + %2 = load float, float* %max, align 4 + %sub1 = fsub float %2, %1 + %mul2 = fmul float %sub1, %div + br label %if.end + +if.else: ; preds = %entry + store float 0.000000e+00, float* @GlobalVar + %3 = load float, float* %max, align 4 + %4 = load float, float* %a, align 4 + %sub3 = fsub float %3, %4 + %mul4 = fmul float %sub3, %div + %5 = load float, float* %min, align 4 + %sub5 = fsub float %5, %4 + %mul6 = fmul float %sub5, %div + br label %if.end + +if.end: ; preds = %if.else, %if.then + %tmax.0 = phi float [ %mul2, %if.then ], [ %mul6, %if.else ] + %tmin.0 = phi float [ %mul, %if.then ], [ %mul4, %if.else ] + + %6 = load float, float* %max, align 4 + %7 = load float, float* %a, align 4 + %sub6 = fsub float %6, %7 + %mul7 = fmul float %sub6, %div + %8 = load float, float* %min, align 4 + %sub8 = fsub float %8, %7 + %mul9 = fmul float %sub8, %div + + %add = fadd float %tmax.0, %tmin.0 + ret float %add +} + +; Check that we do not hoist loads outside a loop containing stores. +; CHECK-LABEL: @noHoistInLoopsWithStores +; CHECK: fdiv +; CHECK: fcmp +; CHECK: br +define float @noHoistInLoopsWithStores(float %d, float* %min, float* %max, float* %a) { +entry: + %div = fdiv float 1.000000e+00, %d + %cmp = fcmp oge float %div, 0.000000e+00 + br i1 %cmp, label %do.body, label %if.else + +do.body: + %0 = load float, float* %min, align 4 + %1 = load float, float* %a, align 4 + + ; It is unsafe to hoist the loads outside the loop because of the store. 
+ store float 0.000000e+00, float* @GlobalVar + + %sub = fsub float %0, %1 + %mul = fmul float %sub, %div + %2 = load float, float* %max, align 4 + %sub1 = fsub float %2, %1 + %mul2 = fmul float %sub1, %div + br label %while.cond + +while.cond: + %cmp1 = fcmp oge float %mul2, 0.000000e+00 + br i1 %cmp1, label %if.end, label %do.body + +if.else: + %3 = load float, float* %max, align 4 + %4 = load float, float* %a, align 4 + %sub3 = fsub float %3, %4 + %mul4 = fmul float %sub3, %div + %5 = load float, float* %min, align 4 + %sub5 = fsub float %5, %4 + %mul6 = fmul float %sub5, %div + br label %if.end + +if.end: + %tmax.0 = phi float [ %mul2, %while.cond ], [ %mul6, %if.else ] + %tmin.0 = phi float [ %mul, %while.cond ], [ %mul4, %if.else ] + + %add = fadd float %tmax.0, %tmin.0 + ret float %add +} + +; Check that we hoist stores: all the instructions from the then branch +; should be hoisted. +; CHECK-LABEL: @hoistStores +; CHECK: zext +; CHECK: trunc +; CHECK: getelementptr +; CHECK: load +; CHECK: getelementptr +; CHECK: store +; CHECK: load +; CHECK: load +; CHECK: zext +; CHECK: add +; CHECK: store +; CHECK: br +; CHECK: if.then +; CHECK: br + +%struct.foo = type { i16* } + +define void @hoistStores(%struct.foo* %s, i32* %coord, i1 zeroext %delta) { +entry: + %frombool = zext i1 %delta to i8 + %tobool = trunc i8 %frombool to i1 + br i1 %tobool, label %if.then, label %if.else + +if.then: ; preds = %entry + %p = getelementptr inbounds %struct.foo, %struct.foo* %s, i32 0, i32 0 + %0 = load i16*, i16** %p, align 8 + %incdec.ptr = getelementptr inbounds i16, i16* %0, i32 1 + store i16* %incdec.ptr, i16** %p, align 8 + %1 = load i16, i16* %0, align 2 + %conv = zext i16 %1 to i32 + %2 = load i32, i32* %coord, align 4 + %add = add i32 %2, %conv + store i32 %add, i32* %coord, align 4 + br label %if.end + +if.else: ; preds = %entry + %p1 = getelementptr inbounds %struct.foo, %struct.foo* %s, i32 0, i32 0 + %3 = load i16*, i16** %p1, align 8 + %incdec.ptr2 = getelementptr inbounds i16, i16* %3, i32 1 + store i16* %incdec.ptr2, i16** %p1, align 8 + %4 = load i16, i16* %3, align 2 + %conv3 = zext i16 %4 to i32 + %5 = load i32, i32* %coord, align 4 + %add4 = add i32 %5, %conv3 + store i32 %add4, i32* %coord, align 4 + %6 = load i16*, i16** %p1, align 8 + %incdec.ptr6 = getelementptr inbounds i16, i16* %6, i32 1 + store i16* %incdec.ptr6, i16** %p1, align 8 + %7 = load i16, i16* %6, align 2 + %conv7 = zext i16 %7 to i32 + %shl = shl i32 %conv7, 8 + %8 = load i32, i32* %coord, align 4 + %add8 = add i32 %8, %shl + store i32 %add8, i32* %coord, align 4 + br label %if.end + +if.end: ; preds = %if.else, %if.then + ret void +}