Index: include/llvm/Analysis/TargetTransformInfo.h =================================================================== --- include/llvm/Analysis/TargetTransformInfo.h +++ include/llvm/Analysis/TargetTransformInfo.h @@ -264,6 +264,11 @@ /// transformation will select an unrolling factor based on the current cost /// threshold and other factors. unsigned Count; + /// A forced peeling factor (the number of bodies of the original loop + /// that should be peeled off before the loop body). When set to 0, the + /// unrolling transformation will select a peeling factor based on profile + /// information and other factors. + unsigned PeelCount; /// Default unroll count for loops with run-time trip count. unsigned DefaultUnrollRuntimeCount; // Set the maximum unrolling factor. The unrolling factor may be selected @@ -292,6 +297,8 @@ bool Force; /// Allow using trip count upper bound to unroll loops. bool UpperBound; + /// Allow peeling off loop iterations for loops with low dynamic tripcount. + bool AllowPeeling; }; /// \brief Get target-customized preferences for the generic loop unrolling Index: include/llvm/Transforms/Utils/LoopUtils.h =================================================================== --- include/llvm/Transforms/Utils/LoopUtils.h +++ include/llvm/Transforms/Utils/LoopUtils.h @@ -461,6 +461,9 @@ void addStringMetadataToLoop(Loop *TheLoop, const char *MDString, unsigned V = 0); +/// \brief Get a loop's estimated trip count based on branch weight metadata. +unsigned getLoopEstimatedTripCount(Loop *L); + /// Helper to consistently add the set of standard passes to a loop pass's \c /// AnalysisUsage. 
/// Index: include/llvm/Transforms/Utils/UnrollLoop.h =================================================================== --- include/llvm/Transforms/Utils/UnrollLoop.h +++ include/llvm/Transforms/Utils/UnrollLoop.h @@ -16,6 +16,9 @@ #ifndef LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H #define LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H +// Needed because we can't forward-declare the nested struct +// TargetTransformInfo::UnrollingPreferences +#include "llvm/Analysis/TargetTransformInfo.h" namespace llvm { @@ -33,8 +36,8 @@ bool UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool Force, bool AllowRuntime, bool AllowExpensiveTripCount, bool PreserveCondBr, bool PreserveOnlyFirst, - unsigned TripMultiple, LoopInfo *LI, ScalarEvolution *SE, - DominatorTree *DT, AssumptionCache *AC, + unsigned TripMultiple, unsigned PeelCount, LoopInfo *LI, + ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC, OptimizationRemarkEmitter *ORE, bool PreserveLCSSA); bool UnrollRuntimeLoopRemainder(Loop *L, unsigned Count, @@ -43,6 +46,12 @@ ScalarEvolution *SE, DominatorTree *DT, bool PreserveLCSSA); +unsigned getPeelCount(Loop *L, unsigned LoopSize, + const TargetTransformInfo::UnrollingPreferences &UP); + +bool peelLoop(Loop *L, unsigned PeelCount, LoopInfo *LI, ScalarEvolution *SE, + DominatorTree *DT, bool PreserveLCSSA); + MDNode *GetUnrollMetadata(MDNode *LoopID, StringRef Name); } Index: lib/Transforms/Scalar/LoopUnrollPass.cpp =================================================================== --- lib/Transforms/Scalar/LoopUnrollPass.cpp +++ lib/Transforms/Scalar/LoopUnrollPass.cpp @@ -24,7 +24,6 @@ #include "llvm/Analysis/OptimizationDiagnosticInfo.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h" -#include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/InstVisitor.h" @@ -102,6 +101,7 @@ cl::desc("Unrolled size limit for loops with an unroll(full) or " 
"unroll_count pragma.")); +static cl::opt + UnrollAllowPeeling("unroll-allow-peeling", cl::Hidden, + cl::desc("Allows loops to be peeled when the dynamic " + "trip count is known to be low.")); + /// A magic value for use with the Threshold parameter to indicate /// that the loop unroll should be performed regardless of how much /// code expansion would result. @@ -123,6 +127,7 @@ UP.PartialThreshold = UP.Threshold; UP.PartialOptSizeThreshold = 0; UP.Count = 0; + UP.PeelCount = 0; UP.DefaultUnrollRuntimeCount = 8; UP.MaxCount = UINT_MAX; UP.FullUnrollMaxCount = UINT_MAX; @@ -132,6 +137,7 @@ UP.AllowExpensiveTripCount = false; UP.Force = false; UP.UpperBound = false; + UP.AllowPeeling = false; // Override with any target specific settings TTI.getUnrollingPreferences(L, UP); @@ -164,6 +170,8 @@ UP.Runtime = UnrollRuntime; if (UnrollMaxUpperBound == 0) UP.UpperBound = false; + if (UnrollAllowPeeling.getNumOccurrences() > 0) + UP.AllowPeeling = UnrollAllowPeeling; // Apply user values provided by argument if (UserThreshold.hasValue()) { @@ -865,7 +873,15 @@ << "Unable to fully unroll loop as directed by unroll(full) pragma " "because loop has a runtime trip count."); - // 5th priority is runtime unrolling. + // 5th priority is loop peeling + UP.PeelCount = getPeelCount(L, LoopSize, UP); + if (UP.PeelCount) { + UP.Runtime = false; + UP.Count = 1; + return ExplicitUnroll; + } + + // 6th priority is runtime unrolling. // Don't unroll a runtime trip count loop when it is disabled. if (HasRuntimeUnrollDisablePragma(L)) { UP.Count = 0; @@ -966,6 +982,7 @@ unsigned TripCount = 0; unsigned MaxTripCount = 0; unsigned TripMultiple = 1; + // If there are multiple exiting blocks but one of them is the latch, use the // latch for the trip count estimation. Otherwise insist on a single exiting // block for the trip count estimation. @@ -1034,13 +1051,17 @@ // Unroll the loop. 
if (!UnrollLoop(L, UP.Count, TripCount, UP.Force, UP.Runtime, UP.AllowExpensiveTripCount, UseUpperBound, MaxOrZero, - TripMultiple, LI, SE, &DT, &AC, &ORE, PreserveLCSSA)) + TripMultiple, UP.PeelCount, LI, SE, &DT, &AC, &ORE, + PreserveLCSSA)) return false; // If loop has an unroll count pragma or unrolled by explicitly set count // mark loop as unrolled to prevent unrolling beyond that requested. - if (IsCountSetExplicitly) + // If the loop was peeled, we already "used up" the profile information + // we had, so we don't want to unroll or peel again. + if (IsCountSetExplicitly || UP.PeelCount) SetLoopAlreadyUnrolled(L); + return true; } @@ -1076,6 +1097,7 @@ const TargetTransformInfo &TTI = getAnalysis().getTTI(F); auto &AC = getAnalysis().getAssumptionCache(F); + // For the old PM, we can't use OptimizationRemarkEmitter as an analysis // pass. Function analyses need to be preserved across loop transformations // but ORE cannot be preserved (see comment before the pass definition). @@ -1156,10 +1178,10 @@ report_fatal_error("LoopUnrollPass: OptimizationRemarkEmitterAnalysis not " "cached at a higher level"); - bool Changed = - tryToUnrollLoop(&L, *DT, LI, SE, *TTI, *AC, *ORE, /*PreserveLCSSA*/ true, - ProvidedCount, ProvidedThreshold, ProvidedAllowPartial, - ProvidedRuntime, ProvidedUpperBound); + bool Changed = tryToUnrollLoop(&L, *DT, LI, SE, *TTI, *AC, *ORE, + /*PreserveLCSSA*/ true, ProvidedCount, + ProvidedThreshold, ProvidedAllowPartial, + ProvidedRuntime, ProvidedUpperBound); if (!Changed) return PreservedAnalyses::all(); Index: lib/Transforms/Utils/CMakeLists.txt =================================================================== --- lib/Transforms/Utils/CMakeLists.txt +++ lib/Transforms/Utils/CMakeLists.txt @@ -24,7 +24,8 @@ Local.cpp LoopSimplify.cpp LoopUnroll.cpp - LoopUnrollRuntime.cpp + LoopUnrollPeel.cpp + LoopUnrollRuntime.cpp LoopUtils.cpp LoopVersioning.cpp LowerInvoke.cpp Index: lib/Transforms/Utils/LoopUnroll.cpp 
=================================================================== --- lib/Transforms/Utils/LoopUnroll.cpp +++ lib/Transforms/Utils/LoopUnroll.cpp @@ -202,6 +202,9 @@ /// runtime-unroll the loop if computing RuntimeTripCount will be expensive and /// AllowExpensiveTripCount is false. /// +/// If we want to perform PGO-based loop peeling, PeelCount is set to the +/// number of iterations we want to peel off. +/// /// The LoopInfo Analysis that is passed will be kept consistent. /// /// This utility preserves LoopInfo. It will also preserve ScalarEvolution and @@ -209,9 +212,11 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool Force, bool AllowRuntime, bool AllowExpensiveTripCount, bool PreserveCondBr, bool PreserveOnlyFirst, - unsigned TripMultiple, LoopInfo *LI, ScalarEvolution *SE, - DominatorTree *DT, AssumptionCache *AC, - OptimizationRemarkEmitter *ORE, bool PreserveLCSSA) { + unsigned TripMultiple, unsigned PeelCount, LoopInfo *LI, + ScalarEvolution *SE, DominatorTree *DT, + AssumptionCache *AC, OptimizationRemarkEmitter *ORE, + bool PreserveLCSSA) { + BasicBlock *Preheader = L->getLoopPreheader(); if (!Preheader) { DEBUG(dbgs() << " Can't unroll; loop preheader-insertion failed.\n"); @@ -257,9 +262,8 @@ if (TripCount != 0 && Count > TripCount) Count = TripCount; - // Don't enter the unroll code if there is nothing to do. This way we don't - // need to support "partial unrolling by 1". - if (TripCount == 0 && Count < 2) + // Don't enter the unroll code if there is nothing to do. + if (TripCount == 0 && Count < 2 && PeelCount == 0) return false; assert(Count > 0); @@ -288,6 +292,13 @@ // flag is specified. 
bool RuntimeTripCount = (TripCount == 0 && Count > 0 && AllowRuntime); + assert((!RuntimeTripCount || !PeelCount) && + "Did not expect runtime trip-count unrolling " + "and peeling for the same loop"); + + if (PeelCount) + peelLoop(L, PeelCount, LI, SE, DT, PreserveLCSSA); + // Loops containing convergent instructions must have a count that divides // their TripMultiple. DEBUG( @@ -301,9 +312,7 @@ "Unroll count must divide trip multiple if loop contains a " "convergent operation."); }); - // Don't output the runtime loop remainder if Count is a multiple of - // TripMultiple. Such a remainder is never needed, and is unsafe if the loop - // contains a convergent instruction. + if (RuntimeTripCount && TripMultiple % Count != 0 && !UnrollRuntimeLoopRemainder(L, Count, AllowExpensiveTripCount, UnrollRuntimeEpilog, LI, SE, DT, @@ -339,6 +348,13 @@ L->getHeader()) << "completely unrolled loop with " << NV("UnrollCount", TripCount) << " iterations"); + } else if (PeelCount) { + DEBUG(dbgs() << "PEELING loop %" << Header->getName() + << " with iteration count " << PeelCount << "!\n"); + ORE->emit(OptimizationRemark(DEBUG_TYPE, "Peeled", L->getStartLoc(), + L->getHeader()) + << " peeling loop %" << Header->getName() << " by " + << NV("PeelCount", PeelCount) << " iterations.\n"); } else { OptimizationRemark Diag(DEBUG_TYPE, "PartialUnrolled", L->getStartLoc(), L->getHeader()); @@ -628,7 +644,7 @@ DEBUG(DT->verifyDomTree()); // Simplify any new induction variables in the partially unrolled loop. 
- if (SE && !CompletelyUnroll) { + if (SE && !CompletelyUnroll && Count > 1) { SmallVector DeadInsts; simplifyLoopIVs(L, SE, DT, LI, DeadInsts); Index: lib/Transforms/Utils/LoopUnrollPeel.cpp =================================================================== --- lib/Transforms/Utils/LoopUnrollPeel.cpp +++ lib/Transforms/Utils/LoopUnrollPeel.cpp @@ -0,0 +1,340 @@ +//===-- UnrollLoopPeel.cpp - Loop peeling utilities -----------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements some loop unrolling utilities for peeling loops +// with dynamically inferred (from PGO) trip counts. See LoopUnroll.cpp for +// unrolling loops with compile-time trip counts. +// +//===----------------------------------------------------------------------===// + +#include "llvm/ADT/Statistic.h" +#include "llvm/Analysis/LoopIterator.h" +#include "llvm/Analysis/LoopPass.h" +#include "llvm/Analysis/ScalarEvolution.h" +#include "llvm/Analysis/TargetTransformInfo.h" +#include "llvm/IR/BasicBlock.h" +#include "llvm/IR/Dominators.h" +#include "llvm/IR/MDBuilder.h" +#include "llvm/IR/Metadata.h" +#include "llvm/IR/Module.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Utils/BasicBlockUtils.h" +#include "llvm/Transforms/Utils/Cloning.h" +#include "llvm/Transforms/Utils/LoopUtils.h" +#include "llvm/Transforms/Utils/UnrollLoop.h" +#include + +using namespace llvm; + +#define DEBUG_TYPE "loop-unroll" +STATISTIC(NumPeeled, "Number of loops peeled"); + +static cl::opt UnrollPeelMaxCount( + "unroll-peel-max-count", cl::init(7), cl::Hidden, + cl::desc("Max average trip count which will cause loop peeling.")); + +static cl::opt UnrollForcePeelCount( + "unroll-force-peel-count", 
cl::init(0), cl::Hidden, + cl::desc("Force a peel count regardless of profiling information.")); + +// Check whether we are capable of peeling this loop. +static bool canPeel(Loop *L) { + // Make sure the loop is in simplified form + if (!L->isLoopSimplifyForm()) + return false; + + // Only peel loops that contain a single exit + if (!L->getExitingBlock() || !L->getUniqueExitBlock()) + return false; + + return true; +} + +// Return the number of iterations we want to peel off. +unsigned +llvm::getPeelCount(Loop *L, unsigned LoopSize, + const TargetTransformInfo::UnrollingPreferences &UP) { + if (!canPeel(L)) + return 0; + + // Only try to peel innermost loops. + if (!L->empty()) + return 0; + + // If the user provided a peel count, use that. + bool UserPeelCount = UnrollForcePeelCount.getNumOccurrences() > 0; + if (UserPeelCount) { + DEBUG(dbgs() << "Force-peeling first " << UnrollForcePeelCount + << " iterations.\n"); + return UnrollForcePeelCount; + } + + // If we don't know the trip count, but have reason to believe the average + // trip count is low, peeling should be beneficial, since we will usually + // hit the peeled section. + // We only do this in the presence of profile information, since otherwise + // our estimates of the trip count are not reliable enough. 
+ if (UP.AllowPeeling && L->getHeader()->getParent()->getEntryCount()) { + unsigned PeelCount = getLoopEstimatedTripCount(L); + + DEBUG(dbgs() << "Profile-based estimated trip count is " << PeelCount + << "\n"); + + if (PeelCount) { + if ((PeelCount <= UnrollPeelMaxCount) && + (LoopSize * (PeelCount + 1) <= UP.Threshold)) { + DEBUG(dbgs() << "Peeling first " << PeelCount << " iterations.\n"); + return PeelCount; + } + DEBUG(dbgs() << "Requested peel count: " << PeelCount << "\n"); + DEBUG(dbgs() << "Max peel count: " << UnrollPeelMaxCount << "\n"); + DEBUG(dbgs() << "Peel cost: " << LoopSize * (PeelCount + 1) << "\n"); + DEBUG(dbgs() << "Max peel cost: " << UP.Threshold << "\n"); + } + } + + return 0; +} + +// Clones the body of the loop L, putting it between InsertTop and InsertBot. +// IterNumber is the serial number of the iteration currently being peeled off. +// AvgIters is the average number of iterations we expect this loop to have. +// PreHeader and Exit are the preheader and exit block of the original loop. +// RemainingHeaderWeight is the number of *dynamic* loop entries still +// unaccounted for - that is, it is the number of times we expect to enter +// the header of the currently-peeled loop iteration. +// NewBlocks is an out parameter that contains a list of the blocks in the +// new clone, and VMap is the corresponding value map. +// LoopBlocks is a helper for DFS-traversal of the loop. +// LVMap is a value-map that maps instructions from the original loop to +// instructions in the last peeled-off iteration. 
+static void cloneLoopBlocks(Loop *L, unsigned IterNumber, unsigned AvgIters, + BasicBlock *InsertTop, BasicBlock *InsertBot, + BasicBlock *PreHeader, BasicBlock *Exit, + uint64_t &RemainingHeaderWeight, + SmallVectorImpl &NewBlocks, + LoopBlocksDFS &LoopBlocks, ValueToValueMapTy &VMap, + ValueToValueMapTy &LVMap, LoopInfo *LI) { + + BasicBlock *Header = L->getHeader(); + BasicBlock *Latch = L->getLoopLatch(); + + Function *F = Header->getParent(); + LoopBlocksDFS::RPOIterator BlockBegin = LoopBlocks.beginRPO(); + LoopBlocksDFS::RPOIterator BlockEnd = LoopBlocks.endRPO(); + Loop *ParentLoop = L->getParentLoop(); + + // For each block in the original loop, create a new copy, + // and update the value map with the newly created values. + for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) { + BasicBlock *NewBB = CloneBasicBlock(*BB, VMap, ".peel", F); + NewBlocks.push_back(NewBB); + + if (ParentLoop) + ParentLoop->addBasicBlockToLoop(NewBB, *LI); + + VMap[*BB] = NewBB; + } + + // Hook-up the control flow for the newly inserted blocks. + // The new header is hooked up directly to the "top", which is either + // the original loop preheader (for the first iteration) or the previous + // iteration's exiting block (for every other iteration) + InsertTop->getTerminator()->setSuccessor(0, cast(VMap[Header])); + + // Similarly, for the latch: + // The original exiting edge is still hooked up to the loop exit. + // The backedge now goes to the "bottom", which is either the loop's real + // header (for the last peeled iteration) or the copied header of the next + // iteration (for every other iteration) + BranchInst *LatchBR = + cast(cast(VMap[Latch])->getTerminator()); + unsigned HeaderIdx = (LatchBR->getSuccessor(0) == Header ? 0 : 1); + LatchBR->setSuccessor(HeaderIdx, InsertBot); + LatchBR->setSuccessor(1 - HeaderIdx, Exit); + + // Set branch probabilities for the new latch copy. + // TODO: Pick a more realistic distribution. 
+ if (RemainingHeaderWeight) { + uint64_t FallThruWeight = RemainingHeaderWeight * + ((float)(AvgIters - IterNumber) / AvgIters * 0.9); + uint64_t ExitWeight = RemainingHeaderWeight - FallThruWeight; + RemainingHeaderWeight -= ExitWeight; + + MDBuilder MDB(LatchBR->getContext()); + MDNode *WeightNode = + HeaderIdx ? MDB.createBranchWeights(ExitWeight, FallThruWeight) + : MDB.createBranchWeights(FallThruWeight, ExitWeight); + LatchBR->setMetadata(LLVMContext::MD_prof, WeightNode); + } + + // Change the incoming values to the ones defined in the preheader or + // the previous cloned loop body. + for (BasicBlock::iterator I = Header->begin(); isa(I); ++I) { + PHINode *NewPHI = cast(VMap[&*I]); + if (IterNumber == 0) { + VMap[&*I] = NewPHI->getIncomingValueForBlock(PreHeader); + } else { + Value *LatchVal = NewPHI->getIncomingValueForBlock(Latch); + Instruction *LatchInst = dyn_cast(LatchVal); + if (LatchInst && L->contains(LatchInst)) + VMap[&*I] = LVMap[LatchInst]; + else + VMap[&*I] = LatchVal; + } + cast(VMap[Header])->getInstList().erase(NewPHI); + } + + // Fix up the outgoing values - we need to add a value for the iteration + // we've just created. Note that this must happen *after* the incoming + // values are adjusted, since the value going out of the latch may also be + // a value coming into the header. + for (BasicBlock::iterator I = Exit->begin(); isa(I); ++I) { + PHINode *PHI = cast(I); + Value *LatchVal = PHI->getIncomingValueForBlock(Latch); + Instruction *LatchInst = dyn_cast(LatchVal); + if (LatchInst && L->contains(LatchInst)) + LatchVal = VMap[LatchVal]; + PHI->addIncoming(LatchVal, cast(VMap[Latch])); + } + + // LastValueMap is updated with the values for the current loop + // which are used the next time this function is called. 
+ for (ValueToValueMapTy::iterator VI = VMap.begin(), VE = VMap.end(); VI != VE; + ++VI) { + LVMap[VI->first] = VI->second; + } +} + +bool llvm::peelLoop(Loop *L, unsigned PeelCount, LoopInfo *LI, + ScalarEvolution *SE, DominatorTree *DT, + bool PreserveLCSSA) { + if (!canPeel(L)) + return false; + + LoopBlocksDFS LoopBlocks(L); + LoopBlocks.perform(LI); + + BasicBlock *Header = L->getHeader(); + BasicBlock *PreHeader = L->getLoopPreheader(); + BasicBlock *Latch = L->getLoopLatch(); + BasicBlock *Exit = L->getUniqueExitBlock(); + + Function *F = Header->getParent(); + + // Set up all the necessary basic blocks. It is convenient to split the + // preheader into 3 parts - two blocks to anchor the peeled copy of the loop + // body, and a new preheader for the "real" loop. + + // Peeling the first iteration transforms. + // + // PreHeader: + // ... + // Header: + // LoopBody; + // If (cond) goto Header + // Exit: + // + // into + // + // InsertTop: + // LoopBody; + // InsertBot: + // If (cond) goto NewPreHeader else goto Exit + // NewPreHeader: + // ... + // Header: + // LoopBody; + // If (cond) goto Header + // Exit: + // + // Each following iteration will split the current bottom anchor in two, + // and put the new copy of the loop body between these two blocks. + + BasicBlock *InsertTop = SplitEdge(PreHeader, Header, DT, LI); + BasicBlock *InsertBot = + SplitBlock(InsertTop, InsertTop->getTerminator(), DT, LI); + BasicBlock *NewPreHeader = + SplitBlock(InsertBot, InsertBot->getTerminator(), DT, LI); + + InsertTop->setName(Header->getName() + ".peel.begin"); + InsertBot->setName(Header->getName() + ".peel.next"); + NewPreHeader->setName(PreHeader->getName() + ".new"); + + ValueToValueMapTy LVMap; + + // If we have branch weight information, we'll want to update it for the + // newly created branches. + BranchInst *LatchBR = + cast(cast(Latch)->getTerminator()); + unsigned HeaderIdx = (LatchBR->getSuccessor(0) == Header ? 
0 : 1); + + uint64_t TrueWeight, FalseWeight; + uint64_t ExitWeight = 0, BackEdgeWeight = 0; + if (LatchBR->extractProfMetadata(TrueWeight, FalseWeight)) { + ExitWeight = HeaderIdx ? TrueWeight : FalseWeight; + BackEdgeWeight = HeaderIdx ? FalseWeight : TrueWeight; + } + + // For each peeled-off iteration, make a copy of the loop. + for (unsigned Iter = 0; Iter < PeelCount; ++Iter) { + SmallVector NewBlocks; + ValueToValueMapTy VMap; + + // The exit weight of the previous iteration is the header entry weight + // of the current iteration. So this is exactly how many dynamic iterations + // the current peeled-off static iteration uses up. + BackEdgeWeight -= ExitWeight; + + cloneLoopBlocks(L, Iter, PeelCount, InsertTop, InsertBot, NewPreHeader, + Exit, ExitWeight, NewBlocks, LoopBlocks, VMap, LVMap, LI); + InsertTop = InsertBot; + InsertBot = SplitBlock(InsertBot, InsertBot->getTerminator(), DT, LI); + InsertBot->setName(Header->getName() + ".peel.next"); + + F->getBasicBlockList().splice(InsertTop->getIterator(), + F->getBasicBlockList(), + NewBlocks[0]->getIterator(), F->end()); + + // Remap to use values from the current iteration instead of the + // previous one. + remapInstructionsInBlocks(NewBlocks, VMap); + } + + // Now adjust the phi nodes in the loop header to get their initial values + // from the last peeled-off iteration instead of the preheader. + for (BasicBlock::iterator I = Header->begin(); isa(I); ++I) { + PHINode *PHI = cast(I); + Value *NewVal = PHI->getIncomingValueForBlock(Latch); + Instruction *LatchInst = dyn_cast(NewVal); + if (LatchInst && L->contains(LatchInst)) + NewVal = LVMap[LatchInst]; + + PHI->setIncomingValue(PHI->getBasicBlockIndex(NewPreHeader), NewVal); + } + + // Adjust the branch weights on the loop exit. + if (ExitWeight) { + MDBuilder MDB(LatchBR->getContext()); + MDNode *WeightNode = + HeaderIdx ? 
MDB.createBranchWeights(ExitWeight, BackEdgeWeight) + : MDB.createBranchWeights(BackEdgeWeight, ExitWeight); + LatchBR->setMetadata(LLVMContext::MD_prof, WeightNode); + } + + // If the loop is nested, we changed the parent loop, update SE. + if (Loop *ParentLoop = L->getParentLoop()) + SE->forgetLoop(ParentLoop); + + NumPeeled++; + + return true; +} Index: lib/Transforms/Utils/LoopUtils.cpp =================================================================== --- lib/Transforms/Utils/LoopUtils.cpp +++ lib/Transforms/Utils/LoopUtils.cpp @@ -1062,3 +1062,39 @@ // just a special case of this.) return true; } + +unsigned llvm::getLoopEstimatedTripCount(Loop *L) { + // Only support loops with a unique exiting block, and a latch. + if (!L->getExitingBlock()) + return 0; + + // Get the branch weights for the loop's backedge. + BranchInst *LatchBR = + dyn_cast(L->getLoopLatch()->getTerminator()); + if (!LatchBR) + return 0; + + assert((LatchBR->getSuccessor(0) == L->getHeader() || + LatchBR->getSuccessor(1) == L->getHeader()) && + "At least one edge out of the latch must go to the header"); + + // To estimate the number of times the loop body was executed, we want to + // know the number of times the backedge was taken, vs. the number of times + // we exited the loop. + // The branch weights give us almost what we want, since they were adjusted + // from the raw counts to provide a better probability estimate. Remove + // the adjustment by subtracting 1 from both weights. + uint64_t TrueVal, FalseVal; + if (!LatchBR->extractProfMetadata(TrueVal, FalseVal) || (TrueVal <= 1) || + (FalseVal <= 1)) + return 0; + + TrueVal -= 1; + FalseVal -= 1; + + // Divide the count of the backedge by the count of the edge exiting the loop. 
+ if (LatchBR->getSuccessor(0) == L->getHeader()) + return TrueVal / FalseVal; + else + return FalseVal / TrueVal; +} Index: test/Transforms/LoopUnroll/peel-loop-pgo.ll =================================================================== --- test/Transforms/LoopUnroll/peel-loop-pgo.ll +++ test/Transforms/LoopUnroll/peel-loop-pgo.ll @@ -0,0 +1,45 @@ +; RUN: opt < %s -S -debug-only=loop-unroll -loop-unroll -unroll-allow-peeling 2>&1 | FileCheck %s +; REQUIRES: asserts + +; CHECK: PEELING loop %for.body with iteration count 3! +; CHECK-LABEL: @basic +; CHECK: br i1 %{{.*}}, label %[[NEXT0:.*]], label %for.cond.for.end_crit_edge, !prof !1 +; CHECK: [[NEXT0]]: +; CHECK: br i1 %{{.*}}, label %[[NEXT1:.*]], label %for.cond.for.end_crit_edge, !prof !2 +; CHECK: [[NEXT1]]: +; CHECK: br i1 %{{.*}}, label %[[NEXT2:.*]], label %for.cond.for.end_crit_edge, !prof !3 +; CHECK: [[NEXT2]]: +; CHECK: br i1 %{{.*}}, label %for.body, label %{{.*}}, !prof !4 + +define void @basic(i32* %p, i32 %k) #0 !prof !0 { +entry: + %cmp3 = icmp slt i32 0, %k + br i1 %cmp3, label %for.body.lr.ph, label %for.end + +for.body.lr.ph: ; preds = %entry + br label %for.body + +for.body: ; preds = %for.body.lr.ph, %for.body + %i.05 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ] + %p.addr.04 = phi i32* [ %p, %for.body.lr.ph ], [ %incdec.ptr, %for.body ] + %incdec.ptr = getelementptr inbounds i32, i32* %p.addr.04, i32 1 + store i32 %i.05, i32* %p.addr.04, align 4 + %inc = add nsw i32 %i.05, 1 + %cmp = icmp slt i32 %inc, %k + br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge, !prof !1 + +for.cond.for.end_crit_edge: ; preds = %for.body + br label %for.end + +for.end: ; preds = %for.cond.for.end_crit_edge, %entry + ret void +} + +!0 = !{!"function_entry_count", i64 1} +!1 = !{!"branch_weights", i32 3001, i32 1001} + +;CHECK: !1 = !{!"branch_weights", i32 900, i32 101} +;CHECK: !2 = !{!"branch_weights", i32 540, i32 360} +;CHECK: !3 = !{!"branch_weights", i32 162, i32 378} +;CHECK: !4 = 
!{!"branch_weights", i32 560, i32 162} + Index: test/Transforms/LoopUnroll/peel-loop.ll =================================================================== --- test/Transforms/LoopUnroll/peel-loop.ll +++ test/Transforms/LoopUnroll/peel-loop.ll @@ -0,0 +1,91 @@ +; RUN: opt < %s -S -loop-unroll -unroll-force-peel-count=3 -simplifycfg -instcombine | FileCheck %s + +; CHECK-LABEL: @basic +; CHECK: %[[CMP0:.*]] = icmp sgt i32 %k, 0 +; CHECK: br i1 %[[CMP0]], label %[[NEXT0:.*]], label %for.end +; CHECK: [[NEXT0]]: +; CHECK: store i32 0, i32* %p, align 4 +; CHECK: %[[CMP1:.*]] = icmp eq i32 %k, 1 +; CHECK: br i1 %[[CMP1]], label %for.end, label %[[NEXT1:.*]] +; CHECK: [[NEXT1]]: +; CHECK: %[[INC1:.*]] = getelementptr inbounds i32, i32* %p, i64 1 +; CHECK: store i32 1, i32* %[[INC1]], align 4 +; CHECK: %[[CMP2:.*]] = icmp sgt i32 %k, 2 +; CHECK: br i1 %[[CMP2]], label %[[NEXT2:.*]], label %for.end +; CHECK: [[NEXT2]]: +; CHECK: %[[INC2:.*]] = getelementptr inbounds i32, i32* %p, i64 2 +; CHECK: store i32 2, i32* %[[INC2]], align 4 +; CHECK: %[[CMP3:.*]] = icmp eq i32 %k, 3 +; CHECK: br i1 %[[CMP3]], label %for.end, label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK: %[[IV:.*]] = phi i32 [ {{.*}}, %[[LOOP]] ], [ 3, %[[NEXT2]] ] + +define void @basic(i32* %p, i32 %k) #0 { +entry: + %cmp3 = icmp slt i32 0, %k + br i1 %cmp3, label %for.body.lr.ph, label %for.end + +for.body.lr.ph: ; preds = %entry + br label %for.body + +for.body: ; preds = %for.body.lr.ph, %for.body + %i.05 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ] + %p.addr.04 = phi i32* [ %p, %for.body.lr.ph ], [ %incdec.ptr, %for.body ] + %incdec.ptr = getelementptr inbounds i32, i32* %p.addr.04, i32 1 + store i32 %i.05, i32* %p.addr.04, align 4 + %inc = add nsw i32 %i.05, 1 + %cmp = icmp slt i32 %inc, %k + br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge + +for.cond.for.end_crit_edge: ; preds = %for.body + br label %for.end + +for.end: ; preds = %for.cond.for.end_crit_edge, %entry + ret void +} + 
+; CHECK-LABEL: @output +; CHECK: %[[CMP0:.*]] = icmp sgt i32 %k, 0 +; CHECK: br i1 %[[CMP0]], label %[[NEXT0:.*]], label %for.end +; CHECK: [[NEXT0]]: +; CHECK: store i32 0, i32* %p, align 4 +; CHECK: %[[CMP1:.*]] = icmp eq i32 %k, 1 +; CHECK: br i1 %[[CMP1]], label %for.end, label %[[NEXT1:.*]] +; CHECK: [[NEXT1]]: +; CHECK: %[[INC1:.*]] = getelementptr inbounds i32, i32* %p, i64 1 +; CHECK: store i32 1, i32* %[[INC1]], align 4 +; CHECK: %[[CMP2:.*]] = icmp sgt i32 %k, 2 +; CHECK: br i1 %[[CMP2]], label %[[NEXT2:.*]], label %for.end +; CHECK: [[NEXT2]]: +; CHECK: %[[INC2:.*]] = getelementptr inbounds i32, i32* %p, i64 2 +; CHECK: store i32 2, i32* %[[INC2]], align 4 +; CHECK: %[[CMP3:.*]] = icmp eq i32 %k, 3 +; CHECK: br i1 %[[CMP3]], label %for.end, label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK: %[[IV:.*]] = phi i32 [ %[[IV:.*]], %[[LOOP]] ], [ 3, %[[NEXT2]] ] +; CHECK: %ret = phi i32 [ 0, %entry ], [ 1, %[[NEXT0]] ], [ 2, %[[NEXT1]] ], [ 3, %[[NEXT2]] ], [ %[[IV]], %[[LOOP]] ] +; CHECK: ret i32 %ret +define i32 @output(i32* %p, i32 %k) #0 { +entry: + %cmp3 = icmp slt i32 0, %k + br i1 %cmp3, label %for.body.lr.ph, label %for.end + +for.body.lr.ph: ; preds = %entry + br label %for.body + +for.body: ; preds = %for.body.lr.ph, %for.body + %i.05 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ] + %p.addr.04 = phi i32* [ %p, %for.body.lr.ph ], [ %incdec.ptr, %for.body ] + %incdec.ptr = getelementptr inbounds i32, i32* %p.addr.04, i32 1 + store i32 %i.05, i32* %p.addr.04, align 4 + %inc = add nsw i32 %i.05, 1 + %cmp = icmp slt i32 %inc, %k + br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge + +for.cond.for.end_crit_edge: ; preds = %for.body + br label %for.end + +for.end: ; preds = %for.cond.for.end_crit_edge, %entry + %ret = phi i32 [ 0, %entry], [ %inc, %for.cond.for.end_crit_edge ] + ret i32 %ret +}