Index: include/llvm/InitializePasses.h
===================================================================
--- include/llvm/InitializePasses.h
+++ include/llvm/InitializePasses.h
@@ -167,6 +167,7 @@
 void initializeJumpThreadingPass(PassRegistry&);
 void initializeLCSSAWrapperPassPass(PassRegistry &);
 void initializeLegacyLICMPassPass(PassRegistry&);
+void initializeLegacyLoopSinkPassPass(PassRegistry&);
 void initializeLazyBranchProbabilityInfoPassPass(PassRegistry&);
 void initializeLazyBlockFrequencyInfoPassPass(PassRegistry&);
 void initializeLazyValueInfoWrapperPassPass(PassRegistry&);
Index: include/llvm/LinkAllPasses.h
===================================================================
--- include/llvm/LinkAllPasses.h
+++ include/llvm/LinkAllPasses.h
@@ -111,6 +111,7 @@
       (void) llvm::createInternalizePass();
       (void) llvm::createLCSSAPass();
       (void) llvm::createLICMPass();
+      (void) llvm::createLoopSinkPass();
       (void) llvm::createLazyValueInfoPass();
       (void) llvm::createLoopExtractorPass();
       (void) llvm::createLoopInterchangePass();
Index: include/llvm/Transforms/Scalar.h
===================================================================
--- include/llvm/Transforms/Scalar.h
+++ include/llvm/Transforms/Scalar.h
@@ -140,6 +140,13 @@
 //===----------------------------------------------------------------------===//
 //
+// LoopSink - This pass sinks invariants from preheader to loop body where
+// frequency is lower than loop preheader.
+//
+Pass *createLoopSinkPass();
+
+//===----------------------------------------------------------------------===//
+//
 // LoopInterchange - This pass interchanges loops to provide a more
 // cache-friendly memory access patterns.
 //
Index: include/llvm/Transforms/Utils/LoopUtils.h
===================================================================
--- include/llvm/Transforms/Utils/LoopUtils.h
+++ include/llvm/Transforms/Utils/LoopUtils.h
@@ -467,6 +467,14 @@
 /// All loop passes should call this as part of implementing their \c
 /// getAnalysisUsage.
 void getLoopAnalysisUsage(AnalysisUsage &AU);
+
+/// Returns true if the hoister and sinker can handle this instruction.
+/// If SafetyInfo is not nullptr, check if the instruction can execute
+/// speculatively.
+///
+bool canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
+                        Loop *CurLoop, AliasSetTracker *CurAST,
+                        LoopSafetyInfo *SafetyInfo);
 }
 
 #endif
Index: lib/Transforms/Scalar/CMakeLists.txt
===================================================================
--- lib/Transforms/Scalar/CMakeLists.txt
+++ lib/Transforms/Scalar/CMakeLists.txt
@@ -17,6 +17,7 @@
   IndVarSimplify.cpp
   JumpThreading.cpp
   LICM.cpp
+  LoopSink.cpp
   LoadCombine.cpp
   LoopDeletion.cpp
   LoopDataPrefetch.cpp
Index: lib/Transforms/Scalar/LICM.cpp
===================================================================
--- lib/Transforms/Scalar/LICM.cpp
+++ lib/Transforms/Scalar/LICM.cpp
@@ -100,10 +100,6 @@
 CloneInstructionInExitBlock(Instruction &I, BasicBlock &ExitBlock, PHINode &PN,
                             const LoopInfo *LI,
                             const LoopSafetyInfo *SafetyInfo);
-static bool canSinkOrHoistInst(Instruction &I, AliasAnalysis *AA,
-                               DominatorTree *DT, TargetLibraryInfo *TLI,
-                               Loop *CurLoop, AliasSetTracker *CurAST,
-                               LoopSafetyInfo *SafetyInfo);
 
 namespace {
 struct LoopInvariantCodeMotion {
@@ -337,7 +333,7 @@
     // operands of the instruction are loop invariant.
     //
     if (isNotUsedInLoop(I, CurLoop, SafetyInfo) &&
-        canSinkOrHoistInst(I, AA, DT, TLI, CurLoop, CurAST, SafetyInfo)) {
+        canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, SafetyInfo)) {
       ++II;
       Changed |= sink(I, LI, DT, CurLoop, CurAST, SafetyInfo);
     }
@@ -390,7 +386,7 @@
     // is safe to hoist the instruction.
     //
     if (CurLoop->hasLoopInvariantOperands(&I) &&
-        canSinkOrHoistInst(I, AA, DT, TLI, CurLoop, CurAST, SafetyInfo) &&
+        canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, SafetyInfo) &&
         isSafeToExecuteUnconditionally(
             I, DT, CurLoop, SafetyInfo,
             CurLoop->getLoopPreheader()->getTerminator()))
@@ -436,12 +432,9 @@
     SafetyInfo->BlockColors = colorEHFunclets(*Fn);
 }
 
-/// canSinkOrHoistInst - Return true if the hoister and sinker can handle this
-/// instruction.
-///
-bool canSinkOrHoistInst(Instruction &I, AliasAnalysis *AA, DominatorTree *DT,
-                        TargetLibraryInfo *TLI, Loop *CurLoop,
-                        AliasSetTracker *CurAST, LoopSafetyInfo *SafetyInfo) {
+bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
+                              Loop *CurLoop, AliasSetTracker *CurAST,
+                              LoopSafetyInfo *SafetyInfo) {
   // Loads have extra constraints we have to verify before we can hoist them.
   if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
     if (!LI->isUnordered())
@@ -515,6 +508,9 @@
       !isa<InsertValueInst>(I))
     return false;
 
+  if (!SafetyInfo)
+    return true;
+
   // TODO: Plumb the context instruction through to make hoisting and sinking
   // more powerful. Hoisting of loads already works due to the special casing
   // above.
Index: lib/Transforms/Scalar/LoopSink.cpp
===================================================================
--- /dev/null
+++ lib/Transforms/Scalar/LoopSink.cpp
@@ -0,0 +1,252 @@
+//===-- LoopSink.cpp - Loop Sink Pass ------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass traverses all instructions in loop's preheader and sinks them to
+// the loop body where frequency is lower than the loop's preheader.
+// This pass is a reverse-transformation of LICM. It differs from the Sink
+// pass that it only processes instructions in loop's preheader, and has more
+// accurate alias/profile info to guide sinking decisions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/AliasSetTracker.h"
+#include "llvm/Analysis/BasicAliasAnalysis.h"
+#include "llvm/Analysis/BlockFrequencyInfo.h"
+#include "llvm/Analysis/Loads.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/LoopPass.h"
+#include "llvm/Analysis/LoopPassManager.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/LoopUtils.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "loopsink"
+
+STATISTIC(NumLoopSunk, "Number of instructions sunk into loop");
+
+static cl::opt<unsigned> SinkFrequencyPercentThreshold(
+    "sink-freq-percent-threshold", cl::Hidden, cl::init(90),
+    cl::desc("Do not sink instructions that require cloning unless they "
+             "execute less than this percent of the time."));
+
+static bool SinkLoop(Loop *L, AliasAnalysis *AA, LoopInfo *LI,
+                     DominatorTree *DT, BlockFrequencyInfo *BFI,
+                     ScalarEvolution *SE);
+
+namespace {
+struct LegacyLoopSinkPass : public LoopPass {
+  static char ID;
+  LegacyLoopSinkPass() : LoopPass(ID) {
+    initializeLegacyLoopSinkPassPass(*PassRegistry::getPassRegistry());
+  }
+
+  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
+    if (skipLoop(L))
+      return false;
+
+    auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
+    return SinkLoop(L, &getAnalysis<AAResultsWrapperPass>().getAAResults(),
+                    &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
+                    &getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
+                    &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(),
+                    SE ? &SE->getSE() : nullptr);
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    AU.addRequired<BlockFrequencyInfoWrapperPass>();
+    getLoopAnalysisUsage(AU);
+  }
+};
+}
+
+char LegacyLoopSinkPass::ID = 0;
+INITIALIZE_PASS_BEGIN(LegacyLoopSinkPass, "loop-sink", "Loop Sink", false,
+                      false)
+INITIALIZE_PASS_DEPENDENCY(LoopPass)
+INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
+INITIALIZE_PASS_END(LegacyLoopSinkPass, "loop-sink", "Loop Sink", false, false)
+
+Pass *llvm::createLoopSinkPass() { return new LegacyLoopSinkPass(); }
+
+/// Return adjusted total frequency of \p BBs.
+///
+/// * If there is only one BB, sinking instruction will not introduce code
+///   size increase. Thus there is no need to adjust the frequency.
+/// * If there are more than one BB, sinking would lead to code size increase.
+///   In this case, we add some "tax" to the total frequency to make it harder
+///   to sink. E.g.
+///     Freq(Preheader) = 100
+///     Freq(BBs) = sum(50, 49) = 99
+///   Even if Freq(BBs) < Freq(Preheader), we will not sink from Preheader to
+///   BBs as the difference is too small to justify the code size increase.
+///   To model this, The adjusted Freq(BBs) will be:
+///     AdjustedFreq(BBs) = 99 / SinkFrequencyPercentThreshold%
+static BlockFrequency AdjustedSumFreq(SmallPtrSet<BasicBlock *, 2> &BBs,
+                                      BlockFrequencyInfo *BFI) {
+  BlockFrequency T = 0;
+  for (BasicBlock *B : BBs)
+    T += BFI->getBlockFreq(B);
+  if (BBs.size() > 1)
+    T /= BranchProbability(SinkFrequencyPercentThreshold, 100);
+  return T;
+}
+
+/// Return a set of basic blocks to insert sunk instructions.
+///
+/// The returned set of basic blocks (BBsToSinkInto) should satisfy:
+///
+/// * Inside the loop \p L
+/// * For each UseBB in \p UseBBs, there is at least one BB in BBsToSinkInto
+///   that dominates the UseBB
+/// * Has minimum total frequency that is no greater than preheader frequency
+///
+/// The purpose of the function is to find the optimal sinking points to
+/// minimize execution cost, which is defined as "sum of frequency of
+/// BBsToSinkInto".
+/// As a result, the returned BBsToSinkInto needs to have minimum total
+/// frequency.
+/// Additionally, if the total frequency of BBsToSinkInto exceeds preheader
+/// frequency, the optimal solution is not sinking (return empty set).
+static SmallPtrSet<BasicBlock *, 2>
+FindBBsToSinkInto(const Loop *L, const SmallPtrSet<BasicBlock *, 2> &UseBBs,
+                  DominatorTree *DT, BlockFrequencyInfo *BFI) {
+  SmallPtrSet<BasicBlock *, 2> BBsToSinkInto;
+  if (UseBBs.size() == 0)
+    return BBsToSinkInto;
+
+  // Sort loop's basic blocks by frequency
+  SmallVector<BasicBlock *, 10> SortedLoopBBs;
+  for (BasicBlock *B : L->blocks())
+    if (BFI->getBlockFreq(B) <= BFI->getBlockFreq(L->getLoopPreheader()))
+      SortedLoopBBs.push_back(B);
+  std::stable_sort(SortedLoopBBs.begin(), SortedLoopBBs.end(),
+                   [&](BasicBlock *A, BasicBlock *B) {
+                     return BFI->getBlockFreq(A) < BFI->getBlockFreq(B);
+                   });
+
+  BBsToSinkInto.insert(UseBBs.begin(), UseBBs.end());
+  SmallPtrSet<BasicBlock *, 2> BBsDominatedByColdestBB;
+
+  // For every iteration:
+  //   * Pick the ColdestBB from SortedLoopBBs
+  //   * Find the set BBsDominatedByColdestBB that satisfy:
+  //     - BBsDominatedByColdestBB is a subset of BBsToSinkInto
+  //     - Every BB in BBsDominatedByColdestBB is dominated by ColdestBB
+  //   * If Freq(ColdestBB) < Freq(BBsDominatedByColdestBB), remove
+  //     BBsDominatedByColdestBB from BBsToSinkInto, add ColdestBB to
+  //     BBsToSinkInto
+  for (BasicBlock *ColdestBB : SortedLoopBBs) {
+    BBsDominatedByColdestBB.clear();
+    for (BasicBlock *SinkedBB : BBsToSinkInto)
+      if (DT->dominates(ColdestBB, SinkedBB))
+        BBsDominatedByColdestBB.insert(SinkedBB);
+    if (BBsDominatedByColdestBB.size() == 0)
+      continue;
+    if (AdjustedSumFreq(BBsDominatedByColdestBB, BFI) >
+        BFI->getBlockFreq(ColdestBB)) {
+      for (BasicBlock *DominatedBB : BBsDominatedByColdestBB) {
+        BBsToSinkInto.erase(DominatedBB);
+      }
+      BBsToSinkInto.insert(ColdestBB);
+    }
+  }
+
+  // If the total frequency of BBsToSinkInto is larger than preheader frequency,
+  // do not sink.
+  if (AdjustedSumFreq(BBsToSinkInto, BFI) >
+      BFI->getBlockFreq(L->getLoopPreheader()))
+    BBsToSinkInto.clear();
+  return BBsToSinkInto;
+}
+
+/// SinkLoop - Sink instructions from loop's preheader to the loop body if the
+/// sum frequency of inserted copy is smaller than preheader's frequency.
+bool SinkLoop(Loop *L, AliasAnalysis *AA, LoopInfo *LI, DominatorTree *DT,
+              BlockFrequencyInfo *BFI, ScalarEvolution *SE) {
+  BasicBlock *Preheader = L->getLoopPreheader();
+  if (!Preheader)
+    return false;
+
+  const BlockFrequency PreheaderFreq = BFI->getBlockFreq(Preheader);
+  if (!any_of(L->blocks(), [&](const BasicBlock *BB) {
+        return BFI->getBlockFreq(BB) <= PreheaderFreq;
+      }))
+    return false;
+
+  bool Changed = false;
+  AliasSetTracker CurAST(*AA);
+
+  // Compute alias set.
+  for (BasicBlock *BB : L->blocks())
+    CurAST.add(*BB);
+
+  // Traverse preheader's instructions in reverse order because if A depends
+  // on B (A appears after B), A needs to be sunk first before B can be
+  // sunk.
+  for (auto II = Preheader->rbegin(), E = Preheader->rend(); II != E;) {
+    Instruction *I = &*II++;
+    if (!L->hasLoopInvariantOperands(I) ||
+        !canSinkOrHoistInst(*I, AA, DT, L, &CurAST, nullptr))
+      continue;
+
+    // All blocks that have uses of I and are in the sub loop of L.
+    SmallPtrSet<BasicBlock *, 2> BBs;
+    for (auto &U : I->uses()) {
+      Instruction *UI = cast<Instruction>(U.getUser());
+      // If the use is phi node, we can not sink I to this BB.
+      if (isa<PHINode>(UI) ||
+          !L->contains(LI->getLoopFor(UI->getParent()))) {
+        BBs.clear();
+        break;
+      }
+      BBs.insert(UI->getParent());
+    }
+
+    // Find the set of BBs that we should insert a copy of I.
+    SmallPtrSet<BasicBlock *, 2> BBsToSinkInto =
+        FindBBsToSinkInto(L, BBs, DT, BFI);
+    if (BBsToSinkInto.size() == 0)
+      continue;
+
+    auto BI = BBsToSinkInto.begin();
+    DEBUG(dbgs() << "Sinking " << *I << " To: " << (*BI)->getName() << '\n');
+    NumLoopSunk++;
+    I->moveBefore(&*(*BI)->getFirstInsertionPt());
+
+    for (++BI; BI != BBsToSinkInto.end(); ++BI) {
+      BasicBlock *N = *BI;
+      // Clone I and replace its uses.
+      Instruction *IC = I->clone();
+      IC->setName(I->getName());
+      IC->insertBefore(&*N->getFirstInsertionPt());
+      for (BasicBlock *PredBB : predecessors(IC->getParent()))
+        replaceDominatedUsesWith(I, IC, *DT,
+                                 BasicBlockEdge(PredBB, IC->getParent()));
+      DEBUG(dbgs() << "Sinking " << *I << " To: " << N->getName() << '\n');
+      NumLoopSunk++;
+    }
+    Changed = true;
+  }
+
+  if (Changed && SE)
+    SE->forgetLoopDispositions(L);
+  return Changed;
+}
Index: lib/Transforms/Scalar/Scalar.cpp
===================================================================
--- lib/Transforms/Scalar/Scalar.cpp
+++ lib/Transforms/Scalar/Scalar.cpp
@@ -51,6 +51,7 @@
   initializeIndVarSimplifyLegacyPassPass(Registry);
   initializeJumpThreadingPass(Registry);
   initializeLegacyLICMPassPass(Registry);
+  initializeLegacyLoopSinkPassPass(Registry);
   initializeLoopDataPrefetchLegacyPassPass(Registry);
   initializeLoopDeletionLegacyPassPass(Registry);
   initializeLoopAccessLegacyAnalysisPass(Registry);
@@ -141,6 +142,10 @@
   unwrap(PM)->add(createJumpThreadingPass());
 }
 
+void LLVMAddLoopSinkPass(LLVMPassManagerRef PM) {
+  unwrap(PM)->add(createLoopSinkPass());
+}
+
 void LLVMAddLICMPass(LLVMPassManagerRef PM) {
   unwrap(PM)->add(createLICMPass());
 }
Index: test/Transforms/LICM/loopsink.ll
===================================================================
--- /dev/null
+++ test/Transforms/LICM/loopsink.ll
@@ -0,0 +1,303 @@
+; RUN: opt -S -loop-sink < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@g = global i32 0, align 4
+
+; Function Attrs: norecurse nounwind readonly uwtable
+;     b1
+;    /  \
+;   b2  b6
+;  /  \  |
+; b3  b4 |
+;  \  /  |
+;   b5   |
+;    \  /
+;     b7
+; preheader: 1000
+; b2: 15
+; b3: 7
+; b4: 7
+; Sink load to b2
+; CHECK: t1
+; CHECK: .b2:
+; CHECK: load i32, i32* @g
+; CHECK: .b3:
+; CHECK-NOT: load i32, i32* @g
+define i32 @t1(i32, i32) #0 {
+  %3 = icmp eq i32 %1, 0
+  br i1 %3, label %.exit, label %.preheader
+
+.preheader:
+  %invariant = load i32, i32* @g
+  br label %.b1
+
+.b1:
+  %iv = phi i32 [ %t7, %.b7 ], [ 0, %.preheader ]
+  %c1 = icmp sgt i32 %iv, %0
+  br i1 %c1, label %.b2, label %.b6, !prof !1
+
+.b2:
+  %c2 = icmp sgt i32 %iv, 1
+  br i1 %c2, label %.b3, label %.b4
+
+.b3:
+  %t3 = sub nsw i32 %invariant, %iv
+  br label %.b5
+
+.b4:
+  %t4 = add nsw i32 %invariant, %iv
+  br label %.b5
+
+.b5:
+  %p5 = phi i32 [ %t3, %.b3 ], [ %t4, %.b4 ]
+  %t5 = mul nsw i32 %p5, 5
+  br label %.b7
+
+.b6:
+  %t6 = add nsw i32 %iv, 100
+  br label %.b7
+
+.b7:
+  %p7 = phi i32 [ %t6, %.b6 ], [ %t5, %.b5 ]
+  %t7 = add nuw nsw i32 %iv, 1
+  %c7 = icmp eq i32 %t7, %p7
+  br i1 %c7, label %.b1, label %.exit, !prof !7
+
+.exit:
+  ret i32 10
+}
+
+; Function Attrs: norecurse nounwind readonly uwtable
+;     b1
+;    /  \
+;   b2  b6
+;  /  \  |
+; b3  b4 |
+;  \  /  |
+;   b5   |
+;    \  /
+;     b7
+; preheader: 500
+; b1: 16016
+; b3: 8
+; b6: 8
+; Sink load to b3 and b6
+; CHECK: t2
+; CHECK: .preheader:
+; CHECK-NOT: load i32, i32* @g
+; CHECK: .b3:
+; CHECK: load i32, i32* @g
+; CHECK: .b4:
+; CHECK: .b6:
+; CHECK: load i32, i32* @g
+; CHECK: .b7:
+define i32 @t2(i32, i32) #0 {
+  %3 = icmp eq i32 %1, 0
+  br i1 %3, label %.exit, label %.preheader
+
+.preheader:
+  %invariant = load i32, i32* @g
+  br label %.b1
+
+.b1:
+  %iv = phi i32 [ %t7, %.b7 ], [ 0, %.preheader ]
+  %c1 = icmp sgt i32 %iv, %0
+  br i1 %c1, label %.b2, label %.b6, !prof !6
+
+.b2:
+  %c2 = icmp sgt i32 %iv, 1
+  br i1 %c2, label %.b3, label %.b4, !prof !1
+
+.b3:
+  %t3 = sub nsw i32 %invariant, %iv
+  br label %.b5
+
+.b4:
+  %t4 = add nsw i32 5, %iv
+  br label %.b5
+
+.b5:
+  %p5 = phi i32 [ %t3, %.b3 ], [ %t4, %.b4 ]
+  %t5 = mul nsw i32 %p5, 5
+  br label %.b7
+
+.b6:
+  %t6 = add nsw i32 %iv, %invariant
+  br label %.b7
+
+.b7:
+  %p7 = phi i32 [ %t6, %.b6 ], [ %t5, %.b5 ]
+  %t7 = add nuw nsw i32 %iv, 1
+  %c7 = icmp eq i32 %t7, %p7
+  br i1 %c7, label %.b1, label %.exit, !prof !7
+
+.exit:
+  ret i32 10
+}
+
+; Function Attrs: norecurse nounwind readonly uwtable
+;     b1
+;    /  \
+;   b2  b6
+;  /  \  |
+; b3  b4 |
+;  \  /  |
+;   b5   |
+;    \  /
+;     b7
+; preheader: 500
+; b3: 8
+; b5: 16008
+; Do not sink load from preheader.
+; CHECK: t3
+; CHECK: .preheader:
+; CHECK: load i32, i32* @g
+; CHECK: .b1:
+; CHECK-NOT: load i32, i32* @g
+define i32 @t3(i32, i32) #0 {
+  %3 = icmp eq i32 %1, 0
+  br i1 %3, label %.exit, label %.preheader
+
+.preheader:
+  %invariant = load i32, i32* @g
+  br label %.b1
+
+.b1:
+  %iv = phi i32 [ %t7, %.b7 ], [ 0, %.preheader ]
+  %c1 = icmp sgt i32 %iv, %0
+  br i1 %c1, label %.b2, label %.b6, !prof !6
+
+.b2:
+  %c2 = icmp sgt i32 %iv, 1
+  br i1 %c2, label %.b3, label %.b4, !prof !1
+
+.b3:
+  %t3 = sub nsw i32 %invariant, %iv
+  br label %.b5
+
+.b4:
+  %t4 = add nsw i32 5, %iv
+  br label %.b5
+
+.b5:
+  %p5 = phi i32 [ %t3, %.b3 ], [ %t4, %.b4 ]
+  %t5 = mul nsw i32 %p5, %invariant
+  br label %.b7
+
+.b6:
+  %t6 = add nsw i32 %iv, 5
+  br label %.b7
+
+.b7:
+  %p7 = phi i32 [ %t6, %.b6 ], [ %t5, %.b5 ]
+  %t7 = add nuw nsw i32 %iv, 1
+  %c7 = icmp eq i32 %t7, %p7
+  br i1 %c7, label %.b1, label %.exit, !prof !7
+
+.exit:
+  ret i32 10
+}
+
+; Function Attrs: norecurse nounwind readonly uwtable
+; For single-BB loop with <=1 avg trip count, sink load to b1
+; CHECK: t4
+; CHECK: .preheader:
+; CHECK-NOT: load i32, i32* @g
+; CHECK: .b1:
+; CHECK: load i32, i32* @g
+; CHECK: .exit:
+define i32 @t4(i32, i32) #0 {
+.preheader:
+  %invariant = load i32, i32* @g
+  br label %.b1
+
+.b1:
+  %iv = phi i32 [ %t1, %.b1 ], [ 0, %.preheader ]
+  %t1 = add nsw i32 %invariant, %iv
+  %c1 = icmp sgt i32 %iv, %0
+  br i1 %c1, label %.b1, label %.exit, !prof !1
+
+.exit:
+  ret i32 10
+}
+
+; Function Attrs: norecurse nounwind readonly uwtable
+;     b1
+;    /  \
+;   b2  b6
+;  /  \  |
+; b3  b4 |
+;  \  /  |
+;   b5   |
+;    \  /
+;     b7
+; preheader: 1000
+; b2: 15
+; b3: 7
+; b4: 7
+; There is alias store in loop, do not sink load
+; CHECK: t5
+; CHECK: .preheader:
+; CHECK: load i32, i32* @g
+; CHECK: .b1:
+; CHECK-NOT: load i32, i32* @g
+define i32 @t5(i32, i32*) #0 {
+  %3 = icmp eq i32 %0, 0
+  br i1 %3, label %.exit, label %.preheader
+
+.preheader:
+  %invariant = load i32, i32* @g
+  br label %.b1
+
+.b1:
+  %iv = phi i32 [ %t7, %.b7 ], [ 0, %.preheader ]
+  %c1 = icmp sgt i32 %iv, %0
+  br i1 %c1, label %.b2, label %.b6, !prof !1
+
+.b2:
+  %c2 = icmp sgt i32 %iv, 1
+  br i1 %c2, label %.b3, label %.b4
+
+.b3:
+  %t3 = sub nsw i32 %invariant, %iv
+  br label %.b5
+
+.b4:
+  %t4 = add nsw i32 %invariant, %iv
+  br label %.b5
+
+.b5:
+  %p5 = phi i32 [ %t3, %.b3 ], [ %t4, %.b4 ]
+  %t5 = mul nsw i32 %p5, 5
+  br label %.b7
+
+.b6:
+  %t6 = call i32 @foo()
+  br label %.b7
+
+.b7:
+  %p7 = phi i32 [ %t6, %.b6 ], [ %t5, %.b5 ]
+  %t7 = add nuw nsw i32 %iv, 1
+  %c7 = icmp eq i32 %t7, %p7
+  br i1 %c7, label %.b1, label %.exit, !prof !7
+
+.exit:
+  ret i32 10
+}
+
+declare i32 @foo()
+
+attributes #0 = { norecurse nounwind readonly uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.ident = !{!0}
+
+!0 = !{!"clang version 3.9.0 (trunk 268689)"}
+!1 = !{!"branch_weights", i32 1, i32 2000}
+!2 = !{!3, !3, i64 0}
+!3 = !{!"int", !4, i64 0}
+!4 = !{!"omnipotent char", !5, i64 0}
+!5 = !{!"Simple C++ TBAA"}
+!6 = !{!"branch_weights", i32 2000, i32 1}
+!7 = !{!"branch_weights", i32 100, i32 1}
Index: test/Transforms/LICM/sink.ll
===================================================================
--- /dev/null
+++ test/Transforms/LICM/sink.ll
@@ -0,0 +1,73 @@
+; RUN: opt -S -licm < %s | FileCheck %s --check-prefix=CHECK-LICM
+; RUN: opt -S -licm < %s | opt -S -loop-sink | FileCheck %s --check-prefix=CHECK-SINK
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Original source code:
+; int g;
+; int foo(int p, int x) {
+;   for (int i = 0; i != x; i++)
+;     if (__builtin_expect(i == p, 0)) {
+;       x += g; x *= g;
+;     }
+;   return x;
+; }
+;
+; Load of global value g should not be hoisted to preheader.
+
+@g = global i32 0, align 4
+
+; Function Attrs: norecurse nounwind readonly uwtable
+define i32 @_Z3fooii(i32, i32) #0 {
+  %3 = icmp eq i32 %1, 0
+  br i1 %3, label %._crit_edge, label %.lr.ph.preheader
+
+.lr.ph.preheader:                                 ; preds = %2
+  br label %.lr.ph
+
+; CHECK-LICM: .lr.ph.preheader:
+; CHECK-LICM: load i32, i32* @g
+; CHECK-LICM: br label %.lr.ph
+
+.lr.ph:                                           ; preds = %.lr.ph.preheader, %9
+  %.03 = phi i32 [ %8, %.combine ], [ 0, %.lr.ph.preheader ]
+  %.012 = phi i32 [ %.1, %.combine ], [ %1, %.lr.ph.preheader ]
+  %4 = icmp eq i32 %.03, %0
+  br i1 %4, label %.then, label %.combine, !prof !1
+
+.then:                                            ; preds = %.lr.ph
+  %5 = load i32, i32* @g, align 4, !tbaa !2
+  %6 = add nsw i32 %5, %.012
+  %7 = mul nsw i32 %6, %5
+  br label %.combine
+
+; CHECK-SINK: .then:
+; CHECK-SINK: load i32, i32* @g
+; CHECK-SINK: br label %.combine
+
+.combine:                                         ; preds = %.lr.ph, %.then
+  %.1 = phi i32 [ %7, %.then ], [ %.012, %.lr.ph ]
+  %8 = add nuw nsw i32 %.03, 1
+  %9 = icmp eq i32 %8, %.1
+  br i1 %9, label %._crit_edge.loopexit, label %.lr.ph
+
+._crit_edge.loopexit:                             ; preds = %.combine
+  %.1.lcssa = phi i32 [ %.1, %.combine ]
+  br label %._crit_edge
+
+._crit_edge:                                      ; preds = %._crit_edge.loopexit, %2
+  %.01.lcssa = phi i32 [ 0, %2 ], [ %.1.lcssa, %._crit_edge.loopexit ]
+  ret i32 %.01.lcssa
+}
+
+attributes #0 = { norecurse nounwind readonly uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.ident = !{!0}
+
+!0 = !{!"clang version 3.9.0 (trunk 268689)"}
+!1 = !{!"branch_weights", i32 1, i32 2000}
+!2 = !{!3, !3, i64 0}
+!3 = !{!"int", !4, i64 0}
+!4 = !{!"omnipotent char", !5, i64 0}
+!5 = !{!"Simple C++ TBAA"}