Index: lib/Transforms/Scalar/LoopPredication.cpp =================================================================== --- lib/Transforms/Scalar/LoopPredication.cpp +++ lib/Transforms/Scalar/LoopPredication.cpp @@ -178,6 +178,7 @@ #include "llvm/Transforms/Scalar/LoopPredication.h" #include "llvm/ADT/Statistic.h" +#include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/BranchProbabilityInfo.h" #include "llvm/Analysis/GuardUtils.h" #include "llvm/Analysis/LoopInfo.h" @@ -246,6 +247,7 @@ } }; + AliasAnalysis *AA; ScalarEvolution *SE; BranchProbabilityInfo *BPI; @@ -264,7 +266,7 @@ Optional<LoopICmp> parseLoopLatchICmp(); - bool CanExpand(const SCEV* S); + bool CanExpand(const SCEV* S, const Instruction *At); Value *expandCheck(SCEVExpander &Expander, IRBuilder<> &Builder, ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS); @@ -307,8 +309,9 @@ Optional<LoopICmp> generateLoopLatchCheck(Type *RangeCheckType); public: - LoopPredication(ScalarEvolution *SE, BranchProbabilityInfo *BPI) - : SE(SE), BPI(BPI){}; + LoopPredication(AliasAnalysis *AA, ScalarEvolution *SE, + BranchProbabilityInfo *BPI) + : AA(AA), SE(SE), BPI(BPI){}; bool runOnLoop(Loop *L); }; @@ -329,8 +332,9 @@ return false; auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); BranchProbabilityInfo &BPI = - getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI(); - LoopPredication LP(SE, &BPI); + getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI(); + auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); + LoopPredication LP(AA, SE, &BPI); return LP.runOnLoop(L); } }; @@ -356,7 +360,7 @@ AM.getResult<FunctionAnalysisManagerLoopProxy>(L, AR).getManager(); Function *F = L.getHeader()->getParent(); auto *BPI = FAM.getCachedResult<BranchProbabilityAnalysis>(*F); - LoopPredication LP(&AR.SE, BPI); + LoopPredication LP(&AR.AA, &AR.SE, BPI); if (!LP.runOnLoop(&L)) return PreservedAnalyses::all(); @@ -394,15 +398,22 @@ Type *Ty = LHS->getType(); assert(Ty == RHS->getType() && "expandCheck operands have different types?"); - if (SE->isLoopEntryGuardedByCond(L, Pred, LHS, RHS)) - return Builder.getTrue(); - if (SE->isLoopEntryGuardedByCond(L,
ICmpInst::getInversePredicate(Pred), - LHS, RHS)) - return Builder.getFalse(); + const bool OpsAreLoopInvariant = + SE->isLoopInvariant(LHS, L) && SE->isLoopInvariant(RHS, L); + if (OpsAreLoopInvariant) { + if (SE->isLoopEntryGuardedByCond(L, Pred, LHS, RHS)) + return Builder.getTrue(); + if (SE->isLoopEntryGuardedByCond(L, ICmpInst::getInversePredicate(Pred), + LHS, RHS)) + return Builder.getFalse(); + } - Instruction *InsertAt = &*Builder.GetInsertPoint(); + Instruction * const InsertAt = OpsAreLoopInvariant ? + Preheader->getTerminator() : &*Builder.GetInsertPoint(); Value *LHSV = Expander.expandCodeFor(LHS, Ty, InsertAt); Value *RHSV = Expander.expandCodeFor(RHS, Ty, InsertAt); + IRBuilder<>::InsertPointGuard G(Builder); + Builder.SetInsertPoint(InsertAt); return Builder.CreateICmp(Pred, LHSV, RHSV); } @@ -438,8 +449,36 @@ return Step->isOne() || (Step->isAllOnesValue() && EnableCountDownLoop); } -bool LoopPredication::CanExpand(const SCEV* S) { - return SE->isLoopInvariant(S, L) && isSafeToExpand(S, *SE); +bool LoopPredication::CanExpand(const SCEV* S, const Instruction *At) { + if (SE->isLoopInvariant(S, L) && isSafeToExpand(S, *SE)) + return true; + + // Handling expressions which produce invariant results, but *haven't* yet + // been removed from the loop serves two important purposes. + // 1) Most importantly, it resolves a pass ordering cycle which would + // otherwise need us to iterate licm, loop-predication, and either + // loop-unswitch or loop-peeling to make progress on examples with lots of + // predicable range checks in a row. (Since, in the general case, we can't + // hoist the length checks until the dominating checks have been discharged + // as we can't prove doing so is safe.) + // 2) As a nice side effect, this exposes the value of peeling or unswitching + // much more obviously in the IR.
Otherwise, the cost modeling for other + // transforms would end up needing to duplicate all of this logic to model a + // check which becomes predictable based on a modeled peel or unswitch. + // + // The cost of doing so in the worst case is an extra load in the loop to + // materialize the loop invariant test value instead of checking against the + // original IV which is presumably in a register inside the loop. Such cases + // are presumably rare, and hint at missing opportunities for other passes. + if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) + if (const auto *LI = dyn_cast<LoadInst>(U->getValue())) { + if (!LI->isUnordered()) + return false; + if (AA->pointsToConstantMemory(LI->getOperand(0)) || + LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr) + return isSafeToExpandAt(S, At, *SE); + } + return false; } Optional<Value *> LoopPredication::widenICmpRangeCheckIncrementingLoop( @@ -461,8 +500,10 @@ const SCEV *RHS = SE->getAddExpr(SE->getMinusSCEV(GuardLimit, GuardStart), SE->getMinusSCEV(LatchStart, SE->getOne(Ty))); - if (!CanExpand(GuardStart) || !CanExpand(GuardLimit) || - !CanExpand(LatchLimit) || !CanExpand(RHS)) { + Instruction *InsertAt = &*Builder.GetInsertPoint(); + Instruction *PHTerm = Preheader->getTerminator(); + if (!CanExpand(GuardStart, PHTerm) || !CanExpand(GuardLimit, InsertAt) || + !CanExpand(LatchLimit, PHTerm) || !CanExpand(RHS, InsertAt)) { LLVM_DEBUG(dbgs() << "Can't expand limit check!\n"); return None; } @@ -477,6 +518,10 @@ expandCheck(Expander, Builder, LimitCheckPred, LatchLimit, RHS); auto *FirstIterationCheck = expandCheck(Expander, Builder, RangeCheck.Pred, GuardStart, GuardLimit); + if (L->isLoopInvariant(LimitCheck) && L->isLoopInvariant(FirstIterationCheck)) + InsertAt = Preheader->getTerminator(); + IRBuilder<>::InsertPointGuard G(Builder); + Builder.SetInsertPoint(InsertAt); return Builder.CreateAnd(FirstIterationCheck, LimitCheck); } @@ -487,8 +532,9 @@ const SCEV *GuardStart = RangeCheck.IV->getStart(); const SCEV *GuardLimit =
RangeCheck.Limit; const SCEV *LatchLimit = LatchCheck.Limit; - if (!CanExpand(GuardStart) || !CanExpand(GuardLimit) || - !CanExpand(LatchLimit)) { + Instruction *InsertAt = &*Builder.GetInsertPoint(); + if (!CanExpand(GuardStart, InsertAt) || !CanExpand(GuardLimit, InsertAt) || + !CanExpand(LatchLimit, InsertAt)) { LLVM_DEBUG(dbgs() << "Can't expand limit check!\n"); return None; } @@ -512,6 +558,10 @@ GuardStart, GuardLimit); auto *LimitCheck = expandCheck(Expander, Builder, LimitCheckPred, LatchLimit, SE->getOne(Ty)); + if (L->isLoopInvariant(LimitCheck) && L->isLoopInvariant(FirstIterationCheck)) + InsertAt = Preheader->getTerminator(); + IRBuilder<>::InsertPointGuard G(Builder); + Builder.SetInsertPoint(InsertAt); return Builder.CreateAnd(FirstIterationCheck, LimitCheck); } @@ -586,6 +636,7 @@ Value *Condition, SCEVExpander &Expander, IRBuilder<> &Builder) { + IRBuilder<>::InsertPointGuard G(Builder); unsigned NumWidened = 0; // The guard condition is expected to be in form of: // cond1 && cond2 && cond3 ... @@ -616,6 +667,8 @@ } if (ICmpInst *ICI = dyn_cast<ICmpInst>(Condition)) { + IRBuilder<>::InsertPointGuard G(Builder); + Builder.SetInsertPoint(ICI); if (auto NewRangeCheck = widenICmpRangeCheck(ICI, Expander, Builder)) { Checks.push_back(NewRangeCheck.getValue()); Index: test/Transforms/LoopPredication/invariant_load.ll =================================================================== --- test/Transforms/LoopPredication/invariant_load.ll +++ test/Transforms/LoopPredication/invariant_load.ll @@ -78,8 +78,10 @@ ; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, i1* @UNKNOWN ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[UNKNOWN]]) [ "deopt"() ] ; CHECK-NEXT: [[LEN:%.*]] = load i32, i32* [[LENGTH:%.*]], align 4, !invariant.load !0 -; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], [[LEN]] -; CHECK-NEXT: call void (i1, ...)
@llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ] +; CHECK-NEXT: [[TMP0:%.*]] = icmp ule i32 [[N]], [[LEN]] +; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 0, [[LEN]] +; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[TMP1]], [[TMP0]] +; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ] ; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64 ; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]] ; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4 @@ -139,8 +141,10 @@ ; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, i1* @UNKNOWN ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[UNKNOWN]]) [ "deopt"() ] ; CHECK-NEXT: [[LEN:%.*]] = load i32, i32* @Length, align 4 -; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], [[LEN]] -; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ] +; CHECK-NEXT: [[TMP0:%.*]] = icmp ule i32 [[N]], [[LEN]] +; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 0, [[LEN]] +; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[TMP1]], [[TMP0]] +; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 9) [ "deopt"() ] ; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64 ; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]] ; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4