diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -9911,42 +9911,7 @@
   if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
     return true;
 
-  for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
-       DTN != HeaderDTN; DTN = DTN->getIDom()) {
-    assert(DTN && "should reach the loop header before reaching the root!");
-
-    BasicBlock *BB = DTN->getBlock();
-    if (isImpliedViaGuard(BB, Pred, LHS, RHS))
-      return true;
-
-    BasicBlock *PBB = BB->getSinglePredecessor();
-    if (!PBB)
-      continue;
-
-    BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
-    if (!ContinuePredicate || !ContinuePredicate->isConditional())
-      continue;
-
-    Value *Condition = ContinuePredicate->getCondition();
-
-    // If we have an edge `E` within the loop body that dominates the only
-    // latch, the condition guarding `E` also guards the backedge. This
-    // reasoning works only for loops with a single latch.
-
-    BasicBlockEdge DominatingEdge(PBB, BB);
-    if (DominatingEdge.isSingleEdge()) {
-      // We're constructively (and conservatively) enumerating edges within the
-      // loop body that dominate the latch. The dominator tree better agree
-      // with us on this:
-      assert(DT.dominates(DominatingEdge, Latch) && "should be!");
-
-      if (isImpliedCond(Pred, LHS, RHS, Condition,
-                        BB != ContinuePredicate->getSuccessor(0)))
-        return true;
-    }
-  }
-
-  return false;
+  return isKnownPredicateAt(Pred, LHS, RHS, Latch->getTerminator());
 }
 
 bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB,
diff --git a/llvm/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll b/llvm/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll
--- a/llvm/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll
@@ -69,7 +69,7 @@
 ; CHECK-NEXT: [[UGLYGEP2:%.*]] = bitcast i8* [[UGLYGEP]] to i16*
 ; CHECK-NEXT: [[TMP29:%.*]] = load i16, i16* [[LSR_IV810]], align 2
 ; CHECK-NEXT: store i16 [[TMP29]], i16* [[UGLYGEP2]], align 2
-; CHECK-NEXT: [[LSR_IV_NEXT]] = add i64 [[LSR_IV]], 2
+; CHECK-NEXT: [[LSR_IV_NEXT]] = add nuw nsw i64 [[LSR_IV]], 2
 ; CHECK-NEXT: [[LSR_IV_NEXT3:%.*]] = inttoptr i64 [[LSR_IV_NEXT]] to i16*
 ; CHECK-NEXT: [[SCEVGEP9:%.*]] = getelementptr [33 x i16], [33 x i16]* [[LSR_IV8]], i64 0, i64 1
 ; CHECK-NEXT: [[TMP3]] = bitcast i16* [[SCEVGEP9]] to [33 x i16]*
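
Note (not part of the patch): the rewritten body forwards the whole backedge query to isKnownPredicateAt at the latch terminator instead of hand-walking the dominator tree from the latch to the header and pattern-matching single-predecessor dominating edges. The sketch below shows roughly how that query can be exercised standalone; it assumes an LLVM checkout from around this change, linking against the Core, AsmParser, and Analysis libraries, and uses a made-up IR function @f purely for illustration, not a definitive harness.

// Sketch only, not part of the patch: exercise ScalarEvolution::isKnownPredicateAt
// directly on a hand-written single-latch loop.
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  SMDiagnostic Err;
  // A single-block loop whose latch branch tests iv.next < n.
  std::unique_ptr<Module> M = parseAssemblyString(R"IR(
    define void @f(i64 %n) {
    entry:
      br label %loop
    loop:
      %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
      %iv.next = add nsw i64 %iv, 1
      %cmp = icmp slt i64 %iv.next, %n
      br i1 %cmp, label %loop, label %exit
    exit:
      ret void
    }
  )IR", Err, Ctx);
  if (!M) {
    Err.print("scev-sketch", errs());
    return 1;
  }

  // Build the analyses ScalarEvolution depends on.
  Function &F = *M->getFunction("f");
  TargetLibraryInfoImpl TLII;
  TargetLibraryInfo TLI(TLII);
  AssumptionCache AC(F);
  DominatorTree DT(F);
  LoopInfo LI(DT);
  ScalarEvolution SE(F, TLI, AC, DT, LI);

  Loop *L = *LI.begin();
  BasicBlock *Latch = L->getLoopLatch();
  const SCEV *IVNext = SE.getSCEV(F.getValueSymbolTable()->lookup("iv.next"));
  const SCEV *N = SE.getSCEV(F.getArg(0));

  // Ask whether iv.next s< n is known to hold at the latch terminator; this is
  // the kind of context-sensitive query the patch substitutes for the removed
  // dominator-tree walk.
  bool Known = SE.isKnownPredicateAt(ICmpInst::ICMP_SLT, IVNext, N,
                                     Latch->getTerminator());
  outs() << "known at latch terminator: " << Known << "\n";
  return 0;
}

The updated CHECK line in post-inc-icmpzero.ll appears to be a downstream effect of the stronger query: the nuw/nsw flags on the LSR-produced increment can now be inferred, so the autogenerated check is regenerated accordingly.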