Index: include/llvm/Analysis/ScalarEvolutionExpressions.h
===================================================================
--- include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -350,9 +350,7 @@
 
     /// Return an expression representing the value of this expression
     /// one iteration of the loop ahead.
-    const SCEVAddRecExpr *getPostIncExpr(ScalarEvolution &SE) const {
-      return cast<SCEVAddRecExpr>(SE.getAddExpr(this, getStepRecurrence(SE)));
-    }
+    const SCEVAddRecExpr *getPostIncExpr(ScalarEvolution &SE) const;
 
     /// Methods for support type inquiry through isa, cast, and dyn_cast:
     static bool classof(const SCEV *S) {
Index: lib/Analysis/ScalarEvolution.cpp
===================================================================
--- lib/Analysis/ScalarEvolution.cpp
+++ lib/Analysis/ScalarEvolution.cpp
@@ -10215,6 +10215,29 @@
   return SE.getCouldNotCompute();
 }
 
+const SCEVAddRecExpr *
+SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
+  assert(getNumOperands() > 1 && "AddRec with zero step?");
+  // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
+  // but in this case we cannot guarantee that the value returned will be an
+  // AddRec because SCEV does not have a fixed point where it stops
+  // simplification: it is legal to return ({rec1} + {rec2}). For example, it
+  // may happen if we reach arithmetic depth limit while simplifying. So we
+  // construct the returned value explicitly.
+  SmallVector<const SCEV *, 3> Ops;
+  // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
+  // (this + Step) is {A+B,+,B+C,+...,+,N}.
+  for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
+    Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
+  // We know that the last operand is not a constant zero (otherwise it would
+  // have been popped out earlier). This guarantees us that if the result has
+  // the same last operand, then it will also not be popped out, meaning that
+  // the returned value will be an AddRec.
+  Ops.push_back(getOperand(getNumOperands() - 1));
+  return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
+                                               SCEV::FlagAnyWrap));
+}
+
 // Return true when S contains at least an undef value.
 static inline bool containsUndefs(const SCEV *S) {
   return SCEVExprContains(S, [](const SCEV *S) {
Index: test/Analysis/ScalarEvolution/pr35890.ll
===================================================================
--- /dev/null
+++ test/Analysis/ScalarEvolution/pr35890.ll
@@ -0,0 +1,44 @@
+; RUN: opt < %s -scalar-evolution-max-arith-depth=0 -indvars -S | FileCheck %s
+
+target datalayout = "e-m:e-i32:64-f80:128-n8:16:32:64-S128-ni:1"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Check that it does not crash because SCEVAddRec's step is not an AddRec.
+
+define void @pr35890(i32* %inc_ptr, i32 %a) {
+
+; CHECK-LABEL: @pr35890(
+
+entry:
+  %inc = load i32, i32* %inc_ptr, !range !0
+  %ne.cond = icmp ne i32 %inc, 0
+  br i1 %ne.cond, label %loop, label %bail
+
+loop:
+  %iv = phi i32 [ 0, %entry ], [ %iv.next, %backedge ]
+  %a.1 = add i32 %a, 1
+  %iv.next = add i32 %iv, %a.1
+  %iv.wide = zext i32 %iv to i64
+  %iv.square = mul i64 %iv.wide, %iv.wide
+  %iv.cube = mul i64 %iv.square, %iv.wide
+  %brcond = icmp slt i64 %iv.wide, %iv.cube
+  br i1 %brcond, label %if.true, label %if.false
+
+if.true:
+  br label %backedge
+
+if.false:
+  br label %backedge
+
+backedge:
+  %loopcond = icmp slt i32 %iv, 200
+  br i1 %loopcond, label %loop, label %exit
+
+exit:
+  ret void
+
+bail:
+  ret void
+}
+
+!0 = !{i32 0, i32 100}