Index: include/llvm/Analysis/ScalarEvolutionExpressions.h
===================================================================
--- include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -350,9 +350,7 @@
 
     /// Return an expression representing the value of this expression
     /// one iteration of the loop ahead.
-    const SCEVAddRecExpr *getPostIncExpr(ScalarEvolution &SE) const {
-      return cast<SCEVAddRecExpr>(SE.getAddExpr(this, getStepRecurrence(SE)));
-    }
+    const SCEVAddRecExpr *getPostIncExpr(ScalarEvolution &SE) const;
 
     /// Methods for support type inquiry through isa, cast, and dyn_cast:
     static bool classof(const SCEV *S) {
Index: lib/Analysis/ScalarEvolution.cpp
===================================================================
--- lib/Analysis/ScalarEvolution.cpp
+++ lib/Analysis/ScalarEvolution.cpp
@@ -10215,6 +10215,30 @@
   return SE.getCouldNotCompute();
 }
 
+const SCEVAddRecExpr *
+SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
+  assert(getNumOperands() > 1 && "AddRec with zero step?");
+  const SCEV *Step = getStepRecurrence(SE);
+  // There is a temptation to just call SE.getAddExpr(this, Step), but in this
+  // case we cannot guarantee that the value returned will be an AddRec. So we
+  // make it explicitly. Given that the last op of this is non zero, it will
+  // also be non-zero if we add this and Step one by one operand. This does
+  // give us a guarantee of result being SCEVAddRec.
+  SmallVector<const SCEV *, 4> Ops;
+  if (const SCEVAddRecExpr *StepAR = dyn_cast<SCEVAddRecExpr>(Step)) {
+    assert(StepAR->getNumOperands() < this->getNumOperands() &&
+           "Step of an AddRec is not less complex than the AddRec?");
+    for (unsigned i = 0, e = StepAR->getNumOperands(); i < e; ++i)
+      Ops.push_back(SE.getAddExpr(getOperand(i), StepAR->getOperand(i)));
+    Ops.insert(Ops.end(), op_begin() + StepAR->getNumOperands(), op_end());
+  } else {
+    Ops.push_back(SE.getAddExpr(getOperand(0), Step));
+    Ops.insert(Ops.end(), op_begin() + 1, op_end());
+  }
+  return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
+                                               SCEV::FlagAnyWrap));
+}
+
 // Return true when S contains at least an undef value.
 static inline bool containsUndefs(const SCEV *S) {
   return SCEVExprContains(S, [](const SCEV *S) {
Index: test/Analysis/ScalarEvolution/pr35890.ll
===================================================================
--- /dev/null
+++ test/Analysis/ScalarEvolution/pr35890.ll
@@ -0,0 +1,44 @@
+; RUN: opt < %s -scalar-evolution-max-arith-depth=0 -indvars -S | FileCheck %s
+
+target datalayout = "e-m:e-i32:64-f80:128-n8:16:32:64-S128-ni:1"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Check that it does not crash because SCEVAddRec's step is not an AddRec.
+
+define void @pr35890(i32* %inc_ptr, i32 %a) {
+
+; CHECK-LABEL: @pr35890(
+
+entry:
+  %inc = load i32, i32* %inc_ptr, !range !0
+  %ne.cond = icmp ne i32 %inc, 0
+  br i1 %ne.cond, label %loop, label %bail
+
+loop:
+  %iv = phi i32 [ 0, %entry ], [ %iv.next, %backedge ]
+  %a.1 = add i32 %a, 1
+  %iv.next = add i32 %iv, %a.1
+  %iv.wide = zext i32 %iv to i64
+  %iv.square = mul i64 %iv.wide, %iv.wide
+  %iv.cube = mul i64 %iv.square, %iv.wide
+  %brcond = icmp slt i64 %iv.wide, %iv.cube
+  br i1 %brcond, label %if.true, label %if.false
+
+if.true:
+  br label %backedge
+
+if.false:
+  br label %backedge
+
+backedge:
+  %loopcond = icmp slt i32 %iv, 200
+  br i1 %loopcond, label %loop, label %exit
+
+exit:
+  ret void
+
+bail:
+  ret void
+}
+
+!0 = !{i32 0, i32 100}