Index: include/llvm/Analysis/ScalarEvolution.h
===================================================================
--- include/llvm/Analysis/ScalarEvolution.h
+++ include/llvm/Analysis/ScalarEvolution.h
@@ -1214,26 +1214,31 @@
                          SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
                          unsigned Depth = 0);
   const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS,
-                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
+                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+                         unsigned Depth = 0) {
     SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
-    return getAddExpr(Ops, Flags);
+    return getAddExpr(Ops, Flags, Depth);
   }
   const SCEV *getAddExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
-                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
+                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+                         unsigned Depth = 0) {
     SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
-    return getAddExpr(Ops, Flags);
+    return getAddExpr(Ops, Flags, Depth);
   }
   const SCEV *getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
-                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
+                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+                         unsigned Depth = 0);
   const SCEV *getMulExpr(const SCEV *LHS, const SCEV *RHS,
-                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
+                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+                         unsigned Depth = 0) {
     SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
-    return getMulExpr(Ops, Flags);
+    return getMulExpr(Ops, Flags, Depth);
   }
   const SCEV *getMulExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
-                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
+                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+                         unsigned Depth = 0) {
     SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
-    return getMulExpr(Ops, Flags);
+    return getMulExpr(Ops, Flags, Depth);
   }
   const SCEV *getUDivExpr(const SCEV *LHS, const SCEV *RHS);
   const SCEV *getUDivExactExpr(const SCEV *LHS, const SCEV *RHS);
@@ -1287,7 +1292,8 @@

   /// Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
   const SCEV *getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
-                           SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
+                           SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+                           unsigned Depth = 0);

   /// Return a SCEV corresponding to a conversion of the input value to the
   /// specified type. If the type must be extended, it is zero extended.
@@ -1693,10 +1699,14 @@
   bool doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
                           bool IsSigned, bool NoWrap);

-  /// Get add expr already created or create a new one
+  /// Get add expr already created or create a new one.
   const SCEV *getOrCreateAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                  SCEV::NoWrapFlags Flags);

+  /// Get mul expr already created or create a new one.
+  const SCEV *getOrCreateMulExpr(SmallVectorImpl<const SCEV *> &Ops,
+                                 SCEV::NoWrapFlags Flags);
+
 private:
   FoldingSet<SCEV> UniqueSCEVs;
   FoldingSet<SCEVPredicate> UniquePreds;
Index: lib/Analysis/ScalarEvolution.cpp
===================================================================
--- lib/Analysis/ScalarEvolution.cpp
+++ lib/Analysis/ScalarEvolution.cpp
@@ -149,9 +149,9 @@
                         cl::init(2));

 static cl::opt<unsigned>
-    MaxAddExprDepth("scalar-evolution-max-addexpr-depth", cl::Hidden,
-                    cl::desc("Maximum depth of recursive AddExpr"),
-                    cl::init(32));
+    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
+                  cl::desc("Maximum depth of recursive arithmetics"),
+                  cl::init(32));

 static cl::opt<unsigned> MaxConstantEvolvingDepth(
     "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
@@ -2276,8 +2276,8 @@
     if (Ops.size() == 1) return Ops[0];
   }

-  // Limit recursion calls depth
-  if (Depth > MaxAddExprDepth)
+  // Limit recursion calls depth.
+  if (Depth > MaxArithDepth)
     return getOrCreateAddExpr(Ops, Flags);

   // Okay, check to see if the same value occurs in the operand list more than
@@ -2293,7 +2293,7 @@
         ++Count;
       // Merge the values into a multiply.
       const SCEV *Scale = getConstant(Ty, Count);
-      const SCEV *Mul = getMulExpr(Scale, Ops[i]);
+      const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
       if (Ops.size() == Count)
         return Mul;
       Ops[i] = Mul;
@@ -2343,7 +2343,7 @@
         }
       }
       if (Ok)
-        LargeOps.push_back(getMulExpr(LargeMulOps));
+        LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
     } else {
       Ok = false;
       break;
@@ -2417,7 +2417,8 @@
       if (MulOp.first != 0)
         Ops.push_back(getMulExpr(
             getConstant(MulOp.first),
-            getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1)));
+            getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
+            SCEV::FlagAnyWrap, Depth + 1));
     if (Ops.empty())
       return getZero(Ty);
     if (Ops.size() == 1)
       return Ops[0];
@@ -2445,11 +2446,12 @@
           SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                               Mul->op_begin()+MulOp);
           MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
-          InnerMul = getMulExpr(MulOps);
+          InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
         }
         SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
         const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
-        const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
+        const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
+                                          SCEV::FlagAnyWrap, Depth + 1);
         if (Ops.size() == 2) return OuterMul;
         if (AddOp < Idx) {
           Ops.erase(Ops.begin()+AddOp);
@@ -2478,19 +2480,20 @@
             SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                 Mul->op_begin()+MulOp);
             MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
-            InnerMul1 = getMulExpr(MulOps);
+            InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
           }
           const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
           if (OtherMul->getNumOperands() != 2) {
             SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
                                                 OtherMul->op_begin()+OMulOp);
             MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
-            InnerMul2 = getMulExpr(MulOps);
+            InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
           }
           SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
           const SCEV *InnerMulSum =
               getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
-          const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
+          const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
+                                            SCEV::FlagAnyWrap, Depth + 1);
           if (Ops.size() == 2) return OuterMul;
           Ops.erase(Ops.begin()+Idx);
           Ops.erase(Ops.begin()+OtherMulIdx-1);
@@ -2621,6 +2624,27 @@
   return S;
 }

+const SCEV *
+ScalarEvolution::getOrCreateMulExpr(SmallVectorImpl<const SCEV *> &Ops,
+                                    SCEV::NoWrapFlags Flags) {
+  FoldingSetNodeID ID;
+  ID.AddInteger(scMulExpr);
+  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+    ID.AddPointer(Ops[i]);
+  void *IP = nullptr;
+  SCEVMulExpr *S =
+      static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
+  if (!S) {
+    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
+                                        O, Ops.size());
+    UniqueSCEVs.InsertNode(S, IP);
+  }
+  S->setNoWrapFlags(Flags);
+  return S;
+}
+
 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
   uint64_t k = i*j;
   if (j > 1 && k / j != i) Overflow = true;
@@ -2673,7 +2697,8 @@

 /// Get a canonical multiply expression, or something simpler if possible.
 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
-                                        SCEV::NoWrapFlags Flags) {
+                                        SCEV::NoWrapFlags Flags,
+                                        unsigned Depth) {
   assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
          "only nuw or nsw allowed");
   assert(!Ops.empty() && "Cannot get empty mul!");
@@ -2690,6 +2715,10 @@

   Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);

+  // Limit recursion calls depth.
+  if (Depth > MaxArithDepth)
+    return getOrCreateMulExpr(Ops, Flags);
+
   // If there are any constants, fold them together.
   unsigned Idx = 0;
   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
@@ -2701,8 +2730,11 @@
       // apply this transformation as well.
       if (Add->getNumOperands() == 2)
         if (containsConstantSomewhere(Add))
-          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
-                            getMulExpr(LHSC, Add->getOperand(1)));
+          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
+                                       SCEV::FlagAnyWrap, Depth + 1),
+                            getMulExpr(LHSC, Add->getOperand(1),
+                                       SCEV::FlagAnyWrap, Depth + 1),
+                            SCEV::FlagAnyWrap, Depth + 1);

     ++Idx;
     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
@@ -2730,17 +2762,19 @@
         SmallVector<const SCEV *, 4> NewOps;
         bool AnyFolded = false;
         for (const SCEV *AddOp : Add->operands()) {
-          const SCEV *Mul = getMulExpr(Ops[0], AddOp);
+          const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
+                                       Depth + 1);
           if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
           NewOps.push_back(Mul);
         }
         if (AnyFolded)
-          return getAddExpr(NewOps);
+          return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
       } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
         // Negation preserves a recurrence's no self-wrap property.
         SmallVector<const SCEV *, 4> Operands;
         for (const SCEV *AddRecOp : AddRec->operands())
-          Operands.push_back(getMulExpr(Ops[0], AddRecOp));
+          Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
+                                        Depth + 1));

         return getAddRecExpr(Operands, AddRec->getLoop(),
                              AddRec->getNoWrapFlags(SCEV::FlagNW));
@@ -2762,18 +2796,18 @@
     while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
       if (Ops.size() > MulOpsInlineThreshold)
         break;
-      // If we have an mul, expand the mul operands onto the end of the operands
-      // list.
+      // If we have an mul, expand the mul operands onto the end of the
+      // operands list.
       Ops.erase(Ops.begin()+Idx);
       Ops.append(Mul->op_begin(), Mul->op_end());
       DeletedMul = true;
     }

-    // If we deleted at least one mul, we added operands to the end of the list,
-    // and they are not necessarily sorted.  Recurse to resort and resimplify
-    // any operands we just acquired.
+    // If we deleted at least one mul, we added operands to the end of the
+    // list, and they are not necessarily sorted.  Recurse to resort and
+    // resimplify any operands we just acquired.
     if (DeletedMul)
-      return getMulExpr(Ops);
+      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
   }

   // If there are any add recurrences in the operands list, see if any other
@@ -2784,8 +2818,8 @@

   // Scan over all recurrences, trying to fold loop invariants into them.
   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
-    // Scan all of the other operands to this mul and add them to the vector if
-    // they are loop invariant w.r.t. the recurrence.
+    // Scan all of the other operands to this mul and add them to the vector
+    // if they are loop invariant w.r.t. the recurrence.
     SmallVector<const SCEV *, 8> LIOps;
     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
     const Loop *AddRecLoop = AddRec->getLoop();
@@ -2801,9 +2835,10 @@
       // NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
       SmallVector<const SCEV *, 4> NewOps;
       NewOps.reserve(AddRec->getNumOperands());
-      const SCEV *Scale = getMulExpr(LIOps);
+      const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
       for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
-        NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
+        NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
+                                    SCEV::FlagAnyWrap, Depth + 1));

       // Build the new addrec.  Propagate the NUW and NSW flags if both the
       // outer mul and the inner addrec are guaranteed to have no overflow.
@@ -2822,12 +2857,12 @@
           Ops[i] = NewRec;
         break;
       }
-      return getMulExpr(Ops);
+      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
     }

-    // Okay, if there weren't any loop invariants to be folded, check to see if
-    // there are multiple AddRec's with the same loop induction variable being
-    // multiplied together.  If so, we can fold them.
+    // Okay, if there weren't any loop invariants to be folded, check to see
+    // if there are multiple AddRec's with the same loop induction variable
+    // being multiplied together.  If so, we can fold them.

     // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
     // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
@@ -2869,7 +2904,9 @@
             const SCEV *CoeffTerm = getConstant(Ty, Coeff);
             const SCEV *Term1 = AddRec->getOperand(y-z);
             const SCEV *Term2 = OtherAddRec->getOperand(z);
-            Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1, Term2));
+            Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1, Term2,
+                                               SCEV::FlagAnyWrap, Depth + 1),
+                              SCEV::FlagAnyWrap, Depth + 1);
           }
         }
         AddRecOps.push_back(Term);
@@ -2887,7 +2924,7 @@
       }
     }
     if (OpsModified)
-      return getMulExpr(Ops);
+      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);

     // Otherwise couldn't fold anything into this recurrence.  Move onto the
     // next one.
@@ -2895,22 +2932,7 @@

   // Okay, it looks like we really DO need an mul expr.  Check to see if we
   // already have one, otherwise create a new one.
-  FoldingSetNodeID ID;
-  ID.AddInteger(scMulExpr);
-  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
-    ID.AddPointer(Ops[i]);
-  void *IP = nullptr;
-  SCEVMulExpr *S =
-      static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
-  if (!S) {
-    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
-    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
-    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
-                                        O, Ops.size());
-    UniqueSCEVs.InsertNode(S, IP);
-  }
-  S->setNoWrapFlags(Flags);
-  return S;
+  return getOrCreateMulExpr(Ops, Flags);
 }

 /// Get a canonical unsigned division expression, or something simpler if
@@ -3713,7 +3735,8 @@
 }

 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
-                                          SCEV::NoWrapFlags Flags) {
+                                          SCEV::NoWrapFlags Flags,
+                                          unsigned Depth) {
   // Fast path: X - X --> 0.
   if (LHS == RHS)
     return getZero(LHS->getType());
@@ -3747,7 +3770,7 @@
   // larger scope than intended.
   auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
-  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags);
+  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
 }

 const SCEV *
Index: lib/Transforms/Utils/SimplifyIndVar.cpp
===================================================================
--- lib/Transforms/Utils/SimplifyIndVar.cpp
+++ lib/Transforms/Utils/SimplifyIndVar.cpp
@@ -352,7 +352,7 @@
     return false;

   typedef const SCEV *(ScalarEvolution::*OperationFunctionTy)(
-      const SCEV *, const SCEV *, SCEV::NoWrapFlags);
+      const SCEV *, const SCEV *, SCEV::NoWrapFlags, unsigned);
   typedef const SCEV *(ScalarEvolution::*ExtensionFunctionTy)(
       const SCEV *, Type *);

@@ -406,10 +406,11 @@
       IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2);
   const SCEV *A =
-      (SE->*Extension)((SE->*Operation)(LHS, RHS, SCEV::FlagAnyWrap), WideTy);
+      (SE->*Extension)((SE->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0u),
+                       WideTy);
   const SCEV *B = (SE->*Operation)((SE->*Extension)(LHS, WideTy),
-                                   (SE->*Extension)(RHS, WideTy), SCEV::FlagAnyWrap);
+                                   (SE->*Extension)(RHS, WideTy), SCEV::FlagAnyWrap, 0u);

   if (A != B)
     return false;
@@ -530,8 +531,7 @@
     return false;

   const SCEV *(ScalarEvolution::*GetExprForBO)(const SCEV *, const SCEV *,
-                                               SCEV::NoWrapFlags);
-
+                                               SCEV::NoWrapFlags, unsigned);
   switch (BO->getOpcode()) {
   default:
     return false;
@@ -560,7 +560,7 @@
   const SCEV *ExtendAfterOp = SE->getZeroExtendExpr(SE->getSCEV(BO), WideTy);
   const SCEV *OpAfterExtend = (SE->*GetExprForBO)(
       SE->getZeroExtendExpr(LHS, WideTy), SE->getZeroExtendExpr(RHS, WideTy),
-      SCEV::FlagAnyWrap);
+      SCEV::FlagAnyWrap, 0u);
   if (ExtendAfterOp == OpAfterExtend) {
     BO->setHasNoUnsignedWrap();
     SE->forgetValue(BO);
@@ -572,7 +572,7 @@
   const SCEV *ExtendAfterOp = SE->getSignExtendExpr(SE->getSCEV(BO), WideTy);
   const SCEV *OpAfterExtend = (SE->*GetExprForBO)(
       SE->getSignExtendExpr(LHS, WideTy), SE->getSignExtendExpr(RHS, WideTy),
-      SCEV::FlagAnyWrap);
+      SCEV::FlagAnyWrap, 0u);
   if (ExtendAfterOp == OpAfterExtend) {
     BO->setHasNoSignedWrap();
     SE->forgetValue(BO);
Index: test/Analysis/ScalarEvolution/limit-depth.ll
===================================================================
--- test/Analysis/ScalarEvolution/limit-depth.ll
+++ test/Analysis/ScalarEvolution/limit-depth.ll
@@ -0,0 +1,44 @@
+; RUN: opt -scalar-evolution-max-arith-depth=0 -analyze -scalar-evolution < %s | FileCheck %s
+
+; Check that depth set to 0 prevents getAddExpr and getMulExpr from making
+; transformations in SCEV. We expect the result to be very straightforward.
+
+define void @test_add(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f) {
+; CHECK-LABEL: @test_add
+; CHECK: %s2 = add i32 %s1, %p3
+; CHECK-NEXT: --> (%a + %a + %b + %b + %c + %c + %d + %d + %e + %e + %f + %f)
+  %tmp0 = add i32 %a, %b
+  %tmp1 = add i32 %b, %c
+  %tmp2 = add i32 %c, %d
+  %tmp3 = add i32 %d, %e
+  %tmp4 = add i32 %e, %f
+  %tmp5 = add i32 %f, %a
+
+  %p1 = add i32 %tmp0, %tmp3
+  %p2 = add i32 %tmp1, %tmp4
+  %p3 = add i32 %tmp2, %tmp5
+
+  %s1 = add i32 %p1, %p2
+  %s2 = add i32 %s1, %p3
+  ret void
+}
+
+define void @test_mul(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f) {
+; CHECK-LABEL: @test_mul
+; CHECK: %s2 = mul i32 %s1, %p3
+; CHECK-NEXT: --> (2 * 3 * 4 * 5 * 6 * 7 * %a * %b * %c * %d * %e * %f)
+  %tmp0 = mul i32 %a, 2
+  %tmp1 = mul i32 %b, 3
+  %tmp2 = mul i32 %c, 4
+  %tmp3 = mul i32 %d, 5
+  %tmp4 = mul i32 %e, 6
+  %tmp5 = mul i32 %f, 7
+
+  %p1 = mul i32 %tmp0, %tmp3
+  %p2 = mul i32 %tmp1, %tmp4
+  %p3 = mul i32 %tmp2, %tmp5
+
+  %s1 = mul i32 %p1, %p2
+  %s2 = mul i32 %s1, %p3
+  ret void
+}
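Note for reviewers: the mechanical part of this patch is threading an explicit Depth counter through the mutually recursive simplifiers (getAddExpr, getMulExpr, getMinusSCEV) and, once Depth exceeds -scalar-evolution-max-arith-depth, falling back to the uniquing-only helpers getOrCreateAddExpr/getOrCreateMulExpr, which build the node without further folding. The standalone C++ sketch below illustrates only that pattern; it is not LLVM code, and all names in it (Expr, simplifyAdd, simplifyMul, the MaxArithDepth constant) are hypothetical stand-ins.

#include <cstdio>
#include <utility>
#include <vector>

namespace {

// Illustrative cap, analogous to -scalar-evolution-max-arith-depth.
constexpr unsigned MaxArithDepth = 32;

// Toy expression node; a crude stand-in for SCEV's uniqued expressions.
struct Expr {
  enum Kind { Const, Add, Mul } K;
  long Value;            // used when K == Const
  std::vector<Expr> Ops; // used when K == Add or K == Mul
};

Expr simplifyMul(std::vector<Expr> Ops, unsigned Depth);

// Add-simplification may call into Mul-simplification with Depth + 1;
// this mutual recursion is exactly what the patch bounds.
Expr simplifyAdd(std::vector<Expr> Ops, unsigned Depth) {
  // Past the cap, build the node without further folding: the analogue
  // of bailing out to getOrCreateAddExpr. Still correct, just less canonical.
  if (Depth > MaxArithDepth)
    return Expr{Expr::Add, 0, std::move(Ops)};

  // One representative folding step: recursively simplify Mul operands.
  for (Expr &Op : Ops)
    if (Op.K == Expr::Mul)
      Op = simplifyMul(std::move(Op.Ops), Depth + 1);
  return Expr{Expr::Add, 0, std::move(Ops)};
}

Expr simplifyMul(std::vector<Expr> Ops, unsigned Depth) {
  if (Depth > MaxArithDepth) // analogue of getOrCreateMulExpr
    return Expr{Expr::Mul, 0, std::move(Ops)};
  for (Expr &Op : Ops)
    if (Op.K == Expr::Add)
      Op = simplifyAdd(std::move(Op.Ops), Depth + 1);
  return Expr{Expr::Mul, 0, std::move(Ops)};
}

} // namespace

int main() {
  // (1 + 2) * (3 + 4) as a toy tree. Top-level calls start at Depth 0,
  // matching the explicit 0u passed at the SimplifyIndVar call sites.
  Expr Sum1{Expr::Add, 0, {Expr{Expr::Const, 1, {}}, Expr{Expr::Const, 2, {}}}};
  Expr Sum2{Expr::Add, 0, {Expr{Expr::Const, 3, {}}, Expr{Expr::Const, 4, {}}}};
  Expr Product = simplifyMul({Sum1, Sum2}, 0u);
  std::printf("kind=%d operands=%zu\n", (int)Product.K, Product.Ops.size());
  return 0;
}

The new test drives the cap to its extreme via the RUN line's -scalar-evolution-max-arith-depth=0: the outermost getAddExpr/getMulExpr call may still flatten its operand list, but every recursive re-canonicalization call sees Depth == 1 and takes the bail-out path. Since operand sorting happens before the depth check while constant folding happens after it, the expected SCEVs come out ordered yet unfolded, which is why test_mul checks for the factors 2 * 3 * 4 * 5 * 6 * 7 rather than 5040.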