
Commit dc80366

Author: Max Kazantsev
Committed: Jun 15, 2017
[ScalarEvolution] Apply Depth limit to getMulExpr
This is a fix for PR33292, which shows a case of extremely long compilation of a single .c file with clang, with most of the time spent within SCEV.

We have a mechanism for limiting recursion depth in getAddExpr to avoid long analysis in SCEV. However, there are calls from getAddExpr to getMulExpr and back that do not propagate the depth information. As a result, a chain getAddExpr -> ... -> getAddExpr -> getMulExpr -> getAddExpr -> ... -> getAddExpr can be extremely long, with every segment of getAddExpr calls being up to the maximum depth long. This leads either to long compilation times or to a crash by stack overflow. We hit this situation while analyzing the big SCEVs in the test of PR33292.

This patch applies the same maximum-expression-depth limit to both getAddExpr and getMulExpr.

Differential Revision: https://reviews.llvm.org/D33984

llvm-svn: 305463
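
To illustrate the failure mode, here is a minimal sketch of the pattern the patch enforces; the names (Expr, simplifyAdd, simplifyMul, MaxDepth) are made up for illustration and this is not the LLVM code itself. Two mutually recursive simplifiers stay bounded only if the depth counter is threaded across both directions of the recursion; dropping it at the add/mul boundary lets each new segment restart from zero.

// Illustrative sketch only -- not LLVM code.
struct Expr {};                          // stand-in for the real expression type
static const unsigned MaxDepth = 32;     // analogous to the SCEV depth limit

const Expr *simplifyMul(const Expr *E, unsigned Depth);

const Expr *simplifyAdd(const Expr *E, unsigned Depth) {
  if (Depth > MaxDepth)
    return E;                            // too deep: stop folding, return as-is
  // ... folding logic that may create multiplications ...
  return simplifyMul(E, Depth + 1);      // propagate the counter, never reset it
}

const Expr *simplifyMul(const Expr *E, unsigned Depth) {
  if (Depth > MaxDepth)
    return E;
  // ... folding logic that may create additions ...
  return simplifyAdd(E, Depth + 1);
}
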
1 parent: 3adc408

4 files changed: +148/-71 lines

llvm/include/llvm/Analysis/ScalarEvolution.h (+21/-11)
@@ -1214,26 +1214,31 @@ class ScalarEvolution {
                          SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
                          unsigned Depth = 0);
   const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS,
-                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
+                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+                         unsigned Depth = 0) {
     SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
-    return getAddExpr(Ops, Flags);
+    return getAddExpr(Ops, Flags, Depth);
   }
   const SCEV *getAddExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
-                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
+                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+                         unsigned Depth = 0) {
     SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
-    return getAddExpr(Ops, Flags);
+    return getAddExpr(Ops, Flags, Depth);
   }
   const SCEV *getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
-                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
+                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+                         unsigned Depth = 0);
   const SCEV *getMulExpr(const SCEV *LHS, const SCEV *RHS,
-                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
+                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+                         unsigned Depth = 0) {
     SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
-    return getMulExpr(Ops, Flags);
+    return getMulExpr(Ops, Flags, Depth);
   }
   const SCEV *getMulExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
-                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
+                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+                         unsigned Depth = 0) {
     SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
-    return getMulExpr(Ops, Flags);
+    return getMulExpr(Ops, Flags, Depth);
   }
   const SCEV *getUDivExpr(const SCEV *LHS, const SCEV *RHS);
   const SCEV *getUDivExactExpr(const SCEV *LHS, const SCEV *RHS);
@@ -1287,7 +1292,8 @@ class ScalarEvolution {

   /// Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
   const SCEV *getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
-                           SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
+                           SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+                           unsigned Depth = 0);

   /// Return a SCEV corresponding to a conversion of the input value to the
   /// specified type. If the type must be extended, it is zero extended.
@@ -1693,10 +1699,14 @@ class ScalarEvolution {
   bool doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, bool IsSigned,
                           bool NoWrap);

-  /// Get add expr already created or create a new one
+  /// Get add expr already created or create a new one.
   const SCEV *getOrCreateAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                  SCEV::NoWrapFlags Flags);

+  /// Get mul expr already created or create a new one.
+  const SCEV *getOrCreateMulExpr(SmallVectorImpl<const SCEV *> &Ops,
+                                 SCEV::NoWrapFlags Flags);
+
 private:
   FoldingSet<SCEV> UniqueSCEVs;
   FoldingSet<SCEVPredicate> UniquePreds;
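
Note that every Depth parameter added above is defaulted to 0, so callers outside ScalarEvolution keep compiling and behaving exactly as before; only the recursive calls inside the analysis pass an incremented depth. A hypothetical external call site (SE, A and B are illustrative names):

  const SCEV *Sum  = SE.getAddExpr(A, B);                // Depth defaults to 0
  const SCEV *Prod = SE.getMulExpr(A, B, SCEV::FlagNUW); // likewise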

llvm/lib/Analysis/ScalarEvolution.cpp (+76/-53)
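
The first hunk below renames the depth-limiting option so that it now covers both add and mul simplification; the default stays at 32. It can be exercised from the command line the same way the regression test added at the bottom of this commit does, for example (input.ll is a placeholder file name):

  opt -scalar-evolution-max-arith-depth=0 -analyze -scalar-evolution input.ll
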
@@ -149,9 +149,9 @@ static cl::opt<unsigned> MaxValueCompareDepth(
     cl::init(2));

 static cl::opt<unsigned>
-    MaxAddExprDepth("scalar-evolution-max-addexpr-depth", cl::Hidden,
-                    cl::desc("Maximum depth of recursive AddExpr"),
-                    cl::init(32));
+    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
+                  cl::desc("Maximum depth of recursive arithmetics"),
+                  cl::init(32));

 static cl::opt<unsigned> MaxConstantEvolvingDepth(
     "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
@@ -2276,8 +2276,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
     if (Ops.size() == 1) return Ops[0];
   }

-  // Limit recursion calls depth
-  if (Depth > MaxAddExprDepth)
+  // Limit recursion calls depth.
+  if (Depth > MaxArithDepth)
     return getOrCreateAddExpr(Ops, Flags);

   // Okay, check to see if the same value occurs in the operand list more than
@@ -2293,7 +2293,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
         ++Count;
       // Merge the values into a multiply.
       const SCEV *Scale = getConstant(Ty, Count);
-      const SCEV *Mul = getMulExpr(Scale, Ops[i]);
+      const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
       if (Ops.size() == Count)
         return Mul;
       Ops[i] = Mul;
@@ -2343,7 +2343,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
           }
         }
         if (Ok)
-          LargeOps.push_back(getMulExpr(LargeMulOps));
+          LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
       } else {
         Ok = false;
         break;
@@ -2417,7 +2417,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
       if (MulOp.first != 0)
         Ops.push_back(getMulExpr(
             getConstant(MulOp.first),
-            getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1)));
+            getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
+            SCEV::FlagAnyWrap, Depth + 1));
     if (Ops.empty())
       return getZero(Ty);
     if (Ops.size() == 1)
@@ -2445,11 +2446,12 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
           SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                               Mul->op_begin()+MulOp);
           MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
-          InnerMul = getMulExpr(MulOps);
+          InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
         }
         SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
         const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
-        const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
+        const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
+                                          SCEV::FlagAnyWrap, Depth + 1);
         if (Ops.size() == 2) return OuterMul;
         if (AddOp < Idx) {
           Ops.erase(Ops.begin()+AddOp);
@@ -2478,19 +2480,20 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
             SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                 Mul->op_begin()+MulOp);
             MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
-            InnerMul1 = getMulExpr(MulOps);
+            InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
           }
           const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
           if (OtherMul->getNumOperands() != 2) {
             SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
                                                 OtherMul->op_begin()+OMulOp);
             MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
-            InnerMul2 = getMulExpr(MulOps);
+            InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
           }
           SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
           const SCEV *InnerMulSum =
               getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
-          const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
+          const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
+                                            SCEV::FlagAnyWrap, Depth + 1);
           if (Ops.size() == 2) return OuterMul;
           Ops.erase(Ops.begin()+Idx);
           Ops.erase(Ops.begin()+OtherMulIdx-1);
@@ -2621,6 +2624,27 @@ ScalarEvolution::getOrCreateAddExpr(SmallVectorImpl<const SCEV *> &Ops,
   return S;
 }

+const SCEV *
+ScalarEvolution::getOrCreateMulExpr(SmallVectorImpl<const SCEV *> &Ops,
+                                    SCEV::NoWrapFlags Flags) {
+  FoldingSetNodeID ID;
+  ID.AddInteger(scMulExpr);
+  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+    ID.AddPointer(Ops[i]);
+  void *IP = nullptr;
+  SCEVMulExpr *S =
+    static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
+  if (!S) {
+    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
+                                        O, Ops.size());
+    UniqueSCEVs.InsertNode(S, IP);
+  }
+  S->setNoWrapFlags(Flags);
+  return S;
+}
+
 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
   uint64_t k = i*j;
   if (j > 1 && k / j != i) Overflow = true;
@@ -2673,7 +2697,8 @@ static bool containsConstantSomewhere(const SCEV *StartExpr) {

 /// Get a canonical multiply expression, or something simpler if possible.
 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
-                                        SCEV::NoWrapFlags Flags) {
+                                        SCEV::NoWrapFlags Flags,
+                                        unsigned Depth) {
   assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
          "only nuw or nsw allowed");
   assert(!Ops.empty() && "Cannot get empty mul!");
@@ -2690,6 +2715,10 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,

   Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);

+  // Limit recursion calls depth.
+  if (Depth > MaxArithDepth)
+    return getOrCreateMulExpr(Ops, Flags);
+
   // If there are any constants, fold them together.
   unsigned Idx = 0;
   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
@@ -2701,8 +2730,11 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
       // apply this transformation as well.
       if (Add->getNumOperands() == 2)
         if (containsConstantSomewhere(Add))
-          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
-                            getMulExpr(LHSC, Add->getOperand(1)));
+          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
+                                       SCEV::FlagAnyWrap, Depth + 1),
+                            getMulExpr(LHSC, Add->getOperand(1),
+                                       SCEV::FlagAnyWrap, Depth + 1),
+                            SCEV::FlagAnyWrap, Depth + 1);

     ++Idx;
     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
@@ -2730,17 +2762,19 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
         SmallVector<const SCEV *, 4> NewOps;
         bool AnyFolded = false;
         for (const SCEV *AddOp : Add->operands()) {
-          const SCEV *Mul = getMulExpr(Ops[0], AddOp);
+          const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
+                                       Depth + 1);
           if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
           NewOps.push_back(Mul);
         }
         if (AnyFolded)
-          return getAddExpr(NewOps);
+          return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
       } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
         // Negation preserves a recurrence's no self-wrap property.
         SmallVector<const SCEV *, 4> Operands;
         for (const SCEV *AddRecOp : AddRec->operands())
-          Operands.push_back(getMulExpr(Ops[0], AddRecOp));
+          Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
+                                        Depth + 1));

         return getAddRecExpr(Operands, AddRec->getLoop(),
                              AddRec->getNoWrapFlags(SCEV::FlagNW));
@@ -2762,18 +2796,18 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
     while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
       if (Ops.size() > MulOpsInlineThreshold)
         break;
-      // If we have an mul, expand the mul operands onto the end of the operands
-      // list.
+      // If we have an mul, expand the mul operands onto the end of the
+      // operands list.
       Ops.erase(Ops.begin()+Idx);
       Ops.append(Mul->op_begin(), Mul->op_end());
       DeletedMul = true;
     }

-    // If we deleted at least one mul, we added operands to the end of the list,
-    // and they are not necessarily sorted. Recurse to resort and resimplify
-    // any operands we just acquired.
+    // If we deleted at least one mul, we added operands to the end of the
+    // list, and they are not necessarily sorted. Recurse to resort and
+    // resimplify any operands we just acquired.
     if (DeletedMul)
-      return getMulExpr(Ops);
+      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
   }

   // If there are any add recurrences in the operands list, see if any other
@@ -2784,8 +2818,8 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,

   // Scan over all recurrences, trying to fold loop invariants into them.
   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
-    // Scan all of the other operands to this mul and add them to the vector if
-    // they are loop invariant w.r.t. the recurrence.
+    // Scan all of the other operands to this mul and add them to the vector
+    // if they are loop invariant w.r.t. the recurrence.
     SmallVector<const SCEV *, 8> LIOps;
     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
     const Loop *AddRecLoop = AddRec->getLoop();
@@ -2801,9 +2835,10 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
       // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
       SmallVector<const SCEV *, 4> NewOps;
       NewOps.reserve(AddRec->getNumOperands());
-      const SCEV *Scale = getMulExpr(LIOps);
+      const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
       for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
-        NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
+        NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
+                                    SCEV::FlagAnyWrap, Depth + 1));

       // Build the new addrec. Propagate the NUW and NSW flags if both the
       // outer mul and the inner addrec are guaranteed to have no overflow.
@@ -2822,12 +2857,12 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
           Ops[i] = NewRec;
           break;
         }
-        return getMulExpr(Ops);
+        return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
       }

-    // Okay, if there weren't any loop invariants to be folded, check to see if
-    // there are multiple AddRec's with the same loop induction variable being
-    // multiplied together. If so, we can fold them.
+    // Okay, if there weren't any loop invariants to be folded, check to see
+    // if there are multiple AddRec's with the same loop induction variable
+    // being multiplied together. If so, we can fold them.

     // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
     // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
@@ -2869,7 +2904,9 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
             const SCEV *CoeffTerm = getConstant(Ty, Coeff);
             const SCEV *Term1 = AddRec->getOperand(y-z);
             const SCEV *Term2 = OtherAddRec->getOperand(z);
-            Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1,Term2));
+            Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1, Term2,
+                                               SCEV::FlagAnyWrap, Depth + 1),
+                              SCEV::FlagAnyWrap, Depth + 1);
           }
         }
         AddRecOps.push_back(Term);
@@ -2887,30 +2924,15 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
       }
     }
     if (OpsModified)
-      return getMulExpr(Ops);
+      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);

     // Otherwise couldn't fold anything into this recurrence. Move onto the
     // next one.
   }

   // Okay, it looks like we really DO need an mul expr. Check to see if we
   // already have one, otherwise create a new one.
-  FoldingSetNodeID ID;
-  ID.AddInteger(scMulExpr);
-  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
-    ID.AddPointer(Ops[i]);
-  void *IP = nullptr;
-  SCEVMulExpr *S =
-    static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
-  if (!S) {
-    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
-    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
-    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
-                                        O, Ops.size());
-    UniqueSCEVs.InsertNode(S, IP);
-  }
-  S->setNoWrapFlags(Flags);
-  return S;
+  return getOrCreateMulExpr(Ops, Flags);
 }

 /// Get a canonical unsigned division expression, or something simpler if
@@ -3713,7 +3735,8 @@ const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
 }

 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
-                                          SCEV::NoWrapFlags Flags) {
+                                          SCEV::NoWrapFlags Flags,
+                                          unsigned Depth) {
   // Fast path: X - X --> 0.
   if (LHS == RHS)
     return getZero(LHS->getType());
@@ -3747,7 +3770,7 @@ const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
   // larger scope than intended.
   auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;

-  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags);
+  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
 }

 const SCEV *

llvm/lib/Transforms/Utils/SimplifyIndVar.cpp (+7/-7)
@@ -352,7 +352,7 @@ bool SimplifyIndvar::eliminateOverflowIntrinsic(CallInst *CI) {
     return false;

   typedef const SCEV *(ScalarEvolution::*OperationFunctionTy)(
-      const SCEV *, const SCEV *, SCEV::NoWrapFlags);
+      const SCEV *, const SCEV *, SCEV::NoWrapFlags, unsigned);
   typedef const SCEV *(ScalarEvolution::*ExtensionFunctionTy)(
       const SCEV *, Type *);

@@ -406,10 +406,11 @@ bool SimplifyIndvar::eliminateOverflowIntrinsic(CallInst *CI) {
       IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2);

   const SCEV *A =
-      (SE->*Extension)((SE->*Operation)(LHS, RHS, SCEV::FlagAnyWrap), WideTy);
+      (SE->*Extension)((SE->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0u),
+                       WideTy);
   const SCEV *B =
       (SE->*Operation)((SE->*Extension)(LHS, WideTy),
-                       (SE->*Extension)(RHS, WideTy), SCEV::FlagAnyWrap);
+                       (SE->*Extension)(RHS, WideTy), SCEV::FlagAnyWrap, 0u);

   if (A != B)
     return false;
@@ -530,8 +531,7 @@ bool SimplifyIndvar::strengthenOverflowingOperation(BinaryOperator *BO,
     return false;

   const SCEV *(ScalarEvolution::*GetExprForBO)(const SCEV *, const SCEV *,
-                                               SCEV::NoWrapFlags);
-
+                                               SCEV::NoWrapFlags, unsigned);
   switch (BO->getOpcode()) {
   default:
     return false;
@@ -560,7 +560,7 @@ bool SimplifyIndvar::strengthenOverflowingOperation(BinaryOperator *BO,
     const SCEV *ExtendAfterOp = SE->getZeroExtendExpr(SE->getSCEV(BO), WideTy);
     const SCEV *OpAfterExtend = (SE->*GetExprForBO)(
         SE->getZeroExtendExpr(LHS, WideTy), SE->getZeroExtendExpr(RHS, WideTy),
-        SCEV::FlagAnyWrap);
+        SCEV::FlagAnyWrap, 0u);
     if (ExtendAfterOp == OpAfterExtend) {
       BO->setHasNoUnsignedWrap();
       SE->forgetValue(BO);
@@ -572,7 +572,7 @@ bool SimplifyIndvar::strengthenOverflowingOperation(BinaryOperator *BO,
     const SCEV *ExtendAfterOp = SE->getSignExtendExpr(SE->getSCEV(BO), WideTy);
     const SCEV *OpAfterExtend = (SE->*GetExprForBO)(
         SE->getSignExtendExpr(LHS, WideTy), SE->getSignExtendExpr(RHS, WideTy),
-        SCEV::FlagAnyWrap);
+        SCEV::FlagAnyWrap, 0u);
     if (ExtendAfterOp == OpAfterExtend) {
       BO->setHasNoSignedWrap();
       SE->forgetValue(BO);
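
The SimplifyIndVar changes are required because these call sites go through pointers to member functions: default arguments are not part of a function's type, so the typedefs must spell out the new unsigned parameter and each call must pass 0u explicitly. A small standalone illustration (S, f and callThroughPointer are made-up names):

// Illustration only: defaults are invisible through a pointer-to-member-function.
struct S {
  int f(int x, unsigned depth = 0) { return x + static_cast<int>(depth); }
};

int callThroughPointer(S &s) {
  typedef int (S::*FnTy)(int, unsigned); // must list 'unsigned' explicitly
  FnTy Fn = &S::f;
  return (s.*Fn)(42, 0u);                // the default value cannot be used here
}
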
New test file (+44/-0)
@@ -0,0 +1,44 @@
+; RUN: opt -scalar-evolution-max-arith-depth=0 -analyze -scalar-evolution < %s | FileCheck %s
+
+; Check that depth set to 0 prevents getAddExpr and getMulExpr from making
+; transformations in SCEV. We expect the result to be very straightforward.
+
+define void @test_add(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f) {
+; CHECK-LABEL: @test_add
+; CHECK: %s2 = add i32 %s1, %p3
+; CHECK-NEXT: --> (%a + %a + %b + %b + %c + %c + %d + %d + %e + %e + %f + %f)
+  %tmp0 = add i32 %a, %b
+  %tmp1 = add i32 %b, %c
+  %tmp2 = add i32 %c, %d
+  %tmp3 = add i32 %d, %e
+  %tmp4 = add i32 %e, %f
+  %tmp5 = add i32 %f, %a
+
+  %p1 = add i32 %tmp0, %tmp3
+  %p2 = add i32 %tmp1, %tmp4
+  %p3 = add i32 %tmp2, %tmp5
+
+  %s1 = add i32 %p1, %p2
+  %s2 = add i32 %s1, %p3
+  ret void
+}
+
+define void @test_mul(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f) {
+; CHECK-LABEL: @test_mul
+; CHECK: %s2 = mul i32 %s1, %p3
+; CHECK-NEXT: --> (2 * 3 * 4 * 5 * 6 * 7 * %a * %b * %c * %d * %e * %f)
+  %tmp0 = mul i32 %a, 2
+  %tmp1 = mul i32 %b, 3
+  %tmp2 = mul i32 %c, 4
+  %tmp3 = mul i32 %d, 5
+  %tmp4 = mul i32 %e, 6
+  %tmp5 = mul i32 %f, 7
+
+  %p1 = mul i32 %tmp0, %tmp3
+  %p2 = mul i32 %tmp1, %tmp4
+  %p3 = mul i32 %tmp2, %tmp5
+
+  %s1 = mul i32 %p1, %p2
+  %s2 = mul i32 %s1, %p3
+  ret void
+}
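
The RUN line at the top of this test can be reproduced by hand once the patched compiler is built; the test's file name is not shown in this extract, so test.ll below is a placeholder for it:

  opt -scalar-evolution-max-arith-depth=0 -analyze -scalar-evolution < test.ll | FileCheck test.ll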
