diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp --- a/llvm/lib/Analysis/ScalarEvolution.cpp +++ b/llvm/lib/Analysis/ScalarEvolution.cpp @@ -3192,9 +3192,20 @@ for (const SCEV *AddRecOp : AddRec->operands()) Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap, Depth + 1)); - + // Let M be the minimum representable signed value. AddRec with nsw + // multiplied by -1 can have signed overflow if and only if it takes a + // value of M: M * (-1) would stay M and (M + 1) * (-1) would be the + // maximum signed value. In all other cases signed overflow is + // impossible. + auto FlagsMask = SCEV::FlagNW; + if (hasFlags(AddRec->getNoWrapFlags(), SCEV::FlagNSW)) { + auto MinInt = + APInt::getSignedMinValue(getTypeSizeInBits(AddRec->getType())); + if (getSignedRangeMin(AddRec) != MinInt) + FlagsMask = setFlags(FlagsMask, SCEV::FlagNSW); + } return getAddRecExpr(Operands, AddRec->getLoop(), - AddRec->getNoWrapFlags(SCEV::FlagNW)); + AddRec->getNoWrapFlags(FlagsMask)); } } } diff --git a/llvm/test/Analysis/ScalarEvolution/addrec-sub-nsw.ll b/llvm/test/Analysis/ScalarEvolution/addrec-sub-nsw.ll --- a/llvm/test/Analysis/ScalarEvolution/addrec-sub-nsw.ll +++ b/llvm/test/Analysis/ScalarEvolution/addrec-sub-nsw.ll @@ -44,7 +44,7 @@ ; CHECK-NEXT: %i = phi i32 [ 0, %entry ], [ %i.next, %loop ] ; CHECK-NEXT: --> {0,+,1}<%loop> U: [0,2147483647) S: [0,2147483647) Exits: (-1 + (1 smax %n)) LoopDispositions: { %loop: Computable } ; CHECK-NEXT: %minus.i = mul i32 %i, -1 -; CHECK-NEXT: --> {0,+,-1}<%loop> U: [-2147483646,1) S: [-2147483646,1) Exits: (1 + (-1 * (1 smax %n))) LoopDispositions: { %loop: Computable } +; CHECK-NEXT: --> {0,+,-1}<nsw><%loop> U: [-2147483646,1) S: [-2147483646,1) Exits: (1 + (-1 * (1 smax %n))) LoopDispositions: { %loop: Computable } ; CHECK-NEXT: %a = sub i32 %minus.n, %minus.i ; CHECK-NEXT: --> {(-1 * %n),+,1}<%loop> U: full-set S: full-set Exits: (-1 + (-1 * %n) + (1 smax %n)) LoopDispositions: { %loop: 
Computable } ; CHECK-NEXT: %i.next = add nuw nsw i32 %i, 1 diff --git a/llvm/test/Analysis/ScalarEvolution/decrementing_addrecs.ll b/llvm/test/Analysis/ScalarEvolution/decrementing_addrecs.ll --- a/llvm/test/Analysis/ScalarEvolution/decrementing_addrecs.ll +++ b/llvm/test/Analysis/ScalarEvolution/decrementing_addrecs.ll @@ -40,7 +40,7 @@ ; DEFAULT-NEXT: %b = sub i32 %n.minus.1, %i ; DEFAULT-NEXT: --> {(-1 + %n),+,-1}<%loop> U: full-set S: full-set Exits: 0 LoopDispositions: { %loop: Computable } ; DEFAULT-NEXT: %c = sub i32 2147483647, %i -; DEFAULT-NEXT: --> {2147483647,+,-1}<%loop> U: [1,-2147483648) S: [1,-2147483648) Exits: (-2147483648 + (-1 * %n)) LoopDispositions: { %loop: Computable } +; DEFAULT-NEXT: --> {2147483647,+,-1}<nsw><%loop> U: [1,-2147483648) S: [1,-2147483648) Exits: (-2147483648 + (-1 * %n)) LoopDispositions: { %loop: Computable } ; DEFAULT-NEXT: %i.next = add nuw nsw i32 %i, 1 ; DEFAULT-NEXT: --> {1,+,1}<%loop> U: [1,-2147483648) S: [1,-2147483648) Exits: %n LoopDispositions: { %loop: Computable } ; DEFAULT-NEXT: %j.next = add nsw i32 %j, -1 @@ -66,7 +66,7 @@ ; EXPENSIVE_SHARPENING-NEXT: %b = sub i32 %n.minus.1, %i ; EXPENSIVE_SHARPENING-NEXT: --> {(-1 + %n),+,-1}<%loop> U: [0,2147483647) S: [0,2147483647) Exits: 0 LoopDispositions: { %loop: Computable } ; EXPENSIVE_SHARPENING-NEXT: %c = sub i32 2147483647, %i -; EXPENSIVE_SHARPENING-NEXT: --> {2147483647,+,-1}<%loop> U: [1,-2147483648) S: [1,-2147483648) Exits: (-2147483648 + (-1 * %n)) LoopDispositions: { %loop: Computable } +; EXPENSIVE_SHARPENING-NEXT: --> {2147483647,+,-1}<nsw><%loop> U: [1,-2147483648) S: [1,-2147483648) Exits: (-2147483648 + (-1 * %n)) LoopDispositions: { %loop: Computable } ; EXPENSIVE_SHARPENING-NEXT: %i.next = add nuw nsw i32 %i, 1 ; EXPENSIVE_SHARPENING-NEXT: --> {1,+,1}<%loop> U: [1,-2147483648) S: [1,-2147483648) Exits: %n LoopDispositions: { %loop: Computable } ; EXPENSIVE_SHARPENING-NEXT: %j.next = add nsw i32 %j, -1 diff --git 
a/llvm/test/Analysis/ScalarEvolution/different-loops-recs.ll b/llvm/test/Analysis/ScalarEvolution/different-loops-recs.ll --- a/llvm/test/Analysis/ScalarEvolution/different-loops-recs.ll +++ b/llvm/test/Analysis/ScalarEvolution/different-loops-recs.ll @@ -275,7 +275,7 @@ ; CHECK: %tmp9 = sext i8 %tmp8 to i64 ; CHECK-NEXT: --> (sext i8 %tmp8 to i64) U: [-128,128) S: [-128,128) ; CHECK: %tmp10 = sub i64 %tmp9, %tmp7 -; CHECK-NEXT: --> ((sext i8 %tmp8 to i64) + {-2,+,-1}<%loop2>) U: [9223372036854775682,126) S: [9223372036854775682,126) +; CHECK-NEXT: --> ((sext i8 %tmp8 to i64) + {-2,+,-1}<nsw><%loop2>) U: [9223372036854775682,126) S: [9223372036854775682,126) ; CHECK: %tmp11 = add i64 %tmp10, undef ; CHECK-NEXT: --> ((sext i8 %tmp8 to i64) + {(-2 + undef),+,-1}<%loop2>) ; CHECK: %tmp13 = trunc i64 %tmp11 to i32 diff --git a/llvm/test/Analysis/ScalarEvolution/sext-iv-2.ll b/llvm/test/Analysis/ScalarEvolution/sext-iv-2.ll --- a/llvm/test/Analysis/ScalarEvolution/sext-iv-2.ll +++ b/llvm/test/Analysis/ScalarEvolution/sext-iv-2.ll @@ -3,7 +3,7 @@ ; CHECK: %tmp3 = sext i8 %tmp2 to i32 ; CHECK: --> (sext i8 {0,+,1}<%bb1> to i32) U: [-128,128) S: [-128,128) Exits: -1 ; CHECK: %tmp4 = mul i32 %tmp3, %i.02 -; CHECK: --> ((sext i8 {0,+,1}<%bb1> to i32) * {0,+,1}<%bb>) U: [-3968,3938) S: [-3968,3938) Exits: {0,+,-1}<%bb> +; CHECK: --> ((sext i8 {0,+,1}<%bb1> to i32) * {0,+,1}<%bb>) U: [-3968,3938) S: [-3968,3938) Exits: {0,+,-1}<nsw><%bb> ; These sexts are not foldable.