Index: llvm/include/llvm/Analysis/ScalarEvolution.h =================================================================== --- llvm/include/llvm/Analysis/ScalarEvolution.h +++ llvm/include/llvm/Analysis/ScalarEvolution.h @@ -1487,6 +1487,9 @@ return Pair.first->second; } + /// Update no-wrap flags of an AddRec. + void setNoWrapFlags(SCEVAddRecExpr *AddRec, SCEV::NoWrapFlags Flags); + /// Determine the range for a particular SCEV. /// NOTE: This returns a reference to an entry in a cache. It must be /// copied if its needed for longer. Index: llvm/lib/Analysis/ScalarEvolution.cpp =================================================================== --- llvm/lib/Analysis/ScalarEvolution.cpp +++ llvm/lib/Analysis/ScalarEvolution.cpp @@ -1451,7 +1451,7 @@ if (!AR->hasNoUnsignedWrap()) { auto NewFlags = proveNoWrapViaConstantRanges(AR); - const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags); + setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags); } // If we have special knowledge that this addrec won't overflow, @@ -1500,7 +1500,7 @@ SCEV::FlagAnyWrap, Depth + 1); if (ZAdd == OperandExtendedAdd) { // Cache knowledge of AR NUW, which is propagated to this AddRec. - const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); + setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW); // Return the expression with the addrec on the outside. return getAddRecExpr( getExtendAddRecStart(AR, Ty, this, @@ -1519,7 +1519,7 @@ if (ZAdd == OperandExtendedAdd) { // Cache knowledge of AR NW, which is propagated to this AddRec. // Negative step causes unsigned wrap, but it still can't self-wrap. - const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); + setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW); // Return the expression with the addrec on the outside. return getAddRecExpr( getExtendAddRecStart(AR, Ty, this, @@ -1551,7 +1551,7 @@ isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) { // Cache knowledge of AR NUW, which is propagated to this // AddRec. 
- const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); + setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW); // Return the expression with the addrec on the outside. return getAddRecExpr( getExtendAddRecStart(AR, Ty, this, @@ -1567,7 +1567,7 @@ // Cache knowledge of AR NW, which is propagated to this // AddRec. Negative step causes unsigned wrap, but it // still can't self-wrap. - const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); + setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW); // Return the expression with the addrec on the outside. return getAddRecExpr( getExtendAddRecStart(AR, Ty, this, @@ -1596,7 +1596,7 @@ } if (proveNoWrapByVaryingStart(Start, Step, L)) { - const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); + setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW); return getAddRecExpr( getExtendAddRecStart(AR, Ty, this, Depth + 1), getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); @@ -1795,7 +1795,7 @@ if (!AR->hasNoSignedWrap()) { auto NewFlags = proveNoWrapViaConstantRanges(AR); - const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags); + setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags); } // If we have special knowledge that this addrec won't overflow, @@ -1844,7 +1844,7 @@ SCEV::FlagAnyWrap, Depth + 1); if (SAdd == OperandExtendedAdd) { // Cache knowledge of AR NSW, which is propagated to this AddRec. - const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); + setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW); // Return the expression with the addrec on the outside. return getAddRecExpr( getExtendAddRecStart(AR, Ty, this, @@ -1869,7 +1869,7 @@ // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=> // (SAdd == OperandExtendedAdd => AR is NW) - const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); + setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW); // Return the expression with the addrec on the outside. 
return getAddRecExpr( @@ -1903,7 +1903,7 @@ (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || isKnownOnEveryIteration(Pred, AR, OverflowLimit))) { // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec. - const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); + setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW); return getAddRecExpr( getExtendAddRecStart(AR, Ty, this, Depth + 1), getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); @@ -1928,7 +1928,7 @@ } if (proveNoWrapByVaryingStart(Start, Step, L)) { - const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); + setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW); return getAddRecExpr( getExtendAddRecStart(AR, Ty, this, Depth + 1), getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); @@ -2586,7 +2586,7 @@ UniqueSCEVs.InsertNode(S, IP); addToLoopUseLists(S); } - S->setNoWrapFlags(Flags); + setNoWrapFlags(S, Flags); return S; } @@ -5352,6 +5352,15 @@ return None; } +void ScalarEvolution::setNoWrapFlags(SCEVAddRecExpr *AddRec, + SCEV::NoWrapFlags Flags) { + if (AddRec->getNoWrapFlags(Flags) != Flags) { + AddRec->setNoWrapFlags(Flags); + UnsignedRanges.erase(AddRec); + SignedRanges.erase(AddRec); + } +} + /// Determine the range for a particular SCEV. If SignHint is /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges /// with a "cleaner" unsigned (resp. signed) representation. 
Index: llvm/test/Analysis/ScalarEvolution/srem.ll =================================================================== --- llvm/test/Analysis/ScalarEvolution/srem.ll +++ llvm/test/Analysis/ScalarEvolution/srem.ll @@ -29,7 +29,7 @@ ; CHECK-NEXT: %add = add nsw i32 %2, %call ; CHECK-NEXT: --> (%2 + %call) U: full-set S: full-set Exits: <> LoopDispositions: { %for.cond: Variant } ; CHECK-NEXT: %inc = add nsw i32 %i.0, 1 -; CHECK-NEXT: --> {1,+,1}<%for.cond> U: full-set S: full-set Exits: (1 + %width) LoopDispositions: { %for.cond: Computable } +; CHECK-NEXT: --> {1,+,1}<%for.cond> U: [1,0) S: [1,0) Exits: (1 + %width) LoopDispositions: { %for.cond: Computable } ; CHECK-NEXT: Determining loop execution counts for: @_Z4loopi ; CHECK-NEXT: Loop %for.cond: backedge-taken count is %width ; CHECK-NEXT: Loop %for.cond: max backedge-taken count is -1 Index: llvm/test/Transforms/IndVarSimplify/floating-point-iv.ll =================================================================== --- llvm/test/Transforms/IndVarSimplify/floating-point-iv.ll +++ llvm/test/Transforms/IndVarSimplify/floating-point-iv.ll @@ -150,7 +150,7 @@ ; CHECK-NEXT: [[INDVAR_CONV:%.*]] = sitofp i32 [[TMP11_INT]] to double ; CHECK-NEXT: [[TMP12]] = fadd double [[TMP10]], [[INDVAR_CONV]] ; CHECK-NEXT: [[TMP13_INT]] = add nuw nsw i32 [[TMP11_INT]], 1 -; CHECK-NEXT: [[TMP14:%.*]] = icmp slt i32 [[TMP13_INT]], 99999 +; CHECK-NEXT: [[TMP14:%.*]] = icmp ult i32 [[TMP13_INT]], 99999 ; CHECK-NEXT: br i1 [[TMP14]], label [[BB22]], label [[BB6:%.*]] ; CHECK: bb22: ; CHECK-NEXT: br i1 true, label [[BB8]], label [[BB6]] @@ -189,7 +189,7 @@ ; CHECK-NEXT: [[INDVAR_CONV:%.*]] = sitofp i32 [[TMP11_INT]] to float ; CHECK-NEXT: [[TMP12]] = fadd float [[TMP10]], [[INDVAR_CONV]] ; CHECK-NEXT: [[TMP13_INT]] = add nuw nsw i32 [[TMP11_INT]], 1 -; CHECK-NEXT: [[TMP14:%.*]] = icmp slt i32 [[TMP13_INT]], 99999 +; CHECK-NEXT: [[TMP14:%.*]] = icmp ult i32 [[TMP13_INT]], 99999 ; CHECK-NEXT: br i1 [[TMP14]], label [[BB22]], label 
[[BB6:%.*]] ; CHECK: bb22: ; CHECK-NEXT: br i1 true, label [[BB8]], label [[BB6]] @@ -229,7 +229,7 @@ ; CHECK-NEXT: [[INDVAR_CONV:%.*]] = sitofp i32 [[TMP11_INT]] to float ; CHECK-NEXT: [[TMP12]] = fadd float [[TMP10]], [[INDVAR_CONV]] ; CHECK-NEXT: [[TMP13_INT]] = add nuw nsw i32 [[TMP11_INT]], 1 -; CHECK-NEXT: [[TMP14:%.*]] = icmp slt i32 [[TMP13_INT]], 99999 +; CHECK-NEXT: [[TMP14:%.*]] = icmp ult i32 [[TMP13_INT]], 99999 ; CHECK-NEXT: br i1 [[TMP14]], label [[BB22]], label [[BB6:%.*]] ; CHECK: bb22: ; CHECK-NEXT: br i1 true, label [[BB8]], label [[BB6]]