diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -1684,11 +1684,6 @@
   unsigned BitWidth = getTypeSizeInBits(AR->getType());
   const Loop *L = AR->getLoop();
 
-  if (!AR->hasNoUnsignedWrap()) {
-    auto NewFlags = proveNoWrapViaConstantRanges(AR);
-    setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
-  }
-
   // If we have special knowledge that this addrec won't overflow,
   // we don't need to do any further analysis.
   if (AR->hasNoUnsignedWrap()) {
@@ -2045,11 +2040,6 @@
   unsigned BitWidth = getTypeSizeInBits(AR->getType());
   const Loop *L = AR->getLoop();
 
-  if (!AR->hasNoSignedWrap()) {
-    auto NewFlags = proveNoWrapViaConstantRanges(AR);
-    setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
-  }
-
   // If we have special knowledge that this addrec won't overflow,
   // we don't need to do any further analysis.
   if (AR->hasNoSignedWrap()) {
diff --git a/llvm/test/Transforms/IndVarSimplify/lftr-reuse.ll b/llvm/test/Transforms/IndVarSimplify/lftr-reuse.ll
--- a/llvm/test/Transforms/IndVarSimplify/lftr-reuse.ll
+++ b/llvm/test/Transforms/IndVarSimplify/lftr-reuse.ll
@@ -146,7 +146,7 @@
 ; CHECK-NEXT:    [[VECTORP:%.*]] = getelementptr inbounds [0 x double], ptr [[VECTOR:%.*]], i32 0, i64 [[INDVARS_IV2]]
 ; CHECK-NEXT:    [[V2:%.*]] = load double, ptr [[VECTORP]], align 8
 ; CHECK-NEXT:    call void @use(double [[V2]])
-; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], [[TMP0]]
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], [[TMP0]]
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT3]] = add nuw nsw i64 [[INDVARS_IV2]], 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT3]], [[WIDE_TRIP_COUNT]]
 ; CHECK-NEXT:    br i1 [[EXITCOND]], label [[LOOP]], label [[RETURN_LOOPEXIT:%.*]]