Index: llvm/lib/Analysis/ValueTracking.cpp
===================================================================
--- llvm/lib/Analysis/ValueTracking.cpp
+++ llvm/lib/Analysis/ValueTracking.cpp
@@ -4084,8 +4084,11 @@
     OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
   KnownBits Known = computeKnownBits(
       V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
-  ConstantRange CR = computeConstantRange(V, UseInstrInfo);
-  return ConstantRange::fromKnownBits(Known, ForSigned).intersectWith(CR);
+  ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
+  ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
+  ConstantRange::PreferredRangeType RangeType =
+      ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
+  return CR1.intersectWith(CR2, RangeType);
 }
 
 OverflowResult llvm::computeOverflowForUnsignedAdd(
@@ -4130,12 +4133,10 @@
       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
     return OverflowResult::NeverOverflows;
 
-  KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
-  KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
-  ConstantRange LHSRange =
-      ConstantRange::fromKnownBits(LHSKnown, /*signed*/ true);
-  ConstantRange RHSRange =
-      ConstantRange::fromKnownBits(RHSKnown, /*signed*/ true);
+  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
+      LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
+  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
+      RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
   OverflowResult OR =
       mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
   if (OR != OverflowResult::MayOverflow)
@@ -4151,11 +4152,11 @@
   // The only other way to improve on the known bits is from an assumption, so
   // call computeKnownBitsFromAssume() directly.
   bool LHSOrRHSKnownNonNegative =
-      (LHSKnown.isNonNegative() || RHSKnown.isNonNegative());
+      (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
   bool LHSOrRHSKnownNegative =
-      (LHSKnown.isNegative() || RHSKnown.isNegative());
+      (LHSRange.isAllNegative() || RHSRange.isAllNegative());
   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
-    KnownBits AddKnown(LHSKnown.getBitWidth());
+    KnownBits AddKnown(LHSRange.getBitWidth());
     computeKnownBitsFromAssume(
         Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
     if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
@@ -4191,12 +4192,10 @@
       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
     return OverflowResult::NeverOverflows;
 
-  KnownBits LHSKnown = computeKnownBits(LHS, DL, 0, AC, CxtI, DT);
-  KnownBits RHSKnown = computeKnownBits(RHS, DL, 0, AC, CxtI, DT);
-  ConstantRange LHSRange =
-      ConstantRange::fromKnownBits(LHSKnown, /*signed*/ true);
-  ConstantRange RHSRange =
-      ConstantRange::fromKnownBits(RHSKnown, /*signed*/ true);
+  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
+      LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
+  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
+      RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
   return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
 }
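The gist of the change above: the helper now builds one range from known bits and one from computeConstantRange(), then intersects them with a PreferredRangeType matching the caller's signedness, so the signed overflow checks get the tightest signed range available rather than a known-bits-only range. Below is a minimal standalone sketch of that combination (not part of the patch; it assumes an LLVM development tree to build against, and the concrete ranges are invented for illustration):

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  // Suppose known bits prove only that the sign bit is clear...
  KnownBits Known(8);
  Known.Zero.setBit(7);
  ConstantRange CR1 = ConstantRange::fromKnownBits(Known, /*IsSigned=*/true);
  // ...while computeConstantRange() proved a signed interval [-50, 50)
  // (stand-in value; any range-producing fact would do).
  ConstantRange CR2(APInt(8, -50, /*isSigned=*/true),
                    APInt(8, 50, /*isSigned=*/true));
  // What the patched helper returns: prefer the signed interpretation.
  ConstantRange CR = CR1.intersectWith(CR2, ConstantRange::Signed);
  CR.print(outs());  // [0,50) -- tighter than either input alone
  outs() << "\n";
}

Since [0,50) is strictly smaller than both inputs, signedAddMayOverflow()/signedSubMayOverflow() can then prove no-overflow in cases neither source of information establishes by itself, which is where the test diffs below come from.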
Index: llvm/test/Transforms/InstCombine/and2.ll
===================================================================
--- llvm/test/Transforms/InstCombine/and2.ll
+++ llvm/test/Transforms/InstCombine/and2.ll
@@ -154,7 +154,7 @@
 ; CHECK-LABEL: @and1_lshr1_is_cmp_eq_0_multiuse(
 ; CHECK-NEXT:    [[SH:%.*]] = lshr i8 1, %x
 ; CHECK-NEXT:    [[AND:%.*]] = and i8 [[SH]], 1
-; CHECK-NEXT:    [[ADD:%.*]] = add nuw i8 [[SH]], [[AND]]
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i8 [[SH]], [[AND]]
 ; CHECK-NEXT:    ret i8 [[ADD]]
 ;
   %sh = lshr i8 1, %x
Index: llvm/test/Transforms/InstCombine/icmp-add.ll
===================================================================
--- llvm/test/Transforms/InstCombine/icmp-add.ll
+++ llvm/test/Transforms/InstCombine/icmp-add.ll
@@ -407,7 +407,7 @@
 ; CHECK-LABEL: @sum_ugt_op_uses(
 ; CHECK-NEXT:    [[X:%.*]] = sdiv i8 42, [[P1:%.*]]
 ; CHECK-NEXT:    [[Y:%.*]] = sdiv i8 42, [[P2:%.*]]
-; CHECK-NEXT:    [[A:%.*]] = add i8 [[X]], [[Y]]
+; CHECK-NEXT:    [[A:%.*]] = add nsw i8 [[X]], [[Y]]
 ; CHECK-NEXT:    store i8 [[A]], i8* [[P3:%.*]], align 1
 ; CHECK-NEXT:    [[C:%.*]] = icmp ugt i8 [[X]], [[A]]
 ; CHECK-NEXT:    ret i1 [[C]]
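These two test updates show the tighter ranges paying off as extra wrap flags on plain adds. In @sum_ugt_op_uses, each operand is `sdiv i8 42, %p`, which can only produce values in [-42, 42], so the sum stays in [-84, 84] and fits in i8. A sketch of that specific range check, under the same assumed LLVM setup as above:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
using namespace llvm;

int main() {
  // "sdiv i8 42, %p" is confined to [-42, 42]; ConstantRange is
  // half-open, so that is [-42, 43).
  ConstantRange Div(APInt(8, -42, /*isSigned=*/true),
                    APInt(8, 43, /*isSigned=*/true));
  // [-42,42] + [-42,42] = [-84,84], within i8: the add can carry nsw.
  bool NoSignedWrap = Div.signedAddMayOverflow(Div) ==
                      ConstantRange::OverflowResult::NeverOverflows;
  return NoSignedWrap ? 0 : 1;  // expect 0
}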
Index: llvm/test/Transforms/InstCombine/sadd-with-overflow.ll
===================================================================
--- llvm/test/Transforms/InstCombine/sadd-with-overflow.ll
+++ llvm/test/Transforms/InstCombine/sadd-with-overflow.ll
@@ -19,7 +19,8 @@
 
 define { i32, i1 } @fold_mixed_signs(i32 %x) {
 ; CHECK-LABEL: @fold_mixed_signs(
-; CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[X:%.*]], i32 6)
+; CHECK-NEXT:    [[B:%.*]] = add nsw i32 [[X:%.*]], 6
+; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 undef, i1 false }, i32 [[B]], 0
 ; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
 ;
   %a = add nsw i32 %x, 13
Index: llvm/test/Transforms/InstCombine/saturating-add-sub.ll
===================================================================
--- llvm/test/Transforms/InstCombine/saturating-add-sub.ll
+++ llvm/test/Transforms/InstCombine/saturating-add-sub.ll
@@ -391,7 +391,7 @@
 define i8 @test_scalar_sadd_srem_no_ov(i8 %a) {
 ; CHECK-LABEL: @test_scalar_sadd_srem_no_ov(
 ; CHECK-NEXT:    [[B:%.*]] = srem i8 [[A:%.*]], 100
-; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[B]], i8 28)
+; CHECK-NEXT:    [[R:%.*]] = add nsw i8 [[B]], 28
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %b = srem i8 %a, 100
@@ -414,7 +414,7 @@
 ; CHECK-LABEL: @test_scalar_sadd_srem_and_no_ov(
 ; CHECK-NEXT:    [[AA:%.*]] = srem i8 [[A:%.*]], 100
 ; CHECK-NEXT:    [[BB:%.*]] = and i8 [[B:%.*]], 15
-; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[AA]], i8 [[BB]])
+; CHECK-NEXT:    [[R:%.*]] = add nsw i8 [[AA]], [[BB]]
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %aa = srem i8 %a, 100
@@ -928,7 +928,7 @@
 ; CHECK-LABEL: @test_scalar_ssub_add_nsw_no_ov(
 ; CHECK-NEXT:    [[AA:%.*]] = add nsw i8 [[A:%.*]], 7
 ; CHECK-NEXT:    [[BB:%.*]] = and i8 [[B:%.*]], 7
-; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[AA]], i8 [[BB]])
+; CHECK-NEXT:    [[R:%.*]] = sub nsw i8 [[AA]], [[BB]]
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %aa = add nsw i8 %a, 7
@@ -954,7 +954,7 @@
 ; CHECK-LABEL: @test_vector_ssub_add_nsw_no_ov_splat(
 ; CHECK-NEXT:    [[AA:%.*]] = add nsw <2 x i8> [[A:%.*]],
 ; CHECK-NEXT:    [[BB:%.*]] = and <2 x i8> [[B:%.*]],
-; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[AA]], <2 x i8> [[BB]])
+; CHECK-NEXT:    [[R:%.*]] = sub nsw <2 x i8> [[AA]], [[BB]]
 ; CHECK-NEXT:    ret <2 x i8> [[R]]
 ;
   %aa = add nsw <2 x i8> %a,
@@ -967,7 +967,7 @@
 ; CHECK-LABEL: @test_vector_ssub_add_nsw_no_ov_nonsplat1(
 ; CHECK-NEXT:    [[AA:%.*]] = add nsw <2 x i8> [[A:%.*]],
 ; CHECK-NEXT:    [[BB:%.*]] = and <2 x i8> [[B:%.*]],
-; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[AA]], <2 x i8> [[BB]])
+; CHECK-NEXT:    [[R:%.*]] = sub nsw <2 x i8> [[AA]], [[BB]]
 ; CHECK-NEXT:    ret <2 x i8> [[R]]
 ;
   %aa = add nsw <2 x i8> %a,
Index: llvm/test/Transforms/InstCombine/ssub-with-overflow.ll
===================================================================
--- llvm/test/Transforms/InstCombine/ssub-with-overflow.ll
+++ llvm/test/Transforms/InstCombine/ssub-with-overflow.ll
@@ -22,9 +22,9 @@
 
 define { i32, i1 } @fold_mixed_signs(i32 %x) {
 ; CHECK-LABEL: @fold_mixed_signs(
-; CHECK-NEXT:    [[A:%.*]] = add nsw i32 [[X:%.*]], -13
-; CHECK-NEXT:    [[B:%.*]] = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 [[A]], i32 -7)
-; CHECK-NEXT:    ret { i32, i1 } [[B]]
+; CHECK-NEXT:    [[B:%.*]] = add nsw i32 [[X:%.*]], -6
+; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 undef, i1 false }, i32 [[B]], 0
+; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
 ;
   %a = sub nsw i32 %x, 13
   %b = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 -7)
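The saturating and with.overflow updates all follow one pattern: once the signed range arithmetic proves no overflow, @llvm.sadd.sat/@llvm.ssub.sat collapse to plain `add nsw`/`sub nsw`, and the with.overflow intrinsics become an nsw op paired with a constant-false overflow bit. Taking @test_scalar_sadd_srem_no_ov as the worked case: `srem i8 %a, 100` lies in [-99, 99], and [-99,99] + 28 = [-71, 127] still fits in i8. A sketch of that check, under the same assumed setup as the earlier snippets:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
using namespace llvm;

int main() {
  // "srem i8 %a, 100" yields [-99, 99]; half-open form is [-99, 100).
  ConstantRange Rem(APInt(8, -99, /*isSigned=*/true),
                    APInt(8, 100, /*isSigned=*/true));
  ConstantRange C28(APInt(8, 28));  // the constant operand of sadd.sat
  // [-99,99] + 28 = [-71,127]: never saturates, so sadd.sat -> add nsw.
  return Rem.signedAddMayOverflow(C28) ==
                 ConstantRange::OverflowResult::NeverOverflows
             ? 0
             : 1;  // expect 0
}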