Index: lib/Analysis/ValueTracking.cpp =================================================================== --- lib/Analysis/ValueTracking.cpp +++ lib/Analysis/ValueTracking.cpp @@ -4044,21 +4044,17 @@ bool UseInstrInfo) { KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT, nullptr, UseInstrInfo); - if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) { - KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT, - nullptr, UseInstrInfo); - - if (LHSKnown.isNegative() && RHSKnown.isNegative()) { - // The sign bit is set in both cases: this MUST overflow. - return OverflowResult::AlwaysOverflows; - } - - if (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()) { - // The sign bit is clear in both cases: this CANNOT overflow. - return OverflowResult::NeverOverflows; - } - } + KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT, + nullptr, UseInstrInfo); + // a + b overflows iff a > ~b. Determine whether this is never/always true + // based on the min/max values achievable under the known bits constraint. + APInt MinLHS = LHSKnown.One, MaxLHS = ~LHSKnown.Zero; + APInt MinInvRHS = RHSKnown.Zero, MaxInvRHS = ~RHSKnown.One; + if (MaxLHS.ule(MinInvRHS)) + return OverflowResult::NeverOverflows; + if (MinLHS.ugt(MaxInvRHS)) + return OverflowResult::AlwaysOverflows; return OverflowResult::MayOverflow; } @@ -4172,18 +4168,16 @@ const Instruction *CxtI, const DominatorTree *DT) { KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT); - if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) { - KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT); - - // If the LHS is negative and the RHS is non-negative, no unsigned wrap. - if (LHSKnown.isNegative() && RHSKnown.isNonNegative()) - return OverflowResult::NeverOverflows; - - // If the LHS is non-negative and the RHS negative, we always wrap. 
- if (LHSKnown.isNonNegative() && RHSKnown.isNegative()) - return OverflowResult::AlwaysOverflows; - } + KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT); + // a - b overflows iff a < b. Determine whether this is never/always true + // based on the min/max values achievable under the known bits constraint. + APInt MinLHS = LHSKnown.One, MaxLHS = ~LHSKnown.Zero; + APInt MinRHS = RHSKnown.One, MaxRHS = ~RHSKnown.Zero; + if (MinLHS.uge(MaxRHS)) + return OverflowResult::NeverOverflows; + if (MaxLHS.ult(MinRHS)) + return OverflowResult::AlwaysOverflows; return OverflowResult::MayOverflow; } Index: test/Transforms/InstCombine/AddOverFlow.ll =================================================================== --- test/Transforms/InstCombine/AddOverFlow.ll +++ test/Transforms/InstCombine/AddOverFlow.ll @@ -73,7 +73,7 @@ } ; CHECK-LABEL: @ripple_nsw1 -; CHECK: add nsw i16 %a, %b +; CHECK: add nuw nsw i16 %a, %b define i16 @ripple_nsw1(i16 %x, i16 %y) { ; %a has at most one bit set %a = and i16 %y, 1 @@ -87,7 +87,7 @@ ; Like the previous test, but flip %a and %b ; CHECK-LABEL: @ripple_nsw2 -; CHECK: add nsw i16 %b, %a +; CHECK: add nuw nsw i16 %b, %a define i16 @ripple_nsw2(i16 %x, i16 %y) { %a = and i16 %y, 1 %b = and i16 %x, 49151 @@ -96,7 +96,7 @@ } ; CHECK-LABEL: @ripple_nsw3 -; CHECK: add nsw i16 %a, %b +; CHECK: add nuw nsw i16 %a, %b define i16 @ripple_nsw3(i16 %x, i16 %y) { %a = and i16 %y, 43691 %b = and i16 %x, 21843 @@ -106,7 +106,7 @@ ; Like the previous test, but flip %a and %b ; CHECK-LABEL: @ripple_nsw4 -; CHECK: add nsw i16 %b, %a +; CHECK: add nuw nsw i16 %b, %a define i16 @ripple_nsw4(i16 %x, i16 %y) { %a = and i16 %y, 43691 %b = and i16 %x, 21843 Index: test/Transforms/InstCombine/demand_shrink_nsw.ll =================================================================== --- test/Transforms/InstCombine/demand_shrink_nsw.ll +++ test/Transforms/InstCombine/demand_shrink_nsw.ll @@ -11,7 +11,7 @@ ; CHECK-NEXT: [[V35:%.*]] = add nuw nsw i32 
[[V34]], 1362915575 ; CHECK-NEXT: [[V40:%.*]] = shl nuw nsw i32 [[V34]], 1 ; CHECK-NEXT: [[V41:%.*]] = and i32 [[V40]], 290 -; CHECK-NEXT: [[V42:%.*]] = sub nsw i32 [[V35]], [[V41]] +; CHECK-NEXT: [[V42:%.*]] = sub nuw nsw i32 [[V35]], [[V41]] ; CHECK-NEXT: [[V43:%.*]] = add nuw i32 [[V42]], 1533579450 ; CHECK-NEXT: [[V45:%.*]] = xor i32 [[V43]], 749011377 ; CHECK-NEXT: ret i32 [[V45]] Index: test/Transforms/InstCombine/saturating-add-sub.ll =================================================================== --- test/Transforms/InstCombine/saturating-add-sub.ll +++ test/Transforms/InstCombine/saturating-add-sub.ll @@ -230,6 +230,46 @@ ret <2 x i8> %r } +define i8 @test_scalar_uadd_never_overflows(i8 %a) { +; CHECK-LABEL: @test_scalar_uadd_never_overflows( +; CHECK-NEXT: [[A_MASKED:%.*]] = and i8 [[A:%.*]], -127 +; CHECK-NEXT: [[R:%.*]] = add nuw nsw i8 [[A_MASKED]], 1 +; CHECK-NEXT: ret i8 [[R]] +; + %a_masked = and i8 %a, 129 + %r = call i8 @llvm.uadd.sat.i8(i8 %a_masked, i8 1) + ret i8 %r +} + +define <2 x i8> @test_vector_uadd_never_overflows(<2 x i8> %a) { +; CHECK-LABEL: @test_vector_uadd_never_overflows( +; CHECK-NEXT: [[A_MASKED:%.*]] = and <2 x i8> [[A:%.*]], <i8 -127, i8 -127> +; CHECK-NEXT: [[R:%.*]] = add nuw nsw <2 x i8> [[A_MASKED]], <i8 1, i8 1> +; CHECK-NEXT: ret <2 x i8> [[R]] +; + %a_masked = and <2 x i8> %a, <i8 129, i8 129> + %r = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a_masked, <2 x i8> <i8 1, i8 1>) + ret <2 x i8> %r +} + +define i8 @test_scalar_uadd_always_overflows(i8 %a) { +; CHECK-LABEL: @test_scalar_uadd_always_overflows( +; CHECK-NEXT: ret i8 -1 +; + %a_masked = or i8 %a, 192 + %r = call i8 @llvm.uadd.sat.i8(i8 %a_masked, i8 64) + ret i8 %r +} + +define <2 x i8> @test_vector_uadd_always_overflows(<2 x i8> %a) { +; CHECK-LABEL: @test_vector_uadd_always_overflows( +; CHECK-NEXT: ret <2 x i8> <i8 -1, i8 -1> +; + %a_masked = or <2 x i8> %a, <i8 192, i8 192> + %r = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a_masked, <2 x i8> <i8 64, i8 64>) + ret <2 x i8> %r +} + ; neg sadd nneg never overflows. 
define i8 @test_scalar_sadd_neg_nneg(i8 %a) { ; CHECK-LABEL: @test_scalar_sadd_neg_nneg( @@ -568,6 +608,46 @@ ret <2 x i8> %r } +define i8 @test_scalar_usub_never_overflows(i8 %a) { +; CHECK-LABEL: @test_scalar_usub_never_overflows( +; CHECK-NEXT: [[A_MASKED:%.*]] = or i8 [[A:%.*]], 64 +; CHECK-NEXT: [[R:%.*]] = add nsw i8 [[A_MASKED]], -10 +; CHECK-NEXT: ret i8 [[R]] +; + %a_masked = or i8 %a, 64 + %r = call i8 @llvm.usub.sat.i8(i8 %a_masked, i8 10) + ret i8 %r +} + +define <2 x i8> @test_vector_usub_never_overflows(<2 x i8> %a) { +; CHECK-LABEL: @test_vector_usub_never_overflows( +; CHECK-NEXT: [[A_MASKED:%.*]] = or <2 x i8> [[A:%.*]], <i8 64, i8 64> +; CHECK-NEXT: [[R:%.*]] = add nsw <2 x i8> [[A_MASKED]], <i8 -10, i8 -10> +; CHECK-NEXT: ret <2 x i8> [[R]] +; + %a_masked = or <2 x i8> %a, <i8 64, i8 64> + %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a_masked, <2 x i8> <i8 10, i8 10>) + ret <2 x i8> %r +} + +define i8 @test_scalar_usub_always_overflows(i8 %a) { +; CHECK-LABEL: @test_scalar_usub_always_overflows( +; CHECK-NEXT: ret i8 0 +; + %a_masked = and i8 %a, 64 + %r = call i8 @llvm.usub.sat.i8(i8 %a_masked, i8 100) + ret i8 %r +} + +define <2 x i8> @test_vector_usub_always_overflows(<2 x i8> %a) { +; CHECK-LABEL: @test_vector_usub_always_overflows( +; CHECK-NEXT: ret <2 x i8> zeroinitializer +; + %a_masked = and <2 x i8> %a, <i8 64, i8 64> + %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a_masked, <2 x i8> <i8 100, i8 100>) + ret <2 x i8> %r +} + ; neg ssub neg never overflows. 
define i8 @test_scalar_ssub_neg_neg(i8 %a) { ; CHECK-LABEL: @test_scalar_ssub_neg_neg( Index: test/Transforms/InstCombine/sext.ll =================================================================== --- test/Transforms/InstCombine/sext.ll +++ test/Transforms/InstCombine/sext.ll @@ -78,7 +78,7 @@ define i64 @test7(i32 %x) { ; CHECK-LABEL: @test7( ; CHECK-NEXT: [[T:%.*]] = and i32 %x, 511 -; CHECK-NEXT: [[U:%.*]] = sub nsw i32 20000, [[T]] +; CHECK-NEXT: [[U:%.*]] = sub nuw nsw i32 20000, [[T]] ; CHECK-NEXT: [[S1:%.*]] = zext i32 [[U]] to i64 ; CHECK-NEXT: ret i64 [[S1]] ; Index: test/Transforms/InstCombine/shuffle_select.ll =================================================================== --- test/Transforms/InstCombine/shuffle_select.ll +++ test/Transforms/InstCombine/shuffle_select.ll @@ -1397,7 +1397,7 @@ define <4 x i8> @or_add(<4 x i8> %v) { ; CHECK-LABEL: @or_add( ; CHECK-NEXT: [[V0:%.*]] = lshr <4 x i8> [[V:%.*]], <i8 3, i8 3, i8 3, i8 3> -; CHECK-NEXT: [[T3:%.*]] = add nsw <4 x i8> [[V0]], <i8 1, i8 2, i8 3, i8 4> +; CHECK-NEXT: [[T3:%.*]] = add nuw nsw <4 x i8> [[V0]], <i8 1, i8 2, i8 3, i8 4> ; CHECK-NEXT: ret <4 x i8> [[T3]] ; %v0 = lshr <4 x i8> %v, <i8 3, i8 3, i8 3, i8 3> ; clear the top bits Index: test/Transforms/InstCombine/strlen-1.ll =================================================================== --- test/Transforms/InstCombine/strlen-1.ll +++ test/Transforms/InstCombine/strlen-1.ll @@ -128,7 +128,7 @@ define i32 @test_simplify11(i32 %x) { ; CHECK-LABEL: @test_simplify11( ; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 7 -; CHECK-NEXT: [[TMP1:%.*]] = sub nsw i32 9, [[AND]] +; CHECK-NEXT: [[TMP1:%.*]] = sub nuw nsw i32 9, [[AND]] ; CHECK-NEXT: ret i32 [[TMP1]] ; %and = and i32 %x, 7 Index: test/Transforms/InstCombine/sub-xor.ll =================================================================== --- test/Transforms/InstCombine/sub-xor.ll +++ test/Transforms/InstCombine/sub-xor.ll @@ -39,7 +39,7 @@ define i32 @test3(i32 %x) { ; CHECK-LABEL: @test3( ; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 31 -; CHECK-NEXT: [[ADD:%.*]] = sub nsw i32 73, [[AND]] +; 
CHECK-NEXT: [[ADD:%.*]] = sub nuw nsw i32 73, [[AND]] ; CHECK-NEXT: ret i32 [[ADD]] ; %and = and i32 %x, 31 Index: test/Transforms/InstCombine/wcslen-1.ll =================================================================== --- test/Transforms/InstCombine/wcslen-1.ll +++ test/Transforms/InstCombine/wcslen-1.ll @@ -130,9 +130,9 @@ define i64 @test_simplify11(i32 %x) { ; CHECK-LABEL: @test_simplify11( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 7 -; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[AND]] to i64 -; CHECK-NEXT: [[TMP2:%.*]] = sub nsw i64 9, [[TMP1]] -; CHECK-NEXT: ret i64 [[TMP2]] +; CHECK-NEXT: [[NARROW:%.*]] = sub nuw nsw i32 9, [[AND]] +; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[NARROW]] to i64 +; CHECK-NEXT: ret i64 [[TMP1]] ; %and = and i32 %x, 7 %hello_p = getelementptr inbounds [13 x i32], [13 x i32]* @null_hello_mid, i32 0, i32 %and Index: test/Transforms/InstCombine/wcslen-3.ll =================================================================== --- test/Transforms/InstCombine/wcslen-3.ll +++ test/Transforms/InstCombine/wcslen-3.ll @@ -131,9 +131,9 @@ define i64 @test_simplify11(i16 %x) { ; CHECK-LABEL: @test_simplify11( ; CHECK-NEXT: [[AND:%.*]] = and i16 [[X:%.*]], 7 -; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[AND]] to i64 -; CHECK-NEXT: [[TMP2:%.*]] = sub nsw i64 9, [[TMP1]] -; CHECK-NEXT: ret i64 [[TMP2]] +; CHECK-NEXT: [[NARROW:%.*]] = sub nuw nsw i16 9, [[AND]] +; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[NARROW]] to i64 +; CHECK-NEXT: ret i64 [[TMP1]] ; %and = and i16 %x, 7 %hello_p = getelementptr inbounds [13 x i16], [13 x i16]* @null_hello_mid, i16 0, i16 %and Index: test/Transforms/LoopVectorize/X86/masked_load_store.ll =================================================================== --- test/Transforms/LoopVectorize/X86/masked_load_store.ll +++ test/Transforms/LoopVectorize/X86/masked_load_store.ll @@ -2028,7 +2028,7 @@ ; AVX512-NEXT: br i1 [[TMP23]], label [[FOR_BODY_PREHEADER]], label [[VECTOR_BODY]], !llvm.loop !49 ; AVX512: for.body.preheader: ; 
AVX512-NEXT: [[INDVARS_IV_PH:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ 9984, [[VECTOR_BODY]] ] -; AVX512-NEXT: [[TMP24:%.*]] = sub nsw i64 9999, [[INDVARS_IV_PH]] +; AVX512-NEXT: [[TMP24:%.*]] = sub nuw nsw i64 9999, [[INDVARS_IV_PH]] ; AVX512-NEXT: br label [[FOR_BODY_PROL:%.*]] ; AVX512: for.body.prol: ; AVX512-NEXT: [[INDVARS_IV_PROL:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_PROL:%.*]], [[FOR_INC_PROL:%.*]] ], [ [[INDVARS_IV_PH]], [[FOR_BODY_PREHEADER]] ]