diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp @@ -195,7 +195,13 @@ assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?"); assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?"); - Known = LHSKnown & RHSKnown; + // We have patterns for `and` in computeKnownBits that are not duplicated + // here. + computeKnownBits(I, Known, Depth, CxtI); + KnownBits KnownSimp = LHSKnown & RHSKnown; + Known.One |= KnownSimp.One; + Known.Zero |= KnownSimp.Zero; + assert(!Known.hasConflict() && "Bits known to be one AND zero?"); // If the client is only demanding bits that we know, return the known // constant. @@ -224,7 +230,13 @@ assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?"); assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?"); - Known = LHSKnown | RHSKnown; + // We have patterns for `or` in computeKnownBits that are not duplicated + // here. + computeKnownBits(I, Known, Depth, CxtI); + KnownBits KnownSimp = LHSKnown | RHSKnown; + Known.One |= KnownSimp.One; + Known.Zero |= KnownSimp.Zero; + assert(!Known.hasConflict() && "Bits known to be one AND zero?"); // If the client is only demanding bits that we know, return the known // constant. @@ -262,7 +274,13 @@ assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?"); assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?"); - Known = LHSKnown ^ RHSKnown; + // We have patterns for `xor` in computeKnownBits that are not duplicated + // here. 
+ computeKnownBits(I, Known, Depth, CxtI); + KnownBits KnownSimp = LHSKnown ^ RHSKnown; + Known.One |= KnownSimp.One; + Known.Zero |= KnownSimp.Zero; + assert(!Known.hasConflict() && "Bits known to be one AND zero?"); // If the client is only demanding bits that we know, return the known // constant. diff --git a/llvm/test/Analysis/ValueTracking/knownbits-and-or-xor-lowbit.ll b/llvm/test/Analysis/ValueTracking/knownbits-and-or-xor-lowbit.ll --- a/llvm/test/Analysis/ValueTracking/knownbits-and-or-xor-lowbit.ll +++ b/llvm/test/Analysis/ValueTracking/knownbits-and-or-xor-lowbit.ll @@ -228,10 +228,7 @@ define <2 x i32> @cmp_ne_0_add_and_eval_vec(<2 x i32> %x, <2 x i32> %C) { ; CHECK-LABEL: @cmp_ne_0_add_and_eval_vec( -; CHECK-NEXT: [[Y:%.*]] = add <2 x i32> [[X:%.*]], -; CHECK-NEXT: [[Z:%.*]] = and <2 x i32> [[Y]], [[X]] -; CHECK-NEXT: [[B:%.*]] = and <2 x i32> [[Z]], -; CHECK-NEXT: ret <2 x i32> [[B]] +; CHECK-NEXT: ret <2 x i32> zeroinitializer ; %C1 = or <2 x i32> %C, %y = add <2 x i32> %C1, %x @@ -242,10 +239,7 @@ define <2 x i32> @cmp_ugt_0_sub_and_eval_vec(<2 x i32> %x, <2 x i32> %C) { ; CHECK-LABEL: @cmp_ugt_0_sub_and_eval_vec( -; CHECK-NEXT: [[Y:%.*]] = add <2 x i32> [[X:%.*]], -; CHECK-NEXT: [[Z:%.*]] = and <2 x i32> [[Y]], [[X]] -; CHECK-NEXT: [[B:%.*]] = and <2 x i32> [[Z]], -; CHECK-NEXT: ret <2 x i32> [[B]] +; CHECK-NEXT: ret <2 x i32> zeroinitializer ; %C1 = or <2 x i32> %C, %y = sub <2 x i32> %x, %C1 @@ -267,10 +261,7 @@ define <2 x i32> @cmp_eq_0_add_xor_eval_vec(<2 x i32> %x, <2 x i32> %C) { ; CHECK-LABEL: @cmp_eq_0_add_xor_eval_vec( -; CHECK-NEXT: [[Y:%.*]] = add <2 x i32> [[X:%.*]], -; CHECK-NEXT: [[Z:%.*]] = xor <2 x i32> [[Y]], [[X]] -; CHECK-NEXT: [[B:%.*]] = and <2 x i32> [[Z]], -; CHECK-NEXT: ret <2 x i32> [[B]] +; CHECK-NEXT: ret <2 x i32> ; %C1 = or <2 x i32> %C, %y = add <2 x i32> %x, %C1 @@ -281,10 +272,7 @@ define <2 x i32> @cmp_ne_0_sub_xor_eval_vec(<2 x i32> %x, <2 x i32> %C) { ; CHECK-LABEL: @cmp_ne_0_sub_xor_eval_vec( -; CHECK-NEXT: 
[[Y:%.*]] = add <2 x i32> [[X:%.*]], -; CHECK-NEXT: [[Z:%.*]] = xor <2 x i32> [[Y]], [[X]] -; CHECK-NEXT: [[B:%.*]] = and <2 x i32> [[Z]], -; CHECK-NEXT: ret <2 x i32> [[B]] +; CHECK-NEXT: ret <2 x i32> ; %C1 = or <2 x i32> %C, %y = sub <2 x i32> %x, %C1 @@ -306,10 +294,7 @@ define <2 x i32> @cmp_sgt_0_add_or_eval_vec(<2 x i32> %x, <2 x i32> %C) { ; CHECK-LABEL: @cmp_sgt_0_add_or_eval_vec( -; CHECK-NEXT: [[Y:%.*]] = add <2 x i32> [[X:%.*]], -; CHECK-NEXT: [[Z:%.*]] = or <2 x i32> [[Y]], [[X]] -; CHECK-NEXT: [[B:%.*]] = and <2 x i32> [[Z]], -; CHECK-NEXT: ret <2 x i32> [[B]] +; CHECK-NEXT: ret <2 x i32> ; %C1 = or <2 x i32> %C, %y = add <2 x i32> %x, %C1 @@ -320,10 +305,7 @@ define <2 x i32> @cmp_ne_0_sub_or_eval_vec(<2 x i32> %x, <2 x i32> %C) { ; CHECK-LABEL: @cmp_ne_0_sub_or_eval_vec( -; CHECK-NEXT: [[Y:%.*]] = add <2 x i32> [[X:%.*]], -; CHECK-NEXT: [[Z:%.*]] = or <2 x i32> [[Y]], [[X]] -; CHECK-NEXT: [[B:%.*]] = and <2 x i32> [[Z]], -; CHECK-NEXT: ret <2 x i32> [[B]] +; CHECK-NEXT: ret <2 x i32> ; %C1 = or <2 x i32> %C, %y = sub <2 x i32> %x, %C1 diff --git a/llvm/test/Analysis/ValueTracking/knownbits-bmi-pattern.ll b/llvm/test/Analysis/ValueTracking/knownbits-bmi-pattern.ll --- a/llvm/test/Analysis/ValueTracking/knownbits-bmi-pattern.ll +++ b/llvm/test/Analysis/ValueTracking/knownbits-bmi-pattern.ll @@ -49,11 +49,7 @@ define i1 @blsmsk_ge_is_false(i32 %x) { ; CHECK-LABEL: @blsmsk_ge_is_false( -; CHECK-NEXT: [[X1:%.*]] = or i32 [[X:%.*]], 10 -; CHECK-NEXT: [[X2:%.*]] = add nsw i32 [[X1]], -1 -; CHECK-NEXT: [[X3:%.*]] = xor i32 [[X1]], [[X2]] -; CHECK-NEXT: [[Z:%.*]] = icmp ugt i32 [[X3]], 7 -; CHECK-NEXT: ret i1 [[Z]] +; CHECK-NEXT: ret i1 false ; %x1 = or i32 %x, 10 %x2 = sub i32 %x1, 1 @@ -64,11 +60,7 @@ define <2 x i1> @blsmsk_gt_is_false_vec(<2 x i32> %x) { ; CHECK-LABEL: @blsmsk_gt_is_false_vec( -; CHECK-NEXT: [[X1:%.*]] = or <2 x i32> [[X:%.*]], -; CHECK-NEXT: [[X2:%.*]] = add nsw <2 x i32> [[X1]], -; CHECK-NEXT: [[X3:%.*]] = xor <2 x i32> [[X2]], [[X1]] 
-; CHECK-NEXT: [[Z:%.*]] = icmp ugt <2 x i32> [[X3]], -; CHECK-NEXT: ret <2 x i1> [[Z]] +; CHECK-NEXT: ret <2 x i1> zeroinitializer ; %x1 = or <2 x i32> %x, %x2 = sub <2 x i32> %x1, @@ -101,11 +93,7 @@ define <2 x i32> @blsmsk_add_eval_vec(<2 x i32> %x) { ; CHECK-LABEL: @blsmsk_add_eval_vec( -; CHECK-NEXT: [[X1:%.*]] = or <2 x i32> [[X:%.*]], -; CHECK-NEXT: [[X2:%.*]] = add nsw <2 x i32> [[X1]], -; CHECK-NEXT: [[X3:%.*]] = xor <2 x i32> [[X2]], [[X1]] -; CHECK-NEXT: [[Z:%.*]] = or <2 x i32> [[X3]], -; CHECK-NEXT: ret <2 x i32> [[Z]] +; CHECK-NEXT: ret <2 x i32> ; %x1 = or <2 x i32> %x, %x2 = add <2 x i32> %x1, @@ -116,11 +104,7 @@ define i32 @blsmsk_sub_eval(i32 %x) { ; CHECK-LABEL: @blsmsk_sub_eval( -; CHECK-NEXT: [[X1:%.*]] = or i32 [[X:%.*]], 9 -; CHECK-NEXT: [[X2:%.*]] = add i32 [[X1]], 31 -; CHECK-NEXT: [[X3:%.*]] = xor i32 [[X1]], [[X2]] -; CHECK-NEXT: [[Z:%.*]] = or i32 [[X3]], -32 -; CHECK-NEXT: ret i32 [[Z]] +; CHECK-NEXT: ret i32 -31 ; %x1 = or i32 %x, 9 %x2 = sub i32 %x1, 1 @@ -142,11 +126,7 @@ define <2 x i32> @blsmsk_or_eval_vec(<2 x i32> %x) { ; CHECK-LABEL: @blsmsk_or_eval_vec( -; CHECK-NEXT: [[X1:%.*]] = or <2 x i32> [[X:%.*]], -; CHECK-NEXT: [[X2:%.*]] = add nsw <2 x i32> [[X1]], -; CHECK-NEXT: [[X3:%.*]] = xor <2 x i32> [[X2]], [[X1]] -; CHECK-NEXT: [[Z:%.*]] = or <2 x i32> [[X3]], -; CHECK-NEXT: ret <2 x i32> [[Z]] +; CHECK-NEXT: ret <2 x i32> ; %x1 = or <2 x i32> %x, %x2 = add <2 x i32> %x1, @@ -255,10 +235,7 @@ ; CHECK-NEXT: [[LB:%.*]] = and i32 [[X:%.*]], 1 ; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[LB]], 0 ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]]) -; CHECK-NEXT: [[X2:%.*]] = add nsw i32 [[X]], -1 -; CHECK-NEXT: [[X3:%.*]] = xor i32 [[X2]], [[X]] -; CHECK-NEXT: [[Z:%.*]] = icmp ugt i32 [[X3]], 7 -; CHECK-NEXT: ret i1 [[Z]] +; CHECK-NEXT: ret i1 false ; %lb = and i32 %x, 1 %cmp = icmp ne i32 %lb, 0 @@ -298,10 +275,7 @@ ; CHECK-NEXT: [[LB:%.*]] = and i32 [[X:%.*]], 2 ; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[LB]], 0 ; CHECK-NEXT: call void 
@llvm.assume(i1 [[CMP]]) -; CHECK-NEXT: [[X2:%.*]] = add nsw i32 [[X]], -1 -; CHECK-NEXT: [[X3:%.*]] = xor i32 [[X2]], [[X]] -; CHECK-NEXT: [[Z:%.*]] = icmp ugt i32 [[X3]], 8 -; CHECK-NEXT: ret i1 [[Z]] +; CHECK-NEXT: ret i1 false ; %lb = and i32 %x, 2 %cmp = icmp ne i32 %lb, 0 @@ -398,10 +372,7 @@ ; CHECK-NEXT: [[LB:%.*]] = and i32 [[X:%.*]], 1 ; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[LB]], 0 ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]]) -; CHECK-NEXT: [[X2:%.*]] = add i32 [[X]], 31 -; CHECK-NEXT: [[X3:%.*]] = xor i32 [[X2]], [[X]] -; CHECK-NEXT: [[Z:%.*]] = or i32 [[X3]], -32 -; CHECK-NEXT: ret i32 [[Z]] +; CHECK-NEXT: ret i32 -31 ; %lb = and i32 %x, 1 %cmp = icmp ne i32 %lb, 0 @@ -635,11 +606,7 @@ define i1 @blsi_gt_is_false(i32 %x) { ; CHECK-LABEL: @blsi_gt_is_false( -; CHECK-NEXT: [[X1:%.*]] = or i32 [[X:%.*]], 10 -; CHECK-NEXT: [[X2:%.*]] = sub nsw i32 0, [[X1]] -; CHECK-NEXT: [[X3:%.*]] = and i32 [[X1]], [[X2]] -; CHECK-NEXT: [[Z:%.*]] = icmp ugt i32 [[X3]], 8 -; CHECK-NEXT: ret i1 [[Z]] +; CHECK-NEXT: ret i1 false ; %x1 = or i32 %x, 10 %x2 = sub i32 0, %x1 @@ -683,11 +650,7 @@ define <2 x i32> @blsi_sub_eval_vec(<2 x i32> %x) { ; CHECK-LABEL: @blsi_sub_eval_vec( -; CHECK-NEXT: [[X1:%.*]] = or <2 x i32> [[X:%.*]], -; CHECK-NEXT: [[X2:%.*]] = sub nsw <2 x i32> zeroinitializer, [[X1]] -; CHECK-NEXT: [[X3:%.*]] = and <2 x i32> [[X1]], [[X2]] -; CHECK-NEXT: [[Z:%.*]] = or <2 x i32> [[X3]], -; CHECK-NEXT: ret <2 x i32> [[Z]] +; CHECK-NEXT: ret <2 x i32> ; %x1 = or <2 x i32> %x, %x2 = sub <2 x i32> , %x1 @@ -720,11 +683,7 @@ define <2 x i32> @blsi_xor_eval_vec(<2 x i32> %x) { ; CHECK-LABEL: @blsi_xor_eval_vec( -; CHECK-NEXT: [[X1:%.*]] = or <2 x i32> [[X:%.*]], -; CHECK-NEXT: [[X2:%.*]] = sub nsw <2 x i32> zeroinitializer, [[X1]] -; CHECK-NEXT: [[X3:%.*]] = and <2 x i32> [[X1]], [[X2]] -; CHECK-NEXT: [[Z1:%.*]] = or <2 x i32> [[X3]], -; CHECK-NEXT: ret <2 x i32> [[Z1]] +; CHECK-NEXT: ret <2 x i32> ; %x1 = or <2 x i32> %x, %x2 = sub <2 x i32> , %x1 @@ -886,10 
+845,7 @@ ; CHECK-NEXT: [[LB:%.*]] = and i32 [[X:%.*]], 4 ; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[LB]], 0 ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]]) -; CHECK-NEXT: [[X2:%.*]] = sub nsw i32 0, [[X]] -; CHECK-NEXT: [[X3:%.*]] = and i32 [[X2]], [[X]] -; CHECK-NEXT: [[Z:%.*]] = icmp ugt i32 [[X3]], 7 -; CHECK-NEXT: ret i1 [[Z]] +; CHECK-NEXT: ret i1 false ; %lb = and i32 %x, 4 %cmp = icmp ne i32 %lb, 0 @@ -905,10 +861,7 @@ ; CHECK-NEXT: [[LB:%.*]] = and i32 [[X:%.*]], 1 ; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[LB]], 0 ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]]) -; CHECK-NEXT: [[X2:%.*]] = sub nsw i32 0, [[X]] -; CHECK-NEXT: [[X3:%.*]] = and i32 [[X2]], [[X]] -; CHECK-NEXT: [[Z:%.*]] = icmp ugt i32 [[X3]], 8 -; CHECK-NEXT: ret i1 [[Z]] +; CHECK-NEXT: ret i1 false ; %lb = and i32 %x, 1 %cmp = icmp ne i32 %lb, 0 diff --git a/llvm/test/Transforms/InstCombine/and-or-xor-lowbit-simplified.ll b/llvm/test/Transforms/InstCombine/and-or-xor-lowbit-simplified.ll --- a/llvm/test/Transforms/InstCombine/and-or-xor-lowbit-simplified.ll +++ b/llvm/test/Transforms/InstCombine/and-or-xor-lowbit-simplified.ll @@ -3,11 +3,7 @@ define i1 @cmp_ne_0_add_and_eval(i32 %x, i32 %C) { ; CHECK-LABEL: @cmp_ne_0_add_and_eval( -; CHECK-NEXT: [[Y:%.*]] = add i32 [[X:%.*]], 1 -; CHECK-NEXT: [[Z:%.*]] = and i32 [[Y]], [[X]] -; CHECK-NEXT: [[B:%.*]] = and i32 [[Z]], 1 -; CHECK-NEXT: [[E:%.*]] = icmp ne i32 [[B]], 0 -; CHECK-NEXT: ret i1 [[E]] +; CHECK-NEXT: ret i1 false ; %C1 = or i32 %C, 131 %y = add i32 %C1, %x @@ -19,11 +15,7 @@ define i1 @cmp_ugt_0_sub_and_eval(i32 %x, i32 %C) { ; CHECK-LABEL: @cmp_ugt_0_sub_and_eval( -; CHECK-NEXT: [[Y:%.*]] = add i32 [[X:%.*]], 1 -; CHECK-NEXT: [[Z:%.*]] = and i32 [[Y]], [[X]] -; CHECK-NEXT: [[B:%.*]] = and i32 [[Z]], 1 -; CHECK-NEXT: [[E:%.*]] = icmp ne i32 [[B]], 0 -; CHECK-NEXT: ret i1 [[E]] +; CHECK-NEXT: ret i1 false ; %C1 = or i32 %C, 129 %y = sub i32 %x, %C1 @@ -263,10 +255,7 @@ define <2 x i1> @cmp_ne_0_add_and_eval_vec(<2 x i32> %x, <2 x i32> 
%C) { ; CHECK-LABEL: @cmp_ne_0_add_and_eval_vec( -; CHECK-NEXT: [[Y:%.*]] = add <2 x i32> [[X:%.*]], -; CHECK-NEXT: [[Z:%.*]] = and <2 x i32> [[Y]], [[X]] -; CHECK-NEXT: [[E:%.*]] = trunc <2 x i32> [[Z]] to <2 x i1> -; CHECK-NEXT: ret <2 x i1> [[E]] +; CHECK-NEXT: ret <2 x i1> zeroinitializer ; %C1 = or <2 x i32> %C, %y = add <2 x i32> %C1, %x @@ -278,10 +267,7 @@ define <2 x i1> @cmp_ugt_0_sub_and_eval_vec(<2 x i32> %x, <2 x i32> %C) { ; CHECK-LABEL: @cmp_ugt_0_sub_and_eval_vec( -; CHECK-NEXT: [[Y:%.*]] = add <2 x i32> [[X:%.*]], -; CHECK-NEXT: [[Z:%.*]] = and <2 x i32> [[Y]], [[X]] -; CHECK-NEXT: [[E:%.*]] = trunc <2 x i32> [[Z]] to <2 x i1> -; CHECK-NEXT: ret <2 x i1> [[E]] +; CHECK-NEXT: ret <2 x i1> zeroinitializer ; %C1 = or <2 x i32> %C, %y = sub <2 x i32> %x, %C1 diff --git a/llvm/test/Transforms/InstCombine/assume.ll b/llvm/test/Transforms/InstCombine/assume.ll --- a/llvm/test/Transforms/InstCombine/assume.ll +++ b/llvm/test/Transforms/InstCombine/assume.ll @@ -382,10 +382,7 @@ define i32 @assumption_conflicts_with_known_bits(i32 %a, i32 %b) { ; CHECK-LABEL: @assumption_conflicts_with_known_bits( -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B:%.*]], 3 ; CHECK-NEXT: tail call void @llvm.assume(i1 false) -; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[AND1]], 0 -; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP2]]) ; CHECK-NEXT: ret i32 0 ; %and1 = and i32 %b, 3 diff --git a/llvm/test/Transforms/InstCombine/ctpop-pow2.ll b/llvm/test/Transforms/InstCombine/ctpop-pow2.ll --- a/llvm/test/Transforms/InstCombine/ctpop-pow2.ll +++ b/llvm/test/Transforms/InstCombine/ctpop-pow2.ll @@ -144,10 +144,7 @@ define <2 x i32> @ctpop_x_and_negx_vec_nz(<2 x i32> %x) { ; CHECK-LABEL: @ctpop_x_and_negx_vec_nz( -; CHECK-NEXT: [[X1:%.*]] = or <2 x i32> [[X:%.*]], -; CHECK-NEXT: [[SUB:%.*]] = sub nsw <2 x i32> zeroinitializer, [[X1]] -; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X1]], [[SUB]] -; CHECK-NEXT: ret <2 x i32> [[AND]] +; CHECK-NEXT: ret <2 x i32> ; %x1 = or <2 x i32> %x, %sub = 
sub <2 x i32> , %x1 diff --git a/llvm/test/Transforms/InstCombine/shift.ll b/llvm/test/Transforms/InstCombine/shift.ll --- a/llvm/test/Transforms/InstCombine/shift.ll +++ b/llvm/test/Transforms/InstCombine/shift.ll @@ -110,8 +110,8 @@ ;; (A >> 8) << 8 === A & -256 define i32 @test12(i32 %A) { ; CHECK-LABEL: @test12( -; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[A:%.*]], -256 -; CHECK-NEXT: ret i32 [[TMP1]] +; CHECK-NEXT: [[C:%.*]] = and i32 [[A:%.*]], -256 +; CHECK-NEXT: ret i32 [[C]] ; %B = ashr i32 %A, 8 %C = shl i32 %B, 8 @@ -1770,12 +1770,11 @@ define void @ossfuzz_38078(i32 %arg, i32 %arg1, ptr %ptr, ptr %ptr2, ptr %ptr3, ptr %ptr4, ptr %ptr5, ptr %ptr6, ptr %ptr7) { ; CHECK-LABEL: @ossfuzz_38078( ; CHECK-NEXT: bb: -; CHECK-NEXT: [[I2:%.*]] = add nsw i32 [[ARG:%.*]], [[ARG1:%.*]] -; CHECK-NEXT: [[B3:%.*]] = or i32 [[I2]], 2147483647 ; CHECK-NEXT: [[G1:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 -1 -; CHECK-NEXT: [[I5:%.*]] = icmp eq i32 [[I2]], 0 +; CHECK-NEXT: [[I2:%.*]] = sub i32 0, [[ARG1:%.*]] +; CHECK-NEXT: [[I5:%.*]] = icmp eq i32 [[I2]], [[ARG:%.*]] ; CHECK-NEXT: call void @llvm.assume(i1 [[I5]]) -; CHECK-NEXT: store volatile i32 [[B3]], ptr [[G1]], align 4 +; CHECK-NEXT: store volatile i32 2147483647, ptr [[G1]], align 4 ; CHECK-NEXT: br label [[BB:%.*]] ; CHECK: BB: ; CHECK-NEXT: unreachable diff --git a/llvm/test/Transforms/InstCombine/zext-or-icmp.ll b/llvm/test/Transforms/InstCombine/zext-or-icmp.ll --- a/llvm/test/Transforms/InstCombine/zext-or-icmp.ll +++ b/llvm/test/Transforms/InstCombine/zext-or-icmp.ll @@ -123,8 +123,8 @@ define i32 @zext_or_eq_ult_add(i32 %i) { ; CHECK-LABEL: @zext_or_eq_ult_add( ; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[I:%.*]], -3 -; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i32 [[TMP1]], 3 -; CHECK-NEXT: [[R:%.*]] = zext i1 [[TMP2]] to i32 +; CHECK-NEXT: [[O:%.*]] = icmp ult i32 [[TMP1]], 3 +; CHECK-NEXT: [[R:%.*]] = zext i1 [[O]] to i32 ; CHECK-NEXT: ret i32 [[R]] ; %a = add i32 %i, -3 @@ -138,8 +138,8 @@ define i32 
@select_zext_or_eq_ult_add(i32 %i) { ; CHECK-LABEL: @select_zext_or_eq_ult_add( ; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[I:%.*]], -3 -; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i32 [[TMP1]], 3 -; CHECK-NEXT: [[R:%.*]] = zext i1 [[TMP2]] to i32 +; CHECK-NEXT: [[NARROW:%.*]] = icmp ult i32 [[TMP1]], 3 +; CHECK-NEXT: [[R:%.*]] = zext i1 [[NARROW]] to i32 ; CHECK-NEXT: ret i32 [[R]] ; %a = add i32 %i, -3 @@ -173,25 +173,7 @@ define i8 @PR49475_infloop(i32 %t0, i16 %insert, i64 %e, i8 %i162) { ; CHECK-LABEL: @PR49475_infloop( -; CHECK-NEXT: [[B:%.*]] = icmp eq i32 [[T0:%.*]], 0 -; CHECK-NEXT: [[B2:%.*]] = icmp eq i16 [[INSERT:%.*]], 0 -; CHECK-NEXT: [[T1:%.*]] = or i1 [[B]], [[B2]] -; CHECK-NEXT: [[EXT:%.*]] = zext i1 [[T1]] to i32 -; CHECK-NEXT: [[AND:%.*]] = and i32 [[EXT]], [[T0]] -; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[AND]], 140 -; CHECK-NEXT: [[XOR1:%.*]] = zext i32 [[TMP1]] to i64 -; CHECK-NEXT: [[CONV16:%.*]] = sext i8 [[I162:%.*]] to i64 -; CHECK-NEXT: [[SUB17:%.*]] = sub i64 [[CONV16]], [[E:%.*]] -; CHECK-NEXT: [[SEXT:%.*]] = shl i64 [[SUB17]], 32 -; CHECK-NEXT: [[CONV18:%.*]] = ashr exact i64 [[SEXT]], 32 -; CHECK-NEXT: [[CMP:%.*]] = icmp sle i64 [[CONV18]], [[XOR1]] -; CHECK-NEXT: [[CONV19:%.*]] = zext i1 [[CMP]] to i16 -; CHECK-NEXT: [[OR21:%.*]] = or i16 [[CONV19]], [[INSERT]] -; CHECK-NEXT: [[TRUNC44:%.*]] = trunc i16 [[OR21]] to i8 -; CHECK-NEXT: [[INC:%.*]] = or i8 [[TRUNC44]], [[I162]] -; CHECK-NEXT: [[TOBOOL23_NOT:%.*]] = icmp eq i16 [[OR21]], 0 -; CHECK-NEXT: call void @llvm.assume(i1 [[TOBOOL23_NOT]]) -; CHECK-NEXT: ret i8 [[INC]] +; CHECK-NEXT: ret i8 [[I162:%.*]] ; %b = icmp eq i32 %t0, 0 %b2 = icmp eq i16 %insert, 0