diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -4578,6 +4578,22 @@
   if (Pred == ICmpInst::ICMP_UGE)
     return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
 
+  if (ICmpInst::isEquality(Pred) && Op0->hasOneUse()) {
+    // icmp (X & Y) eq/ne Y --> (X | ~Y) eq/ne -1 if Y is freely invertible and
+    // Y is non-constant. If Y is constant, this form is preferable (and we
+    // canonicalize to it elsewhere).
+    if (!match(Op1, m_ImmConstant()) &&
+        IC.isFreeToInvert(Op1, Op1->hasOneUse() || Op1->hasNUses(2)))
+      return new ICmpInst(Pred,
+                          IC.Builder.CreateOr(A, IC.Builder.CreateNot(Op1)),
+                          Constant::getAllOnesValue(Op1->getType()));
+    // icmp (X & Y) eq/ne Y --> (~X & Y) eq/ne 0 if X is freely invertible.
+    if (IC.isFreeToInvert(A, A->hasOneUse()))
+      return new ICmpInst(Pred,
+                          IC.Builder.CreateAnd(Op1, IC.Builder.CreateNot(A)),
+                          Constant::getNullValue(Op1->getType()));
+  }
+
   return nullptr;
 }
 
@@ -5344,21 +5360,6 @@
     }
   }
 
-  // canoncalize:
-  // (icmp eq/ne (and X, C), X)
-  //    -> (icmp eq/ne (and X, ~C), 0)
-  {
-    Constant *CMask;
-    A = nullptr;
-    if (match(Op0, m_OneUse(m_And(m_Specific(Op1), m_ImmConstant(CMask)))))
-      A = Op1;
-    else if (match(Op1, m_OneUse(m_And(m_Specific(Op0), m_ImmConstant(CMask)))))
-      A = Op0;
-    if (A)
-      return new ICmpInst(Pred, Builder.CreateAnd(A, Builder.CreateNot(CMask)),
-                          Constant::getNullValue(A->getType()));
-  }
-
   if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && (A == Op0 || B == Op0)) {
     // A == (A^B)  ->  B == 0
     Value *OtherVal = A == Op0 ? B : A;
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-eq-to-icmp-ule.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-eq-to-icmp-ule.ll
--- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-eq-to-icmp-ule.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-eq-to-icmp-ule.ll
@@ -269,9 +269,8 @@
 define i1 @n1(i8 %x, i8 %y) {
 ; CHECK-LABEL: @n1(
 ; CHECK-NEXT:    [[T0:%.*]] = shl nuw i8 1, [[Y:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT:    [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[T2]], [[X]]
+; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[TMP1]], 0
 ; CHECK-NEXT:    ret i1 [[RET]]
 ;
   %t0 = shl i8 1, %y ; not -1
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-ne-to-icmp-ugt.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-ne-to-icmp-ugt.ll
--- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-ne-to-icmp-ugt.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-ne-to-icmp-ugt.ll
@@ -269,9 +269,8 @@
 define i1 @n1(i8 %x, i8 %y) {
 ; CHECK-LABEL: @n1(
 ; CHECK-NEXT:    [[T0:%.*]] = shl nuw i8 1, [[Y:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT:    [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp ne i8 [[T2]], [[X]]
+; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = icmp ne i8 [[TMP1]], 0
 ; CHECK-NEXT:    ret i1 [[RET]]
 ;
   %t0 = shl i8 1, %y ; not -1
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-eq-to-icmp-ule.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-eq-to-icmp-ule.ll
--- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-eq-to-icmp-ule.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-eq-to-icmp-ule.ll
@@ -251,9 +251,9 @@
 ; CHECK-LABEL: @n1(
 ; CHECK-NEXT:    [[T0:%.*]] = shl i8 -1, [[Y:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[T0]])
-; CHECK-NEXT:    [[T1:%.*]] = add i8 [[T0]], -1
-; CHECK-NEXT:    [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[T2]], [[X]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[T0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[TMP2]], 0
 ; CHECK-NEXT:    ret i1 [[RET]]
 ;
   %t0 = shl i8 -1, %y ; not 1
@@ -268,9 +268,9 @@
 ; CHECK-LABEL: @n2(
 ; CHECK-NEXT:    [[T0:%.*]] = shl nuw i8 1, [[Y:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[T0]])
-; CHECK-NEXT:    [[T1:%.*]] = add nuw i8 [[T0]], 1
-; CHECK-NEXT:    [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[T2]], [[X]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub nuw i8 -2, [[T0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[TMP2]], 0
 ; CHECK-NEXT:    ret i1 [[RET]]
 ;
   %t0 = shl i8 1, %y
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-ne-to-icmp-ugt.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-ne-to-icmp-ugt.ll
--- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-ne-to-icmp-ugt.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-ne-to-icmp-ugt.ll
@@ -251,9 +251,9 @@
 ; CHECK-LABEL: @n1(
 ; CHECK-NEXT:    [[T0:%.*]] = shl i8 -1, [[Y:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[T0]])
-; CHECK-NEXT:    [[T1:%.*]] = add i8 [[T0]], -1
-; CHECK-NEXT:    [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp ne i8 [[T2]], [[X]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[T0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = icmp ne i8 [[TMP2]], 0
 ; CHECK-NEXT:    ret i1 [[RET]]
 ;
   %t0 = shl i8 -1, %y ; not 1
@@ -268,9 +268,9 @@
 ; CHECK-LABEL: @n2(
 ; CHECK-NEXT:    [[T0:%.*]] = shl nuw i8 1, [[Y:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[T0]])
-; CHECK-NEXT:    [[T1:%.*]] = add nuw i8 [[T0]], 1
-; CHECK-NEXT:    [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp ne i8 [[T2]], [[X]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub nuw i8 -2, [[T0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[RET:%.*]] = icmp ne i8 [[TMP2]], 0
 ; CHECK-NEXT:    ret i1 [[RET]]
 ;
   %t0 = shl i8 1, %y
diff --git a/llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll b/llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll
--- a/llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll
@@ -503,9 +503,8 @@
 ; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
 ; CHECK-NEXT:    [[MASK_SHR:%.*]] = lshr i8 -1, [[Y:%.*]]
 ; CHECK-NEXT:    [[NMASK:%.*]] = shl i8 [[MASK_SHR]], [[Z:%.*]]
-; CHECK-NEXT:    [[MASK:%.*]] = xor i8 [[NMASK]], -1
-; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X]], [[MASK]]
-; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[AND]], [[X]]
+; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X]], [[NMASK]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[TMP1]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %x = xor i8 %x_in, 123
diff --git a/llvm/test/Transforms/InstCombine/icmp-of-and-x.ll b/llvm/test/Transforms/InstCombine/icmp-of-and-x.ll
--- a/llvm/test/Transforms/InstCombine/icmp-of-and-x.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-of-and-x.ll
@@ -238,9 +238,9 @@
 
 define i1 @icmp_eq_x_invertable_y_todo(i8 %x, i1 %y) {
 ; CHECK-LABEL: @icmp_eq_x_invertable_y_todo(
-; CHECK-NEXT:    [[YY:%.*]] = select i1 [[Y:%.*]], i8 7, i8 24
-; CHECK-NEXT:    [[AND:%.*]] = and i8 [[YY]], [[X:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[AND]], [[X]]
+; CHECK-NEXT:    [[TMP1:%.*]] = select i1 [[Y:%.*]], i8 -8, i8 -25
+; CHECK-NEXT:    [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[TMP2]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %yy = select i1 %y, i8 7, i8 24
@@ -251,9 +251,8 @@
 
 define i1 @icmp_eq_x_invertable_y(i8 %x, i8 %y) {
 ; CHECK-LABEL: @icmp_eq_x_invertable_y(
-; CHECK-NEXT:    [[YY:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT:    [[AND:%.*]] = and i8 [[YY]], [[X:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[AND]], [[X]]
+; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[TMP1]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %yy = xor i8 %y, -1
@@ -264,9 +263,9 @@
 
 define i1 @icmp_eq_x_invertable_y2_todo(i8 %x, i1 %y) {
 ; CHECK-LABEL: @icmp_eq_x_invertable_y2_todo(
-; CHECK-NEXT:    [[YY:%.*]] = select i1 [[Y:%.*]], i8 7, i8 24
-; CHECK-NEXT:    [[AND:%.*]] = and i8 [[YY]], [[X:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[YY]], [[AND]]
+; CHECK-NEXT:    [[TMP1:%.*]] = select i1 [[Y:%.*]], i8 -8, i8 -25
+; CHECK-NEXT:    [[TMP2:%.*]] = or i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[TMP2]], -1
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %yy = select i1 %y, i8 7, i8 24
@@ -277,9 +276,8 @@
 
 define i1 @icmp_eq_x_invertable_y2(i8 %x, i8 %y) {
 ; CHECK-LABEL: @icmp_eq_x_invertable_y2(
-; CHECK-NEXT:    [[YY:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT:    [[AND:%.*]] = and i8 [[YY]], [[X:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[AND]], [[YY]]
+; CHECK-NEXT:    [[TMP1:%.*]] = or i8 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[TMP1]], -1
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %yy = xor i8 %y, -1
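
For reference, a minimal before/after IR sketch of the new `icmp (X & Y) eq Y --> (X | ~Y) eq -1` fold (illustrative only, not part of the patch; the standalone module and the Alive2-style @src/@tgt names are assumptions). It mirrors the @icmp_eq_x_invertable_y2 test above, where Y = ~%y is freely invertible:

define i1 @src(i8 %x, i8 %y) {
  %yy = xor i8 %y, -1          ; Y = ~y, freely invertible
  %and = and i8 %yy, %x        ; X & Y
  %r = icmp eq i8 %and, %yy    ; (X & Y) == Y
  ret i1 %r
}

define i1 @tgt(i8 %x, i8 %y) {
  %or = or i8 %x, %y           ; X | ~Y (here ~Y is just %y)
  %r = icmp eq i8 %or, -1      ; (X | ~Y) == -1
  ret i1 %r
}

Both return true exactly when every bit cleared in %y is set in %x, so the rewrite removes the xor and replaces the variable compare operand with a constant.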