Index: llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
===================================================================
--- llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -5540,32 +5540,41 @@
         LHS = Op0;
 
       Value *X;
-      if (match(LHS, m_Shl(m_One(), m_Value(X)))) {
-        APInt ValToCheck = Op0KnownZeroInverted;
+      APInt C2 = Op0KnownZeroInverted;
+      const APInt *C1;
+      if (match(LHS, m_Shl(m_Power2(C1), m_Value(X)))) {
         Type *XTy = X->getType();
-        if (ValToCheck.isPowerOf2()) {
-          // ((1 << X) & 8) == 0 -> X != 3
-          // ((1 << X) & 8) != 0 -> X == 3
-          auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros());
-          auto NewPred = ICmpInst::getInversePredicate(Pred);
-          return new ICmpInst(NewPred, X, CmpC);
-        } else if ((++ValToCheck).isPowerOf2()) {
-          // ((1 << X) & 7) == 0 -> X >= 3
-          // ((1 << X) & 7) != 0 -> X < 3
-          auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros());
-          auto NewPred =
-              Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGE : CmpInst::ICMP_ULT;
-          return new ICmpInst(NewPred, X, CmpC);
+        unsigned Log2C1 = C1->countTrailingZeros();
+        if (C2.isPowerOf2()) {
+          // iff (C1 is pow2) & (C2 is pow2) & (C1 <= C2):
+          //    ((C1 << X) & C2) == 0 -> X != (Log2(C2) - Log2(C1))
+          //    ((C1 << X) & C2) != 0 -> X == (Log2(C2) - Log2(C1))
+          unsigned Log2C2 = C2.countTrailingZeros();
+          if (Log2C1 <= Log2C2) {
+            auto *CmpC = ConstantInt::get(XTy, Log2C2 - Log2C1);
+            auto NewPred = ICmpInst::getInversePredicate(Pred);
+            return new ICmpInst(NewPred, X, CmpC);
+          }
+        } else if ((++C2).isPowerOf2()) {
+          // iff (C1 is pow2) & (C2 is pow2) & (C1 <= C2):
+          //    ((C1 << X) & (C2 - 1)) == 0 -> X >= (Log2(C2) - Log2(C1))
+          //    ((C1 << X) & (C2 - 1)) != 0 -> X < (Log2(C2) - Log2(C1))
+          unsigned Log2C2 = C2.countTrailingZeros();
+          if (Log2C1 <= Log2C2) {
+            auto *CmpC = ConstantInt::get(XTy, Log2C2 - Log2C1);
+            auto NewPred = Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGE
+                                                    : CmpInst::ICMP_ULT;
+            return new ICmpInst(NewPred, X, CmpC);
+          }
         }
       }
 
-      // Check if the LHS is 8 >>u x and the result is a power of 2 like 1.
-      const APInt *CI;
-      if (Op0KnownZeroInverted.isOne() &&
-          match(LHS, m_LShr(m_Power2(CI), m_Value(X)))) {
-        // ((8 >>u X) & 1) == 0 -> X != 3
-        // ((8 >>u X) & 1) != 0 -> X == 3
-        unsigned CmpVal = CI->countTrailingZeros();
+      // Check if the LHS is C1 >>u x and the result is a power of 2 like 1.
+      if (C2.isPowerOf2() && match(LHS, m_LShr(m_Power2(C1), m_Value(X)))) {
+        unsigned CmpVal = C1->countTrailingZeros() - C2.countTrailingZeros();
+        // iff (C1 is pow2) & (C2 is pow2):
+        //    ((C1 >>u X) & C2) == 0 -> X != (Log2(C1) - Log2(C2))
+        //    ((C1 >>u X) & C2) != 0 -> X == (Log2(C1) - Log2(C2))
         auto NewPred = ICmpInst::getInversePredicate(Pred);
         return new ICmpInst(NewPred, X, ConstantInt::get(X->getType(), CmpVal));
       }
Index: llvm/test/Transforms/InstCombine/icmp-and-shift.ll
===================================================================
--- /dev/null
+++ llvm/test/Transforms/InstCombine/icmp-and-shift.ll
@@ -0,0 +1,458 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+declare void @use(i8)
+
+define i32 @icmp_eq_and_pow2_shl1(i32 %0) {
+; CHECK-LABEL: @icmp_eq_and_pow2_shl1(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[TMP0:%.*]], 4
+; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT:    ret i32 [[CONV]]
+;
+  %shl = shl i32 1, %0
+  %and = and i32 %shl, 16
+  %cmp = icmp eq i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define <2 x i32> @icmp_eq_and_pow2_shl1_vec(<2 x i32> %0) {
+; CHECK-LABEL: @icmp_eq_and_pow2_shl1_vec(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i32> [[TMP0:%.*]], <i32 4, i32 4>
+; CHECK-NEXT:    [[CONV:%.*]] = zext <2 x i1> [[CMP]] to <2 x i32>
+; CHECK-NEXT:    ret <2 x i32> [[CONV]]
+;
+  %shl = shl <2 x i32> <i32 1, i32 1>, %0
+  %and = and <2 x i32> %shl, <i32 16, i32 16>
+  %cmp = icmp eq <2 x i32> %and, <i32 0, i32 0>
+  %conv = zext <2 x i1> %cmp to <2 x i32>
+  ret <2 x i32> %conv
+}
+
+define i32 @icmp_ne_and_pow2_shl1(i32 %0) {
+; CHECK-LABEL: @icmp_ne_and_pow2_shl1(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP0:%.*]], 4
+; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT:    ret i32 [[CONV]]
+;
+  %shl = shl i32 1, %0
+  %and = and i32 %shl, 16
+  %cmp = icmp ne i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define <2 x i32> @icmp_ne_and_pow2_shl1_vec(<2 x i32> %0) {
+; CHECK-LABEL: @icmp_ne_and_pow2_shl1_vec(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i32> [[TMP0:%.*]], <i32 4, i32 4>
+; CHECK-NEXT:    [[CONV:%.*]] = zext <2 x i1> [[CMP]] to <2 x i32>
+; CHECK-NEXT:    ret <2 x i32> [[CONV]]
+;
+  %shl = shl <2 x i32> <i32 1, i32 1>, %0
+  %and = and <2 x i32> %shl, <i32 16, i32 16>
+  %cmp = icmp ne <2 x i32> %and, <i32 0, i32 0>
+  %conv = zext <2 x i1> %cmp to <2 x i32>
+  ret <2 x i32> %conv
+}
+
+define i32 @icmp_eq_and_pow2_shl_pow2(i32 %0) {
+; CHECK-LABEL: @icmp_eq_and_pow2_shl_pow2(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 2, [[TMP0:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = lshr i32 [[SHL]], 4
+; CHECK-NEXT:    [[AND_LOBIT:%.*]] = and i32 [[AND]], 1
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i32 [[AND_LOBIT]], 1
+; CHECK-NEXT:    ret i32 [[TMP2]]
+;
+  %shl = shl i32 2, %0
+  %and = and i32 %shl, 16
+  %cmp = icmp eq i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define <2 x i32> @icmp_eq_and_pow2_shl_pow2_vec(<2 x i32> %0) {
+; CHECK-LABEL: @icmp_eq_and_pow2_shl_pow2_vec(
+; CHECK-NEXT:    [[SHL:%.*]] = shl <2 x i32> <i32 2, i32 2>, [[TMP0:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = lshr <2 x i32> [[SHL]], <i32 4, i32 4>
+; CHECK-NEXT:    [[AND_LOBIT:%.*]] = and <2 x i32> [[AND]], <i32 1, i32 1>
+; CHECK-NEXT:    [[TMP2:%.*]] = xor <2 x i32> [[AND_LOBIT]], <i32 1, i32 1>
+; CHECK-NEXT:    ret <2 x i32> [[TMP2]]
+;
+  %shl = shl <2 x i32> <i32 2, i32 2>, %0
+  %and = and <2 x i32> %shl, <i32 16, i32 16>
+  %cmp = icmp eq <2 x i32> %and, <i32 0, i32 0>
+  %conv = zext <2 x i1> %cmp to <2 x i32>
+  ret <2 x i32> %conv
+}
+
+define i32 @icmp_ne_and_pow2_shl_pow2(i32 %0) {
+; CHECK-LABEL: @icmp_ne_and_pow2_shl_pow2(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 2, [[TMP0:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = lshr i32 [[SHL]], 4
+; CHECK-NEXT:    [[AND_LOBIT:%.*]] = and i32 [[AND]], 1
+; CHECK-NEXT:    ret i32 [[AND_LOBIT]]
+;
+  %shl = shl i32 2, %0
+  %and = and i32 %shl, 16
+  %cmp = icmp ne i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define <2 x i32> @icmp_ne_and_pow2_shl_pow2_vec(<2 x i32> %0) {
+; CHECK-LABEL: @icmp_ne_and_pow2_shl_pow2_vec(
+; CHECK-NEXT:    [[SHL:%.*]] = shl <2 x i32> <i32 2, i32 2>, [[TMP0:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = lshr <2 x i32> [[SHL]], <i32 4, i32 4>
+; CHECK-NEXT:    [[AND_LOBIT:%.*]] = and <2 x i32> [[AND]], <i32 1, i32 1>
+; CHECK-NEXT:    ret <2 x i32> [[AND_LOBIT]]
+;
+  %shl = shl <2 x i32> <i32 2, i32 2>, %0
+  %and = and <2 x i32> %shl, <i32 16, i32 16>
+  %cmp = icmp ne <2 x i32> %and, <i32 0, i32 0>
+  %conv = zext <2 x i1> %cmp to <2 x i32>
+  ret <2 x i32> %conv
+}
+
+define i32 @icmp_eq_and_pow2_shl_pow2_negative1(i32 %0) {
+; CHECK-LABEL: @icmp_eq_and_pow2_shl_pow2_negative1(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 11, [[TMP0:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = lshr i32 [[SHL]], 4
+; CHECK-NEXT:    [[AND_LOBIT:%.*]] = and i32 [[AND]], 1
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i32 [[AND_LOBIT]], 1
+; CHECK-NEXT:    ret i32 [[TMP2]]
+;
+  %shl = shl i32 11, %0
+  %and = and i32 %shl, 16
+  %cmp = icmp eq i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @icmp_eq_and_pow2_shl_pow2_negative2(i32 %0) {
+; CHECK-LABEL: @icmp_eq_and_pow2_shl_pow2_negative2(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 2, [[TMP0:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHL]], 14
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT:    ret i32 [[CONV]]
+;
+  %shl = shl i32 2, %0
+  %and = and i32 %shl, 14
+  %cmp = icmp eq i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @icmp_eq_and_pow2_shl_pow2_negative3(i32 %0) {
+; CHECK-LABEL: @icmp_eq_and_pow2_shl_pow2_negative3(
+; CHECK-NEXT:    ret i32 1
+;
+  %shl = shl i32 32, %0
+  %and = and i32 %shl, 16
+  %cmp = icmp eq i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+
+define i32 @icmp_eq_and_pow2_minus1_shl1(i32 %0) {
+; CHECK-LABEL: @icmp_eq_and_pow2_minus1_shl1(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[TMP0:%.*]], 3
+; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT:    ret i32 [[CONV]]
+;
+  %shl = shl i32 1, %0
+  %and = and i32 %shl, 15
+  %cmp = icmp eq i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define <2 x i32> @icmp_eq_and_pow2_minus1_shl1_vec(<2 x i32> %0) {
+; CHECK-LABEL: @icmp_eq_and_pow2_minus1_shl1_vec(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt <2 x i32> [[TMP0:%.*]], <i32 3, i32 3>
+; CHECK-NEXT:    [[CONV:%.*]] = zext <2 x i1> [[CMP]] to <2 x i32>
+; CHECK-NEXT:    ret <2 x i32> [[CONV]]
+;
+  %shl = shl <2 x i32> <i32 1, i32 1>, %0
+  %and = and <2 x i32> %shl, <i32 15, i32 15>
+  %cmp = icmp eq <2 x i32> %and, <i32 0, i32 0>
+  %conv = zext <2 x i1> %cmp to <2 x i32>
+  ret <2 x i32> %conv
+}
+
+define i32 @icmp_ne_and_pow2_minus1_shl1(i32 %0) {
+; CHECK-LABEL: @icmp_ne_and_pow2_minus1_shl1(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[TMP0:%.*]], 4
+; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT:    ret i32 [[CONV]]
+;
+  %shl = shl i32 1, %0
+  %and = and i32 %shl, 15
+  %cmp = icmp ne i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define <2 x i32> @icmp_ne_and_pow2_minus1_shl1_vec(<2 x i32> %0) {
+; CHECK-LABEL: @icmp_ne_and_pow2_minus1_shl1_vec(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i32> [[TMP0:%.*]], <i32 4, i32 4>
+; CHECK-NEXT:    [[CONV:%.*]] = zext <2 x i1> [[CMP]] to <2 x i32>
+; CHECK-NEXT:    ret <2 x i32> [[CONV]]
+;
+  %shl = shl <2 x i32> <i32 1, i32 1>, %0
+  %and = and <2 x i32> %shl, <i32 15, i32 15>
+  %cmp = icmp ne <2 x i32> %and, <i32 0, i32 0>
+  %conv = zext <2 x i1> %cmp to <2 x i32>
+  ret <2 x i32> %conv
+}
+
+define i32 @icmp_eq_and_pow2_minus1_shl_pow2(i32 %0) {
+; CHECK-LABEL: @icmp_eq_and_pow2_minus1_shl_pow2(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 2, [[TMP0:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHL]], 14
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT:    ret i32 [[CONV]]
+;
+  %shl = shl i32 2, %0
+  %and = and i32 %shl, 15
+  %cmp = icmp eq i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define <2 x i32> @icmp_eq_and_pow2_minus1_shl_pow2_vec(<2 x i32> %0) {
+; CHECK-LABEL: @icmp_eq_and_pow2_minus1_shl_pow2_vec(
+; CHECK-NEXT:    [[SHL:%.*]] = shl <2 x i32> <i32 2, i32 2>, [[TMP0:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and <2 x i32> [[SHL]], <i32 14, i32 14>
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i32> [[AND]], zeroinitializer
+; CHECK-NEXT:    [[CONV:%.*]] = zext <2 x i1> [[CMP]] to <2 x i32>
+; CHECK-NEXT:    ret <2 x i32> [[CONV]]
+;
+  %shl = shl <2 x i32> <i32 2, i32 2>, %0
+  %and = and <2 x i32> %shl, <i32 15, i32 15>
+  %cmp = icmp eq <2 x i32> %and, <i32 0, i32 0>
+  %conv = zext <2 x i1> %cmp to <2 x i32>
+  ret <2 x i32> %conv
+}
+
+define i32 @icmp_ne_and_pow2_minus1_shl_pow2(i32 %0) {
+; CHECK-LABEL: @icmp_ne_and_pow2_minus1_shl_pow2(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 2, [[TMP0:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHL]], 14
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[AND]], 0
+; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT:    ret i32 [[CONV]]
+;
+  %shl = shl i32 2, %0
+  %and = and i32 %shl, 15
+  %cmp = icmp ne i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define <2 x i32> @icmp_ne_and_pow2_minus1_shl_pow2_vec(<2 x i32> %0) {
+; CHECK-LABEL: @icmp_ne_and_pow2_minus1_shl_pow2_vec(
+; CHECK-NEXT:    [[SHL:%.*]] = shl <2 x i32> <i32 2, i32 2>, [[TMP0:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and <2 x i32> [[SHL]], <i32 14, i32 14>
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i32> [[AND]], zeroinitializer
+; CHECK-NEXT:    [[CONV:%.*]] = zext <2 x i1> [[CMP]] to <2 x i32>
+; CHECK-NEXT:    ret <2 x i32> [[CONV]]
+;
+  %shl = shl <2 x i32> <i32 2, i32 2>, %0
+  %and = and <2 x i32> %shl, <i32 15, i32 15>
+  %cmp = icmp ne <2 x i32> %and, <i32 0, i32 0>
+  %conv = zext <2 x i1> %cmp to <2 x i32>
+  ret <2 x i32> %conv
+}
+
+define i32 @icmp_eq_and_pow2_minus1_shl1_negative1(i32 %0) {
+; CHECK-LABEL: @icmp_eq_and_pow2_minus1_shl1_negative1(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 3, [[TMP0:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHL]], 15
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT:    ret i32 [[CONV]]
+;
+  %shl = shl i32 3, %0
+  %and = and i32 %shl, 15
+  %cmp = icmp eq i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @icmp_eq_and_pow2_minus1_shl1_negative2(i32 %0) {
+; CHECK-LABEL: @icmp_eq_and_pow2_minus1_shl1_negative2(
+; CHECK-NEXT:    ret i32 1
+;
+  %shl = shl i32 32, %0
+  %and = and i32 %shl, 15
+  %cmp = icmp eq i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+
+define i32 @icmp_eq_and1_lshr_pow2(i32 %0) {
+; CHECK-LABEL: @icmp_eq_and1_lshr_pow2(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[TMP0:%.*]], 3
+; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT:    ret i32 [[CONV]]
+;
+  %lshr = lshr i32 8, %0
+  %and = and i32 %lshr, 1
+  %cmp = icmp eq i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define <2 x i32> @icmp_eq_and1_lshr_pow2_vec(<2 x i32> %0) {
+; CHECK-LABEL: @icmp_eq_and1_lshr_pow2_vec(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i32> [[TMP0:%.*]], <i32 3, i32 3>
+; CHECK-NEXT:    [[CONV:%.*]] = zext <2 x i1> [[CMP]] to <2 x i32>
+; CHECK-NEXT:    ret <2 x i32> [[CONV]]
+;
+  %lshr = lshr <2 x i32> <i32 8, i32 8>, %0
+  %and = and <2 x i32> %lshr, <i32 1, i32 1>
+  %cmp = icmp eq <2 x i32> %and, <i32 0, i32 0>
+  %conv = zext <2 x i1> %cmp to <2 x i32>
+  ret <2 x i32> %conv
+}
+
+define i32 @icmp_ne_and1_lshr_pow2(i32 %0) {
+; CHECK-LABEL: @icmp_ne_and1_lshr_pow2(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[TMP0:%.*]], 3
+; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT:    ret i32 [[CONV]]
+;
+  %lshr = lshr i32 8, %0
+  %and = and i32 %lshr, 1
+  %cmp = icmp eq i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define <2 x i32> @icmp_ne_and1_lshr_pow2_vec(<2 x i32> %0) {
+; CHECK-LABEL: @icmp_ne_and1_lshr_pow2_vec(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr <2 x i32> , [[TMP0:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = lshr <2 x i32> [[LSHR]],
+; CHECK-NEXT:    [[AND_LOBIT:%.*]] = and <2 x i32> [[AND]],
+; CHECK-NEXT:    ret <2 x i32> [[AND_LOBIT]]
+;
+  %lshr = lshr <2 x i32> , %0
+  %and = and <2 x i32> %lshr,
+  %cmp = icmp ne <2 x i32> %and,
+  %conv = zext <2 x i1> %cmp to <2 x i32>
+  ret <2 x i32> %conv
+}
+
+define i32 @icmp_eq_and_pow2_lshr_pow2(i32 %0) {
+; CHECK-LABEL: @icmp_eq_and_pow2_lshr_pow2(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 8, [[TMP0:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = lshr i32 [[LSHR]], 2
+; CHECK-NEXT:    [[AND_LOBIT:%.*]] = and i32 [[AND]], 1
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i32 [[AND_LOBIT]], 1
+; CHECK-NEXT:    ret i32 [[TMP2]]
+;
+  %lshr = lshr i32 8, %0
+  %and = and i32 %lshr, 4
+  %cmp = icmp eq i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @icmp_eq_and_pow2_lshr_pow2_case2(i32 %0) {
+; CHECK-LABEL: @icmp_eq_and_pow2_lshr_pow2_case2(
+; CHECK-NEXT:    ret i32 1
+;
+  %lshr = lshr i32 4, %0
+  %and = and i32 %lshr, 8
+  %cmp = icmp eq i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define <2 x i32> @icmp_eq_and_pow2_lshr_pow2_vec(<2 x i32> %0) {
+; CHECK-LABEL: @icmp_eq_and_pow2_lshr_pow2_vec(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr <2 x i32> <i32 8, i32 8>, [[TMP0:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = lshr <2 x i32> [[LSHR]], <i32 2, i32 2>
+; CHECK-NEXT:    [[AND_LOBIT:%.*]] = and <2 x i32> [[AND]], <i32 1, i32 1>
+; CHECK-NEXT:    [[TMP2:%.*]] = xor <2 x i32> [[AND_LOBIT]], <i32 1, i32 1>
+; CHECK-NEXT:    ret <2 x i32> [[TMP2]]
+;
+  %lshr = lshr <2 x i32> <i32 8, i32 8>, %0
+  %and = and <2 x i32> %lshr, <i32 4, i32 4>
+  %cmp = icmp eq <2 x i32> %and, <i32 0, i32 0>
+  %conv = zext <2 x i1> %cmp to <2 x i32>
+  ret <2 x i32> %conv
+}
+
+define i32 @icmp_ne_and_pow2_lshr_pow2(i32 %0) {
+; CHECK-LABEL: @icmp_ne_and_pow2_lshr_pow2(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 8, [[TMP0:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = lshr i32 [[LSHR]], 2
+; CHECK-NEXT:    [[AND_LOBIT:%.*]] = and i32 [[AND]], 1
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i32 [[AND_LOBIT]], 1
+; CHECK-NEXT:    ret i32 [[TMP2]]
+;
+  %lshr = lshr i32 8, %0
+  %and = and i32 %lshr, 4
+  %cmp = icmp eq i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @icmp_ne_and_pow2_lshr_pow2_case2(i32 %0) {
+; CHECK-LABEL: @icmp_ne_and_pow2_lshr_pow2_case2(
+; CHECK-NEXT:    ret i32 1
+;
+  %lshr = lshr i32 4, %0
+  %and = and i32 %lshr, 8
+  %cmp = icmp eq i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define <2 x i32> @icmp_ne_and_pow2_lshr_pow2_vec(<2 x i32> %0) {
+; CHECK-LABEL: @icmp_ne_and_pow2_lshr_pow2_vec(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr <2 x i32> <i32 8, i32 8>, [[TMP0:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = lshr <2 x i32> [[LSHR]], <i32 2, i32 2>
+; CHECK-NEXT:    [[AND_LOBIT:%.*]] = and <2 x i32> [[AND]], <i32 1, i32 1>
+; CHECK-NEXT:    ret <2 x i32> [[AND_LOBIT]]
+;
+  %lshr = lshr <2 x i32> <i32 8, i32 8>, %0
+  %and = and <2 x i32> %lshr, <i32 4, i32 4>
+  %cmp = icmp ne <2 x i32> %and, <i32 0, i32 0>
+  %conv = zext <2 x i1> %cmp to <2 x i32>
+  ret <2 x i32> %conv
+}
+
+define i32 @icmp_eq_and1_lshr_pow2_negative1(i32 %0) {
+; CHECK-LABEL: @icmp_eq_and1_lshr_pow2_negative1(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 7, [[TMP0:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[LSHR]], 1
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i32 [[AND]], 1
+; CHECK-NEXT:    ret i32 [[TMP2]]
+;
+  %lshr = lshr i32 7, %0
+  %and = and i32 %lshr, 1
+  %cmp = icmp eq i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @icmp_eq_and1_lshr_pow2_negative2(i32 %0) {
+; CHECK-LABEL: @icmp_eq_and1_lshr_pow2_negative2(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 8, [[TMP0:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[LSHR]], 3
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT:    ret i32 [[CONV]]
+;
+  %lshr = lshr i32 8, %0
+  %and = and i32 %lshr, 3
+  %cmp = icmp eq i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
Index: llvm/test/Transforms/InstCombine/lshr-and-signbit-icmpeq-zero.ll
===================================================================
--- llvm/test/Transforms/InstCombine/lshr-and-signbit-icmpeq-zero.ll
+++ llvm/test/Transforms/InstCombine/lshr-and-signbit-icmpeq-zero.ll
@@ -193,8 +193,7 @@
 define i1 @scalar_i32_lshr_and_negC_eq_X_is_constant2(i32 %y) {
 ; CHECK-LABEL: @scalar_i32_lshr_and_negC_eq_X_is_constant2(
-; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 -2147483648, [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp sgt i32 [[LSHR]], -1
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i32 [[Y:%.*]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %lshr = lshr i32 2147483648, %y
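
Reviewer note, not part of the patch: the equivalences the new code relies on can be sanity-checked by brute force. The sketch below is illustrative only; it mirrors the patch's Log2C1/Log2C2 naming over 8-bit values, and it only checks the lshr identity for Log2(C1) >= Log2(C2), since with a larger C2 the `and` is always zero and the compare folds to a constant anyway (as the `_case2` tests above show).

// Standalone brute-force check of the three folds, assuming 8-bit values and
// power-of-two C1/C2. Not LLVM code; names are chosen to match the patch.
#include <cassert>
#include <cstdint>

int main() {
  const unsigned Bits = 8;
  for (unsigned Log2C1 = 0; Log2C1 < Bits; ++Log2C1) {
    for (unsigned Log2C2 = 0; Log2C2 < Bits; ++Log2C2) {
      uint8_t C1 = uint8_t(1u << Log2C1), C2 = uint8_t(1u << Log2C2);
      for (unsigned X = 0; X < Bits; ++X) {
        if (Log2C1 <= Log2C2) {
          // ((C1 << X) & C2) == 0  <=>  X != Log2(C2) - Log2(C1)
          bool Eq0 = (uint8_t(C1 << X) & C2) == 0;
          assert(Eq0 == (X != Log2C2 - Log2C1));
          // ((C1 << X) & (C2 - 1)) == 0  <=>  X >= Log2(C2) - Log2(C1)
          bool MaskEq0 = (uint8_t(C1 << X) & uint8_t(C2 - 1)) == 0;
          assert(MaskEq0 == (X >= Log2C2 - Log2C1));
        }
        if (Log2C1 >= Log2C2) {
          // ((C1 >>u X) & C2) == 0  <=>  X != Log2(C1) - Log2(C2)
          bool Eq0 = ((C1 >> X) & C2) == 0;
          assert(Eq0 == (X != Log2C1 - Log2C2));
        }
      }
    }
  }
  return 0;
}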