diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -483,6 +483,64 @@
   return ConstantInt::get(LHS->getType(), !IsAnd);
 }
 
+/// Try to fold (icmp (A & B) == 0) & (icmp (A & D) != E) into (icmp A u< D),
+/// where B is a contiguous set of ones starting from the most significant bit
+/// (a negated power of 2), D and E are equal, and D is a contiguous set of
+/// ones starting at the most significant zero bit of B.
+static Value *foldLogOpOfMaskedICmps_AllZeros_BMask_NotMixed_Contiguous(
+    ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, Value *A, Value *B, Value *C,
+    Value *D, Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
+    InstCombiner::BuilderTy &Builder) {
+  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
+         "Expected equality predicates for masked type of icmps.");
+  if (!IsAnd || PredL != ICmpInst::ICMP_EQ || PredR != ICmpInst::ICMP_NE)
+    return nullptr;
+
+  const APInt *BCst, *DCst, *ECst;
+  if (!match(B, m_APInt(BCst)) || !match(D, m_APInt(DCst)) ||
+      !match(E, m_APInt(ECst)))
+    return nullptr;
+  if (*DCst != *ECst)
+    return nullptr;
+  if (!BCst->isNegatedPowerOf2() || !DCst->isShiftedMask())
+    return nullptr;
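+  // D is a shifted mask sitting below B's run of high ones; the fold applies
+  // only when D's ones begin exactly at the most significant zero bit of B.
+  // For example (i32): B = 0xFFFFFFF8 from (X u< 8) and D = E = 7 give
+  //   (X u< 8) && ((X & 7) != 7)  ->  (X u< 7)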
+  if (BCst->countLeadingOnes() == DCst->countLeadingZeros())
+    return Builder.CreateICmp(ICmpInst::ICMP_ULT, A, D);
+
+  return nullptr;
+}
+
+/// Try to fold (icmp (A & B) == C) & (icmp (A & D) != E), in either order,
+/// where C is zero and the BMask side is not mixed and not all ones.
+static Value *foldLogOpOfMaskedICmps_AllZeros_BMask_NotMixed_and_NotAllOnes(
+    ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, Value *A, Value *B, Value *C,
+    Value *D, Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
+    unsigned LHSMask, unsigned RHSMask, InstCombiner::BuilderTy &Builder) {
+  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
+         "Expected equality predicates for masked type of icmps.");
+
+  const auto compareBMask = BMask_NotMixed | BMask_NotAllOnes;
+  // Handle Mask_AllZeros-(BMask_NotMixed|BMask_NotAllOnes) cases.
+  if ((LHSMask & Mask_AllZeros) && (RHSMask == compareBMask)) {
+    if (Value *V = foldLogOpOfMaskedICmps_AllZeros_BMask_NotMixed_Contiguous(
+            LHS, RHS, IsAnd, A, B, C, D, E, PredL, PredR, Builder)) {
+      return V;
+    }
+  } else if ((LHSMask == compareBMask) && (RHSMask & Mask_AllZeros)) {
+    // Same fold with the icmps commuted: swap (B, C) with (D, E) and the
+    // predicates to restore the canonical orientation.
+    if (Value *V = foldLogOpOfMaskedICmps_AllZeros_BMask_NotMixed_Contiguous(
+            RHS, LHS, IsAnd, A, D, E, B, C, PredR, PredL, Builder)) {
+      return V;
+    }
+  }
+  return nullptr;
+}
+
 /// Try to fold (icmp(A & B) ==/!= 0) &/| (icmp(A & D) ==/!= E) into a single
 /// (icmp(A & X) ==/!= Y), where the left-hand side and the right hand side
 /// aren't of the common mask pattern type.
@@ -518,7 +576,7 @@
 }
 
 /// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
-/// into a single (icmp(A & X) ==/!= Y).
+/// into a single (icmp(A & X) ==/!= Y) or (icmp A u< Y).
 static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
                                      InstCombiner::BuilderTy &Builder) {
   Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
@@ -532,6 +590,15 @@
   unsigned LHSMask = MaskPair->first;
   unsigned RHSMask = MaskPair->second;
   unsigned Mask = LHSMask & RHSMask;
+
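+  // The two sides of this pattern have different mask kinds (Mask_AllZeros
+  // vs. BMask_NotMixed | BMask_NotAllOnes), so try this fold before the
+  // common-mask handling below.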
+  if (Value *V = foldLogOpOfMaskedICmps_AllZeros_BMask_NotMixed_and_NotAllOnes(
+          LHS, RHS, IsAnd, A, B, C, D, E, PredL, PredR, LHSMask, RHSMask,
+          Builder))
+    return V;
+
   if (Mask == 0) {
     // Even if the two sides don't share a common pattern, check if folding can
     // still happen.
diff --git a/llvm/test/Transforms/InstCombine/icmp-logical.ll b/llvm/test/Transforms/InstCombine/icmp-logical.ll
--- a/llvm/test/Transforms/InstCombine/icmp-logical.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-logical.ll
@@ -1729,3 +1729,371 @@
   %t5 = select i1 %t4, i1 true, i1 %t2
   ret i1 %t5
 }
+
+; ((X u< 8) & ((X & 7) != 7)) -> X u< 7
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_1(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_1(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 7
+; CHECK-NEXT: ret i1 [[T1]]
+;
+  %t1 = icmp ult i32 %x, 8
+  %t2 = and i32 %x, 7
+  %t3 = icmp ne i32 %t2, 7
+  %t4 = and i1 %t1, %t3
+  ret i1 %t4
+}
+
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_1flip(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_1flip(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 7
+; CHECK-NEXT: ret i1 [[T1]]
+;
+  %t1 = icmp ult i32 %x, 8
+  %t2 = and i32 %x, 7
+  %t3 = icmp ne i32 %t2, 7
+  %t4 = and i1 %t3, %t1
+  ret i1 %t4
+}
+
+; ((X u< 8) & ((X & 6) != 6)) -> X u< 6
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_2(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_2(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 6
+; CHECK-NEXT: ret i1 [[T1]]
+;
+  %t1 = icmp ult i32 %x, 8
+  %t2 = and i32 %x, 6
+  %t3 = icmp ne i32 %t2, 6
+  %t4 = and i1 %t1, %t3
+  ret i1 %t4
+}
+
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_2flip(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_2flip(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 6
+; CHECK-NEXT: ret i1 [[T1]]
+;
+  %t1 = icmp ult i32 %x, 8
+  %t2 = and i32 %x, 6
+  %t3 = icmp ne i32 %t2, 6
+  %t4 = and i1 %t3, %t1
+  ret i1 %t4
+}
+
+; ((X u< 0x40000000) & ((X & 0x30000000) != 0x30000000)) -> X u< 0x30000000
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_3(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_3(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 805306368
+; CHECK-NEXT: ret i1 [[T1]]
+;
+  %t1 = icmp ult i32 %x, 1073741824
+  %t2 = and i32 %x, 805306368
+  %t3 = icmp ne i32 %t2, 805306368
+  %t4 = and i1 %t1, %t3
+  ret i1 %t4
+}
+
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_3flip(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_3flip(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 805306368
+; CHECK-NEXT: ret i1 [[T1]]
+;
+  %t1 = icmp ult i32 %x, 1073741824
+  %t2 = and i32 %x, 805306368
+  %t3 = icmp ne i32 %t2, 805306368
+  %t4 = and i1 %t3, %t1
+  ret i1 %t4
+}
+
+; ((X u< 0x40000000) & ((X & 0x3FFFFFFF) != 0x3FFFFFFF)) -> X u< 0x3FFFFFFF
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_4(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_4(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 1073741823
+; CHECK-NEXT: ret i1 [[T1]]
+;
+  %t1 = icmp ult i32 %x, 1073741824
+  %t2 = and i32 %x, 1073741823
+  %t3 = icmp ne i32 %t2, 1073741823
+  %t4 = and i1 %t1, %t3
+  ret i1 %t4
+}
+
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_4flip(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_4flip(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 1073741823
+; CHECK-NEXT: ret i1 [[T1]]
+;
+  %t1 = icmp ult i32 %x, 1073741824
+  %t2 = and i32 %x, 1073741823
+  %t3 = icmp ne i32 %t2, 1073741823
+  %t4 = and i1 %t3, %t1
+  ret i1 %t4
+}
+
+; ((X u< 0x80000000) & ((X & 0x60000000) != 0x60000000)) -> X u< 0x60000000
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_5(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_5(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 1610612736
+; CHECK-NEXT: ret i1 [[T1]]
+;
+  %t1 = icmp ult i32 %x, 2147483648
+  %t2 = and i32 %x, 1610612736
+  %t3 = icmp ne i32 %t2, 1610612736
+  %t4 = and i1 %t1, %t3
+  ret i1 %t4
+}
+
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_5flip(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_5flip(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 1610612736
+; CHECK-NEXT: ret i1 [[T1]]
+;
+  %t1 = icmp ult i32 %x, 2147483648
+  %t2 = and i32 %x, 1610612736
+  %t3 = icmp ne i32 %t2, 1610612736
+  %t4 = and i1 %t3, %t1
+  ret i1 %t4
+}
+
+; ((X u< 0x80000000) & ((X & 0x7FFFFFFF) != 0x7FFFFFFF)) -> X u< 0x7FFFFFFF
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_6(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_6(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 2147483647
+; CHECK-NEXT: ret i1 [[T1]]
+;
+  %t1 = icmp ult i32 %x, 2147483648
+  %t2 = and i32 %x, 2147483647
+  %t3 = icmp ne i32 %t2, 2147483647
+  %t4 = and i1 %t1, %t3
+  ret i1 %t4
+}
+
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_6flip(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_6flip(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 2147483647
+; CHECK-NEXT: ret i1 [[T1]]
+;
+  %t1 = icmp ult i32 %x, 2147483648
+  %t2 = and i32 %x, 2147483647
+  %t3 = icmp ne i32 %t2, 2147483647
+  %t4 = and i1 %t3, %t1
+  ret i1 %t4
+}
+
+
+; ((X u< 8) & ((X & 5) != 5)) -> no change
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_7(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_7(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 8
+; CHECK-NEXT: [[T2:%.*]] = and i32 %x, 5
+; CHECK-NEXT: [[T3:%.*]] = icmp ne i32 [[T2]], 5
+; CHECK-NEXT: [[T4:%.*]] = and i1 [[T1]], [[T3]]
+; CHECK-NEXT: ret i1 [[T4]]
+;
+  %t1 = icmp ult i32 %x, 8
+  %t2 = and i32 %x, 5
+  %t3 = icmp ne i32 %t2, 5
+  %t4 = and i1 %t1, %t3
+  ret i1 %t4
+}
+
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_7flip(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_7flip(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 8
+; CHECK-NEXT: [[T2:%.*]] = and i32 %x, 5
+; CHECK-NEXT: [[T3:%.*]] = icmp ne i32 [[T2]], 5
+; CHECK-NEXT: [[T4:%.*]] = and i1 [[T3]], [[T1]]
+; CHECK-NEXT: ret i1 [[T4]]
+;
+  %t1 = icmp ult i32 %x, 8
+  %t2 = and i32 %x, 5
+  %t3 = icmp ne i32 %t2, 5
+  %t4 = and i1 %t3, %t1
+  ret i1 %t4
+}
+
+; ((X u< 8) & ((X & 3) != 3)) -> no change
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_8(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_8(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 8
+; CHECK-NEXT: [[T2:%.*]] = and i32 %x, 3
+; CHECK-NEXT: [[T3:%.*]] = icmp ne i32 [[T2]], 3
+; CHECK-NEXT: [[T4:%.*]] = and i1 [[T1]], [[T3]]
+; CHECK-NEXT: ret i1 [[T4]]
+;
+  %t1 = icmp ult i32 %x, 8
+  %t2 = and i32 %x, 3
+  %t3 = icmp ne i32 %t2, 3
+  %t4 = and i1 %t1, %t3
+  ret i1 %t4
+}
+
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_8flip(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_8flip(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 8
+; CHECK-NEXT: [[T2:%.*]] = and i32 %x, 3
+; CHECK-NEXT: [[T3:%.*]] = icmp ne i32 [[T2]], 3
+; CHECK-NEXT: [[T4:%.*]] = and i1 [[T3]], [[T1]]
+; CHECK-NEXT: ret i1 [[T4]]
+;
+  %t1 = icmp ult i32 %x, 8
+  %t2 = and i32 %x, 3
+  %t3 = icmp ne i32 %t2, 3
+  %t4 = and i1 %t3, %t1
+  ret i1 %t4
+}
+
+; ((X u< 256) & ((X & 239) != 239)) -> no change
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_9(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_9(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 256
+; CHECK-NEXT: [[T2:%.*]] = and i32 %x, 239
+; CHECK-NEXT: [[T3:%.*]] = icmp ne i32 [[T2]], 239
+; CHECK-NEXT: [[T4:%.*]] = and i1 [[T1]], [[T3]]
+; CHECK-NEXT: ret i1 [[T4]]
+;
+  %t1 = icmp ult i32 %x, 256
+  %t2 = and i32 %x, 239
+  %t3 = icmp ne i32 %t2, 239
+  %t4 = and i1 %t1, %t3
+  ret i1 %t4
+}
+
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_9flip(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_9flip(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 256
+; CHECK-NEXT: [[T2:%.*]] = and i32 %x, 239
+; CHECK-NEXT: [[T3:%.*]] = icmp ne i32 [[T2]], 239
+; CHECK-NEXT: [[T4:%.*]] = and i1 [[T3]], [[T1]]
+; CHECK-NEXT: ret i1 [[T4]]
+;
+  %t1 = icmp ult i32 %x, 256
+  %t2 = and i32 %x, 239
+  %t3 = icmp ne i32 %t2, 239
+  %t4 = and i1 %t3, %t1
+  ret i1 %t4
+}
+
+; ((X u< 8) & ((X & 112) != 112)) -> no change
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_10(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_10(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 8
+; CHECK-NEXT: [[T2:%.*]] = and i32 %x, 112
+; CHECK-NEXT: [[T3:%.*]] = icmp ne i32 [[T2]], 112
+; CHECK-NEXT: [[T4:%.*]] = and i1 [[T1]], [[T3]]
+; CHECK-NEXT: ret i1 [[T4]]
+;
+  %t1 = icmp ult i32 %x, 8
+  %t2 = and i32 %x, 112
+  %t3 = icmp ne i32 %t2, 112
+  %t4 = and i1 %t1, %t3
+  ret i1 %t4
+}
+
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_10flip(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_10flip(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 8
+; CHECK-NEXT: [[T2:%.*]] = and i32 %x, 112
+; CHECK-NEXT: [[T3:%.*]] = icmp ne i32 [[T2]], 112
+; CHECK-NEXT: [[T4:%.*]] = and i1 [[T3]], [[T1]]
+; CHECK-NEXT: ret i1 [[T4]]
+;
+  %t1 = icmp ult i32 %x, 8
+  %t2 = and i32 %x, 112
+  %t3 = icmp ne i32 %t2, 112
+  %t4 = and i1 %t3, %t1
+  ret i1 %t4
+}
+
+; ((X u< 8) & ((X & 56) != 56)) -> no change
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_11(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_11(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 8
+; CHECK-NEXT: [[T2:%.*]] = and i32 %x, 56
+; CHECK-NEXT: [[T3:%.*]] = icmp ne i32 [[T2]], 56
+; CHECK-NEXT: [[T4:%.*]] = and i1 [[T1]], [[T3]]
+; CHECK-NEXT: ret i1 [[T4]]
+;
+  %t1 = icmp ult i32 %x, 8
+  %t2 = and i32 %x, 56
+  %t3 = icmp ne i32 %t2, 56
+  %t4 = and i1 %t1, %t3
+  ret i1 %t4
+}
+
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_11flip(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_11flip(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 8
+; CHECK-NEXT: [[T2:%.*]] = and i32 %x, 56
+; CHECK-NEXT: [[T3:%.*]] = icmp ne i32 [[T2]], 56
+; CHECK-NEXT: [[T4:%.*]] = and i1 [[T3]], [[T1]]
+; CHECK-NEXT: ret i1 [[T4]]
+;
+  %t1 = icmp ult i32 %x, 8
+  %t2 = and i32 %x, 56
+  %t3 = icmp ne i32 %t2, 56
+  %t4 = and i1 %t3, %t1
+  ret i1 %t4
+}
+
+; ((X u< 8) & ((X & 24) != 24)) -> no change
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_12(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_12(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 8
+; CHECK-NEXT: [[T2:%.*]] = and i32 %x, 24
+; CHECK-NEXT: [[T3:%.*]] = icmp ne i32 [[T2]], 24
+; CHECK-NEXT: [[T4:%.*]] = and i1 [[T1]], [[T3]]
+; CHECK-NEXT: ret i1 [[T4]]
+;
+  %t1 = icmp ult i32 %x, 8
+  %t2 = and i32 %x, 24
+  %t3 = icmp ne i32 %t2, 24
+  %t4 = and i1 %t1, %t3
+  ret i1 %t4
+}
+
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_12flip(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_12flip(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 8
+; CHECK-NEXT: [[T2:%.*]] = and i32 %x, 24
+; CHECK-NEXT: [[T3:%.*]] = icmp ne i32 [[T2]], 24
+; CHECK-NEXT: [[T4:%.*]] = and i1 [[T3]], [[T1]]
+; CHECK-NEXT: ret i1 [[T4]]
+;
+  %t1 = icmp ult i32 %x, 8
+  %t2 = and i32 %x, 24
+  %t3 = icmp ne i32 %t2, 24
+  %t4 = and i1 %t3, %t1
+  ret i1 %t4
+}
+
+; ((X u< 8) & ((X & 12) != 12)) -> no change
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_13(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_13(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 8
+; CHECK-NEXT: [[T2:%.*]] = and i32 %x, 12
+; CHECK-NEXT: [[T3:%.*]] = icmp ne i32 [[T2]], 12
+; CHECK-NEXT: [[T4:%.*]] = and i1 [[T1]], [[T3]]
+; CHECK-NEXT: ret i1 [[T4]]
+;
+  %t1 = icmp ult i32 %x, 8
+  %t2 = and i32 %x, 12
+  %t3 = icmp ne i32 %t2, 12
+  %t4 = and i1 %t1, %t3
+  ret i1 %t4
+}
+
+define i1 @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_13flip(i32 %x) {
+; CHECK-LABEL: @masked_icmps_mask_allzeros_bmask_notmixed_and_notallones_13flip(
+; CHECK-NEXT: [[T1:%.*]] = icmp ult i32 %x, 8
+; CHECK-NEXT: [[T2:%.*]] = and i32 %x, 12
+; CHECK-NEXT: [[T3:%.*]] = icmp ne i32 [[T2]], 12
+; CHECK-NEXT: [[T4:%.*]] = and i1 [[T3]], [[T1]]
+; CHECK-NEXT: ret i1 [[T4]]
+;
+  %t1 = icmp ult i32 %x, 8
+  %t2 = and i32 %x, 12
+  %t3 = icmp ne i32 %t2, 12
+  %t4 = and i1 %t3, %t1
+  ret i1 %t4
+}