Index: lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
===================================================================
--- lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
+++ lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
@@ -57,18 +57,45 @@
 };
 } // namespace

-/// This is a recursive helper for 'and X, 1' that walks through a chain of 'or'
-/// instructions looking for shift ops of a common source value (first member of
-/// the pair). The second member of the pair is a mask constant for all of the
-/// bits that are being compared. So this:
-/// or (or (or X, (X >> 3)), (X >> 5)), (X >> 8)
-/// returns {X, 0x129} and those are the operands of an 'and' that is compared
-/// to zero.
-static bool matchMaskedCmpOp(Value *V, std::pair<Value *, APInt> &Result) {
-  // Recurse through a chain of 'or' operands.
+/// This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and
+/// the bit indexes (Mask) needed by a masked compare. If we're matching a chain
+/// of 'and' ops, then we also need to capture the fact that we saw an
+/// "and X, 1", so that's an extra return value for that case.
+struct MaskOps {
+  Value *Root;
+  APInt Mask;
+  bool MatchAndChain;
+  bool FoundAnd1;
+
+  MaskOps(unsigned BitWidth, bool MatchAnds) :
+      Root(nullptr), Mask(APInt::getNullValue(BitWidth)),
+      MatchAndChain(MatchAnds), FoundAnd1(false) {}
+};
+
+/// This is a recursive helper for foldAnyOrAllBitsSet() that walks through a
+/// chain of 'and' or 'or' instructions looking for shift ops of a common source
+/// value. Examples:
+///   or (or (or X, (X >> 3)), (X >> 5)), (X >> 8)
+///   returns { X, 0x129 }
+///   and (and (X >> 1), 1), (X >> 4)
+///   returns { X, 0x12 }
+static bool matchAndOrChain(Value *V, MaskOps &MOps) {
   Value *Op0, *Op1;
-  if (match(V, m_Or(m_Value(Op0), m_Value(Op1))))
-    return matchMaskedCmpOp(Op0, Result) && matchMaskedCmpOp(Op1, Result);
+  if (MOps.MatchAndChain) {
+    // Recurse through a chain of 'and' operands. This requires an extra check
+    // vs. the 'or' matcher: we must find an "and X, 1" instruction somewhere
+    // in the chain to know that all of the high bits are cleared.
+    if (match(V, m_And(m_Value(Op0), m_One()))) {
+      MOps.FoundAnd1 = true;
+      return matchAndOrChain(Op0, MOps);
+    }
+    if (match(V, m_And(m_Value(Op0), m_Value(Op1))))
+      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
+  } else {
+    // Recurse through a chain of 'or' operands.
+    if (match(V, m_Or(m_Value(Op0), m_Value(Op1))))
+      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
+  }

   // We need a shift-right or a bare value representing a compare of bit 0 of
   // the original source operand.
@@ -78,33 +105,50 @@
     Candidate = V;

   // Initialize result source operand.
-  if (!Result.first)
-    Result.first = Candidate;
+  if (!MOps.Root)
+    MOps.Root = Candidate;

   // Fill in the mask bit derived from the shift constant.
-  Result.second.setBit(BitIndex);
-  return Result.first == Candidate;
+  MOps.Mask.setBit(BitIndex);
+  return MOps.Root == Candidate;
 }

-/// Match an 'and' of a chain of or-shifted bits from a common source value into
-/// a masked compare:
-/// and (or (lshr X, C), ...), 1 --> (X & C') != 0
-static bool foldToMaskedCmp(Instruction &I) {
-  // TODO: This is only looking for 'any-bits-set' and 'all-bits-clear'.
-  // We should also match 'all-bits-set' and 'any-bits-clear' by looking for
-  // a chain of 'and'.
-  if (!match(&I, m_And(m_OneUse(m_Or(m_Value(), m_Value())), m_One())))
+/// Match patterns that correspond to "any-bits-set" and "all-bits-set".
+/// These will include a chain of 'or' or 'and'-shifted bits from a
+/// common source value:
+/// and (or (lshr X, C), ...), 1 --> (X & CMask) != 0
+/// and (and (lshr X, C), ...), 1 --> (X & CMask) == CMask
+/// Note: "any-bits-clear" and "all-bits-clear" are variations of these patterns
+/// that differ only with a final 'not' of the result. We expect that final
+/// 'not' to be folded with the compare that we create here (invert predicate).
+static bool foldAnyOrAllBitsSet(Instruction &I) {
+  // The 'any-bits-set' ('or' chain) pattern is simpler to match because the
+  // "and X, 1" instruction must be the final op in that sequence.
+  bool MatchAllBitsSet;
+  if (match(&I, m_c_And(m_OneUse(m_And(m_Value(), m_Value())), m_Value())))
+    MatchAllBitsSet = true;
+  else if (match(&I, m_And(m_OneUse(m_Or(m_Value(), m_Value())), m_One())))
+    MatchAllBitsSet = false;
+  else
     return false;

-  std::pair<Value *, APInt>
-      MaskOps(nullptr, APInt::getNullValue(I.getType()->getScalarSizeInBits()));
-  if (!matchMaskedCmpOp(cast<BinaryOperator>(&I)->getOperand(0), MaskOps))
-    return false;
+  MaskOps MOps(I.getType()->getScalarSizeInBits(), MatchAllBitsSet);
+  if (MatchAllBitsSet) {
+    if (!matchAndOrChain(cast<BinaryOperator>(&I), MOps) || !MOps.FoundAnd1)
+      return false;
+  } else {
+    if (!matchAndOrChain(cast<BinaryOperator>(&I)->getOperand(0), MOps))
+      return false;
+  }

+  // The pattern was found. Create a masked compare that replaces all of the
+  // shift and logic ops.
   IRBuilder<> Builder(&I);
-  Value *Mask = Builder.CreateAnd(MaskOps.first, MaskOps.second);
-  Value *CmpZero = Builder.CreateIsNotNull(Mask);
-  Value *Zext = Builder.CreateZExt(CmpZero, I.getType());
+  Constant *Mask = ConstantInt::get(I.getType(), MOps.Mask);
+  Value *And = Builder.CreateAnd(MOps.Root, Mask);
+  Value *Cmp = MatchAllBitsSet ? Builder.CreateICmpEQ(And, Mask) :
+               Builder.CreateIsNotNull(And);
+  Value *Zext = Builder.CreateZExt(Cmp, I.getType());
   I.replaceAllUsesWith(Zext);
   return true;
 }
@@ -119,8 +163,13 @@
     if (!DT.isReachableFromEntry(&BB))
       continue;
     // Do not delete instructions under here and invalidate the iterator.
-    for (Instruction &I : BB)
-      MadeChange |= foldToMaskedCmp(I);
+    // Walk the block backwards for efficiency. We're matching a chain of
+    // use->defs, so we're more likely to succeed by starting from the bottom.
+    // Also, we want to avoid matching partial patterns.
+    // TODO: It would be more efficient if we killed dead instructions
+    // iteratively in this loop rather than waiting until the end.
+    for (Instruction &I : make_range(BB.rbegin(), BB.rend()))
+      MadeChange |= foldAnyOrAllBitsSet(I);
   }

   // We're done with transforms, so remove dead instructions.
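For reference, here is a small before/after sketch of the new all-bits-set fold that the tests below exercise. This sketch is not part of the patch, and the function and value names are made up:

  define i32 @allset_sketch_before(i32 %x) {
    %s3 = lshr i32 %x, 3
    %s5 = lshr i32 %x, 5
    %a1 = and i32 %s3, 1    ; the required 'and X, 1' can sit anywhere in the 'and' chain
    %r = and i32 %a1, %s5
    ret i32 %r
  }

becomes a single masked compare:

  define i32 @allset_sketch_after(i32 %x) {
    %m = and i32 %x, 40     ; mask = (1 << 3) | (1 << 5)
    %c = icmp eq i32 %m, 40
    %z = zext i1 %c to i32
    ret i32 %z
  }

The existing 'or' chain (any-bits-set) form is rewritten the same way, except the compare is 'icmp ne i32 %m, 0'.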
Index: test/Transforms/AggressiveInstCombine/masked-cmp.ll
===================================================================
--- test/Transforms/AggressiveInstCombine/masked-cmp.ll
+++ test/Transforms/AggressiveInstCombine/masked-cmp.ll
@@ -51,19 +51,27 @@
   ret i32 %r
 }

-; TODO: Recognize the 'and' sibling pattern. The 'and 1' may not be at the end.
+; Recognize the 'and' sibling pattern (all-bits-set). The 'and 1' may not be at the end.
+
+define i32 @allset_two_bit_mask(i32 %x) {
+; CHECK-LABEL: @allset_two_bit_mask(
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 129
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 129
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i32
+; CHECK-NEXT:    ret i32 [[TMP3]]
+;
+  %s = lshr i32 %x, 7
+  %o = and i32 %s, %x
+  %r = and i32 %o, 1
+  ret i32 %r
+}

 define i64 @allset_four_bit_mask(i64 %x) {
 ; CHECK-LABEL: @allset_four_bit_mask(
-; CHECK-NEXT:    [[T1:%.*]] = lshr i64 [[X:%.*]], 1
-; CHECK-NEXT:    [[T2:%.*]] = lshr i64 [[X]], 2
-; CHECK-NEXT:    [[T3:%.*]] = lshr i64 [[X]], 3
-; CHECK-NEXT:    [[T4:%.*]] = lshr i64 [[X]], 4
-; CHECK-NEXT:    [[A1:%.*]] = and i64 [[T4]], 1
-; CHECK-NEXT:    [[A2:%.*]] = and i64 [[T2]], [[A1]]
-; CHECK-NEXT:    [[A3:%.*]] = and i64 [[A2]], [[T1]]
-; CHECK-NEXT:    [[R:%.*]] = and i64 [[A3]], [[T3]]
-; CHECK-NEXT:    ret i64 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[X:%.*]], 30
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 30
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i64
+; CHECK-NEXT:    ret i64 [[TMP3]]
 ;
   %t1 = lshr i64 %x, 1
   %t2 = lshr i64 %x, 2
@@ -76,3 +84,41 @@
   ret i64 %r
 }

+declare void @use(i32)
+
+; negative test - extra use means the transform would increase instruction count
+
+define i32 @allset_two_bit_mask_multiuse(i32 %x) {
+; CHECK-LABEL: @allset_two_bit_mask_multiuse(
+; CHECK-NEXT:    [[S:%.*]] = lshr i32 [[X:%.*]], 7
+; CHECK-NEXT:    [[O:%.*]] = and i32 [[S]], [[X]]
+; CHECK-NEXT:    [[R:%.*]] = and i32 [[O]], 1
+; CHECK-NEXT:    call void @use(i32 [[O]])
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %s = lshr i32 %x, 7
+  %o = and i32 %s, %x
+  %r = and i32 %o, 1
+  call void @use(i32 %o)
+  ret i32 %r
+}
+
+; negative test - missing 'and 1' mask, so more than the low bit is used here
+
+define i8 @allset_three_bit_mask_no_and1(i8 %x) {
+; CHECK-LABEL: @allset_three_bit_mask_no_and1(
+; CHECK-NEXT:    [[T1:%.*]] = lshr i8 [[X:%.*]], 1
+; CHECK-NEXT:    [[T2:%.*]] = lshr i8 [[X]], 2
+; CHECK-NEXT:    [[T3:%.*]] = lshr i8 [[X]], 3
+; CHECK-NEXT:    [[A2:%.*]] = and i8 [[T1]], [[T2]]
+; CHECK-NEXT:    [[R:%.*]] = and i8 [[A2]], [[T3]]
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %t1 = lshr i8 %x, 1
+  %t2 = lshr i8 %x, 2
+  %t3 = lshr i8 %x, 3
+  %a2 = and i8 %t1, %t2
+  %r = and i8 %a2, %t3
+  ret i8 %r
+}
+
Index: test/Transforms/PhaseOrdering/bitfield-bittests.ll
===================================================================
--- test/Transforms/PhaseOrdering/bitfield-bittests.ll
+++ test/Transforms/PhaseOrdering/bitfield-bittests.ll
@@ -76,14 +76,10 @@

 define i32 @allset(i32 %a) {
 ; CHECK-LABEL: @allset(
-; CHECK-NEXT:    [[BF_LSHR:%.*]] = lshr i32 [[A:%.*]], 1
-; CHECK-NEXT:    [[BF_LSHR5:%.*]] = lshr i32 [[A]], 2
-; CHECK-NEXT:    [[BF_LSHR10:%.*]] = lshr i32 [[A]], 3
-; CHECK-NEXT:    [[BF_CLEAR2:%.*]] = and i32 [[A]], 1
-; CHECK-NEXT:    [[AND:%.*]] = and i32 [[BF_CLEAR2]], [[BF_LSHR]]
-; CHECK-NEXT:    [[AND8:%.*]] = and i32 [[AND]], [[BF_LSHR5]]
-; CHECK-NEXT:    [[AND13:%.*]] = and i32 [[AND8]], [[BF_LSHR10]]
-; CHECK-NEXT:    ret i32 [[AND13]]
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 15
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 15
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i32
+; CHECK-NEXT:    ret i32 [[TMP3]]
 ;
   %a.sroa.0.0.trunc = trunc i32 %a to i8
   %a.sroa.5.0.shift = lshr i32 %a, 8
@@ -110,15 +106,10 @@

 define i32 @anyclear(i32 %a) {
 ; CHECK-LABEL: @anyclear(
-; CHECK-NEXT:    [[BF_LSHR:%.*]] = lshr i32 [[A:%.*]], 1
-; CHECK-NEXT:    [[BF_LSHR5:%.*]] = lshr i32 [[A]], 2
-; CHECK-NEXT:    [[BF_LSHR10:%.*]] = lshr i32 [[A]], 3
-; CHECK-NEXT:    [[BF_CLEAR2:%.*]] = and i32 [[A]], 1
-; CHECK-NEXT:    [[AND:%.*]] = and i32 [[BF_CLEAR2]], [[BF_LSHR]]
-; CHECK-NEXT:    [[AND8:%.*]] = and i32 [[AND]], [[BF_LSHR5]]
-; CHECK-NEXT:    [[AND13:%.*]] = and i32 [[AND8]], [[BF_LSHR10]]
-; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[AND13]], 1
-; CHECK-NEXT:    ret i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 15
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 15
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i32
+; CHECK-NEXT:    ret i32 [[TMP3]]
 ;
   %a.sroa.0.0.trunc = trunc i32 %a to i8
   %a.sroa.5.0.shift = lshr i32 %a, 8