Index: include/llvm/Analysis/ValueTracking.h
===================================================================
--- include/llvm/Analysis/ValueTracking.h
+++ include/llvm/Analysis/ValueTracking.h
@@ -42,6 +42,53 @@
   enum ID : unsigned;
   }
 
+  // Simplifying using an assume can only be done in a particular control-flow
+  // context (the context instruction provides that context). If an assume and
+  // the context instruction are not in the same block then the DT helps in
+  // figuring out if we can use it.
+  struct KnownBitsQuery {
+    const DataLayout &DL;
+    AssumptionCache *AC;
+    const Instruction *CxtI;
+    const DominatorTree *DT;
+    // Unlike the other analyses, this may be a nullptr because not all clients
+    // provide it currently.
+    OptimizationRemarkEmitter *ORE;
+
+    /// Set of assumptions that should be excluded from further queries.
+    /// This is because of the potential for mutual recursion to cause
+    /// computeKnownBits to repeatedly visit the same assume intrinsic. The
+    /// classic case of this is assume(x = y), which will attempt to determine
+    /// bits in x from bits in y, which will attempt to determine bits in y from
+    /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can
+    /// call isKnownNonZero, which calls computeKnownBits and
+    /// isKnownToBeAPowerOfTwo (all of which can call computeKnownBits), and so
+    /// on.
+    static const unsigned MaxDepth = 6;
+    std::array<const Value *, MaxDepth> Excluded;
+    unsigned NumExcluded;
+
+    KnownBitsQuery(const DataLayout &DL, AssumptionCache *AC,
+                   const Instruction *CxtI, const DominatorTree *DT,
+                   OptimizationRemarkEmitter *ORE = nullptr)
+        : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), NumExcluded(0) {}
+
+    KnownBitsQuery(const KnownBitsQuery &Q, const Value *NewExcl)
+        : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE),
+          NumExcluded(Q.NumExcluded) {
+      Excluded = Q.Excluded;
+      Excluded[NumExcluded++] = NewExcl;
+      assert(NumExcluded <= Excluded.size());
+    }
+
+    bool isExcluded(const Value *Value) const {
+      if (NumExcluded == 0)
+        return false;
+      auto End = Excluded.begin() + NumExcluded;
+      return std::find(Excluded.begin(), End, Value) != End;
+    }
+  };
+
   /// Determine which bits of V are known to be either zero or one and return
   /// them in the KnownZero/KnownOne bit sets.
   ///
Index: lib/Analysis/ValueTracking.cpp
===================================================================
--- lib/Analysis/ValueTracking.cpp
+++ lib/Analysis/ValueTracking.cpp
@@ -47,7 +47,7 @@
 using namespace llvm;
 using namespace llvm::PatternMatch;
 
-const unsigned MaxDepth = 6;
+const unsigned MaxDepth = KnownBitsQuery::MaxDepth;
 
 // Controls the number of uses of the value searched for possible
 // dominating comparisons.
@@ -69,52 +69,6 @@
   return DL.getPointerTypeSizeInBits(Ty);
 }
 
-namespace {
-// Simplifying using an assume can only be done in a particular control-flow
-// context (the context instruction provides that context). If an assume and
-// the context instruction are not in the same block then the DT helps in
-// figuring out if we can use it.
-struct Query {
-  const DataLayout &DL;
-  AssumptionCache *AC;
-  const Instruction *CxtI;
-  const DominatorTree *DT;
-  // Unlike the other analyses, this may be a nullptr because not all clients
-  // provide it currently.
-  OptimizationRemarkEmitter *ORE;
-
-  /// Set of assumptions that should be excluded from further queries.
-  /// This is because of the potential for mutual recursion to cause
-  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
-  /// classic case of this is assume(x = y), which will attempt to determine
-  /// bits in x from bits in y, which will attempt to determine bits in y from
-  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
-  /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
-  /// (all of which can call computeKnownBits), and so on.
-  std::array<const Value *, MaxDepth> Excluded;
-  unsigned NumExcluded;
-
-  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
-        const DominatorTree *DT, OptimizationRemarkEmitter *ORE = nullptr)
-      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), NumExcluded(0) {}
-
-  Query(const Query &Q, const Value *NewExcl)
-      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE),
-        NumExcluded(Q.NumExcluded) {
-    Excluded = Q.Excluded;
-    Excluded[NumExcluded++] = NewExcl;
-    assert(NumExcluded <= Excluded.size());
-  }
-
-  bool isExcluded(const Value *Value) const {
-    if (NumExcluded == 0)
-      return false;
-    auto End = Excluded.begin() + NumExcluded;
-    return std::find(Excluded.begin(), End, Value) != End;
-  }
-};
-} // end anonymous namespace
-
 // Given the provided Value and, potentially, a context instruction, return
 // the preferred context instruction (if any).
 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
@@ -131,8 +85,8 @@
   return nullptr;
 }
 
-static void computeKnownBits(const Value *V, KnownBits &Known,
-                             unsigned Depth, const Query &Q);
+static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
+                             const KnownBitsQuery &Q);
 
 void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                             const DataLayout &DL, unsigned Depth,
@@ -140,17 +94,18 @@
                             const DominatorTree *DT,
                             OptimizationRemarkEmitter *ORE) {
   ::computeKnownBits(V, Known, Depth,
-                     Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
+                     KnownBitsQuery(DL, AC, safeCxtI(V, CxtI), DT, ORE));
 }
 
 static KnownBits computeKnownBits(const Value *V, unsigned Depth,
-                                  const Query &Q);
+                                  const KnownBitsQuery &Q);
 
 KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
-  return ::computeKnownBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
+  return ::computeKnownBits(V, Depth,
+                            KnownBitsQuery(DL, AC, safeCxtI(V, CxtI), DT));
 }
 
 bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
@@ -169,25 +124,26 @@
   return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
 }
 
-
 static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
-                                   const Query &Q);
+                                   const KnownBitsQuery &Q);
 
 bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                   bool OrZero, unsigned Depth,
                                   AssumptionCache *AC, const Instruction *CxtI,
                                   const DominatorTree *DT) {
-  return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
-                                  Query(DL, AC, safeCxtI(V, CxtI), DT));
+  return ::isKnownToBeAPowerOfTwo(
+      V, OrZero, Depth, KnownBitsQuery(DL, AC, safeCxtI(V, CxtI), DT));
 }
 
-static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
+static bool isKnownNonZero(const Value *V, unsigned Depth,
+                           const KnownBitsQuery &Q);
 
 bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
-  return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
+  return ::isKnownNonZero(V, Depth,
+                          KnownBitsQuery(DL, AC, safeCxtI(V, CxtI), DT));
 }
 
 bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                               unsigned Depth, AssumptionCache *AC,
@@ -217,42 +173,43 @@
   return Known.isNegative();
 }
 
-static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);
+static bool isKnownNonEqual(const Value *V1, const Value *V2,
+                            const KnownBitsQuery &Q);
 
 bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                            const DataLayout &DL, AssumptionCache *AC,
                            const Instruction *CxtI, const DominatorTree *DT) {
-  return ::isKnownNonEqual(V1, V2, Query(DL, AC,
-                                         safeCxtI(V1, safeCxtI(V2, CxtI)),
-                                         DT));
+  return ::isKnownNonEqual(
+      V1, V2, KnownBitsQuery(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT));
 }
 
 static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
-                              const Query &Q);
+                              const KnownBitsQuery &Q);
 
 bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                              const DataLayout &DL, unsigned Depth,
                              AssumptionCache *AC, const Instruction *CxtI,
                              const DominatorTree *DT) {
   return ::MaskedValueIsZero(V, Mask, Depth,
-                             Query(DL, AC, safeCxtI(V, CxtI), DT));
+                             KnownBitsQuery(DL, AC, safeCxtI(V, CxtI), DT));
 }
 
 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
-                                   const Query &Q);
+                                   const KnownBitsQuery &Q);
 
 unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                   unsigned Depth, AssumptionCache *AC,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
-  return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
+  return ::ComputeNumSignBits(V, Depth,
+                              KnownBitsQuery(DL, AC, safeCxtI(V, CxtI), DT));
 }
 
 static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
-                                   bool NSW,
-                                   KnownBits &KnownOut, KnownBits &Known2,
-                                   unsigned Depth, const Query &Q) {
+                                   bool NSW, KnownBits &KnownOut,
+                                   KnownBits &Known2, unsigned Depth,
+                                   const KnownBitsQuery &Q) {
   unsigned BitWidth = KnownOut.getBitWidth();
 
   // If an initial sequence of bits in the result is not needed, the
   // corresponding bits in the operands are not needed.
@@ -306,7 +263,7 @@
 
 static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                 KnownBits &Known, KnownBits &Known2,
-                                unsigned Depth, const Query &Q) {
+                                unsigned Depth, const KnownBitsQuery &Q) {
   unsigned BitWidth = Known.getBitWidth();
   computeKnownBits(Op1, Known, Depth + 1, Q);
   computeKnownBits(Op0, Known2, Depth + 1, Q);
@@ -495,7 +452,8 @@
 }
 
 static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
-                                       unsigned Depth, const Query &Q) {
+                                       unsigned Depth,
+                                       const KnownBitsQuery &Q) {
   // Use of assumptions is context-sensitive. If we don't have a context, we
   // cannot use them!
   if (!Q.AC || !Q.CxtI)
@@ -551,7 +509,7 @@
     if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
         Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
       KnownBits RHSKnown(BitWidth);
-      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+      computeKnownBits(A, RHSKnown, Depth+1, KnownBitsQuery(Q, I));
       Known.Zero |= RHSKnown.Zero;
       Known.One  |= RHSKnown.One;
     // assume(v & b = a)
@@ -560,9 +518,9 @@
                Pred == ICmpInst::ICMP_EQ &&
                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
       KnownBits RHSKnown(BitWidth);
-      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+      computeKnownBits(A, RHSKnown, Depth+1, KnownBitsQuery(Q, I));
       KnownBits MaskKnown(BitWidth);
-      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
+      computeKnownBits(B, MaskKnown, Depth+1, KnownBitsQuery(Q, I));
 
       // For those bits in the mask that are known to be one, we can propagate
       // known bits from the RHS to V.
@@ -574,9 +532,9 @@
                Pred == ICmpInst::ICMP_EQ &&
                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
       KnownBits RHSKnown(BitWidth);
-      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+      computeKnownBits(A, RHSKnown, Depth+1, KnownBitsQuery(Q, I));
       KnownBits MaskKnown(BitWidth);
-      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
+      computeKnownBits(B, MaskKnown, Depth+1, KnownBitsQuery(Q, I));
 
       // For those bits in the mask that are known to be one, we can propagate
       // inverted known bits from the RHS to V.
@@ -588,9 +546,9 @@
                Pred == ICmpInst::ICMP_EQ &&
                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
       KnownBits RHSKnown(BitWidth);
-      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+      computeKnownBits(A, RHSKnown, Depth+1, KnownBitsQuery(Q, I));
       KnownBits BKnown(BitWidth);
-      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
+      computeKnownBits(B, BKnown, Depth+1, KnownBitsQuery(Q, I));
 
       // For those bits in B that are known to be zero, we can propagate known
       // bits from the RHS to V.
@@ -602,9 +560,9 @@
                Pred == ICmpInst::ICMP_EQ &&
                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
       KnownBits RHSKnown(BitWidth);
-      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+      computeKnownBits(A, RHSKnown, Depth+1, KnownBitsQuery(Q, I));
       KnownBits BKnown(BitWidth);
-      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
+      computeKnownBits(B, BKnown, Depth+1, KnownBitsQuery(Q, I));
 
       // For those bits in B that are known to be zero, we can propagate
       // inverted known bits from the RHS to V.
@@ -616,9 +574,9 @@
                Pred == ICmpInst::ICMP_EQ &&
                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
       KnownBits RHSKnown(BitWidth);
-      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+      computeKnownBits(A, RHSKnown, Depth+1, KnownBitsQuery(Q, I));
       KnownBits BKnown(BitWidth);
-      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
+      computeKnownBits(B, BKnown, Depth+1, KnownBitsQuery(Q, I));
 
       // For those bits in B that are known to be zero, we can propagate known
       // bits from the RHS to V. For those bits in B that are known to be one,
@@ -633,9 +591,9 @@
                Pred == ICmpInst::ICMP_EQ &&
                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
       KnownBits RHSKnown(BitWidth);
-      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+      computeKnownBits(A, RHSKnown, Depth+1, KnownBitsQuery(Q, I));
       KnownBits BKnown(BitWidth);
-      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
+      computeKnownBits(B, BKnown, Depth+1, KnownBitsQuery(Q, I));
 
       // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
@@ -650,7 +608,7 @@
                Pred == ICmpInst::ICMP_EQ &&
                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
       KnownBits RHSKnown(BitWidth);
-      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+      computeKnownBits(A, RHSKnown, Depth+1, KnownBitsQuery(Q, I));
       // For those bits in RHS that are known, we can propagate them to known
       // bits in V shifted to the right by C.
       RHSKnown.Zero.lshrInPlace(C->getZExtValue());
@@ -663,7 +621,7 @@
                Pred == ICmpInst::ICMP_EQ &&
                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
       KnownBits RHSKnown(BitWidth);
-      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+      computeKnownBits(A, RHSKnown, Depth+1, KnownBitsQuery(Q, I));
       // For those bits in RHS that are known, we can propagate them inverted
       // to known bits in V shifted to the right by C.
       RHSKnown.One.lshrInPlace(C->getZExtValue());
@@ -678,7 +636,7 @@
                Pred == ICmpInst::ICMP_EQ &&
                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
       KnownBits RHSKnown(BitWidth);
-      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+      computeKnownBits(A, RHSKnown, Depth+1, KnownBitsQuery(Q, I));
       // For those bits in RHS that are known, we can propagate them to known
       // bits in V shifted to the right by C.
       Known.Zero |= RHSKnown.Zero << C->getZExtValue();
@@ -691,7 +649,7 @@
                Pred == ICmpInst::ICMP_EQ &&
                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
       KnownBits RHSKnown(BitWidth);
-      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+      computeKnownBits(A, RHSKnown, Depth+1, KnownBitsQuery(Q, I));
       // For those bits in RHS that are known, we can propagate them inverted
       // to known bits in V shifted to the right by C.
       Known.Zero |= RHSKnown.One << C->getZExtValue();
@@ -701,7 +659,7 @@
                Pred == ICmpInst::ICMP_SGE &&
                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
       KnownBits RHSKnown(BitWidth);
-      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+      computeKnownBits(A, RHSKnown, Depth+1, KnownBitsQuery(Q, I));
 
       if (RHSKnown.isNonNegative()) {
         // We know that the sign bit is zero.
@@ -712,7 +670,7 @@
                Pred == ICmpInst::ICMP_SGT &&
                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
       KnownBits RHSKnown(BitWidth);
-      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+      computeKnownBits(A, RHSKnown, Depth+1, KnownBitsQuery(Q, I));
 
       if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
         // We know that the sign bit is zero.
@@ -723,7 +681,7 @@
                Pred == ICmpInst::ICMP_SLE &&
                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
       KnownBits RHSKnown(BitWidth);
-      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+      computeKnownBits(A, RHSKnown, Depth+1, KnownBitsQuery(Q, I));
 
       if (RHSKnown.isNegative()) {
         // We know that the sign bit is one.
@@ -734,7 +692,7 @@
                Pred == ICmpInst::ICMP_SLT &&
                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
       KnownBits RHSKnown(BitWidth);
-      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+      computeKnownBits(A, RHSKnown, Depth+1, KnownBitsQuery(Q, I));
 
       if (RHSKnown.isZero() || RHSKnown.isNegative()) {
         // We know that the sign bit is one.
@@ -745,7 +703,7 @@
                Pred == ICmpInst::ICMP_ULE &&
                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
       KnownBits RHSKnown(BitWidth);
-      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+      computeKnownBits(A, RHSKnown, Depth+1, KnownBitsQuery(Q, I));
 
       // Whatever high bits in c are zero are known to be zero.
       Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
@@ -754,11 +712,11 @@
                Pred == ICmpInst::ICMP_ULT &&
                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
       KnownBits RHSKnown(BitWidth);
-      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
+      computeKnownBits(A, RHSKnown, Depth+1, KnownBitsQuery(Q, I));
 
       // Whatever high bits in c are zero are known to be zero (if c is a power
       // of 2, then one more).
-      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
+      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, KnownBitsQuery(Q, I)))
         Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
       else
         Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
@@ -792,9 +750,8 @@
 // results from calling KZF and KOF are conservatively combined for all
 // permitted shift amounts.
 static void computeKnownBitsFromShiftOperator(
-    const Operator *I, KnownBits &Known, KnownBits &Known2,
-    unsigned Depth, const Query &Q,
-    function_ref<APInt(const APInt &, unsigned)> KZF,
+    const Operator *I, KnownBits &Known, KnownBits &Known2, unsigned Depth,
+    const KnownBitsQuery &Q, function_ref<APInt(const APInt &, unsigned)> KZF,
     function_ref<APInt(const APInt &, unsigned)> KOF) {
   unsigned BitWidth = Known.getBitWidth();
 
@@ -881,7 +838,8 @@
 }
 
 static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
-                                         unsigned Depth, const Query &Q) {
+                                         unsigned Depth,
+                                         const KnownBitsQuery &Q) {
   unsigned BitWidth = Known.getBitWidth();
 
   KnownBits Known2(Known);
@@ -1461,7 +1419,8 @@
 
 /// Determine which bits of V are known to be either zero or one and return
 /// them.
-KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
+KnownBits computeKnownBits(const Value *V, unsigned Depth,
+                           const KnownBitsQuery &Q) {
   KnownBits Known(getBitWidth(V->getType(), Q.DL));
   computeKnownBits(V, Known, Depth, Q);
   return Known;
@@ -1483,7 +1442,7 @@
 /// same width as the vector element, and the bit is set only if it is true
 /// for all of the elements in the vector.
 void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
-                      const Query &Q) {
+                      const KnownBitsQuery &Q) {
   assert(V && "No Value?");
   assert(Depth <= MaxDepth && "Limit Search Depth");
   unsigned BitWidth = Known.getBitWidth();
@@ -1591,7 +1550,7 @@
 /// be a power of two when defined. Supports values with integer or pointer
 /// types and vectors of integers.
 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                            const Query &Q) {
-                            const Query &Q) {
+                            const KnownBitsQuery &Q) {
   if (const Constant *C = dyn_cast<Constant>(V)) {
     if (C->isNullValue())
       return OrZero;
@@ -1690,7 +1649,7 @@
 ///
 /// Currently this routine does not support vector GEPs.
 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
-                              const Query &Q) {
+                              const KnownBitsQuery &Q) {
   if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
     return false;
 
@@ -1769,7 +1728,7 @@
 /// specified, perform context-sensitive analysis and return true if the
 /// pointer couldn't possibly be null at the specified instruction.
 /// Supports values with integer or pointer type and vectors of integers.
-bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
+bool isKnownNonZero(const Value *V, unsigned Depth, const KnownBitsQuery &Q) {
   if (auto *C = dyn_cast<Constant>(V)) {
     if (C->isNullValue())
       return false;
@@ -1952,7 +1911,8 @@
 }
 
 /// Return true if V2 == V1 + X, where X is known non-zero.
-static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
+static bool isAddOfNonZero(const Value *V1, const Value *V2,
+                           const KnownBitsQuery &Q) {
   const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
   if (!BO || BO->getOpcode() != Instruction::Add)
     return false;
@@ -1967,7 +1927,8 @@
 }
 
 /// Return true if it is known that V1 != V2.
-static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
+static bool isKnownNonEqual(const Value *V1, const Value *V2,
+                            const KnownBitsQuery &Q) {
   if (V1->getType()->isVectorTy() || V1 == V2)
     return false;
   if (V1->getType() != V2->getType())
@@ -2003,7 +1964,7 @@
 /// same width as the vector element, and the bit is set only if it is true
 /// for all of the elements in the vector.
 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
-                       const Query &Q) {
+                       const KnownBitsQuery &Q) {
   KnownBits Known(Mask.getBitWidth());
   computeKnownBits(V, Known, Depth, Q);
   return Mask.isSubsetOf(Known.Zero);
@@ -2038,10 +1999,10 @@
 }
 
 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
-                                       const Query &Q);
+                                       const KnownBitsQuery &Q);
 
 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
-                                   const Query &Q) {
+                                   const KnownBitsQuery &Q) {
   unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
   assert(Result > 0 && "At least one sign bit needs to be present!");
   return Result;
@@ -2054,7 +2015,7 @@
 /// other, so we return 3. For vectors, return the number of sign bits for the
 /// vector element with the mininum number of known sign bits.
 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
-                                       const Query &Q) {
+                                       const KnownBitsQuery &Q) {
   // We return the minimum number of sign bits that are guaranteed to be present
   // in V, so for undef we have to conservatively return 1. We don't have the
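
Usage note, in case the now-public struct invites direct use: this patch only renames the internal Query struct to KnownBitsQuery and hoists it into the header; the overloads that take it stay static in ValueTracking.cpp, so clients still go through the public wrappers, which bundle DL/AC/CxtI/DT into a KnownBitsQuery before dispatching. A minimal sketch of a typical caller follows, assuming pass code that already has the analyses at hand; the helper isPtr8ByteAligned is hypothetical, not part of this patch:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;

// Hypothetical helper: is Ptr known to be 8-byte aligned at CxtI?
// llvm::computeKnownBits internally builds KnownBitsQuery(DL, AC, CxtI, DT)
// and forwards to the static implementation in ValueTracking.cpp.
static bool isPtr8ByteAligned(const Value *Ptr, const DataLayout &DL,
                              AssumptionCache *AC, const Instruction *CxtI,
                              const DominatorTree *DT) {
  KnownBits Known = computeKnownBits(Ptr, DL, /*Depth=*/0, AC, CxtI, DT);
  // Three known-zero low bits imply 8-byte alignment.
  return Known.countMinTrailingZeros() >= 3;
}

Design-wise, the KnownBitsQuery(Q, I) copy constructor used throughout the assume handling above is what breaks the assume(x = y) cycle described in the struct's comment: each recursive query records the llvm.assume it is currently expanding in Excluded, the assumption walk skips anything isExcluded() reports, and the fixed MaxDepth-sized array keeps the mechanism allocation-free.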