diff --git a/llvm/include/llvm/ADT/APInt.h b/llvm/include/llvm/ADT/APInt.h
--- a/llvm/include/llvm/ADT/APInt.h
+++ b/llvm/include/llvm/ADT/APInt.h
@@ -183,13 +183,11 @@
   static APInt getZeroWidth() { return getZero(0); }
   /// Gets maximum unsigned value of APInt for specific bit width.
-  static APInt getMaxValue(unsigned numBits) {
-    return getAllOnesValue(numBits);
-  }
+  static APInt getMaxValue(unsigned numBits) { return getAllOnes(numBits); }
   /// Gets maximum signed value of APInt for a specific bit width.
   static APInt getSignedMaxValue(unsigned numBits) {
-    APInt API = getAllOnesValue(numBits);
+    APInt API = getAllOnes(numBits);
     API.clearBit(numBits - 1);
     return API;
   }
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -1238,7 +1238,7 @@
     assert(Indices.size() <= Factor &&
            "Interleaved memory op has too many members");
-    APInt DemandedLoadStoreElts = APInt::getNullValue(NumElts);
+    APInt DemandedLoadStoreElts = APInt::getZero(NumElts);
     for (unsigned Index : Indices) {
       assert(Index < Factor && "Invalid index for interleaved memory op");
       for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
diff --git a/llvm/include/llvm/IR/Constants.h b/llvm/include/llvm/IR/Constants.h
--- a/llvm/include/llvm/IR/Constants.h
+++ b/llvm/include/llvm/IR/Constants.h
@@ -203,7 +203,7 @@
   /// to true.
   /// @returns true iff this constant's bits are all set to true.
   /// Determine if the value is all ones.
-  bool isMinusOne() const { return Val.isAllOnesValue(); }
+  bool isMinusOne() const { return Val.isAllOnes(); }
   /// This function will return true iff this constant represents the largest
   /// value that may be represented by the constant's type.
diff --git a/llvm/include/llvm/IR/PatternMatch.h b/llvm/include/llvm/IR/PatternMatch.h
--- a/llvm/include/llvm/IR/PatternMatch.h
+++ b/llvm/include/llvm/IR/PatternMatch.h
@@ -438,7 +438,7 @@
 }
 struct is_all_ones {
-  bool isValue(const APInt &C) { return C.isAllOnesValue(); }
+  bool isValue(const APInt &C) { return C.isAllOnes(); }
 };
 /// Match an integer or vector with all bits set.
 /// For vectors, this includes constants with undefined elements.
@@ -506,7 +506,7 @@
 inline api_pred_ty<is_nonpositive> m_NonPositive(const APInt *&V) { return V; }
 struct is_one {
-  bool isValue(const APInt &C) { return C.isOneValue(); }
+  bool isValue(const APInt &C) { return C.isOne(); }
 };
 /// Match an integer 1 or a vector with all elements equal to 1.
 /// For vectors, this includes constants with undefined elements.
diff --git a/llvm/include/llvm/Support/KnownBits.h b/llvm/include/llvm/Support/KnownBits.h
--- a/llvm/include/llvm/Support/KnownBits.h
+++ b/llvm/include/llvm/Support/KnownBits.h
@@ -71,13 +71,13 @@
   /// Returns true if value is all zero.
   bool isZero() const {
     assert(!hasConflict() && "KnownBits conflict!");
-    return Zero.isAllOnesValue();
+    return Zero.isAllOnes();
   }
   /// Returns true if value is all one bits.
   bool isAllOnes() const {
     assert(!hasConflict() && "KnownBits conflict!");
-    return One.isAllOnesValue();
+    return One.isAllOnes();
   }
   /// Make all bits known to be zero and discard any previous information.
@@ -294,7 +294,7 @@
   /// Return true if LHS and RHS have no common bits set.
   static bool haveNoCommonBitsSet(const KnownBits &LHS, const KnownBits &RHS) {
-    return (LHS.Zero | RHS.Zero).isAllOnesValue();
+    return (LHS.Zero | RHS.Zero).isAllOnes();
   }
   /// Compute known bits resulting from adding LHS, RHS and a 1-bit Carry.
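The header changes above define the renamed entry points that the rest of this patch migrates callers to. As a minimal sketch (not part of the patch; the `check()` helper is hypothetical, for illustration only), the mapping is:

    // Deprecated name            ->  new name
    // APInt::getNullValue(N)     ->  APInt::getZero(N)
    // APInt::getAllOnesValue(N)  ->  APInt::getAllOnes(N)
    // V.isNullValue()            ->  V.isZero()
    // V.isOneValue()             ->  V.isOne()
    // V.isAllOnesValue()         ->  V.isAllOnes()
    #include "llvm/ADT/APInt.h"
    #include <cassert>
    using llvm::APInt;
    static void check() {
      APInt Z = APInt::getZero(8);     // was APInt::getNullValue(8)
      APInt M = APInt::getAllOnes(8);  // was APInt::getAllOnesValue(8)
      assert(Z.isZero() && !Z.isOne()); // was isNullValue()/isOneValue()
      assert(M.isAllOnes());            // was isAllOnesValue()
    }

The remaining hunks are purely mechanical applications of this mapping.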
diff --git a/llvm/lib/Analysis/CmpInstAnalysis.cpp b/llvm/lib/Analysis/CmpInstAnalysis.cpp
--- a/llvm/lib/Analysis/CmpInstAnalysis.cpp
+++ b/llvm/lib/Analysis/CmpInstAnalysis.cpp
@@ -77,28 +77,28 @@
     return false;
   case ICmpInst::ICMP_SLT:
     // X < 0 is equivalent to (X & SignMask) != 0.
-    if (!C->isNullValue())
+    if (!C->isZero())
       return false;
     Mask = APInt::getSignMask(C->getBitWidth());
     Pred = ICmpInst::ICMP_NE;
     break;
   case ICmpInst::ICMP_SLE:
     // X <= -1 is equivalent to (X & SignMask) != 0.
-    if (!C->isAllOnesValue())
+    if (!C->isAllOnes())
       return false;
     Mask = APInt::getSignMask(C->getBitWidth());
     Pred = ICmpInst::ICMP_NE;
     break;
   case ICmpInst::ICMP_SGT:
     // X > -1 is equivalent to (X & SignMask) == 0.
-    if (!C->isAllOnesValue())
+    if (!C->isAllOnes())
       return false;
     Mask = APInt::getSignMask(C->getBitWidth());
     Pred = ICmpInst::ICMP_EQ;
     break;
   case ICmpInst::ICMP_SGE:
     // X >= 0 is equivalent to (X & SignMask) == 0.
-    if (!C->isNullValue())
+    if (!C->isZero())
       return false;
     Mask = APInt::getSignMask(C->getBitWidth());
     Pred = ICmpInst::ICMP_EQ;
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -795,11 +795,11 @@
   if (Opc == Instruction::And) {
     KnownBits Known0 = computeKnownBits(Op0, DL);
     KnownBits Known1 = computeKnownBits(Op1, DL);
-    if ((Known1.One | Known0.Zero).isAllOnesValue()) {
+    if ((Known1.One | Known0.Zero).isAllOnes()) {
       // All the bits of Op0 that the 'and' could be masking are already zero.
       return Op0;
     }
-    if ((Known0.One | Known1.Zero).isAllOnesValue()) {
+    if ((Known0.One | Known1.Zero).isAllOnes()) {
       // All the bits of Op1 that the 'and' could be masking are already zero.
       return Op1;
     }
@@ -2651,7 +2651,7 @@
     assert(C1 && "Must be constant int");
     // cttz(0, 1) and ctlz(0, 1) are undef.
-    if (C1->isOneValue() && (!C0 || C0->isNullValue()))
+    if (C1->isOne() && (!C0 || C0->isZero()))
       return UndefValue::get(Ty);
     if (!C0)
       return Constant::getNullValue(Ty);
@@ -2663,11 +2663,11 @@
   case Intrinsic::abs:
     // Undef or minimum val operand with poison min --> undef
     assert(C1 && "Must be constant int");
-    if (C1->isOneValue() && (!C0 || C0->isMinSignedValue()))
+    if (C1->isOne() && (!C0 || C0->isMinSignedValue()))
       return UndefValue::get(Ty);
     // Undef operand with no poison min --> 0 (sign bit must be clear)
-    if (C1->isNullValue() && !C0)
+    if (C1->isZero() && !C0)
       return Constant::getNullValue(Ty);
     return ConstantInt::get(Ty, C0->abs());
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -2053,13 +2053,13 @@
     // If all bits in the inverted and shifted mask are clear:
     // and (shl X, ShAmt), Mask --> shl X, ShAmt
     if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) &&
-        (~(*Mask)).lshr(*ShAmt).isNullValue())
+        (~(*Mask)).lshr(*ShAmt).isZero())
       return Op0;
     // If all bits in the inverted and shifted mask are clear:
     // and (lshr X, ShAmt), Mask --> lshr X, ShAmt
     if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
-        (~(*Mask)).shl(*ShAmt).isNullValue())
+        (~(*Mask)).shl(*ShAmt).isZero())
       return Op0;
   }
@@ -3109,7 +3109,7 @@
   //  - C isn't zero.
   if (Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
       Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
-      match(LHS, m_Shl(m_One(), m_Value())) || !C->isNullValue()) {
+      match(LHS, m_Shl(m_One(), m_Value())) || !C->isZero()) {
     if (Pred == ICmpInst::ICMP_EQ)
       return ConstantInt::getFalse(GetCompareTy(RHS));
     if (Pred == ICmpInst::ICMP_NE)
@@ -4432,14 +4432,14 @@
   // gep (gep V, C), (sub 0, V) -> C
   if (match(Ops.back(),
             m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr)))) &&
-      !BasePtrOffset.isNullValue()) {
+      !BasePtrOffset.isZero()) {
     auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
     return ConstantExpr::getIntToPtr(CI, GEPTy);
   }
   // gep (gep V, C), (xor V, -1) -> C-1
   if (match(Ops.back(),
             m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes())) &&
-      !BasePtrOffset.isOneValue()) {
+      !BasePtrOffset.isOne()) {
     auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
     return ConstantExpr::getIntToPtr(CI, GEPTy);
   }
@@ -5872,7 +5872,7 @@
     if (match(ShAmtArg, m_APInt(ShAmtC))) {
       // If there's effectively no shift, return the 1st arg or 2nd arg.
       APInt BitWidth = APInt(ShAmtC->getBitWidth(), ShAmtC->getBitWidth());
-      if (ShAmtC->urem(BitWidth).isNullValue())
+      if (ShAmtC->urem(BitWidth).isZero())
         return Call->getArgOperand(IID == Intrinsic::fshl ? 0 : 1);
     }
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -1117,8 +1117,7 @@
   }
   // If (Val & Mask) != 0 then the value must be larger than the lowest set
   // bit of Mask.
-  if (EdgePred == ICmpInst::ICMP_NE && !Mask->isNullValue() &&
-      C->isNullValue()) {
+  if (EdgePred == ICmpInst::ICMP_NE && !Mask->isZero() && C->isZero()) {
     unsigned BitWidth = Ty->getIntegerBitWidth();
     return ValueLatticeElement::getRange(ConstantRange::getNonEmpty(
         APInt::getOneBitSet(BitWidth, Mask->countTrailingZeros()),
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -6116,7 +6116,7 @@
     // initial value.
     if (AddRec->hasNoUnsignedWrap()) {
       APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart());
-      if (!UnsignedMinValue.isNullValue())
+      if (!UnsignedMinValue.isZero())
         ConservativeResult = ConservativeResult.intersectWith(
             ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType);
     }
@@ -6218,9 +6218,9 @@
       if (NS > 1) {
         // If we know any of the sign bits, we know all of the sign bits.
-        if (!Known.Zero.getHiBits(NS).isNullValue())
+        if (!Known.Zero.getHiBits(NS).isZero())
           Known.Zero.setHighBits(NS);
-        if (!Known.One.getHiBits(NS).isNullValue())
+        if (!Known.One.getHiBits(NS).isZero())
           Known.One.setHighBits(NS);
       }
@@ -9173,7 +9173,7 @@
   APInt L = LC->getAPInt();
   APInt M = MC->getAPInt();
   APInt N = NC->getAPInt();
-  assert(!N.isNullValue() && "This is not a quadratic addrec");
+  assert(!N.isZero() && "This is not a quadratic addrec");
   unsigned BitWidth = LC->getAPInt().getBitWidth();
   unsigned NewWidth = BitWidth + 1;
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -166,7 +166,7 @@
       cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
   int NumMaskElts = cast<FixedVectorType>(Shuf->getType())->getNumElements();
   DemandedLHS = DemandedRHS = APInt::getZero(NumElts);
-  if (DemandedElts.isNullValue())
+  if (DemandedElts.isZero())
     return true;
   // Simple case of a shuffle with zeroinitializer.
   if (all_of(Shuf->getShuffleMask(), [](int Elt) { return Elt == 0; })) {
@@ -1378,7 +1378,7 @@
     Known = KnownBits::computeForAddSub(
         /*Add=*/true, /*NSW=*/false, Known, IndexBits);
   }
-  if (!Known.isUnknown() && !AccConstIndices.isNullValue()) {
+  if (!Known.isUnknown() && !AccConstIndices.isZero()) {
     KnownBits Index = KnownBits::makeConstant(AccConstIndices);
     Known = KnownBits::computeForAddSub(
         /*Add=*/true, /*NSW=*/false, Known, Index);
@@ -2270,7 +2270,7 @@
   Value *Start = nullptr, *Step = nullptr;
   const APInt *StartC, *StepC;
   if (!matchSimpleRecurrence(PN, BO, Start, Step) ||
-      !match(Start, m_APInt(StartC)) || StartC->isNullValue())
+      !match(Start, m_APInt(StartC)) || StartC->isZero())
     return false;
   switch (BO->getOpcode()) {
@@ -2282,7 +2282,7 @@
            StartC->isNegative() == StepC->isNegative());
   case Instruction::Mul:
     return (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) &&
-           match(Step, m_APInt(StepC)) && !StepC->isNullValue();
+           match(Step, m_APInt(StepC)) && !StepC->isZero();
   case Instruction::Shl:
     return BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap();
   case Instruction::AShr:
@@ -2716,8 +2716,7 @@
       const APInt *C;
       return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) &&
              (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
-             !C->isNullValue() && !C->isOneValue() &&
-             isKnownNonZero(V1, Depth + 1, Q);
+             !C->isZero() && !C->isOne() && isKnownNonZero(V1, Depth + 1, Q);
     }
     return false;
   }
@@ -2730,7 +2729,7 @@
       const APInt *C;
       return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) &&
              (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
-             !C->isNullValue() && isKnownNonZero(V1, Depth + 1, Q);
+             !C->isZero() && isKnownNonZero(V1, Depth + 1, Q);
     }
     return false;
   }
@@ -3073,7 +3072,7 @@
     // If the input is known to be 0 or 1, the output is 0/-1, which is
     // all sign bits set.
-    if ((Known.Zero | 1).isAllOnesValue())
+    if ((Known.Zero | 1).isAllOnes())
      return TyBits;
     // If we are subtracting one from a positive number, there is no carry
@@ -3097,7 +3096,7 @@
     computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
     // If the input is known to be 0 or 1, the output is 0/-1, which is
     // all sign bits set.
-    if ((Known.Zero | 1).isAllOnesValue())
+    if ((Known.Zero | 1).isAllOnes())
       return TyBits;
     // If the input is known to be positive (the sign bit is known clear),
@@ -4642,7 +4641,7 @@
   if (*Denominator == 0)
     return false;
   // It's safe to hoist if the denominator is not 0 or -1.
-  if (!Denominator->isAllOnesValue())
+  if (!Denominator->isAllOnes())
     return true;
   // At this point we know that the denominator is -1. It is safe to hoist as
   // long we know that the numerator is not INT_MIN.
@@ -5863,15 +5862,13 @@
   // Is the sign bit set?
   // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
   // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
-  if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
-      C2->isMaxSignedValue())
+  if (Pred == CmpInst::ICMP_SLT && C1->isZero() && C2->isMaxSignedValue())
     return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
   // Is the sign bit clear?
   // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
   // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
-  if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
-      C2->isMinSignedValue())
+  if (Pred == CmpInst::ICMP_SGT && C1->isAllOnes() && C2->isMinSignedValue())
     return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
 }
@@ -6719,7 +6716,7 @@
   const APInt *C;
   switch (BO.getOpcode()) {
   case Instruction::Add:
-    if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
+    if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
       // FIXME: If we have both nuw and nsw, we should reduce the range further.
       if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
         // 'add nuw x, C' produces [C, UINT_MAX].
@@ -6757,7 +6754,7 @@
       Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
     } else if (match(BO.getOperand(0), m_APInt(C))) {
       unsigned ShiftAmount = Width - 1;
-      if (!C->isNullValue() && IIQ.isExact(&BO))
+      if (!C->isZero() && IIQ.isExact(&BO))
         ShiftAmount = C->countTrailingZeros();
       if (C->isNegative()) {
         // 'ashr C, x' produces [C, C >> (Width-1)]
@@ -6778,7 +6775,7 @@
     } else if (match(BO.getOperand(0), m_APInt(C))) {
       // 'lshr C, x' produces [C >> (Width-1), C].
       unsigned ShiftAmount = Width - 1;
-      if (!C->isNullValue() && IIQ.isExact(&BO))
+      if (!C->isZero() && IIQ.isExact(&BO))
         ShiftAmount = C->countTrailingZeros();
       Lower = C->lshr(ShiftAmount);
       Upper = *C + 1;
@@ -6811,7 +6808,7 @@
     if (match(BO.getOperand(1), m_APInt(C))) {
       APInt IntMin = APInt::getSignedMinValue(Width);
       APInt IntMax = APInt::getSignedMaxValue(Width);
-      if (C->isAllOnesValue()) {
+      if (C->isAllOnes()) {
         // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
         // where C != -1 and C != 0 and C != 1
         Lower = IntMin + 1;
@@ -6840,7 +6837,7 @@
     break;
   case Instruction::UDiv:
-    if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
+    if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
       // 'udiv x, C' produces [0, UINT_MAX / C].
       Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
     } else if (match(BO.getOperand(0), m_APInt(C))) {
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -4187,7 +4187,7 @@
     if (Inst->getOpcode() == Instruction::Xor) {
       const ConstantInt *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1));
       // Make sure it is not a NOT.
-      if (Cst && !Cst->getValue().isAllOnesValue())
+      if (Cst && !Cst->getValue().isAllOnes())
         return true;
     }
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -2242,7 +2242,7 @@
   assert(MI.getOpcode() == TargetOpcode::G_SELECT);
   if (auto MaybeCstCmp =
           getIConstantVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI)) {
-    OpIdx = MaybeCstCmp->Value.isNullValue() ? 3 : 2;
+    OpIdx = MaybeCstCmp->Value.isZero() ? 3 : 2;
     return true;
   }
   return false;
@@ -2768,14 +2768,14 @@
   //
   // Check if we can replace OrDst with the LHS of the G_OR
   if (canReplaceReg(OrDst, LHS, MRI) &&
-      (LHSBits.One | RHSBits.Zero).isAllOnesValue()) {
+      (LHSBits.One | RHSBits.Zero).isAllOnes()) {
     Replacement = LHS;
     return true;
   }
   // Check if we can replace OrDst with the RHS of the G_OR
   if (canReplaceReg(OrDst, RHS, MRI) &&
-      (LHSBits.Zero | RHSBits.One).isAllOnesValue()) {
+      (LHSBits.Zero | RHSBits.One).isAllOnes()) {
     Replacement = RHS;
     return true;
   }
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -4820,7 +4820,7 @@
   Register InH = MRI.createGenericVirtualRegister(HalfTy);
   MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1));
-  if (Amt.isNullValue()) {
+  if (Amt.isZero()) {
     MIRBuilder.buildMerge(MI.getOperand(0), {InL, InH});
     MI.eraseFromParent();
     return Legalized;
diff --git a/llvm/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp b/llvm/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
--- a/llvm/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
@@ -570,7 +570,7 @@
     assert((ValueMask & PartMapMask) == PartMapMask &&
            "Some partial mappings overlap");
   }
-  assert(ValueMask.isAllOnesValue() && "Value is not fully mapped");
+  assert(ValueMask.isAllOnes() && "Value is not fully mapped");
   return true;
 }
diff --git a/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp b/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp
--- a/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp
+++ b/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp
@@ -308,7 +308,7 @@
   }
   // Multiplying by one is a no-op.
-  if (C.isOneValue()) {
+  if (C.isOne()) {
     return *this;
   }
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -9421,7 +9421,7 @@
   }
   // select Cond, Pow2, 0 --> (zext Cond) << log2(Pow2)
-  if (C1Val.isPowerOf2() && C2Val.isNullValue()) {
+  if (C1Val.isPowerOf2() && C2Val.isZero()) {
     if (VT != MVT::i1)
       Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
     SDValue ShAmtC = DAG.getConstant(C1Val.exactLogBase2(), DL, VT);
@@ -11272,7 +11272,7 @@
   Known = DAG.computeKnownBits(Op);
-  return (Known.Zero | 1).isAllOnesValue();
+  return (Known.Zero | 1).isAllOnes();
 }
 /// Given an extending node with a pop-count operand, if the target does not
@@ -16309,7 +16309,7 @@
 /// \p UsedBits looks like 0..0 1..1 0..0.
 static bool areUsedBitsDense(const APInt &UsedBits) {
   // If all the bits are one, this is dense!
-  if (UsedBits.isAllOnesValue())
+  if (UsedBits.isAllOnes())
     return true;
   // Get rid of the unused bits on the right.
@@ -16318,7 +16318,7 @@
   if (NarrowedUsedBits.countLeadingZeros())
     NarrowedUsedBits = NarrowedUsedBits.trunc(NarrowedUsedBits.getActiveBits());
   // Check that the chunk of bits is completely used.
-  return NarrowedUsedBits.isAllOnesValue();
+  return NarrowedUsedBits.isAllOnes();
 }
 /// Check whether or not \p First and \p Second are next to each other
@@ -16737,7 +16737,7 @@
   APInt Imm = cast<ConstantSDNode>(N1)->getAPIntValue();
   if (Opc == ISD::AND)
     Imm ^= APInt::getAllOnes(BitWidth);
-  if (Imm == 0 || Imm.isAllOnesValue())
+  if (Imm == 0 || Imm.isAllOnes())
     return SDValue();
   unsigned ShAmt = Imm.countTrailingZeros();
   unsigned MSB = BitWidth - Imm.countLeadingZeros() - 1;
@@ -22132,7 +22132,7 @@
     else
       Bits = Bits.extractBits(NumSubBits, SubIdx * NumSubBits);
-    if (Bits.isAllOnesValue())
+    if (Bits.isAllOnes())
       Indices.push_back(i);
     else if (Bits == 0)
       Indices.push_back(i + NumSubElts);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -175,7 +175,7 @@
   if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
     APInt SplatVal;
-    return isConstantSplatVector(N, SplatVal) && SplatVal.isAllOnesValue();
+    return isConstantSplatVector(N, SplatVal) && SplatVal.isAllOnes();
   }
   if (N->getOpcode() != ISD::BUILD_VECTOR)
     return false;
@@ -224,7 +224,7 @@
   if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
     APInt SplatVal;
-    return isConstantSplatVector(N, SplatVal) && SplatVal.isNullValue();
+    return isConstantSplatVector(N, SplatVal) && SplatVal.isZero();
   }
   if (N->getOpcode() != ISD::BUILD_VECTOR)
     return false;
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -672,7 +672,7 @@
     for (unsigned i = 0; i != Scale; ++i) {
       unsigned Offset = i * NumSrcEltBits;
       APInt Sub = DemandedBits.extractBits(NumSrcEltBits, Offset);
-      if (!Sub.isNullValue()) {
+      if (!Sub.isZero()) {
         DemandedSrcBits |= Sub;
         for (unsigned j = 0; j != NumElts; ++j)
           if (DemandedElts[j])
@@ -1612,7 +1612,7 @@
     // always convert this into a logical shr, even if the shift amount is
     // variable. The low bit of the shift cannot be an input sign bit unless
     // the shift amount is >= the size of the datatype, which is undefined.
-    if (DemandedBits.isOneValue())
+    if (DemandedBits.isOne())
       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1));
     if (const APInt *SA =
@@ -1788,7 +1788,7 @@
     // If only 1 bit is demanded, replace with PARITY as long as we're before
     // op legalization.
     // FIXME: Limit to scalars for now.
-    if (DemandedBits.isOneValue() && !TLO.LegalOps && !VT.isVector())
+    if (DemandedBits.isOne() && !TLO.LegalOps && !VT.isVector())
       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::PARITY, dl, VT,
                                                Op.getOperand(0)));
@@ -2149,7 +2149,7 @@
     for (unsigned i = 0; i != Scale; ++i) {
       unsigned Offset = i * NumSrcEltBits;
       APInt Sub = DemandedBits.extractBits(NumSrcEltBits, Offset);
-      if (!Sub.isNullValue()) {
+      if (!Sub.isZero()) {
         DemandedSrcBits |= Sub;
         for (unsigned j = 0; j != NumElts; ++j)
           if (DemandedElts[j])
@@ -3109,7 +3109,7 @@
   case UndefinedBooleanContent:
     return CVal[0];
   case ZeroOrOneBooleanContent:
-    return CVal.isOneValue();
+    return CVal.isOne();
   case ZeroOrNegativeOneBooleanContent:
     return CVal.isAllOnes();
   }
@@ -3323,7 +3323,7 @@
     EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond,
     DAGCombinerInfo &DCI, const SDLoc &DL) const {
   assert(isConstOrConstSplat(N1C) &&
-         isConstOrConstSplat(N1C)->getAPIntValue().isNullValue() &&
+         isConstOrConstSplat(N1C)->getAPIntValue().isZero() &&
          "Should be a comparison with 0.");
   assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
          "Valid only for [in]equality comparisons.");
@@ -3546,7 +3546,7 @@
     // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
     // equality comparison, then we're just comparing whether X itself is
     // zero.
-    if (N0.getOpcode() == ISD::SRL && (C1.isNullValue() || C1.isOneValue()) &&
+    if (N0.getOpcode() == ISD::SRL && (C1.isZero() || C1.isOne()) &&
         N0.getOperand(0).getOpcode() == ISD::CTLZ &&
         isPowerOf2_32(N0.getScalarValueSizeInBits())) {
       if (ConstantSDNode *ShAmt = isConstOrConstSplat(N0.getOperand(1))) {
@@ -4019,7 +4019,7 @@
       // For example, when high 32-bits of i64 X are known clear:
       // all bits clear: (X | (Y<<32)) == 0 --> (X | Y) == 0
       // all bits set:   (X | (Y<<32)) == -1 --> (X & Y) == -1
-      bool CmpZero = N1C->getAPIntValue().isNullValue();
+      bool CmpZero = N1C->getAPIntValue().isZero();
       bool CmpNegOne = N1C->getAPIntValue().isAllOnes();
       if ((CmpZero || CmpNegOne) && N0.hasOneUse()) {
         // Match or(lo,shl(hi,bw/2)) pattern.
@@ -5275,7 +5275,7 @@
     int NumeratorFactor = 0;
     int ShiftMask = -1;
-    if (Divisor.isOneValue() || Divisor.isAllOnes()) {
+    if (Divisor.isOne() || Divisor.isAllOnes()) {
       // If d is +1/-1, we just multiply the numerator by +1/-1.
       NumeratorFactor = Divisor.getSExtValue();
       magics.m = 0;
@@ -5432,7 +5432,7 @@
   APInt Magic = magics.m;
   unsigned SelNPQ;
-  if (magics.a == 0 || Divisor.isOneValue()) {
+  if (magics.a == 0 || Divisor.isOne()) {
     assert(magics.s < Divisor.getBitWidth() &&
            "We shouldn't generate an undefined shift!");
     PostShift = magics.s;
@@ -5632,7 +5632,7 @@
       const APInt &D = CDiv->getAPIntValue();
       const APInt &Cmp = CCmp->getAPIntValue();
-      ComparingWithAllZeros &= Cmp.isNullValue();
+      ComparingWithAllZeros &= Cmp.isZero();
       // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
       // if C2 is not less than C1, the comparison is always false.
@@ -5644,26 +5644,26 @@
       // If all lanes are tautological (either all divisors are ones, or divisor
       // is not greater than the constant we are comparing with),
       // we will prefer to avoid the fold.
-      bool TautologicalLane = D.isOneValue() || TautologicalInvertedLane;
+      bool TautologicalLane = D.isOne() || TautologicalInvertedLane;
       HadTautologicalLanes |= TautologicalLane;
       AllLanesAreTautological &= TautologicalLane;
       // If we are comparing with non-zero, we'll need to subtract said
       // comparison value from the LHS. But there is no point in doing that if
       // every lane where we are comparing with non-zero is tautological.
-      if (!Cmp.isNullValue())
+      if (!Cmp.isZero())
         AllComparisonsWithNonZerosAreTautological &= TautologicalLane;
       // Decompose D into D0 * 2^K
       unsigned K = D.countTrailingZeros();
-      assert((!D.isOneValue() || (K == 0)) && "For divisor '1' we won't rotate.");
+      assert((!D.isOne() || (K == 0)) && "For divisor '1' we won't rotate.");
       APInt D0 = D.lshr(K);
       // D is even if it has trailing zeros.
       HadEvenDivisor |= (K != 0);
       // D is a power-of-two if D0 is one.
       // If all divisors are power-of-two, we will prefer to avoid the fold.
-      AllDivisorsArePowerOfTwo &= D0.isOneValue();
+      AllDivisorsArePowerOfTwo &= D0.isOne();
       // P = inv(D0, 2^W)
       // 2^W requires W + 1 bits, so we have to extend and then truncate.
@@ -5671,8 +5671,8 @@
       APInt P = D0.zext(W + 1)
                     .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
                     .trunc(W);
-      assert(!P.isNullValue() && "No multiplicative inverse!"); // unreachable
-      assert((D0 * P).isOneValue() && "Multiplicative inverse sanity check.");
+      assert(!P.isZero() && "No multiplicative inverse!"); // unreachable
+      assert((D0 * P).isOne() && "Multiplicative inverse sanity check.");
       // Q = floor((2^W - 1) u/ D)
       // R = ((2^W - 1) u% D)
@@ -5893,12 +5893,12 @@
       HadIntMinDivisor |= D.isMinSignedValue();
       // If all divisors are ones, we will prefer to avoid the fold.
-      HadOneDivisor |= D.isOneValue();
-      AllDivisorsAreOnes &= D.isOneValue();
+      HadOneDivisor |= D.isOne();
+      AllDivisorsAreOnes &= D.isOne();
       // Decompose D into D0 * 2^K
       unsigned K = D.countTrailingZeros();
-      assert((!D.isOneValue() || (K == 0)) && "For divisor '1' we won't rotate.");
+      assert((!D.isOne() || (K == 0)) && "For divisor '1' we won't rotate.");
       APInt D0 = D.lshr(K);
       if (!D.isMinSignedValue()) {
@@ -5909,7 +5909,7 @@
       // D is a power-of-two if D0 is one. This includes INT_MIN.
       // If all divisors are power-of-two, we will prefer to avoid the fold.
-      AllDivisorsArePowerOfTwo &= D0.isOneValue();
+      AllDivisorsArePowerOfTwo &= D0.isOne();
       // P = inv(D0, 2^W)
       // 2^W requires W + 1 bits, so we have to extend and then truncate.
@@ -5917,8 +5917,8 @@
       APInt P = D0.zext(W + 1)
                     .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
                     .trunc(W);
-      assert(!P.isNullValue() && "No multiplicative inverse!"); // unreachable
-      assert((D0 * P).isOneValue() && "Multiplicative inverse sanity check.");
+      assert(!P.isZero() && "No multiplicative inverse!"); // unreachable
+      assert((D0 * P).isOne() && "Multiplicative inverse sanity check.");
       // A = floor((2^(W - 1) - 1) / D0) & -2^K
       APInt A = APInt::getSignedMaxValue(W).udiv(D0);
@@ -5940,7 +5940,7 @@
       // If the divisor is 1 the result can be constant-folded. Likewise, we
       // don't care about INT_MIN lanes, those can be set to undef if appropriate.
-      if (D.isOneValue()) {
+      if (D.isOne()) {
         // Set P, A and K to a bogus values so we can try to splat them.
         P = 0;
         A = -1;
diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp
--- a/llvm/lib/IR/AsmWriter.cpp
+++ b/llvm/lib/IR/AsmWriter.cpp
@@ -1763,7 +1763,7 @@
 void MDFieldPrinter::printAPInt(StringRef Name, const APInt &Int,
                                 bool IsUnsigned, bool ShouldSkipZero) {
-  if (ShouldSkipZero && Int.isNullValue())
+  if (ShouldSkipZero && Int.isZero())
     return;
   Out << FS << Name << ": ";
diff --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp
--- a/llvm/lib/IR/ConstantFold.cpp
+++ b/llvm/lib/IR/ConstantFold.cpp
@@ -1141,7 +1141,7 @@
       return ConstantInt::get(CI1->getContext(), C1V.udiv(C2V));
     case Instruction::SDiv:
       assert(!CI2->isZero() && "Div by zero handled above");
-      if (C2V.isAllOnesValue() && C1V.isMinSignedValue())
+      if (C2V.isAllOnes() && C1V.isMinSignedValue())
         return PoisonValue::get(CI1->getType()); // MIN_INT / -1 -> poison
       return ConstantInt::get(CI1->getContext(), C1V.sdiv(C2V));
     case Instruction::URem:
@@ -1149,7 +1149,7 @@
       return ConstantInt::get(CI1->getContext(), C1V.urem(C2V));
     case Instruction::SRem:
       assert(!CI2->isZero() && "Div by zero handled above");
-      if (C2V.isAllOnesValue() && C1V.isMinSignedValue())
+      if (C2V.isAllOnes() && C1V.isMinSignedValue())
         return PoisonValue::get(CI1->getType()); // MIN_INT % -1 -> poison
       return ConstantInt::get(CI1->getContext(), C1V.srem(C2V));
     case Instruction::And:
diff --git a/llvm/lib/IR/ConstantRange.cpp b/llvm/lib/IR/ConstantRange.cpp
--- a/llvm/lib/IR/ConstantRange.cpp
+++ b/llvm/lib/IR/ConstantRange.cpp
@@ -204,13 +204,13 @@
   // Handle special case for 0, -1 and 1. See the last for reason why we
   // specialize -1 and 1.
   unsigned BitWidth = V.getBitWidth();
-  if (V == 0 || V.isOneValue())
+  if (V == 0 || V.isOne())
     return ConstantRange::getFull(BitWidth);
   APInt MinValue = APInt::getSignedMinValue(BitWidth);
   APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
   // e.g. Returning [-127, 127], represented as [-127, -128).
-  if (V.isAllOnesValue())
+  if (V.isAllOnes())
     return ConstantRange(-MaxValue, MinValue);
   APInt Lower, Upper;
@@ -1161,9 +1161,9 @@
   if (NegL.Lower.isMinSignedValue() && NegR.Upper.isZero()) {
     // Remove -1 from the LHS. Skip if it's the only element, as this would
     // leave us with an empty set.
-    if (!NegR.Lower.isAllOnesValue()) {
+    if (!NegR.Lower.isAllOnes()) {
       APInt AdjNegRUpper;
-      if (RHS.Lower.isAllOnesValue())
+      if (RHS.Lower.isAllOnes())
         // Negative part of [-1, X] without -1 is [SignedMin, X].
         AdjNegRUpper = RHS.Upper;
       else
@@ -1332,9 +1332,9 @@
     return {*getSingleElement() ^ *Other.getSingleElement()};
   // Special-case binary complement, since we can give a precise answer.
-  if (Other.isSingleElement() && Other.getSingleElement()->isAllOnesValue())
+  if (Other.isSingleElement() && Other.getSingleElement()->isAllOnes())
     return binaryNot();
-  if (isSingleElement() && getSingleElement()->isAllOnesValue())
+  if (isSingleElement() && getSingleElement()->isAllOnes())
     return Other.binaryNot();
   // TODO: replace this with something less conservative
diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp
--- a/llvm/lib/IR/Constants.cpp
+++ b/llvm/lib/IR/Constants.cpp
@@ -95,7 +95,7 @@
   // Check for FP which are bitcasted from -1 integers
   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
-    return CFP->getValueAPF().bitcastToAPInt().isAllOnesValue();
+    return CFP->getValueAPF().bitcastToAPInt().isAllOnes();
   // Check for constant splat vectors of 1 values.
   if (getType()->isVectorTy())
@@ -112,7 +112,7 @@
   // Check for FP which are bitcasted from 1 integers
   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
-    return CFP->getValueAPF().bitcastToAPInt().isOneValue();
+    return CFP->getValueAPF().bitcastToAPInt().isOne();
   // Check for constant splat vectors of 1 values.
   if (getType()->isVectorTy())
@@ -129,7 +129,7 @@
   // Check for FP which are bitcasted from 1 integers
   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
-    return !CFP->getValueAPF().bitcastToAPInt().isOneValue();
+    return !CFP->getValueAPF().bitcastToAPInt().isOne();
   // Check that vectors don't contain 1
   if (auto *VTy = dyn_cast<FixedVectorType>(getType())) {
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -2330,9 +2330,9 @@
       Src1Identity &= (M == (i + NumSrcElts));
       continue;
     }
-    assert((Src0Elts | Src1Elts | UndefElts).isAllOnesValue() &&
+    assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
            "unknown shuffle elements");
-    assert(!Src0Elts.isNullValue() && !Src1Elts.isNullValue() &&
+    assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
            "2-source shuffle not found");
   // Determine lo/hi span ranges.
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -5070,14 +5070,14 @@
   case Intrinsic::masked_gather: {
     const APInt &Alignment =
         cast<ConstantInt>(Call.getArgOperand(1))->getValue();
-    Assert(Alignment.isNullValue() || Alignment.isPowerOf2(),
+    Assert(Alignment.isZero() || Alignment.isPowerOf2(),
           "masked_gather: alignment must be 0 or a power of 2", Call);
    break;
   }
   case Intrinsic::masked_scatter: {
     const APInt &Alignment =
         cast<ConstantInt>(Call.getArgOperand(2))->getValue();
-    Assert(Alignment.isNullValue() || Alignment.isPowerOf2(),
+    Assert(Alignment.isZero() || Alignment.isPowerOf2(),
           "masked_scatter: alignment must be 0 or a power of 2", Call);
     break;
   }
diff --git a/llvm/lib/Support/APFixedPoint.cpp b/llvm/lib/Support/APFixedPoint.cpp
--- a/llvm/lib/Support/APFixedPoint.cpp
+++ b/llvm/lib/Support/APFixedPoint.cpp
@@ -306,7 +306,7 @@
     APInt::sdivrem(ThisVal, OtherVal, Result, Rem);
     // If the quotient is negative and the remainder is nonzero, round
     // towards negative infinity by subtracting epsilon from the result.
-    if (ThisVal.isNegative() != OtherVal.isNegative() && !Rem.isNullValue())
+    if (ThisVal.isNegative() != OtherVal.isNegative() && !Rem.isZero())
       Result = Result - 1;
   } else
     Result = ThisVal.udiv(OtherVal);
diff --git a/llvm/lib/Support/APInt.cpp b/llvm/lib/Support/APInt.cpp
--- a/llvm/lib/Support/APInt.cpp
+++ b/llvm/lib/Support/APInt.cpp
@@ -1943,7 +1943,7 @@
 APInt APInt::sdiv_ov(const APInt &RHS, bool &Overflow) const {
   // MININT/-1 --> overflow.
-  Overflow = isMinSignedValue() && RHS.isAllOnesValue();
+  Overflow = isMinSignedValue() && RHS.isAllOnes();
   return sdiv(RHS);
 }
@@ -2970,10 +2970,10 @@
   if (OldBitWidth == NewBitWidth)
     return A;
-  APInt NewA = APInt::getNullValue(NewBitWidth);
+  APInt NewA = APInt::getZero(NewBitWidth);
   // Check for null input.
-  if (A.isNullValue())
+  if (A.isZero())
     return NewA;
   if (NewBitWidth > OldBitWidth) {
@@ -2986,7 +2986,7 @@
     // Merge bits - if any old bit is set, then set scale equivalent new bit.
     unsigned Scale = OldBitWidth / NewBitWidth;
     for (unsigned i = 0; i != NewBitWidth; ++i)
-      if (!A.extractBits(Scale, i * Scale).isNullValue())
+      if (!A.extractBits(Scale, i * Scale).isZero())
         NewA.setBit(i);
   }
diff --git a/llvm/lib/Support/KnownBits.cpp b/llvm/lib/Support/KnownBits.cpp
--- a/llvm/lib/Support/KnownBits.cpp
+++ b/llvm/lib/Support/KnownBits.cpp
@@ -404,7 +404,7 @@
   // We only know that the absolute values's MSB will be zero if INT_MIN is
   // poison, or there is a set bit that isn't the sign bit (otherwise it could
   // be INT_MIN).
-  if (IntMinIsPoison || (!One.isNullValue() && !One.isMinSignedValue()))
+  if (IntMinIsPoison || (!One.isZero() && !One.isMinSignedValue()))
     KnownAbs.Zero.setSignBit();
   // FIXME: Handle known negative input?
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -2167,7 +2167,7 @@
   APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);
   return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
-         (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
+         (SignificantDstMask | SignificantBitsToBeInserted).isAllOnes();
 }
 // Look for bits that will be useful for later uses.
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -10224,7 +10224,7 @@
       unsigned BitSize = VT.getVectorElementType().getSizeInBits();
       APInt Val(BitSize,
                 Const->getAPIntValue().zextOrTrunc(BitSize).getZExtValue());
-      if (Val.isNullValue() || Val.isAllOnesValue())
+      if (Val.isZero() || Val.isAllOnes())
         return Op;
     }
   }
@@ -16151,7 +16151,7 @@
                         MVT::v2i32, MVT::v4i32, MVT::v2i64}),
                 VT.getSimpleVT().SimpleTy) &&
       ISD::isConstantSplatVector(SplatLHS, SplatLHSVal) &&
-      SplatLHSVal.isOneValue() && ISD::isConstantSplatVectorAllOnes(CmpRHS) &&
+      SplatLHSVal.isOne() && ISD::isConstantSplatVectorAllOnes(CmpRHS) &&
       ISD::isConstantSplatVectorAllOnes(SplatRHS)) {
     unsigned NumElts = VT.getVectorNumElements();
     SmallVector<SDValue> Ops(
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -7716,7 +7716,7 @@
   unsigned SplatBitSize;
   bool HasAnyUndefs;
   if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
-    if (SplatUndef.isAllOnesValue())
+    if (SplatUndef.isAllOnes())
       return DAG.getUNDEF(VT);
     if ((ST->hasNEON() && SplatBitSize <= 64) ||
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -175,7 +175,7 @@
                        PatternMatch::m_Constant(XorMask))) &&
       II.getType() == ArgArg->getType()) {
     if (auto *CI = dyn_cast<ConstantInt>(XorMask)) {
-      if (CI->getValue().trunc(16).isAllOnesValue()) {
+      if (CI->getValue().trunc(16).isAllOnes()) {
         auto TrueVector = IC.Builder.CreateVectorSplat(
             cast<FixedVectorType>(II.getType())->getNumElements(),
             IC.Builder.getTrue());
@@ -410,7 +410,7 @@
   }
   // xor a, -1 can always be folded to MVN
-  if (Opcode == Instruction::Xor && Imm.isAllOnesValue())
+  if (Opcode == Instruction::Xor && Imm.isAllOnes())
     return 0;
   // Ensures negative constant of min(max()) or max(min()) patterns that
diff --git a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
--- a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
@@ -1348,7 +1348,7 @@
   KnownBits Known0 = computeKnownBits(Idx0, DL, 0, &AC, Gep0, &DT);
   KnownBits Known1 = computeKnownBits(Idx1, DL, 0, &AC, Gep1, &DT);
   APInt Unknown = ~(Known0.Zero | Known0.One) | ~(Known1.Zero | Known1.One);
-  if (Unknown.isAllOnesValue())
+  if (Unknown.isAllOnes())
     return None;
   Value *MaskU = ConstantInt::get(Idx0->getType(), Unknown);
diff --git a/llvm/lib/Target/Mips/MipsInstructionSelector.cpp b/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
--- a/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
+++ b/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
@@ -145,14 +145,14 @@
                              MachineIRBuilder &B) const {
   assert(Imm.getBitWidth() == 32 && "Unsupported immediate size.");
   // Ori zero extends immediate. Used for values with zeros in high 16 bits.
-  if (Imm.getHiBits(16).isNullValue()) {
+  if (Imm.getHiBits(16).isZero()) {
     MachineInstr *Inst =
         B.buildInstr(Mips::ORi, {DestReg}, {Register(Mips::ZERO)})
             .addImm(Imm.getLoBits(16).getLimitedValue());
     return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
   }
   // Lui places immediate in high 16 bits and sets low 16 bits to zero.
-  if (Imm.getLoBits(16).isNullValue()) {
+  if (Imm.getLoBits(16).isZero()) {
     MachineInstr *Inst = B.buildInstr(Mips::LUi, {DestReg}, {})
                             .addImm(Imm.getHiBits(16).getLimitedValue());
     return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
   }
diff --git a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
--- a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
@@ -569,7 +569,7 @@
   // Endianness doesn't matter in this context because we are looking for
   // an all-ones value.
   if (BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs))
-    return SplatValue.isAllOnesValue();
+    return SplatValue.isAllOnes();
   return false;
 }
@@ -701,7 +701,7 @@
   // Fold degenerate cases.
   if (IsConstantMask) {
-    if (Mask.isAllOnesValue())
+    if (Mask.isAllOnes())
       return IfSet;
     else if (Mask == 0)
       return IfClr;
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -3779,7 +3779,7 @@
   bool Simplified = false;
   // If final mask is 0, MI result should be 0 too.
-  if (FinalMask.isNullValue()) {
+  if (FinalMask.isZero()) {
     bool Is64Bit =
         (MI.getOpcode() == PPC::RLWINM8 || MI.getOpcode() == PPC::RLWINM8_rec);
     Simplified = true;
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -890,7 +890,7 @@
         APInt SplatVal;
         if (X86::isConstantSplat(N->getOperand(1), SplatVal) &&
-            SplatVal.isOneValue()) {
+            SplatVal.isOne()) {
           SDLoc DL(N);
           MVT VT = N->getSimpleValueType(0);
@@ -4356,7 +4356,7 @@
   // Check if the mask is -1. In that case, this is an unnecessary instruction
   // that escaped earlier analysis.
-  if (NegMaskVal.isAllOnesValue()) {
+  if (NegMaskVal.isAllOnes()) {
     ReplaceNode(And, And0.getNode());
     return true;
   }
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -5951,7 +5951,7 @@
   // Here we do not set undef elements as zeroable.
   SmallVector<int, 64> ZeroableMask(Mask.begin(), Mask.end());
   if (V2IsZero) {
-    assert(!Zeroable.isNullValue() && "V2's non-undef elements are used?!");
+    assert(!Zeroable.isZero() && "V2's non-undef elements are used?!");
     for (int i = 0, Size = Mask.size(); i != Size; ++i)
       if (Mask[i] != SM_SentinelUndef && Zeroable[i])
         ZeroableMask[i] = SM_SentinelZero;
@@ -6790,7 +6790,7 @@
       APInt UndefEltBits = UndefBits.extractBits(EltSizeInBits, BitOffset);
       // Only treat an element as UNDEF if all bits are UNDEF.
-      if (UndefEltBits.isAllOnesValue()) {
+      if (UndefEltBits.isAllOnes()) {
         if (!AllowWholeUndefs)
           return false;
         UndefElts.setBit(i);
@@ -7992,9 +7992,9 @@
     // lanes), we can treat this as a truncation shuffle.
     bool Offset0 = false, Offset1 = false;
     if (Opcode == X86ISD::PACKSS) {
-      if ((!(N0.isUndef() || EltsLHS.isNullValue()) &&
+      if ((!(N0.isUndef() || EltsLHS.isZero()) &&
            DAG.ComputeNumSignBits(N0, EltsLHS, Depth + 1) <= NumBitsPerElt) ||
-          (!(N1.isUndef() || EltsRHS.isNullValue()) &&
+          (!(N1.isUndef() || EltsRHS.isZero()) &&
           DAG.ComputeNumSignBits(N1, EltsRHS, Depth + 1) <= NumBitsPerElt))
         return false;
       // We can't easily fold ASHR into a shuffle, but if it was feeding a
@@ -8012,9 +8012,9 @@
       }
     } else {
       APInt ZeroMask = APInt::getHighBitsSet(2 * NumBitsPerElt, NumBitsPerElt);
-      if ((!(N0.isUndef() || EltsLHS.isNullValue()) &&
+      if ((!(N0.isUndef() || EltsLHS.isZero()) &&
           !DAG.MaskedValueIsZero(N0, ZeroMask, EltsLHS, Depth + 1)) ||
-          (!(N1.isUndef() || EltsRHS.isNullValue()) &&
+          (!(N1.isUndef() || EltsRHS.isZero()) &&
           !DAG.MaskedValueIsZero(N1, ZeroMask, EltsRHS, Depth + 1)))
         return false;
     }
@@ -8903,7 +8903,7 @@
   // If the upper half of a ymm/zmm load is undef then just load the lower half.
   if (VT.is256BitVector() || VT.is512BitVector()) {
     unsigned HalfNumElems = NumElems / 2;
-    if (UndefMask.extractBits(HalfNumElems, HalfNumElems).isAllOnesValue()) {
+    if (UndefMask.extractBits(HalfNumElems, HalfNumElems).isAllOnes()) {
       EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
                                     HalfNumElems);
       SDValue HalfLD =
@@ -8942,7 +8942,7 @@
   // BROADCAST - match the smallest possible repetition pattern, load that
   // scalar/subvector element and then broadcast to the entire vector.
-  if (ZeroMask.isNullValue() && isPowerOf2_32(NumElems) && Subtarget.hasAVX() &&
+  if (ZeroMask.isZero() && isPowerOf2_32(NumElems) && Subtarget.hasAVX() &&
       (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector())) {
     for (unsigned SubElems = 1; SubElems < NumElems; SubElems *= 2) {
       unsigned RepeatSize = SubElems * BaseSizeInBits;
@@ -10605,7 +10605,7 @@
   // All undef vector. Return an UNDEF. All zero vectors were handled above.
   if (NonZeroMask == 0) {
-    assert(UndefMask.isAllOnesValue() && "Fully undef mask expected");
+    assert(UndefMask.isAllOnes() && "Fully undef mask expected");
     return DAG.getUNDEF(VT);
   }
@@ -11452,7 +11452,7 @@
     // Arbitrarily choose from the 2nd operand if the select condition element
     // is undef.
     // TODO: Can we do better by matching patterns such as even/odd?
-    if (UndefElts[i] || EltBits[i].isNullValue())
+    if (UndefElts[i] || EltBits[i].isZero())
       Mask[i] += NumElts;
   }
@@ -11821,7 +11821,7 @@
     if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, 0, Scale))
       continue;
     unsigned UpperElts = NumElts - NumSrcElts;
-    if (!Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnesValue())
+    if (!Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
       continue;
     SrcVT = MVT::getIntegerVT(EltSizeInBits * Scale);
     SrcVT = MVT::getVectorVT(SrcVT, NumSrcElts);
@@ -11918,7 +11918,7 @@
     unsigned NumSrcElts = NumElts / Scale;
     unsigned UpperElts = NumElts - NumSrcElts;
     if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, 0, Scale) ||
-        !Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnesValue())
+        !Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
       continue;
     SDValue Src = V1;
@@ -11975,7 +11975,7 @@
     // The elements beyond the truncation must be undef/zero.
     unsigned UpperElts = NumElts - NumSrcElts;
     if (UpperElts > 0 &&
-        !Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnesValue())
+        !Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
       continue;
     bool UndefUppers =
         UpperElts > 0 && isUndefInRange(Mask, NumSrcElts, UpperElts);
@@ -13265,7 +13265,7 @@
   int Size = Mask.size();
   int HalfSize = Size / 2;
   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
-  assert(!Zeroable.isAllOnesValue() && "Fully zeroable shuffle mask");
+  assert(!Zeroable.isAllOnes() && "Fully zeroable shuffle mask");
   // Upper half must be undefined.
   if (!isUndefUpperHalf(Mask))
@@ -18835,7 +18835,7 @@
   computeZeroableShuffleElements(OrigMask, V1, V2, KnownUndef, KnownZero);
   APInt Zeroable = KnownUndef | KnownZero;
-  if (Zeroable.isAllOnesValue())
+  if (Zeroable.isAllOnes())
     return getZeroVector(VT, Subtarget, DAG, DL);
   bool V2IsZero = !V2IsUndef && ISD::isBuildVectorAllZeros(V2.getNode());
@@ -22759,7 +22759,7 @@
   } else {
     // Quit if not all elements are used.
     for (const auto &I : SrcOpMap)
-      if (!I.second.isAllOnesValue())
+      if (!I.second.isAllOnes())
         return false;
   }
@@ -22782,7 +22782,7 @@
   X86CC = (CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE);
   auto MaskBits = [&](SDValue Src) {
-    if (Mask.isAllOnesValue())
+    if (Mask.isAllOnes())
       return Src;
     EVT SrcVT = Src.getValueType();
     SDValue MaskValue = DAG.getConstant(Mask, DL, SrcVT);
@@ -22820,8 +22820,8 @@
     // Without PTEST, a masked v2i64 or-reduction is not faster than
     // scalarization.
-    if (!Mask.isAllOnesValue() && VT.getScalarSizeInBits() > 32)
-      return SDValue();
+    if (!Mask.isAllOnes() && VT.getScalarSizeInBits() > 32)
+      return SDValue();
     V = DAG.getBitcast(MVT::v16i8, MaskBits(V));
     V = DAG.getNode(X86ISD::PCMPEQ, DL, MVT::v16i8, V,
@@ -23488,7 +23488,7 @@
     // Avoid overflow/underflow.
     const APInt &EltC = Elt->getAPIntValue();
-    if ((IsInc && EltC.isMaxValue()) || (!IsInc && EltC.isNullValue()))
+    if ((IsInc && EltC.isMaxValue()) || (!IsInc && EltC.isZero()))
       return SDValue();
     NewVecC.push_back(DAG.getConstant(EltC + (IsInc ? 1 : -1), DL, EltVT));
@@ -23807,7 +23807,7 @@
       Cond = ISD::SETGT;
     else if (ConstValue.isMaxSignedValue())
       Cond = ISD::SETLT;
-    else if (ConstValue.isNullValue() && DAG.SignBitIsZero(Op0))
+    else if (ConstValue.isZero() && DAG.SignBitIsZero(Op0))
       Cond = ISD::SETGT;
   }
@@ -24160,7 +24160,7 @@
   // TODO: Can we move this to TranslateX86CC to handle jumps/branches too?
   if (auto *Op1C = dyn_cast<ConstantSDNode>(Op1)) {
     const APInt &Op1Val = Op1C->getAPIntValue();
-    if (!Op1Val.isNullValue()) {
+    if (!Op1Val.isZero()) {
       // Ensure the constant+1 doesn't overflow.
       if ((CC == ISD::CondCode::SETGT && !Op1Val.isMaxSignedValue()) ||
           (CC == ISD::CondCode::SETUGT && !Op1Val.isMaxValue())) {
@@ -37719,10 +37719,10 @@
     ConstantElts.setBit(i);
     ConstantBitData[i] = Bits;
   }
-  assert((UndefElts | ZeroElts | ConstantElts).isAllOnesValue());
+  assert((UndefElts | ZeroElts | ConstantElts).isAllOnes());
   // Attempt to create a zero vector.
-  if ((UndefElts | ZeroElts).isAllOnesValue())
+  if ((UndefElts | ZeroElts).isAllOnes())
     return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG, DL);
   // Create the constant data.
@@ -37857,14 +37857,14 @@
   // Only resolve zeros if it will remove an input, otherwise we might end
   // up in an infinite loop.
   bool ResolveKnownZeros = true;
-  if (!OpZero.isNullValue()) {
+  if (!OpZero.isZero()) {
     APInt UsedInputs = APInt::getZero(OpInputs.size());
     for (int i = 0, e = OpMask.size(); i != e; ++i) {
       int M = OpMask[i];
       if (OpUndef[i] || OpZero[i] || isUndefOrZero(M))
         continue;
       UsedInputs.setBit(M / OpMask.size());
-      if (UsedInputs.isAllOnesValue()) {
+      if (UsedInputs.isAllOnes()) {
         ResolveKnownZeros = false;
         break;
       }
@@ -39551,7 +39551,7 @@
     TargetLowering::TargetLoweringOpt &TLO, unsigned Depth) const {
   // If we're demanding all elements don't bother trying to simplify the mask.
   unsigned NumElts = DemandedElts.getBitWidth();
-  if (DemandedElts.isAllOnesValue())
+  if (DemandedElts.isAllOnes())
     return false;
   SDValue Mask = Op.getOperand(MaskIndex);
@@ -39647,7 +39647,7 @@
           "Unexpected PSADBW types");
   // Aggressively peek through ops to get at the demanded elts.
-  if (!DemandedElts.isAllOnesValue()) {
+  if (!DemandedElts.isAllOnes()) {
     unsigned NumSrcElts = LHS.getValueType().getVectorNumElements();
     APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedElts, NumSrcElts);
     SDValue NewLHS = SimplifyMultipleUseDemandedVectorElts(
@@ -39698,7 +39698,7 @@
     return true;
   // Aggressively peek through ops to get at the demanded elts.
-  if (!DemandedElts.isAllOnesValue())
+  if (!DemandedElts.isAllOnes())
     if (SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
             Src, DemandedElts, TLO.DAG, Depth + 1))
       return TLO.CombineTo(
@@ -39815,7 +39815,7 @@
   // Aggressively peek through ops to get at the demanded elts.
   // TODO - we should do this for all target/faux shuffles ops.
-  if (!DemandedElts.isAllOnesValue()) {
+  if (!DemandedElts.isAllOnes()) {
     SDValue NewN0 = SimplifyMultipleUseDemandedVectorElts(N0, DemandedLHS,
                                                           TLO.DAG, Depth + 1);
     SDValue NewN1 = SimplifyMultipleUseDemandedVectorElts(N1, DemandedRHS,
@@ -39852,7 +39852,7 @@
   // Aggressively peek through ops to get at the demanded elts.
   // TODO: Handle repeated operands.
-  if (N0 != N1 && !DemandedElts.isAllOnesValue()) {
+  if (N0 != N1 && !DemandedElts.isAllOnes()) {
     SDValue NewN0 = SimplifyMultipleUseDemandedVectorElts(N0, DemandedLHS,
                                                           TLO.DAG, Depth + 1);
     SDValue NewN1 = SimplifyMultipleUseDemandedVectorElts(N1, DemandedRHS,
@@ -40125,7 +40125,7 @@
   // For broadcasts, unless we *only* demand the 0'th element,
   // stop attempts at simplification here, we aren't going to improve things,
   // this is better than any potential shuffle.
-  if (isTargetShuffleSplat(Op) && !DemandedElts.isOneValue())
+  if (isTargetShuffleSplat(Op) && !DemandedElts.isOne())
     return false;
   // Get target/faux shuffle mask.
@@ -40195,7 +40195,7 @@
   // to match. This prevents combineX86ShuffleChain from returning a
   // combined shuffle that's the same as the original root, causing an
   // infinite loop.
-  if (!DemandedElts.isAllOnesValue()) {
+  if (!DemandedElts.isAllOnes()) {
     assert(Depth < X86::MaxShuffleCombineDepth && "Depth out of range");
     SmallVector<int, 64> DemandedMask(NumElts, SM_SentinelUndef);
@@ -42575,7 +42575,7 @@
     SDValue R = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
     // Multiply condition by the difference if non-one.
-    if (!AbsDiff.isOneValue())
+    if (!AbsDiff.isOne())
       R = DAG.getNode(ISD::MUL, DL, VT, R, DAG.getConstant(AbsDiff, DL, VT));
     // Add the base if non-zero.
@@ -43333,7 +43333,7 @@
   // We can handle comparisons with zero in a number of cases by manipulating
   // the CC used.
-  if (!Comparison.isNullValue())
+  if (!Comparison.isZero())
     return SDValue();
   if (CC == X86::COND_S && Addend == 1)
@@ -43739,7 +43739,7 @@
   unsigned NumElts = VecVT.getVectorNumElements();
   unsigned NumEltBits = VecVT.getScalarSizeInBits();
-  bool IsAnyOf = CmpOpcode == X86ISD::CMP && CmpVal.isNullValue();
+  bool IsAnyOf = CmpOpcode == X86ISD::CMP && CmpVal.isZero();
   bool IsAllOf = CmpOpcode == X86ISD::SUB && NumElts <= CmpBits &&
                  CmpVal.isMask(NumElts);
   if (!IsAnyOf && !IsAllOf)
@@ -43837,7 +43837,7 @@
       assert(0 <= M && M < (int)NumShuffleElts && "Bad unary shuffle index");
       DemandedElts.setBit(M);
     }
-    if (DemandedElts.isAllOnesValue()) {
+    if (DemandedElts.isAllOnes()) {
       SDLoc DL(EFLAGS);
       SDValue Result = DAG.getBitcast(VecVT, ShuffleInputs[0]);
       Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
@@ -45973,7 +45973,7 @@
       N->getOperand(0)->isOnlyUserOf(SrcVec.getNode()) &&
       getTargetConstantBitsFromNode(BitMask, 8, UndefElts, EltBits) &&
       llvm::all_of(EltBits, [](const APInt &M) {
-        return M.isNullValue() || M.isAllOnesValue();
+        return M.isZero() || M.isAllOnes();
       })) {
     unsigned NumElts = SrcVecVT.getVectorNumElements();
     unsigned Scale = SrcVecVT.getScalarSizeInBits() / 8;
@@ -45985,8 +45985,7 @@
       if (UndefElts[i])
         continue;
       int VecIdx = Scale * Idx + i;
-      ShuffleMask[VecIdx] =
-          EltBits[i].isNullValue() ? SM_SentinelZero : VecIdx;
+      ShuffleMask[VecIdx] = EltBits[i].isZero() ? SM_SentinelZero : VecIdx;
     }
     if (SDValue Shuffle = combineX86ShufflesRecursively(
@@ -52102,7 +52101,7 @@
   // TODO: SimplifyDemandedBits instead?
   if (VT == MVT::v1i1 && Src.getOpcode() == ISD::AND && Src.hasOneUse())
     if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
-      if (C->getAPIntValue().isOneValue())
+      if (C->getAPIntValue().isOne())
        return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1,
                            Src.getOperand(0));
diff --git a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
--- a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
@@ -239,7 +239,7 @@
     KnownBits KnownUpperBits = llvm::computeKnownBits(
         Amt, DemandedUpper, II.getModule()->getDataLayout());
     if (KnownLowerBits.getMaxValue().ult(BitWidth) &&
-        (DemandedUpper.isNullValue() || KnownUpperBits.isZero())) {
+        (DemandedUpper.isZero() || KnownUpperBits.isZero())) {
      SmallVector<int> ZeroSplat(VWidth, 0);
       Amt = Builder.CreateShuffleVector(Amt, ZeroSplat);
       return (LogicalShift ? (ShiftLeft ? Builder.CreateShl(Vec, Amt)
@@ -269,7 +269,7 @@
   }
   // If shift-by-zero then just return the original value.
-  if (Count.isNullValue())
+  if (Count.isZero())
     return Vec;
   // Handle cases when Shift >= BitWidth.
@@ -1764,7 +1764,7 @@
   // we know that DemandedMask is non-zero already.
   APInt DemandedElts = DemandedMask.zextOrTrunc(ArgWidth);
   Type *VTy = II.getType();
-  if (DemandedElts.isNullValue()) {
+  if (DemandedElts.isZero()) {
     return ConstantInt::getNullValue(VTy);
   }
diff --git a/llvm/lib/Target/X86/X86ShuffleDecodeConstantPool.cpp b/llvm/lib/Target/X86/X86ShuffleDecodeConstantPool.cpp
--- a/llvm/lib/Target/X86/X86ShuffleDecodeConstantPool.cpp
+++ b/llvm/lib/Target/X86/X86ShuffleDecodeConstantPool.cpp
@@ -100,7 +100,7 @@
     // Only treat the element as UNDEF if all bits are UNDEF, otherwise
     // treat it as zero.
-    if (EltUndef.isAllOnesValue()) {
+    if (EltUndef.isAllOnes()) {
       UndefElts.setBit(i);
       RawMask[i] = 0;
       continue;
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -8730,25 +8730,25 @@
     case Instruction::Mul:
       return LHS * RHS;
     case Instruction::UDiv:
-      if (RHS.isNullValue()) {
+      if (RHS.isZero()) {
         SkipOperation = true;
         return LHS;
       }
       return LHS.udiv(RHS);
     case Instruction::SDiv:
-      if (RHS.isNullValue()) {
+      if (RHS.isZero()) {
         SkipOperation = true;
         return LHS;
       }
       return LHS.sdiv(RHS);
     case Instruction::URem:
-      if (RHS.isNullValue()) {
+      if (RHS.isZero()) {
         SkipOperation = true;
         return LHS;
       }
       return LHS.urem(RHS);
     case Instruction::SRem:
-      if (RHS.isNullValue()) {
+      if (RHS.isZero()) {
         SkipOperation = true;
         return LHS;
       }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -939,7 +939,7 @@
   // add (xor X, LowMaskC), C --> sub (LowMaskC + C), X
   if (C2->isMask()) {
     KnownBits LHSKnown = computeKnownBits(X, 0, &Add);
-    if ((*C2 | LHSKnown.Zero).isAllOnesValue())
+    if ((*C2 | LHSKnown.Zero).isAllOnes())
       return BinaryOperator::CreateSub(ConstantInt::get(Ty, *C2 + *C), X);
   }
@@ -963,7 +963,7 @@
     }
   }
-  if (C->isOneValue() && Op0->hasOneUse()) {
+  if (C->isOne() && Op0->hasOneUse()) {
     // add (sext i1 X), 1 --> zext (not X)
     // TODO: The smallest IR representation is (select X, 0, 1), and that would
     // not require the one-use check. But we need to remove a transform in
@@ -1910,7 +1910,7 @@
     // Turn this into a xor if LHS is 2^n-1 and the remaining bits are known
     // zero.
     KnownBits RHSKnown = computeKnownBits(Op1, 0, &I);
-    if ((*Op0C | RHSKnown.Zero).isAllOnesValue())
+    if ((*Op0C | RHSKnown.Zero).isAllOnes())
       return BinaryOperator::CreateXor(Op1, Op0);
   }
@@ -2154,7 +2154,7 @@
     unsigned BitWidth = Ty->getScalarSizeInBits();
     unsigned Cttz = AddC->countTrailingZeros();
     APInt HighMask(APInt::getHighBitsSet(BitWidth, BitWidth - Cttz));
-    if ((HighMask & *AndC).isNullValue())
+    if ((HighMask & *AndC).isZero())
       return BinaryOperator::CreateAnd(Op0, ConstantInt::get(Ty, ~(*AndC)));
   }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -779,7 +779,7 @@
   // Special case: get the ordering right when the values wrap around zero.
   // Ie, we assumed the constants were unsigned when swapping earlier.
- if (C1->isNullValue() && C2->isAllOnesValue()) + if (C1->isZero() && C2->isAllOnes()) std::swap(C1, C2); if (*C1 == *C2 - 1) { @@ -923,7 +923,7 @@ if (!tryToDecompose(OtherICmp, X0, UnsetBitsMask)) return nullptr; - assert(!UnsetBitsMask.isNullValue() && "empty mask makes no sense."); + assert(!UnsetBitsMask.isZero() && "empty mask makes no sense."); // Are they working on the same value? Value *X; @@ -1310,8 +1310,8 @@ // Check that the low bits are zero. APInt Low = APInt::getLowBitsSet(BigBitSize, SmallBitSize); - if ((Low & AndC->getValue()).isNullValue() && - (Low & BigC->getValue()).isNullValue()) { + if ((Low & AndC->getValue()).isZero() && + (Low & BigC->getValue()).isZero()) { Value *NewAnd = Builder.CreateAnd(V, Low | AndC->getValue()); APInt N = SmallC->getValue().zext(BigBitSize) | BigC->getValue(); Value *NewVal = ConstantInt::get(AndC->getType()->getContext(), N); @@ -1883,7 +1883,7 @@ // (X + AddC) & LowMaskC --> X & LowMaskC unsigned Ctlz = C->countLeadingZeros(); APInt LowMask(APInt::getLowBitsSet(Width, Width - Ctlz)); - if ((*AddC & LowMask).isNullValue()) + if ((*AddC & LowMask).isZero()) return BinaryOperator::CreateAnd(X, Op1); // If we are masking the result of the add down to exactly one bit and @@ -2677,7 +2677,7 @@ Value *X, *Y; const APInt *CV; if (match(&I, m_c_Or(m_OneUse(m_Xor(m_Value(X), m_APInt(CV))), m_Value(Y))) && - !CV->isAllOnesValue() && MaskedValueIsZero(Y, *CV, 0, &I)) { + !CV->isAllOnes() && MaskedValueIsZero(Y, *CV, 0, &I)) { // (X ^ C) | Y -> (X | Y) ^ C iff Y & C == 0 // The check for a 'not' op is for efficiency (if Y is known zero --> ~X). Value *Or = Builder.CreateOr(X, Y); @@ -2692,7 +2692,7 @@ ConstantInt *C1, *C2; if (match(C, m_ConstantInt(C1)) && match(D, m_ConstantInt(C2))) { Value *V1 = nullptr, *V2 = nullptr; - if ((C1->getValue() & C2->getValue()).isNullValue()) { + if ((C1->getValue() & C2->getValue()).isZero()) { // ((V | N) & C1) | (V & C2) --> (V|N) & (C1|C2) // iff (C1&C2) == 0 and (N&~C1) == 0 if (match(A, m_Or(m_Value(V1), m_Value(V2))) && @@ -2715,9 +2715,9 @@ // iff (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0. ConstantInt *C3 = nullptr, *C4 = nullptr; if (match(A, m_Or(m_Value(V1), m_ConstantInt(C3))) && - (C3->getValue() & ~C1->getValue()).isNullValue() && + (C3->getValue() & ~C1->getValue()).isZero() && match(B, m_Or(m_Specific(V1), m_ConstantInt(C4))) && - (C4->getValue() & ~C2->getValue()).isNullValue()) { + (C4->getValue() & ~C2->getValue()).isZero()) { V2 = Builder.CreateOr(V1, ConstantExpr::getOr(C3, C4), "bitfield"); return BinaryOperator::CreateAnd(V2, Builder.getInt(C1->getValue()|C2->getValue())); diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -513,7 +513,7 @@ // If the input to cttz/ctlz is known to be non-zero, // then change the 'ZeroIsUndef' parameter to 'true' // because we know the zero behavior can't affect the result. 
- if (!Known.One.isNullValue() || + if (!Known.One.isZero() || isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II, &IC.getDominatorTree())) { if (!match(II.getArgOperand(1), m_One())) diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp @@ -988,8 +988,8 @@ // zext (x <s 0) to i32 --> x>>u31 true if signbit set. // zext (x >s -1) to i32 --> (x>>u31)^1 true if signbit clear. - if ((Cmp->getPredicate() == ICmpInst::ICMP_SLT && Op1CV->isNullValue()) || - (Cmp->getPredicate() == ICmpInst::ICMP_SGT && Op1CV->isAllOnesValue())) { + if ((Cmp->getPredicate() == ICmpInst::ICMP_SLT && Op1CV->isZero()) || + (Cmp->getPredicate() == ICmpInst::ICMP_SGT && Op1CV->isAllOnes())) { Value *In = Cmp->getOperand(0); Value *Sh = ConstantInt::get(In->getType(), In->getType()->getScalarSizeInBits() - 1); @@ -1013,7 +1013,7 @@ // zext (X != 0) to i32 --> X>>1 iff X has only the 2nd bit set. // zext (X != 1) to i32 --> X^1 iff X has only the low bit set. // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set. - if ((Op1CV->isNullValue() || Op1CV->isPowerOf2()) && + if ((Op1CV->isZero() || Op1CV->isPowerOf2()) && // This only works for EQ and NE Cmp->isEquality()) { // If Op1C some other power of two, convert: @@ -1022,7 +1022,7 @@ APInt KnownZeroMask(~Known.Zero); if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1? bool isNE = Cmp->getPredicate() == ICmpInst::ICMP_NE; - if (!Op1CV->isNullValue() && (*Op1CV != KnownZeroMask)) { + if (!Op1CV->isZero() && (*Op1CV != KnownZeroMask)) { // (X&4) == 2 --> false // (X&4) != 2 --> true Constant *Res = ConstantInt::get(Zext.getType(), isNE); @@ -1038,7 +1038,7 @@ In->getName() + ".lobit"); } - if (!Op1CV->isNullValue() == isNE) { // Toggle the low bit. + if (!Op1CV->isZero() == isNE) { // Toggle the low bit. Constant *One = ConstantInt::get(In->getType(), 1); In = Builder.CreateXor(In, One); } diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp @@ -78,15 +78,15 @@ if (!ICmpInst::isSigned(Pred)) return false; - if (C.isNullValue()) + if (C.isZero()) return ICmpInst::isRelational(Pred); - if (C.isOneValue()) { + if (C.isOne()) { if (Pred == ICmpInst::ICMP_SLT) { Pred = ICmpInst::ICMP_SLE; return true; } - } else if (C.isAllOnesValue()) { + } else if (C.isAllOnes()) { if (Pred == ICmpInst::ICMP_SGT) { Pred = ICmpInst::ICMP_SGE; return true; @@ -1147,12 +1147,12 @@ }; // Don't bother doing any work for cases which InstSimplify handles. - if (AP2.isNullValue()) + if (AP2.isZero()) return nullptr; bool IsAShr = isa<AShrOperator>(I.getOperand(0)); if (IsAShr) { - if (AP2.isAllOnesValue()) + if (AP2.isAllOnes()) return nullptr; if (AP2.isNegative() != AP1.isNegative()) return nullptr; @@ -1178,7 +1178,7 @@ if (IsAShr && AP1 == AP2.ashr(Shift)) { // There are multiple solutions if we are comparing against -1 and the LHS // of the ashr is not a power of two.
- if (AP1.isAllOnesValue() && !AP2.isPowerOf2()) + if (AP1.isAllOnes() && !AP2.isPowerOf2()) return getICmp(I.ICMP_UGE, A, ConstantInt::get(A->getType(), Shift)); return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift)); } else if (AP1 == AP2.lshr(Shift)) { @@ -1206,7 +1206,7 @@ }; // Don't bother doing any work for cases which InstSimplify handles. - if (AP2.isNullValue()) + if (AP2.isZero()) return nullptr; unsigned AP2TrailingZeros = AP2.countTrailingZeros(); @@ -1544,7 +1544,7 @@ const APInt &C) { ICmpInst::Predicate Pred = Cmp.getPredicate(); Value *X = Trunc->getOperand(0); - if (C.isOneValue() && C.getBitWidth() > 1) { + if (C.isOne() && C.getBitWidth() > 1) { // icmp slt trunc(signum(V)) 1 --> icmp slt V, 1 Value *V = nullptr; if (Pred == ICmpInst::ICMP_SLT && match(X, m_Signum(m_Value(V)))) @@ -1725,7 +1725,7 @@ // Turn ((X >> Y) & C2) == 0 into (X & (C2 << Y)) == 0. The latter is // preferable because it allows the C2 << Y expression to be hoisted out of a // loop if Y is invariant and X is not. - if (Shift->hasOneUse() && C1.isNullValue() && Cmp.isEquality() && + if (Shift->hasOneUse() && C1.isZero() && Cmp.isEquality() && !Shift->isArithmeticShift() && !isa<Constant>(Shift->getOperand(0))) { // Compute C2 << Y. Value *NewShift = @@ -1749,7 +1749,7 @@ // For vectors: icmp ne (and X, 1), 0 --> trunc X to N x i1 // TODO: We canonicalize to the longer form for scalars because we have // better analysis/folds for icmp, and codegen may be better with icmp. - if (isICMP_NE && Cmp.getType()->isVectorTy() && C1.isNullValue() && + if (isICMP_NE && Cmp.getType()->isVectorTy() && C1.isZero() && match(And->getOperand(1), m_One())) return new TruncInst(And->getOperand(0), Cmp.getType()); @@ -1762,7 +1762,7 @@ if (!And->hasOneUse()) return nullptr; - if (Cmp.isEquality() && C1.isNullValue()) { + if (Cmp.isEquality() && C1.isZero()) { // Restrict this fold to single-use 'and' (PR10267). // Replace (and X, (1 << size(X)-1) != 0) with X s< 0 if (C2->isSignMask()) { @@ -1812,7 +1812,7 @@ // (icmp pred (and A, (or (shl 1, B), 1), 0)) // // iff pred isn't signed - if (!Cmp.isSigned() && C1.isNullValue() && And->getOperand(0)->hasOneUse() && + if (!Cmp.isSigned() && C1.isZero() && And->getOperand(0)->hasOneUse() && match(And->getOperand(1), m_One())) { Constant *One = cast<Constant>(And->getOperand(1)); Value *Or = And->getOperand(0); @@ -1899,7 +1899,7 @@ // (X & C2) != 0 -> (trunc X) < 0 // iff C2 is a power of 2 and it masks the sign bit of a legal integer type. const APInt *C2; - if (And->hasOneUse() && C.isNullValue() && match(Y, m_APInt(C2))) { + if (And->hasOneUse() && C.isZero() && match(Y, m_APInt(C2))) { int32_t ExactLogBase2 = C2->exactLogBase2(); if (ExactLogBase2 != -1 && DL.isLegalInteger(ExactLogBase2 + 1)) { Type *NTy = IntegerType::get(Cmp.getContext(), ExactLogBase2 + 1); @@ -1920,7 +1920,7 @@ BinaryOperator *Or, const APInt &C) { ICmpInst::Predicate Pred = Cmp.getPredicate(); - if (C.isOneValue()) { + if (C.isOne()) { // icmp slt signum(V) 1 --> icmp slt V, 1 Value *V = nullptr; if (Pred == ICmpInst::ICMP_SLT && match(Or, m_Signum(m_Value(V)))) @@ -1950,7 +1950,7 @@ } } - if (!Cmp.isEquality() || !C.isNullValue() || !Or->hasOneUse()) + if (!Cmp.isEquality() || !C.isZero() || !Or->hasOneUse()) return nullptr; Value *P, *Q; @@ -2001,14 +2001,14 @@ // If the multiply does not wrap, try to divide the compare constant by the // multiplication factor.
- if (Cmp.isEquality() && !MulC->isNullValue()) { + if (Cmp.isEquality() && !MulC->isZero()) { // (mul nsw X, MulC) == C --> X == C /s MulC - if (Mul->hasNoSignedWrap() && C.srem(*MulC).isNullValue()) { + if (Mul->hasNoSignedWrap() && C.srem(*MulC).isZero()) { Constant *NewC = ConstantInt::get(Mul->getType(), C.sdiv(*MulC)); return new ICmpInst(Pred, Mul->getOperand(0), NewC); } // (mul nuw X, MulC) == C --> X == C /u MulC - if (Mul->hasNoUnsignedWrap() && C.urem(*MulC).isNullValue()) { + if (Mul->hasNoUnsignedWrap() && C.urem(*MulC).isZero()) { Constant *NewC = ConstantInt::get(Mul->getType(), C.udiv(*MulC)); return new ICmpInst(Pred, Mul->getOperand(0), NewC); } @@ -2053,7 +2053,7 @@ return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, CLog2)); } else if (Cmp.isSigned()) { Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1); - if (C.isAllOnesValue()) { + if (C.isAllOnes()) { // (1 << Y) <= -1 -> Y == 31 if (Pred == ICmpInst::ICMP_SLE) return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne); @@ -2227,8 +2227,7 @@ // icmp eq/ne (shr X, Y), 0 --> icmp eq/ne X, 0 Value *X = Shr->getOperand(0); CmpInst::Predicate Pred = Cmp.getPredicate(); - if (Cmp.isEquality() && Shr->isExact() && Shr->hasOneUse() && - C.isNullValue()) + if (Cmp.isEquality() && Shr->isExact() && Shr->hasOneUse() && C.isZero()) return new ICmpInst(Pred, X, Cmp.getOperand(1)); const APInt *ShiftVal; @@ -2316,7 +2315,7 @@ if (Shr->isExact()) return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, C << ShAmtVal)); - if (C.isNullValue()) { + if (C.isZero()) { // == 0 is u< 1. if (Pred == CmpInst::ICMP_EQ) return new ICmpInst(CmpInst::ICMP_ULT, X, @@ -2355,7 +2354,7 @@ return nullptr; const APInt *DivisorC; - if (!C.isNullValue() || !match(SRem->getOperand(1), m_Power2(DivisorC))) + if (!C.isZero() || !match(SRem->getOperand(1), m_Power2(DivisorC))) return nullptr; // Mask off the sign bit and the modulo bits (low-bits). @@ -2435,8 +2434,7 @@ // INT_MIN will also fail if the divisor is 1. Although folds of all these // division-by-constant cases should be present, we can not assert that they // have happened before we reach this icmp instruction. - if (C2->isNullValue() || C2->isOneValue() || - (DivIsSigned && C2->isAllOnesValue())) + if (C2->isZero() || C2->isOne() || (DivIsSigned && C2->isAllOnes())) return nullptr; // Compute Prod = C * C2. We are essentially solving an equation of @@ -2476,16 +2474,16 @@ HiOverflow = addWithOverflow(HiBound, LoBound, RangeSize, false); } } else if (C2->isStrictlyPositive()) { // Divisor is > 0. - if (C.isNullValue()) { // (X / pos) op 0 + if (C.isZero()) { // (X / pos) op 0 // Can't overflow. e.g. X/2 op 0 --> [-1, 2) LoBound = -(RangeSize - 1); HiBound = RangeSize; - } else if (C.isStrictlyPositive()) { // (X / pos) op pos + } else if (C.isStrictlyPositive()) { // (X / pos) op pos LoBound = Prod; // e.g. X/5 op 3 --> [15, 20) HiOverflow = LoOverflow = ProdOV; if (!HiOverflow) HiOverflow = addWithOverflow(HiBound, Prod, RangeSize, true); - } else { // (X / pos) op neg + } else { // (X / pos) op neg // e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14) HiBound = Prod + 1; LoOverflow = HiOverflow = ProdOV ? -1 : 0; @@ -2497,7 +2495,7 @@ } else if (C2->isNegative()) { // Divisor is < 0. if (Div->isExact()) RangeSize.negate(); - if (C.isNullValue()) { // (X / neg) op 0 + if (C.isZero()) { // (X / neg) op 0 // e.g. X/-5 op 0 --> [-4, 5) LoBound = RangeSize + 1; HiBound = -RangeSize; @@ -2505,13 +2503,13 @@ HiOverflow = 1; // [INTMIN+1, overflow) HiBound = APInt(); // e.g. 
X/INTMIN = 0 --> X > INTMIN } - } else if (C.isStrictlyPositive()) { // (X / neg) op pos + } else if (C.isStrictlyPositive()) { // (X / neg) op pos // e.g. X/-5 op 3 --> [-19, -14) HiBound = Prod + 1; HiOverflow = LoOverflow = ProdOV ? -1 : 0; if (!LoOverflow) LoOverflow = addWithOverflow(LoBound, HiBound, RangeSize, true) ? -1:0; - } else { // (X / neg) op neg + } else { // (X / neg) op neg LoBound = Prod; // e.g. X/-5 op -3 --> [15, 20) LoOverflow = HiOverflow = ProdOV; if (!HiOverflow) @@ -2604,19 +2602,19 @@ if (Sub->hasNoSignedWrap()) { // (icmp sgt (sub nsw X, Y), -1) -> (icmp sge X, Y) - if (Pred == ICmpInst::ICMP_SGT && C.isAllOnesValue()) + if (Pred == ICmpInst::ICMP_SGT && C.isAllOnes()) return new ICmpInst(ICmpInst::ICMP_SGE, X, Y); // (icmp sgt (sub nsw X, Y), 0) -> (icmp sgt X, Y) - if (Pred == ICmpInst::ICMP_SGT && C.isNullValue()) + if (Pred == ICmpInst::ICMP_SGT && C.isZero()) return new ICmpInst(ICmpInst::ICMP_SGT, X, Y); // (icmp slt (sub nsw X, Y), 0) -> (icmp slt X, Y) - if (Pred == ICmpInst::ICMP_SLT && C.isNullValue()) + if (Pred == ICmpInst::ICMP_SLT && C.isZero()) return new ICmpInst(ICmpInst::ICMP_SLT, X, Y); // (icmp slt (sub nsw X, Y), 1) -> (icmp sle X, Y) - if (Pred == ICmpInst::ICMP_SLT && C.isOneValue()) + if (Pred == ICmpInst::ICMP_SLT && C.isOne()) return new ICmpInst(ICmpInst::ICMP_SLE, X, Y); } @@ -2929,7 +2927,7 @@ // icmp eq/ne (bitcast (not X) to iN), -1 --> icmp eq/ne (bitcast X to iN), 0 // Example: are all elements equal? --> are zero elements not equal? // TODO: Try harder to reduce compare of 2 freely invertible operands? - if (Cmp.isEquality() && C->isAllOnesValue() && Bitcast->hasOneUse() && + if (Cmp.isEquality() && C->isAllOnes() && Bitcast->hasOneUse() && isFreeToInvert(BCSrcOp, BCSrcOp->hasOneUse())) { Type *ScalarTy = Bitcast->getType(); Value *Cast = Builder.CreateBitCast(Builder.CreateNot(BCSrcOp), ScalarTy); @@ -2940,7 +2938,7 @@ // compare in a narrow type to eliminate the extend: // icmp eq/ne (bitcast (ext X) to iN), 0 --> icmp eq/ne (bitcast X to iM), 0 Value *X; - if (Cmp.isEquality() && C->isNullValue() && Bitcast->hasOneUse() && + if (Cmp.isEquality() && C->isZero() && Bitcast->hasOneUse() && match(BCSrcOp, m_ZExtOrSExt(m_Value(X)))) { if (auto *VecTy = dyn_cast<FixedVectorType>(X->getType())) { Type *NewType = Builder.getIntNTy(VecTy->getPrimitiveSizeInBits()); @@ -3081,7 +3079,7 @@ switch (BO->getOpcode()) { case Instruction::SRem: // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one. - if (C.isNullValue() && BO->hasOneUse()) { + if (C.isZero() && BO->hasOneUse()) { const APInt *BOC; if (match(BOp1, m_APInt(BOC)) && BOC->sgt(1) && BOC->isPowerOf2()) { Value *NewRem = Builder.CreateURem(BOp0, BOp1, BO->getName()); @@ -3095,7 +3093,7 @@ if (Constant *BOC = dyn_cast<Constant>(BOp1)) { if (BO->hasOneUse()) return new ICmpInst(Pred, BOp0, ConstantExpr::getSub(RHS, BOC)); - } else if (C.isNullValue()) { + } else if (C.isZero()) { // Replace ((add A, B) != 0) with (A != -B) if A or B is // efficiently invertible, or if the add has just this one use. if (Value *NegVal = dyn_castNegVal(BOp1)) @@ -3116,7 +3114,7 @@ // For the xor case, we can xor two constants together, eliminating // the explicit xor. return new ICmpInst(Pred, BOp0, ConstantExpr::getXor(RHS, BOC)); - } else if (C.isNullValue()) { + } else if (C.isZero()) { // Replace ((xor A, B) != 0) with (A != B) return new ICmpInst(Pred, BOp0, BOp1); } @@ -3129,7 +3127,7 @@ if (Constant *BOC = dyn_cast<Constant>(BOp0)) { // Replace ((sub BOC, B) != C) with (B != BOC-C).
return new ICmpInst(Pred, BOp1, ConstantExpr::getSub(BOC, RHS)); - } else if (C.isNullValue()) { + } else if (C.isZero()) { // Replace ((sub A, B) != 0) with (A != B). return new ICmpInst(Pred, BOp0, BOp1); } @@ -3158,7 +3156,7 @@ break; } case Instruction::UDiv: - if (C.isNullValue()) { + if (C.isZero()) { // (icmp eq/ne (udiv A, B), 0) -> (icmp ugt/ule i32 B, A) auto NewPred = isICMP_NE ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT; return new ICmpInst(NewPred, BOp1, BOp0); @@ -3181,7 +3179,7 @@ case Intrinsic::abs: // abs(A) == 0 -> A == 0 // abs(A) == INT_MIN -> A == INT_MIN - if (C.isNullValue() || C.isMinSignedValue()) + if (C.isZero() || C.isMinSignedValue()) return new ICmpInst(Pred, II->getArgOperand(0), ConstantInt::get(Ty, C)); break; @@ -3217,7 +3215,7 @@ case Intrinsic::ctpop: { // popcount(A) == 0 -> A == 0 and likewise for != // popcount(A) == bitwidth(A) -> A == -1 and likewise for != - bool IsZero = C.isNullValue(); + bool IsZero = C.isZero(); if (IsZero || C == BitWidth) return new ICmpInst(Pred, II->getArgOperand(0), IsZero ? Constant::getNullValue(Ty) @@ -3232,7 +3230,7 @@ // (rot X, ?) == 0/-1 --> X == 0/-1 // TODO: This transform is safe to re-use undef elts in a vector, but // the constant value passed in by the caller doesn't allow that. - if (C.isNullValue() || C.isAllOnesValue()) + if (C.isZero() || C.isAllOnes()) return new ICmpInst(Pred, II->getArgOperand(0), Cmp.getOperand(1)); const APInt *RotAmtC; @@ -3248,7 +3246,7 @@ case Intrinsic::uadd_sat: { // uadd.sat(a, b) == 0 -> (a | b) == 0 - if (C.isNullValue()) { + if (C.isZero()) { Value *Or = Builder.CreateOr(II->getArgOperand(0), II->getArgOperand(1)); return new ICmpInst(Pred, Or, Constant::getNullValue(Ty)); } @@ -3257,7 +3255,7 @@ case Intrinsic::usub_sat: { // usub.sat(a, b) == 0 -> a <= b - if (C.isNullValue()) { + if (C.isZero()) { ICmpInst::Predicate NewPred = Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT; return new ICmpInst(NewPred, II->getArgOperand(0), II->getArgOperand(1)); @@ -4238,8 +4236,8 @@ break; const APInt *C; - if (match(BO0->getOperand(1), m_APInt(C)) && !C->isNullValue() && - !C->isOneValue()) { + if (match(BO0->getOperand(1), m_APInt(C)) && !C->isZero() && + !C->isOne()) { // icmp eq/ne (X * C), (Y * C) --> icmp (X & Mask), (Y & Mask) // Mask = -1 >> count-trailing-zeros(C). if (unsigned TZs = C->countTrailingZeros()) { @@ -5378,7 +5376,7 @@ // Check if the LHS is 8 >>u x and the result is a power of 2 like 1. const APInt *CI; - if (Op0KnownZeroInverted.isOneValue() && + if (Op0KnownZeroInverted.isOne() && match(LHS, m_LShr(m_Power2(CI), m_Value(X)))) { // ((8 >>u X) & 1) == 0 -> X != 3 // ((8 >>u X) & 1) != 0 -> X == 3 diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp @@ -716,11 +716,11 @@ assert(C1.getBitWidth() == C2.getBitWidth() && "Constant widths not equal"); // Bail if we will divide by zero. - if (C2.isNullValue()) + if (C2.isZero()) return false; // Bail if we would divide INT_MIN by -1. 
- if (IsSigned && C1.isMinSignedValue() && C2.isAllOnesValue()) + if (IsSigned && C1.isMinSignedValue() && C2.isAllOnes()) return false; APInt Remainder(C1.getBitWidth(), /*val=*/0ULL, IsSigned); @@ -814,7 +814,7 @@ } } - if (!C2->isNullValue()) // avoid X udiv 0 + if (!C2->isZero()) // avoid X udiv 0 if (Instruction *FoldedDiv = foldBinOpIntoSelectOrPhi(I)) return FoldedDiv; } diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp @@ -166,7 +166,7 @@ // simplify/reduce the instructions. APInt TC = *SelTC; APInt FC = *SelFC; - if (!TC.isNullValue() && !FC.isNullValue()) { + if (!TC.isZero() && !FC.isZero()) { // If the select constants differ by exactly one bit and that's the same // bit that is masked and checked by the select condition, the select can // be replaced by bitwise logic to set/clear one bit of the constant result. @@ -203,7 +203,7 @@ // Determine which shift is needed to transform result of the 'and' into the // desired result. - const APInt &ValC = !TC.isNullValue() ? TC : FC; + const APInt &ValC = !TC.isZero() ? TC : FC; unsigned ValZeros = ValC.logBase2(); unsigned AndZeros = AndMask.logBase2(); @@ -225,7 +225,7 @@ // Okay, now we know that everything is set up, we just don't know whether we // have a icmp_ne or icmp_eq and whether the true or false val is the zero. - bool ShouldNotVal = !TC.isNullValue(); + bool ShouldNotVal = !TC.isZero(); ShouldNotVal ^= Pred == ICmpInst::ICMP_NE; if (ShouldNotVal) V = Builder.CreateXor(V, ValC); @@ -429,10 +429,9 @@ } static bool isSelect01(const APInt &C1I, const APInt &C2I) { - if (!C1I.isNullValue() && !C2I.isNullValue()) // One side must be zero. + if (!C1I.isZero() && !C2I.isZero()) // One side must be zero. return false; - return C1I.isOneValue() || C1I.isAllOnesValue() || - C2I.isOneValue() || C2I.isAllOnesValue(); + return C1I.isOne() || C1I.isAllOnes() || C2I.isOne() || C2I.isAllOnes(); } /// Try to fold the select into one of the operands to allow further @@ -1877,9 +1876,7 @@ m_Value(TrueVal), m_Value(FalseVal)))) return false; - auto IsZeroOrOne = [](const APInt &C) { - return C.isNullValue() || C.isOneValue(); - }; + auto IsZeroOrOne = [](const APInt &C) { return C.isZero() || C.isOne(); }; auto IsMinMax = [&](Value *Min, Value *Max) { APInt MinVal = APInt::getSignedMinValue(Ty->getScalarSizeInBits()); APInt MaxVal = APInt::getSignedMaxValue(Ty->getScalarSizeInBits()); @@ -3255,9 +3252,9 @@ if (!CondVal->getType()->isVectorTy() && !AC.assumptions().empty()) { KnownBits Known(1); computeKnownBits(CondVal, Known, 0, &SI); - if (Known.One.isOneValue()) + if (Known.One.isOne()) return replaceInstUsesWith(SI, TrueVal); - if (Known.Zero.isOneValue()) + if (Known.Zero.isOne()) return replaceInstUsesWith(SI, FalseVal); } diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp @@ -124,7 +124,7 @@ } Known.resetAll(); - if (DemandedMask.isNullValue()) // Not demanding any bits from V. + if (DemandedMask.isZero()) // Not demanding any bits from V. 
return UndefValue::get(VTy); if (Depth == MaxAnalysisRecursionDepth) @@ -274,8 +274,8 @@ // constant because that's a canonical 'not' op, and that is better for // combining, SCEV, and codegen. const APInt *C; - if (match(I->getOperand(1), m_APInt(C)) && !C->isAllOnesValue()) { - if ((*C | ~DemandedMask).isAllOnesValue()) { + if (match(I->getOperand(1), m_APInt(C)) && !C->isAllOnes()) { + if ((*C | ~DemandedMask).isAllOnes()) { // Force bits to 1 to create a 'not' op. I->setOperand(1, ConstantInt::getAllOnesValue(VTy)); return I; @@ -516,8 +516,7 @@ return I->getOperand(0); // We can't do this with the LHS for subtraction, unless we are only // demanding the LSB. - if ((I->getOpcode() == Instruction::Add || - DemandedFromOps.isOneValue()) && + if ((I->getOpcode() == Instruction::Add || DemandedFromOps.isOne()) && DemandedFromOps.isSubsetOf(LHSKnown.Zero)) return I->getOperand(1); @@ -615,7 +614,7 @@ // always convert this into a logical shr, even if the shift amount is // variable. The low bit of the shift cannot be an input sign bit unless // the shift amount is >= the size of the datatype, which is undefined. - if (DemandedMask.isOneValue()) { + if (DemandedMask.isOne()) { // Perform the logical shift right. Instruction *NewVal = BinaryOperator::CreateLShr( I->getOperand(0), I->getOperand(1), I->getName()); @@ -1120,7 +1119,7 @@ return nullptr; } - if (DemandedElts.isNullValue()) { // If nothing is demanded, provide poison. + if (DemandedElts.isZero()) { // If nothing is demanded, provide poison. UndefElts = EltMask; return PoisonValue::get(V->getType()); } @@ -1130,7 +1129,7 @@ if (auto *C = dyn_cast<Constant>(V)) { // Check if this is identity. If so, return 0 since we are not simplifying // anything. - if (DemandedElts.isAllOnesValue()) + if (DemandedElts.isAllOnes()) return nullptr; Type *EltTy = cast<VectorType>(V->getType())->getElementType(); @@ -1283,7 +1282,7 @@ // Handle trivial case of a splat. Only check the first element of LHS // operand. if (all_of(Shuffle->getShuffleMask(), [](int Elt) { return Elt == 0; }) && - DemandedElts.isAllOnesValue()) { + DemandedElts.isAllOnes()) { if (!match(I->getOperand(1), m_Undef())) { I->setOperand(1, PoisonValue::get(I->getOperand(1)->getType())); MadeChange = true; } @@ -1591,7 +1590,7 @@ // If we've proven all of the lanes undef, return an undef value. // TODO: Intersect w/demanded lanes - if (UndefElts.isAllOnesValue()) + if (UndefElts.isAllOnes()) return UndefValue::get(I->getType());; return MadeChange ? I : nullptr; diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp --- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp +++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp @@ -1059,7 +1059,7 @@ // Compare for equality including undefs as equal. auto *Cmp = ConstantExpr::getCompare(ICmpInst::ICMP_EQ, ConstA, ConstB); const APInt *C; - return match(Cmp, m_APIntAllowUndef(C)) && C->isOneValue(); + return match(Cmp, m_APIntAllowUndef(C)) && C->isOne(); }; if ((areLooselyEqual(TV, Op0) && areLooselyEqual(FV, Op1)) || diff --git a/llvm/lib/Transforms/Scalar/BDCE.cpp b/llvm/lib/Transforms/Scalar/BDCE.cpp --- a/llvm/lib/Transforms/Scalar/BDCE.cpp +++ b/llvm/lib/Transforms/Scalar/BDCE.cpp @@ -53,7 +53,7 @@ // in the def-use chain needs to be changed.
auto *J = dyn_cast<Instruction>(JU); if (J && J->getType()->isIntOrIntVectorTy() && - !DB.getDemandedBits(J).isAllOnesValue()) { + !DB.getDemandedBits(J).isAllOnes()) { Visited.insert(J); WorkList.push_back(J); } @@ -84,7 +84,7 @@ // that in the def-use chain needs to be changed. auto *K = dyn_cast<Instruction>(KU); if (K && Visited.insert(K).second && K->getType()->isIntOrIntVectorTy() && - !DB.getDemandedBits(K).isAllOnesValue()) + !DB.getDemandedBits(K).isAllOnes()) WorkList.push_back(K); } } @@ -103,8 +103,7 @@ // Remove instructions that are dead, either because they were not reached // during analysis or have no demanded bits. if (DB.isInstructionDead(&I) || - (I.getType()->isIntOrIntVectorTy() && - DB.getDemandedBits(&I).isNullValue() && + (I.getType()->isIntOrIntVectorTy() && DB.getDemandedBits(&I).isZero() && wouldInstructionBeTriviallyDead(&I))) { salvageDebugInfo(I); Worklist.push_back(&I); diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp --- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -689,7 +689,7 @@ const APInt &RA = RC->getAPInt(); // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do // some folding. - if (RA.isAllOnesValue()) { + if (RA.isAllOnes()) { if (LHS->getType()->isPointerTy()) return nullptr; return SE.getMulExpr(LHS, RC); diff --git a/llvm/lib/Transforms/Scalar/Reassociate.cpp b/llvm/lib/Transforms/Scalar/Reassociate.cpp --- a/llvm/lib/Transforms/Scalar/Reassociate.cpp +++ b/llvm/lib/Transforms/Scalar/Reassociate.cpp @@ -1279,10 +1279,10 @@ /// be returned. static Value *createAndInstr(Instruction *InsertBefore, Value *Opnd, const APInt &ConstOpnd) { - if (ConstOpnd.isNullValue()) + if (ConstOpnd.isZero()) return nullptr; - if (ConstOpnd.isAllOnesValue()) + if (ConstOpnd.isAllOnes()) return Opnd; Instruction *I = BinaryOperator::CreateAnd( @@ -1304,7 +1304,7 @@ // = ((x | c1) ^ c1) ^ (c1 ^ c2) // = (x & ~c1) ^ (c1 ^ c2) // It is useful only when c1 == c2. - if (!Opnd1->isOrExpr() || Opnd1->getConstPart().isNullValue()) + if (!Opnd1->isOrExpr() || Opnd1->getConstPart().isZero()) return false; if (!Opnd1->getValue()->hasOneUse()) @@ -1468,8 +1468,7 @@ Value *CV; // Step 3.1: Try simplifying "CurrOpnd ^ ConstOpnd" - if (!ConstOpnd.isNullValue() && - CombineXorOpnd(I, CurrOpnd, ConstOpnd, CV)) { + if (!ConstOpnd.isZero() && CombineXorOpnd(I, CurrOpnd, ConstOpnd, CV)) { Changed = true; if (CV) *CurrOpnd = XorOpnd(CV); @@ -1510,7 +1509,7 @@ ValueEntry VE(getRank(O.getValue()), O.getValue()); Ops.push_back(VE); } - if (!ConstOpnd.isNullValue()) { + if (!ConstOpnd.isZero()) { Value *C = ConstantInt::get(Ty, ConstOpnd); ValueEntry VE(getRank(C), C); Ops.push_back(VE); @@ -1519,7 +1518,7 @@ if (Sz == 1) return Ops.back().Op; if (Sz == 0) { - assert(ConstOpnd.isNullValue()); + assert(ConstOpnd.isZero()); return ConstantInt::get(Ty, ConstOpnd); } } diff --git a/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp --- a/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp +++ b/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp @@ -607,7 +607,7 @@ if (IndexOffset == 1) return C.Stride; // Common case 2: if (i' - i) is -1, Bump = -S. - if (IndexOffset.isAllOnesValue()) + if (IndexOffset.isAllOnes()) return Builder.CreateNeg(C.Stride); // Otherwise, Bump = (i' - i) * sext/trunc(S).
Note that (i' - i) and S may diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp --- a/llvm/lib/Transforms/Utils/Local.cpp +++ b/llvm/lib/Transforms/Utils/Local.cpp @@ -3199,7 +3199,7 @@ Instruction *Result = CallInst::Create(F, Provider, "rev", I); InsertedInsts.push_back(Result); - if (!DemandedMask.isAllOnesValue()) { + if (!DemandedMask.isAllOnes()) { auto *Mask = ConstantInt::get(DemandedTy, DemandedMask); Result = BinaryOperator::Create(Instruction::And, Result, Mask, "mask", I); InsertedInsts.push_back(Result); diff --git a/llvm/unittests/IR/ConstantRangeTest.cpp b/llvm/unittests/IR/ConstantRangeTest.cpp --- a/llvm/unittests/IR/ConstantRangeTest.cpp +++ b/llvm/unittests/IR/ConstantRangeTest.cpp @@ -643,8 +643,8 @@ EnumerateConstantRanges(Bits, [&](const ConstantRange &CR) { unsigned MinBitWidth = CR.getActiveBits(); if (MinBitWidth == 0) { - EXPECT_TRUE(CR.isEmptySet() || (CR.isSingleElement() && - CR.getSingleElement()->isNullValue())); + EXPECT_TRUE(CR.isEmptySet() || + (CR.isSingleElement() && CR.getSingleElement()->isZero())); return; } if (MinBitWidth == Bits) @@ -1225,7 +1225,7 @@ return; // SignedMin / -1 is UB. - if (N1.isMinSignedValue() && N2.isAllOnesValue()) + if (N1.isMinSignedValue() && N2.isAllOnes()) return; APInt N = N1.sdiv(N2); @@ -1298,7 +1298,7 @@ return CR1.urem(CR2); }, [](const APInt &N1, const APInt &N2) -> Optional<APInt> { - if (N2.isNullValue()) + if (N2.isZero()) return None; return N1.urem(N2); }); @@ -1372,7 +1372,7 @@ return CR1.srem(CR2); }, [](const APInt &N1, const APInt &N2) -> Optional<APInt> { - if (N2.isNullValue()) + if (N2.isZero()) return None; return N1.srem(N2); }); diff --git a/llvm/unittests/IR/PatternMatch.cpp b/llvm/unittests/IR/PatternMatch.cpp --- a/llvm/unittests/IR/PatternMatch.cpp +++ b/llvm/unittests/IR/PatternMatch.cpp @@ -1092,29 +1092,29 @@ // We can always match simple constants and simple splats. C = nullptr; EXPECT_TRUE(match(ScalarZero, m_APInt(C))); - EXPECT_TRUE(C->isNullValue()); + EXPECT_TRUE(C->isZero()); C = nullptr; EXPECT_TRUE(match(ScalarZero, m_APIntForbidUndef(C))); - EXPECT_TRUE(C->isNullValue()); + EXPECT_TRUE(C->isZero()); C = nullptr; EXPECT_TRUE(match(ScalarZero, m_APIntAllowUndef(C))); - EXPECT_TRUE(C->isNullValue()); + EXPECT_TRUE(C->isZero()); C = nullptr; EXPECT_TRUE(match(VectorZero, m_APInt(C))); - EXPECT_TRUE(C->isNullValue()); + EXPECT_TRUE(C->isZero()); C = nullptr; EXPECT_TRUE(match(VectorZero, m_APIntForbidUndef(C))); - EXPECT_TRUE(C->isNullValue()); + EXPECT_TRUE(C->isZero()); C = nullptr; EXPECT_TRUE(match(VectorZero, m_APIntAllowUndef(C))); - EXPECT_TRUE(C->isNullValue()); + EXPECT_TRUE(C->isZero()); // Whether splats with undef can be matched depends on the matcher.
EXPECT_FALSE(match(VectorZeroUndef, m_APInt(C))); EXPECT_FALSE(match(VectorZeroUndef, m_APIntForbidUndef(C))); C = nullptr; EXPECT_TRUE(match(VectorZeroUndef, m_APIntAllowUndef(C))); - EXPECT_TRUE(C->isNullValue()); + EXPECT_TRUE(C->isZero()); } TEST_F(PatternMatchTest, VectorUndefFloat) { @@ -1440,7 +1440,7 @@ namespace { struct is_unsigned_zero_pred { - bool isValue(const APInt &C) { return C.isNullValue(); } + bool isValue(const APInt &C) { return C.isZero(); } }; struct is_float_zero_pred { diff --git a/llvm/unittests/Support/KnownBitsTest.cpp b/llvm/unittests/Support/KnownBitsTest.cpp --- a/llvm/unittests/Support/KnownBitsTest.cpp +++ b/llvm/unittests/Support/KnownBitsTest.cpp @@ -166,7 +166,7 @@ KnownMulHU.One &= Res; KnownMulHU.Zero &= ~Res; - if (!N2.isNullValue()) { + if (!N2.isZero()) { Res = N1.udiv(N2); KnownUDiv.One &= Res; KnownUDiv.Zero &= ~Res; diff --git a/llvm/utils/TableGen/PredicateExpander.cpp b/llvm/utils/TableGen/PredicateExpander.cpp --- a/llvm/utils/TableGen/PredicateExpander.cpp +++ b/llvm/utils/TableGen/PredicateExpander.cpp @@ -470,7 +470,7 @@ increaseIndentLevel(); OS.indent(getIndentLevel() * 2); if (ShouldUpdateOpcodeMask) { - if (PI.OperandMask.isNullValue()) + if (PI.OperandMask.isZero()) OS << "Mask.clearAllBits();\n"; else OS << "Mask = " << PI.OperandMask << ";\n";