diff --git a/llvm/include/llvm/Analysis/Utils/Local.h b/llvm/include/llvm/Analysis/Utils/Local.h
--- a/llvm/include/llvm/Analysis/Utils/Local.h
+++ b/llvm/include/llvm/Analysis/Utils/Local.h
@@ -63,7 +63,8 @@
         // Splat the constant if needed.
         if (IntIdxTy->isVectorTy() && !OpC->getType()->isVectorTy())
-          OpC = ConstantVector::getSplat(IntIdxTy->getVectorElementCount(), OpC);
+          OpC = ConstantVector::getSplat(
+              cast<VectorType>(IntIdxTy)->getElementCount(), OpC);
         Constant *Scale = ConstantInt::get(IntIdxTy, Size);
         Constant *OC = ConstantExpr::getIntegerCast(OpC, IntIdxTy, true /*SExt*/);
@@ -76,7 +77,8 @@
     // Splat the index if needed.
     if (IntIdxTy->isVectorTy() && !Op->getType()->isVectorTy())
-      Op = Builder->CreateVectorSplat(IntIdxTy->getVectorNumElements(), Op);
+      Op = Builder->CreateVectorSplat(
+          cast<VectorType>(IntIdxTy)->getNumElements(), Op);
     // Convert to correct type.
     if (Op->getType() != IntIdxTy)
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -155,11 +155,11 @@
   // If the element types match, IR can fold it.
   unsigned NumDstElt = DestVTy->getNumElements();
-  unsigned NumSrcElt = C->getType()->getVectorNumElements();
+  unsigned NumSrcElt = cast<VectorType>(C->getType())->getNumElements();
   if (NumDstElt == NumSrcElt)
     return ConstantExpr::getBitCast(C, DestTy);
-  Type *SrcEltTy = C->getType()->getVectorElementType();
+  Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
   Type *DstEltTy = DestVTy->getElementType();
   // Otherwise, we're changing the number of elements in a vector, which
@@ -218,7 +218,8 @@
     for (unsigned j = 0; j != Ratio; ++j) {
       Constant *Src = C->getAggregateElement(SrcElt++);
       if (Src && isa<UndefValue>(Src))
-        Src = Constant::getNullValue(C->getType()->getVectorElementType());
+        Src = Constant::getNullValue(
+            cast<VectorType>(C->getType())->getElementType());
       else
         Src = dyn_cast_or_null<ConstantInt>(Src);
       if (!Src) // Reject constantexpr elements.
@@ -469,8 +470,8 @@
     NumElts = AT->getNumElements();
     EltTy = AT->getElementType();
   } else {
-    NumElts = C->getType()->getVectorNumElements();
-    EltTy = C->getType()->getVectorElementType();
+    NumElts = cast<VectorType>(C->getType())->getNumElements();
+    EltTy = cast<VectorType>(C->getType())->getElementType();
   }
   uint64_t EltSize = DL.getTypeAllocSize(EltTy);
   uint64_t Index = ByteOffset / EltSize;
@@ -508,7 +509,7 @@
 Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
                                           const DataLayout &DL) {
   // Bail out early. Not expect to load from scalable global variable.
-  if (LoadTy->isVectorTy() && LoadTy->getVectorIsScalable())
+  if (LoadTy->isVectorTy() && cast<VectorType>(LoadTy)->isScalable())
     return nullptr;
   auto *PTy = cast<PointerType>(C->getType());
@@ -836,7 +837,7 @@
   Type *ResElemTy = GEP->getResultElementType();
   Type *ResTy = GEP->getType();
   if (!SrcElemTy->isSized() ||
-      (SrcElemTy->isVectorTy() && SrcElemTy->getVectorIsScalable()))
+      (SrcElemTy->isVectorTy() && cast<VectorType>(SrcElemTy)->isScalable()))
     return nullptr;
   if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
@@ -2571,7 +2572,7 @@
     // Do not iterate on scalable vector. The number of elements is unknown at
     // compile-time.
-    if (VTy->getVectorIsScalable())
+    if (VTy->isScalable())
       return nullptr;
     if (IntrinsicID == Intrinsic::masked_load) {
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -945,8 +945,9 @@
   // If any element of a constant divisor vector is zero or undef, the whole op
   // is undef.
   auto *Op1C = dyn_cast<Constant>(Op1);
-  if (Op1C && Ty->isVectorTy()) {
-    unsigned NumElts = Ty->getVectorNumElements();
+  auto *VTy = dyn_cast<VectorType>(Ty);
+  if (Op1C && VTy) {
+    unsigned NumElts = VTy->getNumElements();
     for (unsigned i = 0; i != NumElts; ++i) {
       Constant *Elt = Op1C->getAggregateElement(i);
       if (Elt && (Elt->isNullValue() || isa<UndefValue>(Elt)))
@@ -1221,7 +1222,8 @@
   // If all lanes of a vector shift are undefined the whole shift is.
   if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
-    for (unsigned I = 0, E = C->getType()->getVectorNumElements(); I != E; ++I)
+    for (unsigned I = 0, E = cast<VectorType>(C->getType())->getNumElements();
+         I != E; ++I)
       if (!isUndefShift(C->getAggregateElement(I)))
         return false;
     return true;
@@ -4011,7 +4013,7 @@
   Constant *TrueC, *FalseC;
   if (TrueVal->getType()->isVectorTy() && match(TrueVal, m_Constant(TrueC)) &&
       match(FalseVal, m_Constant(FalseC))) {
-    unsigned NumElts = TrueC->getType()->getVectorNumElements();
+    unsigned NumElts = cast<VectorType>(TrueC->getType())->getNumElements();
     SmallVector<Constant *, 16> NewC;
     for (unsigned i = 0; i != NumElts; ++i) {
       // Bail out on incomplete vector constants.
@@ -4081,7 +4083,7 @@
     return UndefValue::get(GEPTy);
   bool IsScalableVec =
-      SrcTy->isVectorTy() ? SrcTy->getVectorIsScalable() : false;
+      isa<VectorType>(SrcTy) && cast<VectorType>(SrcTy)->isScalable();
   if (Ops.size() == 2) {
     // getelementptr P, 0 -> P.
@@ -4223,8 +4225,8 @@
   // For fixed-length vector, fold into undef if index is out of bounds.
   if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
-    if (!Vec->getType()->getVectorIsScalable() &&
-        CI->uge(Vec->getType()->getVectorNumElements()))
+    if (!cast<VectorType>(Vec->getType())->isScalable() &&
+        CI->uge(cast<VectorType>(Vec->getType())->getNumElements()))
       return UndefValue::get(Vec->getType());
   }
@@ -4280,6 +4282,7 @@
 /// If not, this returns null.
 static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx,
                                          const SimplifyQuery &, unsigned) {
+  auto *VecVTy = cast<VectorType>(Vec->getType());
   if (auto *CVec = dyn_cast<Constant>(Vec)) {
     if (auto *CIdx = dyn_cast<Constant>(Idx))
       return ConstantFoldExtractElementInstruction(CVec, CIdx);
@@ -4289,16 +4292,15 @@
       return Splat;
     if (isa<UndefValue>(Vec))
-      return UndefValue::get(Vec->getType()->getVectorElementType());
+      return UndefValue::get(VecVTy->getElementType());
   }
   // If extracting a specified index from the vector, see if we can recursively
   // find a previously computed scalar that was inserted into the vector.
   if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
     // For fixed-length vector, fold into undef if index is out of bounds.
-    if (!Vec->getType()->getVectorIsScalable() &&
-        IdxC->getValue().uge(Vec->getType()->getVectorNumElements()))
-      return UndefValue::get(Vec->getType()->getVectorElementType());
+    if (!VecVTy->isScalable() && IdxC->getValue().uge(VecVTy->getNumElements()))
+      return UndefValue::get(VecVTy->getElementType());
     if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
       return Elt;
   }
@@ -4306,7 +4308,7 @@
   // An undef extract index can be arbitrarily chosen to be an out-of-range
   // index value, which would result in the instruction being undef.
   if (isa<UndefValue>(Idx))
-    return UndefValue::get(Vec->getType()->getVectorElementType());
+    return UndefValue::get(VecVTy->getElementType());
   return nullptr;
 }
@@ -4403,7 +4405,7 @@
     return nullptr;
   // The mask value chooses which source operand we need to look at next.
-  int InVecNumElts = Op0->getType()->getVectorNumElements();
+  int InVecNumElts = cast<VectorType>(Op0->getType())->getNumElements();
   int RootElt = MaskVal;
   Value *SourceOp = Op0;
   if (MaskVal >= InVecNumElts) {
@@ -4446,9 +4448,9 @@
   if (all_of(Mask, [](int Elem) { return Elem == UndefMaskElem; }))
     return UndefValue::get(RetTy);
-  Type *InVecTy = Op0->getType();
+  auto *InVecTy = cast<VectorType>(Op0->getType());
   unsigned MaskNumElts = Mask.size();
-  ElementCount InVecEltCount = InVecTy->getVectorElementCount();
+  ElementCount InVecEltCount = InVecTy->getElementCount();
   bool Scalable = InVecEltCount.Scalable;
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -148,7 +148,8 @@
                                               const DominatorTree *DT) {
   // For unsized types or scalable vectors we don't know exactly how many bytes
   // are dereferenced, so bail out.
-  if (!Ty->isSized() || (Ty->isVectorTy() && Ty->getVectorIsScalable()))
+  if (!Ty->isSized() ||
+      (Ty->isVectorTy() && cast<VectorType>(Ty)->isScalable()))
     return false;
   // When dereferenceability information is provided by a dereferenceable
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -650,7 +650,7 @@
     return unknown();
   if (I.getAllocatedType()->isVectorTy() &&
-      I.getAllocatedType()->getVectorIsScalable())
+      cast<VectorType>(I.getAllocatedType())->isScalable())
     return unknown();
   APInt Size(IntTyBits, DL.getTypeAllocSize(I.getAllocatedType()));
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -874,7 +874,7 @@
   else if (!SI)
     return false;
-  SmallVector<int, 32> Mask(SI->getType()->getVectorNumElements(), -1);
+  SmallVector<int, 32> Mask(SI->getType()->getNumElements(), -1);
   // Build a mask of 0, 2, ... (left) or 1, 3, ... (right) depending on whether
   // we look at the left or right side.
@@ -1036,8 +1036,8 @@
   if (!RD)
     return RK_None;
-  Type *VecTy = RdxStart->getType();
-  unsigned NumVecElems = VecTy->getVectorNumElements();
+  auto *VecTy = cast<VectorType>(RdxStart->getType());
+  unsigned NumVecElems = VecTy->getNumElements();
   if (!isPowerOf2_32(NumVecElems))
     return RK_None;
@@ -1101,8 +1101,8 @@
   if (!RD)
     return RK_None;
-  Type *VecTy = ReduxRoot->getOperand(0)->getType();
-  unsigned NumVecElems = VecTy->getVectorNumElements();
+  auto *VecTy = cast<VectorType>(ReduxRoot->getOperand(0)->getType());
+  unsigned NumVecElems = VecTy->getNumElements();
   if (!isPowerOf2_32(NumVecElems))
     return RK_None;
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -168,11 +168,12 @@
                                 APInt &DemandedLHS, APInt &DemandedRHS) {
   // The length of scalable vectors is unknown at compile time, thus we
   // cannot check their values
-  if (Shuf->getType()->getVectorElementCount().Scalable)
+  if (Shuf->getType()->isScalable())
     return false;
-  int NumElts = Shuf->getOperand(0)->getType()->getVectorNumElements();
-  int NumMaskElts = Shuf->getType()->getVectorNumElements();
+  int NumElts =
+      cast<VectorType>(Shuf->getOperand(0)->getType())->getNumElements();
+  int NumMaskElts = Shuf->getType()->getNumElements();
   DemandedLHS = DemandedRHS = APInt::getNullValue(NumElts);
   if (DemandedElts.isNullValue())
     return true;
@@ -206,9 +207,10 @@
 static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                              const Query &Q) {
   Type *Ty = V->getType();
-  APInt DemandedElts = Ty->isVectorTy()
-                           ? APInt::getAllOnesValue(Ty->getVectorNumElements())
-                           : APInt(1, 1);
+  APInt DemandedElts =
+      Ty->isVectorTy()
+          ? APInt::getAllOnesValue(cast<VectorType>(Ty)->getNumElements())
+          : APInt(1, 1);
   computeKnownBits(V, DemandedElts, Known, Depth, Q);
 }
@@ -373,9 +375,10 @@
 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                    const Query &Q) {
   Type *Ty = V->getType();
-  APInt DemandedElts = Ty->isVectorTy()
-                           ? APInt::getAllOnesValue(Ty->getVectorNumElements())
-                           : APInt(1, 1);
+  APInt DemandedElts =
+      Ty->isVectorTy()
+          ? APInt::getAllOnesValue(cast<VectorType>(Ty)->getNumElements())
+          : APInt(1, 1);
   return ComputeNumSignBits(V, DemandedElts, Depth, Q);
 }
@@ -1791,7 +1794,7 @@
     const Value *Vec = I->getOperand(0);
     const Value *Idx = I->getOperand(1);
     auto *CIdx = dyn_cast<ConstantInt>(Idx);
-    unsigned NumElts = Vec->getType()->getVectorNumElements();
+    unsigned NumElts = cast<VectorType>(Vec->getType())->getNumElements();
     APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
     if (CIdx && CIdx->getValue().ult(NumElts))
       DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
@@ -1870,8 +1873,8 @@
   Type *Ty = V->getType();
   assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
          "Not integer or pointer type!");
-  assert(((Ty->isVectorTy() &&
-           Ty->getVectorNumElements() == DemandedElts.getBitWidth()) ||
+  assert(((Ty->isVectorTy() && cast<VectorType>(Ty)->getNumElements() ==
+                                   DemandedElts.getBitWidth()) ||
           (!Ty->isVectorTy() && DemandedElts == APInt(1, 1))) &&
          "Unexpected vector size");
@@ -2510,7 +2513,7 @@
     const Value *Vec = EEI->getVectorOperand();
     const Value *Idx = EEI->getIndexOperand();
     auto *CIdx = dyn_cast<ConstantInt>(Idx);
-    unsigned NumElts = Vec->getType()->getVectorNumElements();
+    unsigned NumElts = cast<VectorType>(Vec->getType())->getNumElements();
     APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
     if (CIdx && CIdx->getValue().ult(NumElts))
       DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
@@ -2524,9 +2527,10 @@
 bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
   Type *Ty = V->getType();
-  APInt DemandedElts = Ty->isVectorTy()
-                           ? APInt::getAllOnesValue(Ty->getVectorNumElements())
-                           : APInt(1, 1);
+  APInt DemandedElts =
+      Ty->isVectorTy()
+          ? APInt::getAllOnesValue(cast<VectorType>(Ty)->getNumElements())
+          : APInt(1, 1);
   return isKnownNonZero(V, DemandedElts, Depth, Q);
 }
@@ -2627,7 +2631,7 @@
       return 0;
     unsigned MinSignBits = TyBits;
-    unsigned NumElts = CV->getType()->getVectorNumElements();
+    unsigned NumElts = cast<VectorType>(CV->getType())->getNumElements();
     for (unsigned i = 0; i != NumElts; ++i) {
       if (!DemandedElts[i])
         continue;
@@ -2670,8 +2674,8 @@
   // same behavior for poison though -- that's a FIXME today.
   Type *Ty = V->getType();
-  assert(((Ty->isVectorTy() &&
-           Ty->getVectorNumElements() == DemandedElts.getBitWidth()) ||
+  assert(((Ty->isVectorTy() && cast<VectorType>(Ty)->getNumElements() ==
+                                   DemandedElts.getBitWidth()) ||
           (!Ty->isVectorTy() && DemandedElts == APInt(1, 1))) &&
          "Unexpected vector size");
@@ -3246,8 +3250,8 @@
   // Handle vector of constants.
   if (auto *CV = dyn_cast<Constant>(V)) {
-    if (CV->getType()->isVectorTy()) {
-      unsigned NumElts = CV->getType()->getVectorNumElements();
+    if (auto *CVVTy = dyn_cast<VectorType>(CV->getType())) {
+      unsigned NumElts = CVVTy->getNumElements();
       for (unsigned i = 0; i != NumElts; ++i) {
         auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
         if (!CFP)
@@ -3423,7 +3427,7 @@
     return false;
   // For vectors, verify that each element is not infinity.
-  unsigned NumElts = V->getType()->getVectorNumElements();
+  unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
   for (unsigned i = 0; i != NumElts; ++i) {
     Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
     if (!Elt)
@@ -3524,7 +3528,7 @@
     return false;
   // For vectors, verify that each element is not NaN.
-  unsigned NumElts = V->getType()->getVectorNumElements();
+  unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
   for (unsigned i = 0; i != NumElts; ++i) {
     Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
     if (!Elt)
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -263,7 +263,7 @@
   assert(V->getType()->isVectorTy() && "Not looking at a vector?");
   VectorType *VTy = cast<VectorType>(V->getType());
   // For fixed-length vector, return undef for out of range access.
-  if (!V->getType()->getVectorIsScalable()) {
+  if (!VTy->isScalable()) {
     unsigned Width = VTy->getNumElements();
     if (EltNo >= Width)
       return UndefValue::get(VTy->getElementType());
@@ -289,7 +289,8 @@
   }
   if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
-    unsigned LHSWidth = SVI->getOperand(0)->getType()->getVectorNumElements();
+    unsigned LHSWidth =
+        cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
     int InEl = SVI->getMaskValue(EltNo);
     if (InEl < 0)
       return UndefValue::get(VTy->getElementType());
@@ -805,8 +806,9 @@
     return false;
   if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
     return true;
-  for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E;
-       ++I) {
+  for (unsigned I = 0,
+                E = cast<VectorType>(ConstMask->getType())->getNumElements();
+       I != E; ++I) {
     if (auto *MaskElt = ConstMask->getAggregateElement(I))
       if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
         continue;
@@ -822,8 +824,9 @@
     return false;
   if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
     return true;
-  for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E;
-       ++I) {
+  for (unsigned I = 0,
+                E = cast<VectorType>(ConstMask->getType())->getNumElements();
+       I != E; ++I) {
     if (auto *MaskElt = ConstMask->getAggregateElement(I))
       if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
        continue;
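Note on the pattern: every change above replaces one of the asserting vector getters on Type (getVectorNumElements, getVectorElementType, getVectorElementCount, getVectorIsScalable) with an explicit cast<VectorType> at the call site followed by the corresponding VectorType accessor. A minimal sketch of that rewrite, using a hypothetical helper numEltsOf that is not part of this patch:

    #include "llvm/IR/DerivedTypes.h" // llvm::Type, llvm::VectorType
    using namespace llvm;

    static unsigned numEltsOf(Type *Ty) {
      // Before: the asserting getter on Type hid the cast.
      //   return Ty->getVectorNumElements();
      // After: the cast to VectorType is spelled out at the call site, and the
      // accessor lives on VectorType itself.
      return cast<VectorType>(Ty)->getNumElements();
    }

Where the surrounding code cannot assume the type is a vector (e.g. the IsScalableVec computation above), the diff instead guards with isa<VectorType>(Ty) or uses dyn_cast<VectorType>(Ty) before touching the element count.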