diff --git a/llvm/include/llvm/IR/Constants.h b/llvm/include/llvm/IR/Constants.h
--- a/llvm/include/llvm/IR/Constants.h
+++ b/llvm/include/llvm/IR/Constants.h
@@ -803,8 +803,8 @@
   /// Specialize the getType() method to always return a VectorType,
   /// which reduces the amount of casting needed in parts of the compiler.
-  inline VectorType *getType() const {
-    return cast<VectorType>(Value::getType());
+  inline FixedVectorType *getType() const {
+    return cast<FixedVectorType>(Value::getType());
   }
 
   /// Methods for support type inquiry through isa, cast, and dyn_cast:
diff --git a/llvm/include/llvm/IR/GetElementPtrTypeIterator.h b/llvm/include/llvm/IR/GetElementPtrTypeIterator.h
--- a/llvm/include/llvm/IR/GetElementPtrTypeIterator.h
+++ b/llvm/include/llvm/IR/GetElementPtrTypeIterator.h
@@ -83,7 +83,7 @@
       if (isa<ScalableVectorType>(VTy))
         NumElements = Unbounded;
       else
-        NumElements = VTy->getNumElements();
+        NumElements = cast<FixedVectorType>(VTy)->getNumElements();
     } else
       CurTy = dyn_cast<StructType>(Ty);
     ++OpIt;
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -1992,7 +1992,7 @@
   /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
   bool changesLength() const {
     unsigned NumSourceElts =
-        cast<VectorType>(Op<0>()->getType())->getNumElements();
+        cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
     unsigned NumMaskElts = ShuffleMask.size();
     return NumSourceElts != NumMaskElts;
   }
@@ -2002,7 +2002,7 @@
   /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
   bool increasesLength() const {
     unsigned NumSourceElts =
-        cast<VectorType>(Op<0>()->getType())->getNumElements();
+        cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
     unsigned NumMaskElts = ShuffleMask.size();
     return NumSourceElts < NumMaskElts;
   }
@@ -2195,7 +2195,8 @@
   /// Return true if this shuffle mask is an extract subvector mask.
   bool isExtractSubvectorMask(int &Index) const {
-    int NumSrcElts = cast<VectorType>(Op<0>()->getType())->getNumElements();
+    int NumSrcElts =
+        cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
     return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
   }
diff --git a/llvm/include/llvm/IR/PatternMatch.h b/llvm/include/llvm/IR/PatternMatch.h
--- a/llvm/include/llvm/IR/PatternMatch.h
+++ b/llvm/include/llvm/IR/PatternMatch.h
@@ -275,7 +275,8 @@
       return this->isValue(CI->getValue());
 
     // Non-splat vector constant: check each element for a match.
-    unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
+    unsigned NumElts =
+        cast<FixedVectorType>(V->getType())->getNumElements();
     assert(NumElts != 0 && "Constant vector with no elements?");
     bool HasNonUndefElements = false;
     for (unsigned i = 0; i != NumElts; ++i) {
@@ -334,7 +335,8 @@
       return this->isValue(CF->getValueAPF());
 
     // Non-splat vector constant: check each element for a match.
-    unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
+    unsigned NumElts =
+        cast<FixedVectorType>(V->getType())->getNumElements();
     assert(NumElts != 0 && "Constant vector with no elements?");
     bool HasNonUndefElements = false;
     for (unsigned i = 0; i != NumElts; ++i) {
diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp
--- a/llvm/lib/IR/AsmWriter.cpp
+++ b/llvm/lib/IR/AsmWriter.cpp
@@ -1506,7 +1506,7 @@
   }
 
   if (isa<ConstantVector>(CV) || isa<ConstantDataVector>(CV)) {
-    auto *CVVTy = cast<VectorType>(CV->getType());
+    auto *CVVTy = cast<FixedVectorType>(CV->getType());
     Type *ETy = CVVTy->getElementType();
     Out << '<';
     TypePrinter.print(ETy, Out);
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -899,7 +899,7 @@
 // to byte shuffles.
 static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
                                          unsigned Shift) {
-  auto *ResultTy = cast<VectorType>(Op->getType());
+  auto *ResultTy = cast<FixedVectorType>(Op->getType());
   unsigned NumElts = ResultTy->getNumElements() * 8;
 
   // Bitcast from a 64-bit element type to a byte element type.
@@ -933,7 +933,7 @@
 // to byte shuffles.
 static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
                                          unsigned Shift) {
-  auto *ResultTy = cast<VectorType>(Op->getType());
+  auto *ResultTy = cast<FixedVectorType>(Op->getType());
   unsigned NumElts = ResultTy->getNumElements() * 8;
 
   // Bitcast from a 64-bit element type to a byte element type.
@@ -991,7 +991,7 @@
       return Op0;
 
   Mask = getX86MaskVec(Builder, Mask,
-                       cast<VectorType>(Op0->getType())->getNumElements());
+                       cast<FixedVectorType>(Op0->getType())->getNumElements());
   return Builder.CreateSelect(Mask, Op0, Op1);
 }
 
@@ -1019,7 +1019,7 @@
                                         bool IsVALIGN) {
   unsigned ShiftVal = cast<ConstantInt>(Shift)->getZExtValue();
 
-  unsigned NumElts = cast<VectorType>(Op0->getType())->getNumElements();
+  unsigned NumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
   assert((IsVALIGN || NumElts % 16 == 0) && "Illegal NumElts for PALIGNR!");
   assert((!IsVALIGN || NumElts <= 16) && "NumElts too large for VALIGN!");
   assert(isPowerOf2_32(NumElts) && "NumElts not a power of 2!");
@@ -1150,7 +1150,7 @@
   // Funnel shifts amounts are treated as modulo and types are all power-of-2 so
   // we only care about the lowest log2 bits anyway.
   if (Amt->getType() != Ty) {
-    unsigned NumElts = cast<VectorType>(Ty)->getNumElements();
+    unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
     Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
     Amt = Builder.CreateVectorSplat(NumElts, Amt);
   }
@@ -1220,7 +1220,7 @@
   // Funnel shifts amounts are treated as modulo and types are all power-of-2 so
   // we only care about the lowest log2 bits anyway.
   if (Amt->getType() != Ty) {
-    unsigned NumElts = cast<VectorType>(Ty)->getNumElements();
+    unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
     Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
     Amt = Builder.CreateVectorSplat(NumElts, Amt);
   }
@@ -1257,7 +1257,7 @@
     return Builder.CreateAlignedStore(Data, Ptr, Alignment);
 
   // Convert the mask from an integer type to a vector of i1.
-  unsigned NumElts = cast<VectorType>(Data->getType())->getNumElements();
+  unsigned NumElts = cast<FixedVectorType>(Data->getType())->getNumElements();
   Mask = getX86MaskVec(Builder, Mask, NumElts);
   return Builder.CreateMaskedStore(Data, Ptr, Alignment, Mask);
 }
@@ -1280,7 +1280,8 @@
     return Builder.CreateAlignedLoad(ValTy, Ptr, Alignment);
 
   // Convert the mask from an integer type to a vector of i1.
-  unsigned NumElts = cast<VectorType>(Passthru->getType())->getNumElements();
+  unsigned NumElts =
+      cast<FixedVectorType>(Passthru->getType())->getNumElements();
   Mask = getX86MaskVec(Builder, Mask, NumElts);
   return Builder.CreateMaskedLoad(Ptr, Alignment, Mask, Passthru);
 }
@@ -1344,7 +1345,7 @@
 // Applying mask on vector of i1's and make sure result is at least 8 bits wide.
 static Value *ApplyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
                                      Value *Mask) {
-  unsigned NumElts = cast<VectorType>(Vec->getType())->getNumElements();
+  unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
   if (Mask) {
     const auto *C = dyn_cast<Constant>(Mask);
     if (!C || !C->isAllOnesValue())
@@ -1367,7 +1368,7 @@
 static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallInst &CI,
                                    unsigned CC, bool Signed) {
   Value *Op0 = CI.getArgOperand(0);
-  unsigned NumElts = cast<VectorType>(Op0->getType())->getNumElements();
+  unsigned NumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
 
   Value *Cmp;
   if (CC == 3) {
@@ -1420,7 +1421,7 @@
 static Value* UpgradeMaskToInt(IRBuilder<> &Builder, CallInst &CI) {
   Value* Op = CI.getArgOperand(0);
   Type* ReturnOp = CI.getType();
-  unsigned NumElts = cast<VectorType>(CI.getType())->getNumElements();
+  unsigned NumElts = cast<FixedVectorType>(CI.getType())->getNumElements();
   Value *Mask = getX86MaskVec(Builder, Op, NumElts);
   return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2");
 }
@@ -1869,8 +1870,8 @@
       Rep = Builder.CreateICmp(Pred, Rep, Zero);
       Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, Mask);
     } else if (IsX86 && (Name.startswith("avx512.mask.pbroadcast"))){
-      unsigned NumElts =
-          cast<VectorType>(CI->getArgOperand(1)->getType())->getNumElements();
+      unsigned NumElts = cast<FixedVectorType>(CI->getArgOperand(1)->getType())
+                             ->getNumElements();
       Rep = Builder.CreateVectorSplat(NumElts, CI->getArgOperand(0));
       Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
                           CI->getArgOperand(1));
@@ -2120,9 +2121,9 @@
                          Name == "avx.cvt.ps2.pd.256" ||
                          Name == "avx512.mask.cvtps2pd.128" ||
                          Name == "avx512.mask.cvtps2pd.256")) {
-    auto *DstTy = cast<VectorType>(CI->getType());
+    auto *DstTy = cast<FixedVectorType>(CI->getType());
     Rep = CI->getArgOperand(0);
-    auto *SrcTy = cast<VectorType>(Rep->getType());
+    auto *SrcTy = cast<FixedVectorType>(Rep->getType());
 
     unsigned NumDstElts = DstTy->getNumElements();
     if (NumDstElts < SrcTy->getNumElements()) {
@@ -2152,9 +2153,9 @@
                         CI->getArgOperand(1));
   } else if (IsX86 && (Name.startswith("avx512.mask.vcvtph2ps.") ||
                        Name.startswith("vcvtph2ps."))) {
-    auto *DstTy = cast<VectorType>(CI->getType());
+    auto *DstTy = cast<FixedVectorType>(CI->getType());
     Rep = CI->getArgOperand(0);
-    auto *SrcTy = cast<VectorType>(Rep->getType());
+    auto *SrcTy = cast<FixedVectorType>(Rep->getType());
     unsigned NumDstElts = DstTy->getNumElements();
     if (NumDstElts != SrcTy->getNumElements()) {
       assert(NumDstElts == 4 && "Unexpected vector size");
@@ -2175,7 +2176,7 @@
                         CI->getArgOperand(1),CI->getArgOperand(2),
                         /*Aligned*/true);
   } else if (IsX86 && Name.startswith("avx512.mask.expand.load.")) {
-    auto *ResultTy = cast<VectorType>(CI->getType());
+    auto *ResultTy = cast<FixedVectorType>(CI->getType());
     Type *PtrTy = ResultTy->getElementType();
 
     // Cast the pointer to element type.
@@ -2190,7 +2191,7 @@
                                      ResultTy);
     Rep = Builder.CreateCall(ELd, { Ptr, MaskVec, CI->getOperand(1) });
   } else if (IsX86 && Name.startswith("avx512.mask.compress.store.")) {
-    auto *ResultTy = cast<VectorType>(CI->getArgOperand(1)->getType());
+    auto *ResultTy = cast<FixedVectorType>(CI->getArgOperand(1)->getType());
     Type *PtrTy = ResultTy->getElementType();
 
     // Cast the pointer to element type.
@@ -2206,7 +2207,7 @@
     Rep = Builder.CreateCall(CSt, { CI->getArgOperand(1), Ptr, MaskVec });
   } else if (IsX86 && (Name.startswith("avx512.mask.compress.") ||
                        Name.startswith("avx512.mask.expand."))) {
-    auto *ResultTy = cast<VectorType>(CI->getType());
+    auto *ResultTy = cast<FixedVectorType>(CI->getType());
 
     Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
                                    ResultTy->getNumElements());
@@ -2286,7 +2287,7 @@
   } else if (IsX86 && (Name.startswith("avx.vbroadcast.s") ||
                        Name.startswith("avx512.vbroadcast.s"))) {
     // Replace broadcasts with a series of insertelements.
-    auto *VecTy = cast<VectorType>(CI->getType());
+    auto *VecTy = cast<FixedVectorType>(CI->getType());
     Type *EltTy = VecTy->getElementType();
     unsigned EltNum = VecTy->getNumElements();
     Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0),
@@ -2303,8 +2304,8 @@
                        Name.startswith("avx2.pmovzx") ||
                        Name.startswith("avx512.mask.pmovsx") ||
                        Name.startswith("avx512.mask.pmovzx"))) {
-    VectorType *SrcTy = cast<VectorType>(CI->getArgOperand(0)->getType());
-    VectorType *DstTy = cast<VectorType>(CI->getType());
+    auto *SrcTy = cast<FixedVectorType>(CI->getArgOperand(0)->getType());
+    auto *DstTy = cast<FixedVectorType>(CI->getType());
     unsigned NumDstElts = DstTy->getNumElements();
 
     // Extract a subvector of the first NumDstElts lanes and sign/zero extend.
@@ -2371,8 +2372,10 @@
   } else if (IsX86 && (Name.startswith("avx512.mask.broadcastf") ||
                        Name.startswith("avx512.mask.broadcasti"))) {
     unsigned NumSrcElts =
-        cast<VectorType>(CI->getArgOperand(0)->getType())->getNumElements();
-    unsigned NumDstElts = cast<VectorType>(CI->getType())->getNumElements();
+        cast<FixedVectorType>(CI->getArgOperand(0)->getType())
+            ->getNumElements();
+    unsigned NumDstElts =
+        cast<FixedVectorType>(CI->getType())->getNumElements();
 
     SmallVector ShuffleMask(NumDstElts);
     for (unsigned i = 0; i != NumDstElts; ++i)
@@ -2461,7 +2464,7 @@
     Value *Op0 = CI->getArgOperand(0);
     Value *Op1 = CI->getArgOperand(1);
     unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
-    VectorType *VecTy = cast<VectorType>(CI->getType());
+    auto *VecTy = cast<FixedVectorType>(CI->getType());
     unsigned NumElts = VecTy->getNumElements();
 
     SmallVector Idxs(NumElts);
@@ -2475,8 +2478,10 @@
     Value *Op0 = CI->getArgOperand(0);
     Value *Op1 = CI->getArgOperand(1);
     unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
-    unsigned DstNumElts = cast<VectorType>(CI->getType())->getNumElements();
-    unsigned SrcNumElts = cast<VectorType>(Op1->getType())->getNumElements();
+    unsigned DstNumElts =
+        cast<FixedVectorType>(CI->getType())->getNumElements();
+    unsigned SrcNumElts =
+        cast<FixedVectorType>(Op1->getType())->getNumElements();
     unsigned Scale = DstNumElts / SrcNumElts;
 
     // Mask off the high bits of the immediate value; hardware ignores those.
@@ -2519,8 +2524,10 @@
                        Name.startswith("avx512.mask.vextract"))) {
     Value *Op0 = CI->getArgOperand(0);
     unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
-    unsigned DstNumElts = cast<VectorType>(CI->getType())->getNumElements();
-    unsigned SrcNumElts = cast<VectorType>(Op0->getType())->getNumElements();
+    unsigned DstNumElts =
+        cast<FixedVectorType>(CI->getType())->getNumElements();
+    unsigned SrcNumElts =
+        cast<FixedVectorType>(Op0->getType())->getNumElements();
     unsigned Scale = SrcNumElts / DstNumElts;
 
     // Mask off the high bits of the immediate value; hardware ignores those.
@@ -2543,7 +2550,7 @@
                        Name.startswith("avx512.mask.perm.di."))) {
     Value *Op0 = CI->getArgOperand(0);
     unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
-    VectorType *VecTy = cast<VectorType>(CI->getType());
+    auto *VecTy = cast<FixedVectorType>(CI->getType());
     unsigned NumElts = VecTy->getNumElements();
 
     SmallVector Idxs(NumElts);
@@ -2567,7 +2574,7 @@
     uint8_t Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
 
-    unsigned NumElts = cast<VectorType>(CI->getType())->getNumElements();
+    unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
     unsigned HalfSize = NumElts / 2;
     SmallVector ShuffleMask(NumElts);
@@ -2597,7 +2604,7 @@
                        Name.startswith("avx512.mask.pshuf.d."))) {
     Value *Op0 = CI->getArgOperand(0);
     unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
-    VectorType *VecTy = cast<VectorType>(CI->getType());
+    auto *VecTy = cast<FixedVectorType>(CI->getType());
     unsigned NumElts = VecTy->getNumElements();
     // Calculate the size of each index in the immediate.
     unsigned IdxSize = 64 / VecTy->getScalarSizeInBits();
@@ -2619,7 +2626,7 @@
                        Name.startswith("avx512.mask.pshufl.w."))) {
     Value *Op0 = CI->getArgOperand(0);
     unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
-    unsigned NumElts = cast<VectorType>(CI->getType())->getNumElements();
+    unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
 
     SmallVector Idxs(NumElts);
     for (unsigned l = 0; l != NumElts; l += 8) {
@@ -2638,7 +2645,7 @@
                        Name.startswith("avx512.mask.pshufh.w."))) {
     Value *Op0 = CI->getArgOperand(0);
     unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
-    unsigned NumElts = cast<VectorType>(CI->getType())->getNumElements();
+    unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
 
     SmallVector Idxs(NumElts);
     for (unsigned l = 0; l != NumElts; l += 8) {
@@ -2657,7 +2664,7 @@
     Value *Op0 = CI->getArgOperand(0);
     Value *Op1 = CI->getArgOperand(1);
     unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
-    unsigned NumElts = cast<VectorType>(CI->getType())->getNumElements();
+    unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
 
     unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
     unsigned HalfLaneElts = NumLaneElts / 2;
@@ -2682,7 +2689,7 @@
                        Name.startswith("avx512.mask.movshdup") ||
                        Name.startswith("avx512.mask.movsldup"))) {
     Value *Op0 = CI->getArgOperand(0);
-    unsigned NumElts = cast<VectorType>(CI->getType())->getNumElements();
+    unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
     unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
 
     unsigned Offset = 0;
@@ -2704,7 +2711,7 @@
                        Name.startswith("avx512.mask.unpckl."))) {
     Value *Op0 = CI->getArgOperand(0);
     Value *Op1 = CI->getArgOperand(1);
-    int NumElts = cast<VectorType>(CI->getType())->getNumElements();
+    int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
     int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
 
     SmallVector Idxs(NumElts);
@@ -2720,7 +2727,7 @@
                        Name.startswith("avx512.mask.unpckh."))) {
     Value *Op0 = CI->getArgOperand(0);
     Value *Op1 = CI->getArgOperand(1);
-    int NumElts = cast<VectorType>(CI->getType())->getNumElements();
+    int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
     int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
 
     SmallVector Idxs(NumElts);
@@ -3288,7 +3295,7 @@
       Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                                Ops);
     } else {
-      int NumElts = cast<VectorType>(CI->getType())->getNumElements();
+      int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
       Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                        CI->getArgOperand(2) };
diff --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp
--- a/llvm/lib/IR/ConstantFold.cpp
+++ b/llvm/lib/IR/ConstantFold.cpp
@@ -55,8 +55,8 @@
   // If this cast changes element count then we can't handle it here:
   // doing so requires endianness information. This should be handled by
   // Analysis/ConstantFolding.cpp
-  unsigned NumElts = DstTy->getNumElements();
-  if (NumElts != cast<VectorType>(CV->getType())->getNumElements())
+  unsigned NumElts = cast<FixedVectorType>(DstTy)->getNumElements();
+  if (NumElts != cast<FixedVectorType>(CV->getType())->getNumElements())
     return nullptr;
 
   Type *DstEltTy = DstTy->getElementType();
@@ -573,8 +573,8 @@
   // count may be mismatched; don't attempt to handle that here.
   if ((isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) &&
       DestTy->isVectorTy() &&
-      cast<VectorType>(DestTy)->getNumElements() ==
-          cast<VectorType>(V->getType())->getNumElements()) {
+      cast<FixedVectorType>(DestTy)->getNumElements() ==
+          cast<FixedVectorType>(V->getType())->getNumElements()) {
     VectorType *DestVecTy = cast<VectorType>(DestTy);
     Type *DstEltTy = DestVecTy->getElementType();
     // Fast path for splatted constants.
@@ -585,7 +585,8 @@
     }
     SmallVector res;
     Type *Ty = IntegerType::get(V->getContext(), 32);
-    for (unsigned i = 0, e = cast<VectorType>(V->getType())->getNumElements();
+    for (unsigned i = 0,
+                  e = cast<FixedVectorType>(V->getType())->getNumElements();
          i != e; ++i) {
       Constant *C =
         ConstantExpr::getExtractElement(V, ConstantInt::get(Ty, i));
@@ -748,7 +749,7 @@
 
   // If the condition is a vector constant, fold the result elementwise.
   if (ConstantVector *CondV = dyn_cast<ConstantVector>(Cond)) {
-    auto *V1VTy = CondV->getType();
+    auto *V1VTy = cast<FixedVectorType>(CondV->getType());
     SmallVector Result;
     Type *Ty = IntegerType::get(CondV->getContext(), 32);
     for (unsigned i = 0, e = V1VTy->getNumElements(); i != e; ++i) {
@@ -798,7 +799,7 @@
 
 Constant *llvm::ConstantFoldExtractElementInstruction(Constant *Val,
                                                       Constant *Idx) {
-  auto *ValVTy = cast<VectorType>(Val->getType());
+  auto *ValVTy = cast<FixedVectorType>(Val->getType());
 
   // extractelt undef, C -> undef
   // extractelt C, undef -> undef
@@ -851,7 +852,7 @@
   if (isa<ScalableVectorType>(ValTy))
     return nullptr;
 
-  unsigned NumElts = cast<VectorType>(Val->getType())->getNumElements();
+  unsigned NumElts = cast<FixedVectorType>(Val->getType())->getNumElements();
   if (CIdx->uge(NumElts))
     return UndefValue::get(Val->getType());
@@ -898,7 +899,7 @@
   if (isa<ScalableVectorType>(V1VTy))
     return nullptr;
 
-  unsigned SrcNumElts = V1VTy->getNumElements();
+  unsigned SrcNumElts = cast<FixedVectorType>(V1VTy)->getNumElements();
 
   // Loop over the shuffle mask, evaluating each element.
   SmallVector Result;
@@ -1012,7 +1013,8 @@
 
   // Fold each element and create a vector constant from those constants.
   SmallVector Result;
-  for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
+  for (unsigned i = 0, e = cast<FixedVectorType>(VTy)->getNumElements();
+       i != e; ++i) {
     Constant *ExtractIdx = ConstantInt::get(Ty, i);
     Constant *Elt = ConstantExpr::getExtractElement(C, ExtractIdx);
 
@@ -1385,7 +1387,8 @@
     // Fold each element and create a vector constant from those constants.
     SmallVector Result;
     Type *Ty = IntegerType::get(VTy->getContext(), 32);
-    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
+    for (unsigned i = 0, e = cast<FixedVectorType>(VTy)->getNumElements();
+         i != e; ++i) {
       Constant *ExtractIdx = ConstantInt::get(Ty, i);
       Constant *LHS = ConstantExpr::getExtractElement(C1, ExtractIdx);
       Constant *RHS = ConstantExpr::getExtractElement(C2, ExtractIdx);
@@ -2013,7 +2016,8 @@
     SmallVector ResElts;
     Type *Ty = IntegerType::get(C1->getContext(), 32);
     // Compare the elements, producing an i1 result or constant expr.
-    for (unsigned i = 0, e = C1VTy->getNumElements(); i != e; ++i) {
+    for (unsigned i = 0, e = cast<FixedVectorType>(C1VTy)->getNumElements();
+         i != e; ++i) {
       Constant *C1E =
           ConstantExpr::getExtractElement(C1, ConstantInt::get(Ty, i));
       Constant *C2E =
@@ -2286,13 +2290,13 @@
   Type *OrigGEPTy = PointerType::get(Ty, PtrTy->getAddressSpace());
   Type *GEPTy = PointerType::get(Ty, PtrTy->getAddressSpace());
   if (VectorType *VT = dyn_cast<VectorType>(C->getType()))
-    GEPTy = VectorType::get(OrigGEPTy, VT->getNumElements());
+    GEPTy = VectorType::get(OrigGEPTy, VT);
 
   // The GEP returns a vector of pointers when one of more of
   // its arguments is a vector.
   for (unsigned i = 0, e = Idxs.size(); i != e; ++i) {
     if (auto *VT = dyn_cast<VectorType>(Idxs[i]->getType())) {
-      GEPTy = VectorType::get(OrigGEPTy, VT->getNumElements());
+      GEPTy = VectorType::get(OrigGEPTy, VT);
       break;
     }
   }
@@ -2499,19 +2503,19 @@
 
         if (!IsCurrIdxVector && IsPrevIdxVector)
           CurrIdx = ConstantDataVector::getSplat(
-              cast<VectorType>(PrevIdx->getType())->getNumElements(), CurrIdx);
+              cast<FixedVectorType>(PrevIdx->getType())->getNumElements(), CurrIdx);
 
         if (!IsPrevIdxVector && IsCurrIdxVector)
           PrevIdx = ConstantDataVector::getSplat(
-              cast<VectorType>(CurrIdx->getType())->getNumElements(), PrevIdx);
+              cast<FixedVectorType>(CurrIdx->getType())->getNumElements(), PrevIdx);
 
         Constant *Factor =
             ConstantInt::get(CurrIdx->getType()->getScalarType(), NumElements);
         if (UseVector)
           Factor = ConstantDataVector::getSplat(
               IsPrevIdxVector
-                  ? cast<VectorType>(PrevIdx->getType())->getNumElements()
-                  : cast<VectorType>(CurrIdx->getType())->getNumElements(),
+                  ? cast<FixedVectorType>(PrevIdx->getType())->getNumElements()
+                  : cast<FixedVectorType>(CurrIdx->getType())->getNumElements(),
               Factor);
 
         NewIdxs[i] = ConstantExpr::getSRem(CurrIdx, Factor);
@@ -2528,10 +2532,8 @@
         Type *ExtendedTy = Type::getIntNTy(Div->getContext(), CommonExtendedWidth);
         if (UseVector)
           ExtendedTy = VectorType::get(
-              ExtendedTy,
-              IsPrevIdxVector
-                  ? cast<VectorType>(PrevIdx->getType())->getNumElements()
-                  : cast<VectorType>(CurrIdx->getType())->getNumElements());
+              ExtendedTy, IsPrevIdxVector ? cast<VectorType>(PrevIdx->getType())
+                                          : cast<VectorType>(CurrIdx->getType()));
 
         if (!PrevIdx->getType()->isIntOrIntVectorTy(CommonExtendedWidth))
           PrevIdx = ConstantExpr::getSExt(PrevIdx, ExtendedTy);
diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp
--- a/llvm/lib/IR/Constants.cpp
+++ b/llvm/lib/IR/Constants.cpp
@@ -161,7 +161,7 @@
 
   // Check that vectors don't contain 1
   if (auto *VTy = dyn_cast<VectorType>(this->getType())) {
-    unsigned NumElts = VTy->getNumElements();
+    unsigned NumElts = cast<FixedVectorType>(VTy)->getNumElements();
     for (unsigned i = 0; i != NumElts; ++i) {
       Constant *Elt = this->getAggregateElement(i);
       if (!Elt || !Elt->isNotOneValue())
@@ -211,7 +211,7 @@
 
   // Check that vectors don't contain INT_MIN
   if (auto *VTy = dyn_cast<VectorType>(this->getType())) {
-    unsigned NumElts = VTy->getNumElements();
+    unsigned NumElts = cast<FixedVectorType>(VTy)->getNumElements();
     for (unsigned i = 0; i != NumElts; ++i) {
       Constant *Elt = this->getAggregateElement(i);
       if (!Elt || !Elt->isNotMinSignedValue())
@@ -230,7 +230,8 @@
   auto *VTy = dyn_cast<VectorType>(getType());
   if (!VTy)
     return false;
-  for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
+  for (unsigned i = 0, e = cast<FixedVectorType>(VTy)->getNumElements(); i != e;
+       ++i) {
     auto *CFP = dyn_cast_or_null<ConstantFP>(this->getAggregateElement(i));
     if (!CFP || !CFP->getValueAPF().isFiniteNonZero())
       return false;
@@ -306,7 +307,8 @@
 
 bool Constant::containsUndefElement() const {
   if (auto *VTy = dyn_cast<VectorType>(getType())) {
-    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+    for (unsigned i = 0, e = cast<FixedVectorType>(VTy)->getNumElements();
+         i != e; ++i)
       if (isa<UndefValue>(getAggregateElement(i)))
         return true;
   }
@@ -316,7 +318,8 @@
 
 bool Constant::containsConstantExpression() const {
   if (auto *VTy = dyn_cast<VectorType>(getType())) {
-    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+    for (unsigned i = 0, e = cast<FixedVectorType>(VTy)->getNumElements();
+         i != e; ++i)
       if (isa<ConstantExpr>(getAggregateElement(i)))
         return true;
   }
@@ -954,7 +957,7 @@
   Type *Ty = getType();
   if (auto *AT = dyn_cast<ArrayType>(Ty))
     return AT->getNumElements();
-  if (auto *VT = dyn_cast<VectorType>(Ty))
+  if (auto *VT = dyn_cast<FixedVectorType>(Ty))
     return VT->getNumElements();
   return Ty->getStructNumElements();
 }
@@ -989,7 +992,7 @@
   Type *Ty = getType();
   if (auto *AT = dyn_cast<ArrayType>(Ty))
     return AT->getNumElements();
-  if (auto *VT = dyn_cast<VectorType>(Ty))
+  if (auto *VT = dyn_cast<FixedVectorType>(Ty))
     return VT->getNumElements();
   return Ty->getStructNumElements();
 }
@@ -1172,7 +1175,7 @@
 
 ConstantVector::ConstantVector(VectorType *T, ArrayRef<Constant *> V)
     : ConstantAggregate(T, ConstantVectorVal, V) {
-  assert(V.size() == T->getNumElements() &&
+  assert(V.size() == cast<FixedVectorType>(T)->getNumElements() &&
          "Invalid initializer for constant vector");
 }
@@ -1897,8 +1900,8 @@
          "PtrToInt destination must be integer or integer vector");
   assert(isa<VectorType>(C->getType()) == isa<VectorType>(DstTy));
   if (isa<VectorType>(C->getType()))
-    assert(cast<VectorType>(C->getType())->getNumElements() ==
-               cast<VectorType>(DstTy)->getNumElements() &&
+    assert(cast<VectorType>(C->getType())->getElementCount() ==
+               cast<VectorType>(DstTy)->getElementCount() &&
           "Invalid cast between a different number of vector elements");
   return getFoldedCast(Instruction::PtrToInt, C, DstTy, OnlyIfReduced);
 }
@@ -1911,8 +1914,8 @@
          "IntToPtr destination must be a pointer or pointer vector");
   assert(isa<VectorType>(C->getType()) == isa<VectorType>(DstTy));
   if (isa<VectorType>(C->getType()))
-    assert(cast<VectorType>(C->getType())->getNumElements() ==
-               cast<VectorType>(DstTy)->getNumElements() &&
+    assert(cast<VectorType>(C->getType())->getElementCount() ==
+               cast<VectorType>(DstTy)->getElementCount() &&
           "Invalid cast between a different number of vector elements");
   return getFoldedCast(Instruction::IntToPtr, C, DstTy, OnlyIfReduced);
 }
@@ -1943,7 +1946,7 @@
     Type *MidTy = PointerType::get(DstElemTy, SrcScalarTy->getAddressSpace());
     if (VectorType *VT = dyn_cast<VectorType>(DstTy)) {
       // Handle vectors of pointers.
-      MidTy = VectorType::get(MidTy, VT->getNumElements());
+      MidTy = VectorType::get(MidTy, VT);
     }
     C = getBitCast(C, MidTy);
   }
@@ -2579,7 +2582,7 @@
 unsigned ConstantDataSequential::getNumElements() const {
   if (ArrayType *AT = dyn_cast<ArrayType>(getType()))
     return AT->getNumElements();
-  return cast<VectorType>(getType())->getNumElements();
+  return cast<FixedVectorType>(getType())->getNumElements();
 }
diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp
--- a/llvm/lib/IR/DataLayout.cpp
+++ b/llvm/lib/IR/DataLayout.cpp
@@ -557,9 +557,9 @@
   } else if (AlignType == VECTOR_ALIGN) {
     // By default, use natural alignment for vector types. This is consistent
    // with what clang and llvm-gcc do.
-    unsigned Alignment =
-        getTypeAllocSize(cast<VectorType>(Ty)->getElementType());
-    Alignment *= cast<VectorType>(Ty)->getNumElements();
+    auto *FVTy = cast<FixedVectorType>(Ty);
+    unsigned Alignment = getTypeAllocSize(FVTy->getElementType());
+    Alignment *= FVTy->getNumElements();
     Alignment = PowerOf2Ceil(Alignment);
     return Align(Alignment);
   }
@@ -788,7 +788,7 @@
   unsigned NumBits = getPointerTypeSizeInBits(Ty);
   IntegerType *IntTy = IntegerType::get(Ty->getContext(), NumBits);
   if (VectorType *VecTy = dyn_cast<VectorType>(Ty))
-    return VectorType::get(IntTy, VecTy->getNumElements());
+    return VectorType::get(IntTy, VecTy);
   return IntTy;
 }
@@ -810,7 +810,7 @@
   unsigned NumBits = getIndexTypeSizeInBits(Ty);
   IntegerType *IntTy = IntegerType::get(Ty->getContext(), NumBits);
   if (VectorType *VecTy = dyn_cast<VectorType>(Ty))
-    return VectorType::get(IntTy, VecTy->getNumElements());
+    return VectorType::get(IntTy, VecTy);
   return IntTy;
 }
diff --git a/llvm/lib/IR/Function.cpp b/llvm/lib/IR/Function.cpp
--- a/llvm/lib/IR/Function.cpp
+++ b/llvm/lib/IR/Function.cpp
@@ -1075,7 +1075,9 @@
     return Tys[D.getOverloadArgNumber()];
   case IITDescriptor::ScalableVecArgument: {
     auto *Ty = cast<VectorType>(DecodeFixedType(Infos, Tys, Context));
-    return VectorType::get(Ty->getElementType(), {Ty->getNumElements(), true});
+    // FIXME: will Ty ever not have getElementCount().Scalable == false?
+    return VectorType::get(Ty->getElementType(),
+                           {Ty->getElementCount().Min, true});
   }
   }
   llvm_unreachable("unhandled");
 }
@@ -1180,7 +1182,8 @@
     case IITDescriptor::Integer: return !Ty->isIntegerTy(D.Integer_Width);
     case IITDescriptor::Vector: {
       VectorType *VT = dyn_cast<VectorType>(Ty);
-      return !VT || VT->getNumElements() != D.Vector_Width ||
+      return !VT ||
+             cast<FixedVectorType>(VT)->getNumElements() != D.Vector_Width ||
             matchIntrinsicType(VT->getElementType(), Infos, ArgTys,
                                DeferredChecks, IsDeferredCheck);
     }
@@ -1325,7 +1328,7 @@
       VectorType *ReferenceType = dyn_cast<VectorType>(ArgTys[RefArgNumber]);
       VectorType *ThisArgVecTy = dyn_cast<VectorType>(Ty);
       if (!ThisArgVecTy || !ReferenceType ||
-          (ReferenceType->getNumElements() != ThisArgVecTy->getNumElements()))
+          (ReferenceType->getElementCount() != ThisArgVecTy->getElementCount()))
         return true;
       PointerType *ThisArgEltTy =
               dyn_cast<PointerType>(ThisArgVecTy->getElementType());
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -524,12 +524,11 @@
                                            const Twine &Name) {
   auto PtrsTy = cast<VectorType>(Ptrs->getType());
   auto PtrTy = cast<PointerType>(PtrsTy->getElementType());
-  unsigned NumElts = PtrsTy->getNumElements();
-  Type *DataTy = VectorType::get(PtrTy->getElementType(), NumElts);
+  Type *DataTy = VectorType::get(PtrTy->getElementType(), PtrsTy);
 
   if (!Mask)
-    Mask = Constant::getAllOnesValue(VectorType::get(Type::getInt1Ty(Context),
-                                     NumElts));
+    Mask = Constant::getAllOnesValue(
+        VectorType::get(Type::getInt1Ty(Context), PtrsTy));
 
   if (!PassThru)
     PassThru = UndefValue::get(DataTy);
@@ -554,18 +553,17 @@
                                       Align Alignment, Value *Mask) {
   auto PtrsTy = cast<VectorType>(Ptrs->getType());
   auto DataTy = cast<VectorType>(Data->getType());
-  unsigned NumElts = PtrsTy->getNumElements();
 
 #ifndef NDEBUG
   auto PtrTy = cast<PointerType>(PtrsTy->getElementType());
-  assert(NumElts == DataTy->getNumElements() &&
+  assert(PtrsTy->getElementCount() == DataTy->getElementCount() &&
          PtrTy->getElementType() == DataTy->getElementType() &&
          "Incompatible pointer and data types");
 #endif
 
   if (!Mask)
-    Mask = Constant::getAllOnesValue(VectorType::get(Type::getInt1Ty(Context),
-                                     NumElts));
+    Mask = Constant::getAllOnesValue(
+        VectorType::get(Type::getInt1Ty(Context), PtrsTy));
 
   Type *OverloadedTypes[] = {DataTy, PtrsTy};
   Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -90,7 +90,7 @@
     VectorType *ET = dyn_cast<VectorType>(Op1->getType());
     if (!ET)
       return "selected values for vector select must be vectors";
-    if (ET->getNumElements() != VT->getNumElements())
+    if (ET->getElementCount() != VT->getElementCount())
      return "vector select requires selected vectors to have "
             "the same vector length as select condition";
   } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
@@ -1907,7 +1907,7 @@
 }
 
 void ShuffleVectorInst::commute() {
-  int NumOpElts = cast<VectorType>(Op<0>()->getType())->getNumElements();
+  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
   int NumMaskElts = ShuffleMask.size();
   SmallVector NewMask(NumMaskElts);
   for (int i = 0; i != NumMaskElts; ++i) {
@@ -1931,7 +1931,7 @@
     return false;
 
   // Make sure the mask elements make sense.
-  int V1Size = cast<VectorType>(V1->getType())->getNumElements();
+  int V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
   for (int Elem : Mask)
     if (Elem != UndefMaskElem && Elem >= V1Size * 2)
       return false;
@@ -1961,7 +1961,7 @@
     return true;
 
   if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
-    unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
+    unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
     for (Value *Op : MV->operands()) {
       if (auto *CI = dyn_cast<ConstantInt>(Op)) {
         if (CI->uge(V1Size*2))
@@ -1974,8 +1974,9 @@
   }
 
   if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
-    unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
-    for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i)
+    unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
+    for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
+         i != e; ++i)
       if (CDS->getElementAsInteger(i) >= V1Size*2)
         return false;
     return true;
@@ -2171,8 +2172,8 @@
 }
 
 bool ShuffleVectorInst::isIdentityWithPadding() const {
-  int NumOpElts = cast<VectorType>(Op<0>()->getType())->getNumElements();
-  int NumMaskElts = cast<VectorType>(getType())->getNumElements();
+  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
+  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
   if (NumMaskElts <= NumOpElts)
     return false;
@@ -2190,8 +2191,8 @@
 }
 
 bool ShuffleVectorInst::isIdentityWithExtract() const {
-  int NumOpElts = cast<VectorType>(Op<0>()->getType())->getNumElements();
-  int NumMaskElts = getType()->getNumElements();
+  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
+  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
   if (NumMaskElts >= NumOpElts)
     return false;
@@ -2203,8 +2204,8 @@
   if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
     return false;
 
-  int NumOpElts = cast<VectorType>(Op<0>()->getType())->getNumElements();
-  int NumMaskElts = getType()->getNumElements();
+  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
+  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
   if (NumMaskElts != NumOpElts * 2)
     return false;
@@ -2945,8 +2946,8 @@
          "Invalid cast");
   assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
   assert((!Ty->isVectorTy() ||
-          cast<VectorType>(Ty)->getNumElements() ==
-              cast<VectorType>(S->getType())->getNumElements()) &&
+          cast<VectorType>(Ty)->getElementCount() ==
+              cast<VectorType>(S->getType())->getElementCount()) &&
          "Invalid cast");
 
   if (Ty->isIntOrIntVectorTy())
@@ -2964,8 +2965,8 @@
          "Invalid cast");
   assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
   assert((!Ty->isVectorTy() ||
-          cast<VectorType>(Ty)->getNumElements() ==
-              cast<VectorType>(S->getType())->getNumElements()) &&
+          cast<VectorType>(Ty)->getElementCount() ==
+              cast<VectorType>(S->getType())->getElementCount()) &&
          "Invalid cast");
 
   if (Ty->isIntOrIntVectorTy())
@@ -3076,7 +3077,7 @@
 
   if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
     if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
-      if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
+      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
        // An element by element cast. Valid if casting the elements is valid.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
@@ -3198,7 +3199,7 @@
   // FIXME: Check address space sizes here
   if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
     if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
-      if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
+      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
        // An element by element cast. Find the appropriate opcode based on the
        // element types.
        SrcTy = SrcVecTy->getElementType();
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -2683,8 +2683,8 @@
          &I);
 
   if (SrcVec && DstVec)
-    Assert(cast<VectorType>(SrcTy)->getNumElements() ==
-               cast<VectorType>(DestTy)->getNumElements(),
+    Assert(cast<VectorType>(SrcTy)->getElementCount() ==
+               cast<VectorType>(DestTy)->getElementCount(),
           "UIToFP source and dest vector length mismatch", &I);
 
   visitInstruction(I);
@@ -2706,8 +2706,8 @@
          &I);
 
   if (SrcVec && DstVec)
-    Assert(cast<VectorType>(SrcTy)->getNumElements() ==
-               cast<VectorType>(DestTy)->getNumElements(),
+    Assert(cast<VectorType>(SrcTy)->getElementCount() ==
+               cast<VectorType>(DestTy)->getElementCount(),
           "SIToFP source and dest vector length mismatch", &I);
 
   visitInstruction(I);
@@ -2729,8 +2729,8 @@
          "FPToUI result must be integer or integer vector", &I);
 
   if (SrcVec && DstVec)
-    Assert(cast<VectorType>(SrcTy)->getNumElements() ==
-               cast<VectorType>(DestTy)->getNumElements(),
+    Assert(cast<VectorType>(SrcTy)->getElementCount() ==
+               cast<VectorType>(DestTy)->getElementCount(),
           "FPToUI source and dest vector length mismatch", &I);
 
   visitInstruction(I);
@@ -2752,8 +2752,8 @@
          "FPToSI result must be integer or integer vector", &I);
 
   if (SrcVec && DstVec)
-    Assert(cast<VectorType>(SrcTy)->getNumElements() ==
-               cast<VectorType>(DestTy)->getNumElements(),
+    Assert(cast<VectorType>(SrcTy)->getElementCount() ==
+               cast<VectorType>(DestTy)->getElementCount(),
           "FPToSI source and dest vector length mismatch", &I);
 
   visitInstruction(I);
@@ -2777,7 +2777,7 @@
   if (SrcTy->isVectorTy()) {
     VectorType *VSrc = cast<VectorType>(SrcTy);
     VectorType *VDest = cast<VectorType>(DestTy);
-    Assert(VSrc->getNumElements() == VDest->getNumElements(),
+    Assert(VSrc->getElementCount() == VDest->getElementCount(),
           "PtrToInt Vector width mismatch", &I);
   }
 
@@ -2802,7 +2802,7 @@
   if (SrcTy->isVectorTy()) {
     VectorType *VSrc = cast<VectorType>(SrcTy);
     VectorType *VDest = cast<VectorType>(DestTy);
-    Assert(VSrc->getNumElements() == VDest->getNumElements(),
+    Assert(VSrc->getElementCount() == VDest->getElementCount(),
           "IntToPtr Vector width mismatch", &I);
   }
   visitInstruction(I);
@@ -2826,8 +2826,8 @@
   Assert(SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace(),
          "AddrSpaceCast must be between different address spaces", &I);
   if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
-    Assert(SrcVTy->getNumElements() ==
-               cast<VectorType>(DestTy)->getNumElements(),
+    Assert(SrcVTy->getElementCount() ==
+               cast<VectorType>(DestTy)->getElementCount(),
           "AddrSpaceCast vector pointer number of elements mismatch", &I);
   visitInstruction(I);
 }
@@ -3336,16 +3336,16 @@
 
   if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
     // Additional checks for vector GEPs.
-    unsigned GEPWidth = GEPVTy->getNumElements();
+    ElementCount GEPWidth = GEPVTy->getElementCount();
     if (GEP.getPointerOperandType()->isVectorTy())
       Assert(
           GEPWidth ==
-              cast<VectorType>(GEP.getPointerOperandType())->getNumElements(),
+              cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
           "Vector GEP result width doesn't match operand's", &GEP);
     for (Value *Idx : Idxs) {
       Type *IndexTy = Idx->getType();
       if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
-        unsigned IndexWidth = IndexVTy->getNumElements();
+        ElementCount IndexWidth = IndexVTy->getElementCount();
         Assert(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
       }
       Assert(IndexTy->isIntOrIntVectorTy(),
@@ -4659,8 +4659,8 @@
            "masked_load: return must match pointer type", Call);
     Assert(PassThru->getType() == DataTy,
            "masked_load: pass through and data type must match", Call);
-    Assert(cast<VectorType>(Mask->getType())->getNumElements() ==
-               cast<VectorType>(DataTy)->getNumElements(),
+    Assert(cast<VectorType>(Mask->getType())->getElementCount() ==
+               cast<VectorType>(DataTy)->getElementCount(),
           "masked_load: vector mask must be same length as data", Call);
     break;
   }
@@ -4678,8 +4678,8 @@
     Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
     Assert(DataTy == Val->getType(),
           "masked_store: storee must match pointer type", Call);
-    Assert(cast<VectorType>(Mask->getType())->getNumElements() ==
-               cast<VectorType>(DataTy)->getNumElements(),
+    Assert(cast<VectorType>(Mask->getType())->getElementCount() ==
+               cast<VectorType>(DataTy)->getElementCount(),
           "masked_store: vector mask must be same length as data", Call);
     break;
   }
@@ -4799,27 +4799,27 @@
   case Intrinsic::matrix_columnwise_store: {
     ConstantInt *NumRows;
     ConstantInt *NumColumns;
-    VectorType *TypeToCheck;
+    FixedVectorType *TypeToCheck;
     switch (ID) {
     case Intrinsic::matrix_multiply:
       NumRows = cast<ConstantInt>(Call.getArgOperand(2));
       NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
-      TypeToCheck = cast<VectorType>(Call.getType());
+      TypeToCheck = cast<FixedVectorType>(Call.getType());
       break;
     case Intrinsic::matrix_transpose:
       NumRows = cast<ConstantInt>(Call.getArgOperand(1));
       NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
-      TypeToCheck = cast<VectorType>(Call.getType());
+      TypeToCheck = cast<FixedVectorType>(Call.getType());
      break;
     case Intrinsic::matrix_columnwise_load:
       NumRows = cast<ConstantInt>(Call.getArgOperand(2));
       NumColumns = cast<ConstantInt>(Call.getArgOperand(3));
-      TypeToCheck = cast<VectorType>(Call.getType());
+      TypeToCheck = cast<FixedVectorType>(Call.getType());
      break;
     case Intrinsic::matrix_columnwise_store:
       NumRows = cast<ConstantInt>(Call.getArgOperand(3));
       NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
-      TypeToCheck = cast<VectorType>(Call.getArgOperand(0)->getType());
+      TypeToCheck = cast<FixedVectorType>(Call.getArgOperand(0)->getType());
      break;
     default:
       llvm_unreachable("unexpected intrinsic");
@@ -4901,20 +4901,20 @@
   case Intrinsic::experimental_constrained_fptosi:
   case Intrinsic::experimental_constrained_fptoui: {
     Value *Operand = FPI.getArgOperand(0);
-    uint64_t NumSrcElem = 0;
+    Optional<ElementCount> NumSrcElem;
     Assert(Operand->getType()->isFPOrFPVectorTy(),
           "Intrinsic first argument must be floating point", &FPI);
     if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
-      NumSrcElem = OperandT->getNumElements();
+      NumSrcElem = OperandT->getElementCount();
     }
 
     Operand = &FPI;
-    Assert((NumSrcElem > 0) == Operand->getType()->isVectorTy(),
+    Assert(NumSrcElem.hasValue() == Operand->getType()->isVectorTy(),
           "Intrinsic first argument and result disagree on vector use", &FPI);
     Assert(Operand->getType()->isIntOrIntVectorTy(),
           "Intrinsic result must be an integer", &FPI);
     if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
-      Assert(NumSrcElem == OperandT->getNumElements(),
+      Assert(*NumSrcElem == OperandT->getElementCount(),
             "Intrinsic first argument and result vector lengths must be equal",
be equal", &FPI); } @@ -4924,20 +4924,20 @@ case Intrinsic::experimental_constrained_sitofp: case Intrinsic::experimental_constrained_uitofp: { Value *Operand = FPI.getArgOperand(0); - uint64_t NumSrcElem = 0; + Optional NumSrcElem; Assert(Operand->getType()->isIntOrIntVectorTy(), "Intrinsic first argument must be integer", &FPI); if (auto *OperandT = dyn_cast(Operand->getType())) { - NumSrcElem = OperandT->getNumElements(); + NumSrcElem = OperandT->getElementCount(); } Operand = &FPI; - Assert((NumSrcElem > 0) == Operand->getType()->isVectorTy(), + Assert(NumSrcElem.hasValue() == Operand->getType()->isVectorTy(), "Intrinsic first argument and result disagree on vector use", &FPI); Assert(Operand->getType()->isFPOrFPVectorTy(), "Intrinsic result must be a floating point", &FPI); if (auto *OperandT = dyn_cast(Operand->getType())) { - Assert(NumSrcElem == OperandT->getNumElements(), + Assert(*NumSrcElem == OperandT->getElementCount(), "Intrinsic first argument and result vector lengths must be equal", &FPI); } @@ -4958,7 +4958,7 @@ if (OperandTy->isVectorTy()) { auto *OperandVecTy = cast(OperandTy); auto *ResultVecTy = cast(ResultTy); - Assert(OperandVecTy->getNumElements() == ResultVecTy->getNumElements(), + Assert(OperandVecTy->getElementCount() == ResultVecTy->getElementCount(), "Intrinsic first argument and result vector lengths must be equal", &FPI); }