diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1652,7 +1652,7 @@
 
   if (C->getType()->isVectorTy()) {
     // Check each element of a constant vector.
-    unsigned NumElts = C->getType()->getVectorNumElements();
+    unsigned NumElts = cast<VectorType>(C->getType())->getNumElements();
     for (unsigned i = 0; i != NumElts; ++i) {
       Constant *Elt = C->getAggregateElement(i);
       if (!Elt)
@@ -2082,7 +2082,7 @@
 
 /// If all elements of two constant vectors are 0/-1 and inverses, return true.
 static bool areInverseVectorBitmasks(Constant *C1, Constant *C2) {
-  unsigned NumElts = C1->getType()->getVectorNumElements();
+  unsigned NumElts = cast<VectorType>(C1->getType())->getNumElements();
   for (unsigned i = 0; i != NumElts; ++i) {
     Constant *EltC1 = C1->getAggregateElement(i);
     Constant *EltC2 = C2->getAggregateElement(i);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -523,7 +523,7 @@
   auto Vec = II.getArgOperand(0);
   auto Amt = II.getArgOperand(1);
   auto VT = cast<VectorType>(II.getType());
-  auto SVT = VT->getVectorElementType();
+  auto SVT = VT->getElementType();
   int NumElts = VT->getNumElements();
   int BitWidth = SVT->getIntegerBitWidth();
@@ -620,10 +620,10 @@
   if (isa<UndefValue>(Arg0) && isa<UndefValue>(Arg1))
     return UndefValue::get(ResTy);
 
-  Type *ArgTy = Arg0->getType();
+  auto *ArgTy = cast<VectorType>(Arg0->getType());
   unsigned NumLanes = ResTy->getPrimitiveSizeInBits() / 128;
-  unsigned NumSrcElts = ArgTy->getVectorNumElements();
-  assert(ResTy->getVectorNumElements() == (2 * NumSrcElts) &&
+  unsigned NumSrcElts = ArgTy->getNumElements();
+  assert(cast<VectorType>(ResTy)->getNumElements() == (2 * NumSrcElts) &&
          "Unexpected packing types");
 
   unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
@@ -680,14 +680,14 @@
                                InstCombiner::BuilderTy &Builder) {
   Value *Arg = II.getArgOperand(0);
   Type *ResTy = II.getType();
-  Type *ArgTy = Arg->getType();
 
   // movmsk(undef) -> zero as we must ensure the upper bits are zero.
   if (isa<UndefValue>(Arg))
     return Constant::getNullValue(ResTy);
 
+  auto *ArgTy = dyn_cast<VectorType>(Arg->getType());
   // We can't easily peek through x86_mmx types.
-  if (!ArgTy->isVectorTy())
+  if (!ArgTy)
     return nullptr;
 
   // Expand MOVMSK to compare/bitcast/zext:
@@ -695,8 +695,8 @@
   // %cmp = icmp slt <16 x i8> %x, zeroinitializer
   // %int = bitcast <16 x i1> %cmp to i16
   // %res = zext i16 %int to i32
-  unsigned NumElts = ArgTy->getVectorNumElements();
-  Type *IntegerVecTy = VectorType::getInteger(cast<VectorType>(ArgTy));
+  unsigned NumElts = ArgTy->getNumElements();
+  Type *IntegerVecTy = VectorType::getInteger(ArgTy);
   Type *IntegerTy = Builder.getIntNTy(NumElts);
 
   Value *Res = Builder.CreateBitCast(Arg, IntegerVecTy);
@@ -1036,7 +1036,7 @@
   auto *VecTy = cast<VectorType>(II.getType());
   auto *MaskEltTy = Type::getInt32Ty(II.getContext());
-  unsigned NumElts = VecTy->getVectorNumElements();
+  unsigned NumElts = VecTy->getNumElements();
   bool IsPD = VecTy->getScalarType()->isDoubleTy();
   unsigned NumLaneElts = IsPD ? 2 : 4;
   assert(NumElts == 16 || NumElts == 8 || NumElts == 4 || NumElts == 2);
@@ -1955,8 +1955,8 @@
   }
 
   // For vector result intrinsics, use the generic demanded vector support.
-  if (II->getType()->isVectorTy()) {
-    auto VWidth = II->getType()->getVectorNumElements();
+  if (auto *IIVTy = dyn_cast<VectorType>(II->getType())) {
+    auto VWidth = IIVTy->getNumElements();
     APInt UndefElts(VWidth, 0);
     APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
     if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) {
@@ -2505,8 +2505,9 @@
     // Turn PPC QPX qvlfs -> load if the pointer is known aligned.
     if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
                                    &DT) >= 16) {
-      Type *VTy = VectorType::get(Builder.getFloatTy(),
-                                  II->getType()->getVectorNumElements());
+      Type *VTy =
+          VectorType::get(Builder.getFloatTy(),
+                          cast<VectorType>(II->getType())->getElementCount());
       Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(VTy));
       Value *Load = Builder.CreateLoad(VTy, Ptr);
@@ -2526,8 +2527,9 @@
     // Turn PPC QPX qvstfs -> store if the pointer is known aligned.
     if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
                                    &DT) >= 16) {
-      Type *VTy = VectorType::get(Builder.getFloatTy(),
-          II->getArgOperand(0)->getType()->getVectorNumElements());
+      Type *VTy = VectorType::get(
+          Builder.getFloatTy(),
+          cast<VectorType>(II->getArgOperand(0)->getType())->getElementCount());
       Value *TOp = Builder.CreateFPTrunc(II->getArgOperand(0), VTy);
       Type *OpPtrTy = PointerType::getUnqual(VTy);
       Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
@@ -2676,7 +2678,7 @@
     // These intrinsics only demand the 0th element of their input vectors. If
     // we can simplify the input based on that, do so now.
     Value *Arg = II->getArgOperand(0);
-    unsigned VWidth = Arg->getType()->getVectorNumElements();
+    unsigned VWidth = cast<VectorType>(Arg->getType())->getNumElements();
     if (Value *V = SimplifyDemandedVectorEltsLow(Arg, VWidth, 1))
       return replaceOperand(*II, 0, V);
     break;
@@ -2726,7 +2728,7 @@
     bool MadeChange = false;
     Value *Arg0 = II->getArgOperand(0);
     Value *Arg1 = II->getArgOperand(1);
-    unsigned VWidth = Arg0->getType()->getVectorNumElements();
+    unsigned VWidth = cast<VectorType>(Arg0->getType())->getNumElements();
     if (Value *V = SimplifyDemandedVectorEltsLow(Arg0, VWidth, 1)) {
       replaceOperand(*II, 0, V);
       MadeChange = true;
@@ -2944,7 +2946,7 @@
     Value *Arg1 = II->getArgOperand(1);
     assert(Arg1->getType()->getPrimitiveSizeInBits() == 128 &&
            "Unexpected packed shift size");
-    unsigned VWidth = Arg1->getType()->getVectorNumElements();
+    unsigned VWidth = cast<VectorType>(Arg1->getType())->getNumElements();
 
     if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, VWidth / 2))
       return replaceOperand(*II, 1, V);
@@ -3011,7 +3013,7 @@
     bool MadeChange = false;
     Value *Arg0 = II->getArgOperand(0);
     Value *Arg1 = II->getArgOperand(1);
-    unsigned VWidth = Arg0->getType()->getVectorNumElements();
+    unsigned VWidth = cast<VectorType>(Arg0->getType())->getNumElements();
 
     APInt UndefElts1(VWidth, 0);
     APInt DemandedElts1 = APInt::getSplat(VWidth,
@@ -3051,8 +3053,8 @@
   case Intrinsic::x86_sse4a_extrq: {
     Value *Op0 = II->getArgOperand(0);
     Value *Op1 = II->getArgOperand(1);
-    unsigned VWidth0 = Op0->getType()->getVectorNumElements();
-    unsigned VWidth1 = Op1->getType()->getVectorNumElements();
+    unsigned VWidth0 = cast<VectorType>(Op0->getType())->getNumElements();
+    unsigned VWidth1 = cast<VectorType>(Op1->getType())->getNumElements();
     assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
            Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
            VWidth1 == 16 && "Unexpected operand sizes");
@@ -3090,7 +3092,7 @@
     // EXTRQI: Extract Length bits starting from Index. Zero pad the remaining
    // bits of the lower 64-bits. The upper 64-bits are undefined.
     Value *Op0 = II->getArgOperand(0);
-    unsigned VWidth = Op0->getType()->getVectorNumElements();
+    unsigned VWidth = cast<VectorType>(Op0->getType())->getNumElements();
     assert(Op0->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
            "Unexpected operand size");
@@ -3112,10 +3114,10 @@
   case Intrinsic::x86_sse4a_insertq: {
     Value *Op0 = II->getArgOperand(0);
     Value *Op1 = II->getArgOperand(1);
-    unsigned VWidth = Op0->getType()->getVectorNumElements();
+    unsigned VWidth = cast<VectorType>(Op0->getType())->getNumElements();
     assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
            Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
-           Op1->getType()->getVectorNumElements() == 2 &&
+           cast<VectorType>(Op1->getType())->getNumElements() == 2 &&
            "Unexpected operand size");
 
     // See if we're dealing with constant values.
@@ -3146,8 +3148,8 @@
     // undefined.
     Value *Op0 = II->getArgOperand(0);
     Value *Op1 = II->getArgOperand(1);
-    unsigned VWidth0 = Op0->getType()->getVectorNumElements();
-    unsigned VWidth1 = Op1->getType()->getVectorNumElements();
+    unsigned VWidth0 = cast<VectorType>(Op0->getType())->getNumElements();
+    unsigned VWidth1 = cast<VectorType>(Op1->getType())->getNumElements();
     assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
            Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
            VWidth1 == 2 && "Unexpected operand sizes");
@@ -3214,8 +3216,10 @@
                  II->getType()->getPrimitiveSizeInBits() &&
              "Not expecting mask and operands with different sizes");
 
-      unsigned NumMaskElts = Mask->getType()->getVectorNumElements();
-      unsigned NumOperandElts = II->getType()->getVectorNumElements();
+      unsigned NumMaskElts =
+          cast<VectorType>(Mask->getType())->getNumElements();
+      unsigned NumOperandElts =
+          cast<VectorType>(II->getType())->getNumElements();
       if (NumMaskElts == NumOperandElts)
         return SelectInst::Create(BoolVec, Op1, Op0);
 
@@ -3306,7 +3310,7 @@
     // the permutation mask with respect to 31 and reverse the order of
     // V1 and V2.
     if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
-      assert(Mask->getType()->getVectorNumElements() == 16 &&
+      assert(cast<VectorType>(Mask->getType())->getNumElements() == 16 &&
              "Bad type for intrinsic!");
 
       // Check that all of the elements are integer constants or undefs.
@@ -3464,7 +3468,8 @@
       if (auto *CI = dyn_cast<ConstantInt>(XorMask)) {
         if (CI->getValue().trunc(16).isAllOnesValue()) {
           auto TrueVector = Builder.CreateVectorSplat(
-              II->getType()->getVectorNumElements(), Builder.getTrue());
+              cast<VectorType>(II->getType())->getNumElements(),
+              Builder.getTrue());
           return BinaryOperator::Create(Instruction::Xor, ArgArg, TrueVector);
         }
       }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -856,10 +856,10 @@
   Value *VecOp;
   if (match(Src, m_OneUse(m_ExtractElement(m_Value(VecOp),
                                            m_ConstantInt(Cst))))) {
-    Type *VecOpTy = VecOp->getType();
+    auto *VecOpTy = cast<VectorType>(VecOp->getType());
     unsigned DestScalarSize = DestTy->getScalarSizeInBits();
     unsigned VecOpScalarSize = VecOpTy->getScalarSizeInBits();
-    unsigned VecNumElts = VecOpTy->getVectorNumElements();
+    unsigned VecNumElts = VecOpTy->getNumElements();
 
     // A badly fit destination size would result in an invalid cast.
     if (VecOpScalarSize % DestScalarSize == 0) {
@@ -1514,12 +1514,13 @@
 // TODO: Make these support undef elements.
 static Type *shrinkFPConstantVector(Value *V) {
   auto *CV = dyn_cast<Constant>(V);
-  if (!CV || !CV->getType()->isVectorTy())
+  auto *CVVTy = dyn_cast<VectorType>(V->getType());
+  if (!CV || !CVVTy)
     return nullptr;
 
   Type *MinType = nullptr;
 
-  unsigned NumElts = CV->getType()->getVectorNumElements();
+  unsigned NumElts = CVVTy->getNumElements();
   for (unsigned i = 0; i != NumElts; ++i) {
     auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
     if (!CFP)
@@ -1820,8 +1821,9 @@
   if (CI.getOperand(0)->getType()->getScalarSizeInBits() !=
       DL.getPointerSizeInBits(AS)) {
     Type *Ty = DL.getIntPtrType(CI.getContext(), AS);
-    if (CI.getType()->isVectorTy()) // Handle vectors of pointers.
-      Ty = VectorType::get(Ty, CI.getType()->getVectorNumElements());
+    // Handle vectors of pointers.
+    if (auto *CIVTy = dyn_cast<VectorType>(CI.getType()))
+      Ty = VectorType::get(Ty, CIVTy->getElementCount());
 
     Value *P = Builder.CreateZExtOrTrunc(CI.getOperand(0), Ty);
     return new IntToPtrInst(P, CI.getType());
@@ -1868,8 +1870,8 @@
     return commonPointerCastTransforms(CI);
 
   Type *PtrTy = DL.getIntPtrType(CI.getContext(), AS);
-  if (Ty->isVectorTy()) // Handle vectors of pointers.
-    PtrTy = VectorType::get(PtrTy, Ty->getVectorNumElements());
+  if (auto *VTy = dyn_cast<VectorType>(Ty)) // Handle vectors of pointers.
+    PtrTy = VectorType::get(PtrTy, VTy->getNumElements());
 
   Value *P = Builder.CreatePtrToInt(CI.getOperand(0), PtrTy);
   return CastInst::CreateIntegerCast(P, Ty, /*isSigned=*/false);
@@ -2199,10 +2201,10 @@
   // A vector select must maintain the same number of elements in its operands.
   Type *CondTy = Cond->getType();
   Type *DestTy = BitCast.getType();
-  if (CondTy->isVectorTy()) {
+  if (auto *CondVTy = dyn_cast<VectorType>(CondTy)) {
     if (!DestTy->isVectorTy())
       return nullptr;
-    if (DestTy->getVectorNumElements() != CondTy->getVectorNumElements())
+    if (cast<VectorType>(DestTy)->getNumElements() != CondVTy->getNumElements())
       return nullptr;
   }
@@ -2536,10 +2538,11 @@
     // a bitcast to a vector with the same # elts.
     Value *ShufOp0 = Shuf->getOperand(0);
     Value *ShufOp1 = Shuf->getOperand(1);
-    unsigned NumShufElts = Shuf->getType()->getVectorNumElements();
-    unsigned NumSrcVecElts = ShufOp0->getType()->getVectorNumElements();
+    unsigned NumShufElts = Shuf->getType()->getNumElements();
+    unsigned NumSrcVecElts =
+        cast<VectorType>(ShufOp0->getType())->getNumElements();
     if (Shuf->hasOneUse() && DestTy->isVectorTy() &&
-        DestTy->getVectorNumElements() == NumShufElts &&
+        cast<VectorType>(DestTy)->getNumElements() == NumShufElts &&
         NumShufElts == NumSrcVecElts) {
       BitCastInst *Tmp;
       // If either of the operands is a cast from CI.getType(), then
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -897,7 +897,7 @@
     // For vectors, we apply the same reasoning on a per-lane basis.
     auto *Base = GEPLHS->getPointerOperand();
     if (GEPLHS->getType()->isVectorTy() && Base->getType()->isPointerTy()) {
-      int NumElts = GEPLHS->getType()->getVectorNumElements();
+      int NumElts = cast<VectorType>(GEPLHS->getType())->getNumElements();
       Base = Builder.CreateVectorSplat(NumElts, Base);
     }
     return new ICmpInst(Cond, Base,
@@ -1861,8 +1861,8 @@
   int32_t ExactLogBase2 = C2->exactLogBase2();
   if (ExactLogBase2 != -1 && DL.isLegalInteger(ExactLogBase2 + 1)) {
     Type *NTy = IntegerType::get(Cmp.getContext(), ExactLogBase2 + 1);
-    if (And->getType()->isVectorTy())
-      NTy = VectorType::get(NTy, And->getType()->getVectorNumElements());
+    if (auto *AndVTy = dyn_cast<VectorType>(And->getType()))
+      NTy = VectorType::get(NTy, AndVTy->getNumElements());
     Value *Trunc = Builder.CreateTrunc(X, NTy);
     auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_SGE
                                                           : CmpInst::ICMP_SLT;
@@ -2147,8 +2147,8 @@
   if (Shl->hasOneUse() && Amt != 0 && C.countTrailingZeros() >= Amt &&
       DL.isLegalInteger(TypeBits - Amt)) {
     Type *TruncTy = IntegerType::get(Cmp.getContext(), TypeBits - Amt);
-    if (ShType->isVectorTy())
-      TruncTy = VectorType::get(TruncTy, ShType->getVectorNumElements());
+    if (auto *ShVTy = dyn_cast<VectorType>(ShType))
+      TruncTy = VectorType::get(TruncTy, ShVTy->getNumElements());
     Constant *NewC =
         ConstantInt::get(TruncTy, C.ashr(*ShiftAmt).trunc(TypeBits - Amt));
     return new ICmpInst(Pred, Builder.CreateTrunc(X, TruncTy), NewC);
@@ -2774,8 +2774,8 @@
       // (bitcast (fpext/fptrunc X)) to iX) > -1 --> (bitcast X to iY) > -1
       Type *XType = X->getType();
       Type *NewType = Builder.getIntNTy(XType->getScalarSizeInBits());
-      if (XType->isVectorTy())
-        NewType = VectorType::get(NewType, XType->getVectorNumElements());
+      if (auto *XVTy = dyn_cast<VectorType>(XType))
+        NewType = VectorType::get(NewType, XVTy->getNumElements());
       Value *NewBitcast = Builder.CreateBitCast(X, NewType);
       if (TrueIfSigned)
         return new ICmpInst(ICmpInst::ICMP_SLT, NewBitcast,
@@ -3352,8 +3352,9 @@
     Type *OpTy = M->getType();
     auto *VecC = dyn_cast<Constant>(M);
     if (OpTy->isVectorTy() && VecC && VecC->containsUndefElement()) {
+      auto *OpVTy = cast<VectorType>(OpTy);
       Constant *SafeReplacementConstant = nullptr;
-      for (unsigned i = 0, e = OpTy->getVectorNumElements(); i != e; ++i) {
+      for (unsigned i = 0, e = OpVTy->getNumElements(); i != e; ++i) {
         if (!isa<UndefValue>(VecC->getAggregateElement(i))) {
           SafeReplacementConstant = VecC->getAggregateElement(i);
           break;
         }
@@ -5187,8 +5188,8 @@
     // Bail out if the constant can't be safely incremented/decremented.
     if (!ConstantIsOk(CI))
       return llvm::None;
-  } else if (Type->isVectorTy()) {
-    unsigned NumElts = Type->getVectorNumElements();
+  } else if (auto *VTy = dyn_cast<VectorType>(Type)) {
+    unsigned NumElts = VTy->getNumElements();
     for (unsigned i = 0; i != NumElts; ++i) {
       Constant *Elt = C->getAggregateElement(i);
       if (!Elt)
@@ -5409,7 +5410,8 @@
   if (ScalarC && match(M, m_SplatOrUndefMask(MaskSplatIndex))) {
     // We allow undefs in matching, but this transform removes those for safety.
     // Demanded elements analysis should be able to recover some/all of that.
-    C = ConstantVector::getSplat(V1Ty->getVectorElementCount(), ScalarC);
+    C = ConstantVector::getSplat(cast<VectorType>(V1Ty)->getElementCount(),
+                                 ScalarC);
     SmallVector<int, 32> NewM(M.size(), MaskSplatIndex);
     Value *NewCmp = IsFP ? Builder.CreateFCmp(Pred, V1, C)
                          : Builder.CreateICmp(Pred, V1, C);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -244,9 +244,10 @@
 /// If no identity constant exists, replace undef with some other safe constant.
 static inline Constant *getSafeVectorConstantForBinop(
       BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant) {
-  assert(In->getType()->isVectorTy() && "Not expecting scalars here");
+  auto *InVTy = dyn_cast<VectorType>(In->getType());
+  assert(InVTy && "Not expecting scalars here");
 
-  Type *EltTy = In->getType()->getVectorElementType();
+  Type *EltTy = InVTy->getElementType();
   auto *SafeC = ConstantExpr::getBinOpIdentity(Opcode, EltTy, IsRHSConstant);
   if (!SafeC) {
     // TODO: Should this be available as a constant utility function? It is
@@ -284,7 +285,7 @@
     }
   }
   assert(SafeC && "Must have safe constant for binop");
-  unsigned NumElts = In->getType()->getVectorNumElements();
+  unsigned NumElts = InVTy->getNumElements();
   SmallVector<Constant *, 16> Out(NumElts);
   for (unsigned i = 0; i != NumElts; ++i) {
     Constant *C = In->getAggregateElement(i);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -591,10 +591,9 @@
   // infinite loop).
   Type *Dummy;
   if (!Ty->isIntegerTy() && Ty->isSized() &&
-      !(Ty->isVectorTy() && Ty->getVectorIsScalable()) &&
+      !(Ty->isVectorTy() && cast<VectorType>(Ty)->isScalable()) &&
       DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
-      DL.typeSizeEqualsStoreSize(Ty) &&
-      !DL.isNonIntegralPointerType(Ty) &&
+      DL.typeSizeEqualsStoreSize(Ty) && !DL.isNonIntegralPointerType(Ty) &&
       !isMinMaxWithLoads(
           peekThroughBitcast(LI.getPointerOperand(), /*OneUseOnly=*/true),
           Dummy)) {
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -108,7 +108,8 @@
     return nullptr;
 
   SmallVector<Constant *, 4> Elts;
-  for (unsigned I = 0, E = Ty->getVectorNumElements(); I != E; ++I) {
+  for (unsigned I = 0, E = cast<VectorType>(Ty)->getNumElements(); I != E;
+       ++I) {
     Constant *Elt = C->getAggregateElement(I);
     if (!Elt)
       return nullptr;
@@ -1433,7 +1434,7 @@
   // If it's a constant vector, flip any negative values positive.
   if (isa<ConstantVector>(Op1) || isa<ConstantDataVector>(Op1)) {
     Constant *C = cast<Constant>(Op1);
-    unsigned VWidth = C->getType()->getVectorNumElements();
+    unsigned VWidth = cast<VectorType>(C->getType())->getNumElements();
 
     bool hasNegative = false;
     bool hasMissing = false;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -301,10 +301,11 @@
   // The select condition may be a vector. We may only change the operand
   // type if the vector width remains the same (and matches the condition).
-  if (CondTy->isVectorTy()) {
+  if (auto *CondVTy = dyn_cast<VectorType>(CondTy)) {
     if (!FIOpndTy->isVectorTy())
       return nullptr;
-    if (CondTy->getVectorNumElements() != FIOpndTy->getVectorNumElements())
+    if (CondVTy->getNumElements() !=
+        cast<VectorType>(FIOpndTy)->getNumElements())
       return nullptr;
 
     // TODO: If the backend knew how to deal with casts better, we could
@@ -1935,7 +1936,7 @@
   if (!CondVal->getType()->isVectorTy() || !match(CondVal, m_Constant(CondC)))
     return nullptr;
 
-  unsigned NumElts = CondVal->getType()->getVectorNumElements();
+  unsigned NumElts = cast<VectorType>(CondVal->getType())->getNumElements();
   SmallVector<Constant *, 16> Mask;
   Mask.reserve(NumElts);
   Type *Int32Ty = Type::getInt32Ty(CondVal->getContext());
@@ -1971,8 +1972,8 @@
 /// is likely better for vector codegen.
 static Instruction *canonicalizeScalarSelectOfVecs(
     SelectInst &Sel, InstCombiner &IC) {
-  Type *Ty = Sel.getType();
-  if (!Ty->isVectorTy())
+  auto *Ty = dyn_cast<VectorType>(Sel.getType());
+  if (!Ty)
     return nullptr;
 
   // We can replace a single-use extract with constant index.
@@ -1983,7 +1984,7 @@
   // select (extelt V, Index), T, F --> select (splat V, Index), T, F
   // Splatting the extracted condition reduces code (we could directly create a
   // splat shuffle of the source vector to eliminate the intermediate step).
-  unsigned NumElts = Ty->getVectorNumElements();
+  unsigned NumElts = Ty->getNumElements();
   return IC.replaceOperand(Sel, 0,
                            IC.Builder.CreateVectorSplat(NumElts, Cond));
 }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -1074,7 +1074,8 @@
       DemandedElts.getActiveBits() == 3)
     return nullptr;
 
-  unsigned VWidth = II->getType()->getVectorNumElements();
+  auto *IIVTy = cast<VectorType>(II->getType());
+  unsigned VWidth = IIVTy->getNumElements();
   if (VWidth == 1)
     return nullptr;
 
@@ -1180,7 +1181,7 @@
       Intrinsic::matchIntrinsicSignature(FTy, TableRef, OverloadTys);
 
   Module *M = II->getParent()->getParent()->getParent();
-  Type *EltTy = II->getType()->getVectorElementType();
+  Type *EltTy = IIVTy->getElementType();
   Type *NewTy = (NewNumElts == 1) ? EltTy : VectorType::get(EltTy, NewNumElts);
 
   OverloadTys[0] = NewTy;
@@ -1227,7 +1228,7 @@
                                                 APInt &UndefElts,
                                                 unsigned Depth,
                                                 bool AllowMultipleUsers) {
-  unsigned VWidth = V->getType()->getVectorNumElements();
+  unsigned VWidth = cast<VectorType>(V->getType())->getNumElements();
   APInt EltMask(APInt::getAllOnesValue(VWidth));
   assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");
@@ -1386,7 +1387,7 @@
            Shuffle->getOperand(1)->getType() &&
            "Expected shuffle operands to have same type");
     unsigned OpWidth =
-        Shuffle->getOperand(0)->getType()->getVectorNumElements();
+        cast<VectorType>(Shuffle->getOperand(0)->getType())->getNumElements();
     // Handle trivial case of a splat. Only check the first element of LHS
     // operand.
     if (all_of(Shuffle->getShuffleMask(), [](int Elt) { return Elt == 0; }) &&
@@ -1799,7 +1800,7 @@
     case Intrinsic::x86_avx512_packusdw_512:
     case Intrinsic::x86_avx512_packuswb_512: {
       auto *Ty0 = II->getArgOperand(0)->getType();
-      unsigned InnerVWidth = Ty0->getVectorNumElements();
+      unsigned InnerVWidth = cast<VectorType>(Ty0)->getNumElements();
       assert(VWidth == (InnerVWidth * 2) && "Unexpected input size");
 
       unsigned NumLanes = Ty0->getPrimitiveSizeInBits() / 128;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -175,9 +175,9 @@
   // If this extractelement is using a bitcast from a vector of the same number
   // of elements, see if we can find the source element from the source vector:
   // extelt (bitcast VecX), IndexC --> bitcast X[IndexC]
-  Type *SrcTy = X->getType();
+  auto *SrcTy = cast<VectorType>(X->getType());
   Type *DestTy = Ext.getType();
-  unsigned NumSrcElts = SrcTy->getVectorNumElements();
+  unsigned NumSrcElts = SrcTy->getNumElements();
   unsigned NumElts = Ext.getVectorOperandType()->getNumElements();
   if (NumSrcElts == NumElts)
     if (Value *Elt = findScalarElement(X, ExtIndexC))
@@ -258,7 +258,7 @@
 /// Find elements of V demanded by UserInstr.
 static APInt findDemandedEltsBySingleUser(Value *V, Instruction *UserInstr) {
-  unsigned VWidth = V->getType()->getVectorNumElements();
+  unsigned VWidth = cast<VectorType>(V->getType())->getNumElements();
 
   // Conservatively assume that all elements are needed.
   APInt UsedElts(APInt::getAllOnesValue(VWidth));
@@ -275,7 +275,8 @@
     }
   case Instruction::ShuffleVector: {
     ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(UserInstr);
-    unsigned MaskNumElts = UserInstr->getType()->getVectorNumElements();
+    unsigned MaskNumElts =
+        cast<VectorType>(UserInstr->getType())->getNumElements();
 
     UsedElts = APInt(VWidth, 0);
     for (unsigned i = 0; i < MaskNumElts; i++) {
@@ -301,7 +302,7 @@
 /// no user demands an element of V, then the corresponding bit
 /// remains unset in the returned value.
 static APInt findDemandedEltsByAllUsers(Value *V) {
-  unsigned VWidth = V->getType()->getVectorNumElements();
+  unsigned VWidth = cast<VectorType>(V->getType())->getNumElements();
   APInt UnionUsedElts(VWidth, 0);
 
   for (const Use &U : V->uses()) {
@@ -419,7 +420,7 @@
       int SrcIdx = SVI->getMaskValue(Elt->getZExtValue());
       Value *Src;
       unsigned LHSWidth =
-          SVI->getOperand(0)->getType()->getVectorNumElements();
+          cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
 
       if (SrcIdx < 0)
         return replaceInstUsesWith(EI, UndefValue::get(EI.getType()));
@@ -453,7 +454,7 @@
                                          SmallVectorImpl<Constant *> &Mask) {
   assert(LHS->getType() == RHS->getType() &&
          "Invalid CollectSingleShuffleElements");
-  unsigned NumElts = V->getType()->getVectorNumElements();
+  unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
 
   if (isa<UndefValue>(V)) {
     Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(V->getContext())));
@@ -495,7 +496,8 @@
     if (isa<ConstantInt>(EI->getOperand(1))) {
       unsigned ExtractedIdx =
           cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
-      unsigned NumLHSElts = LHS->getType()->getVectorNumElements();
+      unsigned NumLHSElts =
+          cast<VectorType>(LHS->getType())->getNumElements();
 
       // This must be extracting from either LHS or RHS.
       if (EI->getOperand(0) == LHS || EI->getOperand(0) == RHS) {
@@ -531,8 +533,8 @@
                                          InstCombiner &IC) {
   VectorType *InsVecType = InsElt->getType();
   VectorType *ExtVecType = ExtElt->getVectorOperandType();
-  unsigned NumInsElts = InsVecType->getVectorNumElements();
-  unsigned NumExtElts = ExtVecType->getVectorNumElements();
+  unsigned NumInsElts = InsVecType->getNumElements();
+  unsigned NumExtElts = ExtVecType->getNumElements();
 
   // The inserted-to vector must be wider than the extracted-from vector.
   if (InsVecType->getElementType() != ExtVecType->getElementType() ||
@@ -615,7 +617,7 @@
                                           Value *PermittedRHS,
                                           InstCombiner &IC) {
   assert(V->getType()->isVectorTy() && "Invalid shuffle!");
-  unsigned NumElts = V->getType()->getVectorNumElements();
+  unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
 
   if (isa<UndefValue>(V)) {
     Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(V->getContext())));
@@ -659,7 +661,8 @@
         return std::make_pair(V, nullptr);
       }
 
-      unsigned NumLHSElts = RHS->getType()->getVectorNumElements();
+      unsigned NumLHSElts =
+          cast<VectorType>(RHS->getType())->getNumElements();
       Mask[InsertedIdx % NumElts] =
           ConstantInt::get(Type::getInt32Ty(V->getContext()),
                            NumLHSElts+ExtractedIdx);
@@ -670,7 +673,7 @@
       // We've gone as far as we can: anything on the other side of the
       // extractelement will already have been converted into a shuffle.
       unsigned NumLHSElts =
-          EI->getOperand(0)->getType()->getVectorNumElements();
+          cast<VectorType>(EI->getOperand(0)->getType())->getNumElements();
       for (unsigned i = 0; i != NumElts; ++i)
         Mask.push_back(ConstantInt::get(
             Type::getInt32Ty(V->getContext()),
@@ -731,7 +734,8 @@
 static bool isShuffleEquivalentToSelect(ShuffleVectorInst &Shuf) {
   int MaskSize = Shuf.getShuffleMask().size();
-  int VecSize = Shuf.getOperand(0)->getType()->getVectorNumElements();
+  int VecSize =
+      cast<VectorType>(Shuf.getOperand(0)->getType())->getNumElements();
 
   // A vector select does not change the size of the operands.
   if (MaskSize != VecSize)
@@ -841,7 +845,7 @@
   // For example:
   // inselt (shuf (inselt undef, X, 0), undef, <0,undef,0,undef>), X, 1
   //   --> shuf (inselt undef, X, 0), undef, <0,0,0,undef>
-  unsigned NumMaskElts = Shuf->getType()->getVectorNumElements();
+  unsigned NumMaskElts = Shuf->getType()->getNumElements();
   SmallVector<int, 16> NewMask(NumMaskElts);
   for (unsigned i = 0; i != NumMaskElts; ++i)
     NewMask[i] = i == IdxC ? 0 : Shuf->getMaskValue(i);
@@ -874,7 +878,7 @@
   // that same index value.
   // For example:
   // inselt (shuf X, IdMask), (extelt X, IdxC), IdxC --> shuf X, IdMask'
-  unsigned NumMaskElts = Shuf->getType()->getVectorNumElements();
+  unsigned NumMaskElts = Shuf->getType()->getNumElements();
   SmallVector<int, 16> NewMask(NumMaskElts);
   ArrayRef<int> OldMask = Shuf->getShuffleMask();
   for (unsigned i = 0; i != NumMaskElts; ++i) {
@@ -1038,7 +1042,8 @@
       match(ScalarOp, m_BitCast(m_Value(ScalarSrc))) &&
       (VecOp->hasOneUse() || ScalarOp->hasOneUse()) &&
       VecSrc->getType()->isVectorTy() && !ScalarSrc->getType()->isVectorTy() &&
-      VecSrc->getType()->getVectorElementType() == ScalarSrc->getType()) {
+      cast<VectorType>(VecSrc->getType())->getElementType() ==
+          ScalarSrc->getType()) {
     // inselt (bitcast VecSrc), (bitcast ScalarSrc), IdxOp -->
     //   bitcast (inselt VecSrc, ScalarSrc, IdxOp)
     Value *NewInsElt = Builder.CreateInsertElement(VecSrc, ScalarSrc, IdxOp);
@@ -1050,9 +1055,9 @@
   uint64_t InsertedIdx, ExtractedIdx;
   Value *ExtVecOp;
   if (match(IdxOp, m_ConstantInt(InsertedIdx)) &&
-      match(ScalarOp, m_ExtractElement(m_Value(ExtVecOp),
-                                       m_ConstantInt(ExtractedIdx))) &&
-      ExtractedIdx < ExtVecOp->getType()->getVectorNumElements()) {
+      match(ScalarOp,
+            m_ExtractElement(m_Value(ExtVecOp), m_ConstantInt(ExtractedIdx))) &&
+      ExtractedIdx < cast<VectorType>(ExtVecOp->getType())->getNumElements()) {
     // TODO: Looking at the user(s) to determine if this insert is a
     // fold-to-shuffle opportunity does not match the usual instcombine
     // constraints. We should decide if the transform is worthy based only
@@ -1093,7 +1098,7 @@
     }
   }
 
-  unsigned VWidth = VecOp->getType()->getVectorNumElements();
+  unsigned VWidth = cast<VectorType>(VecOp->getType())->getNumElements();
   APInt UndefElts(VWidth, 0);
   APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
   if (Value *V = SimplifyDemandedVectorElts(&IE, AllOnesEltMask, UndefElts)) {
@@ -1178,7 +1183,8 @@
     // Bail out if we would create longer vector ops. We could allow creating
    // longer vector ops, but that may result in more expensive codegen.
     Type *ITy = I->getType();
-    if (ITy->isVectorTy() && Mask.size() > ITy->getVectorNumElements())
+    if (ITy->isVectorTy() &&
+        Mask.size() > cast<VectorType>(ITy)->getNumElements())
       return false;
     for (Value *Operand : I->operands()) {
       if (!canEvaluateShuffled(Operand, Mask, Depth - 1))
@@ -1266,9 +1272,9 @@
     case Instruction::FPExt: {
       // It's possible that the mask has a different number of elements from
      // the original cast. We recompute the destination type to match the mask.
-      Type *DestTy =
-          VectorType::get(I->getType()->getScalarType(),
-                          NewOps[0]->getType()->getVectorNumElements());
+      Type *DestTy = VectorType::get(
+          I->getType()->getScalarType(),
+          cast<VectorType>(NewOps[0]->getType())->getElementCount());
       assert(NewOps.size() == 1 && "cast with #ops != 1");
       return CastInst::Create(cast<CastInst>(I)->getOpcode(), NewOps[0], DestTy,
                               "", I);
@@ -1335,7 +1341,8 @@
     case Instruction::Select:
     case Instruction::GetElementPtr: {
       SmallVector<Value *, 8> NewOps;
-      bool NeedsRebuild = (Mask.size() != I->getType()->getVectorNumElements());
+      bool NeedsRebuild =
+          (Mask.size() != cast<VectorType>(I->getType())->getNumElements());
       for (int i = 0, e = I->getNumOperands(); i != e; ++i) {
         Value *V;
         // Recursively call evaluateInDifferentElementOrder on vector arguments
@@ -1389,7 +1396,8 @@
 //   +--+--+--+--+
 static bool isShuffleExtractingFromLHS(ShuffleVectorInst &SVI,
                                        ArrayRef<int> Mask) {
-  unsigned LHSElems = SVI.getOperand(0)->getType()->getVectorNumElements();
+  unsigned LHSElems =
+      cast<VectorType>(SVI.getOperand(0)->getType())->getNumElements();
   unsigned MaskElems = Mask.size();
   unsigned BegIdx = Mask.front();
   unsigned EndIdx = Mask.back();
@@ -1521,7 +1529,7 @@
   // For example:
   // shuf (inselt undef, X, 2), undef, <2,2,undef>
   //   --> shuf (inselt undef, X, 0), undef, <0,0,undef>
-  unsigned NumMaskElts = Shuf.getType()->getVectorNumElements();
+  unsigned NumMaskElts = Shuf.getType()->getNumElements();
   SmallVector<int, 16> NewMask(NumMaskElts, 0);
   for (unsigned i = 0; i != NumMaskElts; ++i)
     if (Mask[i] == UndefMaskElem)
@@ -1539,7 +1547,7 @@
   // Canonicalize to choose from operand 0 first unless operand 1 is undefined.
   // Commuting undef to operand 0 conflicts with another canonicalization.
-  unsigned NumElts = Shuf.getType()->getVectorNumElements();
+  unsigned NumElts = Shuf.getType()->getNumElements();
   if (!isa<UndefValue>(Shuf.getOperand(1)) &&
       Shuf.getMaskValue(0) >= (int)NumElts) {
     // TODO: Can we assert that both operands of a shuffle-select are not undef
@@ -1673,7 +1681,8 @@
   // and the source element type must be larger than the shuffle element type.
   Type *SrcType = X->getType();
   if (!SrcType->isVectorTy() || !SrcType->isIntOrIntVectorTy() ||
-      SrcType->getVectorNumElements() != DestType->getVectorNumElements() ||
+      cast<VectorType>(SrcType)->getNumElements() !=
+          cast<VectorType>(DestType)->getNumElements() ||
       SrcType->getScalarSizeInBits() % DestType->getScalarSizeInBits() != 0)
     return nullptr;
@@ -1717,10 +1726,11 @@
   // We need a narrow condition value. It must be extended with undef elements
   // and have the same number of elements as this shuffle.
-  unsigned NarrowNumElts = Shuf.getType()->getVectorNumElements();
+  unsigned NarrowNumElts = Shuf.getType()->getNumElements();
   Value *NarrowCond;
   if (!match(Cond, m_OneUse(m_ShuffleVector(m_Value(NarrowCond), m_Undef()))) ||
-      NarrowCond->getType()->getVectorNumElements() != NarrowNumElts ||
+      cast<VectorType>(NarrowCond->getType())->getNumElements() !=
+          NarrowNumElts ||
       !cast<ShuffleVectorInst>(Cond)->isIdentityWithPadding())
     return nullptr;
@@ -1759,7 +1769,7 @@
   // new shuffle mask. Otherwise, copy the original mask element. Example:
   //   shuf (shuf X, Y, <C0, C1, C2, undef, C4>), undef, <0, undef, 2, 3> -->
   //   shuf X, Y, <C0, undef, C2, undef>
-  unsigned NumElts = Shuf.getType()->getVectorNumElements();
+  unsigned NumElts = Shuf.getType()->getNumElements();
   SmallVector<int, 16> NewMask(NumElts);
   assert(NumElts < Mask.size() &&
          "Identity with extract must have less elements than its inputs");
@@ -1784,7 +1794,7 @@
   // TODO: This restriction could be removed if the insert has only one use
   // (because the transform would require a new length-changing shuffle).
   int NumElts = Mask.size();
-  if (NumElts != (int)(V0->getType()->getVectorNumElements()))
+  if (NumElts != (int)(cast<VectorType>(V0->getType())->getNumElements()))
     return nullptr;
 
   // This is a specialization of a fold in SimplifyDemandedVectorElts. We may
@@ -1879,9 +1889,9 @@
   Value *X = Shuffle0->getOperand(0);
   Value *Y = Shuffle1->getOperand(0);
   if (X->getType() != Y->getType() ||
-      !isPowerOf2_32(Shuf.getType()->getVectorNumElements()) ||
-      !isPowerOf2_32(Shuffle0->getType()->getVectorNumElements()) ||
-      !isPowerOf2_32(X->getType()->getVectorNumElements()) ||
+      !isPowerOf2_32(Shuf.getType()->getNumElements()) ||
+      !isPowerOf2_32(Shuffle0->getType()->getNumElements()) ||
+      !isPowerOf2_32(cast<VectorType>(X->getType())->getNumElements()) ||
       isa<UndefValue>(X) || isa<UndefValue>(Y))
     return nullptr;
   assert(isa<UndefValue>(Shuffle0->getOperand(1)) &&
@@ -1892,8 +1902,8 @@
   // operands directly by adjusting the shuffle mask to account for the narrower
   // types:
   // shuf (widen X), (widen Y), Mask --> shuf X, Y, Mask'
-  int NarrowElts = X->getType()->getVectorNumElements();
-  int WideElts = Shuffle0->getType()->getVectorNumElements();
+  int NarrowElts = cast<VectorType>(X->getType())->getNumElements();
+  int WideElts = Shuffle0->getType()->getNumElements();
   assert(WideElts > NarrowElts && "Unexpected types for identity with padding");
 
   Type *I32Ty = IntegerType::getInt32Ty(Shuf.getContext());
@@ -1936,8 +1946,8 @@
     return replaceInstUsesWith(SVI, V);
 
   // shuffle x, x, mask --> shuffle x, undef, mask'
-  unsigned VWidth = SVI.getType()->getVectorNumElements();
-  unsigned LHSWidth = LHS->getType()->getVectorNumElements();
+  unsigned VWidth = SVI.getType()->getNumElements();
+  unsigned LHSWidth = cast<VectorType>(LHS->getType())->getNumElements();
   ArrayRef<int> Mask = SVI.getShuffleMask();
   Type *Int32Ty = Type::getInt32Ty(SVI.getContext());
@@ -1950,10 +1960,10 @@
   Value *X;
   if (match(LHS, m_BitCast(m_Value(X))) && match(RHS, m_Undef()) &&
       X->getType()->isVectorTy() && VWidth == LHSWidth &&
-      X->getType()->getVectorNumElements() >= VWidth) {
+      cast<VectorType>(X->getType())->getNumElements() >= VWidth) {
     // Create the scaled mask constant.
-    Type *XType = X->getType();
-    unsigned XNumElts = XType->getVectorNumElements();
+    auto *XType = cast<VectorType>(X->getType());
+    unsigned XNumElts = XType->getNumElements();
     assert(XNumElts % VWidth == 0 && "Unexpected vector bitcast");
     unsigned ScaleFactor = XNumElts / VWidth;
     SmallVector<int, 16> ScaledMask;
@@ -2173,11 +2183,11 @@
   if (LHSShuffle) {
     LHSOp0 = LHSShuffle->getOperand(0);
     LHSOp1 = LHSShuffle->getOperand(1);
-    LHSOp0Width = LHSOp0->getType()->getVectorNumElements();
+    LHSOp0Width = cast<VectorType>(LHSOp0->getType())->getNumElements();
   }
   if (RHSShuffle) {
     RHSOp0 = RHSShuffle->getOperand(0);
-    RHSOp0Width = RHSOp0->getType()->getVectorNumElements();
+    RHSOp0Width = cast<VectorType>(RHSOp0->getType())->getNumElements();
   }
   Value* newLHS = LHS;
   Value* newRHS = RHS;
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1636,7 +1636,7 @@
   if (match(&Inst,
             m_c_BinOp(m_OneUse(m_ShuffleVector(m_Value(V1), m_Undef(),
                                                m_Mask(Mask))),
                       m_Constant(C))) &&
-      V1->getType()->getVectorNumElements() <= NumElts) {
+      cast<VectorType>(V1->getType())->getNumElements() <= NumElts) {
     assert(Inst.getType()->getScalarType() == V1->getType()->getScalarType() &&
            "Shuffle should not change scalar type");
@@ -1647,7 +1647,7 @@
     // ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <undef,5,6,undef>
     bool ConstOp1 = isa<Constant>(RHS);
     ArrayRef<int> ShMask = Mask;
-    unsigned SrcVecNumElts = V1->getType()->getVectorNumElements();
+    unsigned SrcVecNumElts = cast<VectorType>(V1->getType())->getNumElements();
     UndefValue *UndefScalar = UndefValue::get(C->getType()->getScalarType());
     SmallVector<Constant *, 16> NewVecC(SrcVecNumElts, UndefScalar);
     bool MayChange = true;
@@ -1858,8 +1858,8 @@
     return replaceInstUsesWith(GEP, V);
 
   // For vector geps, use the generic demanded vector support.
-  if (GEP.getType()->isVectorTy()) {
-    auto VWidth = GEP.getType()->getVectorNumElements();
+  if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
+    auto VWidth = GEPVTy->getNumElements();
     APInt UndefElts(VWidth, 0);
     APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
     if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
                                               UndefElts)) {
@@ -1871,7 +1871,7 @@
 
     // TODO: 1) Scalarize splat operands, 2) scalarize entire instruction if
     // possible (decide on canonical form for pointer broadcast), 3) exploit
-    // undef elements to decrease demanded bits
+    // undef elements to decrease demanded bits
   }
 
   Value *PtrOp = GEP.getOperand(0);
@@ -1895,7 +1895,8 @@
     Type *IndexTy = (*I)->getType();
     Type *NewIndexType =
         IndexTy->isVectorTy()
-            ? VectorType::get(NewScalarIndexTy, IndexTy->getVectorNumElements())
+            ? VectorType::get(NewScalarIndexTy,
+                              cast<VectorType>(IndexTy)->getNumElements())
             : NewScalarIndexTy;
 
     // If the element type has zero size then any index over it is equivalent
@@ -2390,8 +2391,9 @@
       // gep (bitcast [c x ty]* X to <c x ty>*), Y, Z --> gep X, Y, Z
       auto areMatchingArrayAndVecTypes = [](Type *ArrTy, Type *VecTy,
                                             const DataLayout &DL) {
-        return ArrTy->getArrayElementType() == VecTy->getVectorElementType() &&
-               ArrTy->getArrayNumElements() == VecTy->getVectorNumElements() &&
+        auto *VecVTy = cast<VectorType>(VecTy);
+        return ArrTy->getArrayElementType() == VecVTy->getElementType() &&
+               ArrTy->getArrayNumElements() == VecVTy->getNumElements() &&
               DL.getTypeAllocSize(ArrTy) == DL.getTypeAllocSize(VecTy);
       };
       if (GEP.getNumOperands() == 3 &&
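
The mechanical rewrite applied throughout is the same everywhere: code that
previously leaned on Type's asserting vector getters now spells out its vector
assumption with cast<> or dyn_cast<>. A minimal sketch of the pattern follows
(numLanesOrZero is a hypothetical helper written for illustration, not code
from this patch):

#include "llvm/IR/DerivedTypes.h" // VectorType
#include "llvm/Support/Casting.h" // cast<>, dyn_cast<>

using namespace llvm;

// A caller that *knows* Ty is a vector asserts that knowledge with cast<>,
// which aborts on a non-vector; a caller that is unsure tests the type with
// dyn_cast<> and handles the scalar case itself.
static unsigned numLanesOrZero(Type *Ty) {
  if (auto *VTy = dyn_cast<VectorType>(Ty))
    return VTy->getNumElements(); // was: Ty->getVectorNumElements()
  return 0;                       // scalar type: no lanes
}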