diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1705,7 +1705,7 @@
 
   if (C->getType()->isVectorTy()) {
     // Check each element of a constant vector.
-    unsigned NumElts = cast<VectorType>(C->getType())->getNumElements();
+    unsigned NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
     for (unsigned i = 0; i != NumElts; ++i) {
       Constant *Elt = C->getAggregateElement(i);
       if (!Elt)
@@ -2198,7 +2198,7 @@
 
 /// If all elements of two constant vectors are 0/-1 and inverses, return true.
 static bool areInverseVectorBitmasks(Constant *C1, Constant *C2) {
-  unsigned NumElts = cast<VectorType>(C1->getType())->getNumElements();
+  unsigned NumElts = cast<FixedVectorType>(C1->getType())->getNumElements();
   for (unsigned i = 0; i != NumElts; ++i) {
     Constant *EltC1 = C1->getAggregateElement(i);
     Constant *EltC2 = C2->getAggregateElement(i);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -377,7 +377,7 @@
   auto VT = cast<VectorType>(Vec->getType());
   auto SVT = VT->getElementType();
   auto AmtVT = Amt->getType();
-  unsigned VWidth = VT->getNumElements();
+  unsigned VWidth = cast<FixedVectorType>(VT)->getNumElements();
   unsigned BitWidth = SVT->getPrimitiveSizeInBits();
 
   // If the shift amount is guaranteed to be in-range we can replace it with a
@@ -407,7 +407,7 @@
     assert(AmtVT->isVectorTy() && AmtVT->getPrimitiveSizeInBits() == 128 &&
            cast<VectorType>(AmtVT)->getElementType() == SVT &&
            "Unexpected shift-by-scalar type");
-    unsigned NumAmtElts = cast<VectorType>(AmtVT)->getNumElements();
+    unsigned NumAmtElts = cast<FixedVectorType>(AmtVT)->getNumElements();
     APInt DemandedLower = APInt::getOneBitSet(NumAmtElts, 0);
     APInt DemandedUpper = APInt::getBitsSet(NumAmtElts, 1, NumAmtElts / 2);
     KnownBits KnownLowerBits = llvm::computeKnownBits(
@@ -524,7 +524,7 @@
   auto Amt = II.getArgOperand(1);
   auto VT = cast<VectorType>(II.getType());
   auto SVT = VT->getElementType();
-  int NumElts = VT->getNumElements();
+  int NumElts = cast<FixedVectorType>(VT)->getNumElements();
   int BitWidth = SVT->getIntegerBitWidth();
 
   // If the shift amount is guaranteed to be in-range we can replace it with a
@@ -620,10 +620,10 @@
   if (isa<UndefValue>(Arg0) && isa<UndefValue>(Arg1))
     return UndefValue::get(ResTy);
 
-  auto *ArgTy = cast<VectorType>(Arg0->getType());
+  auto *ArgTy = cast<FixedVectorType>(Arg0->getType());
   unsigned NumLanes = ResTy->getPrimitiveSizeInBits() / 128;
   unsigned NumSrcElts = ArgTy->getNumElements();
-  assert(cast<VectorType>(ResTy)->getNumElements() == (2 * NumSrcElts) &&
+  assert(cast<FixedVectorType>(ResTy)->getNumElements() == (2 * NumSrcElts) &&
          "Unexpected packing types");
 
   unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
@@ -695,7 +695,7 @@
   //   %cmp = icmp slt <16 x i8> %x, zeroinitializer
   //   %int = bitcast <16 x i1> %cmp to i16
   //   %res = zext i16 %int to i32
-  unsigned NumElts = ArgTy->getNumElements();
+  unsigned NumElts = cast<FixedVectorType>(ArgTy)->getNumElements();
   Type *IntegerVecTy = VectorType::getInteger(ArgTy);
   Type *IntegerTy = Builder.getIntNTy(NumElts);
 
@@ -739,7 +739,7 @@
   if (!CInt)
     return nullptr;
 
-  VectorType *VecTy = cast<VectorType>(II.getType());
+  auto *VecTy = cast<FixedVectorType>(II.getType());
   assert(VecTy->getNumElements() == 4 && "insertps with wrong vector type");
 
   // The immediate permute control byte looks like this:
@@ -982,7 +982,7 @@
   if (!V)
     return nullptr;
 
-  auto *VecTy = cast<VectorType>(II.getType());
+  auto *VecTy = cast<FixedVectorType>(II.getType());
   unsigned NumElts = VecTy->getNumElements();
   assert((NumElts == 16 || NumElts == 32 || NumElts == 64) &&
          "Unexpected number of elements in shuffle mask!");
@@ -1027,7 +1027,7 @@
   if (!V)
     return nullptr;
 
-  auto *VecTy = cast<VectorType>(II.getType());
+  auto *VecTy = cast<FixedVectorType>(II.getType());
   unsigned NumElts = VecTy->getNumElements();
   bool IsPD = VecTy->getScalarType()->isDoubleTy();
   unsigned NumLaneElts = IsPD ? 2 : 4;
@@ -1075,7 +1075,7 @@
   if (!V)
     return nullptr;
 
-  auto *VecTy = cast<VectorType>(II.getType());
+  auto *VecTy = cast<FixedVectorType>(II.getType());
   unsigned Size = VecTy->getNumElements();
   assert((Size == 4 || Size == 8 || Size == 16 || Size == 32 || Size == 64) &&
          "Unexpected shuffle mask size");
@@ -1468,7 +1468,7 @@
   if (!C)
     return nullptr;
 
-  auto *VecTy = cast<VectorType>(II.getType());
+  auto *VecTy = cast<FixedVectorType>(II.getType());
   unsigned NumElts = VecTy->getNumElements();
 
   // Only perform this transformation for <8 x i8> vector types.
@@ -2677,7 +2677,7 @@
     // These intrinsics only demand the 0th element of their input vectors. If
     // we can simplify the input based on that, do so now.
     Value *Arg = II->getArgOperand(0);
-    unsigned VWidth = cast<VectorType>(Arg->getType())->getNumElements();
+    unsigned VWidth = cast<FixedVectorType>(Arg->getType())->getNumElements();
    if (Value *V = SimplifyDemandedVectorEltsLow(Arg, VWidth, 1))
       return replaceOperand(*II, 0, V);
     break;
@@ -2727,7 +2727,7 @@
     bool MadeChange = false;
     Value *Arg0 = II->getArgOperand(0);
     Value *Arg1 = II->getArgOperand(1);
-    unsigned VWidth = cast<VectorType>(Arg0->getType())->getNumElements();
+    unsigned VWidth = cast<FixedVectorType>(Arg0->getType())->getNumElements();
     if (Value *V = SimplifyDemandedVectorEltsLow(Arg0, VWidth, 1)) {
       replaceOperand(*II, 0, V);
       MadeChange = true;
@@ -2946,7 +2946,7 @@
     Value *Arg1 = II->getArgOperand(1);
     assert(Arg1->getType()->getPrimitiveSizeInBits() == 128 &&
            "Unexpected packed shift size");
-    unsigned VWidth = cast<VectorType>(Arg1->getType())->getNumElements();
+    unsigned VWidth = cast<FixedVectorType>(Arg1->getType())->getNumElements();
 
     if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, VWidth / 2))
       return replaceOperand(*II, 1, V);
@@ -3013,7 +3013,8 @@
     bool MadeChange = false;
     Value *Arg0 = II->getArgOperand(0);
     Value *Arg1 = II->getArgOperand(1);
-    unsigned VWidth = cast<VectorType>(Arg0->getType())->getNumElements();
+    unsigned VWidth =
+        cast<FixedVectorType>(Arg0->getType())->getNumElements();
 
     APInt UndefElts1(VWidth, 0);
     APInt DemandedElts1 = APInt::getSplat(VWidth,
@@ -3053,8 +3054,8 @@
   case Intrinsic::x86_sse4a_extrq: {
     Value *Op0 = II->getArgOperand(0);
     Value *Op1 = II->getArgOperand(1);
-    unsigned VWidth0 = cast<VectorType>(Op0->getType())->getNumElements();
-    unsigned VWidth1 = cast<VectorType>(Op1->getType())->getNumElements();
+    unsigned VWidth0 = cast<FixedVectorType>(Op0->getType())->getNumElements();
+    unsigned VWidth1 = cast<FixedVectorType>(Op1->getType())->getNumElements();
     assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
            Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
            VWidth1 == 16 && "Unexpected operand sizes");
@@ -3092,7 +3093,7 @@
     // EXTRQI: Extract Length bits starting from Index. Zero pad the remaining
     // bits of the lower 64-bits. The upper 64-bits are undefined.
     Value *Op0 = II->getArgOperand(0);
-    unsigned VWidth = cast<VectorType>(Op0->getType())->getNumElements();
+    unsigned VWidth = cast<FixedVectorType>(Op0->getType())->getNumElements();
     assert(Op0->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
            "Unexpected operand size");
 
@@ -3114,10 +3115,10 @@
   case Intrinsic::x86_sse4a_insertq: {
     Value *Op0 = II->getArgOperand(0);
     Value *Op1 = II->getArgOperand(1);
-    unsigned VWidth = cast<VectorType>(Op0->getType())->getNumElements();
+    unsigned VWidth = cast<FixedVectorType>(Op0->getType())->getNumElements();
     assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
            Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
-           cast<VectorType>(Op1->getType())->getNumElements() == 2 &&
+           cast<FixedVectorType>(Op1->getType())->getNumElements() == 2 &&
            "Unexpected operand size");
 
     // See if we're dealing with constant values.
@@ -3148,8 +3149,8 @@
     // undefined.
     Value *Op0 = II->getArgOperand(0);
     Value *Op1 = II->getArgOperand(1);
-    unsigned VWidth0 = cast<VectorType>(Op0->getType())->getNumElements();
-    unsigned VWidth1 = cast<VectorType>(Op1->getType())->getNumElements();
+    unsigned VWidth0 = cast<FixedVectorType>(Op0->getType())->getNumElements();
+    unsigned VWidth1 = cast<FixedVectorType>(Op1->getType())->getNumElements();
     assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
            Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
            VWidth1 == 2 && "Unexpected operand sizes");
@@ -3217,9 +3218,9 @@
            "Not expecting mask and operands with different sizes");
 
     unsigned NumMaskElts =
-        cast<VectorType>(Mask->getType())->getNumElements();
+        cast<FixedVectorType>(Mask->getType())->getNumElements();
     unsigned NumOperandElts =
-        cast<VectorType>(II->getType())->getNumElements();
+        cast<FixedVectorType>(II->getType())->getNumElements();
     if (NumMaskElts == NumOperandElts)
       return SelectInst::Create(BoolVec, Op1, Op0);
 
@@ -3310,7 +3311,7 @@
     // the permutation mask with respect to 31 and reverse the order of
     // V1 and V2.
     if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
-      assert(cast<VectorType>(Mask->getType())->getNumElements() == 16 &&
+      assert(cast<FixedVectorType>(Mask->getType())->getNumElements() == 16 &&
              "Bad type for intrinsic!");
 
       // Check that all of the elements are integer constants or undefs.
@@ -3467,7 +3468,7 @@
       if (auto *CI = dyn_cast<ConstantInt>(XorMask)) {
         if (CI->getValue().trunc(16).isAllOnesValue()) {
           auto TrueVector = Builder.CreateVectorSplat(
-              cast<VectorType>(II->getType())->getNumElements(),
+              cast<FixedVectorType>(II->getType())->getNumElements(),
               Builder.getTrue());
           return BinaryOperator::Create(Instruction::Xor, ArgArg, TrueVector);
         }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -872,7 +872,7 @@
   //   extractelement <8 x i32> (bitcast <4 x i64> %X to <8 x i32>), i32 0
   Value *VecOp;
   if (match(Src, m_OneUse(m_ExtractElt(m_Value(VecOp), m_ConstantInt(Cst))))) {
-    auto *VecOpTy = cast<VectorType>(VecOp->getType());
+    auto *VecOpTy = cast<FixedVectorType>(VecOp->getType());
     unsigned VecNumElts = VecOpTy->getNumElements();
 
     // A badly fit destination size would result in an invalid cast.
@@ -1535,7 +1535,7 @@
 
   Type *MinType = nullptr;
 
-  unsigned NumElts = CVVTy->getNumElements();
+  unsigned NumElts = cast<FixedVectorType>(CVVTy)->getNumElements();
   for (unsigned i = 0; i != NumElts; ++i) {
     auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
     if (!CFP)
@@ -1940,7 +1940,8 @@
   if (auto *VTy = dyn_cast<VectorType>(Ty)) {
     // Handle vectors of pointers.
     // FIXME: what should happen for scalable vectors?
-    PtrTy = FixedVectorType::get(PtrTy, VTy->getNumElements());
+    PtrTy = FixedVectorType::get(PtrTy,
+                                 cast<FixedVectorType>(VTy)->getNumElements());
   }
 
   Value *P = Builder.CreatePtrToInt(CI.getOperand(0), PtrTy);
@@ -1981,13 +1982,14 @@
       return nullptr;
 
     SrcTy =
-        FixedVectorType::get(DestTy->getElementType(), SrcTy->getNumElements());
+        FixedVectorType::get(DestTy->getElementType(),
+                             cast<FixedVectorType>(SrcTy)->getNumElements());
     InVal = IC.Builder.CreateBitCast(InVal, SrcTy);
   }
 
   bool IsBigEndian = IC.getDataLayout().isBigEndian();
-  unsigned SrcElts = SrcTy->getNumElements();
-  unsigned DestElts = DestTy->getNumElements();
+  unsigned SrcElts = cast<FixedVectorType>(SrcTy)->getNumElements();
+  unsigned DestElts = cast<FixedVectorType>(DestTy)->getNumElements();
 
   assert(SrcElts != DestElts && "Element counts should be different.");
 
@@ -2166,7 +2168,7 @@
 /// Into two insertelements that do "buildvector{%inc, %inc5}".
 static Value *optimizeIntegerToVectorInsertions(BitCastInst &CI,
                                                 InstCombiner &IC) {
-  VectorType *DestVecTy = cast<VectorType>(CI.getType());
+  auto *DestVecTy = cast<FixedVectorType>(CI.getType());
   Value *IntInput = CI.getOperand(0);
 
   SmallVector<Value*, 8> Elements(DestVecTy->getNumElements());
@@ -2206,7 +2208,8 @@
   if (!VectorType::isValidElementType(DestType))
     return nullptr;
 
-  unsigned NumElts = ExtElt->getVectorOperandType()->getNumElements();
+  unsigned NumElts =
+      cast<FixedVectorType>(ExtElt->getVectorOperandType())->getNumElements();
   auto *NewVecType = FixedVectorType::get(DestType, NumElts);
   auto *NewBC = IC.Builder.CreateBitCast(ExtElt->getVectorOperand(),
                                          NewVecType, "bc");
@@ -2273,7 +2276,8 @@
     if (auto *CondVTy = dyn_cast<VectorType>(CondTy)) {
       if (!DestTy->isVectorTy())
         return nullptr;
-      if (cast<VectorType>(DestTy)->getNumElements() != CondVTy->getNumElements())
+      if (cast<FixedVectorType>(DestTy)->getNumElements() !=
+          cast<FixedVectorType>(CondVTy)->getNumElements())
         return nullptr;
     }
 
@@ -2608,11 +2612,12 @@
     // a bitcast to a vector with the same # elts.
     Value *ShufOp0 = Shuf->getOperand(0);
     Value *ShufOp1 = Shuf->getOperand(1);
-    unsigned NumShufElts = Shuf->getType()->getNumElements();
+    unsigned NumShufElts =
+        cast<FixedVectorType>(Shuf->getType())->getNumElements();
     unsigned NumSrcVecElts =
-        cast<VectorType>(ShufOp0->getType())->getNumElements();
+        cast<FixedVectorType>(ShufOp0->getType())->getNumElements();
     if (Shuf->hasOneUse() && DestTy->isVectorTy() &&
-        cast<VectorType>(DestTy)->getNumElements() == NumShufElts &&
+        cast<FixedVectorType>(DestTy)->getNumElements() == NumShufElts &&
         NumShufElts == NumSrcVecElts) {
       BitCastInst *Tmp;
       // If either of the operands is a cast from CI.getType(), then
@@ -2680,7 +2685,8 @@
   if (VectorType *VT = dyn_cast<VectorType>(CI.getType())) {
     // Handle vectors of pointers.
     // FIXME: what should happen for scalable vectors?
-    MidTy = FixedVectorType::get(MidTy, VT->getNumElements());
+    MidTy = FixedVectorType::get(MidTy,
+                                 cast<FixedVectorType>(VT)->getNumElements());
   }
 
   Value *NewBitCast = Builder.CreateBitCast(Src, MidTy);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -897,7 +897,7 @@
     // For vectors, we apply the same reasoning on a per-lane basis.
     auto *Base = GEPLHS->getPointerOperand();
     if (GEPLHS->getType()->isVectorTy() && Base->getType()->isPointerTy()) {
-      int NumElts = cast<VectorType>(GEPLHS->getType())->getNumElements();
+      int NumElts = cast<FixedVectorType>(GEPLHS->getType())->getNumElements();
       Base = Builder.CreateVectorSplat(NumElts, Base);
     }
     return new ICmpInst(Cond, Base,
@@ -1883,7 +1883,8 @@
   if (ExactLogBase2 != -1 && DL.isLegalInteger(ExactLogBase2 + 1)) {
     Type *NTy = IntegerType::get(Cmp.getContext(), ExactLogBase2 + 1);
     if (auto *AndVTy = dyn_cast<VectorType>(And->getType()))
-      NTy = FixedVectorType::get(NTy, AndVTy->getNumElements());
+      NTy = FixedVectorType::get(
+          NTy, cast<FixedVectorType>(AndVTy)->getNumElements());
     Value *Trunc = Builder.CreateTrunc(X, NTy);
     auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_SGE
                                                           : CmpInst::ICMP_SLT;
@@ -2173,7 +2174,8 @@
         DL.isLegalInteger(TypeBits - Amt)) {
       Type *TruncTy = IntegerType::get(Cmp.getContext(), TypeBits - Amt);
       if (auto *ShVTy = dyn_cast<VectorType>(ShType))
-        TruncTy = FixedVectorType::get(TruncTy, ShVTy->getNumElements());
+        TruncTy = FixedVectorType::get(
+            TruncTy, cast<FixedVectorType>(ShVTy)->getNumElements());
       Constant *NewC =
           ConstantInt::get(TruncTy, C.ashr(*ShiftAmt).trunc(TypeBits - Amt));
       return new ICmpInst(Pred, Builder.CreateTrunc(X, TruncTy), NewC);
@@ -2806,7 +2808,8 @@
 
     Type *NewType = Builder.getIntNTy(XType->getScalarSizeInBits());
     if (auto *XVTy = dyn_cast<VectorType>(XType))
-      NewType = FixedVectorType::get(NewType, XVTy->getNumElements());
+      NewType = FixedVectorType::get(
+          NewType, cast<FixedVectorType>(XVTy)->getNumElements());
     Value *NewBitcast = Builder.CreateBitCast(X, NewType);
     if (TrueIfSigned)
       return new ICmpInst(ICmpInst::ICMP_SLT, NewBitcast,
@@ -3384,7 +3387,7 @@
   Type *OpTy = M->getType();
   auto *VecC = dyn_cast<Constant>(M);
   if (OpTy->isVectorTy() && VecC && VecC->containsUndefElement()) {
-    auto *OpVTy = cast<VectorType>(OpTy);
+    auto *OpVTy = cast<FixedVectorType>(OpTy);
     Constant *SafeReplacementConstant = nullptr;
     for (unsigned i = 0, e = OpVTy->getNumElements(); i != e; ++i) {
       if (!isa<UndefValue>(VecC->getAggregateElement(i))) {
@@ -5210,7 +5213,7 @@
     if (!ConstantIsOk(CI))
       return llvm::None;
   } else if (auto *VTy = dyn_cast<VectorType>(Type)) {
-    unsigned NumElts = VTy->getNumElements();
+    unsigned NumElts = cast<FixedVectorType>(VTy)->getNumElements();
     for (unsigned i = 0; i != NumElts; ++i) {
       Constant *Elt = C->getAggregateElement(i);
       if (!Elt)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -292,7 +292,7 @@
       }
     }
     assert(SafeC && "Must have safe constant for binop");
-    unsigned NumElts = InVTy->getNumElements();
+    unsigned NumElts = cast<FixedVectorType>(InVTy)->getNumElements();
    SmallVector<Constant *, 16> Out(NumElts);
     for (unsigned i = 0; i != NumElts; ++i) {
       Constant *C = In->getAggregateElement(i);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -1060,11 +1060,11 @@
       return nullptr;
     }
     if (auto *AT = dyn_cast<ArrayType>(VT)) {
-      if (AT->getNumElements() != UT->getNumElements())
+      if (AT->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
         return nullptr;
     } else {
       auto *ST = cast<StructType>(VT);
-      if (ST->getNumElements() != UT->getNumElements())
+      if (ST->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
         return nullptr;
       for (const auto *EltT : ST->elements()) {
         if (EltT != UT->getElementType())
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -1498,7 +1498,7 @@
   // If it's a constant vector, flip any negative values positive.
   if (isa<ConstantVector>(Op1) || isa<ConstantDataVector>(Op1)) {
     Constant *C = cast<Constant>(Op1);
-    unsigned VWidth = cast<VectorType>(C->getType())->getNumElements();
+    unsigned VWidth = cast<FixedVectorType>(C->getType())->getNumElements();
 
     bool hasNegative = false;
     bool hasMissing = false;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -304,8 +304,8 @@
     if (auto *CondVTy = dyn_cast<VectorType>(CondTy)) {
       if (!FIOpndTy->isVectorTy())
         return nullptr;
-      if (CondVTy->getNumElements() !=
-          cast<VectorType>(FIOpndTy)->getNumElements())
+      if (cast<FixedVectorType>(CondVTy)->getNumElements() !=
+          cast<FixedVectorType>(FIOpndTy)->getNumElements())
         return nullptr;
 
       // TODO: If the backend knew how to deal with casts better, we could
@@ -1969,7 +1969,8 @@
   if (!CondVal->getType()->isVectorTy() || !match(CondVal, m_Constant(CondC)))
     return nullptr;
 
-  unsigned NumElts = cast<VectorType>(CondVal->getType())->getNumElements();
+  unsigned NumElts =
+      cast<FixedVectorType>(CondVal->getType())->getNumElements();
   SmallVector<int, 16> Mask;
   Mask.reserve(NumElts);
   for (unsigned i = 0; i != NumElts; ++i) {
@@ -2015,7 +2016,7 @@
   // select (extelt V, Index), T, F --> select (splat V, Index), T, F
   // Splatting the extracted condition reduces code (we could directly create a
   // splat shuffle of the source vector to eliminate the intermediate step).
-  unsigned NumElts = Ty->getNumElements();
+  unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
   return IC.replaceOperand(Sel, 0,
                            IC.Builder.CreateVectorSplat(NumElts, Cond));
 }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -391,7 +391,8 @@
     if (VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
       if (VectorType *SrcVTy =
             dyn_cast<VectorType>(I->getOperand(0)->getType())) {
-        if (DstVTy->getNumElements() != SrcVTy->getNumElements())
+        if (cast<FixedVectorType>(DstVTy)->getNumElements() !=
+            cast<FixedVectorType>(SrcVTy)->getNumElements())
           // Don't touch a bitcast between vectors of different element counts.
           return nullptr;
       } else
@@ -798,7 +799,7 @@
       ArgWidth = 8; // Arg is x86_mmx, but treated as <8 x i8>.
     } else {
       auto Arg = II->getArgOperand(0);
-      auto ArgType = cast<VectorType>(Arg->getType());
+      auto ArgType = cast<FixedVectorType>(Arg->getType());
       ArgWidth = ArgType->getNumElements();
     }
 
@@ -1036,7 +1037,7 @@
       DemandedElts.getActiveBits() == 3)
     return nullptr;
 
-  auto *IIVTy = cast<VectorType>(II->getType());
+  auto *IIVTy = cast<FixedVectorType>(II->getType());
   unsigned VWidth = IIVTy->getNumElements();
   if (VWidth == 1)
     return nullptr;
@@ -1349,8 +1350,8 @@
     assert(Shuffle->getOperand(0)->getType() ==
            Shuffle->getOperand(1)->getType() &&
            "Expected shuffle operands to have same type");
-    unsigned OpWidth =
-        cast<VectorType>(Shuffle->getOperand(0)->getType())->getNumElements();
+    unsigned OpWidth = cast<FixedVectorType>(Shuffle->getOperand(0)->getType())
+                           ->getNumElements();
     // Handle trivial case of a splat. Only check the first element of LHS
     // operand.
     if (all_of(Shuffle->getShuffleMask(), [](int Elt) { return Elt == 0; }) &&
@@ -1451,7 +1452,8 @@
     // this constant vector to single insertelement instruction.
    // shufflevector V, C, <v1, v2, .., ci, .., vm> ->
     // insertelement V, C[ci], ci-n
-    if (OpWidth == Shuffle->getType()->getNumElements()) {
+    if (OpWidth ==
+        cast<FixedVectorType>(Shuffle->getType())->getNumElements()) {
       Value *Op = nullptr;
       Constant *Value = nullptr;
       unsigned Idx = -1u;
@@ -1538,7 +1540,7 @@
     // Vector->vector casts only.
     VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
     if (!VTy) break;
-    unsigned InVWidth = VTy->getNumElements();
+    unsigned InVWidth = cast<FixedVectorType>(VTy)->getNumElements();
     APInt InputDemandedElts(InVWidth, 0);
     UndefElts2 = APInt(InVWidth, 0);
     unsigned Ratio;
@@ -1763,7 +1765,7 @@
     case Intrinsic::x86_avx512_packusdw_512:
     case Intrinsic::x86_avx512_packuswb_512: {
       auto *Ty0 = II->getArgOperand(0)->getType();
-      unsigned InnerVWidth = cast<VectorType>(Ty0)->getNumElements();
+      unsigned InnerVWidth = cast<FixedVectorType>(Ty0)->getNumElements();
       assert(VWidth == (InnerVWidth * 2) && "Unexpected input size");
 
       unsigned NumLanes = Ty0->getPrimitiveSizeInBits() / 128;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -176,10 +176,11 @@
   // If this extractelement is using a bitcast from a vector of the same number
   // of elements, see if we can find the source element from the source vector:
   // extelt (bitcast VecX), IndexC --> bitcast X[IndexC]
-  auto *SrcTy = cast<VectorType>(X->getType());
+  auto *SrcTy = cast<FixedVectorType>(X->getType());
   Type *DestTy = Ext.getType();
   unsigned NumSrcElts = SrcTy->getNumElements();
-  unsigned NumElts = Ext.getVectorOperandType()->getNumElements();
+  unsigned NumElts =
+      cast<FixedVectorType>(Ext.getVectorOperandType())->getNumElements();
   if (NumSrcElts == NumElts)
     if (Value *Elt = findScalarElement(X, ExtIndexC))
       return new BitCastInst(Elt, DestTy);
@@ -259,7 +260,7 @@
 
 /// Find elements of V demanded by UserInstr.
 static APInt findDemandedEltsBySingleUser(Value *V, Instruction *UserInstr) {
-  unsigned VWidth = cast<VectorType>(V->getType())->getNumElements();
+  unsigned VWidth = cast<FixedVectorType>(V->getType())->getNumElements();
 
   // Conservatively assume that all elements are needed.
   APInt UsedElts(APInt::getAllOnesValue(VWidth));
@@ -277,7 +278,7 @@
   case Instruction::ShuffleVector: {
     ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(UserInstr);
     unsigned MaskNumElts =
-        cast<VectorType>(UserInstr->getType())->getNumElements();
+        cast<FixedVectorType>(UserInstr->getType())->getNumElements();
 
     UsedElts = APInt(VWidth, 0);
     for (unsigned i = 0; i < MaskNumElts; i++) {
@@ -303,7 +304,7 @@
 /// no user demands an element of V, then the corresponding bit
 /// remains unset in the returned value.
 static APInt findDemandedEltsByAllUsers(Value *V) {
-  unsigned VWidth = cast<VectorType>(V->getType())->getNumElements();
+  unsigned VWidth = cast<FixedVectorType>(V->getType())->getNumElements();
   APInt UnionUsedElts(VWidth, 0);
 
   for (const Use &U : V->uses()) {
@@ -460,7 +461,7 @@
                                          SmallVectorImpl<int> &Mask) {
   assert(LHS->getType() == RHS->getType() &&
          "Invalid CollectSingleShuffleElements");
-  unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
+  unsigned NumElts = cast<FixedVectorType>(V->getType())->getNumElements();
 
   if (isa<UndefValue>(V)) {
     Mask.assign(NumElts, -1);
@@ -502,7 +503,7 @@
       unsigned ExtractedIdx =
         cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
       unsigned NumLHSElts =
-          cast<VectorType>(LHS->getType())->getNumElements();
+          cast<FixedVectorType>(LHS->getType())->getNumElements();
 
       // This must be extracting from either LHS or RHS.
       if (EI->getOperand(0) == LHS || EI->getOperand(0) == RHS) {
@@ -532,8 +533,8 @@
 static void replaceExtractElements(InsertElementInst *InsElt,
                                    ExtractElementInst *ExtElt,
                                    InstCombiner &IC) {
-  VectorType *InsVecType = InsElt->getType();
-  VectorType *ExtVecType = ExtElt->getVectorOperandType();
+  auto *InsVecType = cast<FixedVectorType>(InsElt->getType());
+  auto *ExtVecType = cast<FixedVectorType>(ExtElt->getVectorOperandType());
   unsigned NumInsElts = InsVecType->getNumElements();
   unsigned NumExtElts = ExtVecType->getNumElements();
 
@@ -661,7 +662,7 @@
         }
 
         unsigned NumLHSElts =
-            cast<VectorType>(RHS->getType())->getNumElements();
+            cast<FixedVectorType>(RHS->getType())->getNumElements();
         Mask[InsertedIdx % NumElts] = NumLHSElts + ExtractedIdx;
         return std::make_pair(LR.first, RHS);
       }
@@ -670,7 +671,8 @@
       // We've gone as far as we can: anything on the other side of the
      // extractelement will already have been converted into a shuffle.
       unsigned NumLHSElts =
-          cast<VectorType>(EI->getOperand(0)->getType())->getNumElements();
+          cast<FixedVectorType>(EI->getOperand(0)->getType())
+              ->getNumElements();
       for (unsigned i = 0; i != NumElts; ++i)
         Mask.push_back(i == InsertedIdx ? ExtractedIdx : NumLHSElts + i);
       return std::make_pair(EI->getOperand(0), PermittedRHS);
@@ -854,7 +856,8 @@
   // For example:
   // inselt (shuf (inselt undef, X, 0), undef, <0,undef,0,undef>), X, 1
   //   --> shuf (inselt undef, X, 0), undef, <0,0,0,undef>
-  unsigned NumMaskElts = Shuf->getType()->getNumElements();
+  unsigned NumMaskElts =
+      cast<FixedVectorType>(Shuf->getType())->getNumElements();
   SmallVector<int, 16> NewMask(NumMaskElts);
   for (unsigned i = 0; i != NumMaskElts; ++i)
     NewMask[i] = i == IdxC ? 0 : Shuf->getMaskValue(i);
@@ -892,7 +895,8 @@
   // that same index value.
   // For example:
   // inselt (shuf X, IdMask), (extelt X, IdxC), IdxC --> shuf X, IdMask'
-  unsigned NumMaskElts = Shuf->getType()->getNumElements();
+  unsigned NumMaskElts =
+      cast<FixedVectorType>(Shuf->getType())->getNumElements();
   SmallVector<int, 16> NewMask(NumMaskElts);
   ArrayRef<int> OldMask = Shuf->getShuffleMask();
   for (unsigned i = 0; i != NumMaskElts; ++i) {
@@ -1222,7 +1226,7 @@
   // longer vector ops, but that may result in more expensive codegen.
   Type *ITy = I->getType();
   if (ITy->isVectorTy() &&
-      Mask.size() > cast<VectorType>(ITy)->getNumElements())
+      Mask.size() > cast<FixedVectorType>(ITy)->getNumElements())
     return false;
   for (Value *Operand : I->operands()) {
     if (!canEvaluateShuffled(Operand, Mask, Depth - 1))
@@ -1380,7 +1384,8 @@
     case Instruction::GetElementPtr: {
       SmallVector<Value*, 8> NewOps;
       bool NeedsRebuild =
-          (Mask.size() != cast<VectorType>(I->getType())->getNumElements());
+          (Mask.size() !=
+           cast<FixedVectorType>(I->getType())->getNumElements());
       for (int i = 0, e = I->getNumOperands(); i != e; ++i) {
         Value *V;
         // Recursively call evaluateInDifferentElementOrder on vector arguments
@@ -1435,7 +1440,7 @@
 static bool isShuffleExtractingFromLHS(ShuffleVectorInst &SVI,
                                        ArrayRef<int> Mask) {
   unsigned LHSElems =
-      cast<VectorType>(SVI.getOperand(0)->getType())->getNumElements();
+      cast<FixedVectorType>(SVI.getOperand(0)->getType())->getNumElements();
   unsigned MaskElems = Mask.size();
   unsigned BegIdx = Mask.front();
   unsigned EndIdx = Mask.back();
@@ -1567,7 +1572,8 @@
   // For example:
   // shuf (inselt undef, X, 2), undef, <2,2,undef>
   //   --> shuf (inselt undef, X, 0), undef, <0,0,undef>
-  unsigned NumMaskElts = Shuf.getType()->getNumElements();
+  unsigned NumMaskElts =
+      cast<FixedVectorType>(Shuf.getType())->getNumElements();
   SmallVector<int, 16> NewMask(NumMaskElts, 0);
   for (unsigned i = 0; i != NumMaskElts; ++i)
     if (Mask[i] == UndefMaskElem)
@@ -1585,7 +1591,7 @@
 
   // Canonicalize to choose from operand 0 first unless operand 1 is undefined.
   // Commuting undef to operand 0 conflicts with another canonicalization.
-  unsigned NumElts = Shuf.getType()->getNumElements();
+  unsigned NumElts = cast<FixedVectorType>(Shuf.getType())->getNumElements();
   if (!isa<UndefValue>(Shuf.getOperand(1)) &&
       Shuf.getMaskValue(0) >= (int)NumElts) {
     // TODO: Can we assert that both operands of a shuffle-select are not undef
@@ -1719,8 +1725,8 @@
   // and the source element type must be larger than the shuffle element type.
   Type *SrcType = X->getType();
   if (!SrcType->isVectorTy() || !SrcType->isIntOrIntVectorTy() ||
-      cast<VectorType>(SrcType)->getNumElements() !=
-          cast<VectorType>(DestType)->getNumElements() ||
+      cast<FixedVectorType>(SrcType)->getNumElements() !=
+          cast<FixedVectorType>(DestType)->getNumElements() ||
       SrcType->getScalarSizeInBits() % DestType->getScalarSizeInBits() != 0)
     return nullptr;
 
@@ -1764,10 +1770,11 @@
 
   // We need a narrow condition value. It must be extended with undef elements
   // and have the same number of elements as this shuffle.
-  unsigned NarrowNumElts = Shuf.getType()->getNumElements();
+  unsigned NarrowNumElts =
+      cast<FixedVectorType>(Shuf.getType())->getNumElements();
   Value *NarrowCond;
   if (!match(Cond, m_OneUse(m_Shuffle(m_Value(NarrowCond), m_Undef()))) ||
-      cast<VectorType>(NarrowCond->getType())->getNumElements() !=
+      cast<FixedVectorType>(NarrowCond->getType())->getNumElements() !=
           NarrowNumElts ||
       !cast<ShuffleVectorInst>(Cond)->isIdentityWithPadding())
     return nullptr;
@@ -1807,7 +1814,7 @@
   // new shuffle mask. Otherwise, copy the original mask element. Example:
   //   shuf (shuf X, Y, <C0, C1, C2, undef, C4>), undef, <0, undef, 2, 3> -->
   //   shuf X, Y, <C0, undef, C2, undef>
-  unsigned NumElts = Shuf.getType()->getNumElements();
+  unsigned NumElts = cast<FixedVectorType>(Shuf.getType())->getNumElements();
   SmallVector<int, 16> NewMask(NumElts);
   assert(NumElts < Mask.size() &&
          "Identity with extract must have less elements than its inputs");
@@ -1832,7 +1839,7 @@
   // TODO: This restriction could be removed if the insert has only one use
   // (because the transform would require a new length-changing shuffle).
   int NumElts = Mask.size();
-  if (NumElts != (int)(cast<VectorType>(V0->getType())->getNumElements()))
+  if (NumElts != (int)(cast<FixedVectorType>(V0->getType())->getNumElements()))
     return nullptr;
 
   // This is a specialization of a fold in SimplifyDemandedVectorElts. We may
@@ -1927,9 +1934,10 @@
   Value *X = Shuffle0->getOperand(0);
   Value *Y = Shuffle1->getOperand(0);
   if (X->getType() != Y->getType() ||
-      !isPowerOf2_32(Shuf.getType()->getNumElements()) ||
-      !isPowerOf2_32(Shuffle0->getType()->getNumElements()) ||
-      !isPowerOf2_32(cast<VectorType>(X->getType())->getNumElements()) ||
+      !isPowerOf2_32(cast<FixedVectorType>(Shuf.getType())->getNumElements()) ||
+      !isPowerOf2_32(
+          cast<FixedVectorType>(Shuffle0->getType())->getNumElements()) ||
+      !isPowerOf2_32(cast<FixedVectorType>(X->getType())->getNumElements()) ||
       isa<UndefValue>(X) || isa<UndefValue>(Y))
     return nullptr;
   assert(isa<UndefValue>(Shuffle0->getOperand(1)) &&
@@ -1940,8 +1948,8 @@
   // operands directly by adjusting the shuffle mask to account for the narrower
   // types:
   // shuf (widen X), (widen Y), Mask --> shuf X, Y, Mask'
-  int NarrowElts = cast<VectorType>(X->getType())->getNumElements();
-  int WideElts = Shuffle0->getType()->getNumElements();
+  int NarrowElts = cast<FixedVectorType>(X->getType())->getNumElements();
+  int WideElts = cast<FixedVectorType>(Shuffle0->getType())->getNumElements();
   assert(WideElts > NarrowElts && "Unexpected types for identity with padding");
 
   ArrayRef<int> Mask = Shuf.getShuffleMask();
@@ -1983,8 +1991,8 @@
     return replaceInstUsesWith(SVI, V);
 
   // shuffle x, x, mask --> shuffle x, undef, mask'
-  unsigned VWidth = SVI.getType()->getNumElements();
-  unsigned LHSWidth = cast<VectorType>(LHS->getType())->getNumElements();
+  unsigned VWidth = cast<FixedVectorType>(SVI.getType())->getNumElements();
+  unsigned LHSWidth = cast<FixedVectorType>(LHS->getType())->getNumElements();
   ArrayRef<int> Mask = SVI.getShuffleMask();
   Type *Int32Ty = Type::getInt32Ty(SVI.getContext());
 
@@ -1998,7 +2006,7 @@
   if (match(LHS, m_BitCast(m_Value(X))) && match(RHS, m_Undef()) &&
       X->getType()->isVectorTy() && VWidth == LHSWidth) {
     // Try to create a scaled mask constant.
-    auto *XType = cast<VectorType>(X->getType());
+    auto *XType = cast<FixedVectorType>(X->getType());
     unsigned XNumElts = XType->getNumElements();
     SmallVector<int, 16> ScaledMask;
     if (XNumElts >= VWidth) {
@@ -2106,7 +2114,7 @@
   if (isShuffleExtractingFromLHS(SVI, Mask)) {
     Value *V = LHS;
     unsigned MaskElems = Mask.size();
-    VectorType *SrcTy = cast<VectorType>(V->getType());
+    auto *SrcTy = cast<FixedVectorType>(V->getType());
     unsigned VecBitWidth = SrcTy->getPrimitiveSizeInBits().getFixedSize();
     unsigned SrcElemBitWidth = DL.getTypeSizeInBits(SrcTy->getElementType());
     assert(SrcElemBitWidth && "vector elements must have a bitwidth");
@@ -2224,11 +2232,11 @@
   if (LHSShuffle) {
     LHSOp0 = LHSShuffle->getOperand(0);
     LHSOp1 = LHSShuffle->getOperand(1);
-    LHSOp0Width = cast<VectorType>(LHSOp0->getType())->getNumElements();
+    LHSOp0Width = cast<FixedVectorType>(LHSOp0->getType())->getNumElements();
   }
   if (RHSShuffle) {
     RHSOp0 = RHSShuffle->getOperand(0);
-    RHSOp0Width = cast<VectorType>(RHSOp0->getType())->getNumElements();
+    RHSOp0Width = cast<FixedVectorType>(RHSOp0->getType())->getNumElements();
   }
   Value* newLHS = LHS;
   Value* newRHS = RHS;
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -913,7 +913,8 @@
       return nullptr;
 
     // If vectors, verify that they have the same number of elements.
-    if (SrcTy && SrcTy->getNumElements() != DestTy->getNumElements())
+    if (SrcTy && cast<FixedVectorType>(SrcTy)->getNumElements() !=
+                     cast<FixedVectorType>(DestTy)->getNumElements())
       return nullptr;
   }
 
@@ -2313,7 +2314,7 @@
       // gep (bitcast [c x ty]* X to <c x ty>*), Y, Z --> gep X, Y, Z
       auto areMatchingArrayAndVecTypes = [](Type *ArrTy, Type *VecTy,
                                             const DataLayout &DL) {
-        auto *VecVTy = cast<VectorType>(VecTy);
+        auto *VecVTy = cast<FixedVectorType>(VecTy);
         return ArrTy->getArrayElementType() == VecVTy->getElementType() &&
                ArrTy->getArrayNumElements() == VecVTy->getNumElements() &&
                DL.getTypeAllocSize(ArrTy) == DL.getTypeAllocSize(VecTy);