diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -9452,8 +9452,8 @@
   };
 
   auto extractHalf = [](Value *FullV, Value *HalfV) {
-    auto *FullVT = cast<VectorType>(FullV->getType());
-    auto *HalfVT = cast<VectorType>(HalfV->getType());
+    auto *FullVT = cast<FixedVectorType>(FullV->getType());
+    auto *HalfVT = cast<FixedVectorType>(HalfV->getType());
     return FullVT->getNumElements() == 2 * HalfVT->getNumElements();
   };
 
@@ -9473,7 +9473,7 @@
   // elements.
   int M1Start = -1;
   int M2Start = -1;
-  int NumElements = cast<VectorType>(Op1->getType())->getNumElements() * 2;
+  int NumElements = cast<FixedVectorType>(Op1->getType())->getNumElements() * 2;
   if (!ShuffleVectorInst::isExtractSubvectorMask(M1, NumElements, M1Start) ||
       !ShuffleVectorInst::isExtractSubvectorMask(M2, NumElements, M2Start) ||
       M1Start != M2Start || (M1Start != 0 && M2Start != (NumElements / 2)))
@@ -9605,7 +9605,7 @@
   unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType());
 
   // Ensure the number of vector elements is greater than 1.
-  if (VecTy->getNumElements() < 2)
+  if (cast<FixedVectorType>(VecTy)->getNumElements() < 2)
     return false;
 
   // Ensure the element type is legal.
@@ -9639,7 +9639,7 @@
 
   const DataLayout &DL = LI->getModule()->getDataLayout();
 
-  VectorType *VecTy = Shuffles[0]->getType();
+  auto *VecTy = Shuffles[0]->getType();
 
   // Skip if we do not have NEON and skip illegal vector types. We can
   // "legalize" wide vector types into multiple interleaved accesses as long as
@@ -9654,7 +9654,8 @@
   Type *EltTy = VecTy->getElementType();
   if (EltTy->isPointerTy())
     VecTy =
-        FixedVectorType::get(DL.getIntPtrType(EltTy), VecTy->getNumElements());
+        FixedVectorType::get(DL.getIntPtrType(EltTy),
+                             cast<FixedVectorType>(VecTy)->getNumElements());
 
   IRBuilder<> Builder(LI);
 
@@ -9664,8 +9665,9 @@
   if (NumLoads > 1) {
     // If we're going to generate more than one load, reset the sub-vector type
     // to something legal.
-    VecTy = FixedVectorType::get(VecTy->getElementType(),
-                                 VecTy->getNumElements() / NumLoads);
+    VecTy = FixedVectorType::get(
+        VecTy->getElementType(),
+        cast<FixedVectorType>(VecTy)->getNumElements() / NumLoads);
 
     // We will compute the pointer operand of each load from the original base
     // address using GEPs. Cast the base address to a pointer to the scalar
@@ -9693,8 +9695,9 @@
     // If we're generating more than one load, compute the base address of
     // subsequent loads as an offset from the previous.
     if (LoadCount > 0)
-      BaseAddr = Builder.CreateConstGEP1_32(VecTy->getElementType(), BaseAddr,
-                                            VecTy->getNumElements() * Factor);
+      BaseAddr = Builder.CreateConstGEP1_32(
+          VecTy->getElementType(), BaseAddr,
+          cast<FixedVectorType>(VecTy)->getNumElements() * Factor);
 
     CallInst *LdN = Builder.CreateCall(
         LdNFunc, Builder.CreateBitCast(BaseAddr, PtrTy), "ldN");
@@ -9709,8 +9712,9 @@
       // Convert the integer vector to pointer vector if the element is pointer.
       if (EltTy->isPointerTy())
         SubVec = Builder.CreateIntToPtr(
-            SubVec, FixedVectorType::get(SVI->getType()->getElementType(),
-                                         VecTy->getNumElements()));
+            SubVec, FixedVectorType::get(
+                        SVI->getType()->getElementType(),
+                        cast<FixedVectorType>(VecTy)->getNumElements()));
       SubVecs[SVI].push_back(SubVec);
     }
   }
@@ -9761,7 +9765,7 @@
   assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
          "Invalid interleave factor");
 
-  VectorType *VecTy = SVI->getType();
+  auto *VecTy = cast<FixedVectorType>(SVI->getType());
   assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store");
 
   unsigned LaneLen = VecTy->getNumElements() / Factor;
@@ -9786,7 +9790,8 @@
   // vectors to integer vectors.
   if (EltTy->isPointerTy()) {
     Type *IntTy = DL.getIntPtrType(EltTy);
-    unsigned NumOpElts = cast<VectorType>(Op0->getType())->getNumElements();
+    unsigned NumOpElts =
+        cast<FixedVectorType>(Op0->getType())->getNumElements();
 
     // Convert to the corresponding integer vector.
     auto *IntVecTy = FixedVectorType::get(IntTy, NumOpElts);
diff --git a/llvm/lib/Target/AArch64/AArch64StackTagging.cpp b/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
--- a/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
+++ b/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
@@ -264,8 +264,9 @@
       Type *EltTy = VecTy->getElementType();
       if (EltTy->isPointerTy()) {
         uint32_t EltSize = DL->getTypeSizeInBits(EltTy);
-        auto *NewTy = FixedVectorType::get(IntegerType::get(Ctx, EltSize),
-                                           VecTy->getNumElements());
+        auto *NewTy = FixedVectorType::get(
+            IntegerType::get(Ctx, EltSize),
+            cast<FixedVectorType>(VecTy)->getNumElements());
         V = IRB.CreatePointerCast(V, NewTy);
       }
     }
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -188,7 +188,8 @@
     // the element type fits into a register and the number of elements is a
     // power of 2 > 1.
     if (auto *DataTypeVTy = dyn_cast<VectorType>(DataType)) {
-      unsigned NumElements = DataTypeVTy->getNumElements();
+      unsigned NumElements =
+          cast<FixedVectorType>(DataTypeVTy)->getNumElements();
       unsigned EltSize = DataTypeVTy->getElementType()->getScalarSizeInBits();
       return NumElements > 1 && isPowerOf2_64(NumElements) && EltSize >= 8 &&
              EltSize <= 128 && isPowerOf2_64(EltSize);
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -212,7 +212,7 @@
   // elements in type Ty determine the vector width.
   auto toVectorTy = [&](Type *ArgTy) {
     return FixedVectorType::get(ArgTy->getScalarType(),
-                                cast<VectorType>(DstTy)->getNumElements());
+                                cast<FixedVectorType>(DstTy)->getNumElements());
   };
 
   // Exit early if DstTy is not a vector type whose elements are at least
@@ -714,8 +714,8 @@
     // have to promote the elements to v.2.
     ProfitableNumElements = 8;
 
-  if (cast<VectorType>(Ty)->getNumElements() < ProfitableNumElements) {
-    unsigned NumVecElts = cast<VectorType>(Ty)->getNumElements();
+  if (cast<FixedVectorType>(Ty)->getNumElements() < ProfitableNumElements) {
+    unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
     unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
     // We generate 2 instructions per vector element.
     return NumVectorizableInstsToAmortize * NumVecElts * 2;
@@ -734,7 +734,7 @@
                                  bool UseMaskForCond, bool UseMaskForGaps) {
   assert(Factor >= 2 && "Invalid interleave factor");
-  auto *VecVTy = cast<VectorType>(VecTy);
+  auto *VecVTy = cast<FixedVectorType>(VecTy);
 
   if (!UseMaskForCond && !UseMaskForGaps &&
       Factor <= TLI->getMaxSupportedInterleaveFactor()) {
@@ -761,7 +761,8 @@
   for (auto *I : Tys) {
     if (!I->isVectorTy())
       continue;
-    if (I->getScalarSizeInBits() * cast<VectorType>(I)->getNumElements() == 128)
+    if (I->getScalarSizeInBits() * cast<FixedVectorType>(I)->getNumElements() ==
+        128)
       Cost += getMemoryOpCost(Instruction::Store, I, Align(128), 0, CostKind) +
               getMemoryOpCost(Instruction::Load, I, Align(128), 0, CostKind);
   }
@@ -964,9 +965,10 @@
   case Instruction::Mul:
     return false;
   case Instruction::Add:
-    return ScalarBits * VTy->getNumElements() >= 128;
+    return ScalarBits * cast<FixedVectorType>(VTy)->getNumElements() >= 128;
   case Instruction::ICmp:
-    return (ScalarBits < 64) && (ScalarBits * VTy->getNumElements() >= 128);
+    return (ScalarBits < 64) &&
+           (ScalarBits * cast<FixedVectorType>(VTy)->getNumElements() >= 128);
   case Instruction::FCmp:
     return Flags.NoNaN;
   default:
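Note on the recurring pattern above: every `getNumElements()` call is now routed through `cast<FixedVectorType>`, because once scalable (SVE) vectors exist, a bare `VectorType` no longer implies a compile-time element count. A minimal sketch of the distinction, assuming LLVM's split vector-type hierarchy; `numElementsOrZero` is a hypothetical helper for illustration, not part of this patch:

#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

// Hypothetical helper: only FixedVectorType carries a fixed element count.
// A ScalableVectorType (<vscale x N x T>) would fail cast<FixedVectorType>.
static unsigned numElementsOrZero(Type *Ty) {
  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty))
    return FVTy->getNumElements(); // exact count, e.g. 4 for <4 x i32>
  return 0; // scalable or non-vector: no fixed count available
}

The hard `cast<FixedVectorType>` in the hunks above is deliberate: these fixed-width (NEON) code paths should assert if handed a scalable vector rather than silently compute a wrong element count.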