diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -17757,7 +17757,7 @@
          "Unmatched number of shufflevectors and indices");
 
   VectorType *VecTy = Shuffles[0]->getType();
-  Type *EltTy = VecTy->getVectorElementType();
+  Type *EltTy = VecTy->getElementType();
 
   const DataLayout &DL = LI->getModule()->getDataLayout();
 
@@ -17772,8 +17772,7 @@
   // A pointer vector can not be the return type of the ldN intrinsics. Need to
   // load integer vectors first and then convert to pointer vectors.
   if (EltTy->isPointerTy())
-    VecTy =
-        VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements());
+    VecTy = VectorType::get(DL.getIntPtrType(EltTy), VecTy->getNumElements());
 
   IRBuilder<> Builder(LI);
 
@@ -17783,15 +17782,15 @@
   if (NumLoads > 1) {
     // If we're going to generate more than one load, reset the sub-vector type
     // to something legal.
-    VecTy = VectorType::get(VecTy->getVectorElementType(),
-                            VecTy->getVectorNumElements() / NumLoads);
+    VecTy = VectorType::get(VecTy->getElementType(),
+                            VecTy->getNumElements() / NumLoads);
 
     // We will compute the pointer operand of each load from the original base
     // address using GEPs. Cast the base address to a pointer to the scalar
     // element type.
     BaseAddr = Builder.CreateBitCast(
-        BaseAddr, VecTy->getVectorElementType()->getPointerTo(
-                      LI->getPointerAddressSpace()));
+        BaseAddr,
+        VecTy->getElementType()->getPointerTo(LI->getPointerAddressSpace()));
   }
 
   assert(isTypeLegal(EVT::getEVT(VecTy)) && "Illegal vldN vector type!");
@@ -17816,8 +17815,8 @@
            "expected interleave factor of 2 or 4 for MVE");
     Intrinsic::ID LoadInts =
         Factor == 2 ? Intrinsic::arm_mve_vld2q : Intrinsic::arm_mve_vld4q;
-    Type *VecEltTy = VecTy->getVectorElementType()->getPointerTo(
-        LI->getPointerAddressSpace());
+    Type *VecEltTy =
+        VecTy->getElementType()->getPointerTo(LI->getPointerAddressSpace());
     Type *Tys[] = {VecTy, VecEltTy};
     Function *VldnFunc =
         Intrinsic::getDeclaration(LI->getModule(), LoadInts, Tys);
@@ -17837,9 +17836,8 @@
     // If we're generating more than one load, compute the base address of
     // subsequent loads as an offset from the previous.
     if (LoadCount > 0)
-      BaseAddr =
-          Builder.CreateConstGEP1_32(VecTy->getVectorElementType(), BaseAddr,
-                                     VecTy->getVectorNumElements() * Factor);
+      BaseAddr = Builder.CreateConstGEP1_32(VecTy->getElementType(), BaseAddr,
+                                            VecTy->getNumElements() * Factor);
 
     CallInst *VldN = createLoadIntrinsic(BaseAddr);
 
@@ -17854,8 +17852,8 @@
       // Convert the integer vector to pointer vector if the element is pointer.
       if (EltTy->isPointerTy())
         SubVec = Builder.CreateIntToPtr(
-            SubVec, VectorType::get(SV->getType()->getVectorElementType(),
-                                    VecTy->getVectorNumElements()));
+            SubVec, VectorType::get(SV->getType()->getElementType(),
+                                    VecTy->getNumElements()));
 
       SubVecs[SV].push_back(SubVec);
     }
 
@@ -17908,11 +17906,10 @@
          "Invalid interleave factor");
 
   VectorType *VecTy = SVI->getType();
-  assert(VecTy->getVectorNumElements() % Factor == 0 &&
-         "Invalid interleaved store");
+  assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store");
 
-  unsigned LaneLen = VecTy->getVectorNumElements() / Factor;
-  Type *EltTy = VecTy->getVectorElementType();
+  unsigned LaneLen = VecTy->getNumElements() / Factor;
+  Type *EltTy = VecTy->getElementType();
   VectorType *SubVecTy = VectorType::get(EltTy, LaneLen);
 
   const DataLayout &DL = SI->getModule()->getDataLayout();
@@ -17935,8 +17932,8 @@
     Type *IntTy = DL.getIntPtrType(EltTy);
 
     // Convert to the corresponding integer vector.
-    Type *IntVecTy =
-        VectorType::get(IntTy, Op0->getType()->getVectorNumElements());
+    Type *IntVecTy = VectorType::get(
+        IntTy, cast<VectorType>(Op0->getType())->getNumElements());
     Op0 = Builder.CreatePtrToInt(Op0, IntVecTy);
     Op1 = Builder.CreatePtrToInt(Op1, IntVecTy);
 
@@ -17950,14 +17947,14 @@
     // If we're going to generate more than one store, reset the lane length
     // and sub-vector type to something legal.
     LaneLen /= NumStores;
-    SubVecTy = VectorType::get(SubVecTy->getVectorElementType(), LaneLen);
+    SubVecTy = VectorType::get(SubVecTy->getElementType(), LaneLen);
 
     // We will compute the pointer operand of each store from the original base
     // address using GEPs. Cast the base address to a pointer to the scalar
     // element type.
     BaseAddr = Builder.CreateBitCast(
-        BaseAddr, SubVecTy->getVectorElementType()->getPointerTo(
-                      SI->getPointerAddressSpace()));
+        BaseAddr,
+        SubVecTy->getElementType()->getPointerTo(SI->getPointerAddressSpace()));
   }
 
   assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!");
@@ -17987,7 +17984,7 @@
            "expected interleave factor of 2 or 4 for MVE");
     Intrinsic::ID StoreInts =
         Factor == 2 ? Intrinsic::arm_mve_vst2q : Intrinsic::arm_mve_vst4q;
-    Type *EltPtrTy = SubVecTy->getVectorElementType()->getPointerTo(
+    Type *EltPtrTy = SubVecTy->getElementType()->getPointerTo(
         SI->getPointerAddressSpace());
     Type *Tys[] = {EltPtrTy, SubVecTy};
     Function *VstNFunc =
@@ -18009,7 +18006,7 @@
     // If we generating more than one store, we compute the base address of
     // subsequent stores as an offset from the previous.
     if (StoreCount > 0)
-      BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getVectorElementType(),
+      BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getElementType(),
                                             BaseAddr, LaneLen * Factor);
 
     SmallVector<Value *, 4> Shuffles;
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -434,7 +434,7 @@
                         Opcode == Instruction::ExtractElement)) {
     // Cross-class copies are expensive on many microarchitectures,
     // so assume they are expensive by default.
-    if (ValTy->getVectorElementType()->isIntegerTy())
+    if (cast<VectorType>(ValTy)->getElementType()->isIntegerTy())
       return 3;
 
     // Even if it's not a cross class copy, this likely leads to mixing
@@ -452,7 +452,7 @@
     // result anyway.
     return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index),
                     ST->getMVEVectorCostFactor()) *
-           ValTy->getVectorNumElements() / 2;
+           cast<VectorType>(ValTy)->getNumElements() / 2;
   }
 
   return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
@@ -794,8 +794,8 @@
     return LT.first * BaseCost;
 
   // Else this is expand, assume that we need to scalarize this op.
-  if (Ty->isVectorTy()) {
-    unsigned Num = Ty->getVectorNumElements();
+  if (auto *VTy = dyn_cast<VectorType>(Ty)) {
+    unsigned Num = VTy->getNumElements();
     unsigned Cost = getArithmeticInstrCost(Opcode, Ty->getScalarType());
     // Return the cost of multiple scalar invocation plus the cost of
     // inserting and extracting the values.
@@ -812,7 +812,7 @@
 
   if (ST->hasNEON() && Src->isVectorTy() &&
       (Alignment && *Alignment != Align(16)) &&
-      Src->getVectorElementType()->isDoubleTy()) {
+      cast<VectorType>(Src)->getElementType()->isDoubleTy()) {
     // Unaligned loads/stores are extremely inefficient.
     // We need 4 uops for vst.1/vld.1 vs 1uop for vldr/vstr.
     return LT.first * 4;
@@ -835,7 +835,7 @@
 
   if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits &&
       !UseMaskForCond && !UseMaskForGaps) {
-    unsigned NumElts = VecTy->getVectorNumElements();
+    unsigned NumElts = cast<VectorType>(VecTy)->getNumElements();
     auto *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);
 
     // vldN/vstN only support legal vector types of size 64 or 128 in bits.
@@ -1403,7 +1403,7 @@
   case Instruction::ICmp:
   case Instruction::Add:
     return ScalarBits < 64 &&
-           (ScalarBits * Ty->getVectorNumElements()) % 128 == 0;
+           (ScalarBits * cast<VectorType>(Ty)->getNumElements()) % 128 == 0;
   default:
     llvm_unreachable("Unhandled reduction opcode");
   }
diff --git a/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp b/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
--- a/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
+++ b/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
@@ -157,8 +157,8 @@
   }
   Offsets = GEP->getOperand(1);
   // Paranoid check whether the number of parallel lanes is the same
-  assert(Ty->getVectorNumElements() ==
-         Offsets->getType()->getVectorNumElements());
+  assert(cast<VectorType>(Ty)->getNumElements() ==
+         cast<VectorType>(Offsets->getType())->getNumElements());
   // Only offsets can be integrated into an arm gather, any smaller
   // type would have to be sign extended by the gep - and arm gathers can only
   // zero extend. Additionally, the offsets do have to originate from a zext of
@@ -168,7 +168,7 @@
     return nullptr;
   if (ZExtInst *ZextOffs = dyn_cast<ZExtInst>(Offsets))
     Offsets = ZextOffs->getOperand(0);
-  else if (!(Offsets->getType()->getVectorNumElements() == 4 &&
+  else if (!(cast<VectorType>(Offsets->getType())->getNumElements() == 4 &&
              Offsets->getType()->getScalarSizeInBits() == 32))
     return nullptr;
 
@@ -191,9 +191,9 @@
 void MVEGatherScatterLowering::lookThroughBitcast(Value *&Ptr) {
   // Look through bitcast instruction if #elements is the same
   if (auto *BitCast = dyn_cast<BitCastInst>(Ptr)) {
-    Type *BCTy = BitCast->getType();
-    Type *BCSrcTy = BitCast->getOperand(0)->getType();
-    if (BCTy->getVectorNumElements() == BCSrcTy->getVectorNumElements()) {
+    auto *BCTy = cast<VectorType>(BitCast->getType());
+    auto *BCSrcTy = cast<VectorType>(BitCast->getOperand(0)->getType());
+    if (BCTy->getNumElements() == BCSrcTy->getNumElements()) {
       LLVM_DEBUG(
           dbgs() << "masked gathers/scatters: looking through bitcast\n");
       Ptr = BitCast->getOperand(0);
@@ -223,14 +223,14 @@
   // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
   // Attempt to turn the masked gather in I into a MVE intrinsic
   // Potentially optimising the addressing modes as we do so.
-  Type *Ty = I->getType();
+  auto *Ty = cast<VectorType>(I->getType());
   Value *Ptr = I->getArgOperand(0);
   unsigned Alignment = cast<ConstantInt>(I->getArgOperand(1))->getZExtValue();
   Value *Mask = I->getArgOperand(2);
   Value *PassThru = I->getArgOperand(3);
 
-  if (!isLegalTypeAndAlignment(Ty->getVectorNumElements(),
-                               Ty->getScalarSizeInBits(), Alignment))
+  if (!isLegalTypeAndAlignment(Ty->getNumElements(), Ty->getScalarSizeInBits(),
+                               Alignment))
     return nullptr;
   lookThroughBitcast(Ptr);
   assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
@@ -267,9 +267,9 @@
                                                            Value *Ptr,
                                                            IRBuilder<> &Builder) {
   using namespace PatternMatch;
-  Type *Ty = I->getType();
+  auto *Ty = cast<VectorType>(I->getType());
   LLVM_DEBUG(dbgs() << "masked gathers: loading from vector of pointers\n");
-  if (Ty->getVectorNumElements() != 4 || Ty->getScalarSizeInBits() != 32)
+  if (Ty->getNumElements() != 4 || Ty->getScalarSizeInBits() != 32)
     // Can't build an intrinsic for this
     return nullptr;
   Value *Mask = I->getArgOperand(2);
@@ -357,11 +357,12 @@
   Value *Input = I->getArgOperand(0);
   Value *Ptr = I->getArgOperand(1);
   unsigned Alignment = cast<ConstantInt>(I->getArgOperand(2))->getZExtValue();
-  Type *Ty = Input->getType();
+  auto *Ty = cast<VectorType>(Input->getType());
 
-  if (!isLegalTypeAndAlignment(Ty->getVectorNumElements(),
-                               Ty->getScalarSizeInBits(), Alignment))
+  if (!isLegalTypeAndAlignment(Ty->getNumElements(), Ty->getScalarSizeInBits(),
+                               Alignment))
     return nullptr;
+
   lookThroughBitcast(Ptr);
   assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
 
@@ -386,9 +387,9 @@
   using namespace PatternMatch;
   Value *Input = I->getArgOperand(0);
   Value *Mask = I->getArgOperand(3);
-  Type *Ty = Input->getType();
+  auto *Ty = cast<VectorType>(Input->getType());
   // Only QR variants allow truncating
-  if (!(Ty->getVectorNumElements() == 4 && Ty->getScalarSizeInBits() == 32)) {
+  if (!(Ty->getNumElements() == 4 && Ty->getScalarSizeInBits() == 32)) {
     // Can't build an intrinsic for this
     return nullptr;
   }