diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -255,7 +255,7 @@
   if (T->isIntegerTy())
     return B.getInt32Ty();
 
-  return VectorType::get(B.getInt32Ty(), cast<VectorType>(T)->getNumElements());
+  return FixedVectorType::get(B.getInt32Ty(), cast<FixedVectorType>(T));
 }
 
 bool AMDGPUCodeGenPrepare::isSigned(const BinaryOperator &I) const {
@@ -477,7 +477,7 @@
 static void extractValues(IRBuilder<> &Builder,
                           SmallVectorImpl<Value *> &Values, Value *V) {
-  VectorType *VT = dyn_cast<VectorType>(V->getType());
+  auto *VT = dyn_cast<FixedVectorType>(V->getType());
   if (!VT) {
     Values.push_back(V);
     return;
   }
@@ -777,7 +777,7 @@
   Value *Den = FDiv.getOperand(1);
   Value *NewFDiv = nullptr;
 
-  if (VectorType *VT = dyn_cast<VectorType>(FDiv.getType())) {
+  if (auto *VT = dyn_cast<FixedVectorType>(FDiv.getType())) {
     NewFDiv = UndefValue::get(VT);
 
     // FIXME: Doesn't do the right thing for cases where the vector is partially
@@ -1233,7 +1233,7 @@
   IRBuilder<> Builder(&I);
   Builder.SetCurrentDebugLocation(I.getDebugLoc());
 
-  if (VectorType *VT = dyn_cast<VectorType>(Ty)) {
+  if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
     NewDiv = UndefValue::get(VT);
 
     for (unsigned N = 0, E = VT->getNumElements(); N != E; ++N) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
@@ -186,7 +186,7 @@
   case Type::DoubleTyID:
     return "double";
   case Type::FixedVectorTyID: {
-    auto VecTy = cast<VectorType>(Ty);
+    auto VecTy = cast<FixedVectorType>(Ty);
     auto ElTy = VecTy->getElementType();
     auto NumElements = VecTy->getNumElements();
     return (Twine(getTypeName(ElTy, Signed)) + Twine(NumElements)).str();
@@ -633,7 +633,7 @@
   case Type::DoubleTyID:
     return "double";
   case Type::FixedVectorTyID: {
-    auto VecTy = cast<VectorType>(Ty);
+    auto VecTy = cast<FixedVectorType>(Ty);
     auto ElTy = VecTy->getElementType();
     auto NumElements = VecTy->getNumElements();
     return (Twine(getTypeName(ElTy, Signed)) + Twine(NumElements)).str();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
@@ -1126,8 +1126,8 @@
   Type* rTy = opr0->getType();
   Type* nTyS = eltType->isDoubleTy() ? B.getInt64Ty() : B.getInt32Ty();
   Type *nTy = nTyS;
-  if (const VectorType *vTy = dyn_cast<VectorType>(rTy))
-    nTy = VectorType::get(nTyS, vTy->getNumElements());
+  if (const auto *vTy = dyn_cast<FixedVectorType>(rTy))
+    nTy = FixedVectorType::get(nTyS, vTy);
   unsigned size = nTy->getScalarSizeInBits();
   opr_n = CI->getArgOperand(1);
   if (opr_n->getType()->isIntegerTy())
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
@@ -135,7 +135,7 @@
       continue;
     }
 
-    VectorType *VT = dyn_cast<VectorType>(ArgTy);
+    auto *VT = dyn_cast<FixedVectorType>(ArgTy);
     bool IsV3 = VT && VT->getNumElements() == 3;
     bool DoShiftOpt = Size < 32 && !ArgTy->isAggregateType();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPrintfRuntimeBinding.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPrintfRuntimeBinding.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUPrintfRuntimeBinding.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPrintfRuntimeBinding.cpp
@@ -218,10 +218,10 @@
         //
         if (ArgSize % DWORD_ALIGN != 0) {
           llvm::Type *ResType = llvm::Type::getInt32Ty(Ctx);
-          VectorType *LLVMVecType = llvm::dyn_cast<VectorType>(ArgType);
+          auto *LLVMVecType = llvm::dyn_cast<FixedVectorType>(ArgType);
           int NumElem = LLVMVecType ? LLVMVecType->getNumElements() : 1;
           if (LLVMVecType && NumElem > 1)
-            ResType = llvm::VectorType::get(ResType, NumElem);
+            ResType = llvm::FixedVectorType::get(ResType, NumElem);
           Builder.SetInsertPoint(CI);
           Builder.SetCurrentDebugLocation(CI->getDebugLoc());
           if (OpConvSpecifiers[ArgCount - 1] == 'x' ||
@@ -479,7 +479,7 @@
           }
         } else if (isa<VectorType>(ArgType)) {
           Type *IType = NULL;
-          uint32_t EleCount = cast<VectorType>(ArgType)->getNumElements();
+          uint32_t EleCount = cast<FixedVectorType>(ArgType)->getNumElements();
           uint32_t EleSize = ArgType->getScalarSizeInBits();
           uint32_t TotalSize = EleCount * EleSize;
           if (EleCount == 3) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -297,9 +297,9 @@
   return CI;
 }
 
-static VectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
-  return VectorType::get(ArrayTy->getElementType(),
-                         ArrayTy->getNumElements());
+static FixedVectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
+  return FixedVectorType::get(ArrayTy->getElementType(),
+                              ArrayTy->getNumElements());
 }
 
 static Value *stripBitcasts(Value *V) {
@@ -390,7 +390,7 @@
   }
 
   Type *AllocaTy = Alloca->getAllocatedType();
-  VectorType *VectorTy = dyn_cast<VectorType>(AllocaTy);
+  auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
   if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
     if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
         ArrayTy->getNumElements() > 0)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp b/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
@@ -208,8 +208,8 @@
 #ifndef NDEBUG
 bool AMDGPURewriteOutArguments::isVec3ToVec4Shuffle(Type *Ty0, Type* Ty1) const {
-  VectorType *VT0 = dyn_cast<VectorType>(Ty0);
-  VectorType *VT1 = dyn_cast<VectorType>(Ty1);
+  auto *VT0 = dyn_cast<FixedVectorType>(Ty0);
+  auto *VT1 = dyn_cast<FixedVectorType>(Ty1);
   if (!VT0 || !VT1)
     return false;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -909,7 +909,7 @@
 unsigned GCNTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *VT,
                                     int Index, VectorType *SubTp) {
   if (ST->hasVOP3PInsts()) {
-    if (VT->getNumElements() == 2 &&
+    if (cast<FixedVectorType>(VT)->getNumElements() == 2 &&
         DL.getTypeSizeInBits(VT->getElementType()) == 16) {
       // With op_sel VOP3P instructions freely can access the low half or high
       // half of a register, so any swizzle is free.
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -938,9 +938,8 @@
 static EVT memVTFromImageData(Type *Ty, unsigned DMaskLanes) {
   assert(DMaskLanes != 0);
 
-  if (auto *VT = dyn_cast<VectorType>(Ty)) {
-    unsigned NumElts = std::min(DMaskLanes,
-                                static_cast<unsigned>(VT->getNumElements()));
+  if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
+    unsigned NumElts = std::min(DMaskLanes, VT->getNumElements());
     return EVT::getVectorVT(Ty->getContext(),
                            EVT::getEVT(VT->getElementType()),
                            NumElts);
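
---
Reviewer note (illustrative sketch, not part of the patch): every hunk applies the
same mechanical pattern. Since the scalable-vector split, VectorType is the common
base of FixedVectorType and ScalableVectorType, and an element count is only
well-defined on the fixed variant, so casts that go on to query getNumElements()
are narrowed to FixedVectorType. A minimal standalone example of the pattern,
assuming nothing beyond the LLVM header it includes (fixedEltCount is a
hypothetical helper, not code from this patch):

    #include "llvm/IR/DerivedTypes.h"
    using namespace llvm;

    // Returns the element count of Ty when it is a fixed-length vector,
    // otherwise 1. dyn_cast<FixedVectorType> yields null for scalar and
    // scalable-vector types, so getNumElements() is only reached where the
    // count is a compile-time constant.
    static unsigned fixedEltCount(Type *Ty) {
      if (auto *FVT = dyn_cast<FixedVectorType>(Ty))
        return FVT->getNumElements();
      return 1;
    }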