diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
@@ -438,7 +438,7 @@
   Type *const Ty = I.getType();
   const unsigned TyBitWidth = DL->getTypeSizeInBits(Ty);
-  Type *const VecTy = VectorType::get(B.getInt32Ty(), 2);
+  auto *const VecTy = FixedVectorType::get(B.getInt32Ty(), 2);
 
   // This is the value in the atomic operation we need to combine in order to
   // reduce the number of atomic operations.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
@@ -598,7 +598,7 @@
     if (Size <= 8)
       PtrElemTy = Type::getIntNTy(Ctx, Size * 8);
     else
-      PtrElemTy = VectorType::get(Type::getInt64Ty(Ctx), Size / 8);
+      PtrElemTy = FixedVectorType::get(Type::getInt64Ty(Ctx), Size / 8);
     unsigned PtrArgLoc = CI->getNumArgOperands() - 3;
     auto PtrArg = CI->getArgOperand(PtrArgLoc);
     unsigned PtrArgAS = PtrArg->getType()->getPointerAddressSpace();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULibFunc.cpp b/llvm/lib/Target/AMDGPU/AMDGPULibFunc.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPULibFunc.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULibFunc.cpp
@@ -902,7 +902,7 @@
     return nullptr;
   }
   if (P.VectorSize > 1)
-    T = VectorType::get(T, P.VectorSize);
+    T = FixedVectorType::get(T, P.VectorSize);
   if (P.PtrKind != AMDGPULibFunc::BYVALUE)
     T = useAddrSpace ? T->getPointerTo((P.PtrKind & AMDGPULibFunc::ADDR_SPACE)
                                        - 1)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
@@ -167,7 +167,7 @@
     }
 
     if (IsV3 && Size >= 32) {
-      V4Ty = VectorType::get(VT->getElementType(), 4);
+      V4Ty = FixedVectorType::get(VT->getElementType(), 4);
       // Use the hack that clang uses to avoid SelectionDAG ruining v3 loads
       AdjustedArgTy = V4Ty;
     }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPrintfRuntimeBinding.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPrintfRuntimeBinding.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUPrintfRuntimeBinding.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPrintfRuntimeBinding.cpp
@@ -516,7 +516,7 @@
             break;
           }
           if (EleCount > 1) {
-            IType = dyn_cast<Type>(VectorType::get(IType, EleCount));
+            IType = FixedVectorType::get(IType, EleCount);
           }
           Arg = new BitCastInst(Arg, IType, "PrintArgVect", Brnch);
           WhatToStore.push_back(Arg);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -334,12 +334,12 @@
       SrcAddrSpace == AMDGPUAS::REGION_ADDRESS ||
       DestAddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
       DestAddrSpace == AMDGPUAS::REGION_ADDRESS) {
-    return VectorType::get(Type::getInt32Ty(Context), 2);
+    return FixedVectorType::get(Type::getInt32Ty(Context), 2);
   }
 
   // Global memory works best with 16-byte accesses. Private memory will also
   // hit this, although they'll be decomposed.
-  return VectorType::get(Type::getInt32Ty(Context), 4);
+  return FixedVectorType::get(Type::getInt32Ty(Context), 4);
 }
 
 void GCNTTIImpl::getMemcpyLoopResidualLoweringType(