diff --git a/llvm/include/llvm/IR/Type.h b/llvm/include/llvm/IR/Type.h
--- a/llvm/include/llvm/IR/Type.h
+++ b/llvm/include/llvm/IR/Type.h
@@ -236,6 +236,9 @@
     return getTypeID() == ScalableVectorTyID || getTypeID() == FixedVectorTyID;
   }
 
+  /// Return true if this is a scalable type.
+  bool isScalableType() const { return getTypeID() == ScalableVectorTyID; }
+
   /// Return true if this type could be converted with a lossless BitCast to
   /// type 'Ty'. For example, i8* to i32*. BitCasts are valid for types of the
   /// same size only where no re-interpretation of the bits is done.
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -492,7 +492,7 @@
 
   // Don't attempt to analyze GEPs if index scale is not a compile-time
  // constant.
-  if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
+  if (GEPOp->getSourceElementType()->isScalableType()) {
     Decomposed.Base = V;
     Decomposed.HasCompileTimeConstantScale = false;
     return Decomposed;
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -542,7 +542,7 @@
 Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
                                           const DataLayout &DL) {
   // Bail out early. Not expect to load from scalable global variable.
-  if (isa<ScalableVectorType>(LoadTy))
+  if (LoadTy->isScalableType())
     return nullptr;
 
   auto *PTy = cast<PointerType>(C->getType());
@@ -871,7 +871,7 @@
   Type *SrcElemTy = GEP->getSourceElementType();
   Type *ResElemTy = GEP->getResultElementType();
   Type *ResTy = GEP->getType();
-  if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
+  if (!SrcElemTy->isSized() || SrcElemTy->isScalableType())
     return nullptr;
 
   if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -4340,7 +4340,7 @@
   if (Q.isUndefValue(Ops[0]))
     return UndefValue::get(GEPTy);
 
-  bool IsScalableVec = isa<ScalableVectorType>(SrcTy);
+  bool IsScalable = SrcTy->isScalableType();
 
   if (Ops.size() == 2) {
     // getelementptr P, 0 -> P.
@@ -4348,7 +4348,7 @@
       return Ops[0];
 
     Type *Ty = SrcTy;
-    if (!IsScalableVec && Ty->isSized()) {
+    if (!IsScalable && Ty->isSized()) {
       Value *P;
       uint64_t C;
       uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
@@ -4398,7 +4398,7 @@
     }
   }
 
-  if (!IsScalableVec && Q.DL.getTypeAllocSize(LastType) == 1 &&
+  if (!IsScalable && Q.DL.getTypeAllocSize(LastType) == 1 &&
       all_of(Ops.slice(1).drop_back(1),
              [](Value *Idx) { return match(Idx, m_Zero()); })) {
     unsigned IdxWidth =
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -210,9 +210,9 @@
                                 const Instruction *CtxI,
                                 const DominatorTree *DT,
                                 const TargetLibraryInfo *TLI) {
-  // For unsized types or scalable vectors we don't know exactly how many bytes
+  // For unsized types or scalable types we don't know exactly how many bytes
   // are dereferenced, so bail out.
-  if (!Ty->isSized() || isa<ScalableVectorType>(Ty))
+  if (!Ty->isSized() || Ty->isScalableType())
     return false;
 
   // When dereferenceability information is provided by a dereferenceable
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -664,7 +664,7 @@
   if (!I.getAllocatedType()->isSized())
     return unknown();
 
-  if (isa<ScalableVectorType>(I.getAllocatedType()))
+  if (I.getAllocatedType()->isScalableType())
     return unknown();
 
   APInt Size(IntTyBits, DL.getTypeAllocSize(I.getAllocatedType()));
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -7401,7 +7401,7 @@
   // whereas scalable vectors would have to be shifted by
   // <2log(vscale) + number of bits> in order to store the
   // low/high parts. Bailing out for now.
-  if (isa<ScalableVectorType>(StoreType))
+  if (StoreType->isScalableType())
     return false;
 
   if (!DL.typeSizeEqualsStoreSize(StoreType) ||
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -102,8 +102,8 @@
   // introduction of 'vscale' into the calculations. It seems better to
   // bail out for this case too until we've done a proper cost-benefit
   // analysis.
-  bool AllocIsScalable = isa<ScalableVectorType>(AllocElTy);
-  bool CastIsScalable = isa<ScalableVectorType>(CastElTy);
+  bool AllocIsScalable = AllocElTy->isScalableType();
+  bool CastIsScalable = CastElTy->isScalableType();
   if (AllocIsScalable != CastIsScalable) return nullptr;
 
   Align AllocElTyAlign = DL.getABITypeAlign(AllocElTy);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -847,7 +847,7 @@
   Type *SourceElementType = GEPI->getSourceElementType();
   // Size information about scalable vectors is not available, so we cannot
   // deduce whether indexing at n is undefined behaviour or not. Bail out.
-  if (isa<ScalableVectorType>(SourceElementType))
+  if (SourceElementType->isScalableType())
     return false;
 
   Type *AllocTy = GetElementPtrInst::getIndexedType(SourceElementType, Ops);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -130,7 +130,7 @@
   if (Depth == MaxAnalysisRecursionDepth)
     return nullptr;
 
-  if (isa<ScalableVectorType>(VTy))
+  if (VTy->isScalableType())
     return nullptr;
 
   Instruction *I = dyn_cast<Instruction>(V);
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1853,7 +1853,7 @@
   SmallVector<Value *, 8> Ops(GEP.operands());
   Type *GEPType = GEP.getType();
   Type *GEPEltType = GEP.getSourceElementType();
-  bool IsGEPSrcEleScalable = isa<ScalableVectorType>(GEPEltType);
+  bool IsGEPSrcEleScalable = GEPEltType->isScalableType();
   if (Value *V = SimplifyGEPInst(GEPEltType, Ops, SQ.getWithInstruction(&GEP)))
     return replaceInstUsesWith(GEP, V);
 
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -784,7 +784,7 @@
         LI.getPointerAddressSpace() != DL.getAllocaAddrSpace())
       return PI.setAborted(&LI);
 
-    if (isa<ScalableVectorType>(LI.getType()))
+    if (LI.getType()->isScalableType())
       return PI.setAborted(&LI);
 
     uint64_t Size = DL.getTypeStoreSize(LI.getType()).getFixedSize();
@@ -802,7 +802,7 @@
         SI.getPointerAddressSpace() != DL.getAllocaAddrSpace())
       return PI.setAborted(&SI);
 
-    if (isa<ScalableVectorType>(ValOp->getType()))
+    if (ValOp->getType()->isScalableType())
      return PI.setAborted(&SI);
 
    uint64_t Size = DL.getTypeStoreSize(ValOp->getType()).getFixedSize();
@@ -1549,7 +1549,7 @@
   Type *ElementTy = Ty->getElementType();
   if (!ElementTy->isSized())
     return nullptr; // We can't GEP through an unsized element.
-  if (isa<ScalableVectorType>(ElementTy))
+  if (ElementTy->isScalableType())
     return nullptr;
   APInt ElementSize(Offset.getBitWidth(),
                     DL.getTypeAllocSize(ElementTy).getFixedSize());
@@ -4607,7 +4607,7 @@
 
   // Skip alloca forms that this analysis can't handle.
   auto *AT = AI.getAllocatedType();
-  if (AI.isArrayAllocation() || !AT->isSized() || isa<ScalableVectorType>(AT) ||
+  if (AI.isArrayAllocation() || !AT->isSized() || AT->isScalableType() ||
       DL.getTypeAllocSize(AT).getFixedSize() == 0)
     return false;
 
@@ -4730,7 +4730,7 @@
   for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
        I != E; ++I) {
     if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
-      if (isa<ScalableVectorType>(AI->getAllocatedType())) {
+      if (AI->getAllocatedType()->isScalableType()) {
         if (isAllocaPromotable(AI))
           PromotableAllocas.push_back(AI);
       } else {
diff --git a/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp b/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
--- a/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
+++ b/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
@@ -932,9 +932,9 @@
   IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
   if (II) {
     // The scalarization code below does not work for scalable vectors.
-    if (isa<ScalableVectorType>(II->getType()) ||
+    if (II->getType()->isScalableType() ||
         any_of(II->arg_operands(),
-               [](Value *V) { return isa<ScalableVectorType>(V->getType()); }))
+               [](Value *V) { return V->getType()->isScalableType(); }))
       return false;
 
     switch (II->getIntrinsicID()) {
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -7557,7 +7557,7 @@
   for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
     // Skip instructions with scalable type. The num of elements is unknown at
     // compile-time for scalable type.
-    if (isa<ScalableVectorType>(it->getType()))
+    if (it->getType()->isScalableType())
       continue;
 
     // Skip instructions marked for the deletion.
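
Note for reviewers: every change above is a mechanical rewrite of the isa<ScalableVectorType>(Ty) idiom into the Type::isScalableType() query introduced in Type.h, so the two forms are semantically identical today; the helper merely packages the ScalableVectorTyID check behind a name that reads as intent. A minimal sketch of the before/after shape at a call site follows; canAnalyzeIndexing and its body are hypothetical, invented for illustration, while Type, isSized(), and the new isScalableType() are the real entities touched by this patch.

  // Hypothetical call site (illustration only, not part of the patch).
  #include "llvm/IR/DerivedTypes.h" // ScalableVectorType, used by the old idiom
  #include "llvm/IR/Type.h"         // Type::isScalableType(), added above

  static bool canAnalyzeIndexing(llvm::Type *SrcElemTy) {
    // Old idiom: name the derived type at every call site:
    //   if (llvm::isa<llvm::ScalableVectorType>(SrcElemTy))
    //     return false;
    // New idiom: ask the type directly. The TypeID check stays centralized
    // in Type::isScalableType(), so call sites need no update if further
    // scalable type kinds are introduced later.
    if (SrcElemTy->isScalableType())
      return false;
    // A sized, non-scalable type has a compile-time-constant alloc size,
    // which is the property the analyses patched above rely on.
    return SrcElemTy->isSized();
  }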