diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -533,8 +533,7 @@
 
     // Don't attempt to analyze GEPs if index scale is not a compile-time
     // constant.
-    Type *SrcEleTy = GEPOp->getSourceElementType();
-    if (SrcEleTy->isVectorTy() && cast<VectorType>(SrcEleTy)->isScalable()) {
+    if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
      Decomposed.Base = V;
      Decomposed.HasCompileTimeConstantScale = false;
      return false;
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -509,7 +509,7 @@
 Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
                                           const DataLayout &DL) {
   // Bail out early. Not expect to load from scalable global variable.
-  if (LoadTy->isVectorTy() && cast<VectorType>(LoadTy)->isScalable())
+  if (isa<ScalableVectorType>(LoadTy))
     return nullptr;
 
   auto *PTy = cast<PointerType>(C->getType());
@@ -836,8 +836,7 @@
   Type *SrcElemTy = GEP->getSourceElementType();
   Type *ResElemTy = GEP->getResultElementType();
   Type *ResTy = GEP->getType();
-  if (!SrcElemTy->isSized() ||
-      (SrcElemTy->isVectorTy() && cast<VectorType>(SrcElemTy)->isScalable()))
+  if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
     return nullptr;
 
   if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
@@ -2572,7 +2571,7 @@
 
   // Do not iterate on scalable vector. The number of elements is unknown at
   // compile-time.
-  if (VTy->isScalable())
+  if (isa<ScalableVectorType>(VTy))
     return nullptr;
 
   if (IntrinsicID == Intrinsic::masked_load) {
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -4151,8 +4151,7 @@
   if (isa<UndefValue>(Ops[0]))
     return UndefValue::get(GEPTy);
 
-  bool IsScalableVec =
-      isa<VectorType>(SrcTy) && cast<VectorType>(SrcTy)->isScalable();
+  bool IsScalableVec = isa<ScalableVectorType>(SrcTy);
 
   if (Ops.size() == 2) {
     // getelementptr P, 0 -> P.
@@ -4294,8 +4293,8 @@
 
   // For fixed-length vector, fold into undef if index is out of bounds.
   if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
-    if (!cast<VectorType>(Vec->getType())->isScalable() &&
-        CI->uge(cast<VectorType>(Vec->getType())->getNumElements()))
+    if (isa<FixedVectorType>(Vec->getType()) &&
+        CI->uge(cast<FixedVectorType>(Vec->getType())->getNumElements()))
       return UndefValue::get(Vec->getType());
   }
 
@@ -4368,7 +4367,8 @@
   // find a previously computed scalar that was inserted into the vector.
   if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
     // For fixed-length vector, fold into undef if index is out of bounds.
-    if (!VecVTy->isScalable() && IdxC->getValue().uge(VecVTy->getNumElements()))
+    if (isa<FixedVectorType>(VecVTy) &&
+        IdxC->getValue().uge(VecVTy->getNumElements()))
       return UndefValue::get(VecVTy->getElementType());
     if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
       return Elt;
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -148,8 +148,7 @@
                                       const DominatorTree *DT) {
   // For unsized types or scalable vectors we don't know exactly how many bytes
   // are dereferenced, so bail out.
-  if (!Ty->isSized() ||
-      (Ty->isVectorTy() && cast<VectorType>(Ty)->isScalable()))
+  if (!Ty->isSized() || isa<ScalableVectorType>(Ty))
     return false;
 
   // When dereferenceability information is provided by a dereferenceable
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -650,8 +650,7 @@
   if (!I.getAllocatedType()->isSized())
     return unknown();
 
-  if (I.getAllocatedType()->isVectorTy() &&
-      cast<VectorType>(I.getAllocatedType())->isScalable())
+  if (isa<ScalableVectorType>(I.getAllocatedType()))
     return unknown();
 
   APInt Size(IntTyBits, DL.getTypeAllocSize(I.getAllocatedType()));
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -3759,13 +3759,11 @@
   // We can bypass creating a target-independent
   // constant expression and then folding it back into a ConstantInt.
   // This is just a compile-time optimization.
-  if (auto *VecTy = dyn_cast<VectorType>(AllocTy)) {
-    if (VecTy->isScalable()) {
-      Constant *NullPtr = Constant::getNullValue(AllocTy->getPointerTo());
-      Constant *One = ConstantInt::get(IntTy, 1);
-      Constant *GEP = ConstantExpr::getGetElementPtr(AllocTy, NullPtr, One);
-      return getSCEV(ConstantExpr::getPtrToInt(GEP, IntTy));
-    }
+  if (isa<ScalableVectorType>(AllocTy)) {
+    Constant *NullPtr = Constant::getNullValue(AllocTy->getPointerTo());
+    Constant *One = ConstantInt::get(IntTy, 1);
+    Constant *GEP = ConstantExpr::getGetElementPtr(AllocTy, NullPtr, One);
+    return getSCEV(ConstantExpr::getPtrToInt(GEP, IntTy));
   }
   return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
 }
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -168,7 +168,7 @@
                                 APInt &DemandedLHS, APInt &DemandedRHS) {
   // The length of scalable vectors is unknown at compile time, thus we
   // cannot check their values
-  if (Shuf->getType()->isScalable())
+  if (isa<ScalableVectorType>(Shuf->getType()))
     return false;
 
   int NumElts =
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -263,10 +263,10 @@
   assert(V->getType()->isVectorTy() && "Not looking at a vector?");
   VectorType *VTy = cast<VectorType>(V->getType());
   // For fixed-length vector, return undef for out of range access.
-  if (!VTy->isScalable()) {
-    unsigned Width = VTy->getNumElements();
+  if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
+    unsigned Width = FVTy->getNumElements();
     if (EltNo >= Width)
-      return UndefValue::get(VTy->getElementType());
+      return UndefValue::get(FVTy->getElementType());
   }
 
   if (Constant *C = dyn_cast<Constant>(V))