diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -861,7 +861,7 @@
       } else {
         // If this operand is a scalable type, bail out early.
         // TODO: handle scalable vectors
-        if (isa<ScalableVectorType>(TargetType))
+        if (TargetType->isScalableVectorType())
           return TTI::TCC_Basic;
         int64_t ElementSize =
             DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize();
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -1252,7 +1252,7 @@
     case Intrinsic::experimental_vector_extract: {
       // FIXME: Handle case where a scalable vector is extracted from a scalable
       // vector
-      if (isa<ScalableVectorType>(RetTy))
+      if (RetTy->isScalableVectorType())
         return BaseT::getIntrinsicInstrCost(ICA, CostKind);
       unsigned Index = cast<ConstantInt>(Args[1])->getZExtValue();
       return thisT()->getShuffleCost(TTI::SK_ExtractSubvector,
@@ -1262,7 +1262,7 @@
     case Intrinsic::experimental_vector_insert: {
       // FIXME: Handle case where a scalable vector is inserted into a scalable
       // vector
-      if (isa<ScalableVectorType>(Args[1]->getType()))
+      if (Args[1]->getType()->isScalableVectorType())
         return BaseT::getIntrinsicInstrCost(ICA, CostKind);
       unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
       return thisT()->getShuffleCost(
@@ -1296,7 +1296,7 @@
     }
     case Intrinsic::fshl:
     case Intrinsic::fshr: {
-      if (isa<ScalableVectorType>(RetTy))
+      if (RetTy->isScalableVectorType())
         return BaseT::getIntrinsicInstrCost(ICA, CostKind);
       const Value *X = Args[0];
       const Value *Y = Args[1];
@@ -1339,7 +1339,7 @@
       }
     }
     // TODO: Handle the remaining intrinsic with scalable vector type
-    if (isa<ScalableVectorType>(RetTy))
+    if (RetTy->isScalableVectorType())
       return BaseT::getIntrinsicInstrCost(ICA, CostKind);
 
     // Assume that we need to scalarize this intrinsic.
diff --git a/llvm/include/llvm/IR/GetElementPtrTypeIterator.h b/llvm/include/llvm/IR/GetElementPtrTypeIterator.h
--- a/llvm/include/llvm/IR/GetElementPtrTypeIterator.h
+++ b/llvm/include/llvm/IR/GetElementPtrTypeIterator.h
@@ -80,7 +80,7 @@
         NumElements = ATy->getNumElements();
       } else if (auto *VTy = dyn_cast<VectorType>(Ty)) {
         CurTy = VTy->getElementType();
-        if (isa<ScalableVectorType>(VTy))
+        if (VTy->isScalableVectorType())
           NumElements = Unbounded;
         else
           NumElements = cast<FixedVectorType>(VTy)->getNumElements();
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -2261,7 +2261,7 @@
     assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
     // Not possible to express a shuffle mask for a scalable vector for this
     // case.
-    if (isa<ScalableVectorType>(Mask->getType()))
+    if (Mask->getType()->isScalableVectorType())
       return false;
     SmallVector<int, 16> MaskAsInts;
     getShuffleMask(Mask, MaskAsInts);
@@ -2272,7 +2272,7 @@
   bool isExtractSubvectorMask(int &Index) const {
     // Not possible to express a shuffle mask for a scalable vector for this
     // case.
-    if (isa<ScalableVectorType>(getType()))
+    if (getType()->isScalableVectorType())
       return false;
 
     int NumSrcElts =
diff --git a/llvm/include/llvm/IR/MatrixBuilder.h b/llvm/include/llvm/IR/MatrixBuilder.h
--- a/llvm/include/llvm/IR/MatrixBuilder.h
+++ b/llvm/include/llvm/IR/MatrixBuilder.h
@@ -39,13 +39,13 @@
     assert((LHS->getType()->isVectorTy() || RHS->getType()->isVectorTy()) &&
            "One of the operands must be a matrix (embedded in a vector)");
     if (LHS->getType()->isVectorTy() && !RHS->getType()->isVectorTy()) {
-      assert(!isa<ScalableVectorType>(LHS->getType()) &&
+      assert(!LHS->getType()->isScalableVectorType() &&
              "LHS Assumed to be fixed width");
       RHS = B.CreateVectorSplat(
           cast<VectorType>(LHS->getType())->getElementCount(), RHS,
           "scalar.splat");
     } else if (!LHS->getType()->isVectorTy() && RHS->getType()->isVectorTy()) {
-      assert(!isa<ScalableVectorType>(RHS->getType()) &&
+      assert(!RHS->getType()->isScalableVectorType() &&
              "RHS Assumed to be fixed width");
       LHS = B.CreateVectorSplat(
           cast<VectorType>(RHS->getType())->getElementCount(), LHS,
@@ -161,13 +161,13 @@
   Value *CreateAdd(Value *LHS, Value *RHS) {
     assert(LHS->getType()->isVectorTy() || RHS->getType()->isVectorTy());
     if (LHS->getType()->isVectorTy() && !RHS->getType()->isVectorTy()) {
-      assert(!isa<ScalableVectorType>(LHS->getType()) &&
+      assert(!LHS->getType()->isScalableVectorType() &&
              "LHS Assumed to be fixed width");
       RHS = B.CreateVectorSplat(
           cast<VectorType>(LHS->getType())->getElementCount(), RHS,
           "scalar.splat");
     } else if (!LHS->getType()->isVectorTy() && RHS->getType()->isVectorTy()) {
-      assert(!isa<ScalableVectorType>(RHS->getType()) &&
+      assert(!RHS->getType()->isScalableVectorType() &&
              "RHS Assumed to be fixed width");
       LHS = B.CreateVectorSplat(
           cast<VectorType>(RHS->getType())->getElementCount(), LHS,
@@ -186,13 +186,13 @@
   Value *CreateSub(Value *LHS, Value *RHS) {
     assert(LHS->getType()->isVectorTy() || RHS->getType()->isVectorTy());
     if (LHS->getType()->isVectorTy() && !RHS->getType()->isVectorTy()) {
-      assert(!isa<ScalableVectorType>(LHS->getType()) &&
+      assert(!LHS->getType()->isScalableVectorType() &&
              "LHS Assumed to be fixed width");
       RHS = B.CreateVectorSplat(
           cast<VectorType>(LHS->getType())->getElementCount(), RHS,
           "scalar.splat");
     } else if (!LHS->getType()->isVectorTy() && RHS->getType()->isVectorTy()) {
-      assert(!isa<ScalableVectorType>(RHS->getType()) &&
+      assert(!RHS->getType()->isScalableVectorType() &&
              "RHS Assumed to be fixed width");
       LHS = B.CreateVectorSplat(
           cast<VectorType>(RHS->getType())->getElementCount(), LHS,
diff --git a/llvm/include/llvm/IR/PatternMatch.h b/llvm/include/llvm/IR/PatternMatch.h
--- a/llvm/include/llvm/IR/PatternMatch.h
+++ b/llvm/include/llvm/IR/PatternMatch.h
@@ -2389,7 +2389,7 @@
     if (m_PtrToInt(m_OffsetGep(m_Zero(), m_SpecificInt(1))).match(V)) {
       Type *PtrTy = cast<Operator>(V)->getOperand(0)->getType();
       auto *DerefTy = PtrTy->getPointerElementType();
-      if (isa<ScalableVectorType>(DerefTy) &&
+      if (DerefTy->isScalableVectorType() &&
           DL.getTypeAllocSizeInBits(DerefTy).getKnownMinSize() == 8)
         return true;
     }
diff --git a/llvm/include/llvm/IR/Type.h b/llvm/include/llvm/IR/Type.h
--- a/llvm/include/llvm/IR/Type.h
+++ b/llvm/include/llvm/IR/Type.h
@@ -236,6 +236,13 @@
     return getTypeID() == ScalableVectorTyID || getTypeID() == FixedVectorTyID;
   }
 
+  /// True if this is an instance of ScalableVectorType.
+  bool isScalableVectorType() const {
+    return getTypeID() == ScalableVectorTyID;
+  }
+  /// True if this is an instance of scalable types.
+  bool isScalableType() const { return isScalableVectorType(); }
+
   /// Return true if this type could be converted with a lossless BitCast to
   /// type 'Ty'. For example, i8* to i32*. BitCasts are valid for types of the
   /// same size only where no re-interpretation of the bits is done.
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -492,7 +492,7 @@
     // Don't attempt to analyze GEPs if index scale is not a compile-time
     // constant.
-    if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
+    if (GEPOp->getSourceElementType()->isScalableType()) {
       Decomposed.Base = V;
       Decomposed.HasCompileTimeConstantScale = false;
       return Decomposed;
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -542,7 +542,7 @@
 Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
                                           const DataLayout &DL) {
   // Bail out early. Not expect to load from scalable global variable.
-  if (isa<ScalableVectorType>(LoadTy))
+  if (LoadTy->isScalableType())
     return nullptr;
 
   auto *PTy = cast<PointerType>(C->getType());
@@ -871,7 +871,7 @@
   Type *SrcElemTy = GEP->getSourceElementType();
   Type *ResElemTy = GEP->getResultElementType();
   Type *ResTy = GEP->getType();
-  if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
+  if (!SrcElemTy->isSized() || SrcElemTy->isScalableType())
     return nullptr;
 
   if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
@@ -2867,7 +2867,7 @@
                                   const CallBase *Call) {
   // Do not iterate on scalable vector. The number of elements is unknown at
   // compile-time.
-  if (isa<ScalableVectorType>(VTy))
+  if (VTy->isScalableType())
     return nullptr;
 
   auto *FVTy = cast<FixedVectorType>(VTy);
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -4340,7 +4340,7 @@
   if (Q.isUndefValue(Ops[0]))
     return UndefValue::get(GEPTy);
 
-  bool IsScalableVec = isa<ScalableVectorType>(SrcTy);
+  bool IsScalableVec = SrcTy->isScalableType();
 
   if (Ops.size() == 2) {
     // getelementptr P, 0 -> P.
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -212,7 +212,7 @@
                                 const TargetLibraryInfo *TLI) {
   // For unsized types or scalable vectors we don't know exactly how many bytes
   // are dereferenced, so bail out.
-  if (!Ty->isSized() || isa<ScalableVectorType>(Ty))
+  if (!Ty->isSized() || Ty->isScalableType())
     return false;
 
   // When dereferenceability information is provided by a dereferenceable
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -664,7 +664,7 @@
   if (!I.getAllocatedType()->isSized())
     return unknown();
 
-  if (isa<ScalableVectorType>(I.getAllocatedType()))
+  if (I.getAllocatedType()->isScalableType())
     return unknown();
 
   APInt Size(IntTyBits, DL.getTypeAllocSize(I.getAllocatedType()));
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -184,7 +184,7 @@
                                 APInt &DemandedLHS, APInt &DemandedRHS) {
   // The length of scalable vectors is unknown at compile time, thus we
   // cannot check their values
-  if (isa<ScalableVectorType>(Shuf->getType()))
+  if (Shuf->getType()->isScalableVectorType())
     return false;
 
   int NumElts =
@@ -224,7 +224,7 @@
                             const Query &Q) {
   // FIXME: We currently have no way to represent the DemandedElts of a scalable
   // vector
-  if (isa<ScalableVectorType>(V->getType())) {
+  if (V->getType()->isScalableType()) {
     Known.resetAll();
     return;
   }
@@ -398,7 +398,7 @@
                             const Query &Q) {
   // FIXME: We currently have no way to represent the DemandedElts of a scalable
   // vector
-  if (isa<ScalableVectorType>(V->getType()))
+  if (V->getType()->isScalableType())
     return 1;
 
   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
@@ -1729,7 +1729,7 @@
     const Value *Vec = I->getOperand(0);
     const Value *Idx = I->getOperand(1);
     auto *CIdx = dyn_cast<ConstantInt>(Idx);
-    if (isa<ScalableVectorType>(Vec->getType())) {
+    if (Vec->getType()->isScalableVectorType()) {
       // FIXME: there's probably *something* we can do with scalable vectors
       Known.resetAll();
       break;
@@ -1811,7 +1811,7 @@
 /// for all of the demanded elements in the vector specified by DemandedElts.
 void computeKnownBits(const Value *V, const APInt &DemandedElts,
                       KnownBits &Known, unsigned Depth, const Query &Q) {
-  if (!DemandedElts || isa<ScalableVectorType>(V->getType())) {
+  if (!DemandedElts || V->getType()->isScalableType()) {
     // No demanded elts or V is a scalable vector, better to assume we don't
     // know anything.
     Known.resetAll();
@@ -2217,7 +2217,7 @@
                     const Query &Q) {
   // FIXME: We currently have no way to represent the DemandedElts of a scalable
   // vector
-  if (isa<ScalableVectorType>(V->getType()))
+  if (V->getType()->isScalableType())
     return false;
 
   if (auto *C = dyn_cast<Constant>(V)) {
@@ -2505,7 +2505,7 @@
 bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
   // FIXME: We currently have no way to represent the DemandedElts of a scalable
   // vector
-  if (isa<ScalableVectorType>(V->getType()))
+  if (V->getType()->isScalableType())
     return false;
 
   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
@@ -2703,7 +2703,7 @@
 
   // FIXME: We currently have no way to represent the DemandedElts of a scalable
   // vector
-  if (isa<ScalableVectorType>(Ty))
+  if (Ty->isScalableType())
     return 1;
 
 #ifndef NDEBUG
@@ -4759,7 +4759,7 @@
       unsigned NumElts = FVTy->getNumElements();
       for (unsigned i = 0; i < NumElts; ++i)
         ShiftAmounts.push_back(C->getAggregateElement(i));
-    } else if (isa<ScalableVectorType>(C->getType()))
+    } else if (C->getType()->isScalableVectorType())
       return true; // Can't tell, just return true to be safe
     else
       ShiftAmounts.push_back(C);
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -877,7 +877,7 @@
     return false;
   if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
     return true;
-  if (isa<ScalableVectorType>(ConstMask->getType()))
+  if (ConstMask->getType()->isScalableVectorType())
     return false;
   for (unsigned I = 0,
@@ -904,7 +904,7 @@
     return false;
   if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
     return true;
-  if (isa<ScalableVectorType>(ConstMask->getType()))
+  if (ConstMask->getType()->isScalableVectorType())
     return false;
   for (unsigned I = 0,
diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
--- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -989,7 +989,7 @@
       Code = bitc::TYPE_CODE_VECTOR;
       TypeVals.push_back(VT->getElementCount().getKnownMinValue());
       TypeVals.push_back(VE.getTypeID(VT->getElementType()));
-      if (isa<ScalableVectorType>(VT))
+      if (VT->isScalableVectorType())
         TypeVals.push_back(true);
       break;
     }
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -7401,7 +7401,7 @@
   // whereas scalable vectors would have to be shifted by
   // <2log(vscale) + number of bits> in order to store the
   // low/high parts. Bailing out for now.
-  if (isa<ScalableVectorType>(StoreType))
+  if (StoreType->isScalableType())
     return false;
 
   if (!DL.typeSizeEqualsStoreSize(StoreType) ||
diff --git a/llvm/lib/CodeGen/InterleavedAccessPass.cpp b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
--- a/llvm/lib/CodeGen/InterleavedAccessPass.cpp
+++ b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
@@ -290,7 +290,7 @@
 bool InterleavedAccess::lowerInterleavedLoad(
     LoadInst *LI, SmallVector<Instruction *, 32> &DeadInsts) {
-  if (!LI->isSimple() || isa<ScalableVectorType>(LI->getType()))
+  if (!LI->isSimple() || LI->getType()->isScalableType())
     return false;
 
   // Check if all users of this load are shufflevectors. If we encounter any
@@ -494,7 +494,7 @@
     return false;
 
   auto *SVI = dyn_cast<ShuffleVectorInst>(SI->getValueOperand());
-  if (!SVI || !SVI->hasOneUse() || isa<ScalableVectorType>(SVI->getType()))
+  if (!SVI || !SVI->hasOneUse() || SVI->getType()->isScalableType())
     return false;
 
   // Check if the shufflevector is RE-interleave shuffle.
diff --git a/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp b/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp
--- a/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp
+++ b/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp
@@ -1268,7 +1268,7 @@
   for (Instruction &I : BB) {
     if (auto SVI = dyn_cast<ShuffleVectorInst>(&I)) {
       // We don't support scalable vectors in this pass.
-      if (isa<ScalableVectorType>(SVI->getType()))
+      if (SVI->getType()->isScalableVectorType())
         continue;
 
       Candidates.emplace_back(cast<FixedVectorType>(SVI->getType()));
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -1538,7 +1538,7 @@
     else
       Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
 
-    if (isa<ScalableVectorType>(VecTy))
+    if (VecTy->isScalableVectorType())
       return NodeMap[V] = DAG.getSplatVector(VT, getCurSDLoc(), Op);
     else {
       SmallVector<SDValue, 16> Ops;
diff --git a/llvm/lib/CodeGen/ValueTypes.cpp b/llvm/lib/CodeGen/ValueTypes.cpp
--- a/llvm/lib/CodeGen/ValueTypes.cpp
+++ b/llvm/lib/CodeGen/ValueTypes.cpp
@@ -114,7 +114,7 @@
 }
 
 bool EVT::isExtendedScalableVector() const {
-  return isExtendedVector() && isa<ScalableVectorType>(LLVMTy);
+  return isExtendedVector() && LLVMTy->isScalableVectorType();
 }
 
 EVT EVT::getExtendedVectorElementType() const {
diff --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp
--- a/llvm/lib/IR/ConstantFold.cpp
+++ b/llvm/lib/IR/ConstantFold.cpp
@@ -49,7 +49,7 @@
   // Do not iterate on scalable vector. The num of elements is unknown at
   // compile-time.
-  if (isa<ScalableVectorType>(DstTy))
+  if (DstTy->isScalableVectorType())
     return nullptr;
 
   // If this cast changes element count then we can't handle it here:
@@ -931,7 +931,7 @@
   // Do not iterate on scalable vector. The num of elements is unknown at
   // compile-time.
-  if (isa<ScalableVectorType>(Val->getType()))
+  if (Val->getType()->isScalableVectorType())
     return nullptr;
 
   auto *ValTy = cast<FixedVectorType>(Val->getType());
@@ -961,8 +961,7 @@
                                            ArrayRef<int> Mask) {
   auto *V1VTy = cast<VectorType>(V1->getType());
   unsigned MaskNumElts = Mask.size();
-  auto MaskEltCount =
-      ElementCount::get(MaskNumElts, isa<ScalableVectorType>(V1VTy));
+  auto MaskEltCount = ElementCount::get(MaskNumElts, V1VTy->isScalableType());
   Type *EltTy = V1VTy->getElementType();
 
   // Undefined shuffle mask -> undefined value.
@@ -981,7 +980,7 @@
   }
   // Do not iterate on scalable vector. The num of elements is unknown at
   // compile-time.
-  if (isa<ScalableVectorType>(V1VTy))
+  if (V1VTy->isScalableType())
     return nullptr;
 
   unsigned SrcNumElts = V1VTy->getElementCount().getKnownMinValue();
@@ -1058,7 +1057,7 @@
   // Handle scalar UndefValue and scalable vector UndefValue. Fixed-length
   // vectors are always evaluated per element.
-  bool IsScalableVector = isa<ScalableVectorType>(C->getType());
+  bool IsScalableVector = C->getType()->isScalableType();
   bool HasScalarUndefOrScalableVectorUndef =
       (!C->getType()->isVectorTy() || IsScalableVector) && isa<UndefValue>(C);
@@ -1139,7 +1138,7 @@
   // Handle scalar UndefValue and scalable vector UndefValue. Fixed-length
   // vectors are always evaluated per element.
-  bool IsScalableVector = isa<ScalableVectorType>(C1->getType());
+  bool IsScalableVector = C1->getType()->isScalableType();
   bool HasScalarUndefOrScalableVectorUndef =
       (!C1->getType()->isVectorTy() || IsScalableVector) &&
       (isa<UndefValue>(C1) || isa<UndefValue>(C2));
@@ -2107,7 +2106,7 @@
   // Do not iterate on scalable vector. The number of elements is unknown at
   // compile-time.
-  if (isa<ScalableVectorType>(C1VTy))
+  if (C1VTy->isScalableType())
     return nullptr;
 
   // If we can constant fold the comparison of each element, constant fold
@@ -2398,8 +2397,8 @@
   // its arguments is a vector.
   for (unsigned i = 0, e = Idxs.size(); i != e; ++i) {
     if (auto *VT = dyn_cast<VectorType>(Idxs[i]->getType())) {
-      assert((!isa<VectorType>(GEPTy) || isa<ScalableVectorType>(GEPTy) ==
-                                             isa<ScalableVectorType>(VT)) &&
+      assert((!isa<VectorType>(GEPTy) ||
+              GEPTy->isScalableType() == VT->isScalableType()) &&
              "Mismatched GEPTy vector types");
       GEPTy = VectorType::get(OrigGEPTy, VT->getElementCount());
       break;
diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp
--- a/llvm/lib/IR/Constants.cpp
+++ b/llvm/lib/IR/Constants.cpp
@@ -312,7 +312,7 @@
       return true;
     if (isa<ConstantAggregateZero>(C))
       return false;
-    if (isa<ScalableVectorType>(C->getType()))
+    if (C->getType()->isScalableVectorType())
       return false;
     for (unsigned i = 0, e = cast<FixedVectorType>(VTy)->getNumElements();
@@ -423,7 +423,7 @@
     return Elt < CC->getNumOperands() ? CC->getOperand(Elt) : nullptr;
 
   // FIXME: getNumElements() will fail for non-fixed vector types.
-  if (isa<ScalableVectorType>(getType()))
+  if (getType()->isScalableVectorType())
     return nullptr;
 
   if (const auto *CAZ = dyn_cast<ConstantAggregateZero>(this))
@@ -2555,7 +2555,7 @@
   unsigned NElts = Mask.size();
   auto V1VTy = cast<VectorType>(V1->getType());
   Type *EltTy = V1VTy->getElementType();
-  bool TypeIsScalable = isa<ScalableVectorType>(V1VTy);
+  bool TypeIsScalable = V1VTy->isScalableVectorType();
   Type *ShufTy = VectorType::get(EltTy, NElts, TypeIsScalable);
 
   if (OnlyIfReducedTy == ShufTy)
diff --git a/llvm/lib/IR/ConstantsContext.h b/llvm/lib/IR/ConstantsContext.h
--- a/llvm/lib/IR/ConstantsContext.h
+++ b/llvm/lib/IR/ConstantsContext.h
@@ -184,10 +184,10 @@
 class ShuffleVectorConstantExpr final : public ConstantExpr {
 public:
   ShuffleVectorConstantExpr(Constant *C1, Constant *C2, ArrayRef<int> Mask)
-      : ConstantExpr(VectorType::get(
-                         cast<VectorType>(C1->getType())->getElementType(),
-                         Mask.size(), isa<ScalableVectorType>(C1->getType())),
-                     Instruction::ShuffleVector, &Op<0>(), 2) {
+      : ConstantExpr(
+            VectorType::get(cast<VectorType>(C1->getType())->getElementType(),
+                            Mask.size(), C1->getType()->isScalableVectorType()),
+            Instruction::ShuffleVector, &Op<0>(), 2) {
     assert(ShuffleVectorInst::isValidOperands(C1, C2, Mask) &&
            "Invalid shuffle vector instruction operands!");
     Op<0>() = C1;
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -1924,7 +1924,7 @@
                                      Instruction *InsertBefore)
     : Instruction(
           VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
-                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
+                          Mask.size(), V1->getType()->isScalableVectorType()),
           ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
           OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
   assert(isValidOperands(V1, V2, Mask) &&
@@ -1939,7 +1939,7 @@
                                      const Twine &Name, BasicBlock *InsertAtEnd)
     : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
-                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
+                          Mask.size(), V1->getType()->isScalableVectorType()),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
   assert(isValidOperands(V1, V2, Mask) &&
@@ -1982,7 +1982,7 @@
     if (Elem != UndefMaskElem && Elem >= V1Size * 2)
       return false;
 
-  if (isa<ScalableVectorType>(V1->getType()))
+  if (V1->getType()->isScalableVectorType())
     if ((Mask[0] != 0 && Mask[0] != UndefMaskElem) || !is_splat(Mask))
       return false;
@@ -1999,7 +1999,7 @@
   // input vectors
   auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
   if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
-      isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
+      MaskTy->isScalableVectorType() != V1->getType()->isScalableVectorType())
     return false;
 
   // Check to see if Mask is valid.
@@ -2072,7 +2072,7 @@
 Constant *ShuffleVectorInst::convertShuffleMaskForBitcode(ArrayRef<int> Mask,
                                                           Type *ResultTy) {
   Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
-  if (isa<ScalableVectorType>(ResultTy)) {
+  if (ResultTy->isScalableVectorType()) {
     assert(is_splat(Mask) && "Unexpected shuffle");
     Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
     if (Mask[0] == 0)
@@ -2237,7 +2237,7 @@
   // FIXME: Not currently possible to express a shuffle mask for a scalable
   // vector for this case.
-  if (isa<ScalableVectorType>(getType()))
+  if (getType()->isScalableVectorType())
     return false;
 
   int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
@@ -2264,7 +2264,7 @@
   // FIXME: Not currently possible to express a shuffle mask for a scalable
   // vector for this case.
-  if (isa<ScalableVectorType>(getType()))
+  if (getType()->isScalableVectorType())
     return false;
 
   int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
@@ -2283,7 +2283,7 @@
   // FIXME: Not currently possible to express a shuffle mask for a scalable
   // vector for this case.
-  if (isa<ScalableVectorType>(getType()))
+  if (getType()->isScalableVectorType())
     return false;
 
   int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
diff --git a/llvm/lib/IR/Operator.cpp b/llvm/lib/IR/Operator.cpp
--- a/llvm/lib/IR/Operator.cpp
+++ b/llvm/lib/IR/Operator.cpp
@@ -98,7 +98,7 @@
   for (auto GTI = begin, GTE = end; GTI != GTE; ++GTI) {
     // Scalable vectors are multiplied by a runtime constant.
     bool ScalableType = false;
-    if (isa<ScalableVectorType>(GTI.getIndexedType()))
+    if (GTI.getIndexedType()->isScalableType())
       ScalableType = true;
 
     Value *V = GTI.getOperand();
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -323,7 +323,7 @@
     return new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
   }
 
-  if (isa<ScalableVectorType>(ConstMask->getType()))
+  if (ConstMask->getType()->isScalableVectorType())
     return nullptr;
 
   // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
@@ -362,7 +362,7 @@
   if (ConstMask->isNullValue())
     return eraseInstFromFunction(II);
 
-  if (isa<ScalableVectorType>(ConstMask->getType()))
+  if (ConstMask->getType()->isScalableVectorType())
     return nullptr;
 
   // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -102,8 +102,8 @@
   // introduction of 'vscale' into the calculations. It seems better to
   // bail out for this case too until we've done a proper cost-benefit
   // analysis.
-  bool AllocIsScalable = isa<ScalableVectorType>(AllocElTy);
-  bool CastIsScalable = isa<ScalableVectorType>(CastElTy);
+  bool AllocIsScalable = AllocElTy->isScalableType();
+  bool CastIsScalable = CastElTy->isScalableType();
   if (AllocIsScalable != CastIsScalable) return nullptr;
 
   Align AllocElTyAlign = DL.getABITypeAlign(AllocElTy);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -847,7 +847,7 @@
   Type *SourceElementType = GEPI->getSourceElementType();
   // Size information about scalable vectors is not available, so we cannot
   // deduce whether indexing at n is undefined behaviour or not. Bail out.
-  if (isa<ScalableVectorType>(SourceElementType))
+  if (SourceElementType->isScalableType())
     return false;
 
   Type *AllocTy = GetElementPtrInst::getIndexedType(SourceElementType, Ops);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -130,7 +130,7 @@
   if (Depth == MaxAnalysisRecursionDepth)
     return nullptr;
 
-  if (isa<ScalableVectorType>(VTy))
+  if (VTy->isScalableType())
     return nullptr;
 
   Instruction *I = dyn_cast<Instruction>(V);
@@ -1044,7 +1044,7 @@
                                                     bool AllowMultipleUsers) {
   // Cannot analyze scalable type. The number of vector elements is not a
   // compile-time constant.
-  if (isa<ScalableVectorType>(V->getType()))
+  if (V->getType()->isScalableVectorType())
     return nullptr;
 
   unsigned VWidth = cast<FixedVectorType>(V->getType())->getNumElements();
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -1026,7 +1026,7 @@
 static bool isShuffleEquivalentToSelect(ShuffleVectorInst &Shuf) {
   // Can not analyze scalable type, the number of elements is not a compile-time
   // constant.
-  if (isa<ScalableVectorType>(Shuf.getOperand(0)->getType()))
+  if (Shuf.getOperand(0)->getType()->isScalableVectorType())
     return false;
 
   int MaskSize = Shuf.getShuffleMask().size();
@@ -1060,7 +1060,7 @@
   VectorType *VecTy = InsElt.getType();
   // Can not handle scalable type, the number of elements is not a compile-time
   // constant.
-  if (isa<ScalableVectorType>(VecTy))
+  if (VecTy->isScalableVectorType())
     return nullptr;
 
   unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
@@ -1132,7 +1132,7 @@
   // Bail out early if shuffle is scalable type. The number of elements in
   // shuffle mask is unknown at compile-time.
-  if (isa<ScalableVectorType>(Shuf->getType()))
+  if (Shuf->getType()->isScalableVectorType())
     return nullptr;
 
   // Check for a constant insertion index.
@@ -1170,7 +1170,7 @@
   // Bail out early if shuffle is scalable type. The number of elements in
   // shuffle mask is unknown at compile-time.
-  if (isa<ScalableVectorType>(Shuf->getType()))
+  if (Shuf->getType()->isScalableVectorType())
     return nullptr;
 
   // Check for a constant insertion index.
@@ -1299,7 +1299,7 @@
   // a single shuffle op.
   // Can not handle scalable type, the number of elements needed to create
   // shuffle mask is not a compile-time constant.
-  if (isa<ScalableVectorType>(InsElt.getType()))
+  if (InsElt.getType()->isScalableVectorType())
     return nullptr;
 
   unsigned NumElts = cast<FixedVectorType>(InsElt.getType())->getNumElements();
@@ -2284,7 +2284,7 @@
     return replaceInstUsesWith(SVI, V);
 
   // Bail out for scalable vectors
-  if (isa<ScalableVectorType>(LHS->getType()))
+  if (LHS->getType()->isScalableVectorType())
     return nullptr;
 
   // shuffle x, x, mask --> shuffle x, undef, mask'
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1853,7 +1853,7 @@
   SmallVector<Value *, 8> Ops(GEP.operands());
   Type *GEPType = GEP.getType();
   Type *GEPEltType = GEP.getSourceElementType();
-  bool IsGEPSrcEleScalable = isa<ScalableVectorType>(GEPEltType);
+  bool IsGEPSrcEleScalable = GEPEltType->isScalableType();
   if (Value *V = SimplifyGEPInst(GEPEltType, Ops, SQ.getWithInstruction(&GEP)))
     return replaceInstUsesWith(GEP, V);
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -784,7 +784,7 @@
         LI.getPointerAddressSpace() != DL.getAllocaAddrSpace())
       return PI.setAborted(&LI);
 
-    if (isa<ScalableVectorType>(LI.getType()))
+    if (LI.getType()->isScalableType())
       return PI.setAborted(&LI);
 
     uint64_t Size = DL.getTypeStoreSize(LI.getType()).getFixedSize();
@@ -802,7 +802,7 @@
         SI.getPointerAddressSpace() != DL.getAllocaAddrSpace())
       return PI.setAborted(&SI);
 
-    if (isa<ScalableVectorType>(ValOp->getType()))
+    if (ValOp->getType()->isScalableType())
      return PI.setAborted(&SI);
 
    uint64_t Size = DL.getTypeStoreSize(ValOp->getType()).getFixedSize();
@@ -1549,7 +1549,7 @@
   Type *ElementTy = Ty->getElementType();
   if (!ElementTy->isSized())
     return nullptr; // We can't GEP through an unsized element.
-  if (isa<ScalableVectorType>(ElementTy))
+  if (ElementTy->isScalableType())
     return nullptr;
 
   APInt ElementSize(Offset.getBitWidth(),
                     DL.getTypeAllocSize(ElementTy).getFixedSize());
@@ -4607,7 +4607,7 @@
   // Skip alloca forms that this analysis can't handle.
   auto *AT = AI.getAllocatedType();
-  if (AI.isArrayAllocation() || !AT->isSized() || isa<ScalableVectorType>(AT) ||
+  if (AI.isArrayAllocation() || !AT->isSized() || AT->isScalableType() ||
       DL.getTypeAllocSize(AT).getFixedSize() == 0)
     return false;
 
@@ -4730,7 +4730,7 @@
   for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
        I != E; ++I) {
     if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
-      if (isa<ScalableVectorType>(AI->getAllocatedType())) {
+      if (AI->getAllocatedType()->isScalableType()) {
         if (isAllocaPromotable(AI))
           PromotableAllocas.push_back(AI);
       } else {
diff --git a/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp b/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
--- a/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
+++ b/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
@@ -932,9 +932,9 @@
   IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
   if (II) {
     // The scalarization code below does not work for scalable vectors.
-    if (isa<ScalableVectorType>(II->getType()) ||
+    if (II->getType()->isScalableType() ||
         any_of(II->arg_operands(),
-               [](Value *V) { return isa<ScalableVectorType>(V->getType()); }))
+               [](Value *V) { return V->getType()->isScalableType(); }))
       return false;
 
     switch (II->getIntrinsicID()) {
diff --git a/llvm/lib/Transforms/Utils/VNCoercion.cpp b/llvm/lib/Transforms/Utils/VNCoercion.cpp
--- a/llvm/lib/Transforms/Utils/VNCoercion.cpp
+++ b/llvm/lib/Transforms/Utils/VNCoercion.cpp
@@ -10,7 +10,7 @@
 namespace VNCoercion {
 
 static bool isFirstClassAggregateOrScalableType(Type *Ty) {
-  return Ty->isStructTy() || Ty->isArrayTy() || isa<ScalableVectorType>(Ty);
+  return Ty->isStructTy() || Ty->isArrayTy() || Ty->isScalableType();
 }
 
 /// Return true if coerceAvailableValueToLoadType will succeed.
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -7557,7 +7557,7 @@
   for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
     // Skip instructions with scalable type. The num of elements is unknown at
     // compile-time for scalable type.
-    if (isa<ScalableVectorType>(it->getType()))
+    if (it->getType()->isScalableType())
      continue;
 
    // Skip instructions marked for the deletion.