diff --git a/llvm/include/llvm/IR/GetElementPtrTypeIterator.h b/llvm/include/llvm/IR/GetElementPtrTypeIterator.h
--- a/llvm/include/llvm/IR/GetElementPtrTypeIterator.h
+++ b/llvm/include/llvm/IR/GetElementPtrTypeIterator.h
@@ -80,7 +80,7 @@
         NumElements = ATy->getNumElements();
       } else if (auto *VTy = dyn_cast<VectorType>(Ty)) {
         CurTy = VTy->getElementType();
-        if (VTy->isScalable())
+        if (isa<ScalableVectorType>(VTy))
           NumElements = Unbounded;
         else
           NumElements = VTy->getNumElements();
diff --git a/llvm/include/llvm/IR/PatternMatch.h b/llvm/include/llvm/IR/PatternMatch.h
--- a/llvm/include/llvm/IR/PatternMatch.h
+++ b/llvm/include/llvm/IR/PatternMatch.h
@@ -2173,8 +2173,8 @@
     if (m_PtrToInt(m_OffsetGep(m_Zero(), m_SpecificInt(1))).match(V)) {
       Type *PtrTy = cast<Operator>(V)->getOperand(0)->getType();
-      auto *DerefTy = dyn_cast<VectorType>(PtrTy->getPointerElementType());
-      if (DerefTy && DerefTy->isScalable() &&
+      auto *DerefTy = PtrTy->getPointerElementType();
+      if (isa<ScalableVectorType>(DerefTy) &&
           DL.getTypeAllocSizeInBits(DerefTy).getKnownMinSize() == 8)
         return true;
     }
diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp
--- a/llvm/lib/IR/AsmWriter.cpp
+++ b/llvm/lib/IR/AsmWriter.cpp
@@ -464,7 +464,7 @@
 static void PrintShuffleMask(raw_ostream &Out, Type *Ty, ArrayRef<int> Mask) {
   Out << ", <";
-  if (cast<VectorType>(Ty)->isScalable())
+  if (isa<ScalableVectorType>(Ty))
     Out << "vscale x ";
   Out << Mask.size() << " x i32> ";
   bool FirstElt = true;
diff --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp
--- a/llvm/lib/IR/ConstantFold.cpp
+++ b/llvm/lib/IR/ConstantFold.cpp
@@ -49,7 +49,7 @@
 
   // Do not iterate on scalable vector. The num of elements is unknown at
   // compile-time.
-  if (DstTy->isScalable())
+  if (isa<ScalableVectorType>(DstTy))
     return nullptr;
 
   // If this cast changes element count then we can't handle it here:
@@ -848,7 +848,7 @@
   // Do not iterate on scalable vector. The num of elements is unknown at
   // compile-time.
   VectorType *ValTy = cast<VectorType>(Val->getType());
-  if (ValTy->isScalable())
+  if (isa<ScalableVectorType>(ValTy))
     return nullptr;
 
   unsigned NumElts = cast<VectorType>(Val->getType())->getNumElements();
@@ -876,7 +876,7 @@
                                                      ArrayRef<int> Mask) {
   auto *V1VTy = cast<VectorType>(V1->getType());
   unsigned MaskNumElts = Mask.size();
-  ElementCount MaskEltCount = {MaskNumElts, V1VTy->isScalable()};
+  ElementCount MaskEltCount = {MaskNumElts, isa<ScalableVectorType>(V1VTy)};
   Type *EltTy = V1VTy->getElementType();
 
   // Undefined shuffle mask -> undefined value.
@@ -895,7 +895,7 @@
   }
   // Do not iterate on scalable vector. The num of elements is unknown at
   // compile-time.
-  if (V1VTy->isScalable())
+  if (isa<ScalableVectorType>(V1VTy))
     return nullptr;
 
   unsigned SrcNumElts = V1VTy->getNumElements();
@@ -972,8 +972,7 @@
 
   // Handle scalar UndefValue and scalable vector UndefValue. Fixed-length
   // vectors are always evaluated per element.
-  bool IsScalableVector = isa<VectorType>(C->getType()) &&
-                          cast<VectorType>(C->getType())->isScalable();
+  bool IsScalableVector = isa<ScalableVectorType>(C->getType());
   bool HasScalarUndefOrScalableVectorUndef =
       (!C->getType()->isVectorTy() || IsScalableVector) && isa<UndefValue>(C);
 
@@ -1046,8 +1045,7 @@
 
   // Handle scalar UndefValue and scalable vector UndefValue. Fixed-length
   // vectors are always evaluated per element.
-  bool IsScalableVector = isa<VectorType>(C1->getType()) &&
-                          cast<VectorType>(C1->getType())->isScalable();
+  bool IsScalableVector = isa<ScalableVectorType>(C1->getType());
   bool HasScalarUndefOrScalableVectorUndef =
       (!C1->getType()->isVectorTy() || IsScalableVector) &&
       (isa<UndefValue>(C1) || isa<UndefValue>(C2));
@@ -2000,7 +1998,7 @@
 
   // Do not iterate on scalable vector. The number of elements is unknown at
   // compile-time.
-  if (C1VTy->isScalable())
+  if (isa<ScalableVectorType>(C1VTy))
     return nullptr;
 
   // Fast path for splatted constants.
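Note: the ConstantFold.cpp hunks above all rely on the same early-exit guard: element-by-element constant folding is skipped for scalable vectors, whose element count is only a runtime multiple of vscale. Below is a minimal sketch of that idiom, assuming the listed LLVM headers; the helper name foldPerElement is hypothetical and not part of this patch.

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"

using namespace llvm;

// Hypothetical helper: walk a constant's elements, but only when the type is
// a FixedVectorType; scalar constants and scalable vectors bail out, mirroring
// the "Do not iterate on scalable vector" guards in ConstantFold.cpp.
static Constant *foldPerElement(Constant *C) {
  auto *VTy = dyn_cast<FixedVectorType>(C->getType());
  if (!VTy)
    return nullptr; // Scalar or scalable vector: no fixed element count.

  SmallVector<Constant *, 16> Elts;
  for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
    Constant *Elt = C->getAggregateElement(I);
    if (!Elt)
      return nullptr;
    Elts.push_back(Elt); // A real folder would transform Elt here.
  }
  return ConstantVector::get(Elts);
}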
diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp
--- a/llvm/lib/IR/Constants.cpp
+++ b/llvm/lib/IR/Constants.cpp
@@ -241,7 +241,7 @@
 bool Constant::isNormalFP() const {
   if (auto *CFP = dyn_cast<ConstantFP>(this))
     return CFP->getValueAPF().isNormal();
-  auto *VTy = dyn_cast<VectorType>(getType());
+  auto *VTy = dyn_cast<FixedVectorType>(getType());
   if (!VTy)
     return false;
   for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
@@ -255,7 +255,7 @@
 bool Constant::hasExactInverseFP() const {
   if (auto *CFP = dyn_cast<ConstantFP>(this))
     return CFP->getValueAPF().getExactInverse(nullptr);
-  auto *VTy = dyn_cast<VectorType>(getType());
+  auto *VTy = dyn_cast<FixedVectorType>(getType());
   if (!VTy)
     return false;
   for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
@@ -269,7 +269,7 @@
 bool Constant::isNaN() const {
   if (auto *CFP = dyn_cast<ConstantFP>(this))
     return CFP->isNaN();
-  auto *VTy = dyn_cast<VectorType>(getType());
+  auto *VTy = dyn_cast<FixedVectorType>(getType());
   if (!VTy)
     return false;
   for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
@@ -644,7 +644,7 @@
   }
 
   // Don't know how to deal with this constant.
-  auto *VTy = dyn_cast<VectorType>(Ty);
+  auto *VTy = dyn_cast<FixedVectorType>(Ty);
   if (!VTy)
     return C;
 
@@ -2287,7 +2287,7 @@
   unsigned NElts = Mask.size();
   auto V1VTy = cast<VectorType>(V1->getType());
   Type *EltTy = V1VTy->getElementType();
-  bool TypeIsScalable = V1VTy->isScalable();
+  bool TypeIsScalable = isa<ScalableVectorType>(V1VTy);
   Type *ShufTy = VectorType::get(EltTy, NElts, TypeIsScalable);
 
   if (OnlyIfReducedTy == ShufTy)
diff --git a/llvm/lib/IR/ConstantsContext.h b/llvm/lib/IR/ConstantsContext.h
--- a/llvm/lib/IR/ConstantsContext.h
+++ b/llvm/lib/IR/ConstantsContext.h
@@ -148,11 +148,10 @@
 class ShuffleVectorConstantExpr : public ConstantExpr {
 public:
   ShuffleVectorConstantExpr(Constant *C1, Constant *C2, ArrayRef<int> Mask)
-      : ConstantExpr(
-            VectorType::get(cast<VectorType>(C1->getType())->getElementType(),
-                            Mask.size(),
-                            cast<VectorType>(C1->getType())->isScalable()),
-            Instruction::ShuffleVector, &Op<0>(), 2) {
+      : ConstantExpr(VectorType::get(
+                         cast<VectorType>(C1->getType())->getElementType(),
+                         Mask.size(), isa<ScalableVectorType>(C1->getType())),
+                     Instruction::ShuffleVector, &Op<0>(), 2) {
     assert(ShuffleVectorInst::isValidOperands(C1, C2, Mask) &&
            "Invalid shuffle vector instruction operands!");
     Op<0>() = C1;
diff --git a/llvm/lib/IR/Function.cpp b/llvm/lib/IR/Function.cpp
--- a/llvm/lib/IR/Function.cpp
+++ b/llvm/lib/IR/Function.cpp
@@ -1354,10 +1354,9 @@
       return true;
     }
     case IITDescriptor::ScalableVecArgument: {
-      VectorType *VTy = dyn_cast<VectorType>(Ty);
-      if (!VTy || !VTy->isScalable())
+      if (!isa<ScalableVectorType>(Ty))
         return true;
-      return matchIntrinsicType(VTy, Infos, ArgTys, DeferredChecks,
+      return matchIntrinsicType(Ty, Infos, ArgTys, DeferredChecks,
                                 IsDeferredCheck);
     }
     case IITDescriptor::VecOfBitcastsToInt: {
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -1879,8 +1879,7 @@
                                      Instruction *InsertBefore)
     : Instruction(
           VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
-                          Mask.size(),
-                          cast<VectorType>(V1->getType())->isScalable()),
+                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
           ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
           OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
   assert(isValidOperands(V1, V2, Mask) &&
@@ -1895,8 +1894,7 @@
                                      const Twine &Name, BasicBlock *InsertAtEnd)
     : Instruction(
           VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
-                          Mask.size(),
-                          cast<VectorType>(V1->getType())->isScalable()),
+                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
           ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
           OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
   assert(isValidOperands(V1, V2, Mask) &&
@@ -1938,7 +1936,7 @@
     if (Elem != UndefMaskElem && Elem >= V1Size * 2)
       return false;
 
-  if (cast<VectorType>(V1->getType())->isScalable())
+  if (isa<ScalableVectorType>(V1->getType()))
     if ((Mask[0] != 0 && Mask[0] != UndefMaskElem) || !is_splat(Mask))
       return false;
 
@@ -1951,10 +1949,11 @@
   if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
     return false;
 
-  // Mask must be vector of i32.
+  // Mask must be vector of i32, and must be the same kind of vector as the
+  // input vectors.
   auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
   if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
-      MaskTy->isScalable() != cast<VectorType>(V1->getType())->isScalable())
+      isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
     return false;
 
   // Check to see if Mask is valid.
@@ -2012,7 +2011,7 @@
 Constant *ShuffleVectorInst::convertShuffleMaskForBitcode(ArrayRef<int> Mask,
                                                           Type *ResultTy) {
   Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
-  if (cast<VectorType>(ResultTy)->isScalable()) {
+  if (isa<ScalableVectorType>(ResultTy)) {
     assert(is_splat(Mask) && "Unexpected shuffle");
     Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
     if (Mask[0] == 0)
diff --git a/llvm/lib/IR/Operator.cpp b/llvm/lib/IR/Operator.cpp
--- a/llvm/lib/IR/Operator.cpp
+++ b/llvm/lib/IR/Operator.cpp
@@ -46,9 +46,8 @@
       continue;
 
     // Scalable vectors are multiplied by a runtime constant.
-    if (auto *VecTy = dyn_cast<VectorType>(GTI.getIndexedType()))
-      if (VecTy->isScalable())
-        return false;
+    if (isa<ScalableVectorType>(GTI.getIndexedType()))
+      return false;
 
     // Handle a struct index, which adds its field offset to the pointer.
     if (StructType *STy = GTI.getStructTypeOrNull()) {
diff --git a/llvm/lib/IR/Type.cpp b/llvm/lib/IR/Type.cpp
--- a/llvm/lib/IR/Type.cpp
+++ b/llvm/lib/IR/Type.cpp
@@ -511,11 +511,9 @@
 }
 
 bool StructType::isValidElementType(Type *ElemTy) {
-  if (auto *VTy = dyn_cast<VectorType>(ElemTy))
-    return !VTy->isScalable();
   return !ElemTy->isVoidTy() && !ElemTy->isLabelTy() &&
          !ElemTy->isMetadataTy() && !ElemTy->isFunctionTy() &&
-         !ElemTy->isTokenTy();
+         !ElemTy->isTokenTy() && !isa<ScalableVectorType>(ElemTy);
 }
 
 bool StructType::isLayoutIdentical(StructType *Other) const {
@@ -573,11 +571,9 @@
 }
 
 bool ArrayType::isValidElementType(Type *ElemTy) {
-  if (auto *VTy = dyn_cast<VectorType>(ElemTy))
-    return !VTy->isScalable();
   return !ElemTy->isVoidTy() && !ElemTy->isLabelTy() &&
          !ElemTy->isMetadataTy() && !ElemTy->isFunctionTy() &&
-         !ElemTy->isTokenTy();
+         !ElemTy->isTokenTy() && !isa<ScalableVectorType>(ElemTy);
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -698,8 +698,8 @@
   // the runtime size. If the global is a struct or an array containing
   // scalable vectors, that will be caught by the isValidElementType methods
   // in StructType or ArrayType instead.
-  if (auto *VTy = dyn_cast<VectorType>(GV.getValueType()))
-    Assert(!VTy->isScalable(), "Globals cannot contain scalable vectors", &GV);
+  Assert(!isa<ScalableVectorType>(GV.getValueType()),
+         "Globals cannot contain scalable vectors", &GV);
 
   if (!GV.hasInitializer()) {
     visitGlobalValue(GV);
diff --git a/llvm/unittests/IR/VectorTypesTest.cpp b/llvm/unittests/IR/VectorTypesTest.cpp
--- a/llvm/unittests/IR/VectorTypesTest.cpp
+++ b/llvm/unittests/IR/VectorTypesTest.cpp
@@ -23,63 +23,63 @@
   Type *Float64Ty = Type::getDoubleTy(Ctx);
 
   VectorType *V8Int32Ty = VectorType::get(Int32Ty, 8);
-  ASSERT_FALSE(V8Int32Ty->isScalable());
+  ASSERT_TRUE(isa<FixedVectorType>(V8Int32Ty));
   EXPECT_EQ(V8Int32Ty->getNumElements(), 8U);
   EXPECT_EQ(V8Int32Ty->getElementType()->getScalarSizeInBits(), 32U);
 
   VectorType *V8Int16Ty = VectorType::get(Int16Ty, {8, false});
-  ASSERT_FALSE(V8Int16Ty->isScalable());
+  ASSERT_TRUE(isa<FixedVectorType>(V8Int16Ty));
   EXPECT_EQ(V8Int16Ty->getNumElements(), 8U);
   EXPECT_EQ(V8Int16Ty->getElementType()->getScalarSizeInBits(), 16U);
 
   ElementCount EltCnt(4, false);
   VectorType *V4Int64Ty = VectorType::get(Int64Ty, EltCnt);
-  ASSERT_FALSE(V4Int64Ty->isScalable());
+  ASSERT_TRUE(isa<FixedVectorType>(V4Int64Ty));
   EXPECT_EQ(V4Int64Ty->getNumElements(), 4U);
   EXPECT_EQ(V4Int64Ty->getElementType()->getScalarSizeInBits(), 64U);
 
   VectorType *V2Int64Ty = VectorType::get(Int64Ty, EltCnt/2);
-  ASSERT_FALSE(V2Int64Ty->isScalable());
+  ASSERT_TRUE(isa<FixedVectorType>(V2Int64Ty));
   EXPECT_EQ(V2Int64Ty->getNumElements(), 2U);
   EXPECT_EQ(V2Int64Ty->getElementType()->getScalarSizeInBits(), 64U);
 
   VectorType *V8Int64Ty = VectorType::get(Int64Ty, EltCnt*2);
-  ASSERT_FALSE(V8Int64Ty->isScalable());
+  ASSERT_TRUE(isa<FixedVectorType>(V8Int64Ty));
   EXPECT_EQ(V8Int64Ty->getNumElements(), 8U);
   EXPECT_EQ(V8Int64Ty->getElementType()->getScalarSizeInBits(), 64U);
 
   VectorType *V4Float64Ty = VectorType::get(Float64Ty, EltCnt);
-  ASSERT_FALSE(V4Float64Ty->isScalable());
+  ASSERT_TRUE(isa<FixedVectorType>(V4Float64Ty));
   EXPECT_EQ(V4Float64Ty->getNumElements(), 4U);
   EXPECT_EQ(V4Float64Ty->getElementType()->getScalarSizeInBits(), 64U);
 
   VectorType *ExtTy = VectorType::getExtendedElementVectorType(V8Int16Ty);
   EXPECT_EQ(ExtTy, V8Int32Ty);
-  ASSERT_FALSE(ExtTy->isScalable());
+  ASSERT_TRUE(isa<FixedVectorType>(ExtTy));
   EXPECT_EQ(ExtTy->getNumElements(), 8U);
   EXPECT_EQ(ExtTy->getElementType()->getScalarSizeInBits(), 32U);
 
   VectorType *TruncTy = VectorType::getTruncatedElementVectorType(V8Int32Ty);
   EXPECT_EQ(TruncTy, V8Int16Ty);
-  ASSERT_FALSE(TruncTy->isScalable());
+  ASSERT_TRUE(isa<FixedVectorType>(TruncTy));
   EXPECT_EQ(TruncTy->getNumElements(), 8U);
   EXPECT_EQ(TruncTy->getElementType()->getScalarSizeInBits(), 16U);
 
   VectorType *HalvedTy = VectorType::getHalfElementsVectorType(V4Int64Ty);
   EXPECT_EQ(HalvedTy, V2Int64Ty);
-  ASSERT_FALSE(HalvedTy->isScalable());
+  ASSERT_TRUE(isa<FixedVectorType>(HalvedTy));
   EXPECT_EQ(HalvedTy->getNumElements(), 2U);
   EXPECT_EQ(HalvedTy->getElementType()->getScalarSizeInBits(), 64U);
 
   VectorType *DoubledTy = VectorType::getDoubleElementsVectorType(V4Int64Ty);
   EXPECT_EQ(DoubledTy, V8Int64Ty);
-  ASSERT_FALSE(DoubledTy->isScalable());
+  ASSERT_TRUE(isa<FixedVectorType>(DoubledTy));
   EXPECT_EQ(DoubledTy->getNumElements(), 8U);
   EXPECT_EQ(DoubledTy->getElementType()->getScalarSizeInBits(), 64U);
 
   VectorType *ConvTy = VectorType::getInteger(V4Float64Ty);
   EXPECT_EQ(ConvTy, V4Int64Ty);
-  ASSERT_FALSE(ConvTy->isScalable());
+  ASSERT_TRUE(isa<FixedVectorType>(ConvTy));
   EXPECT_EQ(ConvTy->getNumElements(), 4U);
   EXPECT_EQ(ConvTy->getElementType()->getScalarSizeInBits(), 64U);
 
@@ -97,63 +97,63 @@
   Type *Float64Ty = Type::getDoubleTy(Ctx);
 
   VectorType *ScV8Int32Ty = VectorType::get(Int32Ty, 8, true);
-  ASSERT_TRUE(ScV8Int32Ty->isScalable());
+  ASSERT_TRUE(isa<ScalableVectorType>(ScV8Int32Ty));
   EXPECT_EQ(ScV8Int32Ty->getNumElements(), 8U);
   EXPECT_EQ(ScV8Int32Ty->getElementType()->getScalarSizeInBits(), 32U);
 
   VectorType *ScV8Int16Ty = VectorType::get(Int16Ty, {8, true});
-  ASSERT_TRUE(ScV8Int16Ty->isScalable());
+  ASSERT_TRUE(isa<ScalableVectorType>(ScV8Int16Ty));
   EXPECT_EQ(ScV8Int16Ty->getNumElements(), 8U);
   EXPECT_EQ(ScV8Int16Ty->getElementType()->getScalarSizeInBits(), 16U);
 
   ElementCount EltCnt(4, true);
   VectorType *ScV4Int64Ty = VectorType::get(Int64Ty, EltCnt);
-  ASSERT_TRUE(ScV4Int64Ty->isScalable());
+  ASSERT_TRUE(isa<ScalableVectorType>(ScV4Int64Ty));
   EXPECT_EQ(ScV4Int64Ty->getNumElements(), 4U);
   EXPECT_EQ(ScV4Int64Ty->getElementType()->getScalarSizeInBits(), 64U);
 
   VectorType *ScV2Int64Ty = VectorType::get(Int64Ty, EltCnt/2);
-  ASSERT_TRUE(ScV2Int64Ty->isScalable());
+  ASSERT_TRUE(isa<ScalableVectorType>(ScV2Int64Ty));
   EXPECT_EQ(ScV2Int64Ty->getNumElements(), 2U);
   EXPECT_EQ(ScV2Int64Ty->getElementType()->getScalarSizeInBits(), 64U);
 
   VectorType *ScV8Int64Ty = VectorType::get(Int64Ty, EltCnt*2);
-  ASSERT_TRUE(ScV8Int64Ty->isScalable());
+  ASSERT_TRUE(isa<ScalableVectorType>(ScV8Int64Ty));
   EXPECT_EQ(ScV8Int64Ty->getNumElements(), 8U);
   EXPECT_EQ(ScV8Int64Ty->getElementType()->getScalarSizeInBits(), 64U);
 
   VectorType *ScV4Float64Ty = VectorType::get(Float64Ty, EltCnt);
-  ASSERT_TRUE(ScV4Float64Ty->isScalable());
+  ASSERT_TRUE(isa<ScalableVectorType>(ScV4Float64Ty));
   EXPECT_EQ(ScV4Float64Ty->getNumElements(), 4U);
   EXPECT_EQ(ScV4Float64Ty->getElementType()->getScalarSizeInBits(), 64U);
 
   VectorType *ExtTy = VectorType::getExtendedElementVectorType(ScV8Int16Ty);
   EXPECT_EQ(ExtTy, ScV8Int32Ty);
-  ASSERT_TRUE(ExtTy->isScalable());
+  ASSERT_TRUE(isa<ScalableVectorType>(ExtTy));
   EXPECT_EQ(ExtTy->getNumElements(), 8U);
   EXPECT_EQ(ExtTy->getElementType()->getScalarSizeInBits(), 32U);
 
   VectorType *TruncTy = VectorType::getTruncatedElementVectorType(ScV8Int32Ty);
   EXPECT_EQ(TruncTy, ScV8Int16Ty);
-  ASSERT_TRUE(TruncTy->isScalable());
+  ASSERT_TRUE(isa<ScalableVectorType>(TruncTy));
   EXPECT_EQ(TruncTy->getNumElements(), 8U);
   EXPECT_EQ(TruncTy->getElementType()->getScalarSizeInBits(), 16U);
 
   VectorType *HalvedTy = VectorType::getHalfElementsVectorType(ScV4Int64Ty);
   EXPECT_EQ(HalvedTy, ScV2Int64Ty);
-  ASSERT_TRUE(HalvedTy->isScalable());
+  ASSERT_TRUE(isa<ScalableVectorType>(HalvedTy));
   EXPECT_EQ(HalvedTy->getNumElements(), 2U);
   EXPECT_EQ(HalvedTy->getElementType()->getScalarSizeInBits(), 64U);
 
   VectorType *DoubledTy = VectorType::getDoubleElementsVectorType(ScV4Int64Ty);
   EXPECT_EQ(DoubledTy, ScV8Int64Ty);
-  ASSERT_TRUE(DoubledTy->isScalable());
+  ASSERT_TRUE(isa<ScalableVectorType>(DoubledTy));
   EXPECT_EQ(DoubledTy->getNumElements(), 8U);
   EXPECT_EQ(DoubledTy->getElementType()->getScalarSizeInBits(), 64U);
 
   VectorType *ConvTy = VectorType::getInteger(ScV4Float64Ty);
   EXPECT_EQ(ConvTy, ScV4Int64Ty);
-  ASSERT_TRUE(ConvTy->isScalable());
+  ASSERT_TRUE(isa<ScalableVectorType>(ConvTy));
   EXPECT_EQ(ConvTy->getNumElements(), 4U);
   EXPECT_EQ(ConvTy->getElementType()->getScalarSizeInBits(), 64U);
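For reference, here is a minimal sketch of the type queries this patch standardizes on: isa<ScalableVectorType> to test for scalability, and dyn_cast<FixedVectorType> wherever a concrete element count is needed. The function describeVectorType is hypothetical, purely illustrative, and not part of the patch.

#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Hypothetical helper: classify a type the way the rewritten code does,
// without ever calling VectorType::isScalable().
static void describeVectorType(Type *Ty) {
  if (isa<ScalableVectorType>(Ty)) {
    // Element count is a runtime multiple of vscale; only the minimum is
    // known statically.
    outs() << "scalable vector\n";
  } else if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
    outs() << "fixed-length vector of " << FVTy->getNumElements()
           << " elements\n";
  } else {
    outs() << "not a vector\n";
  }
}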