diff --git a/llvm/include/llvm/CodeGen/ValueTypes.h b/llvm/include/llvm/CodeGen/ValueTypes.h --- a/llvm/include/llvm/CodeGen/ValueTypes.h +++ b/llvm/include/llvm/CodeGen/ValueTypes.h @@ -18,6 +18,7 @@ #include "llvm/Support/Compiler.h" #include "llvm/Support/MachineValueType.h" #include "llvm/Support/MathExtras.h" +#include "llvm/Support/ScalableSize.h" #include #include #include @@ -209,7 +210,8 @@ /// Return true if the bit size is a multiple of 8. bool isByteSized() const { - return (getSizeInBits() & 7) == 0; + ScalableSize Bits = getScalableSizeInBits(); + return (Bits.MinSize & 7) == 0; } /// Return true if the size is a power-of-two number of bytes. @@ -221,31 +223,31 @@ /// Return true if this has the same number of bits as VT. bool bitsEq(EVT VT) const { if (EVT::operator==(VT)) return true; - return getSizeInBits() == VT.getSizeInBits(); + return getScalableSizeInBits() == VT.getScalableSizeInBits(); } /// Return true if this has more bits than VT. bool bitsGT(EVT VT) const { if (EVT::operator==(VT)) return false; - return getSizeInBits() > VT.getSizeInBits(); + return getScalableSizeInBits() > VT.getScalableSizeInBits(); } /// Return true if this has no less bits than VT. bool bitsGE(EVT VT) const { if (EVT::operator==(VT)) return true; - return getSizeInBits() >= VT.getSizeInBits(); + return getScalableSizeInBits() >= VT.getScalableSizeInBits(); } /// Return true if this has less bits than VT. bool bitsLT(EVT VT) const { if (EVT::operator==(VT)) return false; - return getSizeInBits() < VT.getSizeInBits(); + return getScalableSizeInBits() < VT.getScalableSizeInBits(); } /// Return true if this has no more bits than VT. bool bitsLE(EVT VT) const { if (EVT::operator==(VT)) return true; - return getSizeInBits() <= VT.getSizeInBits(); + return getScalableSizeInBits() <= VT.getScalableSizeInBits(); } /// Return the SimpleValueType held in the specified simple EVT. @@ -294,6 +296,18 @@ return getExtendedSizeInBits(); } + ScalableSize getScalableSizeInBits() const { + if (isSimple()) + return V.getScalableSizeInBits(); + return getScalableExtendedSizeInBits(); + } + + unsigned getMinSizeInBits() const { + if (isSimple()) + return V.getMinSizeInBits(); + return getMinExtendedSizeInBits(); + } + unsigned getScalarSizeInBits() const { return getScalarType().getSizeInBits(); } @@ -304,12 +318,29 @@ return (getSizeInBits() + 7) / 8; } + ScalableSize getScalableStoreSize() const { + ScalableSize SizeInBits = getScalableSizeInBits(); + return { (SizeInBits.MinSize + 7) / 8, SizeInBits.Scalable }; + } + + unsigned getMinStoreSize() const { + return (getMinSizeInBits() + 7) / 8; + } + /// Return the number of bits overwritten by a store of the specified value /// type. unsigned getStoreSizeInBits() const { return getStoreSize() * 8; } + ScalableSize getScalableStoreSizeInBits() const { + return getScalableStoreSize() * 8; + } + + unsigned getMinStoreSizeInBits() const { + return getMinStoreSize() * 8; + } + /// Rounds the bit-width of the given integer EVT up to the nearest power of /// two (and at least to eight), and returns the integer EVT with that /// number of bits. 
@@ -429,6 +460,8 @@ EVT getExtendedVectorElementType() const; unsigned getExtendedVectorNumElements() const LLVM_READONLY; unsigned getExtendedSizeInBits() const LLVM_READONLY; + ScalableSize getScalableExtendedSizeInBits() const LLVM_READONLY; + unsigned getMinExtendedSizeInBits() const LLVM_READONLY; }; } // end namespace llvm diff --git a/llvm/include/llvm/IR/DataLayout.h b/llvm/include/llvm/IR/DataLayout.h --- a/llvm/include/llvm/IR/DataLayout.h +++ b/llvm/include/llvm/IR/DataLayout.h @@ -29,6 +29,7 @@ #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" +#include "llvm/Support/ScalableSize.h" #include #include #include @@ -437,6 +438,9 @@ /// have a size (Type::isSized() must return true). uint64_t getTypeSizeInBits(Type *Ty) const; + ScalableSize getScalableTypeSizeInBits(Type *Ty) const; + uint64_t getMinTypeSizeInBits(Type *Ty) const; + /// Returns the maximum number of bytes that may be overwritten by /// storing the specified type. /// @@ -445,6 +449,16 @@ return (getTypeSizeInBits(Ty) + 7) / 8; } + ScalableSize getScalableTypeStoreSize(Type *Ty) const { + // Is overloading bits/bytes wise? + auto Bits = getScalableTypeSizeInBits(Ty); + return ScalableSize((Bits.MinSize+7)/8, Bits.Scalable); + } + + uint64_t getMinTypeStoreSize(Type *Ty) const { + return (getScalableTypeSizeInBits(Ty).getMinSize()+7)/8; + } + /// Returns the maximum number of bits that may be overwritten by /// storing the specified type; always a multiple of 8. /// @@ -453,12 +467,21 @@ return 8 * getTypeStoreSize(Ty); } + ScalableSize getScalableTypeStoreSizeInBits(Type *Ty) const { + auto Bytes = getScalableTypeStoreSize(Ty); + return {Bytes.MinSize * 8, Bytes.Scalable}; + } + + uint64_t getMinTypeStoreSizeInBits(Type *Ty) const { + return 8 * getMinTypeStoreSize(Ty); + } + /// Returns true if no extra padding bits are needed when storing the /// specified type. /// /// For example, returns false for i19 that has a 24-bit store size. bool typeSizeEqualsStoreSize(Type *Ty) const { - return getTypeSizeInBits(Ty) == getTypeStoreSizeInBits(Ty); + return getScalableTypeSizeInBits(Ty) == getScalableTypeStoreSizeInBits(Ty); } /// Returns the offset in bytes between successive objects of the @@ -471,6 +494,17 @@ return alignTo(getTypeStoreSize(Ty), getABITypeAlignment(Ty)); } + ScalableSize getScalableTypeAllocSize(Type *Ty) const { + auto Bytes = getScalableTypeStoreSize(Ty); + Bytes.MinSize = alignTo(Bytes.MinSize, getABITypeAlignment(Ty)); + + return Bytes; + } + + uint64_t getMinTypeAllocSize(Type *Ty) const { + return alignTo(getMinTypeStoreSize(Ty), getABITypeAlignment(Ty)); + } + /// Returns the offset in bits between successive objects of the /// specified type, including alignment padding; always a multiple of 8. /// @@ -480,6 +514,15 @@ return 8 * getTypeAllocSize(Ty); } + ScalableSize getScalableTypeAllocSizeInBits(Type *Ty) const { + auto Bytes = getScalableTypeAllocSize(Ty); + return {Bytes.MinSize * 8, Bytes.Scalable}; + } + + uint64_t getMinTypeAllocSizeInBits(Type *Ty) const { + return 8 * getMinTypeAllocSize(Ty); + } + /// Returns the minimum ABI-required alignment for the specified type. 
unsigned getABITypeAlignment(Type *Ty) const; @@ -631,6 +674,8 @@ return 80; case Type::VectorTyID: { VectorType *VTy = cast<VectorType>(Ty); + assert(!VTy->isScalable() && + "Scalable vector sizes cannot be represented by a scalar"); return VTy->getNumElements() * getTypeSizeInBits(VTy->getElementType()); } default: @@ -638,6 +683,23 @@ } } +inline ScalableSize DataLayout::getScalableTypeSizeInBits(Type *Ty) const { + switch (Ty->getTypeID()) { + default: + return {getTypeSizeInBits(Ty), false}; + case Type::VectorTyID: { + VectorType *VTy = cast<VectorType>(Ty); + auto EltCnt = VTy->getElementCount(); + uint64_t MinBits = EltCnt.Min * getTypeSizeInBits(VTy->getElementType()); + return {MinBits, EltCnt.Scalable}; + } + } +} + +inline uint64_t DataLayout::getMinTypeSizeInBits(Type *Ty) const { + return getScalableTypeSizeInBits(Ty).getMinSize(); +} + } // end namespace llvm #endif // LLVM_IR_DATALAYOUT_H diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h --- a/llvm/include/llvm/IR/InstrTypes.h +++ b/llvm/include/llvm/IR/InstrTypes.h @@ -975,7 +975,7 @@ static Type* makeCmpResultType(Type* opnd_type) { if (VectorType* vt = dyn_cast<VectorType>(opnd_type)) { return VectorType::get(Type::getInt1Ty(opnd_type->getContext()), - vt->getNumElements()); + vt->getElementCount()); } return Type::getInt1Ty(opnd_type->getContext()); } diff --git a/llvm/include/llvm/IR/Type.h b/llvm/include/llvm/IR/Type.h --- a/llvm/include/llvm/IR/Type.h +++ b/llvm/include/llvm/IR/Type.h @@ -21,6 +21,7 @@ #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/ScalableSize.h" #include #include #include @@ -288,6 +289,19 @@ /// unsigned getPrimitiveSizeInBits() const LLVM_READONLY; + /// Returns a ScalableSize for the type in question. This should be used in + /// place of getPrimitiveSizeInBits anywhere the type may be a + /// VectorType with the Scalable flag set. + ScalableSize getScalableSizeInBits() const LLVM_READONLY; + + /// Returns the minimum known size in bits, ignoring whether the type might + /// be a scalable vector. + unsigned getMinSizeInBits() const LLVM_READONLY; + + /// Returns the size in bits, asserting if called on a scalable + /// vector type. + unsigned getFixedSizeInBits() const LLVM_READONLY; + /// If this is a vector type, return the getPrimitiveSizeInBits value for the /// element type. Otherwise return the getPrimitiveSizeInBits value for this /// type.
diff --git a/llvm/include/llvm/Support/MachineValueType.h b/llvm/include/llvm/Support/MachineValueType.h --- a/llvm/include/llvm/Support/MachineValueType.h +++ b/llvm/include/llvm/Support/MachineValueType.h @@ -17,6 +17,7 @@ #include "llvm/ADT/iterator_range.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" +#include "llvm/Support/ScalableSize.h" #include namespace llvm { @@ -668,7 +669,60 @@ return { getVectorNumElements(), isScalableVector() }; } + ScalableSize getScalableSizeInBits() const { + switch (SimpleTy) { + default: return { getSizeInBits(), false }; + case nxv1i1: return { 1U, true }; + case nxv2i1: return { 2U, true }; + case nxv4i1: return { 4U, true }; + case nxv1i8: + case nxv8i1: return { 8U, true }; + case nxv16i1: + case nxv2i8: + case nxv1i16: return { 16U, true }; + case nxv32i1: + case nxv4i8: + case nxv2i16: + case nxv1i32: + case nxv2f16: + case nxv1f32: return { 32U, true }; + case nxv8i8: + case nxv4i16: + case nxv2i32: + case nxv1i64: + case nxv4f16: + case nxv2f32: + case nxv1f64: return { 64U, true }; + case nxv16i8: + case nxv8i16: + case nxv4i32: + case nxv2i64: + case nxv8f16: + case nxv4f32: + case nxv2f64: return { 128U, true }; + case nxv32i8: + case nxv16i16: + case nxv8i32: + case nxv4i64: + case nxv8f32: + case nxv4f64: return { 256U, true }; + case nxv32i16: + case nxv16i32: + case nxv8i64: + case nxv16f32: + case nxv8f64: return { 512U, true }; + case nxv32i32: + case nxv16i64: return { 1024U, true }; + case nxv32i64: return { 2048U, true }; + } + } + + unsigned getMinSizeInBits() const { + return getScalableSizeInBits().getMinSize(); + } + unsigned getSizeInBits() const { + assert(!isScalableVector() && "getSizeInBits called on scalable vector"); switch (SimpleTy) { default: llvm_unreachable("getSizeInBits called on extended MVT."); @@ -688,25 +742,17 @@ case Metadata: llvm_unreachable("Value type is metadata."); case i1: - case v1i1: - case nxv1i1: return 1; - case v2i1: - case nxv2i1: return 2; - case v4i1: - case nxv4i1: return 4; + case v1i1: return 1; + case v2i1: return 2; + case v4i1: return 4; case i8 : case v1i8: - case v8i1: - case nxv1i8: - case nxv8i1: return 8; + case v8i1: return 8; case i16 : case f16: case v16i1: case v2i8: - case v1i16: - case nxv16i1: - case nxv2i8: - case nxv1i16: return 16; + case v1i16: return 16; case f32 : case i32 : case v32i1: @@ -714,13 +760,7 @@ case v2i16: case v2f16: case v1f32: - case v1i32: - case nxv32i1: - case nxv4i8: - case nxv2i16: - case nxv1i32: - case nxv2f16: - case nxv1f32: return 32; + case v1i32: return 32; case x86mmx: case f64 : case i64 : @@ -731,14 +771,7 @@ case v1i64: case v4f16: case v2f32: - case v1f64: - case nxv8i8: - case nxv4i16: - case nxv2i32: - case nxv1i64: - case nxv4f16: - case nxv2f32: - case nxv1f64: return 64; + case v1f64: return 64; case f80 : return 80; case v3i32: case v3f32: return 96; @@ -753,14 +786,7 @@ case v1i128: case v8f16: case v4f32: - case v2f64: - case nxv16i8: - case nxv8i16: - case nxv4i32: - case nxv2i64: - case nxv8f16: - case nxv4f32: - case nxv2f64: return 128; + case v2f64: return 128; case v5i32: case v5f32: return 160; case v32i8: @@ -768,39 +794,25 @@ case v8i32: case v4i64: case v8f32: - case v4f64: - case nxv32i8: - case nxv16i16: - case nxv8i32: - case nxv4i64: - case nxv8f32: - case nxv4f64: return 256; + case v4f64: return 256; case v512i1: case v64i8: case v32i16: case v16i32: case v8i64: case v16f32: - case v8f64: - case nxv32i16: - case nxv16i32: - case nxv8i64: - case nxv16f32: - case nxv8f64: return 512; 
+ case v8f64: return 512; case v1024i1: case v128i8: case v64i16: case v32i32: case v16i64: - case v32f32: - case nxv32i32: - case nxv16i64: return 1024; + case v32f32: return 1024; case v256i8: case v128i16: case v64i32: case v32i64: - case v64f32: - case nxv32i64: return 2048; + case v64f32: return 2048; case v128i32: case v128f32: return 4096; case v256i32: @@ -825,30 +837,48 @@ return (getSizeInBits() + 7) / 8; } + ScalableSize getScalableStoreSize() const { + ScalableSize SizeInBits = getScalableSizeInBits(); + return { (SizeInBits.MinSize + 7) / 8, SizeInBits.Scalable }; + } + + unsigned getMinStoreSize() const { + return getScalableStoreSize().MinSize; + } + /// Return the number of bits overwritten by a store of the specified value /// type. unsigned getStoreSizeInBits() const { return getStoreSize() * 8; } + ScalableSize getScalableStoreSizeInBits() const { + ScalableSize SizeInBytes = getScalableStoreSize(); + return { SizeInBytes.MinSize * 8, SizeInBytes.Scalable }; + } + + unsigned getMinStoreSizeInBits() const { + return getScalableStoreSizeInBits().MinSize; + } + /// Return true if this has more bits than VT. bool bitsGT(MVT VT) const { - return getSizeInBits() > VT.getSizeInBits(); + return getScalableSizeInBits() > VT.getScalableSizeInBits(); } /// Return true if this has no less bits than VT. bool bitsGE(MVT VT) const { - return getSizeInBits() >= VT.getSizeInBits(); + return getScalableSizeInBits() >= VT.getScalableSizeInBits(); } /// Return true if this has less bits than VT. bool bitsLT(MVT VT) const { - return getSizeInBits() < VT.getSizeInBits(); + return getScalableSizeInBits() < VT.getScalableSizeInBits(); } /// Return true if this has no more bits than VT. bool bitsLE(MVT VT) const { - return getSizeInBits() <= VT.getSizeInBits(); + return getScalableSizeInBits() <= VT.getScalableSizeInBits(); } static MVT getFloatingPointVT(unsigned BitWidth) { diff --git a/llvm/include/llvm/Support/ScalableSize.h b/llvm/include/llvm/Support/ScalableSize.h --- a/llvm/include/llvm/Support/ScalableSize.h +++ b/llvm/include/llvm/Support/ScalableSize.h @@ -38,6 +38,79 @@ } }; +struct ScalableSize { + uint64_t MinSize; + bool Scalable; + + constexpr ScalableSize(uint64_t MinSize, bool Scalable) + : MinSize(MinSize), Scalable(Scalable) {} + + ScalableSize() = delete; + + bool operator==(const ScalableSize& RHS) const { + if (Scalable == RHS.Scalable) + return MinSize == RHS.MinSize; + + return false; + } + + bool operator!=(const ScalableSize& RHS) const { + if (Scalable == RHS.Scalable) + return MinSize != RHS.MinSize; + + return true; + } + + bool operator<(const ScalableSize& RHS) const { + if (Scalable == RHS.Scalable) + return MinSize < RHS.MinSize; + + llvm_unreachable("Size comparison of scalable and fixed types"); + } + + bool operator<=(const ScalableSize& RHS) const { + if (Scalable == RHS.Scalable) + return MinSize <= RHS.MinSize; + + llvm_unreachable("Size comparison of scalable and fixed types"); + } + + bool operator>(const ScalableSize& RHS) const { + if (Scalable == RHS.Scalable) + return MinSize > RHS.MinSize; + + llvm_unreachable("Size comparison of scalable and fixed types"); + } + + bool operator>=(const ScalableSize& RHS) const { + if (Scalable == RHS.Scalable) + return MinSize >= RHS.MinSize; + + llvm_unreachable("Size comparison of scalable and fixed types"); + } + + ScalableSize operator*(unsigned RHS) const { + return { MinSize * RHS, Scalable }; + } + + ScalableSize operator/(unsigned RHS) const { + return { MinSize / RHS, Scalable }; + } + + uint64_t 
getFixedSize() const { + assert(!Scalable && "Request for a fixed size on a scalable object"); + return MinSize; + } + + uint64_t getMinSize() const { + return MinSize; + } + + bool isScalable() const { + return Scalable; + } +}; + } // end namespace llvm #endif // LLVM_SUPPORT_SCALABLESIZE_H diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp @@ -23,6 +23,7 @@ #include "llvm/IR/DataLayout.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" +#include "llvm/Support/ScalableSize.h" using namespace llvm; #define DEBUG_TYPE "legalize-types" @@ -4597,7 +4598,8 @@ unsigned Width, EVT WidenVT, unsigned Align = 0, unsigned WidenEx = 0) { EVT WidenEltVT = WidenVT.getVectorElementType(); - unsigned WidenWidth = WidenVT.getSizeInBits(); + const bool Scalable = WidenVT.isScalableVector(); + unsigned WidenWidth = WidenVT.getMinSizeInBits(); unsigned WidenEltWidth = WidenEltVT.getSizeInBits(); unsigned AlignInBits = Align*8; @@ -4607,24 +4609,28 @@ return RetVT; // See if there is larger legal integer than the element type to load/store. + // Don't bother looking for an integer type if the vector is scalable, skip + // to vector types. unsigned VT; - for (VT = (unsigned)MVT::LAST_INTEGER_VALUETYPE; - VT >= (unsigned)MVT::FIRST_INTEGER_VALUETYPE; --VT) { - EVT MemVT((MVT::SimpleValueType) VT); - unsigned MemVTWidth = MemVT.getSizeInBits(); - if (MemVT.getSizeInBits() <= WidenEltWidth) - break; - auto Action = TLI.getTypeAction(*DAG.getContext(), MemVT); - if ((Action == TargetLowering::TypeLegal || - Action == TargetLowering::TypePromoteInteger) && - (WidenWidth % MemVTWidth) == 0 && - isPowerOf2_32(WidenWidth / MemVTWidth) && - (MemVTWidth <= Width || - (Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) { - if (MemVTWidth == WidenWidth) - return MemVT; - RetVT = MemVT; - break; + if (!Scalable) { + for (VT = (unsigned)MVT::LAST_INTEGER_VALUETYPE; + VT >= (unsigned)MVT::FIRST_INTEGER_VALUETYPE; --VT) { + EVT MemVT((MVT::SimpleValueType) VT); + unsigned MemVTWidth = MemVT.getSizeInBits(); + if (MemVT.getSizeInBits() <= WidenEltWidth) + break; + auto Action = TLI.getTypeAction(*DAG.getContext(), MemVT); + if ((Action == TargetLowering::TypeLegal || + Action == TargetLowering::TypePromoteInteger) && + (WidenWidth % MemVTWidth) == 0 && + isPowerOf2_32(WidenWidth / MemVTWidth) && + (MemVTWidth <= Width || + (Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) { + if (MemVTWidth == WidenWidth) + return MemVT; + RetVT = MemVT; + break; + } } } @@ -4633,7 +4639,10 @@ for (VT = (unsigned)MVT::LAST_VECTOR_VALUETYPE; VT >= (unsigned)MVT::FIRST_VECTOR_VALUETYPE; --VT) { EVT MemVT = (MVT::SimpleValueType) VT; - unsigned MemVTWidth = MemVT.getSizeInBits(); + // Skip vector MVTs which don't match the scalable property of WidenVT. 
+ if (Scalable != MemVT.isScalableVector()) + continue; + unsigned MemVTWidth = MemVT.getMinSizeInBits(); auto Action = TLI.getTypeAction(*DAG.getContext(), MemVT); if ((Action == TargetLowering::TypeLegal || Action == TargetLowering::TypePromoteInteger) && diff --git a/llvm/lib/CodeGen/ValueTypes.cpp b/llvm/lib/CodeGen/ValueTypes.cpp --- a/llvm/lib/CodeGen/ValueTypes.cpp +++ b/llvm/lib/CodeGen/ValueTypes.cpp @@ -11,6 +11,7 @@ #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Type.h" #include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/ScalableSize.h" using namespace llvm; EVT EVT::changeExtendedTypeToInteger() const { @@ -110,6 +111,18 @@ llvm_unreachable("Unrecognized extended type!"); } +ScalableSize EVT::getScalableExtendedSizeInBits() const { + assert(isExtended() && "Type is not extended!"); + if (VectorType *VTy = dyn_cast<VectorType>(LLVMTy)) + return VTy->getScalableSizeInBits(); + return { getExtendedSizeInBits(), false }; +} + +unsigned EVT::getMinExtendedSizeInBits() const { + assert(isExtended() && "Type is not extended!"); + return getScalableExtendedSizeInBits().getMinSize(); +} + /// getEVTString - This function returns value type as a string, e.g. "i32". std::string EVT::getEVTString() const { switch (V.SimpleTy) { diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp --- a/llvm/lib/IR/DataLayout.cpp +++ b/llvm/lib/IR/DataLayout.cpp @@ -29,6 +29,7 @@ #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" +#include "llvm/Support/ScalableSize.h" #include #include #include @@ -740,7 +741,8 @@ llvm_unreachable("Bad type for getAlignment!!!"); } - return getAlignmentInfo(AlignType, getTypeSizeInBits(Ty), abi_or_pref, Ty); + // We only care about the minimum size for alignment. + return getAlignmentInfo(AlignType, getMinTypeSizeInBits(Ty), abi_or_pref, Ty); } unsigned DataLayout::getABITypeAlignment(Type *Ty) const { diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp --- a/llvm/lib/IR/Instructions.cpp +++ b/llvm/lib/IR/Instructions.cpp @@ -38,6 +38,7 @@ #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" +#include "llvm/Support/ScalableSize.h" #include #include #include @@ -1778,7 +1779,7 @@ const Twine &Name, Instruction *InsertBefore) : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(), - cast<VectorType>(Mask->getType())->getNumElements()), + cast<VectorType>(Mask->getType())->getElementCount()), ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this), OperandTraits<ShuffleVectorInst>::operands(this), @@ -1795,7 +1796,7 @@ const Twine &Name, BasicBlock *InsertAtEnd) : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(), - cast<VectorType>(Mask->getType())->getNumElements()), + cast<VectorType>(Mask->getType())->getElementCount()), ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this), OperandTraits<ShuffleVectorInst>::operands(this), @@ -2968,8 +2969,8 @@ } // Get the bit sizes, we'll need these - unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr - unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr + auto SrcBits = SrcTy->getScalableSizeInBits(); // 0 for ptr + auto DestBits = DestTy->getScalableSizeInBits(); // 0 for ptr // Run through the possibilities ...
if (DestTy->isIntegerTy()) { // Casting to integral @@ -3030,12 +3031,12 @@ } } - unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr - unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr + auto SrcBits = SrcTy->getScalableSizeInBits(); // 0 for ptr + auto DestBits = DestTy->getScalableSizeInBits(); // 0 for ptr // Could still have vectors of pointers if the number of elements doesn't // match - if (SrcBits == 0 || DestBits == 0) + if (SrcBits.MinSize == 0 || DestBits.MinSize == 0) return false; if (SrcBits != DestBits) @@ -3245,7 +3246,7 @@ // For non-pointer cases, the cast is okay if the source and destination bit // widths are identical. if (!SrcPtrTy) - return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits(); + return SrcTy->getScalableSizeInBits() == DstTy->getScalableSizeInBits(); // If both are pointers then the address spaces must match. if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) diff --git a/llvm/lib/IR/Type.cpp b/llvm/lib/IR/Type.cpp --- a/llvm/lib/IR/Type.cpp +++ b/llvm/lib/IR/Type.cpp @@ -26,6 +26,7 @@ #include "llvm/Support/Casting.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" +#include "llvm/Support/ScalableSize.h" #include #include @@ -121,7 +122,11 @@ case Type::PPC_FP128TyID: return 128; case Type::X86_MMXTyID: return 64; case Type::IntegerTyID: return cast<IntegerType>(this)->getBitWidth(); - case Type::VectorTyID: return cast<VectorType>(this)->getBitWidth(); + case Type::VectorTyID: { + const VectorType *VTy = cast<VectorType>(this); + assert(!VTy->isScalable() && "Scalable vectors are not a primitive type"); + return VTy->getBitWidth(); + } default: return 0; } } @@ -130,6 +135,23 @@ return getScalarType()->getPrimitiveSizeInBits(); } +ScalableSize Type::getScalableSizeInBits() const { + if (auto *VTy = dyn_cast<VectorType>(this)) + return {VTy->getBitWidth(), VTy->isScalable()}; + + return {getPrimitiveSizeInBits(), false}; +} + +unsigned Type::getMinSizeInBits() const { + return getScalableSizeInBits().MinSize; +} + +unsigned Type::getFixedSizeInBits() const { + auto Size = getScalableSizeInBits(); + assert(!Size.Scalable && "Request for a fixed size on a scalable vector"); + return Size.MinSize; +} + int Type::getFPMantissaWidth() const { if (auto *VTy = dyn_cast<VectorType>(this)) return VTy->getElementType()->getFPMantissaWidth(); diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp --- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -1435,6 +1435,10 @@ }; for (MVT VT : MVT::vector_valuetypes()) { + // Scalable vectors aren't supported on this backend. + if (VT.isScalableVector()) + continue; + for (unsigned VectExpOp : VectExpOps) setOperationAction(VectExpOp, VT, Expand); @@ -1848,7 +1852,7 @@ TargetLoweringBase::LegalizeTypeAction HexagonTargetLowering::getPreferredVectorAction(MVT VT) const { - if (VT.getVectorNumElements() == 1) + if (VT.getVectorNumElements() == 1 || VT.isScalableVector()) return TargetLoweringBase::TypeScalarizeVector; // Always widen vectors of i1.
diff --git a/llvm/lib/Target/Hexagon/HexagonSubtarget.h b/llvm/lib/Target/Hexagon/HexagonSubtarget.h --- a/llvm/lib/Target/Hexagon/HexagonSubtarget.h +++ b/llvm/lib/Target/Hexagon/HexagonSubtarget.h @@ -228,7 +228,7 @@ } bool isHVXVectorType(MVT VecTy, bool IncludeBool = false) const { - if (!VecTy.isVector() || !useHVXOps()) + if (!VecTy.isVector() || !useHVXOps() || VecTy.isScalableVector()) return false; MVT ElemTy = VecTy.getVectorElementType(); if (!IncludeBool && ElemTy == MVT::i1) diff --git a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp --- a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp +++ b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp @@ -45,6 +45,8 @@ bool HexagonTTIImpl::isTypeForHVX(Type *VecTy) const { assert(VecTy->isVectorTy()); + if (cast(VecTy)->isScalable()) + return false; // Avoid types like <2 x i32*>. if (!cast(VecTy)->getElementType()->isIntegerTy()) return false; diff --git a/llvm/test/Other/scalable-vectors-core-ir.ll b/llvm/test/Other/scalable-vectors-core-ir.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Other/scalable-vectors-core-ir.ll @@ -0,0 +1,393 @@ +; RUN: opt -S -verify < %s | FileCheck %s +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" +target triple = "aarch64--linux-gnu" + +;; Check supported instructions are accepted without dropping 'vscale'. +;; Same order as the LangRef + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Unary Operations +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + + +define @fneg( %val) { +; CHECK-LABEL: @fneg +; CHECK: %r = fneg %val +; CHECK-NEXT: ret %r + %r = fneg %val + ret %r +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Binary Operations +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +define @add( %a, %b) { +; CHECK-LABEL: @add +; CHECK: %r = add %a, %b +; CHECK-NEXT: ret %r + %r = add %a, %b + ret %r +} + +define @fadd( %a, %b) { +; CHECK-LABEL: @fadd +; CHECK: %r = fadd %a, %b +; CHECK-NEXT: ret %r + %r = fadd %a, %b + ret %r +} + +define @sub( %a, %b) { +; CHECK-LABEL: @sub +; CHECK: %r = sub %a, %b +; CHECK-NEXT: ret %r + %r = sub %a, %b + ret %r +} + +define @fsub( %a, %b) { +; CHECK-LABEL: @fsub +; CHECK: %r = fsub %a, %b +; CHECK-NEXT: ret %r + %r = fsub %a, %b + ret %r +} + +define @mul( %a, %b) { +; CHECK-LABEL: @mul +; CHECK: %r = mul %a, %b +; CHECK-NEXT: ret %r + %r = mul %a, %b + ret %r +} + +define @fmul( %a, %b) { +; CHECK-LABEL: @fmul +; CHECK: %r = fmul %a, %b +; CHECK-NEXT: ret %r + %r = fmul %a, %b + ret %r +} + +define @udiv( %a, %b) { +; CHECK-LABEL: @udiv +; CHECK: %r = udiv %a, %b +; CHECK-NEXT: ret %r + %r = udiv %a, %b + ret %r +} + +define @sdiv( %a, %b) { +; CHECK-LABEL: @sdiv +; CHECK: %r = sdiv %a, %b +; CHECK-NEXT: ret %r + %r = sdiv %a, %b + ret %r +} + +define @fdiv( %a, %b) { +; CHECK-LABEL: @fdiv +; CHECK: %r = fdiv %a, %b +; CHECK-NEXT: ret %r + %r = fdiv %a, %b + ret %r +} + +define @urem( %a, %b) { +; CHECK-LABEL: @urem +; CHECK: %r = urem %a, %b +; CHECK-NEXT: ret %r + %r = urem %a, %b + ret %r +} + +define @srem( %a, %b) { +; CHECK-LABEL: @srem +; CHECK: %r = srem %a, %b +; CHECK-NEXT: ret %r + %r = srem %a, %b + ret %r +} + +define @frem( %a, %b) { +; CHECK-LABEL: @frem +; CHECK: %r = frem %a, %b +; CHECK-NEXT: ret %r + %r = frem %a, %b + ret %r +} + 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Bitwise Binary Operations +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +define @shl( %a, %b) { +; CHECK-LABEL: @shl +; CHECK: %r = shl %a, %b +; CHECK-NEXT: ret %r + %r = shl %a, %b + ret %r +} + +define @lshr( %a, %b) { +; CHECK-LABEL: @lshr +; CHECK: %r = lshr %a, %b +; CHECK-NEXT: ret %r + %r = lshr %a, %b + ret %r +} + +define @ashr( %a, %b) { +; CHECK-LABEL: @ashr +; CHECK: %r = ashr %a, %b +; CHECK-NEXT: ret %r + %r = ashr %a, %b + ret %r +} + +define @and( %a, %b) { +; CHECK-LABEL: @and +; CHECK: %r = and %a, %b +; CHECK-NEXT: ret %r + %r = and %a, %b + ret %r +} + +define @or( %a, %b) { +; CHECK-LABEL: @or +; CHECK: %r = or %a, %b +; CHECK-NEXT: ret %r + %r = or %a, %b + ret %r +} + +define @xor( %a, %b) { +; CHECK-LABEL: @xor +; CHECK: %r = xor %a, %b +; CHECK-NEXT: ret %r + %r = xor %a, %b + ret %r +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Vector Operations +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +define i64 @extractelement( %val) { +; CHECK-LABEL: @extractelement +; CHECK: %r = extractelement %val, i32 0 +; CHECK-NEXT: ret i64 %r + %r = extractelement %val, i32 0 + ret i64 %r +} + +define @insertelement( %vec, i8 %ins) { +; CHECK-LABEL: @insertelement +; CHECK: %r = insertelement %vec, i8 %ins, i32 0 +; CHECK-NEXT: ret %r + %r = insertelement %vec, i8 %ins, i32 0 + ret %r +} + +define @shufflevector(half %val) { +; CHECK-LABEL: @shufflevector +; CHECK: %insvec = insertelement undef, half %val, i32 0 +; CHECK-NEXT: %r = shufflevector %insvec, undef, zeroinitializer +; CHECK-NEXT: ret %r + %insvec = insertelement undef, half %val, i32 0 + %r = shufflevector %insvec, undef, zeroinitializer + ret %r +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Memory Access and Addressing Operations +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +define void @alloca() { +; CHECK-LABEL: @alloca +; CHECK: %vec = alloca +; CHECK-NEXT: ret void + %vec = alloca + ret void +} + +define @load(* %ptr) { +; CHECK-LABEL: @load +; CHECK: %r = load , * %ptr +; CHECK-NEXT: ret %r + %r = load , * %ptr + ret %r +} + +define void @store( %data, * %ptr) { +; CHECK-LABEL: @store +; CHECK: store %data, * %ptr +; CHECK-NEXT: ret void + store %data, * %ptr + ret void +} + +define * @getelementptr(* %base) { +; CHECK-LABEL: @getelementptr +; CHECK: %r = getelementptr , * %base, i64 0 +; CHECK-NEXT: ret * %r + %r = getelementptr , * %base, i64 0 + ret * %r +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Conversion Operations +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +define @truncto( %val) { +; CHECK-LABEL: @truncto +; CHECK: %r = trunc %val to +; CHECK-NEXT: ret %r + %r = trunc %val to + ret %r +} + +define @zextto( %val) { +; CHECK-LABEL: @zextto +; CHECK: %r = zext %val to +; CHECK-NEXT: ret %r + %r = zext %val to + ret %r +} + +define @sextto( %val) { +; CHECK-LABEL: @sextto +; CHECK: %r = sext %val to +; CHECK-NEXT: ret %r + %r = sext %val to + ret %r +} + +define @fptruncto( %val) { +; CHECK-LABEL: @fptruncto +; CHECK: %r = fptrunc %val to +; CHECK-NEXT: ret %r + %r = fptrunc %val to + ret %r +} + +define @fpextto( %val) { +; CHECK-LABEL: @fpextto +; CHECK: %r = fpext %val to +; CHECK-NEXT: ret %r + %r = fpext %val to + ret %r +} + 
+define @fptouito( %val) { +; CHECK-LABEL: @fptoui +; CHECK: %r = fptoui %val to +; CHECK-NEXT: ret %r + %r = fptoui %val to + ret %r +} + +define @fptosito( %val) { +; CHECK-LABEL: @fptosi +; CHECK: %r = fptosi %val to +; CHECK-NEXT: ret %r + %r = fptosi %val to + ret %r +} + +define @uitofpto( %val) { +; CHECK-LABEL: @uitofp +; CHECK: %r = uitofp %val to +; CHECK-NEXT: ret %r + %r = uitofp %val to + ret %r +} + +define @sitofpto( %val) { +; CHECK-LABEL: @sitofp +; CHECK: %r = sitofp %val to +; CHECK-NEXT: ret %r + %r = sitofp %val to + ret %r +} + +define @ptrtointto( %val) { +; CHECK-LABEL: @ptrtointto +; CHECK: %r = ptrtoint %val to +; CHECK-NEXT: ret %r + %r = ptrtoint %val to + ret %r +} + +define @inttoptrto( %val) { +; CHECK-LABEL: @inttoptrto +; CHECK: %r = inttoptr %val to +; CHECK-NEXT: ret %r + %r = inttoptr %val to + ret %r +} + +define @bitcastto( %a) { +; CHECK-LABEL: @bitcast +; CHECK: %r = bitcast %a to +; CHECK-NEXT: ret %r + %r = bitcast %a to + ret %r +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Other Operations +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +define @icmp( %a, %b) { +; CHECK-LABEL: @icmp +; CHECK: %r = icmp eq %a, %b +; CHECK-NEXT: ret %r + %r = icmp eq %a, %b + ret %r +} + +define @fcmp( %a, %b) { +; CHECK-LABEL: @fcmp +; CHECK: %r = fcmp une %a, %b +; CHECK-NEXT: ret %r + %r = fcmp une %a, %b + ret %r +} + +define @phi( %a, i32 %val) { +; CHECK-LABEL: @phi +; CHECK: %r = phi [ %a, %entry ], [ %added, %iszero ] +; CHECK-NEXT: ret %r +entry: + %cmp = icmp eq i32 %val, 0 + br i1 %cmp, label %iszero, label %end + +iszero: + %ins = insertelement undef, i8 1, i32 0 + %splatone = shufflevector %ins, undef, zeroinitializer + %added = add %a, %splatone + br label %end + +end: + %r = phi [ %a, %entry ], [ %added, %iszero ] + ret %r +} + +define @select( %a, %b, %sval) { +; CHECK-LABEL: @select +; CHECK: %r = select %sval, %a, %b +; CHECK-NEXT: ret %r + %r = select %sval, %a, %b + ret %r +} + +declare @callee() +define @call( %val) { +; CHECK-LABEL: @call +; CHECK: %r = call @callee( %val) +; CHECK-NEXT: ret %r + %r = call @callee( %val) + ret %r +} \ No newline at end of file diff --git a/llvm/unittests/IR/VectorTypesTest.cpp b/llvm/unittests/IR/VectorTypesTest.cpp --- a/llvm/unittests/IR/VectorTypesTest.cpp +++ b/llvm/unittests/IR/VectorTypesTest.cpp @@ -160,5 +160,78 @@ EXPECT_EQ(EltCnt.Min, 8U); ASSERT_TRUE(EltCnt.Scalable); } +TEST(VectorTypesTest, FixedLenComparisons) { + LLVMContext Ctx; + + Type *Int32Ty = Type::getInt32Ty(Ctx); + Type *Int64Ty = Type::getInt64Ty(Ctx); + + VectorType *V2Int32Ty = VectorType::get(Int32Ty, 2); + VectorType *V4Int32Ty = VectorType::get(Int32Ty, 4); + + VectorType *V2Int64Ty = VectorType::get(Int64Ty, 2); + + ScalableSize V2I32Len = V2Int32Ty->getScalableSizeInBits(); + EXPECT_EQ(V2I32Len.MinSize, 64U); + EXPECT_FALSE(V2I32Len.Scalable); + + EXPECT_LT(V2Int32Ty->getScalableSizeInBits(), + V4Int32Ty->getScalableSizeInBits()); + EXPECT_GT(V2Int64Ty->getScalableSizeInBits(), + V2Int32Ty->getScalableSizeInBits()); + EXPECT_EQ(V4Int32Ty->getScalableSizeInBits(), + V2Int64Ty->getScalableSizeInBits()); + EXPECT_NE(V2Int32Ty->getScalableSizeInBits(), + V2Int64Ty->getScalableSizeInBits()); + + // Check that a fixed-only comparison works for fixed size vectors. 
+ EXPECT_EQ(V2Int64Ty->getFixedSizeInBits(), + V4Int32Ty->getFixedSizeInBits()); +} + +TEST(VectorTypesTest, ScalableComparisons) { + LLVMContext Ctx; + + Type *Int32Ty = Type::getInt32Ty(Ctx); + Type *Int64Ty = Type::getInt64Ty(Ctx); + + VectorType *ScV2Int32Ty = VectorType::get(Int32Ty, {2, true}); + VectorType *ScV4Int32Ty = VectorType::get(Int32Ty, {4, true}); + + VectorType *ScV2Int64Ty = VectorType::get(Int64Ty, {2, true}); + + ScalableSize ScV2I32Len = ScV2Int32Ty->getScalableSizeInBits(); + EXPECT_EQ(ScV2I32Len.MinSize, 64U); + EXPECT_TRUE(ScV2I32Len.Scalable); + + EXPECT_LT(ScV2Int32Ty->getScalableSizeInBits(), + ScV4Int32Ty->getScalableSizeInBits()); + EXPECT_GT(ScV2Int64Ty->getScalableSizeInBits(), + ScV2Int32Ty->getScalableSizeInBits()); + EXPECT_EQ(ScV4Int32Ty->getScalableSizeInBits(), + ScV2Int64Ty->getScalableSizeInBits()); + EXPECT_NE(ScV2Int32Ty->getScalableSizeInBits(), + ScV2Int64Ty->getScalableSizeInBits()); +} + +TEST(VectorTypesTest, CrossComparisons) { + LLVMContext Ctx; + + Type *Int32Ty = Type::getInt32Ty(Ctx); + + VectorType *V4Int32Ty = VectorType::get(Int32Ty, {4, false}); + VectorType *ScV4Int32Ty = VectorType::get(Int32Ty, {4, true}); + + // Even though the minimum size is the same, a scalable vector could be + // larger, so we don't consider them to be the same size. + EXPECT_NE(V4Int32Ty->getScalableSizeInBits(), + ScV4Int32Ty->getScalableSizeInBits()); + // If we are only checking the minimum, then they are the same size. + EXPECT_EQ(V4Int32Ty->getMinSizeInBits(), + ScV4Int32Ty->getMinSizeInBits()); + + // We can't use ordering comparisons (<,<=,>,>=) between scalable and + // non-scalable vector sizes. +} } // end anonymous namespace diff --git a/llvm/utils/TableGen/CodeGenDAGPatterns.cpp b/llvm/utils/TableGen/CodeGenDAGPatterns.cpp --- a/llvm/utils/TableGen/CodeGenDAGPatterns.cpp +++ b/llvm/utils/TableGen/CodeGenDAGPatterns.cpp @@ -23,6 +23,7 @@ #include "llvm/ADT/Twine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/ScalableSize.h" #include "llvm/TableGen/Error.h" #include "llvm/TableGen/Record.h" #include @@ -503,9 +504,16 @@ } auto LT = [](MVT A, MVT B) -> bool { + // Always treat non-scalable MVTs as smaller than scalable MVTs for the + // purposes of ordering. + if (A.isScalableVector() && !B.isScalableVector()) + return false; + if (!A.isScalableVector() && B.isScalableVector()) + return true; + return A.getScalarSizeInBits() < B.getScalarSizeInBits() || (A.getScalarSizeInBits() == B.getScalarSizeInBits() && - A.getSizeInBits() < B.getSizeInBits()); + A.getScalableSizeInBits() < B.getScalableSizeInBits()); }; auto LE = [&LT](MVT A, MVT B) -> bool { // This function is used when removing elements: when a vector is compared // to a non-vector, it should return false. if (A.isVector() != B.isVector()) return false; + // We also don't want to remove elements when they're both vectors with the + // same minimum number of lanes, but one is scalable and the other not. + if (A.isScalableVector() != B.isScalableVector()) + return false; + return LT(A, B) || (A.getScalarSizeInBits() == B.getScalarSizeInBits() && - A.getSizeInBits() == B.getSizeInBits()); + A.getScalableSizeInBits() == B.getScalableSizeInBits()); }; for (unsigned M : Modes) {
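
Usage note (not part of the patch): as a rough sketch of how a caller that previously compared raw getSizeInBits()/getTypeSizeInBits() results might migrate to the ScalableSize-aware queries introduced above. The helper name fitsInSlot and its parameters are hypothetical; the explicit bail-out for mixed fixed/scalable sizes mirrors the llvm_unreachable in the ScalableSize relational operators.

// Hypothetical client-side sketch, assuming only the interfaces added in this patch.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/ScalableSize.h"

using namespace llvm;

// Returns true if a value of type From fits in a slot of type To without
// truncation. Both types must be sized.
static bool fitsInSlot(const DataLayout &DL, Type *From, Type *To) {
  ScalableSize FromBits = DL.getScalableTypeSizeInBits(From);
  ScalableSize ToBits = DL.getScalableTypeSizeInBits(To);
  // Ordering comparisons between fixed and scalable sizes are not meaningful;
  // ScalableSize's <, <=, >, >= would hit llvm_unreachable, so reject the
  // mixed case explicitly.
  if (FromBits.isScalable() != ToBits.isScalable())
    return false;
  return FromBits <= ToBits;
}

When only the minimum size matters (as in the DataLayout alignment change above), getMinTypeSizeInBits()/getMinSizeInBits() can be used instead of the full ScalableSize comparison.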