diff --git a/llvm/include/llvm/IR/DataLayout.h b/llvm/include/llvm/IR/DataLayout.h
--- a/llvm/include/llvm/IR/DataLayout.h
+++ b/llvm/include/llvm/IR/DataLayout.h
@@ -30,6 +30,7 @@
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/Alignment.h"
+#include "llvm/Support/ScalableSize.h"
 #include <cassert>
 #include <cstdint>
 #include <string>
@@ -436,30 +437,77 @@
   ///
   /// For example, returns 36 for i36 and 80 for x86_fp80. The type passed must
   /// have a size (Type::isSized() must return true).
+  ///
+  /// An assert will occur if this is called on a scalable vector type.
   uint64_t getTypeSizeInBits(Type *Ty) const;
+
+  /// Returns the minimum number of bits necessary to hold the specified type,
+  /// along with a boolean indicating whether the runtime size is exactly that
+  /// minimum (if false) or an integer multiple of it (if true).
+  ScalableSize getScalableTypeSizeInBits(Type *Ty) const;
+
+  /// Returns the size of the type in bits. If the type is scalable, this
+  /// quantity represents the known minimum size. If the type is not scalable,
+  /// it represents the exact size.
+  uint64_t getKnownMinTypeSizeInBits(Type *Ty) const;
+
   /// Returns the maximum number of bytes that may be overwritten by
   /// storing the specified type.
   ///
   /// For example, returns 5 for i36 and 10 for x86_fp80.
+  ///
+  /// An assert will occur if this is called on a scalable vector type.
   uint64_t getTypeStoreSize(Type *Ty) const {
     return (getTypeSizeInBits(Ty) + 7) / 8;
   }
+
+  /// Returns the number of bytes overwritten by a store of the specified
+  /// type, along with a boolean indicating whether the runtime size written
+  /// is exactly that size (if false) or an integer multiple of it (if true).
+  ScalableSize getScalableTypeStoreSize(Type *Ty) const {
+    auto Bits = getScalableTypeSizeInBits(Ty);
+    return ScalableSize((Bits.getKnownMinSize() + 7) / 8, Bits.isScalable());
+  }
+
+  /// Returns the number of bytes overwritten by a store of the specified type.
+  /// If the type is scalable, this quantity represents the known minimum size.
+  /// If not scalable, it represents the exact size.
+  uint64_t getKnownMinTypeStoreSize(Type *Ty) const {
+    return (getScalableTypeSizeInBits(Ty).getKnownMinSize() + 7) / 8;
+  }
+
   /// Returns the maximum number of bits that may be overwritten by
   /// storing the specified type; always a multiple of 8.
   ///
   /// For example, returns 40 for i36 and 80 for x86_fp80.
+  ///
+  /// An assert will occur if this is called on a scalable vector type.
   uint64_t getTypeStoreSizeInBits(Type *Ty) const {
     return 8 * getTypeStoreSize(Ty);
   }
+
+  /// Returns the number of bits overwritten by a store of the specified
+  /// type, along with a boolean indicating whether the runtime size written
+  /// is exactly that size (if false) or an integer multiple of it (if true).
+  ScalableSize getScalableTypeStoreSizeInBits(Type *Ty) const {
+    auto Bytes = getScalableTypeStoreSize(Ty);
+    return {Bytes.getKnownMinSize() * 8, Bytes.isScalable()};
+  }
+
+  /// Returns the number of bits overwritten by a store of the specified type.
+  /// If the type is scalable, this quantity represents the known minimum
+  /// size. If not scalable, it represents the exact size.
+  uint64_t getKnownMinTypeStoreSizeInBits(Type *Ty) const {
+    return 8 * getKnownMinTypeStoreSize(Ty);
+  }
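For illustration, a standalone sketch (not part of this patch) of how the three query flavors above compose; sizeQueryExamples is a hypothetical driver and the values follow directly from the doc comments:

  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/DerivedTypes.h"
  #include "llvm/IR/LLVMContext.h"
  using namespace llvm;

  void sizeQueryExamples() {
    LLVMContext Ctx;
    DataLayout DL("");

    // Fixed-width types: the exact and known-minimum queries agree.
    Type *I36 = Type::getIntNTy(Ctx, 36);
    uint64_t Bits = DL.getTypeSizeInBits(I36);            // 36
    uint64_t Bytes = DL.getTypeStoreSize(I36);            // (36 + 7) / 8 == 5
    uint64_t MinBits = DL.getKnownMinTypeSizeInBits(I36); // also 36

    // Scalable vectors: only the scalable/known-minimum queries are legal;
    // DL.getTypeSizeInBits(ScV4I32) would assert.
    VectorType *ScV4I32 = VectorType::get(Type::getInt32Ty(Ctx), {4, true});
    ScalableSize SS = DL.getScalableTypeSizeInBits(ScV4I32);
    // SS.getKnownMinSize() == 128, SS.isScalable() == true.
  }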
   /// Returns true if no extra padding bits are needed when storing the
   /// specified type.
   ///
   /// For example, returns false for i19 that has a 24-bit store size.
   bool typeSizeEqualsStoreSize(Type *Ty) const {
-    return getTypeSizeInBits(Ty) == getTypeStoreSizeInBits(Ty);
+    return getScalableTypeSizeInBits(Ty) == getScalableTypeStoreSizeInBits(Ty);
   }

   /// Returns the offset in bytes between successive objects of the
@@ -467,20 +515,60 @@
   ///
   /// This is the amount that alloca reserves for this type. For example,
   /// returns 12 or 16 for x86_fp80, depending on alignment.
+  ///
+  /// An assert will occur if this is called on a scalable vector type.
   uint64_t getTypeAllocSize(Type *Ty) const {
     // Round up to the next alignment boundary.
     return alignTo(getTypeStoreSize(Ty), getABITypeAlignment(Ty));
   }
+
+  /// Returns the offset in bytes between successive objects of the specified
+  /// type (including alignment padding), along with a boolean indicating
+  /// whether the runtime allocation size is exactly that size (if false) or
+  /// an integer multiple of it (if true).
+  ScalableSize getScalableTypeAllocSize(Type *Ty) const {
+    auto Bytes = getScalableTypeStoreSize(Ty);
+    uint64_t MinAlignedSize = alignTo(Bytes.getKnownMinSize(),
+                                      getABITypeAlignment(Ty));
+    return ScalableSize(MinAlignedSize, Bytes.isScalable());
+  }
+
+  /// Returns the offset in bytes between successive objects of the
+  /// specified type, including alignment padding.
+  /// If the type is scalable, this quantity represents the known minimum size.
+  /// If not scalable, it represents the exact size.
+  uint64_t getKnownMinTypeAllocSize(Type *Ty) const {
+    return alignTo(getKnownMinTypeStoreSize(Ty), getABITypeAlignment(Ty));
+  }
+
   /// Returns the offset in bits between successive objects of the
   /// specified type, including alignment padding; always a multiple of 8.
   ///
   /// This is the amount that alloca reserves for this type. For example,
   /// returns 96 or 128 for x86_fp80, depending on alignment.
+  ///
+  /// An assert will occur if this is called on a scalable vector type.
   uint64_t getTypeAllocSizeInBits(Type *Ty) const {
     return 8 * getTypeAllocSize(Ty);
   }
+
+  /// Returns the offset in bits between successive objects of the specified
+  /// type (including alignment padding), along with a boolean indicating
+  /// whether the runtime allocation size is exactly that size (if false) or
+  /// an integer multiple of it (if true).
+  ScalableSize getScalableTypeAllocSizeInBits(Type *Ty) const {
+    auto Bytes = getScalableTypeAllocSize(Ty);
+    return {Bytes.getKnownMinSize() * 8, Bytes.isScalable()};
+  }
+
+  /// Returns the offset in bits between successive objects of the
+  /// specified type, including alignment padding.
+  /// If the type is scalable, this quantity represents the known minimum size.
+  /// If not scalable, it represents the exact size.
+  uint64_t getKnownMinTypeAllocSizeInBits(Type *Ty) const {
+    return 8 * getKnownMinTypeAllocSize(Ty);
+  }
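For illustration, a sketch of the intended call pattern for the alloc-size queries; fitsInFixedSlot is a hypothetical helper, not part of the patch:

  // Decide whether a value of type Ty fits in a fixed-size stack slot.
  // Conservative for scalable vectors: their runtime allocation size is only
  // known to be a multiple of the minimum, so reject them outright.
  static bool fitsInFixedSlot(const llvm::DataLayout &DL, llvm::Type *Ty,
                              uint64_t SlotBytes) {
    llvm::ScalableSize AllocSize = DL.getScalableTypeAllocSize(Ty);
    if (AllocSize.isScalable())
      return false;
    return AllocSize.getFixedSize() <= SlotBytes;
  }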
   /// Returns the minimum ABI-required alignment for the specified type.
   unsigned getABITypeAlignment(Type *Ty) const;
@@ -628,6 +716,8 @@
     return 80;
   case Type::VectorTyID: {
     VectorType *VTy = cast<VectorType>(Ty);
+    assert(!VTy->isScalable() &&
+           "Scalable vector sizes cannot be represented by a scalar");
     return VTy->getNumElements() * getTypeSizeInBits(VTy->getElementType());
   }
   default:
@@ -635,6 +725,23 @@
   }
 }

+inline ScalableSize DataLayout::getScalableTypeSizeInBits(Type *Ty) const {
+  switch (Ty->getTypeID()) {
+  default:
+    return {getTypeSizeInBits(Ty), false};
+  case Type::VectorTyID: {
+    VectorType *VTy = cast<VectorType>(Ty);
+    auto EltCnt = VTy->getElementCount();
+    uint64_t MinBits = EltCnt.Min * getTypeSizeInBits(VTy->getElementType());
+    return {MinBits, EltCnt.Scalable};
+  }
+  }
+}
+
+inline uint64_t DataLayout::getKnownMinTypeSizeInBits(Type *Ty) const {
+  return getScalableTypeSizeInBits(Ty).getKnownMinSize();
+}
+
 } // end namespace llvm

 #endif // LLVM_IR_DATALAYOUT_H
diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h
--- a/llvm/include/llvm/IR/InstrTypes.h
+++ b/llvm/include/llvm/IR/InstrTypes.h
@@ -975,7 +975,7 @@
   static Type* makeCmpResultType(Type* opnd_type) {
     if (VectorType* vt = dyn_cast<VectorType>(opnd_type)) {
       return VectorType::get(Type::getInt1Ty(opnd_type->getContext()),
-                             vt->getNumElements());
+                             vt->getElementCount());
     }
     return Type::getInt1Ty(opnd_type->getContext());
   }
diff --git a/llvm/include/llvm/IR/Type.h b/llvm/include/llvm/IR/Type.h
--- a/llvm/include/llvm/IR/Type.h
+++ b/llvm/include/llvm/IR/Type.h
@@ -21,6 +21,7 @@
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ScalableSize.h"
 #include <cassert>
 #include <cstdint>
 #include <iterator>
@@ -286,8 +287,22 @@
   /// instance of the type is stored to memory. The DataLayout class provides
   /// additional query functions to provide this information.
   ///
+  /// An assert will occur if this is called on a scalable vector type.
   unsigned getPrimitiveSizeInBits() const LLVM_READONLY;

+  /// Returns a ScalableSize for the type in question. This should be used in
+  /// place of getPrimitiveSizeInBits anywhere the type may be a VectorType
+  /// with the Scalable flag set.
+  ScalableSize getScalableSizeInBits() const LLVM_READONLY;
+
+  /// Returns the minimum known size in bits, ignoring whether the type might
+  /// be a scalable vector.
+  unsigned getKnownMinSizeInBits() const LLVM_READONLY;
+
+  /// Returns the exact size in bits, asserting if called on a scalable
+  /// vector type.
+  unsigned getFixedSizeInBits() const LLVM_READONLY;
+
   /// If this is a vector type, return the getPrimitiveSizeInBits value for the
   /// element type. Otherwise return the getPrimitiveSizeInBits value for this
   /// type.
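The Type-level queries mirror the DataLayout ones; an illustrative sketch (not part of the patch, values follow from the definitions above):

  llvm::LLVMContext Ctx;
  llvm::Type *FloatTy = llvm::Type::getFloatTy(Ctx);
  auto *V4F32 = llvm::VectorType::get(FloatTy, {4, false});
  auto *ScV4F32 = llvm::VectorType::get(FloatTy, {4, true});

  unsigned Exact = V4F32->getFixedSizeInBits();    // 128 bits, exactly
  unsigned Min = ScV4F32->getKnownMinSizeInBits(); // 128 bits per vscale
  llvm::ScalableSize SS = ScV4F32->getScalableSizeInBits(); // {128, true}
  // ScV4F32->getFixedSizeInBits() would assert: there is no fixed size.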
diff --git a/llvm/include/llvm/Support/ScalableSize.h b/llvm/include/llvm/Support/ScalableSize.h
--- a/llvm/include/llvm/Support/ScalableSize.h
+++ b/llvm/include/llvm/Support/ScalableSize.h
@@ -15,6 +15,8 @@
 #ifndef LLVM_SUPPORT_SCALABLESIZE_H
 #define LLVM_SUPPORT_SCALABLESIZE_H

+#include <tuple>
+
 namespace llvm {

 class ElementCount {
@@ -38,6 +40,93 @@
   }
 };

+// This class is used to represent the size of types. If the type is of fixed
+// size, it will represent the exact size. If the type is a scalable vector,
+// it will represent the known minimum size.
+class ScalableSize {
+  uint64_t MinSize; // The known minimum size.
+  bool Scalable;    // If true, then the runtime size is an integer multiple
+                    // of MinSize.
+
+public:
+  constexpr ScalableSize(uint64_t MinSize, bool Scalable)
+      : MinSize(MinSize), Scalable(Scalable) {}
+
+  // Scalable vector types with the same minimum size as a fixed size type are
+  // not guaranteed to be the same size at runtime, so they are never
+  // considered to be equal.
+  friend bool operator==(const ScalableSize &LHS, const ScalableSize &RHS) {
+    return std::tie(LHS.MinSize, LHS.Scalable) ==
+           std::tie(RHS.MinSize, RHS.Scalable);
+  }
+
+  friend bool operator!=(const ScalableSize &LHS, const ScalableSize &RHS) {
+    return !(LHS == RHS);
+  }
+
+  // For many cases, size ordering between scalable and fixed size types cannot
+  // be determined at compile time, so such comparisons aren't allowed.
+  //
+  // e.g. <vscale x 2 x i16> could be bigger than <4 x i32> with a runtime
+  // vscale >= 5, equal sized with a vscale of 4, and smaller with
+  // a vscale <= 3.
+  //
+  // If the scalable flags match, just perform the requested comparison
+  // between the minimum sizes.
+  friend bool operator<(const ScalableSize &LHS, const ScalableSize &RHS) {
+    assert(LHS.Scalable == RHS.Scalable &&
+           "Ordering comparison of scalable and fixed types");
+
+    return LHS.MinSize < RHS.MinSize;
+  }
+
+  friend bool operator>(const ScalableSize &LHS, const ScalableSize &RHS) {
+    return RHS < LHS;
+  }
+
+  friend bool operator<=(const ScalableSize &LHS, const ScalableSize &RHS) {
+    return !(RHS < LHS);
+  }
+
+  friend bool operator>=(const ScalableSize &LHS, const ScalableSize &RHS) {
+    return !(LHS < RHS);
+  }
+
+  // Convenience operators to obtain relative sizes independently of
+  // the scalable flag.
+  ScalableSize operator*(unsigned RHS) const {
+    return {MinSize * RHS, Scalable};
+  }
+
+  friend ScalableSize operator*(const unsigned LHS, const ScalableSize &RHS) {
+    return {RHS.MinSize * LHS, RHS.Scalable};
+  }
+
+  ScalableSize operator/(unsigned RHS) const {
+    return {MinSize / RHS, Scalable};
+  }
+
+  // Return the minimum size with the assumption that the size is exact.
+  // Use in places where a scalable size doesn't make sense (e.g. non-vector
+  // types, or vectors in backends which don't support scalable vectors).
+  uint64_t getFixedSize() const {
+    assert(!Scalable && "Request for a fixed size on a scalable object");
+    return MinSize;
+  }
+
+  // Return the known minimum size. Use in places where the scalable property
+  // doesn't matter (e.g. determining alignment) or in conjunction with the
+  // isScalable method below.
+  uint64_t getKnownMinSize() const {
+    return MinSize;
+  }
+
+  // Return whether or not the size is scalable.
+  bool isScalable() const {
+    return Scalable;
+  }
+};
+
 } // end namespace llvm

 #endif // LLVM_SUPPORT_SCALABLESIZE_H
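The comparison rules are the subtle part of this class; a quick sketch of their behavior, using only the operators defined above:

  llvm::ScalableSize Fixed128(128, false);   // e.g. <4 x i32>
  llvm::ScalableSize Scalable128(128, true); // e.g. <vscale x 4 x i32>

  bool Same = (Fixed128 == Scalable128);     // false: scalable flags differ
  bool Diff = (Fixed128 != Scalable128);     // true

  llvm::ScalableSize Scalable256 = Scalable128 * 2; // {256, true}
  bool Less = (Scalable128 < Scalable256);   // true: both scalable

  // (Fixed128 < Scalable128) would assert: the ordering depends on the
  // runtime value of vscale, which is unknown at compile time.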
diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp
--- a/llvm/lib/IR/DataLayout.cpp
+++ b/llvm/lib/IR/DataLayout.cpp
@@ -29,6 +29,7 @@
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/MathExtras.h"
+#include "llvm/Support/ScalableSize.h"
 #include <algorithm>
 #include <cassert>
 #include <cstdint>
@@ -746,7 +747,10 @@
     llvm_unreachable("Bad type for getAlignment!!!");
   }

-  return getAlignmentInfo(AlignType, getTypeSizeInBits(Ty), abi_or_pref, Ty);
+  // If we're dealing with a scalable vector, we just need the known minimum
+  // size for determining alignment. If not, we'll get the exact size.
+  return getAlignmentInfo(AlignType, getKnownMinTypeSizeInBits(Ty),
+                          abi_or_pref, Ty);
 }

 unsigned DataLayout::getABITypeAlignment(Type *Ty) const {
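With getAlignment keyed off the known minimum size, alignment queries work unchanged for scalable vectors; a sketch (the concrete alignment value depends on the target's datalayout string):

  llvm::LLVMContext Ctx;
  llvm::DataLayout DL("");
  auto *ScV4I32 = llvm::VectorType::get(llvm::Type::getInt32Ty(Ctx),
                                        {4, true});
  // Uses the 128-bit known minimum to look up alignment; no assert fires.
  unsigned ABIAlign = DL.getABITypeAlignment(ScV4I32);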
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -38,6 +38,7 @@
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/MathExtras.h"
+#include "llvm/Support/ScalableSize.h"
 #include <algorithm>
 #include <cassert>
 #include <cstdint>
@@ -1778,7 +1779,7 @@
                                      const Twine &Name,
                                      Instruction *InsertBefore)
     : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
-                                  cast<VectorType>(Mask->getType())->getNumElements()),
+                                  cast<VectorType>(Mask->getType())->getElementCount()),
                   ShuffleVector,
                   OperandTraits<ShuffleVectorInst>::op_begin(this),
                   OperandTraits<ShuffleVectorInst>::operands(this),
@@ -1795,7 +1796,7 @@
                                      const Twine &Name,
                                      BasicBlock *InsertAtEnd)
     : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
-                                  cast<VectorType>(Mask->getType())->getNumElements()),
+                                  cast<VectorType>(Mask->getType())->getElementCount()),
                   ShuffleVector,
                   OperandTraits<ShuffleVectorInst>::op_begin(this),
                   OperandTraits<ShuffleVectorInst>::operands(this),
@@ -2968,8 +2969,8 @@
   }

   // Get the bit sizes, we'll need these
-  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
-  unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
+  auto SrcBits = SrcTy->getScalableSizeInBits();   // 0 for ptr
+  auto DestBits = DestTy->getScalableSizeInBits(); // 0 for ptr

   // Run through the possibilities ...
   if (DestTy->isIntegerTy()) {                    // Casting to integral
@@ -3016,7 +3017,7 @@
   if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
     if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
-      if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
+      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
         // An element by element cast. Valid if casting the elements is valid.
         SrcTy = SrcVecTy->getElementType();
         DestTy = DestVecTy->getElementType();
@@ -3030,12 +3031,12 @@
     }
   }

-  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
-  unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
+  auto SrcBits = SrcTy->getScalableSizeInBits();   // 0 for ptr
+  auto DestBits = DestTy->getScalableSizeInBits(); // 0 for ptr

   // Could still have vectors of pointers if the number of elements doesn't
   // match
-  if (SrcBits == 0 || DestBits == 0)
+  if (SrcBits.getKnownMinSize() == 0 || DestBits.getKnownMinSize() == 0)
     return false;

   if (SrcBits != DestBits)
@@ -3245,7 +3246,7 @@
   // For non-pointer cases, the cast is okay if the source and destination bit
   // widths are identical.
   if (!SrcPtrTy)
-    return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
+    return SrcTy->getScalableSizeInBits() == DstTy->getScalableSizeInBits();

   // If both are pointers then the address spaces must match.
   if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
diff --git a/llvm/lib/IR/Type.cpp b/llvm/lib/IR/Type.cpp
--- a/llvm/lib/IR/Type.cpp
+++ b/llvm/lib/IR/Type.cpp
@@ -26,6 +26,7 @@
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ScalableSize.h"
 #include <cassert>
 #include <utility>
@@ -121,7 +122,11 @@
   case Type::PPC_FP128TyID: return 128;
   case Type::X86_MMXTyID: return 64;
   case Type::IntegerTyID: return cast<IntegerType>(this)->getBitWidth();
-  case Type::VectorTyID: return cast<VectorType>(this)->getBitWidth();
+  case Type::VectorTyID: {
+    const VectorType *VTy = cast<VectorType>(this);
+    assert(!VTy->isScalable() && "Scalable vectors are not a primitive type");
+    return VTy->getBitWidth();
+  }
   default: return 0;
   }
 }
@@ -130,6 +135,21 @@
   return getScalarType()->getPrimitiveSizeInBits();
 }

+ScalableSize Type::getScalableSizeInBits() const {
+  if (auto *VTy = dyn_cast<VectorType>(this))
+    return {VTy->getBitWidth(), VTy->isScalable()};
+
+  return {getPrimitiveSizeInBits(), false};
+}
+
+unsigned Type::getKnownMinSizeInBits() const {
+  return getScalableSizeInBits().getKnownMinSize();
+}
+
+unsigned Type::getFixedSizeInBits() const {
+  return getScalableSizeInBits().getFixedSize();
+}
+
 int Type::getFPMantissaWidth() const {
   if (auto *VTy = dyn_cast<VectorType>(this))
     return VTy->getElementType()->getFPMantissaWidth();
diff --git a/llvm/test/Other/scalable-vectors-core-ir.ll b/llvm/test/Other/scalable-vectors-core-ir.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Other/scalable-vectors-core-ir.ll
@@ -0,0 +1,393 @@
+; RUN: opt -S -verify < %s | FileCheck %s
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-gnu"
+
+;; Check supported instructions are accepted without dropping 'vscale'.
+;; Same order as the LangRef + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Unary Operations +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + + +define @fneg( %val) { +; CHECK-LABEL: @fneg +; CHECK: %r = fneg %val +; CHECK-NEXT: ret %r + %r = fneg %val + ret %r +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Binary Operations +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +define @add( %a, %b) { +; CHECK-LABEL: @add +; CHECK: %r = add %a, %b +; CHECK-NEXT: ret %r + %r = add %a, %b + ret %r +} + +define @fadd( %a, %b) { +; CHECK-LABEL: @fadd +; CHECK: %r = fadd %a, %b +; CHECK-NEXT: ret %r + %r = fadd %a, %b + ret %r +} + +define @sub( %a, %b) { +; CHECK-LABEL: @sub +; CHECK: %r = sub %a, %b +; CHECK-NEXT: ret %r + %r = sub %a, %b + ret %r +} + +define @fsub( %a, %b) { +; CHECK-LABEL: @fsub +; CHECK: %r = fsub %a, %b +; CHECK-NEXT: ret %r + %r = fsub %a, %b + ret %r +} + +define @mul( %a, %b) { +; CHECK-LABEL: @mul +; CHECK: %r = mul %a, %b +; CHECK-NEXT: ret %r + %r = mul %a, %b + ret %r +} + +define @fmul( %a, %b) { +; CHECK-LABEL: @fmul +; CHECK: %r = fmul %a, %b +; CHECK-NEXT: ret %r + %r = fmul %a, %b + ret %r +} + +define @udiv( %a, %b) { +; CHECK-LABEL: @udiv +; CHECK: %r = udiv %a, %b +; CHECK-NEXT: ret %r + %r = udiv %a, %b + ret %r +} + +define @sdiv( %a, %b) { +; CHECK-LABEL: @sdiv +; CHECK: %r = sdiv %a, %b +; CHECK-NEXT: ret %r + %r = sdiv %a, %b + ret %r +} + +define @fdiv( %a, %b) { +; CHECK-LABEL: @fdiv +; CHECK: %r = fdiv %a, %b +; CHECK-NEXT: ret %r + %r = fdiv %a, %b + ret %r +} + +define @urem( %a, %b) { +; CHECK-LABEL: @urem +; CHECK: %r = urem %a, %b +; CHECK-NEXT: ret %r + %r = urem %a, %b + ret %r +} + +define @srem( %a, %b) { +; CHECK-LABEL: @srem +; CHECK: %r = srem %a, %b +; CHECK-NEXT: ret %r + %r = srem %a, %b + ret %r +} + +define @frem( %a, %b) { +; CHECK-LABEL: @frem +; CHECK: %r = frem %a, %b +; CHECK-NEXT: ret %r + %r = frem %a, %b + ret %r +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Bitwise Binary Operations +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +define @shl( %a, %b) { +; CHECK-LABEL: @shl +; CHECK: %r = shl %a, %b +; CHECK-NEXT: ret %r + %r = shl %a, %b + ret %r +} + +define @lshr( %a, %b) { +; CHECK-LABEL: @lshr +; CHECK: %r = lshr %a, %b +; CHECK-NEXT: ret %r + %r = lshr %a, %b + ret %r +} + +define @ashr( %a, %b) { +; CHECK-LABEL: @ashr +; CHECK: %r = ashr %a, %b +; CHECK-NEXT: ret %r + %r = ashr %a, %b + ret %r +} + +define @and( %a, %b) { +; CHECK-LABEL: @and +; CHECK: %r = and %a, %b +; CHECK-NEXT: ret %r + %r = and %a, %b + ret %r +} + +define @or( %a, %b) { +; CHECK-LABEL: @or +; CHECK: %r = or %a, %b +; CHECK-NEXT: ret %r + %r = or %a, %b + ret %r +} + +define @xor( %a, %b) { +; CHECK-LABEL: @xor +; CHECK: %r = xor %a, %b +; CHECK-NEXT: ret %r + %r = xor %a, %b + ret %r +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Vector Operations +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +define i64 @extractelement( %val) { +; CHECK-LABEL: @extractelement +; CHECK: %r = extractelement %val, i32 0 +; CHECK-NEXT: ret i64 %r + %r = extractelement %val, i32 0 + ret i64 %r +} + +define @insertelement( %vec, i8 %ins) { +; CHECK-LABEL: @insertelement +; CHECK: %r = insertelement %vec, i8 %ins, i32 0 +; CHECK-NEXT: ret %r + %r = 
insertelement %vec, i8 %ins, i32 0 + ret %r +} + +define @shufflevector(half %val) { +; CHECK-LABEL: @shufflevector +; CHECK: %insvec = insertelement undef, half %val, i32 0 +; CHECK-NEXT: %r = shufflevector %insvec, undef, zeroinitializer +; CHECK-NEXT: ret %r + %insvec = insertelement undef, half %val, i32 0 + %r = shufflevector %insvec, undef, zeroinitializer + ret %r +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Memory Access and Addressing Operations +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +define void @alloca() { +; CHECK-LABEL: @alloca +; CHECK: %vec = alloca +; CHECK-NEXT: ret void + %vec = alloca + ret void +} + +define @load(* %ptr) { +; CHECK-LABEL: @load +; CHECK: %r = load , * %ptr +; CHECK-NEXT: ret %r + %r = load , * %ptr + ret %r +} + +define void @store( %data, * %ptr) { +; CHECK-LABEL: @store +; CHECK: store %data, * %ptr +; CHECK-NEXT: ret void + store %data, * %ptr + ret void +} + +define * @getelementptr(* %base) { +; CHECK-LABEL: @getelementptr +; CHECK: %r = getelementptr , * %base, i64 0 +; CHECK-NEXT: ret * %r + %r = getelementptr , * %base, i64 0 + ret * %r +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Conversion Operations +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +define @truncto( %val) { +; CHECK-LABEL: @truncto +; CHECK: %r = trunc %val to +; CHECK-NEXT: ret %r + %r = trunc %val to + ret %r +} + +define @zextto( %val) { +; CHECK-LABEL: @zextto +; CHECK: %r = zext %val to +; CHECK-NEXT: ret %r + %r = zext %val to + ret %r +} + +define @sextto( %val) { +; CHECK-LABEL: @sextto +; CHECK: %r = sext %val to +; CHECK-NEXT: ret %r + %r = sext %val to + ret %r +} + +define @fptruncto( %val) { +; CHECK-LABEL: @fptruncto +; CHECK: %r = fptrunc %val to +; CHECK-NEXT: ret %r + %r = fptrunc %val to + ret %r +} + +define @fpextto( %val) { +; CHECK-LABEL: @fpextto +; CHECK: %r = fpext %val to +; CHECK-NEXT: ret %r + %r = fpext %val to + ret %r +} + +define @fptouito( %val) { +; CHECK-LABEL: @fptoui +; CHECK: %r = fptoui %val to +; CHECK-NEXT: ret %r + %r = fptoui %val to + ret %r +} + +define @fptosito( %val) { +; CHECK-LABEL: @fptosi +; CHECK: %r = fptosi %val to +; CHECK-NEXT: ret %r + %r = fptosi %val to + ret %r +} + +define @uitofpto( %val) { +; CHECK-LABEL: @uitofp +; CHECK: %r = uitofp %val to +; CHECK-NEXT: ret %r + %r = uitofp %val to + ret %r +} + +define @sitofpto( %val) { +; CHECK-LABEL: @sitofp +; CHECK: %r = sitofp %val to +; CHECK-NEXT: ret %r + %r = sitofp %val to + ret %r +} + +define @ptrtointto( %val) { +; CHECK-LABEL: @ptrtointto +; CHECK: %r = ptrtoint %val to +; CHECK-NEXT: ret %r + %r = ptrtoint %val to + ret %r +} + +define @inttoptrto( %val) { +; CHECK-LABEL: @inttoptrto +; CHECK: %r = inttoptr %val to +; CHECK-NEXT: ret %r + %r = inttoptr %val to + ret %r +} + +define @bitcastto( %a) { +; CHECK-LABEL: @bitcast +; CHECK: %r = bitcast %a to +; CHECK-NEXT: ret %r + %r = bitcast %a to + ret %r +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Other Operations +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +define @icmp( %a, %b) { +; CHECK-LABEL: @icmp +; CHECK: %r = icmp eq %a, %b +; CHECK-NEXT: ret %r + %r = icmp eq %a, %b + ret %r +} + +define @fcmp( %a, %b) { +; CHECK-LABEL: @fcmp +; CHECK: %r = fcmp une %a, %b +; CHECK-NEXT: ret %r + %r = fcmp une %a, %b + ret %r +} + +define @phi( %a, i32 %val) { +; 
CHECK-LABEL: @phi +; CHECK: %r = phi [ %a, %entry ], [ %added, %iszero ] +; CHECK-NEXT: ret %r +entry: + %cmp = icmp eq i32 %val, 0 + br i1 %cmp, label %iszero, label %end + +iszero: + %ins = insertelement undef, i8 1, i32 0 + %splatone = shufflevector %ins, undef, zeroinitializer + %added = add %a, %splatone + br label %end + +end: + %r = phi [ %a, %entry ], [ %added, %iszero ] + ret %r +} + +define @select( %a, %b, %sval) { +; CHECK-LABEL: @select +; CHECK: %r = select %sval, %a, %b +; CHECK-NEXT: ret %r + %r = select %sval, %a, %b + ret %r +} + +declare @callee() +define @call( %val) { +; CHECK-LABEL: @call +; CHECK: %r = call @callee( %val) +; CHECK-NEXT: ret %r + %r = call @callee( %val) + ret %r +} \ No newline at end of file diff --git a/llvm/unittests/IR/VectorTypesTest.cpp b/llvm/unittests/IR/VectorTypesTest.cpp --- a/llvm/unittests/IR/VectorTypesTest.cpp +++ b/llvm/unittests/IR/VectorTypesTest.cpp @@ -6,6 +6,7 @@ // //===----------------------------------------------------------------------===// +#include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/LLVMContext.h" #include "llvm/Support/ScalableSize.h" @@ -160,5 +161,117 @@ EXPECT_EQ(EltCnt.Min, 8U); ASSERT_TRUE(EltCnt.Scalable); } +TEST(VectorTypesTest, FixedLenComparisons) { + LLVMContext Ctx; + DataLayout DL(""); + + Type *Int32Ty = Type::getInt32Ty(Ctx); + Type *Int64Ty = Type::getInt64Ty(Ctx); + + VectorType *V2Int32Ty = VectorType::get(Int32Ty, 2); + VectorType *V4Int32Ty = VectorType::get(Int32Ty, 4); + + VectorType *V2Int64Ty = VectorType::get(Int64Ty, 2); + + ScalableSize V2I32Len = V2Int32Ty->getScalableSizeInBits(); + EXPECT_EQ(V2I32Len.getKnownMinSize(), 64U); + EXPECT_FALSE(V2I32Len.isScalable()); + + EXPECT_LT(V2Int32Ty->getScalableSizeInBits(), + V4Int32Ty->getScalableSizeInBits()); + EXPECT_GT(V2Int64Ty->getScalableSizeInBits(), + V2Int32Ty->getScalableSizeInBits()); + EXPECT_EQ(V4Int32Ty->getScalableSizeInBits(), + V2Int64Ty->getScalableSizeInBits()); + EXPECT_NE(V2Int32Ty->getScalableSizeInBits(), + V2Int64Ty->getScalableSizeInBits()); + + // Check that a fixed-only comparison works for fixed size vectors. + EXPECT_EQ(V2Int64Ty->getFixedSizeInBits(), + V4Int32Ty->getFixedSizeInBits()); + + // Check the DataLayout interfaces. 
+  EXPECT_EQ(DL.getScalableTypeSizeInBits(V2Int64Ty),
+            DL.getScalableTypeSizeInBits(V4Int32Ty));
+  EXPECT_EQ(DL.getKnownMinTypeSizeInBits(V2Int32Ty), 64U);
+  EXPECT_EQ(DL.getTypeSizeInBits(V2Int64Ty), 128U);
+  EXPECT_EQ(DL.getScalableTypeStoreSize(V2Int64Ty),
+            DL.getScalableTypeStoreSize(V4Int32Ty));
+  EXPECT_NE(DL.getScalableTypeStoreSizeInBits(V2Int32Ty),
+            DL.getScalableTypeStoreSizeInBits(V2Int64Ty));
+  EXPECT_EQ(DL.getKnownMinTypeStoreSizeInBits(V2Int32Ty), 64U);
+  EXPECT_EQ(DL.getKnownMinTypeStoreSize(V2Int64Ty), 16U);
+  EXPECT_EQ(DL.getScalableTypeAllocSize(V4Int32Ty),
+            DL.getScalableTypeAllocSize(V2Int64Ty));
+  EXPECT_NE(DL.getScalableTypeAllocSizeInBits(V2Int32Ty),
+            DL.getScalableTypeAllocSizeInBits(V2Int64Ty));
+  EXPECT_EQ(DL.getKnownMinTypeAllocSizeInBits(V4Int32Ty), 128U);
+  EXPECT_EQ(DL.getKnownMinTypeAllocSize(V2Int32Ty), 8U);
+  ASSERT_TRUE(DL.typeSizeEqualsStoreSize(V4Int32Ty));
+}
+
+TEST(VectorTypesTest, ScalableComparisons) {
+  LLVMContext Ctx;
+  DataLayout DL("");
+
+  Type *Int32Ty = Type::getInt32Ty(Ctx);
+  Type *Int64Ty = Type::getInt64Ty(Ctx);
+
+  VectorType *ScV2Int32Ty = VectorType::get(Int32Ty, {2, true});
+  VectorType *ScV4Int32Ty = VectorType::get(Int32Ty, {4, true});
+
+  VectorType *ScV2Int64Ty = VectorType::get(Int64Ty, {2, true});
+
+  ScalableSize ScV2I32Len = ScV2Int32Ty->getScalableSizeInBits();
+  EXPECT_EQ(ScV2I32Len.getKnownMinSize(), 64U);
+  EXPECT_TRUE(ScV2I32Len.isScalable());
+
+  EXPECT_LT(ScV2Int32Ty->getScalableSizeInBits(),
+            ScV4Int32Ty->getScalableSizeInBits());
+  EXPECT_GT(ScV2Int64Ty->getScalableSizeInBits(),
+            ScV2Int32Ty->getScalableSizeInBits());
+  EXPECT_EQ(ScV4Int32Ty->getScalableSizeInBits(),
+            ScV2Int64Ty->getScalableSizeInBits());
+  EXPECT_NE(ScV2Int32Ty->getScalableSizeInBits(),
+            ScV2Int64Ty->getScalableSizeInBits());
+
+  // Check the DataLayout interfaces.
+  EXPECT_EQ(DL.getScalableTypeSizeInBits(ScV2Int64Ty),
+            DL.getScalableTypeSizeInBits(ScV4Int32Ty));
+  EXPECT_EQ(DL.getKnownMinTypeSizeInBits(ScV2Int32Ty), 64U);
+  EXPECT_EQ(DL.getScalableTypeStoreSize(ScV2Int64Ty),
+            DL.getScalableTypeStoreSize(ScV4Int32Ty));
+  EXPECT_NE(DL.getScalableTypeStoreSizeInBits(ScV2Int32Ty),
+            DL.getScalableTypeStoreSizeInBits(ScV2Int64Ty));
+  EXPECT_EQ(DL.getKnownMinTypeStoreSizeInBits(ScV2Int32Ty), 64U);
+  EXPECT_EQ(DL.getKnownMinTypeStoreSize(ScV2Int64Ty), 16U);
+  EXPECT_EQ(DL.getScalableTypeAllocSize(ScV4Int32Ty),
+            DL.getScalableTypeAllocSize(ScV2Int64Ty));
+  EXPECT_NE(DL.getScalableTypeAllocSizeInBits(ScV2Int32Ty),
+            DL.getScalableTypeAllocSizeInBits(ScV2Int64Ty));
+  EXPECT_EQ(DL.getKnownMinTypeAllocSizeInBits(ScV4Int32Ty), 128U);
+  EXPECT_EQ(DL.getKnownMinTypeAllocSize(ScV2Int32Ty), 8U);
+  ASSERT_TRUE(DL.typeSizeEqualsStoreSize(ScV4Int32Ty));
+}
+
+TEST(VectorTypesTest, CrossComparisons) {
+  LLVMContext Ctx;
+
+  Type *Int32Ty = Type::getInt32Ty(Ctx);
+
+  VectorType *V4Int32Ty = VectorType::get(Int32Ty, {4, false});
+  VectorType *ScV4Int32Ty = VectorType::get(Int32Ty, {4, true});
+
+  // Even though the minimum size is the same, a scalable vector could be
+  // larger so we don't consider them to be the same size.
+  EXPECT_NE(V4Int32Ty->getScalableSizeInBits(),
+            ScV4Int32Ty->getScalableSizeInBits());
+  // If we are only checking the minimum, then they are the same size.
+  EXPECT_EQ(V4Int32Ty->getKnownMinSizeInBits(),
+            ScV4Int32Ty->getKnownMinSizeInBits());
+
+  // We can't use ordering comparisons (<,<=,>,>=) between scalable and
+  // non-scalable vector sizes.
+}

 } // end anonymous namespace
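Finally, a standalone sketch (not part of the patch) tying the pieces together: ElementCount propagation, which is what keeps the scalable flag intact in makeCmpResultType and the shufflevector constructors, plus the new size queries.

  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/DerivedTypes.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/Support/ScalableSize.h"
  using namespace llvm;

  int main() {
    LLVMContext Ctx;
    DataLayout DL("");

    // Propagating an ElementCount, rather than a plain element number,
    // preserves the Scalable flag when deriving one vector type from another.
    VectorType *ScV4I32 = VectorType::get(Type::getInt32Ty(Ctx), {4, true});
    ElementCount EC = ScV4I32->getElementCount();
    VectorType *CmpTy = VectorType::get(Type::getInt1Ty(Ctx), EC);

    // The size queries distinguish the known minimum from the runtime size.
    ScalableSize Size = DL.getScalableTypeSizeInBits(ScV4I32);
    bool OK = CmpTy->isScalable() && Size.isScalable() &&
              Size.getKnownMinSize() == 128;
    return OK ? 0 : 1;
  }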