diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -2642,7 +2642,13 @@
 ``v<size>:<abi>[:<pref>]``
     This specifies the alignment for a vector type of a given bit
     ``<size>``. The value of ``<size>`` must be in the range [1,2^23).
-    ``<pref>`` is optional and defaults to ``<abi>``.
+    ``<pref>`` is optional and defaults to ``<abi>``. This cannot be used in
+    conjunction with ``ve``, below.
+``ve<size>:<abi>[:<pref>]``
+    This specifies the alignment for vector types with an element type of a
+    given bit ``<size>``. The value of ``<size>`` must be in the range
+    [1,2^23). ``<pref>`` is optional and defaults to ``<abi>``. This cannot be
+    used in conjunction with ``v``, above.
 ``f<size>:<abi>[:<pref>]``
     This specifies the alignment for a floating-point type of a given bit
     ``<size>``. Only values of ``<size>`` that are supported by the target
diff --git a/llvm/include/llvm/IR/DataLayout.h b/llvm/include/llvm/IR/DataLayout.h
--- a/llvm/include/llvm/IR/DataLayout.h
+++ b/llvm/include/llvm/IR/DataLayout.h
@@ -121,7 +121,9 @@
 private:
   /// Defaults to false.
   bool BigEndian;
-
+  /// Whether vectors in this DataLayout are specified by element size or total
+  /// size. Defaults to false (total size).
+  bool VectorsByEltSize;
   unsigned AllocaAddrSpace;
   MaybeAlign StackNaturalAlign;
   unsigned ProgramAddrSpace;
@@ -214,6 +216,7 @@
     clear();
     StringRepresentation = DL.StringRepresentation;
     BigEndian = DL.isBigEndian();
+    VectorsByEltSize = DL.VectorsByEltSize;
     AllocaAddrSpace = DL.AllocaAddrSpace;
     StackNaturalAlign = DL.StackNaturalAlign;
     FunctionPtrAlign = DL.FunctionPtrAlign;
diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp
--- a/llvm/lib/IR/DataLayout.cpp
+++ b/llvm/lib/IR/DataLayout.cpp
@@ -182,6 +182,7 @@
 
   LayoutMap = nullptr;
   BigEndian = false;
+  VectorsByEltSize = false;
   AllocaAddrSpace = 0;
   StackNaturalAlign.reset();
   ProgramAddrSpace = 0;
@@ -257,6 +258,7 @@
 
 Error DataLayout::parseSpecifier(StringRef Desc) {
   StringRepresentation = std::string(Desc);
+  bool ExplicitVectorsBySize = false;
   while (!Desc.empty()) {
     // Split at '-'.
     std::pair<StringRef, StringRef> Split;
@@ -379,6 +381,33 @@
       case 'a': AlignType = AGGREGATE_ALIGN; break;
       }
 
+      // Check whether this is actually defining vectors by element size (ve).
+      if (AlignType == VECTOR_ALIGN) {
+        if (Tok.front() == 'e') {
+          // Vectors by element: consume the 'e'.
+          Tok = Tok.substr(1);
+          if (ExplicitVectorsBySize)
+            return reportError("Vectors must be explicitly set by total size "
+                               "(v) OR element size (ve)");
+          // If we're setting vectors-by-element, we must wipe any pre-existing
+          // vector settings: the defaults implicitly set v64 and v128 by size.
+          if (!VectorsByEltSize) {
+            VectorsByEltSize = true;
+            Alignments.erase(llvm::remove_if(Alignments,
+                                             [](const LayoutAlignElem &LE) {
+                                               return LE.AlignType ==
+                                                      VECTOR_ALIGN;
+                                             }),
+                             Alignments.end());
+          }
+        } else {
+          if (VectorsByEltSize)
+            return reportError("Vectors must be explicitly set by total size "
+                               "(v) OR element size (ve)");
+          ExplicitVectorsBySize = true;
+        }
+      }
+
       // Bit size.
       unsigned Size = 0;
       if (!Tok.empty())
@@ -539,6 +568,7 @@
 
 bool DataLayout::operator==(const DataLayout &Other) const {
   bool Ret = BigEndian == Other.BigEndian &&
+             VectorsByEltSize == Other.VectorsByEltSize &&
              AllocaAddrSpace == Other.AllocaAddrSpace &&
              StackNaturalAlign == Other.StackNaturalAlign &&
              ProgramAddrSpace == Other.ProgramAddrSpace &&
@@ -800,11 +830,21 @@
   case Type::X86_MMXTyID:
   case Type::FixedVectorTyID:
   case Type::ScalableVectorTyID: {
-    unsigned BitWidth = getTypeSizeInBits(Ty).getKnownMinSize();
-    auto I = findAlignmentLowerBound(VECTOR_ALIGN, BitWidth);
-    if (I != Alignments.end() && I->AlignType == VECTOR_ALIGN &&
-        I->TypeBitWidth == BitWidth)
-      return abi_or_pref ? I->ABIAlign : I->PrefAlign;
+    // FIXME: Can X86_MMXTyID and VectorsByEltSize coexist?
+    Optional<unsigned> BitWidth;
+    if (VectorsByEltSize && Ty->getTypeID() != Type::X86_MMXTyID) {
+      auto *EltTy = cast<VectorType>(Ty)->getElementType();
+      BitWidth = getTypeSizeInBits(EltTy).getFixedSize();
+    } else if (!VectorsByEltSize) {
+      BitWidth = getTypeSizeInBits(Ty).getKnownMinSize();
+    }
+
+    if (BitWidth) {
+      const auto *I = findAlignmentLowerBound(VECTOR_ALIGN, *BitWidth);
+      if (I != Alignments.end() && I->AlignType == VECTOR_ALIGN &&
+          I->TypeBitWidth == BitWidth)
+        return abi_or_pref ? I->ABIAlign : I->PrefAlign;
+    }
 
     // By default, use natural alignment for vector types. This is consistent
     // with what clang and llvm-gcc do.
diff --git a/llvm/unittests/IR/DataLayoutTest.cpp b/llvm/unittests/IR/DataLayoutTest.cpp
--- a/llvm/unittests/IR/DataLayoutTest.cpp
+++ b/llvm/unittests/IR/DataLayoutTest.cpp
@@ -102,6 +102,76 @@
   // the natural alignment as a fallback.
   EXPECT_EQ(Align(4 * 8), DL->getABITypeAlign(V8F32Ty));
   EXPECT_EQ(Align(4 * 8), DL->getPrefTypeAlign(V8F32Ty));
+
+  // Test vectors by total size in isolation.
+  EXPECT_THAT_EXPECTED(DataLayout::parse("v64:64"), Succeeded());
+  EXPECT_THAT_EXPECTED(DataLayout::parse("v64:64:64"), Succeeded());
+  EXPECT_THAT_EXPECTED(DataLayout::parse("v64:64-v128:128"), Succeeded());
+  EXPECT_THAT_EXPECTED(DataLayout::parse("v64:64-v128:128:128"), Succeeded());
+  EXPECT_THAT_EXPECTED(DataLayout::parse("v64:64:64-v128:128"), Succeeded());
+  EXPECT_THAT_EXPECTED(DataLayout::parse("v64:64:64-v128:128:128"),
+                       Succeeded());
+  // Test vectors by element in isolation.
+  EXPECT_THAT_EXPECTED(DataLayout::parse("ve8:8"), Succeeded());
+  EXPECT_THAT_EXPECTED(DataLayout::parse("ve8:8:8"), Succeeded());
+  EXPECT_THAT_EXPECTED(DataLayout::parse("ve8:8-ve32:32"), Succeeded());
+  EXPECT_THAT_EXPECTED(DataLayout::parse("ve8:8-ve32:32:32"), Succeeded());
+  EXPECT_THAT_EXPECTED(DataLayout::parse("ve8:8:8-ve32:32"), Succeeded());
+  EXPECT_THAT_EXPECTED(DataLayout::parse("ve8:8:8-ve32:32:32"), Succeeded());
+  // We can't mix vectors by total size and by element size.
+  EXPECT_THAT_EXPECTED(DataLayout::parse("v64:64-ve32:32"), Failed());
+  EXPECT_THAT_EXPECTED(DataLayout::parse("ve8:8-v128:128"), Failed());
+
+  Type *const I8Ty = Type::getInt8Ty(Context);
+  Type *const I16Ty = Type::getInt16Ty(Context);
+  Type *const I32Ty = Type::getInt32Ty(Context);
+  Type *const HalfTy = Type::getHalfTy(Context);
+
+  DL = DataLayout::parse("ve8:16:32-ve16:64");
+  EXPECT_THAT_EXPECTED(DL, Succeeded());
+
+  for (unsigned NumElts : {1, 2, 3, 4, 6, 8, 16, 64, 80, 128, 1024}) {
+    auto FEC = ElementCount::getFixed(NumElts);
+    auto SEC = ElementCount::getScalable(NumElts);
+    // 8-bit exact matches.
+    auto *FixVI8Ty = VectorType::get(I8Ty, FEC);
+    auto *ScaVI8Ty = VectorType::get(I8Ty, SEC);
+    EXPECT_EQ(Align(2), DL->getABITypeAlign(FixVI8Ty));
+    EXPECT_EQ(Align(4), DL->getPrefTypeAlign(FixVI8Ty));
+    EXPECT_EQ(Align(2), DL->getABITypeAlign(ScaVI8Ty));
+    EXPECT_EQ(Align(4), DL->getPrefTypeAlign(ScaVI8Ty));
+
+    // 16-bit exact matches.
+    auto *FixVI16Ty = VectorType::get(I16Ty, FEC);
+    auto *ScaVI16Ty = VectorType::get(I16Ty, SEC);
+    EXPECT_EQ(Align(8), DL->getABITypeAlign(FixVI16Ty));
+    EXPECT_EQ(Align(8), DL->getPrefTypeAlign(FixVI16Ty));
+    EXPECT_EQ(Align(8), DL->getABITypeAlign(ScaVI16Ty));
+    EXPECT_EQ(Align(8), DL->getPrefTypeAlign(ScaVI16Ty));
+
+    auto *FixVF16Ty = VectorType::get(HalfTy, FEC);
+    auto *ScaVF16Ty = VectorType::get(HalfTy, SEC);
+    EXPECT_EQ(Align(8), DL->getABITypeAlign(FixVF16Ty));
+    EXPECT_EQ(Align(8), DL->getPrefTypeAlign(FixVF16Ty));
+    EXPECT_EQ(Align(8), DL->getABITypeAlign(ScaVF16Ty));
+    EXPECT_EQ(Align(8), DL->getPrefTypeAlign(ScaVF16Ty));
+
+    // 32-bit types fall back to the natural alignment.
+    auto *FixVI32Ty = VectorType::get(I32Ty, FEC);
+    auto *ScaVI32Ty = VectorType::get(I32Ty, SEC);
+    Align ExpectedAlign(PowerOf2Ceil(NumElts * 4));
+    EXPECT_EQ(ExpectedAlign, DL->getABITypeAlign(FixVI32Ty));
+    EXPECT_EQ(ExpectedAlign, DL->getPrefTypeAlign(FixVI32Ty));
+    EXPECT_EQ(ExpectedAlign, DL->getABITypeAlign(ScaVI32Ty));
+    EXPECT_EQ(ExpectedAlign, DL->getPrefTypeAlign(ScaVI32Ty));
+
+    auto *FixVF32Ty = VectorType::get(FloatTy, FEC);
+    auto *ScaVF32Ty = VectorType::get(FloatTy, SEC);
+    EXPECT_EQ(ExpectedAlign, DL->getABITypeAlign(FixVF32Ty));
+    EXPECT_EQ(ExpectedAlign, DL->getPrefTypeAlign(FixVF32Ty));
+    EXPECT_EQ(ExpectedAlign, DL->getABITypeAlign(ScaVF32Ty));
+    EXPECT_EQ(ExpectedAlign, DL->getPrefTypeAlign(ScaVF32Ty));
+  }
 }
 
 } // anonymous namespace
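
As a quick way to exercise the proposed "ve" specifier outside the unit tests, here is a minimal standalone sketch. It is not part of the patch and only builds against a tree with the changes above applied; the file name and the chosen layout string are illustrative, and the expected alignments (2-byte ABI, 4-byte preferred for 8-bit elements under "ve8:16:32") simply mirror the assertions in DataLayoutTest.cpp.

// ve_align_demo.cpp -- hypothetical driver, assumes the 've' patch is applied.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;

  // "ve8:16:32" requests 16-bit ABI / 32-bit preferred alignment for every
  // vector whose element type is 8 bits wide, regardless of element count.
  Expected<DataLayout> DL = DataLayout::parse("ve8:16:32-ve16:64");
  if (!DL) {
    logAllUnhandledErrors(DL.takeError(), errs(), "bad layout string: ");
    return 1;
  }

  auto *V4I8 = FixedVectorType::get(Type::getInt8Ty(Ctx), 4);
  auto *V64I8 = FixedVectorType::get(Type::getInt8Ty(Ctx), 64);

  // Under the element-size rule both vectors get the same alignment
  // (2-byte ABI, 4-byte preferred), even though their total sizes differ.
  outs() << "abi(<4 x i8>)   = " << DL->getABITypeAlign(V4I8).value() << '\n';
  outs() << "pref(<4 x i8>)  = " << DL->getPrefTypeAlign(V4I8).value() << '\n';
  outs() << "abi(<64 x i8>)  = " << DL->getABITypeAlign(V64I8).value() << '\n';
  outs() << "pref(<64 x i8>) = " << DL->getPrefTypeAlign(V64I8).value() << '\n';
  return 0;
}

The last two queries show the point of the new form: with the existing v<size> specifier the alignment of <4 x i8> and <64 x i8> would be keyed off their total widths (32 vs. 512 bits), whereas with ve8 both are keyed off the 8-bit element type.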