diff --git a/llvm/include/llvm/IR/Metadata.h b/llvm/include/llvm/IR/Metadata.h
--- a/llvm/include/llvm/IR/Metadata.h
+++ b/llvm/include/llvm/IR/Metadata.h
@@ -934,38 +934,101 @@
 /// If an unresolved node is part of a cycle, \a resolveCycles() needs
 /// to be called on some member of the cycle once all temporary nodes have been
 /// replaced.
+///
+/// MDNodes can be large or small, as well as resizable or non-resizable.
+/// Large MDNodes' operands are allocated in a separate storage vector,
+/// whereas small MDNodes' operands are co-allocated. Distinct and temporary
+/// MDNodes are resizable, but only MDTuples support this capability.
+///
+/// Clients can add operands to resizable MDNodes using push_back().
 class MDNode : public Metadata {
   friend class ReplaceableMetadataImpl;
   friend class LLVMContextImpl;
   friend class DIArgList;
 
-  /// The header that is coallocated with an MDNode, along with the operands.
-  /// It is located immediately before the main body of the node. The operands
-  /// are in turn located immediately before the header.
+  /// The header that is co-allocated with an MDNode along with its "small"
+  /// operands. It is located immediately before the main body of the node.
+  /// The operands are in turn located immediately before the header.
+  /// For resizable MDNodes, the space for the storage vector is also allocated
+  /// immediately before the header, overlapping with the operands.
   struct Header {
-    unsigned NumOperands;
+    bool IsResizable : 1;
+    bool IsLarge : 1;
+    size_t SmallSize : 4;
+    size_t SmallNumOps : 4;
+    size_t : sizeof(size_t) * CHAR_BIT - 10;
+
+    unsigned NumUnresolved = 0;
+    using LargeStorageVector = SmallVector<MDOperand, 0>;
+
+    static constexpr size_t NumOpsFitInVector =
+        sizeof(LargeStorageVector) / sizeof(MDOperand);
+    static_assert(
+        NumOpsFitInVector * sizeof(MDOperand) == sizeof(LargeStorageVector),
+        "sizeof(LargeStorageVector) must be a multiple of sizeof(MDOperand)");
+
+    static constexpr size_t MaxSmallSize = 15;
 
     static constexpr size_t getOpSize(unsigned NumOps) {
       return sizeof(MDOperand) * NumOps;
     }
-    static constexpr size_t getAllocSize(unsigned NumOps) {
-      return getOpSize(NumOps) + sizeof(Header);
+    /// Returns the number of operands the node has space for based on its
+    /// allocation characteristics.
+    static size_t getSmallSize(size_t NumOps, bool IsResizable, bool IsLarge) {
+      return IsLarge ? NumOpsFitInVector
+                     : std::max(NumOps, NumOpsFitInVector * IsResizable);
+    }
+    /// Returns the number of bytes allocated for operands and header.
+    static size_t getAllocSize(StorageType Storage, size_t NumOps) {
+      return getOpSize(
+                 getSmallSize(NumOps, isResizable(Storage), isLarge(NumOps))) +
+             sizeof(Header);
+    }
+
+    /// Only temporary and distinct nodes are resizable.
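+    /// Uniqued nodes live in the context's uniquing maps, keyed by their
+    /// operands, so changing their operand count in place would leave those
+    /// maps stale; this is why resizing is limited to the other two kinds.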
+    static bool isResizable(StorageType Storage) { return Storage != Uniqued; }
+    static bool isLarge(size_t NumOps) { return NumOps > MaxSmallSize; }
+
+    size_t getAllocSize() const {
+      return getOpSize(SmallSize) + sizeof(Header);
     }
     void *getAllocation() {
       return reinterpret_cast<char *>(this + 1) -
-             alignTo(getAllocSize(NumOperands), alignof(uint64_t));
+             alignTo(getAllocSize(), alignof(uint64_t));
+    }
+
+    void *getLargePtr() const;
+    void *getSmallPtr();
+
+    LargeStorageVector &getLarge() {
+      assert(IsLarge);
+      return *reinterpret_cast<LargeStorageVector *>(getLargePtr());
     }
 
-    explicit Header(unsigned NumOperands);
+    const LargeStorageVector &getLarge() const {
+      assert(IsLarge);
+      return *reinterpret_cast<const LargeStorageVector *>(getLargePtr());
+    }
+
+    void resizeSmall(size_t NumOps);
+    void resizeSmallToLarge(size_t NumOps);
+    void resize(size_t NumOps);
+
+    explicit Header(size_t NumOps, StorageType Storage);
     ~Header();
+
     MutableArrayRef<MDOperand> operands() {
+      if (IsLarge)
+        return getLarge();
       return makeMutableArrayRef(
-          reinterpret_cast<MDOperand *>(this) - NumOperands, NumOperands);
+          reinterpret_cast<MDOperand *>(this) - SmallSize, SmallNumOps);
     }
+
     ArrayRef<MDOperand> operands() const {
-      return makeArrayRef(
-          reinterpret_cast<const MDOperand *>(this) - NumOperands, NumOperands);
+      if (IsLarge)
+        return getLarge();
+      return makeArrayRef(reinterpret_cast<const MDOperand *>(this) - SmallSize,
+                          SmallNumOps);
     }
   };
 
@@ -982,7 +1045,7 @@
                 ArrayRef<Metadata *> Ops1, ArrayRef<Metadata *> Ops2 = None);
   ~MDNode() = default;
 
-  void *operator new(size_t Size, unsigned NumOps, StorageType Storage);
+  void *operator new(size_t Size, size_t NumOps, StorageType Storage);
   void operator delete(void *Mem);
 
   /// Required by std, but never called.
@@ -1146,6 +1209,17 @@
   static T *storeImpl(T *N, StorageType Storage, StoreT &Store);
   template <class T> static T *storeImpl(T *N, StorageType Storage);
 
+  /// Resize the node to hold \a NumOps operands.
+  ///
+  /// \pre \a isTemporary() or \a isDistinct()
+  /// \pre MetadataID == MDTupleKind
+  void resize(size_t NumOps) {
+    assert(!isUniqued() && "Resizing is not supported for uniqued nodes");
+    assert(getMetadataID() == MDTupleKind &&
+           "Resizing is not supported for this node kind");
+    getHeader().resize(NumOps);
+  }
+
 private:
   void handleChangedOperand(void *Ref, Metadata *New);
 
@@ -1207,7 +1281,7 @@
   }
 
   /// Return number of MDNode operands.
-  unsigned getNumOperands() const { return getHeader().NumOperands; }
+  unsigned getNumOperands() const { return getHeader().operands().size(); }
 
   /// Methods for support type inquiry through isa, cast, and dyn_cast:
   static bool classof(const Metadata *MD) {
@@ -1292,6 +1366,16 @@
   /// Return a (temporary) clone of this.
   TempMDTuple clone() const { return cloneImpl(); }
 
+  /// Append an element to the tuple. This will resize the node.
+  void push_back(Metadata *MD) {
+    size_t NumOps = getNumOperands();
+    resize(NumOps + 1);
+    setOperand(NumOps, MD);
+  }
+
+  /// Shrink the operands by 1.
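+  /// Note that a node which has grown into "large" storage keeps its
+  /// out-of-line operand vector even when shrunk again; resizing never
+  /// converts a large node back to the small representation.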
+  void pop_back() { resize(getNumOperands() - 1); }
+
   static bool classof(const Metadata *MD) {
     return MD->getMetadataID() == MDTupleKind;
   }
diff --git a/llvm/lib/IR/Metadata.cpp b/llvm/lib/IR/Metadata.cpp
--- a/llvm/lib/IR/Metadata.cpp
+++ b/llvm/lib/IR/Metadata.cpp
@@ -521,13 +521,13 @@
                 "Alignment is insufficient after objects prepended to " #CLASS);
 #include "llvm/IR/Metadata.def"
 
-void *MDNode::operator new(size_t Size, unsigned NumOps,
-                           StorageType /* Storage */) {
+void *MDNode::operator new(size_t Size, size_t NumOps, StorageType Storage) {
   // uint64_t is the most aligned type we need support (ensured by static_assert
   // above)
-  size_t AllocSize = alignTo(Header::getAllocSize(NumOps), alignof(uint64_t));
+  size_t AllocSize =
+      alignTo(Header::getAllocSize(Storage, NumOps), alignof(uint64_t));
   char *Mem = reinterpret_cast<char *>(::operator new(AllocSize + Size));
-  Header *H = new (Mem + AllocSize - sizeof(Header)) Header(NumOps);
+  Header *H = new (Mem + AllocSize - sizeof(Header)) Header(NumOps, Storage);
   return reinterpret_cast<void *>(H + 1);
 }
 
@@ -566,17 +566,88 @@
   }
 }
 
-MDNode::Header::Header(unsigned NumOps) {
-  NumOperands = NumOps;
-  MDOperand *O = reinterpret_cast<MDOperand *>(this);
-  for (MDOperand *E = O - NumOps; O != E; --O)
-    (void)new (O - 1) MDOperand();
+MDNode::Header::Header(size_t NumOps, StorageType Storage) {
+  IsLarge = isLarge(NumOps);
+  IsResizable = isResizable(Storage);
+  SmallSize = getSmallSize(NumOps, IsResizable, IsLarge);
+  if (IsLarge) {
+    SmallNumOps = 0;
+    new (getLargePtr()) LargeStorageVector();
+    getLarge().resize(NumOps);
+    return;
+  }
+  SmallNumOps = NumOps;
+  MDOperand *O = reinterpret_cast<MDOperand *>(this) - SmallSize;
+  for (MDOperand *E = O + SmallSize; O != E;)
+    (void)new (O++) MDOperand();
 }
 
 MDNode::Header::~Header() {
-  MDOperand *O = reinterpret_cast<MDOperand *>(this) - NumOperands;
-  for (MDOperand *E = O + NumOperands; O != E; ++O)
-    (void)O->~MDOperand();
+  if (IsLarge) {
+    getLarge().~LargeStorageVector();
+    return;
+  }
+  MDOperand *O = reinterpret_cast<MDOperand *>(this);
+  for (MDOperand *E = O - SmallSize; O != E; --O)
+    (void)(O - 1)->~MDOperand();
+}
+
+void *MDNode::Header::getLargePtr() const {
+  static_assert(alignof(LargeStorageVector) <= alignof(Header),
+                "LargeStorageVector too strongly aligned");
+  return reinterpret_cast<char *>(const_cast<Header *>(this)) -
+         sizeof(LargeStorageVector);
+}
+
+void *MDNode::Header::getSmallPtr() {
+  static_assert(alignof(MDOperand) <= alignof(Header),
+                "MDOperand too strongly aligned");
+  return reinterpret_cast<char *>(const_cast<Header *>(this)) -
+         sizeof(MDOperand) * SmallSize;
+}
+
+void MDNode::Header::resize(size_t NumOps) {
+  assert(IsResizable && "Node is not resizable");
+  if (operands().size() == NumOps)
+    return;
+
+  if (IsLarge)
+    getLarge().resize(NumOps);
+  else if (NumOps <= SmallSize)
+    resizeSmall(NumOps);
+  else
+    resizeSmallToLarge(NumOps);
+}
+
+void MDNode::Header::resizeSmall(size_t NumOps) {
+  assert(!IsLarge && "Expected a small MDNode");
+  assert(NumOps <= SmallSize && "NumOps too large for small resize");
+
+  MutableArrayRef<MDOperand> ExistingOps = operands();
+  assert(NumOps != ExistingOps.size() && "Expected a different size");
+
+  int NumNew = (int)NumOps - (int)ExistingOps.size();
+  MDOperand *O = ExistingOps.end();
+  if (NumNew > 0) {
+    for (int I = 0, E = NumNew; I != E; ++I)
+      new (O++) MDOperand();
+  } else {
+    for (int I = 0, E = NumNew; I != E; --I)
+      (--O)->~MDOperand();
+  }
+  SmallNumOps = NumOps;
+  assert(O == operands().end() && "Operands not (un)initialized until the end");
+}
+
+void MDNode::Header::resizeSmallToLarge(size_t NumOps) {
+  assert(!IsLarge && "Expected a small MDNode");
+  assert(NumOps > SmallSize && "Expected NumOps to be larger than allocation");
+  LargeStorageVector NewOps;
+  NewOps.resize(NumOps);
+  llvm::move(operands(), NewOps.begin());
+  resizeSmall(0);
+  new (getLargePtr()) LargeStorageVector(std::move(NewOps));
+  IsLarge = true;
 }
 
 static bool isOperandUnresolved(Metadata *Op) {
diff --git a/llvm/unittests/IR/MetadataTest.cpp b/llvm/unittests/IR/MetadataTest.cpp
--- a/llvm/unittests/IR/MetadataTest.cpp
+++ b/llvm/unittests/IR/MetadataTest.cpp
@@ -3631,4 +3631,128 @@
   EXPECT_EQ(NewOps2.get(), static_cast<Metadata *>(Value2));
 }
 
+TEST_F(MDTupleAllocationTest, Resize) {
+  MDTuple *A = getTuple();
+  Metadata *Value1 = getConstantAsMetadata();
+  Metadata *Value2 = getConstantAsMetadata();
+  Metadata *Value3 = getConstantAsMetadata();
+
+  EXPECT_EQ(A->getNumOperands(), 0u);
+
+  // Add a couple of elements to it, which resizes the node.
+  A->push_back(Value1);
+  EXPECT_EQ(A->getNumOperands(), 1u);
+  EXPECT_EQ(A->getOperand(0), Value1);
+
+  A->push_back(Value2);
+  EXPECT_EQ(A->getNumOperands(), 2u);
+  EXPECT_EQ(A->getOperand(0), Value1);
+  EXPECT_EQ(A->getOperand(1), Value2);
+
+  // Append another element, which should resize the node
+  // to a "large" node, though this is not detectable by the user.
+  A->push_back(Value3);
+  EXPECT_EQ(A->getNumOperands(), 3u);
+  EXPECT_EQ(A->getOperand(0), Value1);
+  EXPECT_EQ(A->getOperand(1), Value2);
+  EXPECT_EQ(A->getOperand(2), Value3);
+
+  // Remove the last element.
+  A->pop_back();
+  EXPECT_EQ(A->getNumOperands(), 2u);
+  EXPECT_EQ(A->getOperand(1), Value2);
+
+  // Allocate a node with 4 operands.
+  Metadata *Value4 = getConstantAsMetadata();
+  Metadata *Value5 = getConstantAsMetadata();
+
+  Metadata *Ops[] = {Value1, Value2, Value3, Value4};
+  MDTuple *B = MDTuple::getDistinct(Context, Ops);
+
+  EXPECT_EQ(B->getNumOperands(), 4u);
+  B->pop_back();
+  EXPECT_EQ(B->getNumOperands(), 3u);
+  B->push_back(Value5);
+  EXPECT_EQ(B->getNumOperands(), 4u);
+  EXPECT_EQ(B->getOperand(0), Value1);
+  EXPECT_EQ(B->getOperand(1), Value2);
+  EXPECT_EQ(B->getOperand(2), Value3);
+  EXPECT_EQ(B->getOperand(3), Value5);
+
+  // Check that we can resize temporary nodes as well.
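+  // (A temporary tuple is resizable because it is not uniqued; the first few
+  // operands live in the co-allocated small storage, and pushing past that
+  // capacity migrates them into the out-of-line vector, invisibly to callers.)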
+  auto Temp1 = MDTuple::getTemporary(Context, None);
+  EXPECT_EQ(Temp1->getNumOperands(), 0u);
+
+  Temp1->push_back(Value1);
+  EXPECT_EQ(Temp1->getNumOperands(), 1u);
+  EXPECT_EQ(Temp1->getOperand(0), Value1);
+
+  for (int i = 0; i < 11; i++)
+    Temp1->push_back(Value2);
+  EXPECT_EQ(Temp1->getNumOperands(), 12u);
+  EXPECT_EQ(Temp1->getOperand(2), Value2);
+  EXPECT_EQ(Temp1->getOperand(11), Value2);
+
+  // Allocate a node that starts off as a large one.
+  Metadata *OpsLarge[] = {Value1, Value2, Value3, Value4,
+                          Value1, Value2, Value3, Value4,
+                          Value1, Value2, Value3, Value4,
+                          Value1, Value2, Value3, Value4,
+                          Value1, Value2, Value3, Value4};
+  MDTuple *C = MDTuple::getDistinct(Context, OpsLarge);
+  EXPECT_EQ(C->getNumOperands(), 20u);
+  EXPECT_EQ(C->getOperand(7), Value4);
+  EXPECT_EQ(C->getOperand(13), Value2);
+
+  C->push_back(Value1);
+  C->push_back(Value2);
+  EXPECT_EQ(C->getNumOperands(), 22u);
+  EXPECT_EQ(C->getOperand(21), Value2);
+  C->pop_back();
+  EXPECT_EQ(C->getNumOperands(), 21u);
+  EXPECT_EQ(C->getOperand(20), Value1);
+}
+
+TEST_F(MDTupleAllocationTest, Tracking2) {
+  // Resize a tuple and check that we can still RAUW one of its operands.
+  auto *Value1 = getConstantAsMetadata();
+  MDTuple *A = getTuple();
+  A->push_back(Value1);
+  A->push_back(Value1);
+  A->push_back(Value1); // Causes a resize to large.
+  EXPECT_EQ(A->getOperand(0), Value1);
+  EXPECT_EQ(A->getOperand(1), Value1);
+  EXPECT_EQ(A->getOperand(2), Value1);
+
+  auto *Value2 = getConstantAsMetadata();
+  Value *V1 = Value1->getValue();
+  Value *V2 = Value2->getValue();
+  ValueAsMetadata::handleRAUW(V1, V2);
+
+  EXPECT_EQ(A->getOperand(0), Value2);
+  EXPECT_EQ(A->getOperand(1), Value2);
+  EXPECT_EQ(A->getOperand(2), Value2);
+}
+
+#ifdef GTEST_HAS_DEATH_TEST
+typedef MetadataTest MDTupleAllocationDeathTest;
+TEST_F(MDTupleAllocationDeathTest, ResizeRejected) {
+  MDTuple *A = MDTuple::get(Context, None);
+  auto *Value1 = getConstantAsMetadata();
+  EXPECT_DEATH(A->push_back(Value1),
+               "Resizing is not supported for uniqued nodes");
+
+  // Check that a node that was allocated as a temporary cannot be resized
+  // after it has been uniqued.
+  auto *Value2 = getConstantAsMetadata();
+  auto B = MDTuple::getTemporary(Context, {Value2});
+  B->push_back(Value2);
+  MDTuple *BUniqued = MDNode::replaceWithUniqued(std::move(B));
+  EXPECT_EQ(BUniqued->getNumOperands(), 2u);
+  EXPECT_EQ(BUniqued->getOperand(1), Value2);
+  EXPECT_DEATH(BUniqued->push_back(Value2),
+               "Resizing is not supported for uniqued nodes");
+}
+#endif
+
 } // end namespace
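A minimal client-side sketch of the new API (not part of the patch; it assumes the usual LLVM headers and an existing LLVMContext, and the function name is illustrative). It builds a distinct tuple incrementally instead of collecting operands into a separate buffer first:

    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Metadata.h"
    using namespace llvm;

    // Hypothetical helper: append operands to a tuple as they are discovered.
    MDTuple *buildIncrementally(LLVMContext &Ctx, ArrayRef<Metadata *> Found) {
      // Distinct (and temporary) tuples are resizable; uniqued ones are not.
      MDTuple *Tuple = MDTuple::getDistinct(Ctx, None);
      for (Metadata *MD : Found)
        Tuple->push_back(MD); // May migrate from small to large storage.
      if (Tuple->getNumOperands() > 0)
        Tuple->pop_back();    // Shrinking is supported as well.
      return Tuple;           // A uniqued tuple would assert on push_back().
    }

The design choice this exercises: growth is hidden behind push_back(), so callers never observe whether operands live in the co-allocated small area or in the out-of-line vector.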