Index: llvm/include/llvm/IR/Metadata.h =================================================================== --- llvm/include/llvm/IR/Metadata.h +++ llvm/include/llvm/IR/Metadata.h @@ -16,6 +16,7 @@ #define LLVM_IR_METADATA_H #include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/Bitfields.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DenseMapInfo.h" #include "llvm/ADT/None.h" @@ -923,22 +924,88 @@ /// If an unresolved node is part of a cycle, \a resolveCycles() needs /// to be called on some member of the cycle once all temporary nodes have been /// replaced. +/// +/// Temporary and distinct MDNodes are resizable, but this is at present only +/// supported for MDTuples. At allocation time, depending on desired capacity, +/// operands are either co-allocated with the node or allocated separately in +/// hung-off storage. class MDNode : public Metadata { friend class ReplaceableMetadataImpl; friend class LLVMContextImpl; friend class DIArgList; - unsigned NumOperands; + // See the comment for operator new for more information on allocation types. + enum class AllocationType : uint8_t { + TinyFixed, + TinyResizable, + Small, + Large + }; + + /// How operands are stored if not co-allocated with the node. + /// \a Capacity determines the actual size. + struct HungOffOperandStorage { + unsigned Capacity; + unsigned NumOperands; + MDOperand Operands[1]; + }; + + unsigned Info; // Storage for the following bitfields. + + using AllocType = + Bitfield::Element; + /// The capacity of an MDNode with co-allocated operands. + using CoallocCapacity = Bitfield::Element; + using CoallocNumOperands = + Bitfield::Element; + unsigned NumUnresolved; ContextAndReplaceableUses Context; +protected: + using HungOffPtr = std::unique_ptr; + using CoCapType = CoallocCapacity::Type; + static constexpr unsigned CoCapSize = CoallocCapacity::Bits; + /// The maximum number of MDOperands that can be coallocated with an MDNode. + static constexpr unsigned MaxCoallocCapacity = + bitfields_details::BitPatterns::Umax; + +private: + /// Number of MDOperands to cover enough storage space for a HungOffPtr. + /// Nodes with at least this many operands do not need extra storage + /// allocated. This is host-dependent, but probably 1 in most cases. + static constexpr unsigned MinOpsForPtr = + (sizeof(HungOffPtr) - 1) / sizeof(MDOperand) + 1; + protected: MDNode(LLVMContext &Context, unsigned ID, StorageType Storage, ArrayRef Ops1, ArrayRef Ops2 = None); ~MDNode() = default; - void *operator new(size_t Size, unsigned NumOps); + /// For purposes of initial allocation, we differentiate between 4 different + /// allocation types: + /// (1) "large" nodes with a capacity of more than MaxCoallocCapacity (15) + /// operands, + /// (2) "small" nodes with a capacity of fewer than MaxCoallocCapacity but at + /// least MinOpsForPtr operands, + /// (3) "tinyResizable" nodes with a capacity of fewer than MinOpsForPtr + /// operands and with distinct or temporary storage type, + /// (4) "tinyFixed" nodes with unique storage type. + /// + /// Nodes of type (1) store their operands in hung-off storage, whereas all + /// the other types' operands are co-allocated. All nodes except type (4) are + /// resizable. Any resize makes a node into a "large" one. Nodes of type (3) + /// require extra space to accommodate a pointer to hung-off storage, in case + /// they need to be resized. + /// + /// Note that an MDNode's change in storage type (e.g. temporary->uniqued) + /// does not affect their allocation type. 
However, once a node becomes + /// uniqued, it can no longer be resized, no matter its allocation type. + /// + /// \c NumOps determines the capacity of the new node. The actual number + /// of operands is set by the constructors. + void *operator new(size_t Size, unsigned NumOps, StorageType Storage); void operator delete(void *Mem); /// Required by std, but never called. @@ -953,8 +1020,52 @@ void dropAllReferences(); - MDOperand *mutable_begin() { return mutable_end() - NumOperands; } - MDOperand *mutable_end() { return reinterpret_cast(this); } + void setNumOperands(unsigned NOps) { + assert(NOps <= getCapacity() && + "Number of operands exceeds capacity of MDNode"); + if (getAllocationType() == AllocationType::Large) + hungoff()->NumOperands = NOps; + else + setCoallocNumOperands(NOps); + } + + AllocationType getAllocationType() const { + return Bitfield::get(Info); + } + + void setAllocationType(AllocationType Type) { + Bitfield::set(Info, Type); + } + + unsigned getCoallocCapacity() const { + return Bitfield::get(Info); + } + + void setCoallocCapacity(unsigned Cap) { + Bitfield::set(Info, Cap); + } + + unsigned getCoallocNumOperands() const { + return Bitfield::get(Info); + } + + void setCoallocNumOperands(unsigned Num) { + Bitfield::set(Info, Num); + } + + HungOffPtr &hungoff() const { + assert(getAllocationType() == AllocationType::Large && + "Expected large MDNode"); + return *(reinterpret_cast(const_cast(this)) - 1); + } + + MDOperand *mutable_begin() { + return getAllocationType() == AllocationType::Large + ? hungoff()->Operands + : reinterpret_cast(this) - getCoallocCapacity(); + } + + MDOperand *mutable_end() { return mutable_begin() + getNumOperands(); } using mutable_op_range = iterator_range; @@ -1015,6 +1126,10 @@ Context.getReplaceableUses()->replaceAllUsesWith(MD); } + /// Increase the operand capacity to \c NumOps. + /// \pre Must be either a temporary or distinct node. + void reserve(unsigned NumOps); + /// Resolve cycles. /// /// Once all forward declarations have been resolved, force cycles to be @@ -1100,6 +1215,20 @@ template static T *storeImpl(T *N, StorageType Storage); private: + + static AllocationType determineAllocationType(unsigned NumOps, + StorageType Storage) { + return NumOps > MaxCoallocCapacity ? AllocationType::Large + : NumOps > MinOpsForPtr ? AllocationType::Small + : Storage == Uniqued ? AllocationType::TinyFixed + : AllocationType::TinyResizable; + } + + void ensureCapacity(unsigned NumOps) { + if (NumOps > getCapacity()) + reserve(NumOps); + } + void handleChangedOperand(void *Ref, Metadata *New); /// Drop RAUW support, if any. @@ -1155,12 +1284,38 @@ op_range operands() const { return op_range(op_begin(), op_end()); } const MDOperand &getOperand(unsigned I) const { - assert(I < NumOperands && "Out of range"); + assert(I < getNumOperands() && "Out of range"); return op_begin()[I]; } + /// Append a range of operands to the existing ones. Performs a + /// resize/reserve if necessary. Only allowed for distinct and temporary + /// storage classes. + void append(ArrayRef Ops); + + void append(ArrayRef Ops) { + append(makeArrayRef( + reinterpret_cast(const_cast(Ops.begin())), + Ops.size())); + } + + template + void + append(std::enable_if_t::value, T> Op) { + append(makeArrayRef(Op)); + } + /// Return number of MDNode operands. - unsigned getNumOperands() const { return NumOperands; } + unsigned getNumOperands() const { + return getAllocationType() == AllocationType::Large + ? 
hungoff()->NumOperands + : getCoallocNumOperands(); + } + + unsigned getCapacity() const { + return getAllocationType() == AllocationType::Large ? hungoff()->Capacity + : getCoallocCapacity(); + } /// Methods for support type inquiry through isa, cast, and dyn_cast: static bool classof(const Metadata *MD) { @@ -1207,10 +1362,12 @@ void recalculateHash(); static MDTuple *getImpl(LLVMContext &Context, ArrayRef MDs, - StorageType Storage, bool ShouldCreate = true); + StorageType Storage, unsigned Capacity, + bool ShouldCreate = true); TempMDTuple cloneImpl() const { - return getTemporary(getContext(), SmallVector(operands())); + return getTemporary(getContext(), SmallVector(operands()), + getCapacity()); } public: @@ -1218,18 +1375,19 @@ unsigned getHash() const { return SubclassData32; } static MDTuple *get(LLVMContext &Context, ArrayRef MDs) { - return getImpl(Context, MDs, Uniqued); + return getImpl(Context, MDs, Uniqued, MDs.size()); } static MDTuple *getIfExists(LLVMContext &Context, ArrayRef MDs) { - return getImpl(Context, MDs, Uniqued, /* ShouldCreate */ false); + return getImpl(Context, MDs, Uniqued, MDs.size(), /* ShouldCreate */ false); } /// Return a distinct node. /// /// Return a distinct node -- i.e., a node that is not uniqued. - static MDTuple *getDistinct(LLVMContext &Context, ArrayRef MDs) { - return getImpl(Context, MDs, Distinct); + static MDTuple *getDistinct(LLVMContext &Context, ArrayRef MDs, + unsigned Capacity = 0) { + return getImpl(Context, MDs, Distinct, Capacity); } /// Return a temporary node. @@ -1238,8 +1396,9 @@ /// not uniqued, may be RAUW'd, and must be manually deleted with /// deleteTemporary. static TempMDTuple getTemporary(LLVMContext &Context, - ArrayRef MDs) { - return TempMDTuple(getImpl(Context, MDs, Temporary)); + ArrayRef MDs, + unsigned Capacity = 0) { + return TempMDTuple(getImpl(Context, MDs, Temporary, Capacity)); } /// Return a (temporary) clone of this. Index: llvm/lib/AsmParser/LLParser.cpp =================================================================== --- llvm/lib/AsmParser/LLParser.cpp +++ llvm/lib/AsmParser/LLParser.cpp @@ -3783,7 +3783,8 @@ if (parseMDNodeVector(Elts)) return true; - MD = (IsDistinct ? MDTuple::getDistinct : MDTuple::get)(Context, Elts); + MD = IsDistinct ? MDTuple::getDistinct(Context, Elts) + : MDTuple::get(Context, Elts); return false; } Index: llvm/lib/CodeGen/MIRParser/MIParser.cpp =================================================================== --- llvm/lib/CodeGen/MIRParser/MIParser.cpp +++ llvm/lib/CodeGen/MIRParser/MIParser.cpp @@ -1249,8 +1249,8 @@ SmallVector Elts; if (parseMDNodeVector(Elts)) return true; - MD = (IsDistinct ? MDTuple::getDistinct - : MDTuple::get)(MF.getFunction().getContext(), Elts); + MD = IsDistinct ? MDTuple::getDistinct(MF.getFunction().getContext(), Elts) + : MDTuple::get(MF.getFunction().getContext(), Elts); return false; } Index: llvm/lib/IR/DebugInfoMetadata.cpp =================================================================== --- llvm/lib/IR/DebugInfoMetadata.cpp +++ llvm/lib/IR/DebugInfoMetadata.cpp @@ -78,8 +78,8 @@ Ops.push_back(Scope); if (InlinedAt) Ops.push_back(InlinedAt); - return storeImpl(new (Ops.size()) DILocation(Context, Storage, Line, Column, - Ops, ImplicitCode), + return storeImpl(new (Ops.size(), Storage) DILocation( + Context, Storage, Line, Column, Ops, ImplicitCode), Storage, Context.pImpl->DILocations); } @@ -304,7 +304,7 @@ // Use a nullptr for empty headers. 
assert(isCanonical(Header) && "Expected canonical MDString"); Metadata *PreOps[] = {Header}; - return storeImpl(new (DwarfOps.size() + 1) GenericDINode( + return storeImpl(new (DwarfOps.size() + 1, Storage) GenericDINode( Context, Storage, Hash, Tag, PreOps, DwarfOps), Storage, Context.pImpl->GenericDINodes); } @@ -329,17 +329,19 @@ } \ } while (false) #define DEFINE_GETIMPL_STORE(CLASS, ARGS, OPS) \ - return storeImpl(new (array_lengthof(OPS)) \ + return storeImpl(new (array_lengthof(OPS), Storage) \ CLASS(Context, Storage, UNWRAP_ARGS(ARGS), OPS), \ Storage, Context.pImpl->CLASS##s) #define DEFINE_GETIMPL_STORE_NO_OPS(CLASS, ARGS) \ - return storeImpl(new (0u) CLASS(Context, Storage, UNWRAP_ARGS(ARGS)), \ + return storeImpl(new (0u, Storage) \ + CLASS(Context, Storage, UNWRAP_ARGS(ARGS)), \ Storage, Context.pImpl->CLASS##s) #define DEFINE_GETIMPL_STORE_NO_CONSTRUCTOR_ARGS(CLASS, OPS) \ - return storeImpl(new (array_lengthof(OPS)) CLASS(Context, Storage, OPS), \ + return storeImpl(new (array_lengthof(OPS), Storage) \ + CLASS(Context, Storage, OPS), \ Storage, Context.pImpl->CLASS##s) #define DEFINE_GETIMPL_STORE_N(CLASS, ARGS, OPS, NUM_OPS) \ - return storeImpl(new (NUM_OPS) \ + return storeImpl(new (NUM_OPS, Storage) \ CLASS(Context, Storage, UNWRAP_ARGS(ARGS), OPS), \ Storage, Context.pImpl->CLASS##s) @@ -848,7 +850,7 @@ Macros, SysRoot, SDK}; - return storeImpl(new (array_lengthof(Ops)) DICompileUnit( + return storeImpl(new (array_lengthof(Ops), Storage) DICompileUnit( Context, Storage, SourceLanguage, IsOptimized, RuntimeVersion, EmissionKind, DWOId, SplitDebugInlining, DebugInfoForProfiling, NameTableKind, RangesBaseAddress, Index: llvm/lib/IR/Metadata.cpp =================================================================== --- llvm/lib/IR/Metadata.cpp +++ llvm/lib/IR/Metadata.cpp @@ -523,15 +523,42 @@ "Alignment is insufficient after objects prepended to " #CLASS); #include "llvm/IR/Metadata.def" -void *MDNode::operator new(size_t Size, unsigned NumOps) { +void *MDNode::operator new(size_t Size, unsigned NumOps, StorageType Storage) { + AllocationType AllocType = determineAllocationType(NumOps, Storage); size_t OpSize = NumOps * sizeof(MDOperand); + + // For large nodes we do not co-allocate operands. + if (AllocType == AllocationType::Large) + OpSize = 0; + // All allocation types except small uniqued nodes must reserve extra space + // for a pointer to hung-off storage to enable resizing. + if (AllocType != AllocationType::TinyFixed) + OpSize = std::max(OpSize, sizeof(HungOffPtr)); // uint64_t is the most aligned type we need support (ensured by static_assert // above) OpSize = alignTo(OpSize, alignof(uint64_t)); + + // Allocate the main node. void *Ptr = reinterpret_cast(::operator new(OpSize + Size)) + OpSize; - MDOperand *O = static_cast(Ptr); - for (MDOperand *E = O - NumOps; O != E; --O) - (void)new (O - 1) MDOperand; + MDNode *NewNode = reinterpret_cast(Ptr); + NewNode->setCoallocCapacity(AllocType == AllocationType::Large ? 0 : NumOps); + NewNode->setCoallocNumOperands(0); + NewNode->setAllocationType(AllocType); + + // Allocate hung-off storage for large nodes. 
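+  // For such nodes the co-allocated prefix in front of the MDNode holds only
+  // the HungOffPtr (installed below with placement new over hungoff()); the
+  // operands themselves live in the separately allocated
+  // HungOffOperandStorage that mutable_begin() reaches through that pointer.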
+ if (AllocType == AllocationType::Large) { + size_t HungOffSize = + sizeof(HungOffOperandStorage) + (NumOps - 1) * sizeof(MDOperand); + auto HungOffStorage = + reinterpret_cast(::operator new(HungOffSize)); + HungOffStorage->Capacity = NumOps; + HungOffStorage->NumOperands = 0; + new (&NewNode->hungoff()) HungOffPtr(HungOffStorage); + } + + MDOperand *O = NewNode->mutable_begin(); + for (MDOperand *E = O + NumOps; O != E; ++O) + (void)new (O) MDOperand; return Ptr; } @@ -539,19 +566,73 @@ // delete. Bug report 24578 identifies this issue. LLVM_NO_SANITIZE_MEMORY_ATTRIBUTE void MDNode::operator delete(void *Mem) { MDNode *N = static_cast(Mem); - size_t OpSize = N->NumOperands * sizeof(MDOperand); - OpSize = alignTo(OpSize, alignof(uint64_t)); + MDOperand *O = N->mutable_begin() + N->getCapacity(); - MDOperand *O = static_cast(Mem); - for (MDOperand *E = O - N->NumOperands; O != E; --O) + for (MDOperand *E = O - N->getCapacity(); O != E; --O) (O - 1)->~MDOperand(); + + if (N->getAllocationType() == AllocationType::Large) + ::operator delete(N->hungoff().release()); + + // Deallocate the main node. + size_t OpSize = N->getCoallocCapacity() * sizeof(MDOperand); + if (N->getAllocationType() != AllocationType::TinyFixed) + OpSize = std::max(OpSize, sizeof(HungOffPtr)); + OpSize = alignTo(OpSize, alignof(uint64_t)); + ::operator delete(reinterpret_cast(Mem) - OpSize); } +void MDNode::reserve(unsigned NumOps) { + assert( + getMetadataID() == MDTupleKind && + "Increasing the number of operands is not supported for this node kind"); + assert(!isUniqued() && + "Cannot increase the number of operands of a uniqued node"); + if (NumOps <= getCapacity()) + return; + + size_t OldOpSize = getNumOperands() * sizeof(MDOperand); + size_t HungOffSize = + sizeof(HungOffOperandStorage) + (NumOps - 1) * sizeof(MDOperand); + + // Allocate the hung-off operand storage. + auto HOPtr = + reinterpret_cast(::operator new(HungOffSize)); + HOPtr->Capacity = NumOps; + HOPtr->NumOperands = getNumOperands(); + + MDOperand *OldOps = mutable_begin(); + MDOperand *NewOps = HOPtr->Operands; + // FIXME: Should we define MDOperand's move constructor to do this? + std::memcpy(NewOps, OldOps, OldOpSize); + for (MDOperand *OO = OldOps, *NO = NewOps; OO != op_end(); OO++, NO++) + MetadataTracking::retrack(OO, **OO, NO); + + // Initialize the extra MDOperands. 
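+  // These slots lie beyond the old operand count, so there is nothing to move
+  // into them; they are simply default-constructed in place.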
+ for (MDOperand *O = NewOps + getNumOperands(), *E = NewOps + NumOps; O != E; + ++O) + (void)new (O) MDOperand; + + if (getAllocationType() == AllocationType::Large) + ::operator delete(hungoff().release()); + setAllocationType(AllocationType::Large); + new (&hungoff()) HungOffPtr(HOPtr); +} + +void MDNode::append(ArrayRef Ops) { + unsigned NumNewOps = getNumOperands() + Ops.size(); + ensureCapacity(NumNewOps); + unsigned Op = getNumOperands(); + setNumOperands(NumNewOps); + for (Metadata *M : Ops) + setOperand(Op++, M); +} + MDNode::MDNode(LLVMContext &Context, unsigned ID, StorageType Storage, ArrayRef Ops1, ArrayRef Ops2) - : Metadata(ID, Storage), NumOperands(Ops1.size() + Ops2.size()), - NumUnresolved(0), Context(Context) { + : Metadata(ID, Storage), NumUnresolved(0), Context(Context) { + setNumOperands(Ops1.size() + Ops2.size()); unsigned Op = 0; for (Metadata *MD : Ops1) setOperand(Op++, MD); @@ -732,7 +813,7 @@ } void MDNode::dropAllReferences() { - for (unsigned I = 0, E = NumOperands; I != E; ++I) + for (unsigned I = 0, E = getNumOperands(); I != E; ++I) setOperand(I, nullptr); if (Context.hasReplaceableUses()) { Context.getReplaceableUses()->resolveAllUses(/* ResolveUsers */ false); @@ -855,7 +936,8 @@ } MDTuple *MDTuple::getImpl(LLVMContext &Context, ArrayRef MDs, - StorageType Storage, bool ShouldCreate) { + StorageType Storage, unsigned Capacity, + bool ShouldCreate) { unsigned Hash = 0; if (Storage == Uniqued) { MDTupleInfo::KeyTy Key(MDs); @@ -868,7 +950,8 @@ assert(ShouldCreate && "Expected non-uniqued nodes to always be created"); } - return storeImpl(new (MDs.size()) MDTuple(Context, Storage, Hash, MDs), + size_t Size = std::max(MDs.size(), static_cast(Capacity)); + return storeImpl(new (Size, Storage) MDTuple(Context, Storage, Hash, MDs), Storage, Context.pImpl->MDTuples); } @@ -913,7 +996,7 @@ } void MDNode::setOperand(unsigned I, Metadata *New) { - assert(I < NumOperands); + assert(I < getNumOperands()); mutable_begin()[I].reset(New, isUniqued() ? this : nullptr); } Index: llvm/lib/Linker/IRMover.cpp =================================================================== --- llvm/lib/Linker/IRMover.cpp +++ llvm/lib/Linker/IRMover.cpp @@ -1357,13 +1357,6 @@ DstM.getModuleIdentifier() + "'"); } - auto replaceDstValue = [&](MDNode *New) { - Metadata *FlagOps[] = {DstOp->getOperand(0), ID, New}; - MDNode *Flag = MDNode::get(DstM.getContext(), FlagOps); - DstModFlags->setOperand(DstIndex, Flag); - Flags[ID].first = Flag; - }; - // Emit a warning if the values differ and either source or destination // request Warning behavior. if ((DstBehaviorValue == Module::Warning || @@ -1417,6 +1410,17 @@ continue; } + auto ensureDistinctOp = [&](MDNode *DstValue) { + if (DstValue->isDistinct()) + return DstValue; + MDNode *New = MDNode::replaceWithDistinct(DstValue->clone()); + Metadata *FlagOps[] = {DstOp->getOperand(0), ID, New}; + MDNode *Flag = MDNode::get(DstM.getContext(), FlagOps); + DstModFlags->setOperand(DstIndex, Flag); + Flags[ID].first = Flag; + return New; + }; + // Perform the merge for standard behavior types. 
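+  // The Append and AppendUnique cases below now grow the destination value in
+  // place via MDNode::append(), which is only legal on distinct (or temporary)
+  // nodes; ensureDistinctOp() above upgrades a uniqued flag value to a
+  // distinct clone first.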
switch (SrcBehaviorValue) { case Module::Require: @@ -1438,25 +1442,22 @@ break; } case Module::Append: { - MDNode *DstValue = cast(DstOp->getOperand(2)); + MDNode *DstValue = ensureDistinctOp(cast(DstOp->getOperand(2))); MDNode *SrcValue = cast(SrcOp->getOperand(2)); - SmallVector MDs; - MDs.reserve(DstValue->getNumOperands() + SrcValue->getNumOperands()); - MDs.append(DstValue->op_begin(), DstValue->op_end()); - MDs.append(SrcValue->op_begin(), SrcValue->op_end()); - - replaceDstValue(MDNode::get(DstM.getContext(), MDs)); + DstValue->append(makeArrayRef(SrcValue->op_begin(), SrcValue->op_end())); break; } case Module::AppendUnique: { SmallSetVector Elts; - MDNode *DstValue = cast(DstOp->getOperand(2)); + MDNode *DstValue = ensureDistinctOp(cast(DstOp->getOperand(2))); MDNode *SrcValue = cast(SrcOp->getOperand(2)); Elts.insert(DstValue->op_begin(), DstValue->op_end()); Elts.insert(SrcValue->op_begin(), SrcValue->op_end()); - - replaceDstValue(MDNode::get(DstM.getContext(), - makeArrayRef(Elts.begin(), Elts.end()))); + // There should not be any duplicates in DstValue's original operands, + // therefore all of SrcValue's operands that were added must be trailing + // DstValue's operands. + DstValue->append( + makeArrayRef(Elts.begin() + DstValue->getNumOperands(), Elts.end())); break; } } Index: llvm/lib/Transforms/Instrumentation/CGProfile.cpp =================================================================== --- llvm/lib/Transforms/Instrumentation/CGProfile.cpp +++ llvm/lib/Transforms/Instrumentation/CGProfile.cpp @@ -39,7 +39,8 @@ Nodes.push_back(MDNode::get(Context, Vals)); } - M.addModuleFlag(Module::Append, "CG Profile", MDNode::get(Context, Nodes)); + M.addModuleFlag(Module::Append, "CG Profile", + MDNode::getDistinct(Context, Nodes)); return true; } Index: llvm/test/Instrumentation/cgprofile.ll =================================================================== --- llvm/test/Instrumentation/cgprofile.ll +++ llvm/test/Instrumentation/cgprofile.ll @@ -34,7 +34,7 @@ ; CHECK: !llvm.module.flags = !{![[cgprof:[0-9]+]]} ; CHECK: ![[cgprof]] = !{i32 5, !"CG Profile", ![[prof:[0-9]+]]} -; CHECK: ![[prof]] = !{![[e0:[0-9]+]], ![[e1:[0-9]+]], ![[e2:[0-9]+]], ![[e3:[0-9]+]], ![[e4:[0-9]+]], ![[e5:[0-9]+]], ![[e6:[0-9]+]]} +; CHECK: ![[prof]] = distinct !{![[e0:[0-9]+]], ![[e1:[0-9]+]], ![[e2:[0-9]+]], ![[e3:[0-9]+]], ![[e4:[0-9]+]], ![[e5:[0-9]+]], ![[e6:[0-9]+]]} ; CHECK: ![[e0]] = !{void ()* @a, void ()* @b, i64 32} ; CHECK: ![[e1]] = !{void (i1)* @freq, i32 ()* @func4, i64 1030} ; CHECK: ![[e2]] = !{void (i1)* @freq, i32 ()* @func2, i64 410} Index: llvm/test/Linker/module-flags-8-a.ll =================================================================== --- llvm/test/Linker/module-flags-8-a.ll +++ llvm/test/Linker/module-flags-8-a.ll @@ -3,9 +3,9 @@ ; Test append-type module flags. ; CHECK: !0 = !{i32 5, !"flag-0", !1} -; CHECK: !1 = !{i32 0, i32 0, i32 1} +; CHECK: !1 = distinct !{i32 0, i32 0, i32 1} ; CHECK: !2 = !{i32 6, !"flag-1", !3} -; CHECK: !3 = !{i32 0, i32 1, i32 2} +; CHECK: !3 = distinct !{i32 0, i32 1, i32 2} ; CHECK: !llvm.module.flags = !{!0, !2} !0 = !{ i32 5, !"flag-0", !{ i32 0 } } Index: llvm/test/Linker/module-flags-dont-change-others.ll =================================================================== --- llvm/test/Linker/module-flags-dont-change-others.ll +++ llvm/test/Linker/module-flags-dont-change-others.ll @@ -5,7 +5,7 @@ ; happen to also serve as module flags. 
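+; Note: append-type flag values are now made distinct and grown in place
+; during linking, which is why the merged flags below reference distinct
+; tuples with fresh metadata numbering.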
 
 ; CHECK: !named = !{!0, !1, !2, !3, !4, !5}
-; CHECK: !llvm.module.flags = !{!6, !7, !8}
+; CHECK: !llvm.module.flags = !{!6, !7, !9}
 !named = !{!0, !1, !2, !3, !4, !5}
 !llvm.module.flags = !{!3, !4, !5}
@@ -16,8 +16,10 @@
 ; CHECK: !4 = !{i32 5, !"bar", !0}
 ; CHECK: !5 = !{i32 6, !"baz", !1}
 ; CHECK: !6 = !{i32 4, !"foo", i32 37}
-; CHECK: !7 = !{i32 5, !"bar", !1}
-; CHECK: !8 = !{i32 6, !"baz", !2}
+; CHECK: !7 = !{i32 5, !"bar", !8}
+; CHECK: !8 = distinct !{!0}
+; CHECK: !9 = !{i32 6, !"baz", !10}
+; CHECK: !10 = distinct !{!0, !1}
 !0 = !{}
 !1 = !{!0}
 !2 = !{!0, !1}
Index: llvm/test/Transforms/FunctionImport/cg_profile.ll
===================================================================
--- llvm/test/Transforms/FunctionImport/cg_profile.ll
+++ llvm/test/Transforms/FunctionImport/cg_profile.ll
@@ -9,7 +9,7 @@
 ; CHECK: !0 = !{i32 1, !"EnableSplitLTOUnit", i32 0}
 ; CHECK-NEXT: !1 = !{i32 5, !"CG Profile", !2}
-; CHECK-NEXT: !2 = !{!3}
+; CHECK-NEXT: !2 = distinct !{!3}
 ; CHECK-NEXT: !3 = !{void ()* @foo, void (%class.A*)* bitcast (void (%class.A.0*)* @bar to void (%class.A*)*), i64 2753}
 
 target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
Index: llvm/unittests/IR/MetadataTest.cpp
===================================================================
--- llvm/unittests/IR/MetadataTest.cpp
+++ llvm/unittests/IR/MetadataTest.cpp
@@ -79,7 +79,9 @@
     return MDNode::get(Context, MDs);
   }
-  MDTuple *getTuple() { return MDTuple::getDistinct(Context, None); }
+  MDTuple *getTuple(unsigned Capacity = 0) {
+    return MDTuple::getDistinct(Context, None, Capacity);
+  }
   DISubroutineType *getSubroutineType() {
     return DISubroutineType::getDistinct(Context, DINode::FlagZero, 0,
                                          getNode(nullptr));
   }
@@ -3602,4 +3604,103 @@
   EXPECT_EQ(DebugVariableMap.find(DebugVariableFragB)->second, 12u);
 }
 
+typedef MetadataTest MDTupleAllocationTest;
+TEST_F(MDTupleAllocationTest, Resize) {
+  MDTuple *A = getTuple();
+  Metadata *Value1 = getConstantAsMetadata();
+  Metadata *Value2 = getConstantAsMetadata();
+
+  // We can allocate a zero-length tuple and add elements to it.
+  EXPECT_EQ(A->getNumOperands(), 0u);
+  EXPECT_EQ(A->getCapacity(), 0u);
+
+  A->append(Value1);
+  EXPECT_EQ(A->getNumOperands(), 1u);
+  EXPECT_EQ(A->getCapacity(), 1u);
+  EXPECT_EQ(A->getOperand(0), Value1);
+
+  A->append(Value2);
+  EXPECT_EQ(A->getNumOperands(), 2u);
+  EXPECT_EQ(A->getCapacity(), 2u);
+  EXPECT_EQ(A->getOperand(0), Value1);
+  EXPECT_EQ(A->getOperand(1), Value2);
+
+  // Allocate a "small" MDNode, append a couple of values, and reserve more
+  // space for operands afterwards.
+  MDTuple *B = getTuple(/* Capacity = */ 4);
+
+  EXPECT_EQ(B->getCapacity(), 4u);
+  EXPECT_EQ(B->getNumOperands(), 0u);
+
+  Metadata *Value3 = getConstantAsMetadata();
+  Metadata *Value4 = getConstantAsMetadata();
+  Metadata *Value5 = getConstantAsMetadata();
+
+  Metadata *Ops[] = {Value3, Value4};
+  B->append(makeArrayRef(Ops, 2));
+
+  EXPECT_EQ(B->getCapacity(), 4u);
+  EXPECT_EQ(B->getNumOperands(), 2u);
+  EXPECT_EQ(B->getOperand(0), Value3);
+  EXPECT_EQ(B->getOperand(1), Value4);
+
+  B->reserve(12);
+  EXPECT_EQ(B->getCapacity(), 12u);
+  EXPECT_EQ(B->getOperand(0), Value3);
+  EXPECT_EQ(B->getOperand(1), Value4);
+
+  B->append(Value5);
+  EXPECT_EQ(B->getNumOperands(), 3u);
+  EXPECT_EQ(B->getOperand(2), Value5);
+
+  // Check that we can increase the capacity of temporary nodes as well.
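+  // (Temporary nodes take the same resizable allocation path as distinct
+  // ones, so reserve() and append() behave the same way here.)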
+  auto Temp1 = MDTuple::getTemporary(Context, None);
+  EXPECT_EQ(Temp1->getNumOperands(), 0u);
+  EXPECT_EQ(Temp1->getCapacity(), 0u);
+
+  Temp1->append(Value1);
+  EXPECT_EQ(Temp1->getNumOperands(), 1u);
+  EXPECT_EQ(Temp1->getCapacity(), 1u);
+  EXPECT_EQ(Temp1->getOperand(0), Value1);
+
+  Temp1->reserve(12);
+  EXPECT_EQ(Temp1->getNumOperands(), 1u);
+  EXPECT_EQ(Temp1->getCapacity(), 12u);
+  EXPECT_EQ(Temp1->getOperand(0), Value1);
+
+  Temp1->append(Value2);
+  EXPECT_EQ(Temp1->getNumOperands(), 2u);
+  EXPECT_EQ(Temp1->getCapacity(), 12u);
+  EXPECT_EQ(Temp1->getOperand(1), Value2);
+}
+
+TEST_F(MDTupleAllocationTest, Tracking) {
+  // Resize a tuple and check that we can still RAUW one of its operands.
+  auto *Value1 = getConstantAsMetadata();
+  MDTuple *A = getTuple(1);
+  A->append(Value1);
+  A->reserve(2);
+  EXPECT_EQ(A->getOperand(0), Value1);
+
+  auto *Value2 = getConstantAsMetadata();
+  Value *V1 = Value1->getValue();
+  Value *V2 = Value2->getValue();
+  ValueAsMetadata::handleRAUW(V1, V2);
+  EXPECT_EQ(A->getOperand(0), Value2);
+}
+
+#ifdef GTEST_HAS_DEATH_TEST
+typedef MetadataTest MDTupleAllocationDeathTest;
+TEST_F(MDTupleAllocationDeathTest, ResizeRejected) {
+  MDNode *A = getNode();
+  EXPECT_DEATH(A->reserve(1),
+               "Cannot increase the number of operands of a uniqued node");
+
+  auto *B = getFile();
+  EXPECT_DEATH(
+      B->reserve(2),
+      "Increasing the number of operands is not supported for this node kind");
+}
+#endif
+
 } // end namespace
Index: llvm/unittests/Linker/LinkModulesTest.cpp
===================================================================
--- llvm/unittests/Linker/LinkModulesTest.cpp
+++ llvm/unittests/Linker/LinkModulesTest.cpp
@@ -15,6 +15,7 @@
 #include "llvm/IR/Function.h"
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/Module.h"
+#include "llvm/IR/Verifier.h"
 #include "llvm/Linker/Linker.h"
 #include "llvm/Support/SourceMgr.h"
 #include "gtest/gtest.h"
@@ -359,4 +360,60 @@
   ASSERT_EQ(F->getNumUses(), (unsigned)2);
 }
 
+TEST_F(LinkModuleTest, AppendModuleFlags) {
+  LLVMContext C;
+  SMDiagnostic Err;
+
+  // Link 2 modules with "append" metadata nodes. In the linked module,
+  // check that the 3rd operand has been resized and has hung-off operands.
+
+  const char *FooStr = R"IR(
+    declare void @foo(i32, i32)
+    declare i32 @bar(i32)
+    declare i32 @baz(i32, i32)
+
+    !llvm.module.flags = !{!0}
+    !0 = !{i32 5, !"CG Profile", !1}
+    !1 = distinct !{!2, !3}
+    !2 = !{void (i32, i32)* @foo, i32 (i32)* @bar, i64 1}
+    !3 = !{void (i32, i32)* @foo, i32 (i32, i32)* @baz, i64 1}
+  )IR";
+
+  const char *BarStr = R"IR(
+    declare void @bar(i32, i32)
+    declare i32 @baz(i32)
+    declare i32 @fie(i32, i32)
+
+    !llvm.module.flags = !{!0}
+    !0 = !{i32 5, !"CG Profile", !1}
+    !1 = !{!2, !3}
+    !2 = !{void (i32, i32)* @bar, i32 (i32)* @baz, i64 1}
+    !3 = !{void (i32, i32)* @bar, i32 (i32, i32)* @fie, i64 1}
+  )IR";
+
+  std::unique_ptr<Module> Foo = parseAssemblyString(FooStr, Err, C);
+  assert(Foo);
+  const NamedMDNode *ModFlags1 = Foo->getModuleFlagsMetadata();
+  const MDNode *FirstFlag1 = ModFlags1->getOperand(0);
+  ASSERT_TRUE(FirstFlag1->getNumOperands() == 3);
+  const MDNode *MDListTuple1 = cast<MDNode>(FirstFlag1->getOperand(2).get());
+
+  std::unique_ptr<Module> Bar = parseAssemblyString(BarStr, Err, C);
+  assert(Bar);
+  const NamedMDNode *ModFlags2 = Bar->getModuleFlagsMetadata();
+  const MDNode *FirstFlag2 = ModFlags2->getOperand(0);
+  ASSERT_TRUE(FirstFlag2->getNumOperands() == 3);
+
+  bool Failed = Linker::linkModules(*Foo, std::move(Bar));
+  ASSERT_FALSE(Failed);
+
+  const NamedMDNode *ModFlags = Foo->getModuleFlagsMetadata();
+  const MDNode *FirstFlag = ModFlags->getOperand(0);
+  ASSERT_TRUE(FirstFlag->getNumOperands() == 3);
+  const MDNode *MDListTuple2 = cast<MDNode>(FirstFlag->getOperand(2).get());
+  ASSERT_TRUE(MDListTuple2->isDistinct());
+  ASSERT_TRUE(MDListTuple2->getNumOperands() == 4);
+  ASSERT_EQ(MDListTuple1, MDListTuple2);
+}
+
 } // end anonymous namespace
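Taken together, the new API lets a metadata producer build a list once and then grow it in place instead of re-creating a uniqued tuple on every addition. The following is a minimal usage sketch based only on the declarations added above (MDTuple::getDistinct with a Capacity argument, MDNode::reserve, and MDNode::append); the helper names and the extra-capacity value are illustrative, not part of the patch:

#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"

using namespace llvm;

// Illustrative helper (not from the patch): create a distinct tuple with room
// to grow, so the first few appends stay within the initial allocation.
static MDTuple *makeGrowableList(LLVMContext &Ctx,
                                 ArrayRef<Metadata *> Initial) {
  return MDTuple::getDistinct(Ctx, Initial, /*Capacity=*/Initial.size() + 8);
}

// Illustrative helper: append a single entry. Once the capacity is exhausted,
// append() resizes the node into hung-off operand storage automatically; this
// is only valid for distinct or temporary nodes, never for uniqued ones.
static void addEntry(MDTuple *List, Metadata *Entry) {
  List->append(Entry);
}

Because the node itself is never reallocated (only its operand storage is), callers can keep a stable MDTuple pointer across appends, which is what the AppendModuleFlags test above relies on.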