Index: cfe/trunk/include/clang/AST/Decl.h =================================================================== --- cfe/trunk/include/clang/AST/Decl.h +++ cfe/trunk/include/clang/AST/Decl.h @@ -2729,6 +2729,11 @@ /// bit-fields. bool isZeroLengthBitField(const ASTContext &Ctx) const; + /// Determine if this field is a subobject of zero size, that is, either a + /// zero-length bit-field or a field of empty class type with the + /// [[no_unique_address]] attribute. + bool isZeroSize(const ASTContext &Ctx) const; + /// Get the kind of (C++11) default member initializer that this field has. InClassInitStyle getInClassInitStyle() const { InitStorageKind storageKind = InitStorage.getInt(); Index: cfe/trunk/include/clang/AST/DeclCXX.h =================================================================== --- cfe/trunk/include/clang/AST/DeclCXX.h +++ cfe/trunk/include/clang/AST/DeclCXX.h @@ -334,10 +334,12 @@ /// True when this class is a POD-type. unsigned PlainOldData : 1; - /// true when this class is empty for traits purposes, - /// i.e. has no data members other than 0-width bit-fields, has no - /// virtual function/base, and doesn't inherit from a non-empty - /// class. Doesn't take union-ness into account. + /// True when this class is empty for traits purposes, that is: + /// * has no data members other than 0-width bit-fields and empty fields + /// marked [[no_unique_address]] + /// * has no virtual function/base, and + /// * doesn't inherit from a non-empty class. + /// Doesn't take union-ness into account. unsigned Empty : 1; /// True when this class is polymorphic, i.e., has at Index: cfe/trunk/include/clang/Basic/Attr.td =================================================================== --- cfe/trunk/include/clang/Basic/Attr.td +++ cfe/trunk/include/clang/Basic/Attr.td @@ -14,6 +14,7 @@ } def DocCatFunction : DocumentationCategory<"Function Attributes">; def DocCatVariable : DocumentationCategory<"Variable Attributes">; +def DocCatField : DocumentationCategory<"Field Attributes">; def DocCatType : DocumentationCategory<"Type Attributes">; def DocCatStmt : DocumentationCategory<"Statement Attributes">; def DocCatDecl : DocumentationCategory<"Declaration Attributes">; @@ -315,12 +316,14 @@ // Specifies Operating Systems for which the target applies, based off the // OSType enumeration in Triple.h list OSes; - // Specifies the C++ ABIs for which the target applies, based off the - // TargetCXXABI::Kind in TargetCXXABI.h. 
- list CXXABIs; // Specifies Object Formats for which the target applies, based off the // ObjectFormatType enumeration in Triple.h list ObjectFormats; + // A custom predicate, written as an expression evaluated in a context + // with the following declarations in scope: + // const clang::TargetInfo &Target; + // const llvm::Triple &T = Target.getTriple(); + code CustomCode = [{}]; } class TargetArch arches> : TargetSpec { @@ -338,8 +341,11 @@ def TargetWindows : TargetArch<["x86", "x86_64", "arm", "thumb", "aarch64"]> { let OSes = ["Win32"]; } +def TargetItaniumCXXABI : TargetSpec { + let CustomCode = [{ Target.getCXXABI().isItaniumFamily() }]; +} def TargetMicrosoftCXXABI : TargetArch<["x86", "x86_64", "arm", "thumb", "aarch64"]> { - let CXXABIs = ["Microsoft"]; + let CustomCode = [{ Target.getCXXABI().isMicrosoft() }]; } def TargetELF : TargetSpec { let ObjectFormats = ["ELF"]; @@ -1408,6 +1414,12 @@ let ASTNode = 0; } +def NoUniqueAddress : InheritableAttr, TargetSpecificAttr { + let Spellings = [CXX11<"", "no_unique_address", 201803>]; + let Subjects = SubjectList<[NonBitField], ErrorDiag>; + let Documentation = [NoUniqueAddressDocs]; +} + def ReturnsTwice : InheritableAttr { let Spellings = [GCC<"returns_twice">]; let Subjects = SubjectList<[Function]>; Index: cfe/trunk/include/clang/Basic/AttrDocs.td =================================================================== --- cfe/trunk/include/clang/Basic/AttrDocs.td +++ cfe/trunk/include/clang/Basic/AttrDocs.td @@ -1009,6 +1009,32 @@ }]; } +def NoUniqueAddressDocs : Documentation { + let Category = DocCatField; + let Content = [{ +The ``no_unique_address`` attribute allows tail padding in a non-static data +member to overlap other members of the enclosing class (and in the special +case when the type is empty, permits it to fully overlap other members). +The field is laid out as if a base class were encountered at the corresponding +point within the class (except that it does not share a vptr with the enclosing +object). + +Example usage: + +.. code-block:: c++ + + template struct my_vector { + T *p; + [[no_unique_address]] Alloc alloc; + // ... + }; + static_assert(sizeof(my_vector>) == sizeof(int*)); + +``[[no_unique_address]]`` is a standard C++20 attribute. Clang supports its use +in C++11 onwards. + }]; +} + def ObjCRequiresSuperDocs : Documentation { let Category = DocCatFunction; let Content = [{ Index: cfe/trunk/lib/AST/Decl.cpp =================================================================== --- cfe/trunk/lib/AST/Decl.cpp +++ cfe/trunk/lib/AST/Decl.cpp @@ -3913,6 +3913,39 @@ getBitWidthValue(Ctx) == 0; } +bool FieldDecl::isZeroSize(const ASTContext &Ctx) const { + if (isZeroLengthBitField(Ctx)) + return true; + + // C++2a [intro.object]p7: + // An object has nonzero size if it + // -- is not a potentially-overlapping subobject, or + if (!hasAttr()) + return false; + + // -- is not of class type, or + const auto *RT = getType()->getAs(); + if (!RT) + return false; + const RecordDecl *RD = RT->getDecl()->getDefinition(); + if (!RD) { + assert(isInvalidDecl() && "valid field has incomplete type"); + return false; + } + + // -- [has] virtual member functions or virtual base classes, or + // -- has subobjects of nonzero size or bit-fields of nonzero length + const auto *CXXRD = cast(RD); + if (!CXXRD->isEmpty()) + return false; + + // Otherwise, [...] the circumstances under which the object has zero size + // are implementation-defined. 
+ // FIXME: This might be Itanium ABI specific; we don't yet know what the MS + // ABI will do. + return true; +} + unsigned FieldDecl::getFieldIndex() const { const FieldDecl *Canonical = getCanonicalDecl(); if (Canonical != this) Index: cfe/trunk/lib/AST/DeclCXX.cpp =================================================================== --- cfe/trunk/lib/AST/DeclCXX.cpp +++ cfe/trunk/lib/AST/DeclCXX.cpp @@ -605,14 +605,19 @@ // that sure looks like a wording bug. // -- If X is a non-union class type with a non-static data member - // [recurse to] the first non-static data member of X + // [recurse to each field] that is either of zero size or is the + // first non-static data member of X // -- If X is a union type, [recurse to union members] + bool IsFirstField = true; for (auto *FD : X->fields()) { // FIXME: Should we really care about the type of the first non-static // data member of a non-union if there are preceding unnamed bit-fields? if (FD->isUnnamedBitfield()) continue; + if (!IsFirstField && !FD->isZeroSize(Ctx)) + continue; + // -- If X is n array type, [visit the element type] QualType T = Ctx.getBaseElementType(FD->getType()); if (auto *RD = T->getAsCXXRecordDecl()) @@ -620,7 +625,7 @@ return true; if (!X->isUnion()) - break; + IsFirstField = false; } } @@ -1068,6 +1073,10 @@ if (T->isReferenceType()) data().DefaultedMoveAssignmentIsDeleted = true; + // Bitfields of length 0 are also zero-sized, but we already bailed out for + // those because they are always unnamed. + bool IsZeroSize = Field->isZeroSize(Context); + if (const auto *RecordTy = T->getAs()) { auto *FieldRec = cast(RecordTy->getDecl()); if (FieldRec->getDefinition()) { @@ -1183,7 +1192,8 @@ // A standard-layout class is a class that: // [...] // -- has no element of the set M(S) of types as a base class. - if (data().IsStandardLayout && (isUnion() || IsFirstField) && + if (data().IsStandardLayout && + (isUnion() || IsFirstField || IsZeroSize) && hasSubobjectAtOffsetZeroOfEmptyBaseType(Context, FieldRec)) data().IsStandardLayout = false; @@ -1265,8 +1275,10 @@ } // C++14 [meta.unary.prop]p4: - // T is a class type [...] with [...] no non-static data members - data().Empty = false; + // T is a class type [...] with [...] no non-static data members other + // than subobjects of zero size + if (data().Empty && !IsZeroSize) + data().Empty = false; } // Handle using declarations of conversion functions. Index: cfe/trunk/lib/AST/RecordLayoutBuilder.cpp =================================================================== --- cfe/trunk/lib/AST/RecordLayoutBuilder.cpp +++ cfe/trunk/lib/AST/RecordLayoutBuilder.cpp @@ -127,9 +127,10 @@ CharUnits Offset, bool PlacingEmptyBase); void UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD, - const CXXRecordDecl *Class, - CharUnits Offset); - void UpdateEmptyFieldSubobjects(const FieldDecl *FD, CharUnits Offset); + const CXXRecordDecl *Class, CharUnits Offset, + bool PlacingOverlappingField); + void UpdateEmptyFieldSubobjects(const FieldDecl *FD, CharUnits Offset, + bool PlacingOverlappingField); /// AnyEmptySubobjectsBeyondOffset - Returns whether there are any empty /// subobjects beyond the given offset. @@ -351,7 +352,7 @@ continue; CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo); - UpdateEmptyFieldSubobjects(*I, FieldOffset); + UpdateEmptyFieldSubobjects(*I, FieldOffset, PlacingEmptyBase); } } @@ -471,20 +472,25 @@ return false; // We are able to place the member variable at this offset. - // Make sure to update the empty base subobject map. 
- UpdateEmptyFieldSubobjects(FD, Offset); + // Make sure to update the empty field subobject map. + UpdateEmptyFieldSubobjects(FD, Offset, FD->hasAttr()); return true; } -void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD, - const CXXRecordDecl *Class, - CharUnits Offset) { +void EmptySubobjectMap::UpdateEmptyFieldSubobjects( + const CXXRecordDecl *RD, const CXXRecordDecl *Class, CharUnits Offset, + bool PlacingOverlappingField) { // We know that the only empty subobjects that can conflict with empty - // field subobjects are subobjects of empty bases that can be placed at offset - // zero. Because of this, we only need to keep track of empty field - // subobjects with offsets less than the size of the largest empty - // subobject for our class. - if (Offset >= SizeOfLargestEmptySubobject) + // field subobjects are subobjects of empty bases and potentially-overlapping + // fields that can be placed at offset zero. Because of this, we only need to + // keep track of empty field subobjects with offsets less than the size of + // the largest empty subobject for our class. + // + // (Proof: we will only consider placing a subobject at offset zero or at + // >= the current dsize. The only cases where the earlier subobject can be + // placed beyond the end of dsize is if it's an empty base or a + // potentially-overlapping field.) + if (!PlacingOverlappingField && Offset >= SizeOfLargestEmptySubobject) return; AddSubobjectAtOffset(RD, Offset); @@ -499,7 +505,8 @@ const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl(); CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl); - UpdateEmptyFieldSubobjects(BaseDecl, Class, BaseOffset); + UpdateEmptyFieldSubobjects(BaseDecl, Class, BaseOffset, + PlacingOverlappingField); } if (RD == Class) { @@ -508,7 +515,8 @@ const CXXRecordDecl *VBaseDecl = Base.getType()->getAsCXXRecordDecl(); CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl); - UpdateEmptyFieldSubobjects(VBaseDecl, Class, VBaseOffset); + UpdateEmptyFieldSubobjects(VBaseDecl, Class, VBaseOffset, + PlacingOverlappingField); } } @@ -521,15 +529,15 @@ CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo); - UpdateEmptyFieldSubobjects(*I, FieldOffset); + UpdateEmptyFieldSubobjects(*I, FieldOffset, PlacingOverlappingField); } } -void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const FieldDecl *FD, - CharUnits Offset) { +void EmptySubobjectMap::UpdateEmptyFieldSubobjects( + const FieldDecl *FD, CharUnits Offset, bool PlacingOverlappingField) { QualType T = FD->getType(); if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl()) { - UpdateEmptyFieldSubobjects(RD, RD, Offset); + UpdateEmptyFieldSubobjects(RD, RD, Offset, PlacingOverlappingField); return; } @@ -552,10 +560,12 @@ // offset zero. Because of this, we only need to keep track of empty field // subobjects with offsets less than the size of the largest empty // subobject for our class. - if (ElementOffset >= SizeOfLargestEmptySubobject) + if (!PlacingOverlappingField && + ElementOffset >= SizeOfLargestEmptySubobject) return; - UpdateEmptyFieldSubobjects(RD, RD, ElementOffset); + UpdateEmptyFieldSubobjects(RD, RD, ElementOffset, + PlacingOverlappingField); ElementOffset += Layout.getSize(); } } @@ -622,6 +632,10 @@ CharUnits NonVirtualSize; CharUnits NonVirtualAlignment; + /// If we've laid out a field but not included its tail padding in Size yet, + /// this is the size up to the end of that field. 
+ CharUnits PaddedFieldSize; + /// PrimaryBase - the primary base class (if one exists) of the class /// we're laying out. const CXXRecordDecl *PrimaryBase; @@ -670,7 +684,8 @@ UnfilledBitsInLastUnit(0), LastBitfieldTypeSize(0), MaxFieldAlignment(CharUnits::Zero()), DataSize(0), NonVirtualSize(CharUnits::Zero()), - NonVirtualAlignment(CharUnits::One()), PrimaryBase(nullptr), + NonVirtualAlignment(CharUnits::One()), + PaddedFieldSize(CharUnits::Zero()), PrimaryBase(nullptr), PrimaryBaseIsVirtual(false), HasOwnVFPtr(false), HasPackedField(false), FirstNearlyEmptyVBase(nullptr) {} @@ -980,7 +995,6 @@ // Round up the current record size to pointer alignment. setSize(getSize().alignTo(BaseAlign)); - setDataSize(getSize()); // Update the alignment. UpdateAlignment(BaseAlign, UnpackedBaseAlign); @@ -1172,6 +1186,7 @@ // Query the external layout to see if it provides an offset. bool HasExternalLayout = false; if (UseExternalLayout) { + // FIXME: This appears to be reversed. if (Base->IsVirtual) HasExternalLayout = External.getExternalNVBaseOffset(Base->Class, Offset); else @@ -1342,8 +1357,8 @@ // We start laying out ivars not at the end of the superclass // structure, but at the next byte following the last field. - setSize(SL.getDataSize()); - setDataSize(getSize()); + setDataSize(SL.getDataSize()); + setSize(getDataSize()); } InitializeLayout(D); @@ -1729,32 +1744,49 @@ UnfilledBitsInLastUnit = 0; LastBitfieldTypeSize = 0; + auto *FieldClass = D->getType()->getAsCXXRecordDecl(); + bool PotentiallyOverlapping = D->hasAttr() && FieldClass; + bool IsOverlappingEmptyField = PotentiallyOverlapping && FieldClass->isEmpty(); bool FieldPacked = Packed || D->hasAttr(); - CharUnits FieldOffset = - IsUnion ? CharUnits::Zero() : getDataSize(); + + CharUnits FieldOffset = (IsUnion || IsOverlappingEmptyField) + ? CharUnits::Zero() + : getDataSize(); CharUnits FieldSize; CharUnits FieldAlign; + // The amount of this class's dsize occupied by the field. + // This is equal to FieldSize unless we're permitted to pack + // into the field's tail padding. + CharUnits EffectiveFieldSize; if (D->getType()->isIncompleteArrayType()) { // This is a flexible array member; we can't directly // query getTypeInfo about these, so we figure it out here. // Flexible array members don't have any size, but they // have to be aligned appropriately for their element type. - FieldSize = CharUnits::Zero(); + EffectiveFieldSize = FieldSize = CharUnits::Zero(); const ArrayType* ATy = Context.getAsArrayType(D->getType()); FieldAlign = Context.getTypeAlignInChars(ATy->getElementType()); } else if (const ReferenceType *RT = D->getType()->getAs()) { unsigned AS = Context.getTargetAddressSpace(RT->getPointeeType()); - FieldSize = + EffectiveFieldSize = FieldSize = Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(AS)); FieldAlign = Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(AS)); } else { std::pair FieldInfo = Context.getTypeInfoInChars(D->getType()); - FieldSize = FieldInfo.first; + EffectiveFieldSize = FieldSize = FieldInfo.first; FieldAlign = FieldInfo.second; + // A potentially-overlapping field occupies its dsize or nvsize, whichever + // is larger. + if (PotentiallyOverlapping) { + const ASTRecordLayout &Layout = Context.getASTRecordLayout(FieldClass); + EffectiveFieldSize = + std::max(Layout.getNonVirtualSize(), Layout.getDataSize()); + } + if (IsMsStruct) { // If MS bitfield layout is required, figure out what type is being // laid out and align the field to the width of that type. 
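For context on the hunk above: a potentially-overlapping field is given an EffectiveFieldSize of the larger of its class's dsize and nvsize rather than its full sizeof, which is what allows a following member to be packed into that field's tail padding. A minimal sketch, assuming the x86_64-linux-gnu Itanium layout exercised by the tests later in this patch:

  struct A { ~A(); int n; char c[3]; };  // sizeof(A) == 8: 7 bytes of data plus 1 byte of tail padding
  struct B {
    [[no_unique_address]] A a;           // occupies 7 bytes here (its dsize/nvsize), not sizeof(A) == 8
    char k;                              // placed at offset 7, inside a's tail padding
  };
  static_assert(sizeof(B) == 8);         // matches the CodeGenCXX/no-unique-address.cpp test below
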
@@ -1834,7 +1866,12 @@ // Check if we can place the field at this offset. while (!EmptySubobjects->CanPlaceFieldAtOffset(D, FieldOffset)) { // We couldn't place the field at the offset. Try again at a new offset. - FieldOffset += FieldAlign; + // We try offset 0 (for an empty field) and then dsize(C) onwards. + if (FieldOffset == CharUnits::Zero() && + getDataSize() != CharUnits::Zero()) + FieldOffset = getDataSize().alignTo(FieldAlign); + else + FieldOffset += FieldAlign; } } } @@ -1853,18 +1890,23 @@ if (FieldSize % ASanAlignment) ExtraSizeForAsan += ASanAlignment - CharUnits::fromQuantity(FieldSize % ASanAlignment); - FieldSize += ExtraSizeForAsan; + EffectiveFieldSize = FieldSize = FieldSize + ExtraSizeForAsan; } // Reserve space for this field. - uint64_t FieldSizeInBits = Context.toBits(FieldSize); - if (IsUnion) - setDataSize(std::max(getDataSizeInBits(), FieldSizeInBits)); - else - setDataSize(FieldOffset + FieldSize); + if (!IsOverlappingEmptyField) { + uint64_t EffectiveFieldSizeInBits = Context.toBits(EffectiveFieldSize); + if (IsUnion) + setDataSize(std::max(getDataSizeInBits(), EffectiveFieldSizeInBits)); + else + setDataSize(FieldOffset + EffectiveFieldSize); - // Update the size. - setSize(std::max(getSizeInBits(), getDataSizeInBits())); + PaddedFieldSize = std::max(PaddedFieldSize, FieldOffset + FieldSize); + setSize(std::max(getSizeInBits(), getDataSizeInBits())); + } else { + setSize(std::max(getSizeInBits(), + (uint64_t)Context.toBits(FieldOffset + FieldSize))); + } // Remember max struct/class alignment. UnadjustedAlignment = std::max(UnadjustedAlignment, FieldAlign); @@ -1885,6 +1927,10 @@ setSize(CharUnits::One()); } + // If we have any remaining field tail padding, include that in the overall + // size. + setSize(std::max(getSizeInBits(), (uint64_t)Context.toBits(PaddedFieldSize))); + // Finally, round the size of the record up to the alignment of the // record itself. uint64_t UnpaddedSize = getSizeInBits() - UnfilledBitsInLastUnit; Index: cfe/trunk/lib/CodeGen/CGExpr.cpp =================================================================== --- cfe/trunk/lib/CodeGen/CGExpr.cpp +++ cfe/trunk/lib/CodeGen/CGExpr.cpp @@ -3879,12 +3879,27 @@ return EmitLValueForField(LambdaLV, Field); } +/// Get the address of a zero-sized field within a record. The resulting +/// address doesn't necessarily have the right type. +static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base, + const FieldDecl *Field) { + CharUnits Offset = CGF.getContext().toCharUnitsFromBits( + CGF.getContext().getFieldOffset(Field)); + if (Offset.isZero()) + return Base; + Base = CGF.Builder.CreateElementBitCast(Base, CGF.Int8Ty); + return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset); +} + /// Drill down to the storage of a field without walking into /// reference types. /// /// The resulting address doesn't necessarily have the right type. 
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base, const FieldDecl *field) { + if (field->isZeroSize(CGF.getContext())) + return emitAddrOfZeroSizeField(CGF, base, field); + const RecordDecl *rec = field->getParent(); unsigned idx = Index: cfe/trunk/lib/CodeGen/CGExprAgg.cpp =================================================================== --- cfe/trunk/lib/CodeGen/CGExprAgg.cpp +++ cfe/trunk/lib/CodeGen/CGExprAgg.cpp @@ -1846,15 +1846,32 @@ return LV; } +AggValueSlot::Overlap_t +CodeGenFunction::overlapForFieldInit(const FieldDecl *FD) { + if (!FD->hasAttr() || !FD->getType()->isRecordType()) + return AggValueSlot::DoesNotOverlap; + + // If the field lies entirely within the enclosing class's nvsize, its tail + // padding cannot overlap any already-initialized object. (The only subobjects + // with greater addresses that might already be initialized are vbases.) + const RecordDecl *ClassRD = FD->getParent(); + const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD); + if (Layout.getFieldOffset(FD->getFieldIndex()) + + getContext().getTypeSize(FD->getType()) <= + (uint64_t)getContext().toBits(Layout.getNonVirtualSize())) + return AggValueSlot::DoesNotOverlap; + + // The tail padding may contain values we need to preserve. + return AggValueSlot::MayOverlap; +} + AggValueSlot::Overlap_t CodeGenFunction::overlapForBaseInit( const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) { - // Virtual bases are initialized first, in address order, so there's never - // any overlap during their initialization. - // - // FIXME: Under P0840, this is no longer true: the tail padding of a vbase - // of a field could be reused by a vbase of a containing class. + // If the most-derived object is a field declared with [[no_unique_address]], + // the tail padding of any virtual base could be reused for other subobjects + // of that field's class. if (IsVirtual) - return AggValueSlot::DoesNotOverlap; + return AggValueSlot::MayOverlap; // If the base class is laid out entirely within the nvsize of the derived // class, its tail padding cannot yet be initialized, so we can issue Index: cfe/trunk/lib/CodeGen/CGExprConstant.cpp =================================================================== --- cfe/trunk/lib/CodeGen/CGExprConstant.cpp +++ cfe/trunk/lib/CodeGen/CGExprConstant.cpp @@ -675,11 +675,12 @@ ++FieldNo; // If this is a union, skip all the fields that aren't being initialized. - if (RD->isUnion() && ILE->getInitializedFieldInUnion() != Field) + if (RD->isUnion() && + !declaresSameEntity(ILE->getInitializedFieldInUnion(), Field)) continue; - // Don't emit anonymous bitfields, they just affect layout. - if (Field->isUnnamedBitfield()) + // Don't emit anonymous bitfields or zero-sized fields. + if (Field->isUnnamedBitfield() || Field->isZeroSize(CGM.getContext())) continue; // Get the initializer. A struct can include fields without initializers, @@ -720,6 +721,10 @@ if (!AppendField(Field, Layout.getFieldOffset(FieldNo), EltInit, AllowOverwrite)) return false; + // After emitting a non-empty field with [[no_unique_address]], we may + // need to overwrite its tail padding. + if (Field->hasAttr()) + AllowOverwrite = true; } else { // Otherwise we have a bitfield. 
if (auto *CI = dyn_cast(EltInit)) { @@ -793,14 +798,15 @@ unsigned FieldNo = 0; uint64_t OffsetBits = CGM.getContext().toBits(Offset); + bool AllowOverwrite = false; for (RecordDecl::field_iterator Field = RD->field_begin(), FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) { // If this is a union, skip all the fields that aren't being initialized. if (RD->isUnion() && !declaresSameEntity(Val.getUnionField(), *Field)) continue; - // Don't emit anonymous bitfields, they just affect layout. - if (Field->isUnnamedBitfield()) + // Don't emit anonymous bitfields or zero-sized fields. + if (Field->isUnnamedBitfield() || Field->isZeroSize(CGM.getContext())) continue; // Emit the value of the initializer. @@ -814,12 +820,16 @@ if (!Field->isBitField()) { // Handle non-bitfield members. if (!AppendField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits, - EltInit)) + EltInit, AllowOverwrite)) return false; + // After emitting a non-empty field with [[no_unique_address]], we may + // need to overwrite its tail padding. + if (Field->hasAttr()) + AllowOverwrite = true; } else { // Otherwise we have a bitfield. if (!AppendBitField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits, - cast(EltInit))) + cast(EltInit), AllowOverwrite)) return false; } } @@ -2216,7 +2226,7 @@ for (const auto *Field : record->fields()) { // Fill in non-bitfields. (Bitfields always use a zero pattern, which we // will fill in later.) - if (!Field->isBitField()) { + if (!Field->isBitField() && !Field->isZeroSize(CGM.getContext())) { unsigned fieldIndex = layout.getLLVMFieldNo(Field); elements[fieldIndex] = CGM.EmitNullConstant(Field->getType()); } Index: cfe/trunk/lib/CodeGen/CGOpenMPRuntime.cpp =================================================================== --- cfe/trunk/lib/CodeGen/CGOpenMPRuntime.cpp +++ cfe/trunk/lib/CodeGen/CGOpenMPRuntime.cpp @@ -7811,7 +7811,7 @@ for (const auto *Field : RD->fields()) { // Fill in non-bitfields. (Bitfields always use a zero pattern, which we // will fill in later.) - if (!Field->isBitField()) { + if (!Field->isBitField() && !Field->isZeroSize(CGF.getContext())) { unsigned FieldIndex = RL.getLLVMFieldNo(Field); RecordLayout[FieldIndex] = Field; } Index: cfe/trunk/lib/CodeGen/CGRecordLayoutBuilder.cpp =================================================================== --- cfe/trunk/lib/CodeGen/CGRecordLayoutBuilder.cpp +++ cfe/trunk/lib/CodeGen/CGRecordLayoutBuilder.cpp @@ -347,18 +347,21 @@ void CGRecordLowering::accumulateFields() { for (RecordDecl::field_iterator Field = D->field_begin(), FieldEnd = D->field_end(); - Field != FieldEnd;) + Field != FieldEnd;) { if (Field->isBitField()) { RecordDecl::field_iterator Start = Field; // Iterate to gather the list of bitfields. 
for (++Field; Field != FieldEnd && Field->isBitField(); ++Field); accumulateBitFields(Start, Field); - } else { + } else if (!Field->isZeroSize(Context)) { Members.push_back(MemberInfo( bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field, getStorageType(*Field), *Field)); ++Field; + } else { + ++Field; } + } } void @@ -590,10 +593,17 @@ if (!Member->Data && Member->Kind != MemberInfo::Scissor) continue; if (Member->Offset < Tail) { - assert(Prior->Kind == MemberInfo::Field && !Prior->FD && + assert(Prior->Kind == MemberInfo::Field && "Only storage fields have tail padding!"); - Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo( - cast(Prior->Data)->getIntegerBitWidth(), 8))); + if (!Prior->FD || Prior->FD->isBitField()) + Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo( + cast(Prior->Data)->getIntegerBitWidth(), 8))); + else { + assert(Prior->FD->hasAttr() && + "should not have reused this field's tail padding"); + Prior->Data = getByteArrayType( + Context.getTypeInfoDataSizeInChars(Prior->FD->getType()).first); + } } if (Member->Data) Prior = Member; @@ -797,6 +807,10 @@ for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) { const FieldDecl *FD = *it; + // Ignore zero-sized fields. + if (FD->isZeroSize(getContext())) + continue; + // For non-bit-fields, just check that the LLVM struct offset matches the // AST offset. if (!FD->isBitField()) { @@ -810,10 +824,6 @@ if (!FD->getDeclName()) continue; - // Don't inspect zero-length bitfields. - if (FD->isZeroLengthBitField(getContext())) - continue; - const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD); llvm::Type *ElementTy = ST->getTypeAtIndex(RL->getLLVMFieldNo(FD)); Index: cfe/trunk/lib/CodeGen/CodeGenFunction.h =================================================================== --- cfe/trunk/lib/CodeGen/CodeGenFunction.h +++ cfe/trunk/lib/CodeGen/CodeGenFunction.h @@ -2322,14 +2322,7 @@ } /// Determine whether a field initialization may overlap some other object. - AggValueSlot::Overlap_t overlapForFieldInit(const FieldDecl *FD) { - // FIXME: These cases can result in overlap as a result of P0840R0's - // [[no_unique_address]] attribute. We can still infer NoOverlap in the - // presence of that attribute if the field is within the nvsize of its - // containing class, because non-virtual subobjects are initialized in - // address order. - return AggValueSlot::DoesNotOverlap; - } + AggValueSlot::Overlap_t overlapForFieldInit(const FieldDecl *FD); /// Determine whether a base class initialization may overlap some other /// object. 
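The switch to MayOverlap for virtual bases matters when the most-derived object is itself a [[no_unique_address]] field: a virtual base may then be placed in that field's tail padding, so initializing or copying the field must not write past its data size. A sketch of the shape involved, assuming x86_64-linux-gnu; it corresponds to the NoUniqueAddr::D case in the tail-padding.cpp test below:

  struct A { char c; A(const A &); };
  struct B { int n; char c[3]; ~B(); };                 // sizeof(B) == 8, but only 7 bytes of data
  struct D : virtual A { [[no_unique_address]] B b; };  // A's storage can land in b's tail padding
  static_assert(sizeof(D) == sizeof(void*) + 8);
  // Copying a D therefore copies only dsize(B) == 7 bytes of b, as checked in tail-padding.cpp.
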
Index: cfe/trunk/lib/Parse/ParseDeclCXX.cpp =================================================================== --- cfe/trunk/lib/Parse/ParseDeclCXX.cpp +++ cfe/trunk/lib/Parse/ParseDeclCXX.cpp @@ -3912,6 +3912,7 @@ case ParsedAttr::AT_Deprecated: case ParsedAttr::AT_FallThrough: case ParsedAttr::AT_CXX11NoReturn: + case ParsedAttr::AT_NoUniqueAddress: return true; case ParsedAttr::AT_WarnUnusedResult: return !ScopeName && AttrName->getName().equals("nodiscard"); Index: cfe/trunk/lib/Sema/SemaDeclAttr.cpp =================================================================== --- cfe/trunk/lib/Sema/SemaDeclAttr.cpp +++ cfe/trunk/lib/Sema/SemaDeclAttr.cpp @@ -6813,6 +6813,9 @@ case ParsedAttr::AT_NoSplitStack: handleSimpleAttribute(S, D, AL); break; + case ParsedAttr::AT_NoUniqueAddress: + handleSimpleAttribute(S, D, AL); + break; case ParsedAttr::AT_NonNull: if (auto *PVD = dyn_cast(D)) handleNonNullAttrParameter(S, PVD, AL); Index: cfe/trunk/test/CodeGenCXX/no-unique-address.cpp =================================================================== --- cfe/trunk/test/CodeGenCXX/no-unique-address.cpp +++ cfe/trunk/test/CodeGenCXX/no-unique-address.cpp @@ -0,0 +1,79 @@ +// RUN: %clang_cc1 -std=c++2a %s -emit-llvm -o - -triple x86_64-linux-gnu | FileCheck %s + +struct A { ~A(); int n; char c[3]; }; +struct B { [[no_unique_address]] A a; char k; }; +// CHECK-DAG: @b = global { i32, [3 x i8], i8 } { i32 1, [3 x i8] c"\02\03\04", i8 5 } +B b = {1, 2, 3, 4, 5}; + +struct C : A {}; +struct D : C {}; +struct E { int e; [[no_unique_address]] D d; char k; }; +// CHECK-DAG: @e = global { i32, i32, [3 x i8], i8 } { i32 1, i32 2, [3 x i8] c"\03\04\05", i8 6 } +E e = {1, 2, 3, 4, 5, 6}; + +struct Empty1 {}; +struct Empty2 {}; +struct Empty3 {}; +struct HasEmpty { + [[no_unique_address]] Empty1 e1; + int a; + [[no_unique_address]] Empty2 e2; + int b; + [[no_unique_address]] Empty3 e3; +}; +// CHECK-DAG: @he = global %{{[^ ]*}} { i32 1, i32 2 } +HasEmpty he = {{}, 1, {}, 2, {}}; + +struct HasEmptyDuplicates { + [[no_unique_address]] Empty1 e1; // +0 + int a; + [[no_unique_address]] Empty1 e2; // +4 + int b; + [[no_unique_address]] Empty1 e3; // +8 +}; +// CHECK-DAG: @off1 = global i64 0 +Empty1 HasEmptyDuplicates::*off1 = &HasEmptyDuplicates::e1; +// CHECK-DAG: @off2 = global i64 4 +Empty1 HasEmptyDuplicates::*off2 = &HasEmptyDuplicates::e2; +// CHECK-DAG: @off3 = global i64 8 +Empty1 HasEmptyDuplicates::*off3 = &HasEmptyDuplicates::e3; + +// CHECK-DAG: @hed = global %{{[^ ]*}} { i32 1, i32 2, [4 x i8] undef } +HasEmptyDuplicates hed = {{}, 1, {}, 2, {}}; + +struct __attribute__((packed, aligned(2))) PackedAndPadded { + ~PackedAndPadded(); + char c; + int n; +}; +struct WithPackedAndPadded { + [[no_unique_address]] PackedAndPadded pap; + char d; +}; +// CHECK-DAG: @wpap = global <{ i8, i32, i8 }> <{ i8 1, i32 2, i8 3 }> +WithPackedAndPadded wpap = {1, 2, 3}; + +struct FieldOverlap { + [[no_unique_address]] Empty1 e1, e2, e3, e4; + int n; +}; +static_assert(sizeof(FieldOverlap) == 4); +// CHECK-DAG: @fo = global %{{[^ ]*}} { i32 1234 } +FieldOverlap fo = {{}, {}, {}, {}, 1234}; + +// CHECK-DAG: @e1 = constant %[[E1:[^ ]*]]* bitcast (%[[FO:[^ ]*]]* @fo to %[[E1]]*) +Empty1 &e1 = fo.e1; +// CHECK-DAG: @e2 = constant %[[E1]]* bitcast (i8* getelementptr (i8, i8* bitcast (%[[FO]]* @fo to i8*), i64 1) to %[[E1]]*) +Empty1 &e2 = fo.e2; + +// CHECK-LABEL: accessE1 +// CHECK: %[[RET:.*]] = bitcast %[[FO]]* %{{.*}} to %[[E1]]* +// CHECK: ret %[[E1]]* %[[RET]] +Empty1 &accessE1(FieldOverlap &fo) { return fo.e1; } + +// 
CHECK-LABEL: accessE2 +// CHECK: %[[AS_I8:.*]] = bitcast %[[FO]]* %{{.*}} to i8* +// CHECK: %[[ADJUSTED:.*]] = getelementptr inbounds i8, i8* %[[AS_I8]], i64 1 +// CHECK: %[[RET:.*]] = bitcast i8* %[[ADJUSTED]] to %[[E1]]* +// CHECK: ret %[[E1]]* %[[RET]] +Empty1 &accessE2(FieldOverlap &fo) { return fo.e2; } Index: cfe/trunk/test/CodeGenCXX/tail-padding.cpp =================================================================== --- cfe/trunk/test/CodeGenCXX/tail-padding.cpp +++ cfe/trunk/test/CodeGenCXX/tail-padding.cpp @@ -32,3 +32,47 @@ // CHECK: store i32 {{.*}} @_ZTVN16InitWithinNVSize1CE // CHECK: store i8 } + +namespace NoUniqueAddr { + struct A { char c; A(const A&); }; + struct B { int n; char c[3]; ~B(); }; + struct C : virtual A { B b; }; + struct D : virtual A { [[no_unique_address]] B b; }; + struct E : virtual A { [[no_unique_address]] B b; char x; }; + static_assert(sizeof(C) == sizeof(void*) + 8 + alignof(void*)); + static_assert(sizeof(D) == sizeof(void*) + 8); + static_assert(sizeof(E) == sizeof(void*) + 8 + alignof(void*)); + + // CHECK: define {{.*}} @_ZN12NoUniqueAddr1CC1EOS0_ + // CHECK: call void @_ZN12NoUniqueAddr1AC2ERKS0_( + // CHECK: store i32 {{.*}} @_ZTVN12NoUniqueAddr1CE + // Copy the full size of B. + // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 8, i1 false) + C f(C c) { return c; } + + // CHECK: define {{.*}} @_ZN12NoUniqueAddr1DC1EOS0_ + // CHECK: call void @_ZN12NoUniqueAddr1AC2ERKS0_( + // CHECK: store i32 {{.*}} @_ZTVN12NoUniqueAddr1DE + // Copy just the data size of B, to avoid overwriting the A base class. + // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 7, i1 false) + D f(D d) { return d; } + + // CHECK: define {{.*}} @_ZN12NoUniqueAddr1EC1EOS0_ + // CHECK: call void @_ZN12NoUniqueAddr1AC2ERKS0_( + // CHECK: store i32 {{.*}} @_ZTVN12NoUniqueAddr1EE + // We can copy the full size of B here. (As it happens, we fold the copy of 'x' into + // this memcpy, so we're copying 8 bytes either way.) 
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 8, i1 false) + E f(E e) { return e; } + + struct F : virtual A { + F(const F &o) : A(o), b(o.b) {} + [[no_unique_address]] B b; + }; + + // CHECK: define {{.*}} @_ZN12NoUniqueAddr1FC1ERKS0_ + // CHECK: call void @_ZN12NoUniqueAddr1AC2ERKS0_( + // CHECK: store i32 {{.*}} @_ZTVN12NoUniqueAddr1FE + // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 7, i1 false) + F f(F x) { return x; } +} Index: cfe/trunk/test/Layout/no-unique-address.cpp =================================================================== --- cfe/trunk/test/Layout/no-unique-address.cpp +++ cfe/trunk/test/Layout/no-unique-address.cpp @@ -0,0 +1,265 @@ +// RUN: %clang_cc1 -std=c++2a -fsyntax-only -triple x86_64-linux-gnu -fdump-record-layouts %s | FileCheck %s + +namespace Empty { + struct A {}; + struct B { [[no_unique_address]] A a; char b; }; + static_assert(sizeof(B) == 1); + + // CHECK:*** Dumping AST Record Layout + // CHECK: 0 | struct Empty::B + // CHECK-NEXT: 0 | struct Empty::A a (empty) + // CHECK-NEXT: 0 | char b + // CHECK-NEXT: | [sizeof=1, dsize=1, align=1, + // CHECK-NEXT: | nvsize=1, nvalign=1] + + struct C {}; + struct D { + [[no_unique_address]] A a; + [[no_unique_address]] C c; + char d; + }; + static_assert(sizeof(D) == 1); + + // CHECK:*** Dumping AST Record Layout + // CHECK: 0 | struct Empty::D + // CHECK-NEXT: 0 | struct Empty::A a (empty) + // CHECK-NEXT: 0 | struct Empty::C c (empty) + // CHECK-NEXT: 0 | char d + // CHECK-NEXT: | [sizeof=1, dsize=1, align=1, + // CHECK-NEXT: | nvsize=1, nvalign=1] + + struct E { + [[no_unique_address]] A a1; + [[no_unique_address]] A a2; + char e; + }; + static_assert(sizeof(E) == 2); + + // CHECK:*** Dumping AST Record Layout + // CHECK: 0 | struct Empty::E + // CHECK-NEXT: 0 | struct Empty::A a1 (empty) + // CHECK-NEXT: 1 | struct Empty::A a2 (empty) + // CHECK-NEXT: 0 | char e + // CHECK-NEXT: | [sizeof=2, dsize=2, align=1, + // CHECK-NEXT: | nvsize=2, nvalign=1] + + struct F { + ~F(); + [[no_unique_address]] A a1; + [[no_unique_address]] A a2; + char f; + }; + static_assert(sizeof(F) == 2); + + // CHECK:*** Dumping AST Record Layout + // CHECK: 0 | struct Empty::F + // CHECK-NEXT: 0 | struct Empty::A a1 (empty) + // CHECK-NEXT: 1 | struct Empty::A a2 (empty) + // CHECK-NEXT: 0 | char f + // CHECK-NEXT: | [sizeof=2, dsize=1, align=1, + // CHECK-NEXT: | nvsize=2, nvalign=1] + + struct G { [[no_unique_address]] A a; ~G(); }; + static_assert(sizeof(G) == 1); + + // CHECK:*** Dumping AST Record Layout + // CHECK: 0 | struct Empty::G + // CHECK-NEXT: 0 | struct Empty::A a (empty) + // CHECK-NEXT: | [sizeof=1, dsize=0, align=1, + // CHECK-NEXT: | nvsize=1, nvalign=1] + + struct H { [[no_unique_address]] A a, b; ~H(); }; + static_assert(sizeof(H) == 2); + + // CHECK:*** Dumping AST Record Layout + // CHECK: 0 | struct Empty::H + // CHECK-NEXT: 0 | struct Empty::A a (empty) + // CHECK-NEXT: 1 | struct Empty::A b (empty) + // CHECK-NEXT: | [sizeof=2, dsize=0, align=1, + // CHECK-NEXT: | nvsize=2, nvalign=1] + + struct OversizedEmpty : A { + ~OversizedEmpty(); + [[no_unique_address]] A a; + }; + static_assert(sizeof(OversizedEmpty) == 2); + + // CHECK:*** Dumping AST Record Layout + // CHECK: 0 | struct Empty::OversizedEmpty + // CHECK-NEXT: 0 | struct Empty::A (base) (empty) + // CHECK-NEXT: 1 | struct Empty::A a (empty) + // CHECK-NEXT: | [sizeof=2, dsize=0, align=1, + // CHECK-NEXT: | nvsize=2, nvalign=1] + + struct HasOversizedEmpty { + [[no_unique_address]] 
OversizedEmpty m; + }; + static_assert(sizeof(HasOversizedEmpty) == 2); + + // CHECK:*** Dumping AST Record Layout + // CHECK: 0 | struct Empty::HasOversizedEmpty + // CHECK-NEXT: 0 | struct Empty::OversizedEmpty m (empty) + // CHECK-NEXT: 0 | struct Empty::A (base) (empty) + // CHECK-NEXT: 1 | struct Empty::A a (empty) + // CHECK-NEXT: | [sizeof=2, dsize=0, align=1, + // CHECK-NEXT: | nvsize=2, nvalign=1] + + struct EmptyWithNonzeroDSize { + [[no_unique_address]] A a; + int x; + [[no_unique_address]] A b; + int y; + [[no_unique_address]] A c; + }; + static_assert(sizeof(EmptyWithNonzeroDSize) == 12); + + // CHECK:*** Dumping AST Record Layout + // CHECK: 0 | struct Empty::EmptyWithNonzeroDSize + // CHECK-NEXT: 0 | struct Empty::A a (empty) + // CHECK-NEXT: 0 | int x + // CHECK-NEXT: 4 | struct Empty::A b (empty) + // CHECK-NEXT: 4 | int y + // CHECK-NEXT: 8 | struct Empty::A c (empty) + // CHECK-NEXT: | [sizeof=12, dsize=12, align=4, + // CHECK-NEXT: | nvsize=12, nvalign=4] + + struct EmptyWithNonzeroDSizeNonPOD { + ~EmptyWithNonzeroDSizeNonPOD(); + [[no_unique_address]] A a; + int x; + [[no_unique_address]] A b; + int y; + [[no_unique_address]] A c; + }; + static_assert(sizeof(EmptyWithNonzeroDSizeNonPOD) == 12); + + // CHECK:*** Dumping AST Record Layout + // CHECK: 0 | struct Empty::EmptyWithNonzeroDSizeNonPOD + // CHECK-NEXT: 0 | struct Empty::A a (empty) + // CHECK-NEXT: 0 | int x + // CHECK-NEXT: 4 | struct Empty::A b (empty) + // CHECK-NEXT: 4 | int y + // CHECK-NEXT: 8 | struct Empty::A c (empty) + // CHECK-NEXT: | [sizeof=12, dsize=8, align=4, + // CHECK-NEXT: | nvsize=9, nvalign=4] +} + +namespace POD { + // Cannot reuse tail padding of a PDO type. + struct A { int n; char c[3]; }; + struct B { [[no_unique_address]] A a; char d; }; + static_assert(sizeof(B) == 12); + + // CHECK:*** Dumping AST Record Layout + // CHECK: 0 | struct POD::B + // CHECK-NEXT: 0 | struct POD::A a + // CHECK-NEXT: 0 | int n + // CHECK-NEXT: 4 | char [3] c + // CHECK-NEXT: 8 | char d + // CHECK-NEXT: | [sizeof=12, dsize=12, align=4, + // CHECK-NEXT: | nvsize=12, nvalign=4] +} + +namespace NonPOD { + struct A { int n; char c[3]; ~A(); }; + struct B { [[no_unique_address]] A a; char d; }; + static_assert(sizeof(B) == 8); + + // CHECK:*** Dumping AST Record Layout + // CHECK: 0 | struct NonPOD::B + // CHECK-NEXT: 0 | struct NonPOD::A a + // CHECK-NEXT: 0 | int n + // CHECK-NEXT: 4 | char [3] c + // CHECK-NEXT: 7 | char d + // CHECK-NEXT: | [sizeof=8, dsize=8, align=4, + // CHECK-NEXT: | nvsize=8, nvalign=4] +} + +namespace NVSizeGreaterThanDSize { + // The nvsize of an object includes the complete size of its empty subobjects + // (although it's unclear why). Ensure this corner case is handled properly. + struct alignas(8) A { ~A(); }; // dsize 0, nvsize 0, size 8 + struct B : A { char c; }; // dsize 1, nvsize 8, size 8 + static_assert(sizeof(B) == 8); + + // CHECK:*** Dumping AST Record Layout + // CHECK: 0 | struct NVSizeGreaterThanDSize::B + // CHECK-NEXT: 0 | struct NVSizeGreaterThanDSize::A (base) (empty) + // CHECK-NEXT: 0 | char c + // CHECK-NEXT: | [sizeof=8, dsize=1, align=8, + // CHECK-NEXT: | nvsize=8, nvalign=8] + + struct V { int n; }; + + // V is at offset 16, not offset 12, because B's tail padding is strangely not + // usable for virtual bases. 
+ struct C : B, virtual V {}; + static_assert(sizeof(C) == 24); + + // CHECK:*** Dumping AST Record Layout + // CHECK: 0 | struct NVSizeGreaterThanDSize::C + // CHECK-NEXT: 0 | (C vtable pointer) + // CHECK-NEXT: 8 | struct NVSizeGreaterThanDSize::B (base) + // CHECK-NEXT: 8 | struct NVSizeGreaterThanDSize::A (base) (empty) + // CHECK-NEXT: 8 | char c + // CHECK-NEXT: 16 | struct NVSizeGreaterThanDSize::V (virtual base) + // CHECK-NEXT: 16 | int n + // CHECK-NEXT: | [sizeof=24, dsize=20, align=8, + // CHECK-NEXT: | nvsize=16, nvalign=8] + + struct D : virtual V { + [[no_unique_address]] B b; + }; + static_assert(sizeof(D) == 24); + + // CHECK:*** Dumping AST Record Layout + // CHECK: 0 | struct NVSizeGreaterThanDSize::D + // CHECK-NEXT: 0 | (D vtable pointer) + // CHECK-NEXT: 8 | struct NVSizeGreaterThanDSize::B b + // CHECK-NEXT: 8 | struct NVSizeGreaterThanDSize::A (base) (empty) + // CHECK-NEXT: 8 | char c + // CHECK-NEXT: 16 | struct NVSizeGreaterThanDSize::V (virtual base) + // CHECK-NEXT: 16 | int n + // CHECK-NEXT: | [sizeof=24, dsize=20, align=8, + // CHECK-NEXT: | nvsize=16, nvalign=8] + + struct X : virtual A { [[no_unique_address]] A a; }; + struct E : virtual A { + [[no_unique_address]] A a; + // Here, we arrange for X to hang over the end of the nvsize of E. This + // should force the A vbase to be laid out at offset 24, not 16. + [[no_unique_address]] X x; + }; + static_assert(sizeof(E) == 32); + + // CHECK:*** Dumping AST Record Layout + // CHECK: 0 | struct NVSizeGreaterThanDSize::E + // CHECK-NEXT: 0 | (E vtable pointer) + // CHECK-NEXT: 0 | struct NVSizeGreaterThanDSize::A a (empty) + // CHECK-NEXT: 8 | struct NVSizeGreaterThanDSize::X x + // CHECK-NEXT: 8 | (X vtable pointer) + // CHECK-NEXT: 8 | struct NVSizeGreaterThanDSize::A a (empty) + // CHECK-NEXT: 16 | struct NVSizeGreaterThanDSize::A (virtual base) (empty) + // CHECK-NEXT: 24 | struct NVSizeGreaterThanDSize::A (virtual base) (empty) + // CHECK-NEXT: | [sizeof=32, dsize=16, align=8, + // CHECK-NEXT: | nvsize=16, nvalign=8] +} + +namespace RepeatedVBase { + struct alignas(16) A { ~A(); }; + struct B : A {}; + struct X : virtual A, virtual B {}; + struct Y { [[no_unique_address]] X x; char c; }; + static_assert(sizeof(Y) == 32); + + // CHECK:*** Dumping AST Record Layout + // CHECK: 0 | struct RepeatedVBase::Y + // CHECK-NEXT: 0 | struct RepeatedVBase::X x + // CHECK-NEXT: 0 | (X vtable pointer) + // CHECK-NEXT: 0 | struct RepeatedVBase::A (virtual base) (empty) + // CHECK-NEXT: 16 | struct RepeatedVBase::B (virtual base) (empty) + // CHECK-NEXT: 16 | struct RepeatedVBase::A (base) (empty) + // CHECK-NEXT: 8 | char c + // CHECK-NEXT: | [sizeof=32, dsize=9, align=16, + // CHECK-NEXT: | nvsize=9, nvalign=16] +} Index: cfe/trunk/test/SemaCXX/cxx2a-no-unique-address.cpp =================================================================== --- cfe/trunk/test/SemaCXX/cxx2a-no-unique-address.cpp +++ cfe/trunk/test/SemaCXX/cxx2a-no-unique-address.cpp @@ -0,0 +1,19 @@ +// RUN: %clang_cc1 -std=c++2a %s -verify -triple x86_64-linux-gnu +// RUN: %clang_cc1 -std=c++2a %s -verify=unsupported -triple x86_64-windows + +[[no_unique_address]] int a; // expected-error {{only applies to non-bit-field non-static data members}} unsupported-warning {{unknown}} +[[no_unique_address]] void f(); // expected-error {{only applies to non-bit-field non-static data members}} unsupported-warning {{unknown}} +struct [[no_unique_address]] S { // expected-error {{only applies to non-bit-field non-static data members}} unsupported-warning {{unknown}} + 
[[no_unique_address]] int a; // unsupported-warning {{unknown}} + [[no_unique_address]] void f(); // expected-error {{only applies to non-bit-field non-static data members}} unsupported-warning {{unknown}} + [[no_unique_address]] static int sa;// expected-error {{only applies to non-bit-field non-static data members}} unsupported-warning {{unknown}} + [[no_unique_address]] static void sf(); // expected-error {{only applies to non-bit-field non-static data members}} unsupported-warning {{unknown}} + [[no_unique_address]] int b : 3; // expected-error {{only applies to non-bit-field non-static data members}} unsupported-warning {{unknown}} + + [[no_unique_address, no_unique_address]] int duplicated; // expected-error {{cannot appear multiple times}} + // unsupported-error@-1 {{cannot appear multiple times}} unsupported-warning@-1 2{{unknown}} + [[no_unique_address]] [[no_unique_address]] int duplicated2; // unsupported-warning 2{{unknown}} + [[no_unique_address()]] int arglist; // expected-error {{cannot have an argument list}} unsupported-warning {{unknown}} + + int [[no_unique_address]] c; // expected-error {{cannot be applied to types}} unsupported-error {{cannot be applied to types}} +}; Index: cfe/trunk/utils/TableGen/ClangAttrEmitter.cpp =================================================================== --- cfe/trunk/utils/TableGen/ClangAttrEmitter.cpp +++ cfe/trunk/utils/TableGen/ClangAttrEmitter.cpp @@ -2810,7 +2810,7 @@ // Helper function for GenerateTargetSpecificAttrChecks that alters the 'Test' // parameter with only a single check type, if applicable. -static void GenerateTargetSpecificAttrCheck(const Record *R, std::string &Test, +static bool GenerateTargetSpecificAttrCheck(const Record *R, std::string &Test, std::string *FnName, StringRef ListName, StringRef CheckAgainst, @@ -2830,7 +2830,9 @@ *FnName += Part; } Test += ")"; + return true; } + return false; } // Generate a conditional expression to check if the current target satisfies @@ -2838,10 +2840,12 @@ // those checks to the Test string. If the FnName string pointer is non-null, // append a unique suffix to distinguish this set of target checks from other // TargetSpecificAttr records. -static void GenerateTargetSpecificAttrChecks(const Record *R, +static bool GenerateTargetSpecificAttrChecks(const Record *R, std::vector &Arches, std::string &Test, std::string *FnName) { + bool AnyTargetChecks = false; + // It is assumed that there will be an llvm::Triple object // named "T" and a TargetInfo object named "Target" within // scope that can be used to determine whether the attribute exists in @@ -2851,6 +2855,7 @@ // differently because GenerateTargetRequirements needs to combine the list // with ParseKind. if (!Arches.empty()) { + AnyTargetChecks = true; Test += " && ("; for (auto I = Arches.begin(), E = Arches.end(); I != E; ++I) { StringRef Part = *I; @@ -2865,16 +2870,24 @@ } // If the attribute is specific to particular OSes, check those. - GenerateTargetSpecificAttrCheck(R, Test, FnName, "OSes", "T.getOS()", - "llvm::Triple::"); + AnyTargetChecks |= GenerateTargetSpecificAttrCheck( + R, Test, FnName, "OSes", "T.getOS()", "llvm::Triple::"); - // If one or more CXX ABIs are specified, check those as well. - GenerateTargetSpecificAttrCheck(R, Test, FnName, "CXXABIs", - "Target.getCXXABI().getKind()", - "TargetCXXABI::"); // If one or more object formats is specified, check those. 
- GenerateTargetSpecificAttrCheck(R, Test, FnName, "ObjectFormats", - "T.getObjectFormat()", "llvm::Triple::"); + AnyTargetChecks |= + GenerateTargetSpecificAttrCheck(R, Test, FnName, "ObjectFormats", + "T.getObjectFormat()", "llvm::Triple::"); + + // If custom code is specified, emit it. + StringRef Code = R->getValueAsString("CustomCode"); + if (!Code.empty()) { + AnyTargetChecks = true; + Test += " && ("; + Test += Code; + Test += ")"; + } + + return AnyTargetChecks; } static void GenerateHasAttrSpellingStringSwitch( @@ -3510,7 +3523,7 @@ std::string FnName = "isTarget"; std::string Test; - GenerateTargetSpecificAttrChecks(R, Arches, Test, &FnName); + bool UsesT = GenerateTargetSpecificAttrChecks(R, Arches, Test, &FnName); // If this code has already been generated, simply return the previous // instance of it. @@ -3520,7 +3533,8 @@ return *I; OS << "static bool " << FnName << "(const TargetInfo &Target) {\n"; - OS << " const llvm::Triple &T = Target.getTriple();\n"; + if (UsesT) + OS << " const llvm::Triple &T = Target.getTriple(); (void)T;\n"; OS << " return " << Test << ";\n"; OS << "}\n\n"; Index: cfe/trunk/www/cxx_status.html =================================================================== --- cfe/trunk/www/cxx_status.html +++ cfe/trunk/www/cxx_status.html @@ -934,7 +934,7 @@ [[no_unique_address]] attribute P0840R2 - No + SVN [[likely]] and [[unlikely]] attributes
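
With the cxx_status entry flipped to SVN, support can be detected through the standard feature-test macro; the 201803 value corresponds to the CXX11 spelling registered in Attr.td above, and on MS-ABI targets the attribute remains unknown (see the unsupported-warning lines in the Sema test). A small usage sketch; the COMPRESSED_MEMBER macro and vector_storage type are illustrative only, not part of the patch:

  #if defined(__has_cpp_attribute)
  #  if __has_cpp_attribute(no_unique_address) >= 201803L
  #    define COMPRESSED_MEMBER [[no_unique_address]]
  #  endif
  #endif
  #ifndef COMPRESSED_MEMBER
  #  define COMPRESSED_MEMBER
  #endif

  template <typename T, typename Alloc>
  struct vector_storage {
    T *data = nullptr;
    COMPRESSED_MEMBER Alloc alloc;  // occupies no storage when Alloc is an empty class
  };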