Index: docs/LanguageExtensions.rst =================================================================== --- docs/LanguageExtensions.rst +++ docs/LanguageExtensions.rst @@ -1191,12 +1191,14 @@ Clang provides support for :doc:`automated reference counting ` in Objective-C, which eliminates the need -for manual ``retain``/``release``/``autorelease`` message sends. There are two +for manual ``retain``/``release``/``autorelease`` message sends. There are three feature macros associated with automatic reference counting: ``__has_feature(objc_arc)`` indicates the availability of automated reference counting in general, while ``__has_feature(objc_arc_weak)`` indicates that automated reference counting also includes support for ``__weak`` pointers to -Objective-C objects. +Objective-C objects. ``__has_feature(objc_arc_fields)`` indicates that C structs +are allowed to have fields that are pointers to Objective-C objects managed by +automatic reference counting. .. _objc-fixed-enum: Index: include/clang/AST/Decl.h =================================================================== --- include/clang/AST/Decl.h +++ include/clang/AST/Decl.h @@ -3533,6 +3533,12 @@ /// when needed. mutable bool LoadedFieldsFromExternalStorage : 1; + /// Basic properties of non-trivial C structs. + bool NonTrivialToPrimitiveDefaultInitialize : 1; + bool NonTrivialToPrimitiveCopy : 1; + bool NonTrivialToPrimitiveDestructiveMove : 1; + bool NonTrivialToPrimitiveDestroy : 1; + protected: RecordDecl(Kind DK, TagKind TK, const ASTContext &C, DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, @@ -3591,6 +3597,39 @@ LoadedFieldsFromExternalStorage = val; } + /// Functions to query basic properties of non-trivial C structs. + bool isNonTrivialToPrimitiveDefaultInitialize() const { + return NonTrivialToPrimitiveDefaultInitialize; + } + + void setNonTrivialToPrimitiveDefaultInitialize() { + NonTrivialToPrimitiveDefaultInitialize = true; + } + + bool isNonTrivialToPrimitiveCopy() const { + return NonTrivialToPrimitiveCopy; + } + + void setNonTrivialToPrimitiveCopy() { + NonTrivialToPrimitiveCopy = true; + } + + bool isNonTrivialToPrimitiveDestructiveMove() const { + return NonTrivialToPrimitiveDestructiveMove; + } + + void setNonTrivialToPrimitiveDestructiveMove() { + NonTrivialToPrimitiveDestructiveMove = true; + } + + bool isNonTrivialToPrimitiveDestroy() const { + return NonTrivialToPrimitiveDestroy; + } + + void setNonTrivialToPrimitiveDestroy() { + NonTrivialToPrimitiveDestroy = true; + } + /// \brief Determines whether this declaration represents the /// injected class name. /// Index: include/clang/AST/Type.h =================================================================== --- include/clang/AST/Type.h +++ include/clang/AST/Type.h @@ -1087,11 +1087,53 @@ // true when Type is objc's weak and weak is enabled but ARC isn't. bool isNonWeakInMRRWithObjCWeak(const ASTContext &Context) const; + // Enum used to distinguish the different kinds of fields of non-trivial C + // structs. + enum PrimitiveCopyKind { + PCK_Trivial, // a field of a trivial type. + PCK_Strong, // objc strong pointer. + PCK_Struct // non-trivial C struct. + }; + + enum PrimitiveDestructKind { + PDK_Trivial, // a field of a trivial type. + PDK_Strong, // objc strong pointer. + PDK_Struct // non-trivial C struct. + }; + + enum PrimitiveDefaultInitializeKind { + PDIK_Trivial, // a field of a trivial type. + PDIK_Strong, // objc strong pointer. + PDIK_Struct // non-trivial C struct. 
+ }; + + /// Functions to query basic properties of non-trivial C struct types. + + /// Check if this is a non-trivial type that would cause a C struct + /// transitively containing this type to be non-trivial to default initialize + /// and return the kind. + PrimitiveDefaultInitializeKind + isNonTrivialToPrimitiveDefaultInitialize() const; + + /// Check if this is a non-trivial type that would cause a C struct + /// transitively containing this type to be non-trivial to copy and return the + /// kind. + PrimitiveCopyKind isNonTrivialToPrimitiveCopy() const; + + /// Check if this is a non-trivial type that would cause a C struct + /// transitively containing this type to be non-trivial to destructively + /// move and return the kind. Destructive move in this context is a C++-style + /// move in which the source object is placed in a valid but unspecified state + /// after it is moved, as opposed to a truly destructive move in which the + /// source object is placed in an uninitialized state. + PrimitiveCopyKind isNonTrivialToPrimitiveDestructiveMove() const; + enum DestructionKind { DK_none, DK_cxx_destructor, DK_objc_strong_lifetime, - DK_objc_weak_lifetime + DK_objc_weak_lifetime, + DK_nontrivial_c_struct }; /// Returns a nonzero value if objects of this type require Index: include/clang/Basic/DiagnosticSemaKinds.td =================================================================== --- include/clang/Basic/DiagnosticSemaKinds.td +++ include/clang/Basic/DiagnosticSemaKinds.td @@ -5119,12 +5119,17 @@ "jump bypasses initialization of __strong variable">; def note_protected_by_objc_weak_init : Note< "jump bypasses initialization of __weak variable">; +def note_protected_by_non_trivial_c_struct_init : Note< + "jump bypasses initialization of variable of non-trivial C struct type">; def note_enters_block_captures_cxx_obj : Note< "jump enters lifetime of block which captures a destructible C++ object">; def note_enters_block_captures_strong : Note< "jump enters lifetime of block which strongly captures a variable">; def note_enters_block_captures_weak : Note< "jump enters lifetime of block which weakly captures a variable">; +def note_enters_block_captures_non_trivial_c_struct : Note< + "jump enters lifetime of block which captures a C struct that is non-trivial " + "to destroy">; def note_exits_cleanup : Note< "jump exits scope of variable with __attribute__((cleanup))">; @@ -5165,6 +5170,9 @@ "jump exits lifetime of block which strongly captures a variable">; def note_exits_block_captures_weak : Note< "jump exits lifetime of block which weakly captures a variable">; +def note_exits_block_captures_non_trivial_c_struct : Note< + "jump exits lifetime of block which captures a C struct that is non-trivial " + "to destroy">; def err_func_returning_qualified_void : ExtWarn< "function cannot return qualified void type %0">, @@ -7183,6 +7191,10 @@ "cannot pass object with interface type %1 by value to variadic " "%select{function|block|method|constructor}2; expected type from format " "string was %3">; +def err_cannot_pass_non_trivial_c_struct_to_vararg : Error< + "cannot pass non-trivial C object of type %0 by value to variadic " + "%select{function|block|method|constructor}1">; + def err_cannot_pass_objc_interface_to_vararg : Error< "cannot pass object with interface type %0 by value through variadic " Index: lib/AST/ASTContext.cpp =================================================================== --- lib/AST/ASTContext.cpp +++ lib/AST/ASTContext.cpp @@ -2642,7 +2642,8 @@ bool
ASTContext::isParamDestroyedInCallee(QualType T) const { return getTargetInfo().getCXXABI().areArgsDestroyedLeftToRightInCallee() || - T.hasTrivialABIOverride(); + T.hasTrivialABIOverride() || + T.isDestructedType() == QualType::DK_nontrivial_c_struct; } /// getComplexType - Return the uniqued reference to the type for a complex @@ -5771,6 +5772,11 @@ return true; } + // The block needs copy/destroy helpers if Ty is non-trivial to destructively + // move or destroy. + if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType()) + return true; + if (!Ty->isObjCRetainableType()) return false; Qualifiers qs = Ty.getQualifiers(); @@ -5784,13 +5790,12 @@ case Qualifiers::OCL_ExplicitNone: case Qualifiers::OCL_Autoreleasing: return false; - - // Tell the runtime that this is ARC __weak, called by the - // byref routines. + + // These cases should have been taken care of when checking the type's + // non-triviality case Qualifiers::OCL_Weak: - // ARC __strong __block variables need to be retained. case Qualifiers::OCL_Strong: - return true; + llvm_unreachable("impossible"); } llvm_unreachable("fell out of lifetime switch!"); } Index: lib/AST/Decl.cpp =================================================================== --- lib/AST/Decl.cpp +++ lib/AST/Decl.cpp @@ -3923,7 +3923,11 @@ : TagDecl(DK, TK, C, DC, IdLoc, Id, PrevDecl, StartLoc), HasFlexibleArrayMember(false), AnonymousStructOrUnion(false), HasObjectMember(false), HasVolatileMember(false), - LoadedFieldsFromExternalStorage(false) { + LoadedFieldsFromExternalStorage(false), + NonTrivialToPrimitiveDefaultInitialize(false), + NonTrivialToPrimitiveCopy(false), + NonTrivialToPrimitiveDestructiveMove(false), + NonTrivialToPrimitiveDestroy(false) { assert(classof(static_cast(this)) && "Invalid Kind!"); } Index: lib/AST/Type.cpp =================================================================== --- lib/AST/Type.cpp +++ lib/AST/Type.cpp @@ -2208,6 +2208,47 @@ getObjCLifetime() != Qualifiers::OCL_Weak; } +QualType::PrimitiveDefaultInitializeKind +QualType::isNonTrivialToPrimitiveDefaultInitialize() const { + if (const auto *RT = + getTypePtr()->getBaseElementTypeUnsafe()->getAs()) + if (RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) + return PDIK_Struct; + + Qualifiers::ObjCLifetime Lifetime = getQualifiers().getObjCLifetime(); + if (Lifetime == Qualifiers::OCL_Strong) + return PDIK_Strong; + + return PDIK_Trivial; +} + +QualType::PrimitiveCopyKind QualType::isNonTrivialToPrimitiveCopy() const { + if (const auto *RT = + getTypePtr()->getBaseElementTypeUnsafe()->getAs()) + if (RT->getDecl()->isNonTrivialToPrimitiveCopy()) + return PCK_Struct; + + Qualifiers::ObjCLifetime Lifetime = getQualifiers().getObjCLifetime(); + if (Lifetime == Qualifiers::OCL_Strong) + return PCK_Strong; + + return PCK_Trivial; +} + +QualType::PrimitiveCopyKind +QualType::isNonTrivialToPrimitiveDestructiveMove() const { + if (const auto *RT = + getTypePtr()->getBaseElementTypeUnsafe()->getAs()) + if (RT->getDecl()->isNonTrivialToPrimitiveDestructiveMove()) + return PCK_Struct; + + Qualifiers::ObjCLifetime Lifetime = getQualifiers().getObjCLifetime(); + if (Lifetime == Qualifiers::OCL_Strong) + return PCK_Strong; + + return PCK_Trivial; +} + bool Type::isLiteralType(const ASTContext &Ctx) const { if (isDependentType()) return false; @@ -3896,6 +3937,13 @@ return DK_objc_weak_lifetime; } + // See if this is a C struct that is non-trivial to destroy or an array that + // contains such a struct. 
+ if (const auto *RT = + type->getBaseElementTypeUnsafe()->getAs()) + if (RT->getDecl()->isNonTrivialToPrimitiveDestroy()) + return DK_nontrivial_c_struct; + /// Currently, the only destruction kind we recognize is C++ objects /// with non-trivial destructors. const CXXRecordDecl *record = Index: lib/CodeGen/CGBlocks.cpp =================================================================== --- lib/CodeGen/CGBlocks.cpp +++ lib/CodeGen/CGBlocks.cpp @@ -477,6 +477,14 @@ info.NeedsCopyDispose = true; info.HasCXXObject = true; + // So do C structs that require non-trivial copy construction or + // destruction. + } else if (variable->getType().isNonTrivialToPrimitiveCopy() == + QualType::PCK_Struct || + variable->getType().isDestructedType() == + QualType::DK_nontrivial_c_struct) { + info.NeedsCopyDispose = true; + // And so do types with destructors. } else if (CGM.getLangOpts().CPlusPlus) { if (const CXXRecordDecl *record = @@ -1513,6 +1521,7 @@ CXXRecord, // Copy or destroy ARCWeak, ARCStrong, + NonTrivialCStruct, BlockObject, // Assign or release None }; @@ -1548,6 +1557,9 @@ Flags |= BLOCK_FIELD_IS_WEAK; return std::make_pair(BlockCaptureEntityKind::BlockObject, Flags); } + if (T.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) + return std::make_pair(BlockCaptureEntityKind::NonTrivialCStruct, + BlockFieldFlags()); if (!T->isObjCRetainableType()) // For all other types, the memcpy is fine. return std::make_pair(BlockCaptureEntityKind::None, Flags); @@ -1677,6 +1689,13 @@ EmitSynthesizedCXXCopyCtor(dstField, srcField, CI.getCopyExpr()); } else if (CopiedCapture.Kind == BlockCaptureEntityKind::ARCWeak) { EmitARCCopyWeak(dstField, srcField); + // If this is a C struct that requires non-trivial copy construction, emit a + // call to its copy constructor. + } else if (CopiedCapture.Kind == + BlockCaptureEntityKind::NonTrivialCStruct) { + QualType varType = CI.getVariable()->getType(); + callCStructCopyConstructor(dstField, srcField, varType, + varType.isVolatileQualified()); } else { llvm::Value *srcValue = Builder.CreateLoad(srcField, "blockcopy.src"); if (CopiedCapture.Kind == BlockCaptureEntityKind::ARCStrong) { @@ -1749,6 +1768,10 @@ return std::make_pair(BlockCaptureEntityKind::CXXRecord, BlockFieldFlags()); } + if (T.isDestructedType() == QualType::DK_nontrivial_c_struct) + return std::make_pair(BlockCaptureEntityKind::NonTrivialCStruct, + BlockFieldFlags()); + // Other types don't need to be destroy explicitly. if (!T->isObjCRetainableType()) return std::make_pair(BlockCaptureEntityKind::None, Flags); @@ -1853,6 +1876,13 @@ } else if (DestroyedCapture.Kind == BlockCaptureEntityKind::ARCStrong) { EmitARCDestroyStrong(srcField, ARCImpreciseLifetime); + // If this is a C struct that requires non-trivial destruction, emit a call + // to its destructor. + } else if (DestroyedCapture.Kind == + BlockCaptureEntityKind::NonTrivialCStruct) { + QualType varType = CI.getVariable()->getType(); + pushDestroy(varType.isDestructedType(), srcField, varType); + // Otherwise we call _Block_object_dispose. It wouldn't be too // hard to just emit this as a cleanup if we wanted to make sure // that things were done in reverse. @@ -2020,6 +2050,36 @@ id.AddPointer(VarType.getCanonicalType().getAsOpaquePtr()); } }; + +/// Emits the copy/dispose helpers for a __block variable that is a non-trivial +/// C struct. 
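+/// The copy helper destructively moves the struct out of the original byref
+/// slot via its synthesized move constructor, and the dispose helper runs the
+/// struct's destructor when the type is non-trivial to destroy.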
+class NonTrivialCStructByrefHelpers final : public BlockByrefHelpers { + QualType VarType; + +public: + NonTrivialCStructByrefHelpers(CharUnits alignment, QualType type) + : BlockByrefHelpers(alignment), VarType(type) {} + + void emitCopy(CodeGenFunction &CGF, Address destField, + Address srcField) override { + CGF.callCStructMoveConstructor(destField, srcField, VarType, + VarType.isVolatileQualified()); + } + + bool needsDispose() const override { + return VarType.isDestructedType() == QualType::DK_nontrivial_c_struct; + } + + void emitDispose(CodeGenFunction &CGF, Address field) override { + EHScopeStack::stable_iterator cleanupDepth = CGF.EHStack.stable_begin(); + CGF.pushDestroy(VarType.isDestructedType(), field, VarType); + CGF.PopCleanupBlocks(cleanupDepth); + } + + void profileImpl(llvm::FoldingSetNodeID &id) const override { + id.AddPointer(VarType.getCanonicalType().getAsOpaquePtr()); + } +}; } // end anonymous namespace static llvm::Constant * @@ -2205,6 +2265,13 @@ CGM, byrefInfo, CXXByrefHelpers(valueAlignment, type, copyExpr)); } + // If type is a non-trivial C struct type that is non-trivial to + // destructively move or destroy, build the copy and dispose helpers. + if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct || + type.isDestructedType() == QualType::DK_nontrivial_c_struct) + return ::buildByrefHelpers( + CGM, byrefInfo, NonTrivialCStructByrefHelpers(valueAlignment, type)); + // Otherwise, if we don't have a retainable type, there's nothing to do. // that the runtime does extra copies. if (!type->isObjCRetainableType()) return nullptr; Index: lib/CodeGen/CGCall.cpp =================================================================== --- lib/CodeGen/CGCall.cpp +++ lib/CodeGen/CGCall.cpp @@ -3417,10 +3417,15 @@ QualType Ty; void Emit(CodeGenFunction &CGF, Flags flags) override { - const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); - assert(!Dtor->isTrivial()); - CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, - /*Delegating=*/false, Addr); + QualType::DestructionKind DtorKind = Ty.isDestructedType(); + if (DtorKind == QualType::DK_cxx_destructor) { + const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); + assert(!Dtor->isTrivial()); + CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, + /*Delegating=*/false, Addr); + } else { + CGF.callCStructDestructor(Addr, Ty, Ty.isVolatileQualified()); + } } }; @@ -3470,11 +3475,16 @@ else Slot = CreateAggTemp(type, "agg.tmp"); - const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); - bool DestroyedInCallee = - RD && RD->hasNonTrivialDestructor() && - (CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default || - RD->hasTrivialABIOverride()); + bool DestroyedInCallee = true, NeedsEHCleanup = true; + if (const auto *RD = type->getAsCXXRecordDecl()) { + DestroyedInCallee = + RD && RD->hasNonTrivialDestructor() && + (CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default || + RD->hasTrivialABIOverride()); + } else { + NeedsEHCleanup = needsEHCleanup(type.isDestructedType()); + } + if (DestroyedInCallee) Slot.setExternallyDestructed(); @@ -3482,7 +3492,7 @@ RValue RV = Slot.asRValue(); args.add(RV, type); - if (DestroyedInCallee) { + if (DestroyedInCallee && NeedsEHCleanup) { // Create a no-op GEP between the placeholder and the cleanup so we can // RAUW it successfully. It also serves as a marker of the first // instruction where the cleanup is active.
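For reference, here is a minimal Objective-C sketch (hypothetical names; assumes -fobjc-arc and -fblocks) of the source patterns the CGBlocks.cpp and CGCall.cpp changes above handle: a C struct with a __strong field that is stored in a __block variable, captured by a block, and passed by value to a callee that is now responsible for destroying its argument::

  typedef struct {
    int tag;
    id object; // __strong under ARC, so the struct is non-trivial
  } Payload;

  void consume(Payload p); // hypothetical callee; the by-value argument is destroyed in the callee

  void example(Payload p) {
    __block Payload q = p;    // __block variable: byref copy/dispose helpers are emitted
    void (^block)(void) = ^{  // capturing block: block copy/dispose helpers are emitted
      consume(q);             // by-value call: destroyed in the callee, with an EH cleanup
    };                        // in the caller only when exceptions require it
    block();
  }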
Index: lib/CodeGen/CGDecl.cpp =================================================================== --- lib/CodeGen/CGDecl.cpp +++ lib/CodeGen/CGDecl.cpp @@ -1287,6 +1287,17 @@ if (emission.IsByRef) emitByrefStructureInit(emission); + // Initialize the variable here if it doesn't have an initializer and it is a + // C struct that is non-trivial to initialize or an array containing such a + // struct. + if (!Init && + type.isNonTrivialToPrimitiveDefaultInitialize() == + QualType::PDIK_Struct) { + defaultInitNonTrivialCStructVar(emission.getAllocatedAddress(), type, + type.isVolatileQualified()); + return; + } + if (isTrivialInitializer(Init)) return; @@ -1462,6 +1473,11 @@ case QualType::DK_objc_weak_lifetime: break; + + case QualType::DK_nontrivial_c_struct: + destroyer = CodeGenFunction::destroyNonTrivialCStruct; + cleanupKind = getARCCleanupKind(); + break; } // If we haven't chosen a more specific destroyer, use the default. @@ -1523,6 +1539,8 @@ return destroyARCStrongPrecise; case QualType::DK_objc_weak_lifetime: return destroyARCWeak; + case QualType::DK_nontrivial_c_struct: + return destroyNonTrivialCStruct; } llvm_unreachable("Unknown DestructionKind"); } @@ -1871,9 +1889,12 @@ // cleanup. if (!IsScalar && !CurFuncIsThunk && getContext().isParamDestroyedInCallee(Ty)) { - const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); - if (RD && RD->hasNonTrivialDestructor()) - pushDestroy(QualType::DK_cxx_destructor, DeclPtr, Ty); + if (QualType::DestructionKind DtorKind = Ty.isDestructedType()) { + assert((DtorKind == QualType::DK_cxx_destructor || + DtorKind == QualType::DK_nontrivial_c_struct) && + "unexpected destructor type"); + pushDestroy(DtorKind, DeclPtr, Ty); + } } } else { // Otherwise, create a temporary to hold the value. Index: lib/CodeGen/CGDeclCXX.cpp =================================================================== --- lib/CodeGen/CGDeclCXX.cpp +++ lib/CodeGen/CGDeclCXX.cpp @@ -79,6 +79,7 @@ case QualType::DK_objc_strong_lifetime: case QualType::DK_objc_weak_lifetime: + case QualType::DK_nontrivial_c_struct: // We don't care about releasing objects during process teardown. assert(!D.getTLSKind() && "should have rejected this"); return; Index: lib/CodeGen/CGExprAgg.cpp =================================================================== --- lib/CodeGen/CGExprAgg.cpp +++ lib/CodeGen/CGExprAgg.cpp @@ -77,7 +77,9 @@ void EmitAggLoadOfLValue(const Expr *E); /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired. - void EmitFinalDestCopy(QualType type, const LValue &src); + /// SrcIsRValue is true if the source comes from an RValue. + void EmitFinalDestCopy(QualType type, const LValue &src, + bool SrcIsRValue = false); void EmitFinalDestCopy(QualType type, RValue src); void EmitCopy(QualType type, const AggValueSlot &dest, const AggValueSlot &src); @@ -245,6 +247,13 @@ /// directly into the return value slot. Otherwise, a final move /// will be performed. void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue src) { + // Push destructor if the result is ignored and the type is a C struct that + // is non-trivial to destroy. + QualType Ty = E->getType(); + if (Dest.isIgnored() && + Ty.isDestructedType() == QualType::DK_nontrivial_c_struct) + CGF.pushDestroy(Ty.isDestructedType(), src.getAggregateAddress(), Ty); + if (shouldUseDestForReturnSlot()) { // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
// The possibility of undef rvalues complicates that a lot, @@ -261,11 +270,12 @@ void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) { assert(src.isAggregate() && "value must be aggregate value!"); LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type); - EmitFinalDestCopy(type, srcLV); + EmitFinalDestCopy(type, srcLV, true); } /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired. -void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src) { +void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src, + bool SrcIsRValue) { // If Dest is ignored, then we're evaluating an aggregate expression // in a context that doesn't care about the result. Note that loads // from volatile l-values force the existence of a non-ignored @@ -273,6 +283,36 @@ if (Dest.isIgnored()) return; + // Copy non-trivial C structs here. + + // For simplicity, both the destination and the source are treated as being + // volatile if either is volatile. + bool IsVolatile = Dest.isVolatile() || src.isVolatile(); + + if (SrcIsRValue) { + if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) { + if (Dest.isPotentiallyAliased()) + CGF.callCStructMoveAssignmentOperator(Dest.getAddress(), + src.getAddress(), type, + IsVolatile); + else + CGF.callCStructMoveConstructor(Dest.getAddress(), src.getAddress(), + type, IsVolatile); + return; + } + } else { + if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) { + if (Dest.isPotentiallyAliased()) + CGF.callCStructCopyAssignmentOperator(Dest.getAddress(), + src.getAddress(), type, + IsVolatile); + else + CGF.callCStructCopyConstructor(Dest.getAddress(), src.getAddress(), + type, IsVolatile); + return; + } + } + AggValueSlot srcAgg = AggValueSlot::forLValue(src, AggValueSlot::IsDestructed, needsGC(type), AggValueSlot::IsAliased); Index: lib/CodeGen/CGNonTrivialStruct.cpp =================================================================== --- /dev/null +++ lib/CodeGen/CGNonTrivialStruct.cpp @@ -0,0 +1,768 @@ +//===--- CGNonTrivialStruct.cpp - Emit Special Functions for C Structs ----===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines functions to generate various special functions for C +// structs. +// +//===----------------------------------------------------------------------===// + +#include "CodeGenFunction.h" +#include "CodeGenModule.h" +#include "llvm/Support/ScopedPrinter.h" +#include + +using namespace clang; +using namespace CodeGen; + +// Return the size of a field in number of bytes. If the field is a bitfield, +// its size gets rounded up to a char's size. +static uint64_t getFieldSize(const FieldDecl *FD, ASTContext &Ctx) { + if (FD->isBitField()) + return llvm::alignTo(FD->getBitWidthValue(Ctx), Ctx.getCharWidth()) / + Ctx.getCharWidth(); + return Ctx.getTypeSize(FD->getType()) / Ctx.getCharWidth(); +} + +namespace { +enum { DstIdx = 0, SrcIdx = 1 }; +const char *ValNameStr[2] = {"dst", "src"}; + +struct DestructorFieldVisitor { + template + static void visit(Visitor &V, QualType FT, bool IsVolatile, unsigned Offset, + Ts... 
Args) { + IsVolatile = IsVolatile || FT.isVolatileQualified(); + + switch (FT.isDestructedType()) { + case QualType::DK_objc_strong_lifetime: + if (V.getContext().getAsArrayType(FT)) + V.visitArray(FT, IsVolatile, Offset, Args...); + else + V.visitStrong(FT, IsVolatile, Offset, Args...); + break; + case QualType::DK_nontrivial_c_struct: + if (V.getContext().getAsArrayType(FT)) + V.visitArray(FT, IsVolatile, Offset, Args...); + else + V.visitStruct(FT, IsVolatile, Offset, Args...); + break; + default: + break; + } + } +}; + +struct DefaultInitializeFieldVisitor { + template + static void visit(Visitor &V, QualType FT, bool IsVolatile, unsigned Offset, + Ts... Args) { + IsVolatile = IsVolatile || FT.isVolatileQualified(); + + switch (FT.isNonTrivialToPrimitiveDefaultInitialize()) { + case QualType::PDIK_Strong: + if (V.getContext().getAsArrayType(FT)) + V.visitArray(FT, IsVolatile, Offset, Args...); + else + V.visitStrong(FT, IsVolatile, Offset, Args...); + break; + case QualType::PDIK_Struct: + if (V.getContext().getAsArrayType(FT)) + V.visitArray(FT, IsVolatile, Offset, Args...); + else + V.visitStruct(FT, IsVolatile, Offset, Args...); + break; + default: + break; + } + } +}; + +template struct BinaryFuncFieldVisitor { + template + static void visit(Visitor &V, QualType FT, bool IsVolatile, unsigned Offset, + Ts... Args) { + visitField(V, FT, IsVolatile, Offset, 0U, Args...); + } +}; + +template struct UnaryFuncStructVisitor { + typedef FieldVisitor FieldVisitorTy; + + UnaryFuncStructVisitor(ASTContext &Ctx) : Ctx(Ctx) {} + + template + void visitStructFields(QualType QT, bool IsVolatile, unsigned Offset, + Ts... Args) { + const RecordDecl *RD = QT->castAs()->getDecl(); + const ASTRecordLayout &RL = Ctx.getASTRecordLayout(RD); + + // Iterate over the fields of the struct. + size_t FieldNo = 0; + for (const FieldDecl *FD : RD->fields()) { + QualType FT = FD->getType(); + unsigned FOffset = + Offset + RL.getFieldOffset(FieldNo++) / Ctx.getCharWidth(); + FieldVisitor::visit(getDerived(), FT, + FT.isVolatileQualified() || IsVolatile, FOffset, + Args...); + } + } + + Derived &getDerived() { return static_cast(*this); } + + ASTContext &getContext() { return Ctx; } + ASTContext &Ctx; +}; + +struct BinaryFieldVisitor { + template + static void visit(Visitor &V, QualType FT, bool IsVolatile, unsigned Offset, + Ts... Args) { + V.visitField(FT, IsVolatile, Offset, 0, Args...); + } +}; + +template struct BinaryFuncStructVisitor { + typedef BinaryFieldVisitor FieldVisitorTy; + + BinaryFuncStructVisitor(ASTContext &Ctx) : Ctx(Ctx) {} + + template + void visitField(QualType FT, bool IsVolatile, unsigned Offset, unsigned Size, + Ts... Args) { + IsVolatile = IsVolatile || FT.isVolatileQualified(); + QualType::PrimitiveCopyKind PCK = + IsMove ? FT.isNonTrivialToPrimitiveDestructiveMove() + : FT.isNonTrivialToPrimitiveCopy(); + + if (PCK) + getDerived().flushTrivialFields(Args...); + + switch (PCK) { + case QualType::PCK_Trivial: + visitTrivialField(FT, IsVolatile, Offset, Size); + break; + case QualType::PCK_Strong: + if (Ctx.getAsArrayType(FT)) + getDerived().visitArray(FT, IsVolatile, Offset, Args...); + else + getDerived().visitStrong(FT, IsVolatile, Offset, Args...); + break; + case QualType::PCK_Struct: + if (Ctx.getAsArrayType(FT)) + getDerived().visitArray(FT, IsVolatile, Offset, Args...); + else + getDerived().visitStruct(FT, IsVolatile, Offset, Args...); + break; + } + } + + template + void visitStructFields(QualType QT, bool IsVolatile, unsigned Offset, + Ts... 
Args) { + const RecordDecl *RD = QT->castAs()->getDecl(); + const ASTRecordLayout &RL = Ctx.getASTRecordLayout(RD); + + // Iterate over the fields of the struct. + size_t FieldNo = 0; + for (const FieldDecl *FD : RD->fields()) { + QualType FT = FD->getType(); + unsigned FOffset = + Offset + RL.getFieldOffset(FieldNo++) / Ctx.getCharWidth(); + unsigned FSize = getFieldSize(FD, Ctx); + visitField(FT, FT.isVolatileQualified() || IsVolatile, FOffset, FSize, + Args...); + } + + getDerived().flushTrivialFields(Args...); + } + + void visitTrivialField(QualType FT, bool IsVolatile, unsigned Offset, + unsigned Size) { + if (Size == 0) + return; + + TrivialFieldIsVolatile |= IsVolatile; + if (Start == End) + Start = Offset; + End = Offset + Size; + } + + Derived &getDerived() { return static_cast(*this); } + ASTContext &getContext() { return Ctx; } + + bool TrivialFieldIsVolatile = false; + unsigned Start = 0, End = 0; + ASTContext &Ctx; +}; + +// This function creates the mangled name of a special function of a non-trivial +// C struct. Since there is no ODR in C, the function is mangled based on the +// struct contents and not the name. The mangled name has the following +// structure: +// +// ::= "_" +// ::= "__destructor_" | "__default_constructor_" | +// "__copy_constructor_" | "__move_constructor_" | +// "__copy_assignment_" | "__move_assignment_" +// ::= ["_" ] +// ::= + +// ::= | +// ::= | | +// +// ::= "_AB" "s" "n" +// "_AE" +// ::= +// ::= "_s" ["b"] ["v"] +// ::= "_t" ["v"] "_" + +template struct GenFuncNameBase { + std::string getVolatileOffsetStr(bool IsVolatile, uint64_t Offset) { + std::string S; + if (IsVolatile) + S = "v"; + S += llvm::to_string(Offset); + return S; + } + + void visitStrong(QualType FT, bool IsVolatile, unsigned Offset) { + appendStr("_s"); + if (FT->isBlockPointerType()) + appendStr("b"); + appendStr(getVolatileOffsetStr(IsVolatile, Offset)); + } + + void visitStruct(QualType QT, bool IsVolatile, unsigned Offset) { + getDerived().visitStructFields(QT, IsVolatile, Offset); + } + + template + void visitArray(QualType QT, bool IsVolatile, unsigned Offset, Ts... Args) { + ASTContext &Ctx = getDerived().getContext(); + const auto *AT = Ctx.getAsConstantArrayType(QT); + unsigned NumElts = Ctx.getConstantArrayElementCount(AT); + QualType EltTy = Ctx.getBaseElementType(AT); + unsigned EltSize = Ctx.getTypeSizeInChars(EltTy).getQuantity(); + appendStr("_AB" + llvm::to_string(Offset) + "s" + llvm::to_string(EltSize) + + "n" + llvm::to_string(NumElts)); + Derived::FieldVisitorTy::visit(getDerived(), EltTy, IsVolatile, Offset); + appendStr("_AE"); + } + + void appendStr(StringRef Str) { Name += Str; } + + std::string getName(QualType QT, bool IsVolatile) { + getDerived().visitStructFields(QT, IsVolatile, 0); + return Name; + } + + Derived &getDerived() { return static_cast(*this); } + + std::string Name; +}; + +template +struct GenUnaryFuncName + : UnaryFuncStructVisitor, FieldVisitor>, + GenFuncNameBase> { + GenUnaryFuncName(StringRef Prefix, CharUnits DstAlignment, ASTContext &Ctx) + : UnaryFuncStructVisitor, FieldVisitor>( + Ctx) { + this->appendStr(Prefix); + this->appendStr(llvm::to_string(DstAlignment.getQuantity())); + } +}; + +// Helper function to create a null constant. +static llvm::Constant *getNullForVariable(Address Addr) { + llvm::Type *Ty = Addr.getElementType(); + return llvm::ConstantPointerNull::get(cast(Ty)); +} + +/// Return an address with the specified offset from the passed address. 
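+/// The result is returned as an i8** so that the generated helper functions
+/// can address any field uniformly through a byte offset.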
+static Address getAddrWithOffset(Address Addr, unsigned Offset, + CodeGenFunction &CGF) { + if (!Addr.isValid() || Offset == 0) + return Addr; + Addr = CGF.Builder.CreateBitCast(Addr, CGF.CGM.Int8PtrTy); + Addr = CGF.Builder.CreateConstInBoundsGEP(Addr, Offset, CharUnits::One()); + return CGF.Builder.CreateBitCast(Addr, CGF.CGM.Int8PtrPtrTy); +} + +template +struct GenBinaryFuncName + : BinaryFuncStructVisitor, IsMove>, + GenFuncNameBase> { + + GenBinaryFuncName(StringRef Prefix, CharUnits DstAlignment, + CharUnits SrcAlignment, ASTContext &Ctx) + : BinaryFuncStructVisitor, IsMove>(Ctx) { + this->appendStr(Prefix); + this->appendStr(llvm::to_string(DstAlignment.getQuantity())); + this->appendStr("_" + llvm::to_string(SrcAlignment.getQuantity())); + } + + void flushTrivialFields() { + if (this->Start == this->End) + return; + + this->appendStr( + "_t" + + this->getVolatileOffsetStr(this->TrivialFieldIsVolatile, this->Start) + + "w" + llvm::to_string(this->End - this->Start)); + + this->TrivialFieldIsVolatile = false; + this->Start = this->End = 0; + } +}; + +// Helper function that creates CGFunctionInfo for an N-aray special function. +template +static const CGFunctionInfo &getFunctionInfo(CodeGenModule &CGM, + FunctionArgList &Args) { + ASTContext &Ctx = CGM.getContext(); + llvm::SmallVector Params; + QualType ParamTy = Ctx.getPointerType(Ctx.VoidPtrTy); + + for (unsigned I = 0; I < N; ++I) + Params.push_back(ImplicitParamDecl::Create( + Ctx, nullptr, SourceLocation(), &Ctx.Idents.get(ValNameStr[I]), ParamTy, + ImplicitParamDecl::Other)); + + for (auto &P : Params) + Args.push_back(P); + + return CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args); +} + +// Template classes that are used as bases for classes that emit special +// functions. +template struct GenFuncBase { + template + void visitStruct(QualType FT, bool IsVolatile, unsigned Offset, + std::array Addrs) { + this->getDerived().callSpecialFunction(FT, IsVolatile, Offset, Addrs); + } + + template + void visitArray(QualType QT, bool IsVolatile, unsigned Offset, + std::array Addrs) { + CodeGenFunction &CGF = *this->CGF; + ASTContext &Ctx = CGF.getContext(); + + // Compute the end address. + QualType BaseEltQT; + std::array StartAddrs = Addrs; + for (unsigned I = 0; I < N; ++I) + StartAddrs[I] = getAddrWithOffset(Addrs[I], Offset, CGF); + Address DstAddr = StartAddrs[DstIdx]; + llvm::Value *NumElts = + CGF.emitArrayLength(Ctx.getAsArrayType(QT), BaseEltQT, DstAddr); + unsigned BaseEltSize = Ctx.getTypeSizeInChars(BaseEltQT).getQuantity(); + llvm::Value *BaseEltSizeVal = + llvm::ConstantInt::get(NumElts->getType(), BaseEltSize); + llvm::Value *SizeInBytes = + CGF.Builder.CreateNUWMul(BaseEltSizeVal, NumElts); + Address BC = CGF.Builder.CreateBitCast(DstAddr, CGF.CGM.Int8PtrTy); + llvm::Value *DstArrayEnd = + CGF.Builder.CreateInBoundsGEP(BC.getPointer(), SizeInBytes); + DstArrayEnd = CGF.Builder.CreateBitCast(DstArrayEnd, CGF.CGM.Int8PtrPtrTy, + "dstarray.end"); + llvm::BasicBlock *PreheaderBB = CGF.Builder.GetInsertBlock(); + + // Create the header block and insert the phi instructions. + llvm::BasicBlock *HeaderBB = CGF.createBasicBlock("loop.header"); + CGF.EmitBlock(HeaderBB); + llvm::PHINode *PHIs[N]; + + for (unsigned I = 0; I < N; ++I) { + PHIs[I] = CGF.Builder.CreatePHI(CGF.CGM.Int8PtrPtrTy, 2, "addr.cur"); + PHIs[I]->addIncoming(StartAddrs[I].getPointer(), PreheaderBB); + } + + // Create the exit and loop body blocks. 
+ llvm::BasicBlock *ExitBB = CGF.createBasicBlock("loop.exit"); + llvm::BasicBlock *LoopBB = CGF.createBasicBlock("loop.body"); + + // Emit the comparison and conditional branch instruction that jumps to + // either the exit or the loop body. + llvm::Value *Done = + CGF.Builder.CreateICmpEQ(PHIs[DstIdx], DstArrayEnd, "done"); + CGF.Builder.CreateCondBr(Done, ExitBB, LoopBB); + + // Visit the element of the array in the loop body. + CGF.EmitBlock(LoopBB); + QualType EltQT = Ctx.getAsArrayType(QT)->getElementType(); + CharUnits EltSize = Ctx.getTypeSizeInChars(EltQT); + std::array NewAddrs = Addrs; + + for (unsigned I = 0; I < N; ++I) + NewAddrs[I] = Address( + PHIs[I], StartAddrs[I].getAlignment().alignmentAtOffset(EltSize)); + + Derived::FieldVisitorTy::visit(this->getDerived(), EltQT, IsVolatile, 0, + NewAddrs); + + LoopBB = CGF.Builder.GetInsertBlock(); + + for (unsigned I = 0; I < N; ++I) { + // Instrs to update the destination and source addresses. + // Update phi instructions. + NewAddrs[I] = getAddrWithOffset(NewAddrs[I], EltSize.getQuantity(), CGF); + PHIs[I]->addIncoming(NewAddrs[I].getPointer(), LoopBB); + } + + // Insert an unconditional branch to the header block. + CGF.Builder.CreateBr(HeaderBB); + CGF.EmitBlock(ExitBB); + } + + template + llvm::Function *getFunction(StringRef FuncName, QualType QT, bool IsVolatile, + std::array Addrs, + std::array Alignments, + CodeGenModule &CGM) { + // If the special function already exists in the module, return it. + if (llvm::Function *F = CGM.getModule().getFunction(FuncName)) + return F; + + ASTContext &Ctx = CGM.getContext(); + FunctionArgList Args; + const CGFunctionInfo &FI = getFunctionInfo(CGM, Args); + llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI); + llvm::Function *F = + llvm::Function::Create(FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, + FuncName, &CGM.getModule()); + F->setVisibility(llvm::GlobalValue::HiddenVisibility); + CGM.SetLLVMFunctionAttributes(nullptr, FI, F); + CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F); + IdentifierInfo *II = &Ctx.Idents.get(FuncName); + FunctionDecl *FD = FunctionDecl::Create( + Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), + II, Ctx.VoidTy, nullptr, SC_PrivateExtern, false, false); + CodeGenFunction NewCGF(CGM); + setCGF(&NewCGF); + CGF->StartFunction(FD, Ctx.VoidTy, F, FI, Args); + + for (unsigned I = 0; I < N; ++I) { + llvm::Value *V = CGF->Builder.CreateLoad(CGF->GetAddrOfLocalVar(Args[I])); + Addrs[I] = Address(V, Alignments[I]); + } + + getDerived().visitStructFields(QT, IsVolatile, 0, Addrs); + CGF->FinishFunction(); + return F; + } + + template + void callFunc(StringRef FuncName, QualType QT, bool IsVolatile, + std::array Addrs, CodeGenFunction &CallerCGF) { + std::array Alignments; + llvm::Value *Ptrs[N]; + + for (unsigned I = 0; I < N; ++I) { + Alignments[I] = Addrs[I].getAlignment(); + Ptrs[I] = + CallerCGF.Builder.CreateBitCast(Addrs[I], CallerCGF.CGM.Int8PtrPtrTy) + .getPointer(); + } + + CallerCGF.EmitNounwindRuntimeCall( + getFunction(FuncName, QT, IsVolatile, Addrs, Alignments, CallerCGF.CGM), + Ptrs); + } + + Derived &getDerived() { return static_cast(*this); } + + void setCGF(CodeGenFunction *F) { CGF = F; } + + CodeGenFunction *CGF = nullptr; +}; + +template +struct GenBinaryFunc : BinaryFuncStructVisitor, + GenFuncBase { + GenBinaryFunc(ASTContext &Ctx) + : BinaryFuncStructVisitor(Ctx) {} + + void flushTrivialFields(std::array Addrs) { + unsigned Size = this->End - this->Start; + + if (Size == 0) + return; + + Address 
DstAddr = getAddrWithOffset(Addrs[DstIdx], this->Start, *this->CGF); + Address SrcAddr = getAddrWithOffset(Addrs[SrcIdx], this->Start, *this->CGF); + + // Emit memcpy. + if (Size >= 16 || !llvm::isPowerOf2_32(Size)) { + llvm::Value *SizeVal = llvm::ConstantInt::get(this->CGF->SizeTy, Size); + DstAddr = + this->CGF->Builder.CreateElementBitCast(DstAddr, this->CGF->Int8Ty); + SrcAddr = + this->CGF->Builder.CreateElementBitCast(SrcAddr, this->CGF->Int8Ty); + this->CGF->Builder.CreateMemCpy(DstAddr, SrcAddr, SizeVal, + this->TrivialFieldIsVolatile); + } else { + llvm::Type *Ty = + llvm::Type::getIntNTy(this->CGF->getLLVMContext(), + Size * this->CGF->getContext().getCharWidth()); + DstAddr = this->CGF->Builder.CreateElementBitCast(DstAddr, Ty); + SrcAddr = this->CGF->Builder.CreateElementBitCast(SrcAddr, Ty); + llvm::Value *SrcVal = + this->CGF->Builder.CreateLoad(SrcAddr, this->TrivialFieldIsVolatile); + this->CGF->Builder.CreateStore(SrcVal, DstAddr, + this->TrivialFieldIsVolatile); + } + + this->TrivialFieldIsVolatile = false; + this->Start = this->End = 0; + } +}; + +// These classes that emit the special functions for a non-trivial struct. +struct GenDestructor + : UnaryFuncStructVisitor, + GenFuncBase { + GenDestructor(ASTContext &Ctx) + : UnaryFuncStructVisitor(Ctx) {} + void visitStrong(QualType QT, bool IsVolatile, unsigned Offset, + std::array Addrs) { + CGF->destroyARCStrongImprecise( + *CGF, getAddrWithOffset(Addrs[DstIdx], Offset, *CGF), QT); + } + + void callSpecialFunction(QualType FT, bool IsVolatile, unsigned Offset, + std::array Addrs) { + CGF->callCStructDestructor(getAddrWithOffset(Addrs[DstIdx], Offset, *CGF), + FT, IsVolatile); + } +}; + +struct GenDefaultInitialize + : UnaryFuncStructVisitor, + GenFuncBase { + typedef GenFuncBase GenFuncBaseTy; + GenDefaultInitialize(ASTContext &Ctx) + : UnaryFuncStructVisitor(Ctx) {} + + void visitStrong(QualType QT, bool IsVolatile, unsigned Offset, + std::array Addrs) { + CGF->EmitNullInitialization(getAddrWithOffset(Addrs[DstIdx], Offset, *CGF), + QT); + } + + template + void visitArray(QualType QT, bool IsVolatile, unsigned Offset, + std::array Addrs) { + ASTContext &Ctx = getContext(); + CharUnits Size = Ctx.getTypeSizeInChars(QT); + QualType EltTy = Ctx.getBaseElementType(QT); + + if (Size < CharUnits::fromQuantity(16) || EltTy->getAs()) { + GenFuncBaseTy::visitArray(QT, IsVolatile, Offset, Addrs); + return; + } + + llvm::Constant *SizeVal = CGF->Builder.getInt64(Size.getQuantity()); + Address DstAddr = getAddrWithOffset(Addrs[DstIdx], Offset, *CGF); + Address Loc = CGF->Builder.CreateElementBitCast(DstAddr, CGF->Int8Ty); + CGF->Builder.CreateMemSet(Loc, CGF->Builder.getInt8(0), SizeVal, + IsVolatile || QT.isVolatileQualified()); + } + + void callSpecialFunction(QualType FT, bool IsVolatile, unsigned Offset, + std::array Addrs) { + CGF->callCStructDefaultConstructor( + getAddrWithOffset(Addrs[DstIdx], Offset, *CGF), FT, IsVolatile); + } +}; + +struct GenCopyConstructor : GenBinaryFunc { + GenCopyConstructor(ASTContext &Ctx) + : GenBinaryFunc(Ctx) {} + + void visitStrong(QualType QT, bool IsVolatile, unsigned Offset, + std::array Addrs) { + Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], Offset, *CGF); + Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], Offset, *CGF); + llvm::Value *SrcVal = CGF->EmitLoadOfScalar( + Addrs[SrcIdx], QT.isVolatileQualified(), QT, SourceLocation()); + llvm::Value *Val = CGF->EmitARCRetain(QT, SrcVal); + CGF->EmitStoreOfScalar(Val, CGF->MakeAddrLValue(Addrs[DstIdx], QT), true); + } + void 
callSpecialFunction(QualType FT, bool IsVolatile, unsigned Offset, + std::array Addrs) { + CGF->callCStructCopyConstructor( + getAddrWithOffset(Addrs[DstIdx], Offset, *CGF), + getAddrWithOffset(Addrs[SrcIdx], Offset, *CGF), FT, IsVolatile); + } +}; + +struct GenMoveConstructor : GenBinaryFunc { + GenMoveConstructor(ASTContext &Ctx) + : GenBinaryFunc(Ctx) {} + + void visitStrong(QualType QT, bool IsVolatile, unsigned Offset, + std::array Addrs) { + Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], Offset, *CGF); + Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], Offset, *CGF); + LValue SrcLV = CGF->MakeAddrLValue(Addrs[SrcIdx], QT); + llvm::Value *SrcVal = + CGF->EmitLoadOfLValue(SrcLV, SourceLocation()).getScalarVal(); + CGF->EmitStoreOfScalar(getNullForVariable(SrcLV.getAddress()), SrcLV); + CGF->EmitStoreOfScalar(SrcVal, CGF->MakeAddrLValue(Addrs[DstIdx], QT), + /* isInitialization */ true); + } + void callSpecialFunction(QualType FT, bool IsVolatile, unsigned Offset, + std::array Addrs) { + CGF->callCStructMoveConstructor( + getAddrWithOffset(Addrs[DstIdx], Offset, *CGF), + getAddrWithOffset(Addrs[SrcIdx], Offset, *CGF), FT, IsVolatile); + } +}; + +struct GenCopyAssignment : GenBinaryFunc { + GenCopyAssignment(ASTContext &Ctx) + : GenBinaryFunc(Ctx) {} + + void visitStrong(QualType QT, bool IsVolatile, unsigned Offset, + std::array Addrs) { + Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], Offset, *CGF); + Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], Offset, *CGF); + llvm::Value *SrcVal = CGF->EmitLoadOfScalar( + Addrs[SrcIdx], QT.isVolatileQualified(), QT, SourceLocation()); + CGF->EmitARCStoreStrong(CGF->MakeAddrLValue(Addrs[DstIdx], QT), SrcVal, + false); + } + void callSpecialFunction(QualType FT, bool IsVolatile, unsigned Offset, + std::array Addrs) { + CGF->callCStructCopyAssignmentOperator( + getAddrWithOffset(Addrs[DstIdx], Offset, *CGF), + getAddrWithOffset(Addrs[SrcIdx], Offset, *CGF), FT, IsVolatile); + } +}; + +struct GenMoveAssignment : GenBinaryFunc { + GenMoveAssignment(ASTContext &Ctx) + : GenBinaryFunc(Ctx) {} + + void visitStrong(QualType QT, bool IsVolatile, unsigned Offset, + std::array Addrs) { + Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], Offset, *CGF); + Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], Offset, *CGF); + LValue SrcLV = CGF->MakeAddrLValue(Addrs[SrcIdx], QT); + llvm::Value *SrcVal = + CGF->EmitLoadOfLValue(SrcLV, SourceLocation()).getScalarVal(); + CGF->EmitStoreOfScalar(getNullForVariable(SrcLV.getAddress()), SrcLV); + LValue DstLV = CGF->MakeAddrLValue(Addrs[DstIdx], QT); + llvm::Value *DstVal = + CGF->EmitLoadOfLValue(DstLV, SourceLocation()).getScalarVal(); + CGF->EmitStoreOfScalar(SrcVal, DstLV); + CGF->EmitARCRelease(DstVal, ARCImpreciseLifetime); + } + + void callSpecialFunction(QualType FT, bool IsVolatile, unsigned Offset, + std::array Addrs) { + CGF->callCStructMoveAssignmentOperator( + getAddrWithOffset(Addrs[DstIdx], Offset, *CGF), + getAddrWithOffset(Addrs[SrcIdx], Offset, *CGF), FT, IsVolatile); + } +}; + +} // namespace + +void CodeGenFunction::destroyNonTrivialCStruct(CodeGenFunction &CGF, + Address Addr, QualType Type) { + CGF.callCStructDestructor(Addr, Type, Type.isVolatileQualified()); +} + +// Default-initialize a variable that is a non-trivial struct or an array of +// such structure. 
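+// Trivial fields are skipped; __strong fields are zero-initialized and nested
+// non-trivial structs are default-initialized recursively.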
+void CodeGenFunction::defaultInitNonTrivialCStructVar(Address DstPtr, + QualType QT, + bool IsVolatile) { + GenDefaultInitialize Gen(getContext()); + DstPtr = Builder.CreateBitCast(DstPtr, CGM.Int8PtrPtrTy); + Gen.setCGF(this); + DefaultInitializeFieldVisitor::visit(Gen, QT, IsVolatile, 0, + std::array({{DstPtr}})); +} + +template +static void callSpecialFunction(G &&Gen, StringRef FuncName, QualType QT, + bool IsVolatile, CodeGenFunction &CGF, + std::array Addrs) { + for (unsigned I = 0; I < N; ++I) + Addrs[I] = CGF.Builder.CreateBitCast(Addrs[I], CGF.CGM.Int8PtrPtrTy); + Gen.callFunc(FuncName, QT, IsVolatile, Addrs, CGF); +} + +// Functions to emit calls to the special functions of a non-trivial C struct. +void CodeGenFunction::callCStructDefaultConstructor(Address DstPtr, QualType QT, + bool IsVolatile) { + GenUnaryFuncName GenName( + "__default_constructor_", DstPtr.getAlignment(), getContext()); + std::string FuncName = GenName.getName(QT, IsVolatile); + callSpecialFunction(GenDefaultInitialize(getContext()), FuncName, QT, + IsVolatile, *this, std::array({{DstPtr}})); +} + +void CodeGenFunction::callCStructDestructor(Address DstPtr, QualType QT, + bool IsVolatile) { + GenUnaryFuncName GenName( + "__destructor_", DstPtr.getAlignment(), getContext()); + std::string FuncName = GenName.getName(QT, IsVolatile); + callSpecialFunction(GenDestructor(getContext()), FuncName, QT, IsVolatile, + *this, std::array({{DstPtr}})); +} + +void CodeGenFunction::callCStructCopyConstructor(Address DstPtr, Address SrcPtr, + QualType QT, bool IsVolatile) { + GenBinaryFuncName GenName("__copy_constructor_", DstPtr.getAlignment(), + SrcPtr.getAlignment(), getContext()); + std::string FuncName = GenName.getName(QT, IsVolatile); + callSpecialFunction(GenCopyConstructor(getContext()), FuncName, QT, + IsVolatile, *this, + std::array({{DstPtr, SrcPtr}})); +} + +void CodeGenFunction::callCStructCopyAssignmentOperator(Address DstPtr, + Address SrcPtr, + QualType QT, + bool IsVolatile) { + GenBinaryFuncName GenName("__copy_assignment_", DstPtr.getAlignment(), + SrcPtr.getAlignment(), getContext()); + std::string FuncName = GenName.getName(QT, IsVolatile); + callSpecialFunction(GenCopyAssignment(getContext()), FuncName, QT, IsVolatile, + *this, std::array({{DstPtr, SrcPtr}})); +} + +void CodeGenFunction::callCStructMoveConstructor(Address DstPtr, Address SrcPtr, + QualType QT, bool IsVolatile) { + GenBinaryFuncName GenName("__move_constructor_", DstPtr.getAlignment(), + SrcPtr.getAlignment(), getContext()); + std::string FuncName = GenName.getName(QT, IsVolatile); + callSpecialFunction(GenMoveConstructor(getContext()), FuncName, QT, + IsVolatile, *this, + std::array({{DstPtr, SrcPtr}})); +} + +void CodeGenFunction::callCStructMoveAssignmentOperator(Address DstPtr, + Address SrcPtr, + QualType QT, + bool IsVolatile) { + GenBinaryFuncName GenName("__move_assignment_", DstPtr.getAlignment(), + SrcPtr.getAlignment(), getContext()); + std::string FuncName = GenName.getName(QT, IsVolatile); + callSpecialFunction(GenMoveAssignment(getContext()), FuncName, QT, IsVolatile, + *this, std::array({{DstPtr, SrcPtr}})); +} Index: lib/CodeGen/CMakeLists.txt =================================================================== --- lib/CodeGen/CMakeLists.txt +++ lib/CodeGen/CMakeLists.txt @@ -56,6 +56,7 @@ CGExprScalar.cpp CGGPUBuiltin.cpp CGLoopInfo.cpp + CGNonTrivialStruct.cpp CGObjC.cpp CGObjCGNU.cpp CGObjCMac.cpp Index: lib/CodeGen/CodeGenFunction.h =================================================================== --- 
lib/CodeGen/CodeGenFunction.h +++ lib/CodeGen/CodeGenFunction.h @@ -1536,6 +1536,7 @@ return false; case QualType::DK_cxx_destructor: case QualType::DK_objc_weak_lifetime: + case QualType::DK_nontrivial_c_struct: return getLangOpts().Exceptions; case QualType::DK_objc_strong_lifetime: return getLangOpts().Exceptions && @@ -3389,6 +3390,22 @@ CXXDtorType Type, const CXXRecordDecl *RD); + // These functions emit calls to the special functions of non-trivial C + // structs. + void defaultInitNonTrivialCStructVar(Address DstPtr, QualType QT, + bool IsVolatile); + void callCStructDefaultConstructor(Address DstPtr, QualType QT, + bool IsVolatile); + void callCStructCopyConstructor(Address DstPtr, Address SrcPtr, + QualType QT, bool IsVolatile); + void callCStructMoveConstructor(Address DstPtr, Address SrcPtr, + QualType QT, bool IsVolatile); + void callCStructCopyAssignmentOperator(Address DstPtr, Address SrcPtr, + QualType QT, bool IsVolatile); + void callCStructMoveAssignmentOperator(Address DstPtr, Address SrcPtr, + QualType QT, bool IsVolatile); + void callCStructDestructor(Address DstPtr, QualType QT, bool IsVolatile); + RValue EmitCXXMemberOrOperatorCall(const CXXMethodDecl *Method, const CGCallee &Callee, @@ -3563,6 +3580,7 @@ static Destroyer destroyARCStrongPrecise; static Destroyer destroyARCWeak; static Destroyer emitARCIntrinsicUse; + static Destroyer destroyNonTrivialCStruct; void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr); llvm::Value *EmitObjCAutoreleasePoolPush(); Index: lib/Lex/PPMacroExpansion.cpp =================================================================== --- lib/Lex/PPMacroExpansion.cpp +++ lib/Lex/PPMacroExpansion.cpp @@ -1151,6 +1151,7 @@ // Objective-C features .Case("objc_arr", LangOpts.ObjCAutoRefCount) // FIXME: REMOVE? .Case("objc_arc", LangOpts.ObjCAutoRefCount) + .Case("objc_arc_fields", true) .Case("objc_arc_weak", LangOpts.ObjCWeak) .Case("objc_default_synthesize_properties", LangOpts.ObjC2) .Case("objc_fixed_enum", LangOpts.ObjC2) Index: lib/Sema/JumpDiagnostics.cpp =================================================================== --- lib/Sema/JumpDiagnostics.cpp +++ lib/Sema/JumpDiagnostics.cpp @@ -154,6 +154,10 @@ return ScopePair(diag::note_protected_by_objc_weak_init, diag::note_exits_objc_weak); + case QualType::DK_nontrivial_c_struct: + return ScopePair(diag::note_protected_by_non_trivial_c_struct_init, + diag::note_exits_dtor); + case QualType::DK_cxx_destructor: OutDiag = diag::note_exits_dtor; break; @@ -254,6 +258,10 @@ Diags = ScopePair(diag::note_enters_block_captures_weak, diag::note_exits_block_captures_weak); break; + case QualType::DK_nontrivial_c_struct: + Diags = ScopePair(diag::note_enters_block_captures_non_trivial_c_struct, + diag::note_exits_block_captures_non_trivial_c_struct); + break; case QualType::DK_none: llvm_unreachable("non-lifetime captured variable"); } Index: lib/Sema/SemaDecl.cpp =================================================================== --- lib/Sema/SemaDecl.cpp +++ lib/Sema/SemaDecl.cpp @@ -11342,6 +11342,9 @@ } } + if (var->getType().isDestructedType() == QualType::DK_nontrivial_c_struct) + getCurFunction()->setHasBranchProtectedScope(); + // Warn about externally-visible variables being defined without a // prior declaration. We only want to do this for global // declarations, but we also specifically need to avoid doing it for @@ -15232,6 +15235,7 @@ // Get the type for the field. 
const Type *FDTy = FD->getType().getTypePtr(); + Qualifiers QS = FD->getType().getQualifiers(); if (!FD->isAnonymousStructOrUnion()) { // Remember all fields written by the user. @@ -15373,7 +15377,9 @@ FD->setType(T); } else if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers() && Record && !ObjCFieldLifetimeErrReported && - (!getLangOpts().CPlusPlus || Record->isUnion())) { + ((!getLangOpts().CPlusPlus && + QS.getObjCLifetime() == Qualifiers::OCL_Weak) || + Record->isUnion())) { // It's an error in ARC or Weak if a field has lifetime. // We don't want to report this in a system header, though, // so we just make the field unavailable. @@ -15409,6 +15415,19 @@ Record->setHasObjectMember(true); } } + + if (Record && !getLangOpts().CPlusPlus) { + QualType FT = FD->getType(); + if (FT.isNonTrivialToPrimitiveDefaultInitialize()) + Record->setNonTrivialToPrimitiveDefaultInitialize(); + if (FT.isNonTrivialToPrimitiveCopy()) + Record->setNonTrivialToPrimitiveCopy(); + if (FT.isNonTrivialToPrimitiveDestructiveMove()) + Record->setNonTrivialToPrimitiveDestructiveMove(); + if (FT.isDestructedType()) + Record->setNonTrivialToPrimitiveDestroy(); + } + if (Record && FD->getType().isVolatileQualified()) Record->setHasVolatileMember(true); // Keep track of the number of named members. Index: lib/Sema/SemaExpr.cpp =================================================================== --- lib/Sema/SemaExpr.cpp +++ lib/Sema/SemaExpr.cpp @@ -776,6 +776,9 @@ return VAK_Valid; } + if (Ty.isDestructedType() == QualType::DK_nontrivial_c_struct) + return VAK_Invalid; + if (Ty.isCXX98PODType(Context)) return VAK_Valid; @@ -837,7 +840,10 @@ break; case VAK_Invalid: - if (Ty->isObjCObjectType()) + if (Ty.isDestructedType() == QualType::DK_nontrivial_c_struct) + Diag(E->getLocStart(), + diag::err_cannot_pass_non_trivial_c_struct_to_vararg) << Ty << CT; + else if (Ty->isObjCObjectType()) DiagRuntimeBehavior( E->getLocStart(), nullptr, PDiag(diag::err_cannot_pass_objc_interface_to_vararg) Index: test/ARCMT/checking.m =================================================================== --- test/ARCMT/checking.m +++ test/ARCMT/checking.m @@ -116,7 +116,7 @@ } struct S { - A* a; // expected-error {{ARC forbids Objective-C objects in struct}} + A* a; }; @interface B Index: test/CodeGenObjC/nontrivial-c-struct-exception.m =================================================================== --- /dev/null +++ test/CodeGenObjC/nontrivial-c-struct-exception.m @@ -0,0 +1,33 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios11 -fobjc-arc -fblocks -fobjc-runtime=ios-11.0 -fobjc-exceptions -fexceptions -fobjc-arc-exceptions -emit-llvm -o - %s | FileCheck %s + +// CHECK: %[[STRUCT_STRONG:.*]] = type { i32, i8* } + +typedef struct { + int i; + id f1; +} Strong; + +// CHECK: define void @testStrongException() +// CHECK: %[[AGG_TMP:.*]] = alloca %[[STRUCT_STRONG]], align 8 +// CHECK: %[[AGG_TMP1:.*]] = alloca %[[STRUCT_STRONG]], align 8 +// CHECK: %[[CALL:.*]] = call [2 x i64] @genStrong() +// CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONG]]* %[[AGG_TMP]] to [2 x i64]* +// CHECK: store [2 x i64] %[[CALL]], [2 x i64]* %[[V0]], align 8 +// CHECK: invoke [2 x i64] @genStrong() + +// CHECK: call void @calleeStrong([2 x i64] %{{.*}}, [2 x i64] %{{.*}}) +// CHECK-NEXT: ret void + +// CHECK: landingpad { i8*, i32 } +// CHECK: %[[V9:.*]] = bitcast %[[STRUCT_STRONG]]* %[[AGG_TMP]] to i8** +// CHECK: call void @__destructor_8_s8(i8** %[[V9]]) +// CHECK: br label + +// CHECK: resume + +Strong genStrong(void); +void calleeStrong(Strong, Strong); + 
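+// The CHECK lines above verify that the aggregate temporary holding the result
+// of the first call to genStrong() is destroyed via __destructor_8_s8 on the
+// landing pad if the second, invoked call to genStrong() throws.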
+void testStrongException(void) { + calleeStrong(genStrong(), genStrong()); +} Index: test/CodeGenObjC/strong-in-c-struct.m =================================================================== --- /dev/null +++ test/CodeGenObjC/strong-in-c-struct.m @@ -0,0 +1,489 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios11 -fobjc-arc -fblocks -fobjc-runtime=ios-11.0 -emit-llvm -o - %s | FileCheck %s + +typedef void (^BlockTy)(void); + +typedef struct { + int a[4]; +} Trivial; + +typedef struct { + Trivial f0; + id f1; +} Strong; + +typedef struct { + int i; + id f1; +} StrongSmall; + +typedef struct { + Strong f0; + id f1; + double d; +} StrongOuter; + +typedef struct { + int f0; + volatile id f1; +} StrongVolatile; + +typedef struct { + BlockTy f0; +} StrongBlock; + +typedef struct { + int i; + id f0[2][2]; +} IDArray; + +typedef struct { + double d; + Strong f0[2][2]; +} StructArray; + +typedef struct { + id f0; + int i : 9; +} Bitfield0; + +typedef struct { + char c; + int i0 : 2; + int i1 : 4; + id f0; + int i2 : 31; + int i3 : 1; + id f1; + int : 0; + int a[3]; + id f2; + double d; + int i4 : 1; +} Bitfield1; + +StrongSmall getStrongSmall(void); +StrongOuter getStrongOuter(void); +void calleeStrongSmall(StrongSmall); +void func(Strong *); + +// CHECK: define void @test_constructor_destructor_StrongOuter() +// CHECK: %[[T:.*]] = alloca %[[STRUCT_STRONGOUTER:.*]], align 8 +// CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONGOUTER]]* %[[T]] to i8** +// CHECK: call void @__default_constructor_8_s16_s24(i8** %[[V0]]) +// CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONGOUTER]]* %[[T]] to i8** +// CHECK: call void @__destructor_8_s16_s24(i8** %[[V1]]) +// CHECK: ret void + +// CHECK: define linkonce_odr hidden void @__default_constructor_8_s16_s24(i8** %[[DST:.*]]) +// CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 +// CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 +// CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 +// CHECK: call void @__default_constructor_8_s16(i8** %[[V0]]) +// CHECK: %[[V1:.*]] = bitcast i8** %[[V0]] to i8* +// CHECK: %[[V2:.*]] = getelementptr inbounds i8, i8* %[[V1]], i64 24 +// CHECK: %[[V3:.*]] = bitcast i8* %[[V2]] to i8** +// CHECK: %[[V4:.*]] = bitcast i8** %[[V3]] to i8* +// CHECK: call void @llvm.memset.p0i8.i64(i8* align 8 %[[V4]], i8 0, i64 8, i1 false) +// CHECK: ret void + +// CHECK: define linkonce_odr hidden void @__default_constructor_8_s16(i8** %[[DST:.*]]) +// CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 +// CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 +// CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 +// CHECK: %[[V1:.*]] = bitcast i8** %[[V0]] to i8* +// CHECK: %[[V2:.*]] = getelementptr inbounds i8, i8* %[[V1]], i64 16 +// CHECK: %[[V3:.*]] = bitcast i8* %[[V2]] to i8** +// CHECK: %[[V4:.*]] = bitcast i8** %[[V3]] to i8* +// CHECK: call void @llvm.memset.p0i8.i64(i8* align 8 %[[V4]], i8 0, i64 8, i1 false) +// CHECK: ret void + +// CHECK: define linkonce_odr hidden void @__destructor_8_s16_s24(i8** %[[DST:.*]]) +// CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 +// CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 +// CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 +// CHECK: call void @__destructor_8_s16(i8** %[[V0]]) +// CHECK: %[[V1:.*]] = bitcast i8** %[[V0]] to i8* +// CHECK: %[[V2:.*]] = getelementptr inbounds i8, i8* %[[V1]], i64 24 +// CHECK: %[[V3:.*]] = bitcast i8* %[[V2]] to i8** +// CHECK: call void @objc_storeStrong(i8** %[[V3]], i8* null) +// CHECK: ret void + +// CHECK: define 
+// CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8
+// CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8
+// CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8
+// CHECK: %[[V1:.*]] = bitcast i8** %[[V0]] to i8*
+// CHECK: %[[V2:.*]] = getelementptr inbounds i8, i8* %[[V1]], i64 16
+// CHECK: %[[V3:.*]] = bitcast i8* %[[V2]] to i8**
+// CHECK: call void @objc_storeStrong(i8** %[[V3]], i8* null)
+// CHECK: ret void
+
+void test_constructor_destructor_StrongOuter(void) {
+  StrongOuter t;
+}
+
+// CHECK: define void @test_copy_constructor_StrongOuter(%[[STRUCT_STRONGOUTER:.*]]* %[[S:.*]])
+// CHECK: %[[S_ADDR:.*]] = alloca %[[STRUCT_STRONGOUTER]]*, align 8
+// CHECK: %[[T:.*]] = alloca %[[STRUCT_STRONGOUTER]], align 8
+// CHECK: store %[[STRUCT_STRONGOUTER]]* %[[S]], %[[STRUCT_STRONGOUTER]]** %[[S_ADDR]], align 8
+// CHECK: %[[V0:.*]] = load %[[STRUCT_STRONGOUTER]]*, %[[STRUCT_STRONGOUTER]]** %[[S_ADDR]], align 8
+// CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONGOUTER]]* %[[T]] to i8**
+// CHECK: %[[V2:.*]] = bitcast %[[STRUCT_STRONGOUTER]]* %[[V0]] to i8**
+// CHECK: call void @__copy_constructor_8_8_t0w16_s16_s24_t32w8(i8** %[[V1]], i8** %[[V2]])
+// CHECK: %[[V3:.*]] = bitcast %[[STRUCT_STRONGOUTER]]* %[[T]] to i8**
+// CHECK: call void @__destructor_8_s16_s24(i8** %[[V3]])
+// CHECK: ret void
+
+// CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_t0w16_s16_s24_t32w8(i8** %[[DST:.*]], i8** %[[SRC:.*]])
+// CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8
+// CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8
+// CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8
+// CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8
+// CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8
+// CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8
+// CHECK: call void @__copy_constructor_8_8_t0w16_s16(i8** %[[V0]], i8** %[[V1]])
+// CHECK: %[[V2:.*]] = bitcast i8** %[[V0]] to i8*
+// CHECK: %[[V3:.*]] = getelementptr inbounds i8, i8* %[[V2]], i64 24
+// CHECK: %[[V4:.*]] = bitcast i8* %[[V3]] to i8**
+// CHECK: %[[V5:.*]] = bitcast i8** %[[V1]] to i8*
+// CHECK: %[[V6:.*]] = getelementptr inbounds i8, i8* %[[V5]], i64 24
+// CHECK: %[[V7:.*]] = bitcast i8* %[[V6]] to i8**
+// CHECK: %[[V8:.*]] = load i8*, i8** %[[V7]], align 8
+// CHECK: %[[V9:.*]] = call i8* @objc_retain(i8* %[[V8]])
+// CHECK: store i8* %[[V9]], i8** %[[V4]], align 8
+// CHECK: %[[V10:.*]] = bitcast i8** %[[V0]] to i8*
+// CHECK: %[[V11:.*]] = getelementptr inbounds i8, i8* %[[V10]], i64 32
+// CHECK: %[[V12:.*]] = bitcast i8* %[[V11]] to i8**
+// CHECK: %[[V13:.*]] = bitcast i8** %[[V1]] to i8*
+// CHECK: %[[V14:.*]] = getelementptr inbounds i8, i8* %[[V13]], i64 32
+// CHECK: %[[V15:.*]] = bitcast i8* %[[V14]] to i8**
+// CHECK: %[[V16:.*]] = bitcast i8** %[[V12]] to i64*
+// CHECK: %[[V17:.*]] = bitcast i8** %[[V15]] to i64*
+// CHECK: %[[V18:.*]] = load i64, i64* %[[V17]], align 8
+// CHECK: store i64 %[[V18]], i64* %[[V16]], align 8
+// CHECK: ret void
+
+// CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_t0w16_s16(i8** %[[DST:.*]], i8** %[[SRC:.*]])
+// CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8
+// CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8
+// CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8
+// CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8
+// CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8
+// CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8
+// CHECK: %[[V2:.*]] = bitcast i8** %[[V0]] to i8*
+// CHECK: %[[V3:.*]] = bitcast i8** %[[V1]] to i8*
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %[[V2]], i8* align 8 %[[V3]], i64 16, i1 false)
+// CHECK: %[[V4:.*]] = bitcast i8** %[[V0]] to i8*
+// CHECK: %[[V5:.*]] = getelementptr inbounds i8, i8* %[[V4]], i64 16
+// CHECK: %[[V6:.*]] = bitcast i8* %[[V5]] to i8**
+// CHECK: %[[V7:.*]] = bitcast i8** %[[V1]] to i8*
+// CHECK: %[[V8:.*]] = getelementptr inbounds i8, i8* %[[V7]], i64 16
+// CHECK: %[[V9:.*]] = bitcast i8* %[[V8]] to i8**
+// CHECK: %[[V10:.*]] = load i8*, i8** %[[V9]], align 8
+// CHECK: %[[V11:.*]] = call i8* @objc_retain(i8* %[[V10]])
+// CHECK: store i8* %[[V11]], i8** %[[V6]], align 8
+// CHECK: ret void
+
+void test_copy_constructor_StrongOuter(StrongOuter *s) {
+  StrongOuter t = *s;
+}
+
+// CHECK: define linkonce_odr hidden void @__copy_assignment_8_8_t0w16_s16_s24_t32w8(i8** %[[DST:.*]], i8** %[[SRC:.*]])
+// CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8
+// CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8
+// CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8
+// CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8
+// CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8
+// CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8
+// CHECK: %[[V2:.*]] = bitcast i8** %[[V0]] to i8*
+// CHECK: %[[V3:.*]] = getelementptr inbounds i8, i8* %[[V2]], i64 24
+// CHECK: %[[V4:.*]] = bitcast i8* %[[V3]] to i8**
+// CHECK: %[[V5:.*]] = bitcast i8** %[[V1]] to i8*
+// CHECK: %[[V6:.*]] = getelementptr inbounds i8, i8* %[[V5]], i64 24
+// CHECK: %[[V7:.*]] = bitcast i8* %[[V6]] to i8**
+// CHECK: %[[V8:.*]] = load i8*, i8** %[[V7]], align 8
+// CHECK: call void @objc_storeStrong(i8** %[[V4]], i8* %[[V8]])
+
+void test_copy_assignment_StrongOuter(StrongOuter *d, StrongOuter *s) {
+  *d = *s;
+}
+
+// CHECK: define internal void @__Block_byref_object_copy_(i8*, i8*)
+// CHECK: call void @__move_constructor_8_8_t0w16_s16_s24_t32w8(
+
+// CHECK: define linkonce_odr hidden void @__move_constructor_8_8_t0w16_s16_s24_t32w8(i8** %[[DST:.*]], i8** %[[SRC:.*]])
+// CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8
+// CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8
+// CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8
+// CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8
+// CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8
+// CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8
+// CHECK: call void @__move_constructor_8_8_t0w16_s16(i8** %[[V0]], i8** %[[V1]])
+// CHECK: %[[V2:.*]] = bitcast i8** %[[V0]] to i8*
+// CHECK: %[[V3:.*]] = getelementptr inbounds i8, i8* %[[V2]], i64 24
+// CHECK: %[[V4:.*]] = bitcast i8* %[[V3]] to i8**
+// CHECK: %[[V5:.*]] = bitcast i8** %[[V1]] to i8*
+// CHECK: %[[V6:.*]] = getelementptr inbounds i8, i8* %[[V5]], i64 24
+// CHECK: %[[V7:.*]] = bitcast i8* %[[V6]] to i8**
+// CHECK: %[[V8:.*]] = load i8*, i8** %[[V7]], align 8
+// CHECK: store i8* null, i8** %[[V7]], align 8
+// CHECK: store i8* %[[V8]], i8** %[[V4]], align 8
+
+// CHECK: define internal void @__Block_byref_object_dispose_(i8*)
+// CHECK: call void @__destructor_8_s16_s24(
+
+void test_move_constructor_StrongOuter(void) {
+  __block StrongOuter t;
+  BlockTy b = ^{ (void)t; };
+}
+
+// CHECK: define linkonce_odr hidden void @__move_assignment_8_8_t0w16_s16_s24_t32w8(i8** %[[DST:.*]], i8** %[[SRC:.*]])
+// CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8
+// CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8
+// CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8
+// CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8
+// CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8
+// CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8
+// CHECK: call void @__move_assignment_8_8_t0w16_s16(i8** %[[V0]], i8** %[[V1]])
+// CHECK: %[[V2:.*]] = bitcast i8** %[[V0]] to i8*
+// CHECK: %[[V3:.*]] = getelementptr inbounds i8, i8* %[[V2]], i64 24
+// CHECK: %[[V4:.*]] = bitcast i8* %[[V3]] to i8**
+// CHECK: %[[V5:.*]] = bitcast i8** %[[V1]] to i8*
+// CHECK: %[[V6:.*]] = getelementptr inbounds i8, i8* %[[V5]], i64 24
+// CHECK: %[[V7:.*]] = bitcast i8* %[[V6]] to i8**
+// CHECK: %[[V8:.*]] = load i8*, i8** %[[V7]], align 8
+// CHECK: store i8* null, i8** %[[V7]], align 8
+// CHECK: %[[V9:.*]] = load i8*, i8** %[[V4]], align 8
+// CHECK: store i8* %[[V8]], i8** %[[V4]], align 8
+// CHECK: call void @objc_release(i8* %[[V9]])
+
+void test_move_assignment_StrongOuter(StrongOuter *p) {
+  *p = getStrongOuter();
+}
+
+// CHECK: define void @test_parameter_StrongSmall([2 x i64] %[[A_COERCE:.*]])
+// CHECK: %[[A:.*]] = alloca %[[STRUCT_STRONG:.*]], align 8
+// CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONG]]* %[[A]] to [2 x i64]*
+// CHECK: store [2 x i64] %[[A_COERCE]], [2 x i64]* %[[V0]], align 8
+// CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONG]]* %[[A]] to i8**
+// CHECK: call void @__destructor_8_s8(i8** %[[V1]])
+// CHECK: ret void
+
+void test_parameter_StrongSmall(StrongSmall a) {
+}
+
+// CHECK: define void @test_argument_StrongSmall([2 x i64] %[[A_COERCE:.*]])
+// CHECK: %[[A:.*]] = alloca %[[STRUCT_STRONGSMALL:.*]], align 8
+// CHECK: %[[TEMP_LVALUE:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8
+// CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to [2 x i64]*
+// CHECK: store [2 x i64] %[[A_COERCE]], [2 x i64]* %[[V0]], align 8
+// CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[TEMP_LVALUE]] to i8**
+// CHECK: %[[V2:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to i8**
+// CHECK: call void @__copy_constructor_8_8_t0w4_s8(i8** %[[V1]], i8** %[[V2]])
+// CHECK: %[[V3:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[TEMP_LVALUE]] to [2 x i64]*
+// CHECK: %[[V4:.*]] = load [2 x i64], [2 x i64]* %[[V3]], align 8
+// CHECK: call void @calleeStrongSmall([2 x i64] %[[V4]])
+// CHECK: %[[V5:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to i8**
+// CHECK: call void @__destructor_8_s8(i8** %[[V5]])
+// CHECK: ret void
+
+void test_argument_StrongSmall(StrongSmall a) {
+  calleeStrongSmall(a);
+}
+
+// CHECK: define [2 x i64] @test_return_StrongSmall([2 x i64] %[[A_COERCE:.*]])
+// CHECK: %[[RETVAL:.*]] = alloca %[[STRUCT_STRONGSMALL:.*]], align 8
+// CHECK: %[[A:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8
+// CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to [2 x i64]*
+// CHECK: store [2 x i64] %[[A_COERCE]], [2 x i64]* %[[V0]], align 8
+// CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[RETVAL]] to i8**
+// CHECK: %[[V2:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to i8**
+// CHECK: call void @__copy_constructor_8_8_t0w4_s8(i8** %[[V1]], i8** %[[V2]])
+// CHECK: %[[V3:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to i8**
+// CHECK: call void @__destructor_8_s8(i8** %[[V3]])
+// CHECK: %[[V4:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[RETVAL]] to [2 x i64]*
+// CHECK: %[[V5:.*]] = load [2 x i64], [2 x i64]* %[[V4]], align 8
+// CHECK: ret [2 x i64] %[[V5]]
+
+StrongSmall test_return_StrongSmall(StrongSmall a) {
+  return a;
+}
+
+// CHECK: define void @test_destructor_ignored_result()
+// CHECK: %[[COERCE:.*]] = alloca %[[STRUCT_STRONGSMALL:.*]], align 8
+// CHECK: %[[CALL:.*]] = call [2 x i64] @getStrongSmall()
+// CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[COERCE]] to [2 x i64]*
+// CHECK: store [2 x i64] %[[CALL]], [2 x i64]* %[[V0]], align 8
+// CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[COERCE]] to i8**
+// CHECK: call void @__destructor_8_s8(i8** %[[V1]])
+// CHECK: ret void
+
+void test_destructor_ignored_result(void) {
+  getStrongSmall();
+}
+
+// CHECK: define void @test_copy_constructor_StrongBlock(
+// CHECK: call void @__copy_constructor_8_8_sb0(
+// CHECK: call void @__destructor_8_sb0(
+// CHECK: ret void
+
+// CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_sb0(i8** %[[DST:.*]], i8** %[[SRC:.*]])
+// CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8
+// CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8
+// CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8
+// CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8
+// CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8
+// CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8
+// CHECK: %[[V2:.*]] = load i8*, i8** %[[V1]], align 8
+// CHECK: %[[V3:.*]] = call i8* @objc_retainBlock(i8* %[[V2]])
+// CHECK: store i8* %[[V3]], i8** %[[V0]], align 8
+// CHECK: ret void
+
+void test_copy_constructor_StrongBlock(StrongBlock *s) {
+  StrongBlock t = *s;
+}
+
+// CHECK: define void @test_copy_assignment_StrongBlock(%[[STRUCT_STRONGBLOCK:.*]]* %[[D:.*]], %[[STRUCT_STRONGBLOCK]]* %[[S:.*]])
+// CHECK: call void @__copy_assignment_8_8_sb0(
+
+// CHECK: define linkonce_odr hidden void @__copy_assignment_8_8_sb0(i8** %[[DST:.*]], i8** %[[SRC:.*]])
+// CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8
+// CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8
+// CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8
+// CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8
+// CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8
+// CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8
+// CHECK: %[[V2:.*]] = load i8*, i8** %[[V1]], align 8
+// CHECK: %[[V3:.*]] = call i8* @objc_retainBlock(i8* %[[V2]])
+// CHECK: %[[V4:.*]] = load i8*, i8** %[[V0]], align 8
+// CHECK: store i8* %[[V3]], i8** %[[V0]], align 8
+// CHECK: call void @objc_release(i8* %[[V4]])
+// CHECK: ret void
+
+void test_copy_assignment_StrongBlock(StrongBlock *d, StrongBlock *s) {
+  *d = *s;
+}
+
+// CHECK: define void @test_copy_constructor_StrongVolatile0(
+// CHECK: call void @__copy_constructor_8_8_t0w4_sv8(
+// CHECK: call void @__destructor_8_sv8(
+
+// CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_t0w4_sv8(
+// CHECK: %[[V8:.*]] = load volatile i8*, i8** %{{.*}}, align 8
+// CHECK: %[[V9:.*]] = call i8* @objc_retain(i8* %[[V8]])
+// CHECK: store volatile i8* %[[V9]], i8** %{{.*}}, align 8
+
+void test_copy_constructor_StrongVolatile0(StrongVolatile *s) {
+  StrongVolatile t = *s;
+}
+
+// CHECK: define void @test_copy_constructor_StrongVolatile1(
+// CHECK: call void @__copy_constructor_8_8_tv0w16_sv16(
+
+void test_copy_constructor_StrongVolatile1(Strong *s) {
+  volatile Strong t = *s;
+}
+
+// CHECK: define void @test_block_capture_Strong()
+// CHECK: call void @__default_constructor_8_s16(
+// CHECK: call void @__copy_constructor_8_8_t0w16_s16(
+// CHECK: call void @__destructor_8_s16(
+// CHECK: call void @__destructor_8_s16(
+// CHECK: ret void
+
+// CHECK: define internal void @__copy_helper_block_.1(i8*, i8*)
+// CHECK: call void @__copy_constructor_8_8_t0w16_s16(
+// CHECK: ret void
+
+// CHECK: define internal void @__destroy_helper_block_.2(
+// CHECK: call void @__destructor_8_s16(
+// CHECK: ret void
+
+void test_block_capture_Strong(void) {
+  Strong t;
+  BlockTy b = ^(){ (void)t; };
+}
+
+// CHECK: define void @test_variable_length_array(i32 %[[N:.*]])
+// CHECK: %[[N_ADDR:.*]] = alloca i32, align 4
+// CHECK: store i32 %[[N]], i32* %[[N_ADDR]], align 4
+// CHECK: %[[V0:.*]] = load i32, i32* %[[N_ADDR]], align 4
+// CHECK: %[[V1:.*]] = zext i32 %[[V0]] to i64
+// CHECK: %[[VLA:.*]] = alloca %[[STRUCT_STRONG:.*]], i64 %[[V1]], align 8
+// CHECK: %[[V3:.*]] = bitcast %[[STRUCT_STRONG]]* %[[VLA]] to i8**
+// CHECK: %[[V4:.*]] = mul nuw i64 24, %[[V1]]
+// CHECK: %[[V5:.*]] = bitcast i8** %[[V3]] to i8*
+// CHECK: %[[V6:.*]] = getelementptr inbounds i8, i8* %[[V5]], i64 %[[V4]]
+// CHECK: %[[DSTARRAY_END:.*]] = bitcast i8* %[[V6]] to i8**
+// CHECK: br label
+
+// CHECK: %[[DSTADDR_CUR:.*]] = phi i8** [ %[[V3]], {{.*}} ], [ %[[V7:.*]], {{.*}} ]
+// CHECK: %[[DONE:.*]] = icmp eq i8** %[[DSTADDR_CUR]], %[[DSTARRAY_END]]
+// CHECK: br i1 %[[DONE]], label
+
+// CHECK: call void @__default_constructor_8_s16(i8** %[[DSTADDR_CUR]])
+// CHECK: %[[V8:.*]] = bitcast i8** %[[DSTADDR_CUR]] to i8*
+// CHECK: %[[V9:.*]] = getelementptr inbounds i8, i8* %[[V8]], i64 24
+// CHECK: %[[V7]] = bitcast i8* %[[V9]] to i8**
+// CHECK: br label
+
+// CHECK: call void @func(%[[STRUCT_STRONG]]* %[[VLA]])
+// CHECK: %[[V10:.*]] = getelementptr inbounds %[[STRUCT_STRONG]], %[[STRUCT_STRONG]]* %[[VLA]], i64 %[[V1]]
+// CHECK: %[[ARRAYDESTROY_ISEMPTY:.*]] = icmp eq %[[STRUCT_STRONG]]* %[[VLA]], %[[V10]]
+// CHECK: br i1 %[[ARRAYDESTROY_ISEMPTY]], label
+
+// CHECK: %[[ARRAYDESTROY_ELEMENTPAST:.*]] = phi %[[STRUCT_STRONG]]* [ %[[V10]], {{.*}} ], [ %[[ARRAYDESTROY_ELEMENT:.*]], {{.*}} ]
+// CHECK: %[[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds %[[STRUCT_STRONG]], %[[STRUCT_STRONG]]* %[[ARRAYDESTROY_ELEMENTPAST]], i64 -1
+// CHECK: %[[V11:.*]] = bitcast %[[STRUCT_STRONG]]* %[[ARRAYDESTROY_ELEMENT]] to i8**
+// CHECK: call void @__destructor_8_s16(i8** %[[V11]])
+// CHECK: %[[ARRAYDESTROY_DONE:.*]] = icmp eq %[[STRUCT_STRONG]]* %[[ARRAYDESTROY_ELEMENT]], %[[VLA]]
+// CHECK: br i1 %[[ARRAYDESTROY_DONE]], label
+
+// CHECK: ret void
+
+void test_variable_length_array(int n) {
+  Strong a[n];
+  func(a);
+}
+
+// CHECK: define linkonce_odr hidden void @__default_constructor_8_AB8s8n4_s8_AE(
+// CHECK: call void @llvm.memset.p0i8.i64(i8* align 8 %{{.*}}, i8 0, i64 32, i1 false)
+void test_constructor_destructor_IDArray(void) {
+  IDArray t;
+}
+
+// CHECK: define linkonce_odr hidden void @__default_constructor_8_AB8s24n4_s24_AE(
+void test_constructor_destructor_StructArray(void) {
+  StructArray t;
+}
+
+// Check that IRGen copies the 9-bit bitfield, emitting an i16 load and store.
+
+// CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_s0_t8w2(
+// CHECK: %[[V4:.*]] = bitcast i8** %{{.*}} to i8*
+// CHECK: %[[V5:.*]] = getelementptr inbounds i8, i8* %[[V4]], i64 8
+// CHECK: %[[V6:.*]] = bitcast i8* %[[V5]] to i8**
+// CHECK: %[[V7:.*]] = bitcast i8** %{{.*}} to i8*
+// CHECK: %[[V8:.*]] = getelementptr inbounds i8, i8* %[[V7]], i64 8
+// CHECK: %[[V9:.*]] = bitcast i8* %[[V8]] to i8**
+// CHECK: %[[V10:.*]] = bitcast i8** %[[V6]] to i16*
+// CHECK: %[[V11:.*]] = bitcast i8** %[[V9]] to i16*
+// CHECK: %[[V12:.*]] = load i16, i16* %[[V11]], align 8
+// CHECK: store i16 %[[V12]], i16* %[[V10]], align 8
+// CHECK: ret void
+
+void test_copy_constructor_Bitfield0(Bitfield0 *a) {
+  Bitfield0 t = *a;
+}
+
+// CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_t0w2_s8_t16w4_s24_t32w12_s48_t56w9(
+// CHECK: %[[V4:.*]] = load i16, i16* %{{.*}}, align 8
+// CHECK: store i16 %[[V4]], i16* %{{.*}}, align 8
+// CHECK: %[[V21:.*]] = load i32, i32* %{{.*}}, align 8
+// CHECK: store i32 %[[V21]], i32* %{{.*}}, align 8
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %{{.*}}, i8* align 8 %{{.*}}, i64 12, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %{{.*}}, i8* align 8 %{{.*}}, i64 9, i1 false)
+
+void test_copy_constructor_Bitfield1(Bitfield1 *a) {
+  Bitfield1 t = *a;
+}
Index: test/Lexer/has_feature_objc_arc.m
===================================================================
--- test/Lexer/has_feature_objc_arc.m
+++ test/Lexer/has_feature_objc_arc.m
@@ -13,8 +13,16 @@
 void no_objc_arc_weak_feature();
 #endif
 
+#if __has_feature(objc_arc_fields)
+void has_objc_arc_fields();
+#else
+void no_objc_arc_fields();
+#endif
+
 // CHECK-ARC: void has_objc_arc_feature();
 // CHECK-ARC: void has_objc_arc_weak_feature();
+// CHECK-ARC: void has_objc_arc_fields();
 
 // CHECK-ARCLITE: void has_objc_arc_feature();
 // CHECK-ARCLITE: void no_objc_arc_weak_feature();
+// CHECK-ARCLITE: void has_objc_arc_fields();
Index: test/SemaObjC/arc-decls.m
===================================================================
--- test/SemaObjC/arc-decls.m
+++ test/SemaObjC/arc-decls.m
@@ -3,7 +3,7 @@
 
 // rdar://8843524
 struct A {
-    id x; // expected-error {{ARC forbids Objective-C objects in struct}}
+    id x;
 };
 
 union u {
@@ -13,7 +13,7 @@
 @interface I {
    struct A a;
    struct B {
-    id y[10][20]; // expected-error {{ARC forbids Objective-C objects in struct}}
+    id y[10][20];
     id z;
    } b;
 
@@ -23,7 +23,7 @@
 
 // rdar://10260525
 struct r10260525 {
-  id (^block) (); // expected-error {{ARC forbids blocks in struct}}
+  id (^block) ();
 };
 
 struct S {
Index: test/SemaObjC/arc-system-header.m
===================================================================
--- test/SemaObjC/arc-system-header.m
+++ test/SemaObjC/arc-system-header.m
@@ -23,8 +23,7 @@
 }
 
 void test5(struct Test5 *p) {
-  p->field = 0; // expected-error {{'field' is unavailable in ARC}}
-                // expected-note@arc-system-header.h:25 {{field has non-trivial ownership qualification}}
+  p->field = 0;
 }
 
 id test6() {
@@ -49,8 +48,7 @@
 extern void doSomething(Test9 arg);
 
 void test9() {
-  Test9 foo2 = {0, 0}; // expected-error {{'field' is unavailable in ARC}}
-                       // expected-note@arc-system-header.h:56 {{field has non-trivial ownership qualification}}
+  Test9 foo2 = {0, 0};
   doSomething(foo2);
 }
 #endif
Index: test/SemaObjC/strong-in-c-struct.m
===================================================================
--- /dev/null
+++ test/SemaObjC/strong-in-c-struct.m
@@ -0,0 +1,56 @@
+// RUN: %clang_cc1 -triple arm64-apple-ios11 -fobjc-arc -fblocks -fobjc-runtime=ios-11.0 -fsyntax-only -verify %s
+
+typedef struct {
+  id a;
+} Strong;
+
+void callee_variadic(const char *, ...);
+
+void test_variadic(void) {
+  Strong t;
+  callee_variadic("s", t); // expected-error {{cannot pass non-trivial C object of type 'Strong' by value to variadic function}}
+}
+
+void test_jump0(int cond) {
+  switch (cond) {
+  case 0:
+    ;
+    Strong x; // expected-note {{jump bypasses initialization of variable of non-trivial C struct type}}
+    break;
+  case 1: // expected-error {{cannot jump from switch statement to this case label}}
+    x.a = 0;
+    break;
+  }
+}
+
+void test_jump1(void) {
+  static void *ips[] = { &&L0 };
+L0: // expected-note {{possible target of indirect goto}}
+  ;
+  Strong x; // expected-note {{jump exits scope of variable with non-trivial destructor}}
+  goto *ips; // expected-error {{cannot jump}}
+}
+
+typedef void (^BlockTy)(void);
+void func(BlockTy);
+void func2(Strong);
+
+void test_block_scope0(int cond) {
+  Strong x; // expected-note {{jump enters lifetime of block which captures a C struct that is non-trivial to destroy}}
+  switch (cond) {
+  case 0:
+    func(^{ func2(x); });
+    break;
+  default: // expected-error {{cannot jump from switch statement to this case label}}
+    break;
+  }
+}
+
+void test_block_scope1(void) {
+  static void *ips[] = { &&L0 };
+L0: // expected-note {{possible target of indirect goto}}
+  ;
+  Strong x; // expected-note {{jump exits scope of variable with non-trivial destructor}} expected-note {{jump exits lifetime of block which captures a C struct that is non-trivial to destroy}}
+  func(^{ func2(x); });
+  goto *ips; // expected-error {{cannot jump}}
+}