Index: cfe/trunk/docs/LanguageExtensions.rst
===================================================================
--- cfe/trunk/docs/LanguageExtensions.rst
+++ cfe/trunk/docs/LanguageExtensions.rst
@@ -1191,12 +1191,14 @@
 Clang provides support for :doc:`automated reference counting
 <AutomaticReferenceCounting>` in Objective-C, which eliminates the need
-for manual ``retain``/``release``/``autorelease`` message sends. There are two
+for manual ``retain``/``release``/``autorelease`` message sends. There are three
 feature macros associated with automatic reference counting:
 ``__has_feature(objc_arc)`` indicates the availability of automated reference
 counting in general, while ``__has_feature(objc_arc_weak)`` indicates that
 automated reference counting also includes support for ``__weak`` pointers to
-Objective-C objects.
+Objective-C objects. ``__has_feature(objc_arc_fields)`` indicates that C structs
+are allowed to have fields that are pointers to Objective-C objects managed by
+automatic reference counting.

 .. _objc-fixed-enum:
Index: cfe/trunk/include/clang/AST/Decl.h
===================================================================
--- cfe/trunk/include/clang/AST/Decl.h
+++ cfe/trunk/include/clang/AST/Decl.h
@@ -3533,6 +3533,11 @@
   /// when needed.
   mutable bool LoadedFieldsFromExternalStorage : 1;

+  /// Basic properties of non-trivial C structs.
+  bool NonTrivialToPrimitiveDefaultInitialize : 1;
+  bool NonTrivialToPrimitiveCopy : 1;
+  bool NonTrivialToPrimitiveDestroy : 1;
+
 protected:
   RecordDecl(Kind DK, TagKind TK, const ASTContext &C, DeclContext *DC,
              SourceLocation StartLoc, SourceLocation IdLoc,
@@ -3591,6 +3596,31 @@
     LoadedFieldsFromExternalStorage = val;
   }

+  /// Functions to query basic properties of non-trivial C structs.
+  bool isNonTrivialToPrimitiveDefaultInitialize() const {
+    return NonTrivialToPrimitiveDefaultInitialize;
+  }
+
+  void setNonTrivialToPrimitiveDefaultInitialize() {
+    NonTrivialToPrimitiveDefaultInitialize = true;
+  }
+
+  bool isNonTrivialToPrimitiveCopy() const {
+    return NonTrivialToPrimitiveCopy;
+  }
+
+  void setNonTrivialToPrimitiveCopy() {
+    NonTrivialToPrimitiveCopy = true;
+  }
+
+  bool isNonTrivialToPrimitiveDestroy() const {
+    return NonTrivialToPrimitiveDestroy;
+  }
+
+  void setNonTrivialToPrimitiveDestroy() {
+    NonTrivialToPrimitiveDestroy = true;
+  }
+
   /// \brief Determines whether this declaration represents the
   /// injected class name.
   ///
Index: cfe/trunk/include/clang/AST/Type.h
===================================================================
--- cfe/trunk/include/clang/AST/Type.h
+++ cfe/trunk/include/clang/AST/Type.h
@@ -1087,11 +1087,71 @@
   // true when Type is objc's weak and weak is enabled but ARC isn't.
   bool isNonWeakInMRRWithObjCWeak(const ASTContext &Context) const;

+  enum PrimitiveDefaultInitializeKind {
+    /// The type does not fall into any of the following categories. Note that
+    /// this case is zero-valued so that values of this enum can be used as a
+    /// boolean condition for non-triviality.
+    PDIK_Trivial,
+
+    /// The type is an Objective-C retainable pointer type that is qualified
+    /// with the ARC __strong qualifier.
+    PDIK_ARCStrong,
+
+    /// The type is a struct containing a field whose type is not PDIK_Trivial.
+    PDIK_Struct
+  };
+
+  /// Functions to query basic properties of non-trivial C struct types.
+
+  /// Check if this is a non-trivial type that would cause a C struct
+  /// transitively containing this type to be non-trivial to default initialize
+  /// and return the kind.
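  /// For illustration, given `struct S { __strong id obj; };` under ARC, the
  /// field's type returns PDIK_ARCStrong, the struct type `S` itself returns
  /// PDIK_Struct, and a plain `int` field returns PDIK_Trivial.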
+ PrimitiveDefaultInitializeKind + isNonTrivialToPrimitiveDefaultInitialize() const; + + enum PrimitiveCopyKind { + /// The type does not fall into any of the following categories. Note that + /// this case is zero-valued so that values of this enum can be used as a + /// boolean condition for non-triviality. + PCK_Trivial, + + /// The type would be trivial except that it is volatile-qualified. Types + /// that fall into one of the other non-trivial cases may additionally be + /// volatile-qualified. + PCK_VolatileTrivial, + + /// The type is an Objective-C retainable pointer type that is qualified + /// with the ARC __strong qualifier. + PCK_ARCStrong, + + /// The type is a struct containing a field whose type is neither + /// PCK_Trivial nor PCK_VolatileTrivial. + /// Note that a C++ struct type does not necessarily match this; C++ copying + /// semantics are too complex to express here, in part because they depend + /// on the exact constructor or assignment operator that is chosen by + /// overload resolution to do the copy. + PCK_Struct + }; + + /// Check if this is a non-trivial type that would cause a C struct + /// transitively containing this type to be non-trivial to copy and return the + /// kind. + PrimitiveCopyKind isNonTrivialToPrimitiveCopy() const; + + /// Check if this is a non-trivial type that would cause a C struct + /// transitively containing this type to be non-trivial to destructively + /// move and return the kind. Destructive move in this context is a C++-style + /// move in which the source object is placed in a valid but unspecified state + /// after it is moved, as opposed to a truly destructive move in which the + /// source object is placed in an uninitialized state. + PrimitiveCopyKind isNonTrivialToPrimitiveDestructiveMove() const; + enum DestructionKind { DK_none, DK_cxx_destructor, DK_objc_strong_lifetime, - DK_objc_weak_lifetime + DK_objc_weak_lifetime, + DK_nontrivial_c_struct }; /// Returns a nonzero value if objects of this type require Index: cfe/trunk/include/clang/Basic/DiagnosticSemaKinds.td =================================================================== --- cfe/trunk/include/clang/Basic/DiagnosticSemaKinds.td +++ cfe/trunk/include/clang/Basic/DiagnosticSemaKinds.td @@ -5119,12 +5119,17 @@ "jump bypasses initialization of __strong variable">; def note_protected_by_objc_weak_init : Note< "jump bypasses initialization of __weak variable">; +def note_protected_by_non_trivial_c_struct_init : Note< + "jump bypasses initialization of variable of non-trivial C struct type">; def note_enters_block_captures_cxx_obj : Note< "jump enters lifetime of block which captures a destructible C++ object">; def note_enters_block_captures_strong : Note< "jump enters lifetime of block which strongly captures a variable">; def note_enters_block_captures_weak : Note< "jump enters lifetime of block which weakly captures a variable">; +def note_enters_block_captures_non_trivial_c_struct : Note< + "jump enters lifetime of block which captures a C struct that is non-trivial " + "to destroy">; def note_exits_cleanup : Note< "jump exits scope of variable with __attribute__((cleanup))">; @@ -5165,6 +5170,9 @@ "jump exits lifetime of block which strongly captures a variable">; def note_exits_block_captures_weak : Note< "jump exits lifetime of block which weakly captures a variable">; +def note_exits_block_captures_non_trivial_c_struct : Note< + "jump exits lifetime of block which captures a C struct that is non-trivial " + "to destroy">; def err_func_returning_qualified_void 
: ExtWarn< "function cannot return qualified void type %0">, @@ -7183,6 +7191,10 @@ "cannot pass object with interface type %1 by value to variadic " "%select{function|block|method|constructor}2; expected type from format " "string was %3">; +def err_cannot_pass_non_trivial_c_struct_to_vararg : Error< + "cannot pass non-trivial C object of type %0 by value to variadic " + "%select{function|block|method|constructor}1">; + def err_cannot_pass_objc_interface_to_vararg : Error< "cannot pass object with interface type %0 by value through variadic " Index: cfe/trunk/lib/AST/ASTContext.cpp =================================================================== --- cfe/trunk/lib/AST/ASTContext.cpp +++ cfe/trunk/lib/AST/ASTContext.cpp @@ -2642,7 +2642,8 @@ bool ASTContext::isParamDestroyedInCallee(QualType T) const { return getTargetInfo().getCXXABI().areArgsDestroyedLeftToRightInCallee() || - T.hasTrivialABIOverride(); + T.hasTrivialABIOverride() || + T.isDestructedType() == QualType::DK_nontrivial_c_struct; } /// getComplexType - Return the uniqued reference to the type for a complex @@ -5771,6 +5772,11 @@ return true; } + // The block needs copy/destroy helpers if Ty is non-trivial to destructively + // move or destroy. + if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType()) + return true; + if (!Ty->isObjCRetainableType()) return false; Qualifiers qs = Ty.getQualifiers(); @@ -5784,13 +5790,12 @@ case Qualifiers::OCL_ExplicitNone: case Qualifiers::OCL_Autoreleasing: return false; - - // Tell the runtime that this is ARC __weak, called by the - // byref routines. + + // These cases should have been taken care of when checking the type's + // non-triviality. case Qualifiers::OCL_Weak: - // ARC __strong __block variables need to be retained. 
case Qualifiers::OCL_Strong: - return true; + llvm_unreachable("impossible"); } llvm_unreachable("fell out of lifetime switch!"); } Index: cfe/trunk/lib/AST/Decl.cpp =================================================================== --- cfe/trunk/lib/AST/Decl.cpp +++ cfe/trunk/lib/AST/Decl.cpp @@ -3929,7 +3929,9 @@ : TagDecl(DK, TK, C, DC, IdLoc, Id, PrevDecl, StartLoc), HasFlexibleArrayMember(false), AnonymousStructOrUnion(false), HasObjectMember(false), HasVolatileMember(false), - LoadedFieldsFromExternalStorage(false) { + LoadedFieldsFromExternalStorage(false), + NonTrivialToPrimitiveDefaultInitialize(false), + NonTrivialToPrimitiveCopy(false), NonTrivialToPrimitiveDestroy(false) { assert(classof(static_cast(this)) && "Invalid Kind!"); } Index: cfe/trunk/lib/AST/Type.cpp =================================================================== --- cfe/trunk/lib/AST/Type.cpp +++ cfe/trunk/lib/AST/Type.cpp @@ -2208,6 +2208,38 @@ getObjCLifetime() != Qualifiers::OCL_Weak; } +QualType::PrimitiveDefaultInitializeKind +QualType::isNonTrivialToPrimitiveDefaultInitialize() const { + if (const auto *RT = + getTypePtr()->getBaseElementTypeUnsafe()->getAs()) + if (RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) + return PDIK_Struct; + + Qualifiers::ObjCLifetime Lifetime = getQualifiers().getObjCLifetime(); + if (Lifetime == Qualifiers::OCL_Strong) + return PDIK_ARCStrong; + + return PDIK_Trivial; +} + +QualType::PrimitiveCopyKind QualType::isNonTrivialToPrimitiveCopy() const { + if (const auto *RT = + getTypePtr()->getBaseElementTypeUnsafe()->getAs()) + if (RT->getDecl()->isNonTrivialToPrimitiveCopy()) + return PCK_Struct; + + Qualifiers Qs = getQualifiers(); + if (Qs.getObjCLifetime() == Qualifiers::OCL_Strong) + return PCK_ARCStrong; + + return Qs.hasVolatile() ? PCK_VolatileTrivial : PCK_Trivial; +} + +QualType::PrimitiveCopyKind +QualType::isNonTrivialToPrimitiveDestructiveMove() const { + return isNonTrivialToPrimitiveCopy(); +} + bool Type::isLiteralType(const ASTContext &Ctx) const { if (isDependentType()) return false; @@ -3896,12 +3928,20 @@ return DK_objc_weak_lifetime; } - /// Currently, the only destruction kind we recognize is C++ objects - /// with non-trivial destructors. - const CXXRecordDecl *record = - type->getBaseElementTypeUnsafe()->getAsCXXRecordDecl(); - if (record && record->hasDefinition() && !record->hasTrivialDestructor()) - return DK_cxx_destructor; + if (const auto *RT = + type->getBaseElementTypeUnsafe()->getAs()) { + const RecordDecl *RD = RT->getDecl(); + if (const auto *CXXRD = dyn_cast(RD)) { + /// Check if this is a C++ object with a non-trivial destructor. + if (CXXRD->hasDefinition() && !CXXRD->hasTrivialDestructor()) + return DK_cxx_destructor; + } else { + /// Check if this is a C struct that is non-trivial to destroy or an array + /// that contains such a struct. + if (RD->isNonTrivialToPrimitiveDestroy()) + return DK_nontrivial_c_struct; + } + } return DK_none; } Index: cfe/trunk/lib/CodeGen/CGBlocks.cpp =================================================================== --- cfe/trunk/lib/CodeGen/CGBlocks.cpp +++ cfe/trunk/lib/CodeGen/CGBlocks.cpp @@ -477,6 +477,14 @@ info.NeedsCopyDispose = true; info.HasCXXObject = true; + // So do C structs that require non-trivial copy construction or + // destruction. + } else if (variable->getType().isNonTrivialToPrimitiveCopy() == + QualType::PCK_Struct || + variable->getType().isDestructedType() == + QualType::DK_nontrivial_c_struct) { + info.NeedsCopyDispose = true; + // And so do types with destructors. 
} else if (CGM.getLangOpts().CPlusPlus) { if (const CXXRecordDecl *record = @@ -1511,6 +1519,7 @@ CXXRecord, // Copy or destroy ARCWeak, ARCStrong, + NonTrivialCStruct, BlockObject, // Assign or release None }; @@ -1546,39 +1555,46 @@ Flags |= BLOCK_FIELD_IS_WEAK; return std::make_pair(BlockCaptureEntityKind::BlockObject, Flags); } - if (!T->isObjCRetainableType()) - // For all other types, the memcpy is fine. - return std::make_pair(BlockCaptureEntityKind::None, Flags); Flags = BLOCK_FIELD_IS_OBJECT; bool isBlockPointer = T->isBlockPointerType(); if (isBlockPointer) Flags = BLOCK_FIELD_IS_BLOCK; - // Special rules for ARC captures: - Qualifiers QS = T.getQualifiers(); - - // We need to register __weak direct captures with the runtime. - if (QS.getObjCLifetime() == Qualifiers::OCL_Weak) - return std::make_pair(BlockCaptureEntityKind::ARCWeak, Flags); - - // We need to retain the copied value for __strong direct captures. - if (QS.getObjCLifetime() == Qualifiers::OCL_Strong) { - // If it's a block pointer, we have to copy the block and - // assign that to the destination pointer, so we might as - // well use _Block_object_assign. Otherwise we can avoid that. + switch (T.isNonTrivialToPrimitiveCopy()) { + case QualType::PCK_Struct: + return std::make_pair(BlockCaptureEntityKind::NonTrivialCStruct, + BlockFieldFlags()); + case QualType::PCK_ARCStrong: + // We need to retain the copied value for __strong direct captures. + // If it's a block pointer, we have to copy the block and assign that to + // the destination pointer, so we might as well use _Block_object_assign. + // Otherwise we can avoid that. return std::make_pair(!isBlockPointer ? BlockCaptureEntityKind::ARCStrong : BlockCaptureEntityKind::BlockObject, Flags); - } + case QualType::PCK_Trivial: + case QualType::PCK_VolatileTrivial: { + if (!T->isObjCRetainableType()) + // For all other types, the memcpy is fine. + return std::make_pair(BlockCaptureEntityKind::None, BlockFieldFlags()); - // Non-ARC captures of retainable pointers are strong and - // therefore require a call to _Block_object_assign. - if (!QS.getObjCLifetime() && !LangOpts.ObjCAutoRefCount) - return std::make_pair(BlockCaptureEntityKind::BlockObject, Flags); + // Special rules for ARC captures: + Qualifiers QS = T.getQualifiers(); + + // We need to register __weak direct captures with the runtime. + if (QS.getObjCLifetime() == Qualifiers::OCL_Weak) + return std::make_pair(BlockCaptureEntityKind::ARCWeak, Flags); + + // Non-ARC captures of retainable pointers are strong and + // therefore require a call to _Block_object_assign. + if (!QS.getObjCLifetime() && !LangOpts.ObjCAutoRefCount) + return std::make_pair(BlockCaptureEntityKind::BlockObject, Flags); - // Otherwise the memcpy is fine. - return std::make_pair(BlockCaptureEntityKind::None, Flags); + // Otherwise the memcpy is fine. + return std::make_pair(BlockCaptureEntityKind::None, BlockFieldFlags()); + } + } } /// Find the set of block captures that need to be explicitly copied or destroy. @@ -1675,6 +1691,13 @@ EmitSynthesizedCXXCopyCtor(dstField, srcField, CI.getCopyExpr()); } else if (CopiedCapture.Kind == BlockCaptureEntityKind::ARCWeak) { EmitARCCopyWeak(dstField, srcField); + // If this is a C struct that requires non-trivial copy construction, emit a + // call to its copy constructor. 
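  // For example, assuming a by-value capture of
  // `typedef struct { int i; __strong id f1; } Strong;` under ARC, the call
  // below would target a synthesized helper with a mangled name along the
  // lines of `__copy_constructor_8_8_t0w4_s8` (name shown for illustration
  // only; see the mangling scheme in CGNonTrivialStruct.cpp).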
+ } else if (CopiedCapture.Kind == + BlockCaptureEntityKind::NonTrivialCStruct) { + QualType varType = CI.getVariable()->getType(); + callCStructCopyConstructor(MakeAddrLValue(dstField, varType), + MakeAddrLValue(srcField, varType)); } else { llvm::Value *srcValue = Builder.CreateLoad(srcField, "blockcopy.src"); if (CopiedCapture.Kind == BlockCaptureEntityKind::ARCStrong) { @@ -1730,50 +1753,50 @@ return llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy); } +static BlockFieldFlags +getBlockFieldFlagsForObjCObjectPointer(const BlockDecl::Capture &CI, + QualType T) { + BlockFieldFlags Flags = BLOCK_FIELD_IS_OBJECT; + if (T->isBlockPointerType()) + Flags = BLOCK_FIELD_IS_BLOCK; + return Flags; +} + static std::pair computeDestroyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T, const LangOptions &LangOpts) { - BlockFieldFlags Flags; if (CI.isByRef()) { - Flags = BLOCK_FIELD_IS_BYREF; + BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF; if (T.isObjCGCWeak()) Flags |= BLOCK_FIELD_IS_WEAK; return std::make_pair(BlockCaptureEntityKind::BlockObject, Flags); } - if (const CXXRecordDecl *Record = T->getAsCXXRecordDecl()) { - if (Record->hasTrivialDestructor()) - return std::make_pair(BlockCaptureEntityKind::None, BlockFieldFlags()); + switch (T.isDestructedType()) { + case QualType::DK_cxx_destructor: return std::make_pair(BlockCaptureEntityKind::CXXRecord, BlockFieldFlags()); + case QualType::DK_objc_strong_lifetime: + // Use objc_storeStrong for __strong direct captures; the + // dynamic tools really like it when we do this. + return std::make_pair(BlockCaptureEntityKind::ARCStrong, + getBlockFieldFlagsForObjCObjectPointer(CI, T)); + case QualType::DK_objc_weak_lifetime: + // Support __weak direct captures. + return std::make_pair(BlockCaptureEntityKind::ARCWeak, + getBlockFieldFlagsForObjCObjectPointer(CI, T)); + case QualType::DK_nontrivial_c_struct: + return std::make_pair(BlockCaptureEntityKind::NonTrivialCStruct, + BlockFieldFlags()); + case QualType::DK_none: { + // Non-ARC captures are strong, and we need to use _Block_object_dispose. + if (T->isObjCRetainableType() && !T.getQualifiers().hasObjCLifetime() && + !LangOpts.ObjCAutoRefCount) + return std::make_pair(BlockCaptureEntityKind::BlockObject, + getBlockFieldFlagsForObjCObjectPointer(CI, T)); + // Otherwise, we have nothing to do. + return std::make_pair(BlockCaptureEntityKind::None, BlockFieldFlags()); + } } - - // Other types don't need to be destroy explicitly. - if (!T->isObjCRetainableType()) - return std::make_pair(BlockCaptureEntityKind::None, Flags); - - Flags = BLOCK_FIELD_IS_OBJECT; - if (T->isBlockPointerType()) - Flags = BLOCK_FIELD_IS_BLOCK; - - // Special rules for ARC captures. - Qualifiers QS = T.getQualifiers(); - - // Use objc_storeStrong for __strong direct captures; the - // dynamic tools really like it when we do this. - if (QS.getObjCLifetime() == Qualifiers::OCL_Strong) - return std::make_pair(BlockCaptureEntityKind::ARCStrong, Flags); - - // Support __weak direct captures. - if (QS.getObjCLifetime() == Qualifiers::OCL_Weak) - return std::make_pair(BlockCaptureEntityKind::ARCWeak, Flags); - - // Non-ARC captures are strong, and we need to use - // _Block_object_dispose. - if (!QS.hasObjCLifetime() && !LangOpts.ObjCAutoRefCount) - return std::make_pair(BlockCaptureEntityKind::BlockObject, Flags); - - // Otherwise, we have nothing to do. 
- return std::make_pair(BlockCaptureEntityKind::None, Flags); } /// Generate the destroy-helper function for a block closure object: @@ -1851,6 +1874,13 @@ } else if (DestroyedCapture.Kind == BlockCaptureEntityKind::ARCStrong) { EmitARCDestroyStrong(srcField, ARCImpreciseLifetime); + // If this is a C struct that requires non-trivial destruction, emit a call + // to its destructor. + } else if (DestroyedCapture.Kind == + BlockCaptureEntityKind::NonTrivialCStruct) { + QualType varType = CI.getVariable()->getType(); + pushDestroy(varType.isDestructedType(), srcField, varType); + // Otherwise we call _Block_object_dispose. It wouldn't be too // hard to just emit this as a cleanup if we wanted to make sure // that things were done in reverse. @@ -2018,6 +2048,36 @@ id.AddPointer(VarType.getCanonicalType().getAsOpaquePtr()); } }; + +/// Emits the copy/dispose helpers for a __block variable that is a non-trivial +/// C struct. +class NonTrivialCStructByrefHelpers final : public BlockByrefHelpers { + QualType VarType; + +public: + NonTrivialCStructByrefHelpers(CharUnits alignment, QualType type) + : BlockByrefHelpers(alignment), VarType(type) {} + + void emitCopy(CodeGenFunction &CGF, Address destField, + Address srcField) override { + CGF.callCStructMoveConstructor(CGF.MakeAddrLValue(destField, VarType), + CGF.MakeAddrLValue(srcField, VarType)); + } + + bool needsDispose() const override { + return VarType.isDestructedType(); + } + + void emitDispose(CodeGenFunction &CGF, Address field) override { + EHScopeStack::stable_iterator cleanupDepth = CGF.EHStack.stable_begin(); + CGF.pushDestroy(VarType.isDestructedType(), field, VarType); + CGF.PopCleanupBlocks(cleanupDepth); + } + + void profileImpl(llvm::FoldingSetNodeID &id) const override { + id.AddPointer(VarType.getCanonicalType().getAsOpaquePtr()); + } +}; } // end anonymous namespace static llvm::Constant * @@ -2203,6 +2263,13 @@ CGM, byrefInfo, CXXByrefHelpers(valueAlignment, type, copyExpr)); } + // If type is a non-trivial C struct type that is non-trivial to + // destructly move or destroy, build the copy and dispose helpers. + if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct || + type.isDestructedType() == QualType::DK_nontrivial_c_struct) + return ::buildByrefHelpers( + CGM, byrefInfo, NonTrivialCStructByrefHelpers(valueAlignment, type)); + // Otherwise, if we don't have a retainable type, there's nothing to do. // that the runtime does extra copies. 
if (!type->isObjCRetainableType()) return nullptr; Index: cfe/trunk/lib/CodeGen/CGCall.cpp =================================================================== --- cfe/trunk/lib/CodeGen/CGCall.cpp +++ cfe/trunk/lib/CodeGen/CGCall.cpp @@ -3432,10 +3432,15 @@ QualType Ty; void Emit(CodeGenFunction &CGF, Flags flags) override { - const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); - assert(!Dtor->isTrivial()); - CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, - /*Delegating=*/false, Addr); + QualType::DestructionKind DtorKind = Ty.isDestructedType(); + if (DtorKind == QualType::DK_cxx_destructor) { + const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); + assert(!Dtor->isTrivial()); + CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, + /*Delegating=*/false, Addr); + } else { + CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty)); + } } }; @@ -3485,11 +3490,16 @@ else Slot = CreateAggTemp(type, "agg.tmp"); - const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); - bool DestroyedInCallee = - RD && RD->hasNonTrivialDestructor() && - (CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default || - RD->hasTrivialABIOverride()); + bool DestroyedInCallee = true, NeedsEHCleanup = true; + if (const auto *RD = type->getAsCXXRecordDecl()) { + DestroyedInCallee = + RD && RD->hasNonTrivialDestructor() && + (CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default || + RD->hasTrivialABIOverride()); + } else { + NeedsEHCleanup = needsEHCleanup(type.isDestructedType()); + } + if (DestroyedInCallee) Slot.setExternallyDestructed(); @@ -3497,7 +3507,7 @@ RValue RV = Slot.asRValue(); args.add(RV, type); - if (DestroyedInCallee) { + if (DestroyedInCallee && NeedsEHCleanup) { // Create a no-op GEP between the placeholder and the cleanup so we can // RAUW it successfully. It also serves as a marker of the first // instruction where the cleanup is active. Index: cfe/trunk/lib/CodeGen/CGDecl.cpp =================================================================== --- cfe/trunk/lib/CodeGen/CGDecl.cpp +++ cfe/trunk/lib/CodeGen/CGDecl.cpp @@ -1289,6 +1289,19 @@ if (emission.IsByRef) emitByrefStructureInit(emission); + // Initialize the variable here if it doesn't have a initializer and it is a + // C struct that is non-trivial to initialize or an array containing such a + // struct. + if (!Init && + type.isNonTrivialToPrimitiveDefaultInitialize() == + QualType::PDIK_Struct) { + LValue Dst = MakeAddrLValue(emission.getAllocatedAddress(), type); + if (emission.IsByRef) + drillIntoBlockVariable(*this, Dst, &D); + defaultInitNonTrivialCStructVar(Dst); + return; + } + if (isTrivialInitializer(Init)) return; @@ -1464,6 +1477,10 @@ case QualType::DK_objc_weak_lifetime: break; + + case QualType::DK_nontrivial_c_struct: + destroyer = CodeGenFunction::destroyNonTrivialCStruct; + break; } // If we haven't chosen a more specific destroyer, use the default. @@ -1525,6 +1542,8 @@ return destroyARCStrongPrecise; case QualType::DK_objc_weak_lifetime: return destroyARCWeak; + case QualType::DK_nontrivial_c_struct: + return destroyNonTrivialCStruct; } llvm_unreachable("Unknown DestructionKind"); } @@ -1876,9 +1895,12 @@ // cleanup. 
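  // For a parameter of non-trivial C struct type, isParamDestroyedInCallee now
  // returns true (see the ASTContext::isParamDestroyedInCallee change above),
  // so the callee pushes a DK_nontrivial_c_struct cleanup below that runs the
  // synthesized destructor for the argument.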
if (!IsScalar && !CurFuncIsThunk && getContext().isParamDestroyedInCallee(Ty)) { - const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); - if (RD && RD->hasNonTrivialDestructor()) - pushDestroy(QualType::DK_cxx_destructor, DeclPtr, Ty); + if (QualType::DestructionKind DtorKind = Ty.isDestructedType()) { + assert((DtorKind == QualType::DK_cxx_destructor || + DtorKind == QualType::DK_nontrivial_c_struct) && + "unexpected destructor type"); + pushDestroy(DtorKind, DeclPtr, Ty); + } } } else { // Otherwise, create a temporary to hold the value. Index: cfe/trunk/lib/CodeGen/CGDeclCXX.cpp =================================================================== --- cfe/trunk/lib/CodeGen/CGDeclCXX.cpp +++ cfe/trunk/lib/CodeGen/CGDeclCXX.cpp @@ -79,6 +79,7 @@ case QualType::DK_objc_strong_lifetime: case QualType::DK_objc_weak_lifetime: + case QualType::DK_nontrivial_c_struct: // We don't care about releasing objects during process teardown. assert(!D.getTLSKind() && "should have rejected this"); return; Index: cfe/trunk/lib/CodeGen/CGExprAgg.cpp =================================================================== --- cfe/trunk/lib/CodeGen/CGExprAgg.cpp +++ cfe/trunk/lib/CodeGen/CGExprAgg.cpp @@ -77,8 +77,15 @@ /// then loads the result into DestPtr. void EmitAggLoadOfLValue(const Expr *E); + enum ExprValueKind { + EVK_RValue, + EVK_NonRValue + }; + /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired. - void EmitFinalDestCopy(QualType type, const LValue &src); + /// SrcIsRValue is true if source comes from an RValue. + void EmitFinalDestCopy(QualType type, const LValue &src, + ExprValueKind SrcValueKind = EVK_NonRValue); void EmitFinalDestCopy(QualType type, RValue src); void EmitCopy(QualType type, const AggValueSlot &dest, const AggValueSlot &src); @@ -246,6 +253,13 @@ /// directly into the return value slot. Otherwise, a final move /// will be performed. void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue src) { + // Push destructor if the result is ignored and the type is a C struct that + // is non-trivial to destroy. + QualType Ty = E->getType(); + if (Dest.isIgnored() && + Ty.isDestructedType() == QualType::DK_nontrivial_c_struct) + CGF.pushDestroy(Ty.isDestructedType(), src.getAggregateAddress(), Ty); + if (shouldUseDestForReturnSlot()) { // Logically, Dest.getAddr() should equal Src.getAggregateAddr(). // The possibility of undef rvalues complicates that a lot, @@ -262,11 +276,12 @@ void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) { assert(src.isAggregate() && "value must be aggregate value!"); LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type); - EmitFinalDestCopy(type, srcLV); + EmitFinalDestCopy(type, srcLV, EVK_RValue); } /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired. -void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src) { +void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src, + ExprValueKind SrcValueKind) { // If Dest is ignored, then we're evaluating an aggregate expression // in a context that doesn't care about the result. Note that loads // from volatile l-values force the existence of a non-ignored @@ -274,6 +289,28 @@ if (Dest.isIgnored()) return; + // Copy non-trivial C structs here. + LValue DstLV = CGF.MakeAddrLValue( + Dest.getAddress(), Dest.isVolatile() ? 
type.withVolatile() : type); + + if (SrcValueKind == EVK_RValue) { + if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) { + if (Dest.isPotentiallyAliased()) + CGF.callCStructMoveAssignmentOperator(DstLV, src); + else + CGF.callCStructMoveConstructor(DstLV, src); + return; + } + } else { + if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) { + if (Dest.isPotentiallyAliased()) + CGF.callCStructCopyAssignmentOperator(DstLV, src); + else + CGF.callCStructCopyConstructor(DstLV, src); + return; + } + } + AggValueSlot srcAgg = AggValueSlot::forLValue(src, AggValueSlot::IsDestructed, needsGC(type), AggValueSlot::IsAliased); Index: cfe/trunk/lib/CodeGen/CGNonTrivialStruct.cpp =================================================================== --- cfe/trunk/lib/CodeGen/CGNonTrivialStruct.cpp +++ cfe/trunk/lib/CodeGen/CGNonTrivialStruct.cpp @@ -0,0 +1,855 @@ +//===--- CGNonTrivialStruct.cpp - Emit Special Functions for C Structs ----===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines functions to generate various special functions for C +// structs. +// +//===----------------------------------------------------------------------===// + +#include "CodeGenFunction.h" +#include "CodeGenModule.h" +#include "llvm/Support/ScopedPrinter.h" +#include + +using namespace clang; +using namespace CodeGen; + +// Return the size of a field in number of bits. +static uint64_t getFieldSize(const FieldDecl *FD, ASTContext &Ctx) { + if (FD->isBitField()) + return FD->getBitWidthValue(Ctx); + return Ctx.getTypeSize(FD->getType()); +} + +namespace { +enum { DstIdx = 0, SrcIdx = 1 }; +const char *ValNameStr[2] = {"dst", "src"}; + +template struct DestructedTypeVisitor { + template RetTy visit(QualType FT, Ts &&... Args) { + return asDerived().visit(FT.isDestructedType(), FT, + std::forward(Args)...); + } + + template + RetTy visit(QualType::DestructionKind DK, QualType FT, Ts &&... Args) { + if (asDerived().getContext().getAsArrayType(FT)) + return asDerived().visitArray(DK, FT, std::forward(Args)...); + + switch (DK) { + case QualType::DK_objc_strong_lifetime: + return asDerived().visitARCStrong(FT, std::forward(Args)...); + case QualType::DK_nontrivial_c_struct: + return asDerived().visitStruct(FT, std::forward(Args)...); + case QualType::DK_none: + return asDerived().visitTrivial(FT, std::forward(Args)...); + case QualType::DK_cxx_destructor: + return asDerived().visitCXXDestructor(FT, std::forward(Args)...); + case QualType::DK_objc_weak_lifetime: + return asDerived().visitARCWeak(FT, std::forward(Args)...); + } + + llvm_unreachable("unknown destruction kind"); + } + + Derived &asDerived() { return static_cast(*this); } +}; + +template +struct DefaultInitializedTypeVisitor { + template RetTy visit(QualType FT, Ts &&... Args) { + return asDerived().visit(FT.isNonTrivialToPrimitiveDefaultInitialize(), FT, + std::forward(Args)...); + } + + template + RetTy visit(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT, + Ts &&... 
Args) { + if (asDerived().getContext().getAsArrayType(FT)) + return asDerived().visitArray(PDIK, FT, std::forward(Args)...); + + switch (PDIK) { + case QualType::PDIK_ARCStrong: + return asDerived().visitARCStrong(FT, std::forward(Args)...); + case QualType::PDIK_Struct: + return asDerived().visitStruct(FT, std::forward(Args)...); + case QualType::PDIK_Trivial: + return asDerived().visitTrivial(FT, std::forward(Args)...); + } + + llvm_unreachable("unknown default-initialize kind"); + } + + Derived &asDerived() { return static_cast(*this); } +}; + +template +struct CopiedTypeVisitor { + template RetTy visit(QualType FT, Ts &&... Args) { + QualType::PrimitiveCopyKind PCK = + IsMove ? FT.isNonTrivialToPrimitiveDestructiveMove() + : FT.isNonTrivialToPrimitiveCopy(); + return asDerived().visit(PCK, FT, std::forward(Args)...); + } + + template + RetTy visit(QualType::PrimitiveCopyKind PCK, QualType FT, Ts &&... Args) { + asDerived().preVisit(PCK, FT, std::forward(Args)...); + + if (asDerived().getContext().getAsArrayType(FT)) + return asDerived().visitArray(PCK, FT, std::forward(Args)...); + + switch (PCK) { + case QualType::PCK_ARCStrong: + return asDerived().visitARCStrong(FT, std::forward(Args)...); + case QualType::PCK_Struct: + return asDerived().visitStruct(FT, std::forward(Args)...); + case QualType::PCK_Trivial: + return asDerived().visitTrivial(FT, std::forward(Args)...); + case QualType::PCK_VolatileTrivial: + return asDerived().visitVolatileTrivial(FT, std::forward(Args)...); + } + + llvm_unreachable("unknown primitive copy kind"); + } + + Derived &asDerived() { return static_cast(*this); } +}; + +template struct StructVisitor { + StructVisitor(ASTContext &Ctx) : Ctx(Ctx) {} + + template + void visitStructFields(QualType QT, CharUnits CurStructOffset, Ts... Args) { + const RecordDecl *RD = QT->castAs()->getDecl(); + + // Iterate over the fields of the struct. + for (const FieldDecl *FD : RD->fields()) { + QualType FT = FD->getType(); + FT = QT.isVolatileQualified() ? FT.withVolatile() : FT; + asDerived().visit(FT, FD, CurStructOffset, Args...); + } + + asDerived().flushTrivialFields(Args...); + } + + template void visitTrivial(Ts... Args) {} + + template void visitARCWeak(Ts... Args) { + // FIXME: remove this when visitARCWeak is implemented in the subclasses. + llvm_unreachable("weak field is not expected"); + } + + template void visitCXXDestructor(Ts... Args) { + llvm_unreachable("field of a C++ struct type is not expected"); + } + + template void flushTrivialFields(Ts... Args) {} + + uint64_t getFieldOffsetInBits(const FieldDecl *FD) { + return FD ? Ctx.getASTRecordLayout(FD->getParent()) + .getFieldOffset(FD->getFieldIndex()) + : 0; + } + + CharUnits getFieldOffset(const FieldDecl *FD) { + return Ctx.toCharUnitsFromBits(getFieldOffsetInBits(FD)); + } + + Derived &asDerived() { return static_cast(*this); } + + ASTContext &getContext() { return Ctx; } + ASTContext &Ctx; +}; + +template +struct CopyStructVisitor : StructVisitor, + CopiedTypeVisitor { + using StructVisitor::asDerived; + + CopyStructVisitor(ASTContext &Ctx) : StructVisitor(Ctx) {} + + template + void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT, + const FieldDecl *FD, CharUnits CurStructOffsset, + Ts &&... Args) { + if (PCK) + asDerived().flushTrivialFields(std::forward(Args)...); + } + + template + void visitTrivial(QualType FT, const FieldDecl *FD, CharUnits CurStructOffset, + Ts... 
Args) { + assert(!FT.isVolatileQualified() && "volatile field not expected"); + ASTContext &Ctx = asDerived().getContext(); + uint64_t FieldSize = getFieldSize(FD, Ctx); + + // Ignore zero-sized fields. + if (FieldSize == 0) + return; + + uint64_t FStartInBits = asDerived().getFieldOffsetInBits(FD); + uint64_t FEndInBits = FStartInBits + FieldSize; + uint64_t RoundedFEnd = llvm::alignTo(FEndInBits, Ctx.getCharWidth()); + + // Set Start if this is the first field of a sequence of trivial fields. + if (Start == End) + Start = CurStructOffset + Ctx.toCharUnitsFromBits(FStartInBits); + End = CurStructOffset + Ctx.toCharUnitsFromBits(RoundedFEnd); + } + + CharUnits Start = CharUnits::Zero(), End = CharUnits::Zero(); +}; + +// This function creates the mangled name of a special function of a non-trivial +// C struct. Since there is no ODR in C, the function is mangled based on the +// struct contents and not the name. The mangled name has the following +// structure: +// +// ::= "_" +// ::= "__destructor_" | "__default_constructor_" | +// "__copy_constructor_" | "__move_constructor_" | +// "__copy_assignment_" | "__move_assignment_" +// ::= ["_" ] +// ::= + +// ::= | +// ::= | | +// +// ::= "_AB" "s" "n" +// "_AE" +// ::= +// ::= "_s" ["b"] ["v"] +// ::= "_t" ["v"] "_" + +template struct GenFuncNameBase { + std::string getVolatileOffsetStr(bool IsVolatile, CharUnits Offset) { + std::string S; + if (IsVolatile) + S = "v"; + S += llvm::to_string(Offset.getQuantity()); + return S; + } + + void visitARCStrong(QualType FT, const FieldDecl *FD, + CharUnits CurStructOffset) { + appendStr("_s"); + if (FT->isBlockPointerType()) + appendStr("b"); + CharUnits FieldOffset = CurStructOffset + asDerived().getFieldOffset(FD); + appendStr(getVolatileOffsetStr(FT.isVolatileQualified(), FieldOffset)); + } + + void visitStruct(QualType QT, const FieldDecl *FD, + CharUnits CurStructOffset) { + CharUnits FieldOffset = CurStructOffset + asDerived().getFieldOffset(FD); + asDerived().visitStructFields(QT, FieldOffset); + } + + template + void visitArray(FieldKind FK, QualType QT, const FieldDecl *FD, + CharUnits CurStructOffset) { + // String for non-volatile trivial fields is emitted when + // flushTrivialFields is called. + if (!FK) + return asDerived().visitTrivial(QT, FD, CurStructOffset); + + CharUnits FieldOffset = CurStructOffset + asDerived().getFieldOffset(FD); + ASTContext &Ctx = asDerived().getContext(); + const auto *AT = Ctx.getAsConstantArrayType(QT); + unsigned NumElts = Ctx.getConstantArrayElementCount(AT); + QualType EltTy = Ctx.getBaseElementType(AT); + CharUnits EltSize = Ctx.getTypeSizeInChars(EltTy); + appendStr("_AB" + llvm::to_string(FieldOffset.getQuantity()) + "s" + + llvm::to_string(EltSize.getQuantity()) + "n" + + llvm::to_string(NumElts)); + EltTy = QT.isVolatileQualified() ? EltTy.withVolatile() : EltTy; + asDerived().visit(FK, EltTy, nullptr, FieldOffset); + appendStr("_AE"); + } + + void appendStr(StringRef Str) { Name += Str; } + + std::string getName(QualType QT, bool IsVolatile) { + QT = IsVolatile ? 
QT.withVolatile() : QT; + asDerived().visitStructFields(QT, CharUnits::Zero()); + return Name; + } + + Derived &asDerived() { return static_cast(*this); } + + std::string Name; +}; + +template +struct GenUnaryFuncName : StructVisitor, GenFuncNameBase { + GenUnaryFuncName(StringRef Prefix, CharUnits DstAlignment, ASTContext &Ctx) + : StructVisitor(Ctx) { + this->appendStr(Prefix); + this->appendStr(llvm::to_string(DstAlignment.getQuantity())); + } +}; + +// Helper function to create a null constant. +static llvm::Constant *getNullForVariable(Address Addr) { + llvm::Type *Ty = Addr.getElementType(); + return llvm::ConstantPointerNull::get(cast(Ty)); +} + +template +struct GenBinaryFuncName : CopyStructVisitor, IsMove>, + GenFuncNameBase> { + + GenBinaryFuncName(StringRef Prefix, CharUnits DstAlignment, + CharUnits SrcAlignment, ASTContext &Ctx) + : CopyStructVisitor, IsMove>(Ctx) { + this->appendStr(Prefix); + this->appendStr(llvm::to_string(DstAlignment.getQuantity())); + this->appendStr("_" + llvm::to_string(SrcAlignment.getQuantity())); + } + + void flushTrivialFields() { + if (this->Start == this->End) + return; + + this->appendStr("_t" + llvm::to_string(this->Start.getQuantity()) + "w" + + llvm::to_string((this->End - this->Start).getQuantity())); + + this->Start = this->End = CharUnits::Zero(); + } + + void visitVolatileTrivial(QualType FT, const FieldDecl *FD, + CharUnits CurStackOffset) { + // Because volatile fields can be bit-fields and are individually copied, + // their offset and width are in bits. + uint64_t OffsetInBits = + this->Ctx.toBits(CurStackOffset) + this->getFieldOffsetInBits(FD); + this->appendStr("_tv" + llvm::to_string(OffsetInBits) + "w" + + llvm::to_string(getFieldSize(FD, this->Ctx))); + } +}; + +struct GenDefaultInitializeFuncName + : GenUnaryFuncName, + DefaultInitializedTypeVisitor { + GenDefaultInitializeFuncName(CharUnits DstAlignment, ASTContext &Ctx) + : GenUnaryFuncName("__default_constructor_", + DstAlignment, Ctx) {} +}; + +struct GenDestructorFuncName : GenUnaryFuncName, + DestructedTypeVisitor { + GenDestructorFuncName(CharUnits DstAlignment, ASTContext &Ctx) + : GenUnaryFuncName("__destructor_", DstAlignment, + Ctx) {} +}; + +// Helper function that creates CGFunctionInfo for an N-ary special function. +template +static const CGFunctionInfo &getFunctionInfo(CodeGenModule &CGM, + FunctionArgList &Args) { + ASTContext &Ctx = CGM.getContext(); + llvm::SmallVector Params; + QualType ParamTy = Ctx.getPointerType(Ctx.VoidPtrTy); + + for (unsigned I = 0; I < N; ++I) + Params.push_back(ImplicitParamDecl::Create( + Ctx, nullptr, SourceLocation(), &Ctx.Idents.get(ValNameStr[I]), ParamTy, + ImplicitParamDecl::Other)); + + for (auto &P : Params) + Args.push_back(P); + + return CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args); +} + +// Template classes that are used as bases for classes that emit special +// functions. +template struct GenFuncBase { + template + void visitStruct(QualType FT, const FieldDecl *FD, CharUnits CurStackOffset, + std::array Addrs) { + this->asDerived().callSpecialFunction( + FT, CurStackOffset + asDerived().getFieldOffset(FD), Addrs); + } + + template + void visitArray(FieldKind FK, QualType QT, const FieldDecl *FD, + CharUnits CurStackOffset, std::array Addrs) { + // Non-volatile trivial fields are copied when flushTrivialFields is called. 
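    // For example, a field such as `__strong id elems[4];` takes this path:
    // the code below emits a loop that walks the four elements and applies the
    // per-element operation (e.g. a retain/release or a recursive struct
    // visit) to each one in turn.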
+ if (!FK) + return asDerived().visitTrivial(QT, FD, CurStackOffset, Addrs); + + CodeGenFunction &CGF = *this->CGF; + ASTContext &Ctx = CGF.getContext(); + + // Compute the end address. + QualType BaseEltQT; + std::array StartAddrs = Addrs; + for (unsigned I = 0; I < N; ++I) + StartAddrs[I] = getAddrWithOffset(Addrs[I], CurStackOffset, FD); + Address DstAddr = StartAddrs[DstIdx]; + llvm::Value *NumElts = + CGF.emitArrayLength(Ctx.getAsArrayType(QT), BaseEltQT, DstAddr); + unsigned BaseEltSize = Ctx.getTypeSizeInChars(BaseEltQT).getQuantity(); + llvm::Value *BaseEltSizeVal = + llvm::ConstantInt::get(NumElts->getType(), BaseEltSize); + llvm::Value *SizeInBytes = + CGF.Builder.CreateNUWMul(BaseEltSizeVal, NumElts); + Address BC = CGF.Builder.CreateBitCast(DstAddr, CGF.CGM.Int8PtrTy); + llvm::Value *DstArrayEnd = + CGF.Builder.CreateInBoundsGEP(BC.getPointer(), SizeInBytes); + DstArrayEnd = CGF.Builder.CreateBitCast(DstArrayEnd, CGF.CGM.Int8PtrPtrTy, + "dstarray.end"); + llvm::BasicBlock *PreheaderBB = CGF.Builder.GetInsertBlock(); + + // Create the header block and insert the phi instructions. + llvm::BasicBlock *HeaderBB = CGF.createBasicBlock("loop.header"); + CGF.EmitBlock(HeaderBB); + llvm::PHINode *PHIs[N]; + + for (unsigned I = 0; I < N; ++I) { + PHIs[I] = CGF.Builder.CreatePHI(CGF.CGM.Int8PtrPtrTy, 2, "addr.cur"); + PHIs[I]->addIncoming(StartAddrs[I].getPointer(), PreheaderBB); + } + + // Create the exit and loop body blocks. + llvm::BasicBlock *ExitBB = CGF.createBasicBlock("loop.exit"); + llvm::BasicBlock *LoopBB = CGF.createBasicBlock("loop.body"); + + // Emit the comparison and conditional branch instruction that jumps to + // either the exit or the loop body. + llvm::Value *Done = + CGF.Builder.CreateICmpEQ(PHIs[DstIdx], DstArrayEnd, "done"); + CGF.Builder.CreateCondBr(Done, ExitBB, LoopBB); + + // Visit the element of the array in the loop body. + CGF.EmitBlock(LoopBB); + QualType EltQT = Ctx.getAsArrayType(QT)->getElementType(); + CharUnits EltSize = Ctx.getTypeSizeInChars(EltQT); + std::array NewAddrs = Addrs; + + for (unsigned I = 0; I < N; ++I) + NewAddrs[I] = Address( + PHIs[I], StartAddrs[I].getAlignment().alignmentAtOffset(EltSize)); + + EltQT = QT.isVolatileQualified() ? EltQT.withVolatile() : EltQT; + this->asDerived().visit(EltQT, nullptr, CharUnits::Zero(), NewAddrs); + + LoopBB = CGF.Builder.GetInsertBlock(); + + for (unsigned I = 0; I < N; ++I) { + // Instrs to update the destination and source addresses. + // Update phi instructions. + NewAddrs[I] = getAddrWithOffset(NewAddrs[I], EltSize); + PHIs[I]->addIncoming(NewAddrs[I].getPointer(), LoopBB); + } + + // Insert an unconditional branch to the header block. + CGF.Builder.CreateBr(HeaderBB); + CGF.EmitBlock(ExitBB); + } + + /// Return an address with the specified offset from the passed address. 
+ Address getAddrWithOffset(Address Addr, CharUnits Offset) { + assert(Addr.isValid() && "invalid address"); + if (Offset.getQuantity() == 0) + return Addr; + Addr = CGF->Builder.CreateBitCast(Addr, CGF->CGM.Int8PtrTy); + Addr = CGF->Builder.CreateConstInBoundsGEP(Addr, Offset.getQuantity(), + CharUnits::One()); + return CGF->Builder.CreateBitCast(Addr, CGF->CGM.Int8PtrPtrTy); + } + + Address getAddrWithOffset(Address Addr, CharUnits StructFieldOffset, + const FieldDecl *FD) { + return getAddrWithOffset(Addr, StructFieldOffset + + asDerived().getFieldOffset(FD)); + } + + template + llvm::Function * + getFunction(StringRef FuncName, QualType QT, std::array Addrs, + std::array Alignments, CodeGenModule &CGM) { + // If the special function already exists in the module, return it. + if (llvm::Function *F = CGM.getModule().getFunction(FuncName)) { + bool WrongType = false; + if (!F->getReturnType()->isVoidTy()) + WrongType = true; + else { + for (const llvm::Argument &Arg : F->args()) + if (Arg.getType() != CGM.Int8PtrPtrTy) + WrongType = true; + } + + if (WrongType) { + std::string FuncName = F->getName(); + SourceLocation Loc = QT->castAs()->getDecl()->getLocation(); + CGM.Error(Loc, "special function " + FuncName + + " for non-trivial C struct has incorrect type"); + return nullptr; + } + return F; + } + + ASTContext &Ctx = CGM.getContext(); + FunctionArgList Args; + const CGFunctionInfo &FI = getFunctionInfo(CGM, Args); + llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI); + llvm::Function *F = + llvm::Function::Create(FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, + FuncName, &CGM.getModule()); + F->setVisibility(llvm::GlobalValue::HiddenVisibility); + CGM.SetLLVMFunctionAttributes(nullptr, FI, F); + CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F); + IdentifierInfo *II = &Ctx.Idents.get(FuncName); + FunctionDecl *FD = FunctionDecl::Create( + Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), + II, Ctx.VoidTy, nullptr, SC_PrivateExtern, false, false); + CodeGenFunction NewCGF(CGM); + setCGF(&NewCGF); + CGF->StartFunction(FD, Ctx.VoidTy, F, FI, Args); + + for (unsigned I = 0; I < N; ++I) { + llvm::Value *V = CGF->Builder.CreateLoad(CGF->GetAddrOfLocalVar(Args[I])); + Addrs[I] = Address(V, Alignments[I]); + } + + asDerived().visitStructFields(QT, CharUnits::Zero(), Addrs); + CGF->FinishFunction(); + return F; + } + + template + void callFunc(StringRef FuncName, QualType QT, std::array Addrs, + CodeGenFunction &CallerCGF) { + std::array Alignments; + llvm::Value *Ptrs[N]; + + for (unsigned I = 0; I < N; ++I) { + Alignments[I] = Addrs[I].getAlignment(); + Ptrs[I] = + CallerCGF.Builder.CreateBitCast(Addrs[I], CallerCGF.CGM.Int8PtrPtrTy) + .getPointer(); + } + + if (llvm::Function *F = + getFunction(FuncName, QT, Addrs, Alignments, CallerCGF.CGM)) + CallerCGF.EmitNounwindRuntimeCall(F, Ptrs); + } + + Derived &asDerived() { return static_cast(*this); } + + void setCGF(CodeGenFunction *F) { CGF = F; } + + CodeGenFunction *CGF = nullptr; +}; + +template +struct GenBinaryFunc : CopyStructVisitor, + GenFuncBase { + GenBinaryFunc(ASTContext &Ctx) : CopyStructVisitor(Ctx) {} + + void flushTrivialFields(std::array Addrs) { + CharUnits Size = this->End - this->Start; + + if (Size.getQuantity() == 0) + return; + + Address DstAddr = this->getAddrWithOffset(Addrs[DstIdx], this->Start); + Address SrcAddr = this->getAddrWithOffset(Addrs[SrcIdx], this->Start); + + // Emit memcpy. 
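    // For example, with `typedef struct { int i; __strong id f1; } Strong;`
    // the lone trivial `int` forms a 4-byte run that is copied with a single
    // i32 load/store in the else branch; runs of 16 bytes or more, or of a
    // non-power-of-two size, fall back to memcpy.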
+ if (Size.getQuantity() >= 16 || !llvm::isPowerOf2_32(Size.getQuantity())) { + llvm::Value *SizeVal = + llvm::ConstantInt::get(this->CGF->SizeTy, Size.getQuantity()); + DstAddr = + this->CGF->Builder.CreateElementBitCast(DstAddr, this->CGF->Int8Ty); + SrcAddr = + this->CGF->Builder.CreateElementBitCast(SrcAddr, this->CGF->Int8Ty); + this->CGF->Builder.CreateMemCpy(DstAddr, SrcAddr, SizeVal, false); + } else { + llvm::Type *Ty = llvm::Type::getIntNTy( + this->CGF->getLLVMContext(), + Size.getQuantity() * this->CGF->getContext().getCharWidth()); + DstAddr = this->CGF->Builder.CreateElementBitCast(DstAddr, Ty); + SrcAddr = this->CGF->Builder.CreateElementBitCast(SrcAddr, Ty); + llvm::Value *SrcVal = this->CGF->Builder.CreateLoad(SrcAddr, false); + this->CGF->Builder.CreateStore(SrcVal, DstAddr, false); + } + + this->Start = this->End = CharUnits::Zero(); + } + + template + void visitVolatileTrivial(QualType FT, const FieldDecl *FD, CharUnits Offset, + std::array Addrs) { + QualType RT = QualType(FD->getParent()->getTypeForDecl(), 0); + llvm::PointerType *PtrTy = this->CGF->ConvertType(RT)->getPointerTo(); + Address DstAddr = this->getAddrWithOffset(Addrs[DstIdx], Offset); + LValue DstBase = this->CGF->MakeAddrLValue( + this->CGF->Builder.CreateBitCast(DstAddr, PtrTy), FT); + LValue DstLV = this->CGF->EmitLValueForField(DstBase, FD); + Address SrcAddr = this->getAddrWithOffset(Addrs[SrcIdx], Offset); + LValue SrcBase = this->CGF->MakeAddrLValue( + this->CGF->Builder.CreateBitCast(SrcAddr, PtrTy), FT); + LValue SrcLV = this->CGF->EmitLValueForField(SrcBase, FD); + RValue SrcVal = this->CGF->EmitLoadOfLValue(SrcLV, SourceLocation()); + this->CGF->EmitStoreThroughLValue(SrcVal, DstLV); + } +}; + +// These classes that emit the special functions for a non-trivial struct. 
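// For example, for `typedef struct { int i; __strong id f1; } Strong;` (the
// type exercised by test/CodeGenObjC/nontrivial-c-struct-exception.m below),
// GenDestructor produces the helper `__destructor_8_s8`, which takes an i8**
// pointing at the struct and releases the __strong field at byte offset 8.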
+struct GenDestructor : StructVisitor, + GenFuncBase, + DestructedTypeVisitor { + GenDestructor(ASTContext &Ctx) : StructVisitor(Ctx) {} + void visitARCStrong(QualType QT, const FieldDecl *FD, + CharUnits CurStackOffset, std::array Addrs) { + CGF->destroyARCStrongImprecise( + *CGF, getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD), QT); + } + + void callSpecialFunction(QualType FT, CharUnits Offset, + std::array Addrs) { + CGF->callCStructDestructor( + CGF->MakeAddrLValue(getAddrWithOffset(Addrs[DstIdx], Offset), FT)); + } +}; + +struct GenDefaultInitialize + : StructVisitor, + GenFuncBase, + DefaultInitializedTypeVisitor { + typedef GenFuncBase GenFuncBaseTy; + GenDefaultInitialize(ASTContext &Ctx) + : StructVisitor(Ctx) {} + + void visitARCStrong(QualType QT, const FieldDecl *FD, + CharUnits CurStackOffset, std::array Addrs) { + CGF->EmitNullInitialization( + getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD), QT); + } + + template + void visitArray(FieldKind FK, QualType QT, const FieldDecl *FD, + CharUnits CurStackOffset, std::array Addrs) { + if (!FK) + return visitTrivial(QT, FD, CurStackOffset, Addrs); + + ASTContext &Ctx = getContext(); + CharUnits Size = Ctx.getTypeSizeInChars(QT); + QualType EltTy = Ctx.getBaseElementType(QT); + + if (Size < CharUnits::fromQuantity(16) || EltTy->getAs()) { + GenFuncBaseTy::visitArray(FK, QT, FD, CurStackOffset, Addrs); + return; + } + + llvm::Constant *SizeVal = CGF->Builder.getInt64(Size.getQuantity()); + Address DstAddr = getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD); + Address Loc = CGF->Builder.CreateElementBitCast(DstAddr, CGF->Int8Ty); + CGF->Builder.CreateMemSet(Loc, CGF->Builder.getInt8(0), SizeVal, + QT.isVolatileQualified()); + } + + void callSpecialFunction(QualType FT, CharUnits Offset, + std::array Addrs) { + CGF->callCStructDefaultConstructor( + CGF->MakeAddrLValue(getAddrWithOffset(Addrs[DstIdx], Offset), FT)); + } +}; + +struct GenCopyConstructor : GenBinaryFunc { + GenCopyConstructor(ASTContext &Ctx) + : GenBinaryFunc(Ctx) {} + + void visitARCStrong(QualType QT, const FieldDecl *FD, + CharUnits CurStackOffset, std::array Addrs) { + Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD); + Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStackOffset, FD); + llvm::Value *SrcVal = CGF->EmitLoadOfScalar( + Addrs[SrcIdx], QT.isVolatileQualified(), QT, SourceLocation()); + llvm::Value *Val = CGF->EmitARCRetain(QT, SrcVal); + CGF->EmitStoreOfScalar(Val, CGF->MakeAddrLValue(Addrs[DstIdx], QT), true); + } + void callSpecialFunction(QualType FT, CharUnits Offset, + std::array Addrs) { + CGF->callCStructCopyConstructor(CGF->MakeAddrLValue(Addrs[DstIdx], FT), + CGF->MakeAddrLValue(Addrs[SrcIdx], FT)); + } +}; + +struct GenMoveConstructor : GenBinaryFunc { + GenMoveConstructor(ASTContext &Ctx) + : GenBinaryFunc(Ctx) {} + + void visitARCStrong(QualType QT, const FieldDecl *FD, + CharUnits CurStackOffset, std::array Addrs) { + Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD); + Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStackOffset, FD); + LValue SrcLV = CGF->MakeAddrLValue(Addrs[SrcIdx], QT); + llvm::Value *SrcVal = + CGF->EmitLoadOfLValue(SrcLV, SourceLocation()).getScalarVal(); + CGF->EmitStoreOfScalar(getNullForVariable(SrcLV.getAddress()), SrcLV); + CGF->EmitStoreOfScalar(SrcVal, CGF->MakeAddrLValue(Addrs[DstIdx], QT), + /* isInitialization */ true); + } + void callSpecialFunction(QualType FT, CharUnits Offset, + std::array Addrs) { + 
CGF->callCStructMoveConstructor(CGF->MakeAddrLValue(Addrs[DstIdx], FT), + CGF->MakeAddrLValue(Addrs[SrcIdx], FT)); + } +}; + +struct GenCopyAssignment : GenBinaryFunc { + GenCopyAssignment(ASTContext &Ctx) + : GenBinaryFunc(Ctx) {} + + void visitARCStrong(QualType QT, const FieldDecl *FD, + CharUnits CurStackOffset, std::array Addrs) { + Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD); + Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStackOffset, FD); + llvm::Value *SrcVal = CGF->EmitLoadOfScalar( + Addrs[SrcIdx], QT.isVolatileQualified(), QT, SourceLocation()); + CGF->EmitARCStoreStrong(CGF->MakeAddrLValue(Addrs[DstIdx], QT), SrcVal, + false); + } + void callSpecialFunction(QualType FT, CharUnits Offset, + std::array Addrs) { + CGF->callCStructCopyAssignmentOperator( + CGF->MakeAddrLValue(Addrs[DstIdx], FT), + CGF->MakeAddrLValue(Addrs[SrcIdx], FT)); + } +}; + +struct GenMoveAssignment : GenBinaryFunc { + GenMoveAssignment(ASTContext &Ctx) + : GenBinaryFunc(Ctx) {} + + void visitARCStrong(QualType QT, const FieldDecl *FD, + CharUnits CurStackOffset, std::array Addrs) { + Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD); + Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStackOffset, FD); + LValue SrcLV = CGF->MakeAddrLValue(Addrs[SrcIdx], QT); + llvm::Value *SrcVal = + CGF->EmitLoadOfLValue(SrcLV, SourceLocation()).getScalarVal(); + CGF->EmitStoreOfScalar(getNullForVariable(SrcLV.getAddress()), SrcLV); + LValue DstLV = CGF->MakeAddrLValue(Addrs[DstIdx], QT); + llvm::Value *DstVal = + CGF->EmitLoadOfLValue(DstLV, SourceLocation()).getScalarVal(); + CGF->EmitStoreOfScalar(SrcVal, DstLV); + CGF->EmitARCRelease(DstVal, ARCImpreciseLifetime); + } + + void callSpecialFunction(QualType FT, CharUnits Offset, + std::array Addrs) { + CGF->callCStructMoveAssignmentOperator( + CGF->MakeAddrLValue(Addrs[DstIdx], FT), + CGF->MakeAddrLValue(Addrs[SrcIdx], FT)); + } +}; + +} // namespace + +void CodeGenFunction::destroyNonTrivialCStruct(CodeGenFunction &CGF, + Address Addr, QualType Type) { + CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Type)); +} + +// Default-initialize a variable that is a non-trivial struct or an array of +// such structure. +void CodeGenFunction::defaultInitNonTrivialCStructVar(LValue Dst) { + GenDefaultInitialize Gen(getContext()); + Address DstPtr = Builder.CreateBitCast(Dst.getAddress(), CGM.Int8PtrPtrTy); + Gen.setCGF(this); + QualType QT = Dst.getType(); + QT = Dst.isVolatile() ? QT.withVolatile() : QT; + Gen.visit(QT, nullptr, CharUnits::Zero(), std::array({{DstPtr}})); +} + +template +static void callSpecialFunction(G &&Gen, StringRef FuncName, QualType QT, + bool IsVolatile, CodeGenFunction &CGF, + std::array Addrs) { + for (unsigned I = 0; I < N; ++I) + Addrs[I] = CGF.Builder.CreateBitCast(Addrs[I], CGF.CGM.Int8PtrPtrTy); + QT = IsVolatile ? QT.withVolatile() : QT; + Gen.callFunc(FuncName, QT, Addrs, CGF); +} + +// Functions to emit calls to the special functions of a non-trivial C struct. 
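// As used elsewhere in this patch: CGDecl.cpp calls
// defaultInitNonTrivialCStructVar for locals without an initializer,
// CGBlocks.cpp calls callCStructCopyConstructor / callCStructMoveConstructor
// from the block and __block copy helpers and pushes the non-trivial C struct
// destructor from the dispose helpers, and CGExprAgg.cpp calls the copy/move
// constructors and assignment operators when emitting the final copy of an
// aggregate.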
+void CodeGenFunction::callCStructDefaultConstructor(LValue Dst) { + bool IsVolatile = Dst.isVolatile(); + Address DstPtr = Dst.getAddress(); + QualType QT = Dst.getType(); + GenDefaultInitializeFuncName GenName(DstPtr.getAlignment(), getContext()); + std::string FuncName = GenName.getName(QT, IsVolatile); + callSpecialFunction(GenDefaultInitialize(getContext()), FuncName, QT, + IsVolatile, *this, std::array({{DstPtr}})); +} + +void CodeGenFunction::callCStructDestructor(LValue Dst) { + bool IsVolatile = Dst.isVolatile(); + Address DstPtr = Dst.getAddress(); + QualType QT = Dst.getType(); + GenDestructorFuncName GenName(DstPtr.getAlignment(), getContext()); + std::string FuncName = GenName.getName(QT, IsVolatile); + callSpecialFunction(GenDestructor(getContext()), FuncName, QT, IsVolatile, + *this, std::array({{DstPtr}})); +} + +void CodeGenFunction::callCStructCopyConstructor(LValue Dst, LValue Src) { + bool IsVolatile = Dst.isVolatile() || Src.isVolatile(); + Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress(); + QualType QT = Dst.getType(); + GenBinaryFuncName GenName("__copy_constructor_", DstPtr.getAlignment(), + SrcPtr.getAlignment(), getContext()); + std::string FuncName = GenName.getName(QT, IsVolatile); + callSpecialFunction(GenCopyConstructor(getContext()), FuncName, QT, + IsVolatile, *this, + std::array({{DstPtr, SrcPtr}})); +} + +void CodeGenFunction::callCStructCopyAssignmentOperator(LValue Dst, LValue Src + +) { + bool IsVolatile = Dst.isVolatile() || Src.isVolatile(); + Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress(); + QualType QT = Dst.getType(); + GenBinaryFuncName GenName("__copy_assignment_", DstPtr.getAlignment(), + SrcPtr.getAlignment(), getContext()); + std::string FuncName = GenName.getName(QT, IsVolatile); + callSpecialFunction(GenCopyAssignment(getContext()), FuncName, QT, IsVolatile, + *this, std::array({{DstPtr, SrcPtr}})); +} + +void CodeGenFunction::callCStructMoveConstructor(LValue Dst, LValue Src) { + bool IsVolatile = Dst.isVolatile() || Src.isVolatile(); + Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress(); + QualType QT = Dst.getType(); + GenBinaryFuncName GenName("__move_constructor_", DstPtr.getAlignment(), + SrcPtr.getAlignment(), getContext()); + std::string FuncName = GenName.getName(QT, IsVolatile); + callSpecialFunction(GenMoveConstructor(getContext()), FuncName, QT, + IsVolatile, *this, + std::array({{DstPtr, SrcPtr}})); +} + +void CodeGenFunction::callCStructMoveAssignmentOperator(LValue Dst, LValue Src + +) { + bool IsVolatile = Dst.isVolatile() || Src.isVolatile(); + Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress(); + QualType QT = Dst.getType(); + GenBinaryFuncName GenName("__move_assignment_", DstPtr.getAlignment(), + SrcPtr.getAlignment(), getContext()); + std::string FuncName = GenName.getName(QT, IsVolatile); + callSpecialFunction(GenMoveAssignment(getContext()), FuncName, QT, IsVolatile, + *this, std::array({{DstPtr, SrcPtr}})); +} Index: cfe/trunk/lib/CodeGen/CMakeLists.txt =================================================================== --- cfe/trunk/lib/CodeGen/CMakeLists.txt +++ cfe/trunk/lib/CodeGen/CMakeLists.txt @@ -56,6 +56,7 @@ CGExprScalar.cpp CGGPUBuiltin.cpp CGLoopInfo.cpp + CGNonTrivialStruct.cpp CGObjC.cpp CGObjCGNU.cpp CGObjCMac.cpp Index: cfe/trunk/lib/CodeGen/CodeGenFunction.h =================================================================== --- cfe/trunk/lib/CodeGen/CodeGenFunction.h +++ cfe/trunk/lib/CodeGen/CodeGenFunction.h @@ -1536,6 +1536,7 @@ return false; case 
QualType::DK_cxx_destructor: case QualType::DK_objc_weak_lifetime: + case QualType::DK_nontrivial_c_struct: return getLangOpts().Exceptions; case QualType::DK_objc_strong_lifetime: return getLangOpts().Exceptions && @@ -3383,6 +3384,16 @@ CXXDtorType Type, const CXXRecordDecl *RD); + // These functions emit calls to the special functions of non-trivial C + // structs. + void defaultInitNonTrivialCStructVar(LValue Dst); + void callCStructDefaultConstructor(LValue Dst); + void callCStructDestructor(LValue Dst); + void callCStructCopyConstructor(LValue Dst, LValue Src); + void callCStructMoveConstructor(LValue Dst, LValue Src); + void callCStructCopyAssignmentOperator(LValue Dst, LValue Src); + void callCStructMoveAssignmentOperator(LValue Dst, LValue Src); + RValue EmitCXXMemberOrOperatorCall(const CXXMethodDecl *Method, const CGCallee &Callee, @@ -3557,6 +3568,7 @@ static Destroyer destroyARCStrongPrecise; static Destroyer destroyARCWeak; static Destroyer emitARCIntrinsicUse; + static Destroyer destroyNonTrivialCStruct; void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr); llvm::Value *EmitObjCAutoreleasePoolPush(); Index: cfe/trunk/lib/Lex/PPMacroExpansion.cpp =================================================================== --- cfe/trunk/lib/Lex/PPMacroExpansion.cpp +++ cfe/trunk/lib/Lex/PPMacroExpansion.cpp @@ -1151,6 +1151,7 @@ // Objective-C features .Case("objc_arr", LangOpts.ObjCAutoRefCount) // FIXME: REMOVE? .Case("objc_arc", LangOpts.ObjCAutoRefCount) + .Case("objc_arc_fields", true) .Case("objc_arc_weak", LangOpts.ObjCWeak) .Case("objc_default_synthesize_properties", LangOpts.ObjC2) .Case("objc_fixed_enum", LangOpts.ObjC2) Index: cfe/trunk/lib/Sema/JumpDiagnostics.cpp =================================================================== --- cfe/trunk/lib/Sema/JumpDiagnostics.cpp +++ cfe/trunk/lib/Sema/JumpDiagnostics.cpp @@ -154,6 +154,10 @@ return ScopePair(diag::note_protected_by_objc_weak_init, diag::note_exits_objc_weak); + case QualType::DK_nontrivial_c_struct: + return ScopePair(diag::note_protected_by_non_trivial_c_struct_init, + diag::note_exits_dtor); + case QualType::DK_cxx_destructor: OutDiag = diag::note_exits_dtor; break; @@ -254,6 +258,10 @@ Diags = ScopePair(diag::note_enters_block_captures_weak, diag::note_exits_block_captures_weak); break; + case QualType::DK_nontrivial_c_struct: + Diags = ScopePair(diag::note_enters_block_captures_non_trivial_c_struct, + diag::note_exits_block_captures_non_trivial_c_struct); + break; case QualType::DK_none: llvm_unreachable("non-lifetime captured variable"); } Index: cfe/trunk/lib/Sema/SemaDecl.cpp =================================================================== --- cfe/trunk/lib/Sema/SemaDecl.cpp +++ cfe/trunk/lib/Sema/SemaDecl.cpp @@ -11324,6 +11324,9 @@ } } + if (var->getType().isDestructedType() == QualType::DK_nontrivial_c_struct) + getCurFunction()->setHasBranchProtectedScope(); + // Warn about externally-visible variables being defined without a // prior declaration. We only want to do this for global // declarations, but we also specifically need to avoid doing it for @@ -15214,6 +15217,7 @@ // Get the type for the field. const Type *FDTy = FD->getType().getTypePtr(); + Qualifiers QS = FD->getType().getQualifiers(); if (!FD->isAnonymousStructOrUnion()) { // Remember all fields written by the user. 
@@ -15355,7 +15359,9 @@ FD->setType(T); } else if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers() && Record && !ObjCFieldLifetimeErrReported && - (!getLangOpts().CPlusPlus || Record->isUnion())) { + ((!getLangOpts().CPlusPlus && + QS.getObjCLifetime() == Qualifiers::OCL_Weak) || + Record->isUnion())) { // It's an error in ARC or Weak if a field has lifetime. // We don't want to report this in a system header, though, // so we just make the field unavailable. @@ -15391,6 +15397,18 @@ Record->setHasObjectMember(true); } } + + if (Record && !getLangOpts().CPlusPlus) { + QualType FT = FD->getType(); + if (FT.isNonTrivialToPrimitiveDefaultInitialize()) + Record->setNonTrivialToPrimitiveDefaultInitialize(); + QualType::PrimitiveCopyKind PCK = FT.isNonTrivialToPrimitiveCopy(); + if (PCK != QualType::PCK_Trivial && PCK != QualType::PCK_VolatileTrivial) + Record->setNonTrivialToPrimitiveCopy(); + if (FT.isDestructedType()) + Record->setNonTrivialToPrimitiveDestroy(); + } + if (Record && FD->getType().isVolatileQualified()) Record->setHasVolatileMember(true); // Keep track of the number of named members. Index: cfe/trunk/lib/Sema/SemaExpr.cpp =================================================================== --- cfe/trunk/lib/Sema/SemaExpr.cpp +++ cfe/trunk/lib/Sema/SemaExpr.cpp @@ -776,6 +776,9 @@ return VAK_Valid; } + if (Ty.isDestructedType() == QualType::DK_nontrivial_c_struct) + return VAK_Invalid; + if (Ty.isCXX98PODType(Context)) return VAK_Valid; @@ -837,7 +840,10 @@ break; case VAK_Invalid: - if (Ty->isObjCObjectType()) + if (Ty.isDestructedType() == QualType::DK_nontrivial_c_struct) + Diag(E->getLocStart(), + diag::err_cannot_pass_non_trivial_c_struct_to_vararg) << Ty << CT; + else if (Ty->isObjCObjectType()) DiagRuntimeBehavior( E->getLocStart(), nullptr, PDiag(diag::err_cannot_pass_objc_interface_to_vararg) Index: cfe/trunk/test/ARCMT/checking.m =================================================================== --- cfe/trunk/test/ARCMT/checking.m +++ cfe/trunk/test/ARCMT/checking.m @@ -116,7 +116,7 @@ } struct S { - A* a; // expected-error {{ARC forbids Objective-C objects in struct}} + A* a; }; @interface B Index: cfe/trunk/test/CodeGenObjC/nontrivial-c-struct-exception.m =================================================================== --- cfe/trunk/test/CodeGenObjC/nontrivial-c-struct-exception.m +++ cfe/trunk/test/CodeGenObjC/nontrivial-c-struct-exception.m @@ -0,0 +1,33 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios11 -fobjc-arc -fblocks -fobjc-runtime=ios-11.0 -fobjc-exceptions -fexceptions -fobjc-arc-exceptions -emit-llvm -o - %s | FileCheck %s + +// CHECK: %[[STRUCT_STRONG:.*]] = type { i32, i8* } + +typedef struct { + int i; + id f1; +} Strong; + +// CHECK: define void @testStrongException() +// CHECK: %[[AGG_TMP:.*]] = alloca %[[STRUCT_STRONG]], align 8 +// CHECK: %[[AGG_TMP1:.*]] = alloca %[[STRUCT_STRONG]], align 8 +// CHECK: %[[CALL:.*]] = call [2 x i64] @genStrong() +// CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONG]]* %[[AGG_TMP]] to [2 x i64]* +// CHECK: store [2 x i64] %[[CALL]], [2 x i64]* %[[V0]], align 8 +// CHECK: invoke [2 x i64] @genStrong() + +// CHECK: call void @calleeStrong([2 x i64] %{{.*}}, [2 x i64] %{{.*}}) +// CHECK-NEXT: ret void + +// CHECK: landingpad { i8*, i32 } +// CHECK: %[[V9:.*]] = bitcast %[[STRUCT_STRONG]]* %[[AGG_TMP]] to i8** +// CHECK: call void @__destructor_8_s8(i8** %[[V9]]) +// CHECK: br label + +// CHECK: resume + +Strong genStrong(void); +void calleeStrong(Strong, Strong); + +void testStrongException(void) { + 
calleeStrong(genStrong(), genStrong()); +} Index: cfe/trunk/test/CodeGenObjC/nontrivial-c-struct-func-name-collision.m =================================================================== --- cfe/trunk/test/CodeGenObjC/nontrivial-c-struct-func-name-collision.m +++ cfe/trunk/test/CodeGenObjC/nontrivial-c-struct-func-name-collision.m @@ -0,0 +1,14 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios11 -fobjc-arc -fblocks -fobjc-runtime=ios-11.0 -emit-llvm -verify -o - %s + +typedef struct { // expected-error {{special function __default_constructor_8_s8 for non-trivial C struct has incorrect type}} + int i; + id f1; +} StrongSmall; + +int __default_constructor_8_s8(double a) { + return 0; +} + +void testIncorrectFunctionType(void) { + StrongSmall x; +} Index: cfe/trunk/test/CodeGenObjC/strong-in-c-struct.m =================================================================== --- cfe/trunk/test/CodeGenObjC/strong-in-c-struct.m +++ cfe/trunk/test/CodeGenObjC/strong-in-c-struct.m @@ -0,0 +1,522 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios11 -fobjc-arc -fblocks -fobjc-runtime=ios-11.0 -emit-llvm -o - %s | FileCheck %s + +typedef void (^BlockTy)(void); + +typedef struct { + int a[4]; +} Trivial; + +typedef struct { + Trivial f0; + id f1; +} Strong; + +typedef struct { + int i; + id f1; +} StrongSmall; + +typedef struct { + Strong f0; + id f1; + double d; +} StrongOuter; + +typedef struct { + int f0; + volatile id f1; +} StrongVolatile; + +typedef struct { + BlockTy f0; +} StrongBlock; + +typedef struct { + int i; + id f0[2][2]; +} IDArray; + +typedef struct { + double d; + Strong f0[2][2]; +} StructArray; + +typedef struct { + id f0; + int i : 9; +} Bitfield0; + +typedef struct { + char c; + int i0 : 2; + int i1 : 4; + id f0; + int i2 : 31; + int i3 : 1; + id f1; + int : 0; + int a[3]; + id f2; + double d; + int i4 : 1; + volatile int i5 : 2; + volatile char i6; +} Bitfield1; + +StrongSmall getStrongSmall(void); +StrongOuter getStrongOuter(void); +void calleeStrongSmall(StrongSmall); +void func(Strong *); + +// CHECK: %[[STRUCT_BITFIELD1:.*]] = type { i8, i8, i8*, i32, i8*, [3 x i32], i8*, double, i8, i8 } + +// CHECK: define void @test_constructor_destructor_StrongOuter() +// CHECK: %[[T:.*]] = alloca %[[STRUCT_STRONGOUTER:.*]], align 8 +// CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONGOUTER]]* %[[T]] to i8** +// CHECK: call void @__default_constructor_8_s16_s24(i8** %[[V0]]) +// CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONGOUTER]]* %[[T]] to i8** +// CHECK: call void @__destructor_8_s16_s24(i8** %[[V1]]) +// CHECK: ret void + +// CHECK: define linkonce_odr hidden void @__default_constructor_8_s16_s24(i8** %[[DST:.*]]) +// CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 +// CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 +// CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 +// CHECK: call void @__default_constructor_8_s16(i8** %[[V0]]) +// CHECK: %[[V1:.*]] = bitcast i8** %[[V0]] to i8* +// CHECK: %[[V2:.*]] = getelementptr inbounds i8, i8* %[[V1]], i64 24 +// CHECK: %[[V3:.*]] = bitcast i8* %[[V2]] to i8** +// CHECK: %[[V4:.*]] = bitcast i8** %[[V3]] to i8* +// CHECK: call void @llvm.memset.p0i8.i64(i8* align 8 %[[V4]], i8 0, i64 8, i1 false) +// CHECK: ret void + +// CHECK: define linkonce_odr hidden void @__default_constructor_8_s16(i8** %[[DST:.*]]) +// CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 +// CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 +// CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 +// CHECK: %[[V1:.*]] = bitcast i8** %[[V0]] to i8* +// 
CHECK: %[[V2:.*]] = getelementptr inbounds i8, i8* %[[V1]], i64 16 +// CHECK: %[[V3:.*]] = bitcast i8* %[[V2]] to i8** +// CHECK: %[[V4:.*]] = bitcast i8** %[[V3]] to i8* +// CHECK: call void @llvm.memset.p0i8.i64(i8* align 8 %[[V4]], i8 0, i64 8, i1 false) +// CHECK: ret void + +// CHECK: define linkonce_odr hidden void @__destructor_8_s16_s24(i8** %[[DST:.*]]) +// CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 +// CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 +// CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 +// CHECK: call void @__destructor_8_s16(i8** %[[V0]]) +// CHECK: %[[V1:.*]] = bitcast i8** %[[V0]] to i8* +// CHECK: %[[V2:.*]] = getelementptr inbounds i8, i8* %[[V1]], i64 24 +// CHECK: %[[V3:.*]] = bitcast i8* %[[V2]] to i8** +// CHECK: call void @objc_storeStrong(i8** %[[V3]], i8* null) +// CHECK: ret void + +// CHECK: define linkonce_odr hidden void @__destructor_8_s16(i8** %[[DST:.*]]) +// CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 +// CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 +// CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 +// CHECK: %[[V1:.*]] = bitcast i8** %[[V0]] to i8* +// CHECK: %[[V2:.*]] = getelementptr inbounds i8, i8* %[[V1]], i64 16 +// CHECK: %[[V3:.*]] = bitcast i8* %[[V2]] to i8** +// CHECK: call void @objc_storeStrong(i8** %[[V3]], i8* null) +// CHECK: ret void + +void test_constructor_destructor_StrongOuter(void) { + StrongOuter t; +} + +// CHECK: define void @test_copy_constructor_StrongOuter(%[[STRUCT_STRONGOUTER:.*]]* %[[S:.*]]) +// CHECK: %[[S_ADDR:.*]] = alloca %[[STRUCT_STRONGOUTER]]*, align 8 +// CHECK: %[[T:.*]] = alloca %[[STRUCT_STRONGOUTER]], align 8 +// CHECK: store %[[STRUCT_STRONGOUTER]]* %[[S]], %[[STRUCT_STRONGOUTER]]** %[[S_ADDR]], align 8 +// CHECK: %[[V0:.*]] = load %[[STRUCT_STRONGOUTER]]*, %[[STRUCT_STRONGOUTER]]** %[[S_ADDR]], align 8 +// CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONGOUTER]]* %[[T]] to i8** +// CHECK: %[[V2:.*]] = bitcast %[[STRUCT_STRONGOUTER]]* %[[V0]] to i8** +// CHECK: call void @__copy_constructor_8_8_t0w16_s16_s24_t32w8(i8** %[[V1]], i8** %[[V2]]) +// CHECK: %[[V3:.*]] = bitcast %[[STRUCT_STRONGOUTER]]* %[[T]] to i8** +// CHECK: call void @__destructor_8_s16_s24(i8** %[[V3]]) +// CHECK: ret void + +// CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_t0w16_s16_s24_t32w8(i8** %[[DST:.*]], i8** %[[SRC:.*]]) +// CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 +// CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8 +// CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 +// CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8 +// CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 +// CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8 +// CHECK: call void @__copy_constructor_8_8_t0w16_s16(i8** %[[V0]], i8** %[[V1]]) +// CHECK: %[[V2:.*]] = bitcast i8** %[[V0]] to i8* +// CHECK: %[[V3:.*]] = getelementptr inbounds i8, i8* %[[V2]], i64 24 +// CHECK: %[[V4:.*]] = bitcast i8* %[[V3]] to i8** +// CHECK: %[[V5:.*]] = bitcast i8** %[[V1]] to i8* +// CHECK: %[[V6:.*]] = getelementptr inbounds i8, i8* %[[V5]], i64 24 +// CHECK: %[[V7:.*]] = bitcast i8* %[[V6]] to i8** +// CHECK: %[[V8:.*]] = load i8*, i8** %[[V7]], align 8 +// CHECK: %[[V9:.*]] = call i8* @objc_retain(i8* %[[V8]]) +// CHECK: store i8* %[[V9]], i8** %[[V4]], align 8 +// CHECK: %[[V10:.*]] = bitcast i8** %[[V0]] to i8* +// CHECK: %[[V11:.*]] = getelementptr inbounds i8, i8* %[[V10]], i64 32 +// CHECK: %[[V12:.*]] = bitcast i8* %[[V11]] to i8** +// CHECK: %[[V13:.*]] = 
bitcast i8** %[[V1]] to i8* +// CHECK: %[[V14:.*]] = getelementptr inbounds i8, i8* %[[V13]], i64 32 +// CHECK: %[[V15:.*]] = bitcast i8* %[[V14]] to i8** +// CHECK: %[[V16:.*]] = bitcast i8** %[[V12]] to i64* +// CHECK: %[[V17:.*]] = bitcast i8** %[[V15]] to i64* +// CHECK: %[[V18:.*]] = load i64, i64* %[[V17]], align 8 +// CHECK: store i64 %[[V18]], i64* %[[V16]], align 8 +// CHECK: ret void + +// CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_t0w16_s16(i8** %[[DST:.*]], i8** %[[SRC:.*]]) +// CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 +// CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8 +// CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 +// CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8 +// CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 +// CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8 +// CHECK: %[[V2:.*]] = bitcast i8** %[[V0]] to i8* +// CHECK: %[[V3:.*]] = bitcast i8** %[[V1]] to i8* +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %[[V2]], i8* align 8 %[[V3]], i64 16, i1 false) +// CHECK: %[[V4:.*]] = bitcast i8** %[[V0]] to i8* +// CHECK: %[[V5:.*]] = getelementptr inbounds i8, i8* %[[V4]], i64 16 +// CHECK: %[[V6:.*]] = bitcast i8* %[[V5]] to i8** +// CHECK: %[[V7:.*]] = bitcast i8** %[[V1]] to i8* +// CHECK: %[[V8:.*]] = getelementptr inbounds i8, i8* %[[V7]], i64 16 +// CHECK: %[[V9:.*]] = bitcast i8* %[[V8]] to i8** +// CHECK: %[[V10:.*]] = load i8*, i8** %[[V9]], align 8 +// CHECK: %[[V11:.*]] = call i8* @objc_retain(i8* %[[V10]]) +// CHECK: store i8* %[[V11]], i8** %[[V6]], align 8 +// CHECK: ret void + +void test_copy_constructor_StrongOuter(StrongOuter *s) { + StrongOuter t = *s; +} + +/// CHECK: define linkonce_odr hidden void @__copy_assignment_8_8_t0w16_s16_s24_t32w8(i8** %[[DST:.*]], i8** %[[SRC:.*]]) +// CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 +// CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8 +// CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 +// CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8 +// CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 +// CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8 +// CHECK: %[[V2:.*]] = bitcast i8** %[[V0]] to i8* +// CHECK: %[[V3:.*]] = getelementptr inbounds i8, i8* %[[V2]], i64 24 +// CHECK: %[[V4:.*]] = bitcast i8* %[[V3]] to i8** +// CHECK: %[[V5:.*]] = bitcast i8** %[[V1]] to i8* +// CHECK: %[[V6:.*]] = getelementptr inbounds i8, i8* %[[V5]], i64 24 +// CHECK: %[[V7:.*]] = bitcast i8* %[[V6]] to i8** +// CHECK: %[[V8:.*]] = load i8*, i8** %[[V7]], align 8 +// CHECK: call void @objc_storeStrong(i8** %[[V4]], i8* %[[V8]]) + +void test_copy_assignment_StrongOuter(StrongOuter *d, StrongOuter *s) { + *d = *s; +} + +// CHECK: define void @test_move_constructor_StrongOuter() +// CHECK: %[[T1:.*]] = getelementptr inbounds %[[STRUCT_BLOCK_BYREF_T:.*]], %[[STRUCT_BLOCK_BYREF_T]]* %{{.*}}, i32 0, i32 7 +// CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONGOUTER]]* %[[T1]] to i8** +// CHECK: call void @__default_constructor_8_s16_s24(i8** %[[V1]]) +// CHECK: %[[T2:.*]] = getelementptr inbounds %[[STRUCT_BLOCK_BYREF_T]], %[[STRUCT_BLOCK_BYREF_T]]* %{{.*}}, i32 0, i32 7 +// CHECK: %[[V9:.*]] = bitcast %[[STRUCT_STRONGOUTER]]* %[[T2]] to i8** +// CHECK: call void @__destructor_8_s16_s24(i8** %[[V9]]) + +// CHECK: define internal void @__Block_byref_object_copy_(i8*, i8*) +// CHECK: call void @__move_constructor_8_8_t0w16_s16_s24_t32w8( + +// CHECK: define linkonce_odr hidden void 
@__move_constructor_8_8_t0w16_s16_s24_t32w8(i8** %[[DST:.*]], i8** %[[SRC:.*]]) +// CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 +// CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8 +// CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 +// CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8 +// CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 +// CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8 +// CHECK: call void @__move_constructor_8_8_t0w16_s16(i8** %[[V0]], i8** %[[V1]]) +// CHECK: %[[V2:.*]] = bitcast i8** %[[V0]] to i8* +// CHECK: %[[V3:.*]] = getelementptr inbounds i8, i8* %[[V2]], i64 24 +// CHECK: %[[V4:.*]] = bitcast i8* %[[V3]] to i8** +// CHECK: %[[V5:.*]] = bitcast i8** %[[V1]] to i8* +// CHECK: %[[V6:.*]] = getelementptr inbounds i8, i8* %[[V5]], i64 24 +// CHECK: %[[V7:.*]] = bitcast i8* %[[V6]] to i8** +// CHECK: %[[V8:.*]] = load i8*, i8** %[[V7]], align 8 +// CHECK: store i8* null, i8** %[[V7]], align 8 +// CHECK: store i8* %[[V8]], i8** %[[V4]], align 8 + +// CHECK: define internal void @__Block_byref_object_dispose_(i8*) +// CHECK: call void @__destructor_8_s16_s24( + +void test_move_constructor_StrongOuter(void) { + __block StrongOuter t; + BlockTy b = ^{ (void)t; }; +} + +// CHECK: define linkonce_odr hidden void @__move_assignment_8_8_t0w16_s16_s24_t32w8(i8** %[[DST:.*]], i8** %[[SRC:.*]]) +// CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 +// CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8 +// CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 +// CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8 +// CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 +// CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8 +// CHECK: call void @__move_assignment_8_8_t0w16_s16(i8** %[[V0]], i8** %[[V1]]) +// CHECK: %[[V2:.*]] = bitcast i8** %[[V0]] to i8* +// CHECK: %[[V3:.*]] = getelementptr inbounds i8, i8* %[[V2]], i64 24 +// CHECK: %[[V4:.*]] = bitcast i8* %[[V3]] to i8** +// CHECK: %[[V5:.*]] = bitcast i8** %[[V1]] to i8* +// CHECK: %[[V6:.*]] = getelementptr inbounds i8, i8* %[[V5]], i64 24 +// CHECK: %[[V7:.*]] = bitcast i8* %[[V6]] to i8** +// CHECK: %[[V8:.*]] = load i8*, i8** %[[V7]], align 8 +// CHECK: store i8* null, i8** %[[V7]], align 8 +// CHECK: %[[V9:.*]] = load i8*, i8** %[[V4]], align 8 +// CHECK: store i8* %[[V8]], i8** %[[V4]], align 8 +// CHECK: call void @objc_release(i8* %[[V9]]) + +void test_move_assignment_StrongOuter(StrongOuter *p) { + *p = getStrongOuter(); +} + +// CHECK: define void @test_parameter_StrongSmall([2 x i64] %[[A_COERCE:.*]]) +// CHECK: %[[A:.*]] = alloca %[[STRUCT_STRONG:.*]], align 8 +// CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONG]]* %[[A]] to [2 x i64]* +// CHECK: store [2 x i64] %[[A_COERCE]], [2 x i64]* %[[V0]], align 8 +// CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONG]]* %[[A]] to i8** +// CHECK: call void @__destructor_8_s8(i8** %[[V1]]) +// CHECK: ret void + +void test_parameter_StrongSmall(StrongSmall a) { +} + +// CHECK: define void @test_argument_StrongSmall([2 x i64] %[[A_COERCE:.*]]) +// CHECK: %[[A:.*]] = alloca %[[STRUCT_STRONGSMALL:.*]], align 8 +// CHECK: %[[TEMP_LVALUE:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 +// CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to [2 x i64]* +// CHECK: store [2 x i64] %[[A_COERCE]], [2 x i64]* %[[V0]], align 8 +// CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[TEMP_LVALUE]] to i8** +// CHECK: %[[V2:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to i8** +// CHECK: call void 
@__copy_constructor_8_8_t0w4_s8(i8** %[[V1]], i8** %[[V2]]) +// CHECK: %[[V3:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[TEMP_LVALUE]] to [2 x i64]* +// CHECK: %[[V4:.*]] = load [2 x i64], [2 x i64]* %[[V3]], align 8 +// CHECK: call void @calleeStrongSmall([2 x i64] %[[V4]]) +// CHECK: %[[V5:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to i8** +// CHECK: call void @__destructor_8_s8(i8** %[[V5]]) +// CHECK: ret void + +void test_argument_StrongSmall(StrongSmall a) { + calleeStrongSmall(a); +} + +// CHECK: define [2 x i64] @test_return_StrongSmall([2 x i64] %[[A_COERCE:.*]]) +// CHECK: %[[RETVAL:.*]] = alloca %[[STRUCT_STRONGSMALL:.*]], align 8 +// CHECK: %[[A:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 +// CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to [2 x i64]* +// CHECK: store [2 x i64] %[[A_COERCE]], [2 x i64]* %[[V0]], align 8 +// CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[RETVAL]] to i8** +// CHECK: %[[V2:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to i8** +// CHECK: call void @__copy_constructor_8_8_t0w4_s8(i8** %[[V1]], i8** %[[V2]]) +// CHECK: %[[V3:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to i8** +// CHECK: call void @__destructor_8_s8(i8** %[[V3]]) +// CHECK: %[[V4:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[RETVAL]] to [2 x i64]* +// CHECK: %[[V5:.*]] = load [2 x i64], [2 x i64]* %[[V4]], align 8 +// CHECK: ret [2 x i64] %[[V5]] + +StrongSmall test_return_StrongSmall(StrongSmall a) { + return a; +} + +// CHECK: define void @test_destructor_ignored_result() +// CHECK: %[[COERCE:.*]] = alloca %[[STRUCT_STRONGSMALL:.*]], align 8 +// CHECK: %[[CALL:.*]] = call [2 x i64] @getStrongSmall() +// CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[COERCE]] to [2 x i64]* +// CHECK: store [2 x i64] %[[CALL]], [2 x i64]* %[[V0]], align 8 +// CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[COERCE]] to i8** +// CHECK: call void @__destructor_8_s8(i8** %[[V1]]) +// CHECK: ret void + +void test_destructor_ignored_result(void) { + getStrongSmall(); +} + +// CHECK: define void @test_copy_constructor_StrongBlock( +// CHECK: call void @__copy_constructor_8_8_sb0( +// CHECK: call void @__destructor_8_sb0( +// CHECK: ret void + +// CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_sb0(i8** %[[DST:.*]], i8** %[[SRC:.*]]) +// CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 +// CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8 +// CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 +// CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8 +// CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 +// CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8 +// CHECK: %[[V2:.*]] = load i8*, i8** %[[V1]], align 8 +// CHECK: %[[V3:.*]] = call i8* @objc_retainBlock(i8* %[[V2]]) +// CHECK: store i8* %[[V3]], i8** %[[V0]], align 8 +// CHECK: ret void + +void test_copy_constructor_StrongBlock(StrongBlock *s) { + StrongBlock t = *s; +} + +// CHECK: define void @test_copy_assignment_StrongBlock(%[[STRUCT_STRONGBLOCK:.*]]* %[[D:.*]], %[[STRUCT_STRONGBLOCK]]* %[[S:.*]]) +// CHECK: call void @__copy_assignment_8_8_sb0( + +// CHECK: define linkonce_odr hidden void @__copy_assignment_8_8_sb0(i8** %[[DST:.*]], i8** %[[SRC:.*]]) +// CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 +// CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8 +// CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 +// CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8 +// CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 +// CHECK: %[[V1:.*]] = load 
i8**, i8*** %[[SRC_ADDR]], align 8 +// CHECK: %[[V2:.*]] = load i8*, i8** %[[V1]], align 8 +// CHECK: %[[V3:.*]] = call i8* @objc_retainBlock(i8* %[[V2]]) +// CHECK: %[[V4:.*]] = load i8*, i8** %[[V0]], align 8 +// CHECK: store i8* %[[V3]], i8** %[[V0]], align 8 +// CHECK: call void @objc_release(i8* %[[V4]]) +// CHECK: ret void + +void test_copy_assignment_StrongBlock(StrongBlock *d, StrongBlock *s) { + *d = *s; +} + +// CHECK: define void @test_copy_constructor_StrongVolatile0( +// CHECK: call void @__copy_constructor_8_8_t0w4_sv8( +// CHECK: call void @__destructor_8_sv8( + +// CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_t0w4_sv8( +// CHECK: %[[V8:.*]] = load volatile i8*, i8** %{{.*}}, align 8 +// CHECK: %[[V9:.*]] = call i8* @objc_retain(i8* %[[V8]]) +// CHECK: store volatile i8* %[[V9]], i8** %{{.*}}, align 8 + +void test_copy_constructor_StrongVolatile0(StrongVolatile *s) { + StrongVolatile t = *s; +} + +// CHECK: define void @test_copy_constructor_StrongVolatile1( +// CHECK: call void @__copy_constructor_8_8_tv0w128_sv16( + +void test_copy_constructor_StrongVolatile1(Strong *s) { + volatile Strong t = *s; +} + +// CHECK: define void @test_block_capture_Strong() +// CHECK: call void @__default_constructor_8_s16( +// CHECK: call void @__copy_constructor_8_8_t0w16_s16( +// CHECK: call void @__destructor_8_s16( +// CHECK: call void @__destructor_8_s16( +// CHECK: ret void + +// CHECK: define internal void @__copy_helper_block_.1(i8*, i8*) +// CHECK: call void @__copy_constructor_8_8_t0w16_s16( +// CHECK: ret void + +// CHECK: define internal void @__destroy_helper_block_.2( +// CHECK: call void @__destructor_8_s16( +// CHECK: ret void + +void test_block_capture_Strong(void) { + Strong t; + BlockTy b = ^(){ (void)t; }; +} + +// CHECK: define void @test_variable_length_array(i32 %[[N:.*]]) +// CHECK: %[[N_ADDR:.*]] = alloca i32, align 4 +// CHECK: store i32 %[[N]], i32* %[[N_ADDR]], align 4 +// CHECK: %[[V0:.*]] = load i32, i32* %[[N_ADDR]], align 4 +// CHECK: %[[V1:.*]] = zext i32 %[[V0]] to i64 +// CHECK: %[[VLA:.*]] = alloca %[[STRUCT_STRONG:.*]], i64 %[[V1]], align 8 +// CHECK: %[[V3:.*]] = bitcast %[[STRUCT_STRONG]]* %[[VLA]] to i8** +// CHECK: %[[V4:.*]] = mul nuw i64 24, %[[V1]] +// CHECK: %[[V5:.*]] = bitcast i8** %[[V3]] to i8* +// CHECK: %[[V6:.*]] = getelementptr inbounds i8, i8* %[[V5]], i64 %[[V4]] +// CHECK: %[[DSTARRAY_END:.*]] = bitcast i8* %[[V6]] to i8** +// CHECK: br label + +// CHECK: %[[DSTADDR_CUR:.*]] = phi i8** [ %[[V3]], {{.*}} ], [ %[[V7:.*]], {{.*}} ] +// CHECK: %[[DONE:.*]] = icmp eq i8** %[[DSTADDR_CUR]], %[[DSTARRAY_END]] +// CHECK: br i1 %[[DONE]], label + +// CHECK: call void @__default_constructor_8_s16(i8** %[[DSTADDR_CUR]]) +// CHECK: %[[V8:.*]] = bitcast i8** %[[DSTADDR_CUR]] to i8* +// CHECK: %[[V9:.*]] = getelementptr inbounds i8, i8* %[[V8]], i64 24 +// CHECK: %[[V7]] = bitcast i8* %[[V9]] to i8** +// CHECK: br label + +// CHECK: call void @func(%[[STRUCT_STRONG]]* %[[VLA]]) +// CHECK: %[[V10:.*]] = getelementptr inbounds %[[STRUCT_STRONG]], %[[STRUCT_STRONG]]* %[[VLA]], i64 %[[V1]] +// CHECK: %[[ARRAYDESTROY_ISEMPTY:.*]] = icmp eq %[[STRUCT_STRONG]]* %[[VLA]], %[[V10]] +// CHECK: br i1 %[[ARRAYDESTROY_ISEMPTY]], label + +// CHECK: %[[ARRAYDESTROY_ELEMENTPAST:.*]] = phi %[[STRUCT_STRONG]]* [ %[[V10]], {{.*}} ], [ %[[ARRAYDESTROY_ELEMENT:.*]], {{.*}} ] +// CHECK: %[[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds %[[STRUCT_STRONG]], %[[STRUCT_STRONG]]* %[[ARRAYDESTROY_ELEMENTPAST]], i64 -1 +// CHECK: %[[V11:.*]] = bitcast 
%[[STRUCT_STRONG]]* %[[ARRAYDESTROY_ELEMENT]] to i8** +// CHECK: call void @__destructor_8_s16(i8** %[[V11]]) +// CHECK: %[[ARRAYDESTROY_DONE:.*]] = icmp eq %[[STRUCT_STRONG]]* %[[ARRAYDESTROY_ELEMENT]], %[[VLA]] +// CHECK: br i1 %[[ARRAYDESTROY_DONE]], label + +// CHECK: ret void + +void test_variable_length_array(int n) { + Strong a[n]; + func(a); +} + +// CHECK: define linkonce_odr hidden void @__default_constructor_8_AB8s8n4_s8_AE( +// CHECK: call void @llvm.memset.p0i8.i64(i8* align 8 %{{.*}}, i8 0, i64 32, i1 false) +void test_constructor_destructor_IDArray(void) { + IDArray t; +} + +// CHECK: define linkonce_odr hidden void @__default_constructor_8_AB8s24n4_s24_AE( +void test_constructor_destructor_StructArray(void) { + StructArray t; +} + +// Check that IRGen copies the 9-bit bitfield emitting i16 load and store. + +// CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_s0_t8w2( +// CHECK: %[[V4:.*]] = bitcast i8** %{{.*}} to i8* +// CHECK: %[[V5:.*]] = getelementptr inbounds i8, i8* %[[V4]], i64 8 +// CHECK: %[[V6:.*]] = bitcast i8* %[[V5]] to i8** +// CHECK: %[[V7:.*]] = bitcast i8** %{{.*}} to i8* +// CHECK: %[[V8:.*]] = getelementptr inbounds i8, i8* %[[V7]], i64 8 +// CHECK: %[[V9:.*]] = bitcast i8* %[[V8]] to i8** +// CHECK: %[[V10:.*]] = bitcast i8** %[[V6]] to i16* +// CHECK: %[[V11:.*]] = bitcast i8** %[[V9]] to i16* +// CHECK: %[[V12:.*]] = load i16, i16* %[[V11]], align 8 +// CHECK: store i16 %[[V12]], i16* %[[V10]], align 8 +// CHECK: ret void + +void test_copy_constructor_Bitfield0(Bitfield0 *a) { + Bitfield0 t = *a; +} + +// CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_t0w2_s8_t16w4_s24_t32w12_s48_t56w9_tv513w2_tv520w8 +// CHECK: %[[V4:.*]] = load i16, i16* %{{.*}}, align 8 +// CHECK: store i16 %[[V4]], i16* %{{.*}}, align 8 +// CHECK: %[[V21:.*]] = load i32, i32* %{{.*}}, align 8 +// CHECK: store i32 %[[V21]], i32* %{{.*}}, align 8 +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %{{.*}}, i8* align 8 %{{.*}}, i64 12, i1 false) +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %{{.*}}, i8* align 8 %{{.*}}, i64 9, i1 false) +// CHECK: %[[V54:.*]] = bitcast i8** %[[V0:.*]] to %[[STRUCT_BITFIELD1]]* +// CHECK: %[[I5:.*]] = getelementptr inbounds %[[STRUCT_BITFIELD1]], %[[STRUCT_BITFIELD1]]* %[[V54]], i32 0, i32 8 +// CHECK: %[[V55:.*]] = bitcast i8** %[[V1:.*]] to %[[STRUCT_BITFIELD1]]* +// CHECK: %[[I51:.*]] = getelementptr inbounds %[[STRUCT_BITFIELD1]], %[[STRUCT_BITFIELD1]]* %[[V55]], i32 0, i32 8 +// CHECK: %[[BF_LOAD:.*]] = load volatile i8, i8* %[[I51]], align 8 +// CHECK: %[[BF_SHL:.*]] = shl i8 %[[BF_LOAD]], 5 +// CHECK: %[[BF_ASHR:.*]] = ashr i8 %[[BF_SHL]], 6 +// CHECK: %[[BF_CAST:.*]] = sext i8 %[[BF_ASHR]] to i32 +// CHECK: %[[V56:.*]] = trunc i32 %[[BF_CAST]] to i8 +// CHECK: %[[BF_LOAD2:.*]] = load volatile i8, i8* %[[I5]], align 8 +// CHECK: %[[BF_VALUE:.*]] = and i8 %[[V56]], 3 +// CHECK: %[[BF_SHL3:.*]] = shl i8 %[[BF_VALUE]], 1 +// CHECK: %[[BF_CLEAR:.*]] = and i8 %[[BF_LOAD2]], -7 +// CHECK: %[[BF_SET:.*]] = or i8 %[[BF_CLEAR]], %[[BF_SHL3]] +// CHECK: store volatile i8 %[[BF_SET]], i8* %[[I5]], align 8 +// CHECK: %[[V57:.*]] = bitcast i8** %[[V0]] to %[[STRUCT_BITFIELD1]]* +// CHECK: %[[I6:.*]] = getelementptr inbounds %[[STRUCT_BITFIELD1]], %[[STRUCT_BITFIELD1]]* %[[V57]], i32 0, i32 9 +// CHECK: %[[V58:.*]] = bitcast i8** %[[V1]] to %[[STRUCT_BITFIELD1]]* +// CHECK: %[[I64:.*]] = getelementptr inbounds %[[STRUCT_BITFIELD1]], %[[STRUCT_BITFIELD1]]* %[[V58]], i32 0, i32 9 +// CHECK: %[[V59:.*]] = load 
volatile i8, i8* %[[I64]], align 1 +// CHECK: store volatile i8 %[[V59]], i8* %[[I6]], align 1 + +void test_copy_constructor_Bitfield1(Bitfield1 *a) { + Bitfield1 t = *a; +} Index: cfe/trunk/test/Lexer/has_feature_objc_arc.m =================================================================== --- cfe/trunk/test/Lexer/has_feature_objc_arc.m +++ cfe/trunk/test/Lexer/has_feature_objc_arc.m @@ -13,8 +13,16 @@ void no_objc_arc_weak_feature(); #endif +#if __has_feature(objc_arc_fields) +void has_objc_arc_fields(); +#else +void no_objc_arc_fields(); +#endif + // CHECK-ARC: void has_objc_arc_feature(); // CHECK-ARC: void has_objc_arc_weak_feature(); +// CHECK-ARC: void has_objc_arc_fields(); // CHECK-ARCLITE: void has_objc_arc_feature(); // CHECK-ARCLITE: void no_objc_arc_weak_feature(); +// CHECK-ARCLITE: void has_objc_arc_fields(); Index: cfe/trunk/test/SemaObjC/arc-decls.m =================================================================== --- cfe/trunk/test/SemaObjC/arc-decls.m +++ cfe/trunk/test/SemaObjC/arc-decls.m @@ -3,7 +3,7 @@ // rdar://8843524 struct A { - id x; // expected-error {{ARC forbids Objective-C objects in struct}} + id x; }; union u { @@ -13,7 +13,7 @@ @interface I { struct A a; struct B { - id y[10][20]; // expected-error {{ARC forbids Objective-C objects in struct}} + id y[10][20]; id z; } b; @@ -23,7 +23,7 @@ // rdar://10260525 struct r10260525 { - id (^block) (); // expected-error {{ARC forbids blocks in struct}} + id (^block) (); }; struct S { Index: cfe/trunk/test/SemaObjC/arc-system-header.m =================================================================== --- cfe/trunk/test/SemaObjC/arc-system-header.m +++ cfe/trunk/test/SemaObjC/arc-system-header.m @@ -23,8 +23,7 @@ } void test5(struct Test5 *p) { - p->field = 0; // expected-error {{'field' is unavailable in ARC}} - // expected-note@arc-system-header.h:25 {{field has non-trivial ownership qualification}} + p->field = 0; } id test6() { @@ -49,8 +48,7 @@ extern void doSomething(Test9 arg); void test9() { - Test9 foo2 = {0, 0}; // expected-error {{'field' is unavailable in ARC}} - // expected-note@arc-system-header.h:56 {{field has non-trivial ownership qualification}} + Test9 foo2 = {0, 0}; doSomething(foo2); } #endif Index: cfe/trunk/test/SemaObjC/strong-in-c-struct.m =================================================================== --- cfe/trunk/test/SemaObjC/strong-in-c-struct.m +++ cfe/trunk/test/SemaObjC/strong-in-c-struct.m @@ -0,0 +1,56 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios11 -fobjc-arc -fblocks -fobjc-runtime=ios-11.0 -fsyntax-only -verify %s + +typedef struct { + id a; +} Strong; + +void callee_variadic(const char *, ...); + +void test_variadic(void) { + Strong t; + callee_variadic("s", t); // expected-error {{cannot pass non-trivial C object of type 'Strong' by value to variadic function}} +} + +void test_jump0(int cond) { + switch (cond) { + case 0: + ; + Strong x; // expected-note {{jump bypasses initialization of variable of non-trivial C struct type}} + break; + case 1: // expected-error {{cannot jump from switch statement to this case label}} + x.a = 0; + break; + } +} + +void test_jump1(void) { + static void *ips[] = { &&L0 }; +L0: // expected-note {{possible target of indirect goto}} + ; + Strong x; // expected-note {{jump exits scope of variable with non-trivial destructor}} + goto *ips; // expected-error {{cannot jump}} +} + +typedef void (^BlockTy)(void); +void func(BlockTy); +void func2(Strong); + +void test_block_scope0(int cond) { + Strong x; // expected-note {{jump enters lifetime of 
block which captures a C struct that is non-trivial to destroy}} + switch (cond) { + case 0: + func(^{ func2(x); }); + break; + default: // expected-error {{cannot jump from switch statement to this case label}} + break; + } +} + +void test_block_scope1(void) { + static void *ips[] = { &&L0 }; +L0: // expected-note {{possible target of indirect goto}} + ; + Strong x; // expected-note {{jump exits scope of variable with non-trivial destructor}} expected-note {{jump exits lifetime of block which captures a C struct that is non-trivial to destroy}} + func(^{ func2(x); }); + goto *ips; // expected-error {{cannot jump}} +}
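
A minimal usage sketch of what this patch enables (illustrative only; the
struct and function names below are hypothetical and not part of the patch):

  // Compiled with -fobjc-arc: an Objective-C pointer field is now accepted in
  // a plain C struct, and Clang emits the special functions checked above.
  typedef struct {
    int tag;
    id payload; // implicitly __strong under ARC
  } Box;

  void take(Box b); // the callee destroys its by-value parameter

  void demo(Box *src) {
    Box copy = *src; // emits a call to a __copy_constructor_* helper
    take(copy);      // the argument is copy-constructed into a temporary
  }                  // 'copy' is released via a __destructor_* helper

Passing such a struct by value to a variadic function is rejected, as the Sema
test above verifies.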