diff --git a/clang/docs/DiagnosticsReference.rst b/clang/docs/DiagnosticsReference.rst
--- a/clang/docs/DiagnosticsReference.rst
+++ b/clang/docs/DiagnosticsReference.rst
@@ -6325,6 +6325,15 @@
 +----------------------------------------------------------------------------+

+-Wimplicit-block-var-alloc
+--------------------------
+**Diagnostic text:**
+
++-----------------------------------------------------------------------------------------------------------------------------------+
+|:warning:`warning:` |nbsp| :diagtext:`variable` |nbsp| :placeholder:`A` |nbsp| :diagtext:`will be initialized in a heap allocation`|
++-----------------------------------------------------------------------------------------------------------------------------------+
+
+
 -Wimplicit-const-int-float-conversion
 -------------------------------------
 This diagnostic is enabled by default.
diff --git a/clang/include/clang/AST/Decl.h b/clang/include/clang/AST/Decl.h
--- a/clang/include/clang/AST/Decl.h
+++ b/clang/include/clang/AST/Decl.h
@@ -991,6 +991,10 @@
     unsigned ImplicitParamKind : 3;

     unsigned EscapingByref : 1;
+
+    /// Whether this is a __block variable that is captured by a block
+    /// referenced in its own initializer.
+    unsigned CapturedByOwnInit : 1;
   };

   union {
@@ -1473,9 +1477,17 @@
   /// escaping block.
   bool isNonEscapingByref() const;

-  void setEscapingByref() {
-    NonParmVarDeclBits.EscapingByref = true;
-  }
+  /// Indicates this is a __block variable that is captured by a block
+  /// referenced in its own initializer.
+  bool isCapturedByOwnInit() const;
+
+  void setEscapingByref() { NonParmVarDeclBits.EscapingByref = true; }
+
+  void setCapturedByOwnInit() { NonParmVarDeclBits.CapturedByOwnInit = true; }
+
+  /// Check if this is a __block variable which needs to be initialized
+  /// directly on the heap.
+  bool needsInitOnHeap() const;

   /// Retrieve the variable declaration from which this variable could
   /// be instantiated, if it is an instantiation (rather than a non-template).
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -5343,6 +5343,11 @@
 def err_undefined_inline_var : Error<"inline variable %q0 is not defined">;
 def note_used_here : Note<"used here">;

+def warn_implicit_block_var_alloc : Warning<"variable %q0 will be initialized in a heap allocation">,
+  InGroup<DiagGroup<"implicit-block-var-alloc">>, DefaultIgnore;
+def note_because_captured_by_block : Note<
+  "because it is captured by a block used in its own initializer">;
+
 def err_internal_linkage_redeclaration : Error<
   "'internal_linkage' attribute does not appear on the first declaration of %0">;
 def warn_internal_linkage_local_storage : Warning<
diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp
--- a/clang/lib/AST/Decl.cpp
+++ b/clang/lib/AST/Decl.cpp
@@ -2487,6 +2487,17 @@
   return hasAttr<BlocksAttr>() && !NonParmVarDeclBits.EscapingByref;
 }

+bool VarDecl::isCapturedByOwnInit() const {
+  return hasAttr<BlocksAttr>() && NonParmVarDeclBits.CapturedByOwnInit;
+}
+
+bool VarDecl::needsInitOnHeap() const {
+  // Record types are the only TEK_Aggregate types that can legally be the type
+  // of a __block variable.
+  return isCapturedByOwnInit() &&
+         isa<RecordType>(getType().getAtomicUnqualifiedType());
+}
+
 VarDecl *VarDecl::getTemplateInstantiationPattern() const {
   const VarDecl *VD = this;
diff --git a/clang/lib/CodeGen/CGBlocks.h b/clang/lib/CodeGen/CGBlocks.h
--- a/clang/lib/CodeGen/CGBlocks.h
+++ b/clang/lib/CodeGen/CGBlocks.h
@@ -146,6 +146,7 @@
   unsigned FieldIndex;
   CharUnits ByrefAlignment;
   CharUnits FieldOffset;
+  bool ForceNoopCopy;
 };

 /// CGBlockInfo - Information to generate a block literal.
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
--- a/clang/lib/CodeGen/CGBlocks.cpp
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -44,6 +44,13 @@
   name = name.substr(1);
 }

+BlockByrefHelpers::BlockByrefHelpers(const BlockByrefInfo &info)
+    : CopyHelper(nullptr), DisposeHelper(nullptr),
+      // The alignment we care about for the purposes of uniquing byref
+      // helpers is the alignment of the actual byref value field.
+      Alignment(info.ByrefAlignment.alignmentAtOffset(info.FieldOffset)),
+      ForceNoopCopy(info.ForceNoopCopy) {}
+
 // Anchor the vtable to this translation unit.
 BlockByrefHelpers::~BlockByrefHelpers() {}

@@ -2193,9 +2200,8 @@
   BlockFieldFlags Flags;

 public:
-  ObjectByrefHelpers(CharUnits alignment, BlockFieldFlags flags)
-    : BlockByrefHelpers(alignment), Flags(flags) {}
-
+  ObjectByrefHelpers(const BlockByrefInfo &info, BlockFieldFlags flags)
+      : BlockByrefHelpers(info), Flags(flags) {}
   void emitCopy(CodeGenFunction &CGF, Address destField,
                 Address srcField) override {
     destField = CGF.Builder.CreateBitCast(destField, CGF.VoidPtrTy);
@@ -2227,7 +2233,7 @@
 /// Emits the copy/dispose helpers for an ARC __block __weak variable.
 class ARCWeakByrefHelpers final : public BlockByrefHelpers {
 public:
-  ARCWeakByrefHelpers(CharUnits alignment) : BlockByrefHelpers(alignment) {}
+  ARCWeakByrefHelpers(const BlockByrefInfo &info) : BlockByrefHelpers(info) {}

   void emitCopy(CodeGenFunction &CGF, Address destField,
                 Address srcField) override {
@@ -2248,7 +2254,7 @@
 /// that's not of block-pointer type.
 class ARCStrongByrefHelpers final : public BlockByrefHelpers {
 public:
-  ARCStrongByrefHelpers(CharUnits alignment) : BlockByrefHelpers(alignment) {}
+  ARCStrongByrefHelpers(const BlockByrefInfo &info) : BlockByrefHelpers(info) {}

   void emitCopy(CodeGenFunction &CGF, Address destField,
                 Address srcField) override {
@@ -2284,8 +2290,8 @@
 /// variable that's of block-pointer type.
 class ARCStrongBlockByrefHelpers final : public BlockByrefHelpers {
 public:
-  ARCStrongBlockByrefHelpers(CharUnits alignment)
-      : BlockByrefHelpers(alignment) {}
+  ARCStrongBlockByrefHelpers(const BlockByrefInfo &info)
+      : BlockByrefHelpers(info) {}

   void emitCopy(CodeGenFunction &CGF, Address destField,
                 Address srcField) override {
@@ -2314,9 +2320,9 @@
   const Expr *CopyExpr;

 public:
-  CXXByrefHelpers(CharUnits alignment, QualType type,
+  CXXByrefHelpers(const BlockByrefInfo &info, QualType type,
                   const Expr *copyExpr)
-      : BlockByrefHelpers(alignment), VarType(type), CopyExpr(copyExpr) {}
+      : BlockByrefHelpers(info), VarType(type), CopyExpr(copyExpr) {}

   bool needsCopy() const override { return CopyExpr != nullptr; }
   void emitCopy(CodeGenFunction &CGF, Address destField,
@@ -2342,8 +2348,8 @@
   QualType VarType;

 public:
-  NonTrivialCStructByrefHelpers(CharUnits alignment, QualType type)
-      : BlockByrefHelpers(alignment), VarType(type) {}
+  NonTrivialCStructByrefHelpers(const BlockByrefInfo &info, QualType type)
+      : BlockByrefHelpers(info), VarType(type) {}

   void emitCopy(CodeGenFunction &CGF, Address destField,
                 Address srcField) override {
@@ -2408,7 +2414,7 @@

   CGF.StartFunction(FD, ReturnTy, Fn, FI, args);

-  if (generator.needsCopy()) {
+  if (generator.needsCopy() && !generator.ForceNoopCopy) {
     llvm::Type *byrefPtrType = byrefInfo.Type->getPointerTo(0);

     // dst->x
@@ -2541,26 +2547,21 @@

   auto &byrefInfo = getBlockByrefInfo(&var);

-  // The alignment we care about for the purposes of uniquing byref
-  // helpers is the alignment of the actual byref value field.
-  CharUnits valueAlignment =
-      byrefInfo.ByrefAlignment.alignmentAtOffset(byrefInfo.FieldOffset);
-
   if (const CXXRecordDecl *record = type->getAsCXXRecordDecl()) {
     const Expr *copyExpr =
         CGM.getContext().getBlockVarCopyInit(&var).getCopyExpr();
     if (!copyExpr && record->hasTrivialDestructor()) return nullptr;

-    return ::buildByrefHelpers(
-        CGM, byrefInfo, CXXByrefHelpers(valueAlignment, type, copyExpr));
+    return ::buildByrefHelpers(CGM, byrefInfo,
+                               CXXByrefHelpers(byrefInfo, type, copyExpr));
   }

   // If type is a non-trivial C struct type that is non-trivial to
   // destructively move or destroy, build the copy and dispose helpers.
   if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct ||
       type.isDestructedType() == QualType::DK_nontrivial_c_struct)
-    return ::buildByrefHelpers(
-        CGM, byrefInfo, NonTrivialCStructByrefHelpers(valueAlignment, type));
+    return ::buildByrefHelpers(CGM, byrefInfo,
+                               NonTrivialCStructByrefHelpers(byrefInfo, type));

   // Otherwise, if we don't have a retainable type, there's nothing to do.
   // that the runtime does extra copies.
@@ -2582,7 +2583,7 @@
   // byref routines.
   case Qualifiers::OCL_Weak:
     return ::buildByrefHelpers(CGM, byrefInfo,
-                               ARCWeakByrefHelpers(valueAlignment));
+                               ARCWeakByrefHelpers(byrefInfo));

   // ARC __strong __block variables need to be retained.
   case Qualifiers::OCL_Strong:
@@ -2590,13 +2591,13 @@
     // transfer possible.
     if (type->isBlockPointerType()) {
       return ::buildByrefHelpers(CGM, byrefInfo,
-                                 ARCStrongBlockByrefHelpers(valueAlignment));
+                                 ARCStrongBlockByrefHelpers(byrefInfo));

-    // Otherwise, we transfer ownership of the retain from the stack
-    // to the heap.
+      // Otherwise, we transfer ownership of the retain from the stack
+      // to the heap.
     } else {
       return ::buildByrefHelpers(CGM, byrefInfo,
-                                 ARCStrongByrefHelpers(valueAlignment));
+                                 ARCStrongByrefHelpers(byrefInfo));
     }
   }
   llvm_unreachable("fell out of lifetime switch!");
@@ -2616,7 +2617,7 @@
     flags |= BLOCK_FIELD_IS_WEAK;

   return ::buildByrefHelpers(CGM, byrefInfo,
-                             ObjectByrefHelpers(valueAlignment, flags));
+                             ObjectByrefHelpers(byrefInfo, flags));
 }

 Address CodeGenFunction::emitBlockByrefAddress(Address baseAddr,
@@ -2734,12 +2735,25 @@
   info.FieldIndex = types.size() - 1;
   info.FieldOffset = varOffset;
   info.ByrefAlignment = std::max(varAlign, getPointerAlign());
+  info.ForceNoopCopy = D->needsInitOnHeap();

   auto pair = BlockByrefInfos.insert({D, info});
   assert(pair.second && "info was inserted recursively?");
   return pair.first->second;
 }

+void CodeGenFunction::emitByrefInitOnHeap(Address addr) {
+  Address out = CreateDefaultAlignTempAlloca(addr.getType(), "initOnHeap.out");
+  BlockFieldFlags flags = BLOCK_FIELD_IS_BYREF;
+  llvm::Value *args[] = {Builder.CreateBitCast(out.getPointer(), VoidPtrTy),
+                         Builder.CreateBitCast(addr.getPointer(), VoidPtrTy),
+                         llvm::ConstantInt::get(Int32Ty, flags.getBitMask())};
+
+  EmitNounwindRuntimeCall(CGM.getBlockObjectAssign(), args);
+
+  BuildBlockRelease(addr.getPointer(), flags, false);
+}
+
 /// Initialize the structural components of a __block variable, i.e.
 /// everything but the actual object.
 void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
@@ -2846,6 +2860,9 @@
     auto layoutInfo = CGM.getObjCRuntime().BuildByrefLayout(CGM, type);
     storeHeaderField(layoutInfo, getPointerSize(), "byref.layout");
   }
+
+  if (emission.NeedsInitOnHeap)
+    emitByrefInitOnHeap(addr);
 }

 void CodeGenFunction::BuildBlockRelease(llvm::Value *V, BlockFieldFlags flags,
diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp
--- a/clang/lib/CodeGen/CGDecl.cpp
+++ b/clang/lib/CodeGen/CGDecl.cpp
@@ -1396,6 +1396,8 @@

   bool isEscapingByRef = D.isEscapingByref();
   emission.IsEscapingByRef = isEscapingByRef;
+  emission.IsCapturedByOwnInit = D.isCapturedByOwnInit();
+  emission.NeedsInitOnHeap = D.needsInitOnHeap();

   CharUnits alignment = getContext().getDeclAlign(&D);

@@ -1601,68 +1603,6 @@
   return emission;
 }

-static bool isCapturedBy(const VarDecl &, const Expr *);
-
-/// Determines whether the given __block variable is potentially
-/// captured by the given statement.
-static bool isCapturedBy(const VarDecl &Var, const Stmt *S) {
-  if (const Expr *E = dyn_cast<Expr>(S))
-    return isCapturedBy(Var, E);
-  for (const Stmt *SubStmt : S->children())
-    if (isCapturedBy(Var, SubStmt))
-      return true;
-  return false;
-}
-
-/// Determines whether the given __block variable is potentially
-/// captured by the given expression.
-static bool isCapturedBy(const VarDecl &Var, const Expr *E) {
-  // Skip the most common kinds of expressions that make
-  // hierarchy-walking expensive.
-  E = E->IgnoreParenCasts();
-
-  if (const BlockExpr *BE = dyn_cast<BlockExpr>(E)) {
-    const BlockDecl *Block = BE->getBlockDecl();
-    for (const auto &I : Block->captures()) {
-      if (I.getVariable() == &Var)
-        return true;
-    }
-
-    // No need to walk into the subexpressions.
-    return false;
-  }
-
-  if (const StmtExpr *SE = dyn_cast<StmtExpr>(E)) {
-    const CompoundStmt *CS = SE->getSubStmt();
-    for (const auto *BI : CS->body())
-      if (const auto *BIE = dyn_cast<Expr>(BI)) {
-        if (isCapturedBy(Var, BIE))
-          return true;
-      }
-      else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
-        // special case declarations
-        for (const auto *I : DS->decls()) {
-          if (const auto *VD = dyn_cast<VarDecl>((I))) {
-            const Expr *Init = VD->getInit();
-            if (Init && isCapturedBy(Var, Init))
-              return true;
-          }
-        }
-      }
-      else
-        // FIXME. Make safe assumption assuming arbitrary statements cause capturing.
-        // Later, provide code to poke into statements for capture analysis.
-        return true;
-    return false;
-  }
-
-  for (const Stmt *SubStmt : E->children())
-    if (isCapturedBy(Var, SubStmt))
-      return true;
-
-  return false;
-}
-
 /// Determine whether the given initializer is trivial in the sense
 /// that it requires no code to be generated.
 bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
@@ -1808,8 +1748,7 @@
   // Check whether this is a byref variable that's potentially
   // captured and moved by its own initializer.  If so, we'll need to
   // emit the initializer first, then copy into the variable.
-  bool capturedByInit =
-      Init && emission.IsEscapingByRef && isCapturedBy(D, Init);
+  bool capturedByInit = emission.IsCapturedByOwnInit;

   bool locIsByrefHeader = !capturedByInit;
   const Address Loc =
@@ -1865,6 +1804,10 @@
     initializeWhatIsTechnicallyUninitialized(Loc);
     LValue lv = MakeAddrLValue(Loc, type);
     lv.setNonGC(true);
+    if (emission.NeedsInitOnHeap) {
+      drillIntoBlockVariable(*this, lv, &D);
+      capturedByInit = false; // We don't need to drill again.
+    }
     return EmitExprAsInit(Init, &D, lv, capturedByInit);
   }

@@ -1914,6 +1857,9 @@
     return;
   }
   case TEK_Aggregate:
+    assert(
+        !capturedByInit &&
+        "Capture-by-initializer should have been handled by NeedsInitOnHeap!");
     if (type->isAtomicType()) {
       EmitAtomicInit(const_cast<Expr *>(init), lvalue);
     } else {
@@ -1922,7 +1868,6 @@
         Overlap = AggValueSlot::DoesNotOverlap;
       else if (auto *FD = dyn_cast<FieldDecl>(D))
         Overlap = getOverlapForFieldInit(FD);
-      // TODO: how can we delay here if D is captured by its initializer?
       EmitAggExpr(init, AggValueSlot::forLValue(
                             lvalue, *this, AggValueSlot::IsDestructed,
                             AggValueSlot::DoesNotNeedGCBarriers,
@@ -2011,27 +1956,33 @@
   // us from jumping into any of these scopes anyway.
   if (!HaveInsertPoint()) return;

-  const VarDecl &D = *emission.Variable;
+  // If we're pre-copying, _Block_object_destroy will handle destruction so we
+  // don't need to directly destroy the variable.
+  if (!emission.NeedsInitOnHeap) {
+    const VarDecl &D = *emission.Variable;

-  // Check the type for a cleanup.
-  if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext()))
-    emitAutoVarTypeCleanup(emission, dtorKind);
+    // Check the type for a cleanup.
+    if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext()))
+      emitAutoVarTypeCleanup(emission, dtorKind);

-  // In GC mode, honor objc_precise_lifetime.
-  if (getLangOpts().getGC() != LangOptions::NonGC &&
-      D.hasAttr<ObjCPreciseLifetimeAttr>()) {
-    EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
-  }
+    // In GC mode, honor objc_precise_lifetime.
+    if (getLangOpts().getGC() != LangOptions::NonGC &&
+        D.hasAttr<ObjCPreciseLifetimeAttr>()) {
+      EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
+    }

-  // Handle the cleanup attribute.
-  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
-    const FunctionDecl *FD = CA->getFunctionDecl();
+    // Handle the cleanup attribute.
+    if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
+      const FunctionDecl *FD = CA->getFunctionDecl();

-    llvm::Constant *F = CGM.GetAddrOfFunction(FD);
-    assert(F && "Could not find function!");
+      llvm::Constant *F = CGM.GetAddrOfFunction(FD);
+      assert(F && "Could not find function!");

-    const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
-    EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
+      const CGFunctionInfo &Info =
+          CGM.getTypes().arrangeFunctionDeclaration(FD);
+      EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info,
+                                               &D);
+    }
   }

   // If this is a block variable, call _Block_object_destroy
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -2018,6 +2018,7 @@

   class AutoVarEmission;

+  void emitByrefInitOnHeap(Address addr);
   void emitByrefStructureInit(const AutoVarEmission &emission);

   /// Enter a cleanup to destroy a __block variable. Note that this
@@ -2930,6 +2931,14 @@
     /// escaping block.
     bool IsEscapingByRef;

+    /// True if the variable is a __block variable that is captured by a block
+    /// referenced in its own initializer.
+    bool IsCapturedByOwnInit;
+
+    /// True if the variable is a __block variable that needs to be initialized
+    /// directly on the heap.
+    bool NeedsInitOnHeap;
+
     /// True if the variable is of aggregate type and has a constant
     /// initializer.
     bool IsConstantAggregate;
diff --git a/clang/lib/CodeGen/CodeGenModule.h b/clang/lib/CodeGen/CodeGenModule.h
--- a/clang/lib/CodeGen/CodeGenModule.h
+++ b/clang/lib/CodeGen/CodeGenModule.h
@@ -245,6 +245,8 @@
   void reportDiagnostics(DiagnosticsEngine &Diags, StringRef MainFile);
 };

+class BlockByrefInfo;
+
 /// A pair of helper functions for a __block variable.
 class BlockByrefHelpers : public llvm::FoldingSetNode {
   // MSVC requires this type to be complete in order to process this
@@ -258,13 +260,16 @@
   /// have different helper functions.
   CharUnits Alignment;

-  BlockByrefHelpers(CharUnits alignment)
-      : CopyHelper(nullptr), DisposeHelper(nullptr), Alignment(alignment) {}
+  /// Emit a trivial copy helper; true if the variable needs init on the heap.
+  bool ForceNoopCopy;
+
+  BlockByrefHelpers(const BlockByrefInfo &byrefInfo);
   BlockByrefHelpers(const BlockByrefHelpers &) = default;
   virtual ~BlockByrefHelpers();

   void Profile(llvm::FoldingSetNodeID &id) const {
     id.AddInteger(Alignment.getQuantity());
+    id.AddInteger(ForceNoopCopy);
     profileImpl(id);
   }
   virtual void profileImpl(llvm::FoldingSetNodeID &id) const = 0;
diff --git a/clang/lib/Sema/Sema.cpp b/clang/lib/Sema/Sema.cpp
--- a/clang/lib/Sema/Sema.cpp
+++ b/clang/lib/Sema/Sema.cpp
@@ -1897,6 +1897,13 @@
   SourceLocation Loc = VD->getLocation();
   Expr *VarRef =
       new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc);
+  // Note: If needsInitOnHeap is true, the move/copy constructor will not
+  // actually be called, so in theory we don't have to require that it exists.
+  // But needsInitOnHeap is based on a conservative estimate of __block
+  // variables that might be retained in their own initializers; it could be
+  // refined in the future, which could cause variables to suddenly start
+  // requiring move constructors again. To avoid the backwards compatibility
+  // hazard, require it regardless of needsInitOnHeap.
   ExprResult Result = S.PerformMoveOrCopyInitialization(
       InitializedEntity::InitializeBlock(Loc, T, false), VD, VD->getType(),
       VarRef, /*AllowNRVO=*/true);
@@ -1915,6 +1922,32 @@
   }
 }

+/// Determines whether the given __block variable is potentially
+/// captured by a block within the given statement.
+static const BlockExpr *blockThatCaptures(const VarDecl &Var, const Stmt *S) {
+  if (const Expr *E = dyn_cast<Expr>(S)) {
+    // Skip the most common kinds of expressions that make
+    // hierarchy-walking expensive.
+    S = E->IgnoreParenCasts();
+  }
+
+  if (const BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
+    const BlockDecl *Block = BE->getBlockDecl();
+    for (const auto &I : Block->captures()) {
+      if (I.getVariable() == &Var)
+        return BE;
+    }
+
+    // No need to walk into the subexpressions.
+    return nullptr;
+  }
+
+  for (const Stmt *SubStmt : S->children())
+    if (const BlockExpr *BE = blockThatCaptures(Var, SubStmt))
+      return BE;
+  return nullptr;
+}
+
 static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) {
   // Set the EscapingByref flag of __block variables captured by
   // escaping blocks.
@@ -1927,6 +1960,32 @@
         if (BD->doesNotEscape())
           continue;
         VD->setEscapingByref();
+
+        // Possibly set CapturedByOwnInit, which implies needsInitOnHeap
+        // (which we warn about) if the variable is a record.
+        // But only once per variable:
+        if (VD->isCapturedByOwnInit())
+          continue;
+
+        if (Expr *I = VD->getInit())
+          if (const BlockExpr *BE = blockThatCaptures(*VD, I)) {
+            VD->setCapturedByOwnInit();
+
+            if (VD->needsInitOnHeap()) {
+              S.Diag(VD->getLocation(), diag::warn_implicit_block_var_alloc)
+                  << VD;
+              S.Diag(BE->getCaretLocation(),
+                     diag::note_because_captured_by_block);
+            }
+          }
+
+        // __block variables might require us to capture a copy-initializer.
+        // It's currently invalid to ever have a __block variable with an
+        // array type; should we diagnose that here?
+        // Regardless, we don't want to ignore array nesting when
+        // constructing this copy.
+        if (VD->getType()->isStructureOrClassType())
+          checkEscapingByref(VD, S);
       }
       // Check whether the captured variable is or contains an object of
       // non-trivial C union type.
@@ -1939,18 +1998,6 @@
                                 Sema::NTCUK_Destruct|Sema::NTCUK_Copy);
     }
   }
-
-  for (VarDecl *VD : FSI.ByrefBlockVars) {
-    // __block variables might require us to capture a copy-initializer.
-    if (!VD->isEscapingByref())
-      continue;
-    // It's currently invalid to ever have a __block variable with an
-    // array type; should we diagnose that here?
-    // Regardless, we don't want to ignore array nesting when
-    // constructing this copy.
-    if (VD->getType()->isStructureOrClassType())
-      checkEscapingByref(VD, S);
-  }
 }

 /// Pop a function (or block or lambda or captured region) scope from the stack.
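For reference, here is a minimal sketch (not part of this patch) of the source pattern the Sema change above detects: a __block variable of record type whose own initializer passes a block that captures it. The names Widget and make_widget are hypothetical; the snippet assumes Clang's blocks extension and would be compiled with -fblocks -Wimplicit-block-var-alloc.

// Hypothetical example, not part of the patch.
// Compile with: clang -fblocks -Wimplicit-block-var-alloc -c example.cpp
struct Widget {
  int data[16];
};

// Takes a block that yields a pointer to the widget being constructed.
Widget make_widget(Widget *(^get)());

void demo() {
  // 'w' is captured by the block inside its own initializer, so it must
  // already live at its final (heap) address before the initializer runs;
  // with this patch, Sema sets CapturedByOwnInit and the new
  // -Wimplicit-block-var-alloc warning fires on this declaration.
  __block Widget w = make_widget(^{ return &w; });
}
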
diff --git a/clang/lib/Serialization/ASTReaderDecl.cpp b/clang/lib/Serialization/ASTReaderDecl.cpp
--- a/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -1423,6 +1423,7 @@
     VD->NonParmVarDeclBits.PreviousDeclInSameBlockScope = Record.readInt();
     VD->NonParmVarDeclBits.ImplicitParamKind = Record.readInt();
     VD->NonParmVarDeclBits.EscapingByref = Record.readInt();
+    VD->NonParmVarDeclBits.CapturedByOwnInit = Record.readInt();
   }
   auto VarLinkage = Linkage(Record.readInt());
   VD->setCachedLinkage(VarLinkage);
diff --git a/clang/lib/Serialization/ASTWriterDecl.cpp b/clang/lib/Serialization/ASTWriterDecl.cpp
--- a/clang/lib/Serialization/ASTWriterDecl.cpp
+++ b/clang/lib/Serialization/ASTWriterDecl.cpp
@@ -1003,6 +1003,7 @@
     else
      Record.push_back(0);
     Record.push_back(D->isEscapingByref());
+    Record.push_back(D->isCapturedByOwnInit());
   }

   Record.push_back(D->getLinkageInternal());
@@ -2185,13 +2186,14 @@
   Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isNRVOVariable
   Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isCXXForRangeDecl
   Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isObjCForDecl
-  Abv->Add(BitCodeAbbrevOp(0));                         // isInline
-  Abv->Add(BitCodeAbbrevOp(0));                         // isInlineSpecified
-  Abv->Add(BitCodeAbbrevOp(0));                         // isConstexpr
-  Abv->Add(BitCodeAbbrevOp(0));                         // isInitCapture
-  Abv->Add(BitCodeAbbrevOp(0));                         // isPrevDeclInSameScope
-  Abv->Add(BitCodeAbbrevOp(0));                         // ImplicitParamKind
-  Abv->Add(BitCodeAbbrevOp(0));                         // EscapingByref
+  Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isInline
+  Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isInlineSpecified
+  Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isConstexpr
+  Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isInitCapture
+  Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isPrevDeclInSameScope
+  Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // ImplicitParamKind
+  Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // EscapingByref
+  Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // CapturedByOwnInit
   Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // Linkage
   Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // HasConstant*
   Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // VarKind (local enum)
diff --git a/clang/test/CodeGenCXX/block-capture.cpp b/clang/test/CodeGenCXX/block-capture.cpp
--- a/clang/test/CodeGenCXX/block-capture.cpp
+++ b/clang/test/CodeGenCXX/block-capture.cpp
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -x c++ -std=c++11 -fblocks -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x c++ -std=c++11 -fblocks -Wimplicit-block-var-alloc -verify -emit-llvm %s -o - | \
+// RUN:   FileCheck --implicit-check-not "call{{.*}}_ZN3Big" %s

 // CHECK: %struct.__block_byref_baz = type { i8*, %struct.__block_byref_baz*, i32, i32, i32 }
 // CHECK: [[baz:%[0-9a-z_]*]] = alloca %struct.__block_byref_baz
@@ -8,8 +9,66 @@
 // CHECK: [[disposable:%[0-9a-z_]*]] = bitcast %struct.__block_byref_baz* [[baz]] to i8*
 // CHECK: call void @_Block_object_dispose(i8* [[disposable]]

-int main() {
+int a() {
   __block int baz = [&]() { return 0; }();
-  ^{ (void)baz; };
+  (void)^{
+    (void)baz;
+  };
   return 0;
 }
+
+class Big {
+public:
+  Big(const Big &);
+  ~Big();
+
+private:
+  Big();
+  int s[100];
+};
+Big get_Big(Big * (^)());
+
+// CHECK: define void @_Z17implicit_copy_bigv
+// CHECK: call void @_Block_object_assign(i8* {{.*}}, i8* {{.*}}, i32 8)
+// CHECK: call void @_Block_object_dispose(i8* {{.*}}, i32 8)
+// CHECK: %bar1 = getelementptr inbounds %struct.__block_byref_bar, %struct.__block_byref_bar* %{{.*}}, i32 0, i32 6
+// CHECK: call void @_Z7get_BigU13block_pointerFP3BigvE(%class.Big* sret(%class.Big) align 4 %bar1
+// CHECK: call void @_Block_object_dispose
+
+// CHECK: define internal void @__Block_byref_object_copy_
+// (no call to copy constructor)
+
+// CHECK: define internal void @__Block_byref_object_dispose_
+// CHECK: call void @_ZN3BigD1Ev
+
+void implicit_copy_big() {
+  // expected-warning@+1{{variable 'bar' will be initialized in a heap allocation}}
+  __block Big bar =
+      ({ // Make sure this works with statement expressions.
+        get_Big(
+            // expected-note@+1{{because it is captured by a block used in its own initializer}}
+            ^{
+              return &bar;
+            });
+      });
+}
+
+struct Small {
+  int s[2];
+};
+extern Small get_Small(_Atomic(Small) * (^)());
+
+// CHECK: %bay1 = getelementptr inbounds %struct.__block_byref_bay, %struct.__block_byref_bay* %{{.*}}, i32 0, i32 4
+// CHECK: [[call:%[0-9a-z_\.]*]] = call i64 @_Z9get_SmallU13block_pointerFPU7_Atomic5SmallvE(%struct.Small* ()* %{{.*}})
+// CHECK: [[dive:%[0-9a-z_\.]*]] = getelementptr inbounds %struct.Small, %struct.Small* %bay1, i32 0, i32 0
+// CHECK: [[cast:%[0-9]*]] = bitcast [2 x i32]* [[dive]] to i64*
+// CHECK: store i64 [[call]], i64* [[cast]], align 8
+
+void implicit_copy_small_atomic() {
+  // expected-warning@+1{{variable 'bay' will be initialized in a heap allocation}}
+  __block _Atomic(Small) bay = get_Small(
+      // expected-note@+1{{because it is captured by a block used in its own initializer}}
+      ^{
+        return &bay;
+      });
+}
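
For contrast, a hedged sketch (also not part of the patch) of a case the new warning should not flag: the capturing block only appears after the variable's initializer, so the variable can still be initialized on the stack and moved to the heap by the ordinary byref copy helper. The Pair and no_warning_expected names are illustrative only.

// Illustrative only; compile with: clang -fblocks -Wimplicit-block-var-alloc
struct Pair {
  int v[2];
};

void no_warning_expected() {
  __block Pair p = {{1, 2}};           // initializer references no block
  void (^use)(void) = ^{ (void)p; };   // capture happens after initialization
  use();                               // no -Wimplicit-block-var-alloc here
}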