diff --git a/clang/lib/CodeGen/CGBlocks.h b/clang/lib/CodeGen/CGBlocks.h --- a/clang/lib/CodeGen/CGBlocks.h +++ b/clang/lib/CodeGen/CGBlocks.h @@ -257,10 +257,6 @@ // This could be zero if no forced alignment is required. CharUnits BlockHeaderForcedGapSize; - /// An instruction which dominates the full-expression that the - /// block is inside. - llvm::Instruction *DominatingIP; - /// The next block in the block-info chain. Invalid if this block /// info is not part of the CGF's block-info chain, which is true /// if it corresponds to a global block or a block whose expression diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp --- a/clang/lib/CodeGen/CGBlocks.cpp +++ b/clang/lib/CodeGen/CGBlocks.cpp @@ -36,7 +36,7 @@ : Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false), HasCXXObject(false), UsesStret(false), HasCapturedVariableLayout(false), CapturesNonExternalType(false), LocalAddress(Address::invalid()), - StructureType(nullptr), Block(block), DominatingIP(nullptr) { + StructureType(nullptr), Block(block) { // Skip asm prefix, if any. 'name' is usually taken directly from // the mangled name of the enclosing function. @@ -775,151 +775,23 @@ llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true); } -/// Enter the scope of a block. This should be run at the entrance to -/// a full-expression so that the block's cleanups are pushed at the -/// right place in the stack. -static void enterBlockScope(CodeGenFunction &CGF, BlockDecl *block) { - assert(CGF.HaveInsertPoint()); - - // Allocate the block info and place it at the head of the list. - CGBlockInfo &blockInfo = - *new CGBlockInfo(block, CGF.CurFn->getName()); - blockInfo.NextBlockInfo = CGF.FirstBlockInfo; - CGF.FirstBlockInfo = &blockInfo; - - // Compute information about the layout, etc., of this block, - // pushing cleanups as necessary. - computeBlockInfo(CGF.CGM, &CGF, blockInfo); - - // Nothing else to do if it can be global. 
- if (blockInfo.CanBeGlobal) return; - - // Make the allocation for the block. - blockInfo.LocalAddress = CGF.CreateTempAlloca(blockInfo.StructureType, - blockInfo.BlockAlign, "block"); - - // If there are cleanups to emit, enter them (but inactive). - if (!blockInfo.NeedsCopyDispose) return; - - // Walk through the captures (in order) and find the ones not - // captured by constant. - for (const auto &CI : block->captures()) { - // Ignore __block captures; there's nothing special in the - // on-stack block that we need to do for them. - if (CI.isByRef()) continue; - - // Ignore variables that are constant-captured. - const VarDecl *variable = CI.getVariable(); - CGBlockInfo::Capture &capture = blockInfo.getCapture(variable); - if (capture.isConstant()) continue; - - // Ignore objects that aren't destructed. - QualType VT = getCaptureFieldType(CGF, CI); - QualType::DestructionKind dtorKind = VT.isDestructedType(); - if (dtorKind == QualType::DK_none) continue; - - CodeGenFunction::Destroyer *destroyer; - - // Block captures count as local values and have imprecise semantics. - // They also can't be arrays, so need to worry about that. - // - // For const-qualified captures, emit clang.arc.use to ensure the captured - // object doesn't get released while we are still depending on its validity - // within the block. - if (VT.isConstQualified() && - VT.getObjCLifetime() == Qualifiers::OCL_Strong && - CGF.CGM.getCodeGenOpts().OptimizationLevel != 0) { - assert(CGF.CGM.getLangOpts().ObjCAutoRefCount && - "expected ObjC ARC to be enabled"); - destroyer = CodeGenFunction::emitARCIntrinsicUse; - } else if (dtorKind == QualType::DK_objc_strong_lifetime) { - destroyer = CodeGenFunction::destroyARCStrongImprecise; - } else { - destroyer = CGF.getDestroyer(dtorKind); - } - - // GEP down to the address. - Address addr = - CGF.Builder.CreateStructGEP(blockInfo.LocalAddress, capture.getIndex()); - - // We can use that GEP as the dominating IP. 
- if (!blockInfo.DominatingIP) - blockInfo.DominatingIP = cast(addr.getPointer()); - - CleanupKind cleanupKind = InactiveNormalCleanup; - bool useArrayEHCleanup = CGF.needsEHCleanup(dtorKind); - if (useArrayEHCleanup) - cleanupKind = InactiveNormalAndEHCleanup; - - CGF.pushDestroy(cleanupKind, addr, VT, - destroyer, useArrayEHCleanup); - - // Remember where that cleanup was. - capture.setCleanup(CGF.EHStack.stable_begin()); - } -} - -/// Enter a full-expression with a non-trivial number of objects to -/// clean up. -void CodeGenFunction::enterNonTrivialFullExpression(const FullExpr *E) { - if (const auto EWC = dyn_cast(E)) { - assert(EWC->getNumObjects() != 0); - for (const ExprWithCleanups::CleanupObject &C : EWC->getObjects()) - if (auto *BD = C.dyn_cast()) - enterBlockScope(*this, BD); - } -} - -/// Find the layout for the given block in a linked list and remove it. -static CGBlockInfo *findAndRemoveBlockInfo(CGBlockInfo **head, - const BlockDecl *block) { - while (true) { - assert(head && *head); - CGBlockInfo *cur = *head; - - // If this is the block we're looking for, splice it out of the list. - if (cur->getBlockDecl() == block) { - *head = cur->NextBlockInfo; - return cur; - } - - head = &cur->NextBlockInfo; - } -} - -/// Destroy a chain of block layouts. -void CodeGenFunction::destroyBlockInfos(CGBlockInfo *head) { - assert(head && "destroying an empty chain"); - do { - CGBlockInfo *cur = head; - head = cur->NextBlockInfo; - delete cur; - } while (head != nullptr); -} - /// Emit a block literal expression in the current function. llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) { // If the block has no captures, we won't have a pre-computed // layout for it. - if (!blockExpr->getBlockDecl()->hasCaptures()) { + if (!blockExpr->getBlockDecl()->hasCaptures()) // The block literal is emitted as a global variable, and the block invoke // function has to be extracted from its initializer. 
- if (llvm::Constant *Block = CGM.getAddrOfGlobalBlockIfEmitted(blockExpr)) { + if (llvm::Constant *Block = CGM.getAddrOfGlobalBlockIfEmitted(blockExpr)) return Block; - } - CGBlockInfo blockInfo(blockExpr->getBlockDecl(), CurFn->getName()); - computeBlockInfo(CGM, this, blockInfo); - blockInfo.BlockExpression = blockExpr; - return EmitBlockLiteral(blockInfo); - } - - // Find the block info for this block and take ownership of it. - std::unique_ptr blockInfo; - blockInfo.reset(findAndRemoveBlockInfo(&FirstBlockInfo, - blockExpr->getBlockDecl())); - blockInfo->BlockExpression = blockExpr; - return EmitBlockLiteral(*blockInfo); + CGBlockInfo blockInfo(blockExpr->getBlockDecl(), CurFn->getName()); + computeBlockInfo(CGM, this, blockInfo); + blockInfo.BlockExpression = blockExpr; + if (!blockInfo.CanBeGlobal) + blockInfo.LocalAddress = CreateTempAlloca(blockInfo.StructureType, + blockInfo.BlockAlign, "block"); + return EmitBlockLiteral(blockInfo); } llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) { @@ -1161,12 +1033,64 @@ /*captured by init*/ false); } - // Activate the cleanup if layout pushed one. - if (!CI.isByRef()) { - EHScopeStack::stable_iterator cleanup = capture.getCleanup(); - if (cleanup.isValid()) - ActivateCleanupBlock(cleanup, blockInfo.DominatingIP); + // Push a cleanup for the capture if necessary. + if (!blockInfo.NeedsCopyDispose) + continue; + + // Ignore __block captures; there's nothing special in the on-stack block + // that we need to do for them. + if (CI.isByRef()) + continue; + + // Ignore objects that aren't destructed. + QualType::DestructionKind dtorKind = type.isDestructedType(); + if (dtorKind == QualType::DK_none) + continue; + + CodeGenFunction::Destroyer *destroyer; + + // Block captures count as local values and have imprecise semantics. + // They also can't be arrays, so need to worry about that. 
+ // + // For const-qualified captures, emit clang.arc.use to ensure the captured + // object doesn't get released while we are still depending on its validity + // within the block. + if (type.isConstQualified() && + type.getObjCLifetime() == Qualifiers::OCL_Strong && + CGM.getCodeGenOpts().OptimizationLevel != 0) { + assert(CGM.getLangOpts().ObjCAutoRefCount && + "expected ObjC ARC to be enabled"); + destroyer = emitARCIntrinsicUse; + } else if (dtorKind == QualType::DK_objc_strong_lifetime) { + destroyer = destroyARCStrongImprecise; + } else { + destroyer = getDestroyer(dtorKind); } + + CleanupKind cleanupKind = NormalCleanup; + bool useArrayEHCleanup = needsEHCleanup(dtorKind); + if (useArrayEHCleanup) + cleanupKind = NormalAndEHCleanup; + + // Extend the lifetime of the capture to the end of the scope enclosing the + // block expression except when the block decl is in the list of RetExpr's + // cleanup objects, in which case its lifetime ends after the full + // expression. + auto IsBlockDeclInRetExpr = [&]() { + auto *EWC = llvm::dyn_cast_or_null(RetExpr); + if (EWC) + for (auto &C : EWC->getObjects()) + if (auto *BD = C.dyn_cast()) + if (BD == blockDecl) + return true; + return false; + }; + + if (IsBlockDeclInRetExpr()) + pushDestroy(cleanupKind, blockField, type, destroyer, useArrayEHCleanup); + else + pushLifetimeExtendedDestroy(cleanupKind, blockField, type, destroyer, + useArrayEHCleanup); } // Cast to the converted block-pointer type, which happens (somewhat diff --git a/clang/lib/CodeGen/CGCleanup.h b/clang/lib/CodeGen/CGCleanup.h --- a/clang/lib/CodeGen/CGCleanup.h +++ b/clang/lib/CodeGen/CGCleanup.h @@ -284,8 +284,8 @@ return sizeof(EHCleanupScope) + CleanupBits.CleanupSize; } - EHCleanupScope(bool isNormal, bool isEH, bool isActive, - unsigned cleanupSize, unsigned fixupDepth, + EHCleanupScope(bool isNormal, bool isEH, unsigned cleanupSize, + unsigned fixupDepth, EHScopeStack::stable_iterator enclosingNormal, EHScopeStack::stable_iterator 
enclosingEH) : EHScope(EHScope::Cleanup, enclosingEH), @@ -293,7 +293,7 @@ ActiveFlag(nullptr), ExtInfo(nullptr), FixupDepth(fixupDepth) { CleanupBits.IsNormalCleanup = isNormal; CleanupBits.IsEHCleanup = isEH; - CleanupBits.IsActive = isActive; + CleanupBits.IsActive = true; CleanupBits.IsLifetimeMarker = false; CleanupBits.TestFlagInNormalCleanup = false; CleanupBits.TestFlagInEHCleanup = false; diff --git a/clang/lib/CodeGen/CGCleanup.cpp b/clang/lib/CodeGen/CGCleanup.cpp --- a/clang/lib/CodeGen/CGCleanup.cpp +++ b/clang/lib/CodeGen/CGCleanup.cpp @@ -179,12 +179,10 @@ char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size)); bool IsNormalCleanup = Kind & NormalCleanup; bool IsEHCleanup = Kind & EHCleanup; - bool IsActive = !(Kind & InactiveCleanup); bool IsLifetimeMarker = Kind & LifetimeMarker; EHCleanupScope *Scope = new (Buffer) EHCleanupScope(IsNormalCleanup, IsEHCleanup, - IsActive, Size, BranchFixups.size(), InnermostNormalCleanup, diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp --- a/clang/lib/CodeGen/CGDecl.cpp +++ b/clang/lib/CodeGen/CGDecl.cpp @@ -762,10 +762,9 @@ // If we're emitting a value with lifetime, we have to do the // initialization *before* we leave the cleanup scopes. 
- if (const FullExpr *fe = dyn_cast(init)) { - enterFullExpression(fe); + if (const FullExpr *fe = dyn_cast(init)) init = fe->getSubExpr(); - } + CodeGenFunction::RunCleanupsScope Scope(*this); // We have to maintain the illusion that the variable is diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp --- a/clang/lib/CodeGen/CGExpr.cpp +++ b/clang/lib/CodeGen/CGExpr.cpp @@ -1330,7 +1330,6 @@ case Expr::ExprWithCleanupsClass: { const auto *cleanups = cast(E); - enterFullExpression(cleanups); RunCleanupsScope Scope(*this); LValue LV = EmitLValue(cleanups->getSubExpr()); if (LV.isSimple()) { diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp --- a/clang/lib/CodeGen/CGExprAgg.cpp +++ b/clang/lib/CodeGen/CGExprAgg.cpp @@ -1349,7 +1349,6 @@ } void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { - CGF.enterFullExpression(E); CodeGenFunction::RunCleanupsScope cleanups(CGF); Visit(E->getSubExpr()); } diff --git a/clang/lib/CodeGen/CGExprComplex.cpp b/clang/lib/CodeGen/CGExprComplex.cpp --- a/clang/lib/CodeGen/CGExprComplex.cpp +++ b/clang/lib/CodeGen/CGExprComplex.cpp @@ -222,7 +222,6 @@ return Visit(DIE->getExpr()); } ComplexPairTy VisitExprWithCleanups(ExprWithCleanups *E) { - CGF.enterFullExpression(E); CodeGenFunction::RunCleanupsScope Scope(CGF); ComplexPairTy Vals = Visit(E->getSubExpr()); // Defend against dominance problems caused by jumps out of expression diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp --- a/clang/lib/CodeGen/CGExprScalar.cpp +++ b/clang/lib/CodeGen/CGExprScalar.cpp @@ -2342,7 +2342,6 @@ } Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { - CGF.enterFullExpression(E); CodeGenFunction::RunCleanupsScope Scope(CGF); Value *V = Visit(E->getSubExpr()); // Defend against dominance problems caused by jumps out of expression diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp --- a/clang/lib/CodeGen/CGObjC.cpp +++ 
b/clang/lib/CodeGen/CGObjC.cpp @@ -3256,7 +3256,6 @@ llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) { // The retain needs to happen within the full-expression. if (const ExprWithCleanups *cleanups = dyn_cast(e)) { - enterFullExpression(cleanups); RunCleanupsScope scope(*this); return EmitARCRetainScalarExpr(cleanups->getSubExpr()); } @@ -3272,7 +3271,6 @@ CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) { // The retain needs to happen within the full-expression. if (const ExprWithCleanups *cleanups = dyn_cast(e)) { - enterFullExpression(cleanups); RunCleanupsScope scope(*this); return EmitARCRetainAutoreleaseScalarExpr(cleanups->getSubExpr()); } @@ -3383,7 +3381,6 @@ llvm::Value *CodeGenFunction::EmitARCUnsafeUnretainedScalarExpr(const Expr *e) { // Look through full-expressions. if (const ExprWithCleanups *cleanups = dyn_cast(e)) { - enterFullExpression(cleanups); RunCleanupsScope scope(*this); return emitARCUnsafeUnretainedScalarExpr(*this, cleanups->getSubExpr()); } diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp --- a/clang/lib/CodeGen/CGStmt.cpp +++ b/clang/lib/CodeGen/CGStmt.cpp @@ -1070,6 +1070,19 @@ EmitBranchThroughCleanup(ReturnBlock); } +namespace { +// RAII struct used to save and restore a return statement's result expression. +struct SaveRetExprRAII { + SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF) + : OldRetExpr(CGF.RetExpr), CGF(CGF) { + CGF.RetExpr = RetExpr; + } + ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; } + const Expr *OldRetExpr; + CodeGenFunction &CGF; +}; +} // namespace + /// EmitReturnStmt - Note that due to GCC extensions, this can have an operand /// if the function returns void, or may be missing one if the function returns /// non-void. Fun stuff :). @@ -1095,15 +1108,19 @@ // Emit the result value, even if unused, to evaluate the side effects. 
const Expr *RV = S.getRetValue(); - // Treat block literals in a return expression as if they appeared - // in their own scope. This permits a small, easily-implemented - // exception to our over-conservative rules about not jumping to - // statements following block literals with non-trivial cleanups. + // Record the result expression of the return statement. The recorded + // expression is used to determine whether a block capture's lifetime should + // end at the end of the full expression as opposed to the end of the scope + // enclosing the block expression. + // + // This permits a small, easily-implemented exception to our over-conservative + // rules about not jumping to statements following block literals with + // non-trivial cleanups. + SaveRetExprRAII SaveRetExpr(RV, *this); + RunCleanupsScope cleanupScope(*this); - if (const FullExpr *fe = dyn_cast_or_null(RV)) { - enterFullExpression(fe); + if (const FullExpr *fe = dyn_cast_or_null(RV)) RV = fe->getSubExpr(); - } // FIXME: Clean this up by using an LValue for ReturnTemp, // EmitStoreThroughLValue, and EmitAnyExpr. diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp --- a/clang/lib/CodeGen/CGStmtOpenMP.cpp +++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp @@ -5259,15 +5259,6 @@ } const Stmt *CS = S.getInnermostCapturedStmt()->IgnoreContainers(); - if (const auto *FE = dyn_cast(CS)) - enterFullExpression(FE); - // Processing for statements under 'atomic capture'. - if (const auto *Compound = dyn_cast(CS)) { - for (const Stmt *C : Compound->body()) { - if (const auto *FE = dyn_cast(C)) - enterFullExpression(FE); - } - } auto &&CodeGen = [&S, Kind, AO, CS](CodeGenFunction &CGF, PrePostActionTy &) { diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h --- a/clang/lib/CodeGen/CodeGenFunction.h +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -444,6 +444,10 @@ /// This is invalid if sret is not in use. 
Address ReturnValuePointer = Address::invalid(); + /// If a return statement is being visited, this holds the return statement's + /// result expression. + const Expr *RetExpr = nullptr; + /// Return true if a label was seen in the current scope. bool hasLabelBeenSeenInCurrentScope() const { if (CurLexicalScope) @@ -648,9 +652,6 @@ unsigned NextCleanupDestIndex = 1; - /// FirstBlockInfo - The head of a singly-linked-list of block layouts. - CGBlockInfo *FirstBlockInfo = nullptr; - /// EHResumeBlock - Unified block containing a call to llvm.eh.resume. llvm::BasicBlock *EHResumeBlock = nullptr; @@ -1925,7 +1926,6 @@ /// information about the block, including the block invoke function, the /// captured variables, etc. llvm::Value *EmitBlockLiteral(const BlockExpr *); - static void destroyBlockInfos(CGBlockInfo *info); llvm::Function *GenerateBlockFunction(GlobalDecl GD, const CGBlockInfo &Info, @@ -4227,14 +4227,6 @@ void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp); - void enterFullExpression(const FullExpr *E) { - if (const auto *EWC = dyn_cast(E)) - if (EWC->getNumObjects() == 0) - return; - enterNonTrivialFullExpression(E); - } - void enterNonTrivialFullExpression(const FullExpr *E); - void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true); RValue EmitAtomicExpr(AtomicExpr *E); diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp --- a/clang/lib/CodeGen/CodeGenFunction.cpp +++ b/clang/lib/CodeGen/CodeGenFunction.cpp @@ -79,12 +79,6 @@ CodeGenFunction::~CodeGenFunction() { assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup"); - // If there are any unclaimed block infos, go ahead and destroy them - // now. This can happen if IR-gen gets clever and skips evaluating - // something. 
- if (FirstBlockInfo) - destroyBlockInfos(FirstBlockInfo); - if (getLangOpts().OpenMP && CurFn) CGM.getOpenMPRuntime().functionFinished(*this); diff --git a/clang/lib/CodeGen/EHScopeStack.h b/clang/lib/CodeGen/EHScopeStack.h --- a/clang/lib/CodeGen/EHScopeStack.h +++ b/clang/lib/CodeGen/EHScopeStack.h @@ -85,11 +85,6 @@ NormalAndEHCleanup = EHCleanup | NormalCleanup, - InactiveCleanup = 0x4, - InactiveEHCleanup = EHCleanup | InactiveCleanup, - InactiveNormalCleanup = NormalCleanup | InactiveCleanup, - InactiveNormalAndEHCleanup = NormalAndEHCleanup | InactiveCleanup, - LifetimeMarker = 0x8, NormalEHLifetimeMarker = LifetimeMarker | NormalAndEHCleanup, }; diff --git a/clang/test/CodeGenCXX/blocks-cxx11.cpp b/clang/test/CodeGenCXX/blocks-cxx11.cpp --- a/clang/test/CodeGenCXX/blocks-cxx11.cpp +++ b/clang/test/CodeGenCXX/blocks-cxx11.cpp @@ -103,12 +103,11 @@ // CHECK-LABEL: define internal void @"_ZZN20test_block_in_lambda4testENS_1AEENK3$_0clEv"( // CHECK: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]], align 8 // CHECK: [[THIS:%.*]] = load [[LAMBDA_T:%.*]]*, [[LAMBDA_T:%.*]]** - // CHECK: [[TO_DESTROY:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 - // CHECK: [[T0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 + // CHECK: [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[LAMBDA_T]], [[LAMBDA_T]]* [[THIS]], i32 0, i32 0 - // CHECK-NEXT: call void @_ZN20test_block_in_lambda1AC1ERKS0_({{.*}}* [[T0]], {{.*}}* nonnull align {{[0-9]+}} dereferenceable({{[0-9]+}}) [[T1]]) + // CHECK-NEXT: call void @_ZN20test_block_in_lambda1AC1ERKS0_({{.*}}* [[BLOCK_CAPTURED]], {{.*}}* nonnull align {{[0-9]+}} dereferenceable({{[0-9]+}}) [[T1]]) // CHECK-NEXT: [[T0:%.*]] = bitcast [[BLOCK_T]]* [[BLOCK]] to void ()* // CHECK-NEXT: call void @_ZN20test_block_in_lambda9takeBlockEU13block_pointerFvvE(void ()* [[T0]]) - // 
CHECK-NEXT: call void @_ZN20test_block_in_lambda1AD1Ev({{.*}}* [[TO_DESTROY]]) + // CHECK-NEXT: call void @_ZN20test_block_in_lambda1AD1Ev({{.*}}* [[BLOCK_CAPTURED]]) // CHECK-NEXT: ret void } diff --git a/clang/test/CodeGenCXX/blocks.cpp b/clang/test/CodeGenCXX/blocks.cpp --- a/clang/test/CodeGenCXX/blocks.cpp +++ b/clang/test/CodeGenCXX/blocks.cpp @@ -156,10 +156,10 @@ // CHECK-NEXT: [[B:%.*]] = alloca void ()*, align 8 // CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:.*]], align 8 // CHECK-NEXT: [[CLEANUP_ACTIVE:%.*]] = alloca i1 + // CHECK-NEXT: [[COND_CLEANUP_SAVE:%.*]] = alloca [[A]]*, align 8 // CHECK-NEXT: [[T0:%.*]] = zext i1 // CHECK-NEXT: store i8 [[T0]], i8* [[COND]], align 1 // CHECK-NEXT: call void @_ZN5test51AC1Ev([[A]]* [[X]]) - // CHECK-NEXT: [[CLEANUP_ADDR:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK-NEXT: [[T0:%.*]] = load i8, i8* [[COND]], align 1 // CHECK-NEXT: [[T1:%.*]] = trunc i8 [[T0]] to i1 // CHECK-NEXT: store i1 false, i1* [[CLEANUP_ACTIVE]] @@ -169,6 +169,7 @@ // CHECK: [[CAPTURE:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK-NEXT: call void @_ZN5test51AC1ERKS0_([[A]]* [[CAPTURE]], [[A]]* nonnull align {{[0-9]+}} dereferenceable({{[0-9]+}}) [[X]]) // CHECK-NEXT: store i1 true, i1* [[CLEANUP_ACTIVE]] + // CHECK-NEXT: store [[A]]* [[CAPTURE]], [[A]]** [[COND_CLEANUP_SAVE]], align 8 // CHECK-NEXT: bitcast [[BLOCK_T]]* [[BLOCK]] to void ()* // CHECK-NEXT: br label // CHECK: br label @@ -178,7 +179,8 @@ // CHECK-NEXT: call void @_ZN5test511doWithBlockEU13block_pointerFvvE( // CHECK-NEXT: [[T0:%.*]] = load i1, i1* [[CLEANUP_ACTIVE]] // CHECK-NEXT: br i1 [[T0]] - // CHECK: call void @_ZN5test51AD1Ev([[A]]* [[CLEANUP_ADDR]]) + // CHECK: [[T3:%.*]] = load [[A]]*, [[A]]** [[COND_CLEANUP_SAVE]], align 8 + // CHECK-NEXT: call void @_ZN5test51AD1Ev([[A]]* [[T3]]) // CHECK-NEXT: br label // CHECK: call void @_ZN5test51AD1Ev([[A]]* [[X]]) // CHECK-NEXT: ret void 
diff --git a/clang/test/CodeGenObjC/arc-blocks.m b/clang/test/CodeGenObjC/arc-blocks.m --- a/clang/test/CodeGenObjC/arc-blocks.m +++ b/clang/test/CodeGenObjC/arc-blocks.m @@ -34,14 +34,13 @@ // CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]], // CHECK-NEXT: [[PARM:%.*]] = call i8* @llvm.objc.retain(i8* {{%.*}}) // CHECK-NEXT: store i8* [[PARM]], i8** [[X]] -// CHECK-NEXT: [[SLOTREL:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK: [[SLOT:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[X]], // CHECK-NEXT: [[T1:%.*]] = call i8* @llvm.objc.retain(i8* [[T0]]) // CHECK-NEXT: store i8* [[T1]], i8** [[SLOT]], // CHECK-NEXT: bitcast // CHECK-NEXT: call void @test2_helper( -// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[SLOTREL]] +// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[SLOT]] // CHECK-NEXT: call void @llvm.objc.release(i8* [[T0]]) [[NUW]], !clang.imprecise_release // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[X]] // CHECK-NEXT: call void @llvm.objc.release(i8* [[T0]]) [[NUW]], !clang.imprecise_release @@ -296,13 +295,12 @@ // CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]], // CHECK: store // CHECK-NEXT: store -// CHECK: [[D0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK: [[T0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK-NEXT: [[T1:%.*]] = load [[TEST8]]*, [[TEST8]]** [[SELF]], // CHECK-NEXT: store %0* [[T1]], %0** [[T0]] // CHECK-NEXT: bitcast [[BLOCK_T]]* [[BLOCK]] to // CHECK: call void @test8_helper( -// CHECK-NEXT: [[T2:%.*]] = load [[TEST8]]*, [[TEST8]]** [[D0]] +// CHECK-NEXT: [[T2:%.*]] = load [[TEST8]]*, [[TEST8]]** [[T0]] // CHECK-NEXT: call void (...) 
@llvm.objc.clang.arc.use([[TEST8]]* [[T2]]) // CHECK: ret void @@ -498,11 +496,11 @@ // CHECK-NEXT: [[B:%.*]] = alloca void ()*, align 8 // CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:.*]], align 8 // CHECK-NEXT: [[CLEANUP_ACTIVE:%.*]] = alloca i1 + // CHECK-NEXT: [[COND_CLEANUP_SAVE:%.*]] = alloca i8**, // CHECK-NEXT: [[T0:%.*]] = call i8* @llvm.objc.retain(i8* {{%.*}}) // CHECK-NEXT: store i8* [[T0]], i8** [[X]], align 8 // CHECK-NEXT: [[BPTR1:%.*]] = bitcast void ()** [[B]] to i8* // CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 8, i8* [[BPTR1]]) - // CHECK-NEXT: [[CLEANUP_ADDR:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[X]], align 8 // CHECK-NEXT: [[T1:%.*]] = icmp ne i8* [[T0]], null // CHECK-NEXT: store i1 false, i1* [[CLEANUP_ACTIVE]] @@ -514,6 +512,7 @@ // CHECK-NEXT: [[T1:%.*]] = call i8* @llvm.objc.retain(i8* [[T0]]) // CHECK-NEXT: store i8* [[T1]], i8** [[CAPTURE]], align 8 // CHECK-NEXT: store i1 true, i1* [[CLEANUP_ACTIVE]] + // CHECK-NEXT: store i8** [[CAPTURE]], i8*** [[COND_CLEANUP_SAVE]], align 8 // CHECK-NEXT: bitcast [[BLOCK_T]]* [[BLOCK]] to void ()* // CHECK-NEXT: br label // CHECK: br label @@ -530,7 +529,8 @@ // CHECK-NEXT: [[T0:%.*]] = load i1, i1* [[CLEANUP_ACTIVE]] // CHECK-NEXT: br i1 [[T0]] - // CHECK: [[T0:%.*]] = load i8*, i8** [[CLEANUP_ADDR]] + // CHECK: [[V12:%.*]] = load i8**, i8*** [[COND_CLEANUP_SAVE]], align 8 + // CHECK: [[T0:%.*]] = load i8*, i8** [[V12]] // CHECK-NEXT: call void @llvm.objc.release(i8* [[T0]]) // CHECK-NEXT: br label @@ -562,7 +562,6 @@ // CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]], // CHECK-NEXT: [[BLKVARPTR1:%.*]] = bitcast void ()** [[BLKVAR]] to i8* // CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 8, i8* [[BLKVARPTR1]]) - // CHECK-NEXT: [[SLOTREL:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK-NEXT: store void ()* null, void ()** [[BLKVAR]], align 8 } @@ 
-588,37 +587,31 @@ // CHECK-NEXT: store i8* [[T0]], i8** [[SELF]], align // CHECK-NOT: objc_retain // CHECK-NOT: objc_release -// CHECK: [[DESTROY:%.*]] = getelementptr inbounds [[BLOCK]], [[BLOCK]]* [[B0]], i32 0, i32 5 -// CHECK-NOT: objc_retain -// CHECK-NOT: objc_release -// CHECK: [[T0:%.*]] = getelementptr inbounds [[BLOCK]], [[BLOCK]]* [[B0]], i32 0, i32 5 +// CHECK: [[CAPTURED:%.*]] = getelementptr inbounds [[BLOCK]], [[BLOCK]]* [[B0]], i32 0, i32 5 // CHECK-NEXT: [[T1:%.*]] = load i8*, i8** [[SELF]], align // CHECK-NEXT: [[T2:%.*]] = call i8* @llvm.objc.retain(i8* [[T1]]) -// CHECK-NEXT: store i8* [[T2]], i8** [[T0]], +// CHECK-NEXT: store i8* [[T2]], i8** [[CAPTURED]], // CHECK-NEXT: [[T0:%.*]] = bitcast [[BLOCK]]* [[B0]] to i8* ()* // CHECK-NEXT: [[T1:%.*]] = bitcast i8* ()* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = call i8* @llvm.objc.retainBlock(i8* [[T1]]) // CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to i8* ()* // CHECK-NEXT: store i8* ()* [[T3]], i8* ()** [[RET]] -// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[DESTROY]] +// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[CAPTURED]] // CHECK-NEXT: call void @llvm.objc.release(i8* [[T0]]) // CHECK-NEXT: store i32 // CHECK-NEXT: br label // CHECK-NOT: objc_retain // CHECK-NOT: objc_release -// CHECK: [[DESTROY:%.*]] = getelementptr inbounds [[BLOCK]], [[BLOCK]]* [[B1]], i32 0, i32 5 -// CHECK-NOT: objc_retain -// CHECK-NOT: objc_release -// CHECK: [[T0:%.*]] = getelementptr inbounds [[BLOCK]], [[BLOCK]]* [[B1]], i32 0, i32 5 +// CHECK: [[CAPTURED:%.*]] = getelementptr inbounds [[BLOCK]], [[BLOCK]]* [[B1]], i32 0, i32 5 // CHECK-NEXT: [[T1:%.*]] = load i8*, i8** [[SELF]], align // CHECK-NEXT: [[T2:%.*]] = call i8* @llvm.objc.retain(i8* [[T1]]) -// CHECK-NEXT: store i8* [[T2]], i8** [[T0]], +// CHECK-NEXT: store i8* [[T2]], i8** [[CAPTURED]], // CHECK-NEXT: [[T0:%.*]] = bitcast [[BLOCK]]* [[B1]] to i8* ()* // CHECK-NEXT: [[T1:%.*]] = bitcast i8* ()* [[T0]] to i8* // CHECK-NEXT: [[T2:%.*]] = call i8* 
@llvm.objc.retainBlock(i8* [[T1]]) // CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to i8* ()* // CHECK-NEXT: store i8* ()* [[T3]], i8* ()** [[RET]] -// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[DESTROY]] +// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[CAPTURED]] // CHECK-NEXT: call void @llvm.objc.release(i8* [[T0]]) // CHECK-NEXT: store i32 // CHECK-NEXT: br label @@ -629,7 +622,6 @@ // CHECK-UNOPT-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]], // CHECK-UNOPT-NEXT: store i8* null, i8** [[X]] // CHECK-UNOPT-NEXT: call void @llvm.objc.storeStrong(i8** [[X]], -// CHECK-UNOPT-NEXT: [[SLOTREL:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK-UNOPT: %[[BLOCK_DESCRIPTOR:.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 4 // CHECK-UNOPT: store %[[STRUCT_BLOCK_DESCRIPTOR]]* bitcast ({ i64, i64, i8*, i8*, i8*, i64 }* @[[BLOCK_DESCRIPTOR_TMP44]] to %[[STRUCT_BLOCK_DESCRIPTOR]]*), %[[STRUCT_BLOCK_DESCRIPTOR]]** %[[BLOCK_DESCRIPTOR]], align 8 // CHECK-UNOPT: [[SLOT:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 @@ -638,7 +630,7 @@ // CHECK-UNOPT-NEXT: store i8* [[T1]], i8** [[SLOT]], // CHECK-UNOPT-NEXT: bitcast // CHECK-UNOPT-NEXT: call void @test18_helper( -// CHECK-UNOPT-NEXT: call void @llvm.objc.storeStrong(i8** [[SLOTREL]], i8* null) [[NUW:#[0-9]+]] +// CHECK-UNOPT-NEXT: call void @llvm.objc.storeStrong(i8** [[SLOT]], i8* null) [[NUW:#[0-9]+]] // CHECK-UNOPT-NEXT: call void @llvm.objc.storeStrong(i8** [[X]], i8* null) [[NUW]] // CHECK-UNOPT-NEXT: ret void extern void test18_helper(id (^)(void)); @@ -672,7 +664,6 @@ // CHECK-NEXT: store void ()* [[T2]], void ()** [[B]] // Block setup. We skip most of this. Note the bare retain. 
-// CHECK-NEXT: [[SLOTREL:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK: %[[BLOCK_DESCRIPTOR:.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 4 // CHECK: store %[[STRUCT_BLOCK_DESCRIPTOR]]* bitcast ({ i64, i64, i8*, i8*, i8*, i64 }* @[[BLOCK_DESCRIPTOR_TMP48]] to %[[STRUCT_BLOCK_DESCRIPTOR]]*), %[[STRUCT_BLOCK_DESCRIPTOR]]** %[[BLOCK_DESCRIPTOR]], align 8 // CHECK: [[SLOT:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 @@ -688,7 +679,7 @@ test19_sink(^(int x) { b(); }); // Block teardown. -// CHECK-NEXT: [[T0:%.*]] = load void ()*, void ()** [[SLOTREL]] +// CHECK-NEXT: [[T0:%.*]] = load void ()*, void ()** [[SLOT]] // CHECK-NEXT: [[T1:%.*]] = bitcast void ()* [[T0]] to i8* // CHECK-NEXT: call void @llvm.objc.release(i8* [[T1]]) @@ -705,11 +696,10 @@ // CHECK-NEXT: [[BLOCK:%.*]] = alloca <[[BLOCKTY:.*]]> // CHECK-NEXT: [[RETAINEDX:%.*]] = call i8* @llvm.objc.retain(i8* %{{.*}}) // CHECK-NEXT: store i8* [[RETAINEDX]], i8** [[XADDR]] -// CHECK-NEXT: [[CAPTUREFIELD:%.*]] = getelementptr inbounds <[[BLOCKTY]]>, <[[BLOCKTY]]>* [[BLOCK]], i32 0, i32 5 // CHECK: [[BLOCKCAPTURED:%.*]] = getelementptr inbounds <[[BLOCKTY]]>, <[[BLOCKTY]]>* [[BLOCK]], i32 0, i32 5 // CHECK: [[CAPTURED:%.*]] = load i8*, i8** [[XADDR]] // CHECK: store i8* [[CAPTURED]], i8** [[BLOCKCAPTURED]] -// CHECK: [[CAPTURE:%.*]] = load i8*, i8** [[CAPTUREFIELD]] +// CHECK: [[CAPTURE:%.*]] = load i8*, i8** [[BLOCKCAPTURED]] // CHECK-NEXT: call void (...) 
@llvm.objc.clang.arc.use(i8* [[CAPTURE]]) // CHECK-NEXT: [[X:%.*]] = load i8*, i8** [[XADDR]] // CHECK-NEXT: call void @llvm.objc.release(i8* [[X]]) @@ -718,12 +708,11 @@ // CHECK-UNOPT-LABEL: define void @test20( // CHECK-UNOPT: [[XADDR:%.*]] = alloca i8* // CHECK-UNOPT-NEXT: [[BLOCK:%.*]] = alloca <[[BLOCKTY:.*]]> -// CHECK-UNOPT: [[CAPTUREFIELD:%.*]] = getelementptr inbounds <[[BLOCKTY]]>, <[[BLOCKTY]]>* [[BLOCK]], i32 0, i32 5 // CHECK-UNOPT: [[BLOCKCAPTURED:%.*]] = getelementptr inbounds <[[BLOCKTY]]>, <[[BLOCKTY]]>* [[BLOCK]], i32 0, i32 5 // CHECK-UNOPT: [[CAPTURED:%.*]] = load i8*, i8** [[XADDR]] // CHECK-UNOPT: [[RETAINED:%.*]] = call i8* @llvm.objc.retain(i8* [[CAPTURED]]) // CHECK-UNOPT: store i8* [[RETAINED]], i8** [[BLOCKCAPTURED]] -// CHECK-UNOPT: call void @llvm.objc.storeStrong(i8** [[CAPTUREFIELD]], i8* null) +// CHECK-UNOPT: call void @llvm.objc.storeStrong(i8** [[BLOCKCAPTURED]], i8* null) void test20_callee(void (^)()); void test20(const id x) { @@ -740,5 +729,23 @@ test21_callee(1, ^{ (void)x; }); } +// The lifetime of 'x', which is captured by the block in the statement +// expression, should be extended. + +// CHECK-COMMON-LABEL: define i8* @test22( +// CHECK-COMMON: %[[BLOCK_CAPTURED:.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %{{.*}}*, i8* }>, <{ i8*, i32, i32, i8*, %{{.*}}*, i8* }>* %{{.*}}, i32 0, i32 5 +// CHECK-COMMON: %[[V3:.*]] = call i8* @llvm.objc.retain(i8* %{{.*}}) +// CHECK-COMMON: store i8* %[[V3]], i8** %[[BLOCK_CAPTURED]], align 8 +// CHECK-COMMON: call void @test22_1() +// CHECK-UNOPT: call void @llvm.objc.storeStrong(i8** %[[BLOCK_CAPTURED]], i8* null) +// CHECK: %[[V15:.*]] = load i8*, i8** %[[BLOCK_CAPTURED]], align 8 +// CHECK: call void @llvm.objc.release(i8* %[[V15]]) + +id test22(int c, id x) { + extern id test22_0(void); + extern void test22_1(void); + return c ? 
test22_0() : ({ id (^b)(void) = ^{ return x; }; test22_1(); b(); }); +} + // CHECK: attributes [[NUW]] = { nounwind } // CHECK-UNOPT: attributes [[NUW]] = { nounwind } diff --git a/clang/test/CodeGenObjC/arc-foreach.m b/clang/test/CodeGenObjC/arc-foreach.m --- a/clang/test/CodeGenObjC/arc-foreach.m +++ b/clang/test/CodeGenObjC/arc-foreach.m @@ -65,14 +65,13 @@ // CHECK-LP64-NEXT: [[T3:%.*]] = load i8*, i8** [[T2]] // CHECK-LP64-NEXT: store i8* [[T3]], i8** [[X]] -// CHECK-LP64: [[D0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 -// CHECK-LP64: [[T0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 +// CHECK-LP64: [[CAPTURED:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK-LP64-NEXT: [[T1:%.*]] = load i8*, i8** [[X]] // CHECK-LP64-NEXT: [[T2:%.*]] = call i8* @llvm.objc.retain(i8* [[T1]]) -// CHECK-LP64-NEXT: store i8* [[T2]], i8** [[T0]] +// CHECK-LP64-NEXT: store i8* [[T2]], i8** [[CAPTURED]] // CHECK-LP64-NEXT: [[BLOCK1:%.*]] = bitcast [[BLOCK_T]]* [[BLOCK]] // CHECK-LP64-NEXT: call void @use_block(void ()* [[BLOCK1]]) -// CHECK-LP64-NEXT: call void @llvm.objc.storeStrong(i8** [[D0]], i8* null) +// CHECK-LP64-NEXT: call void @llvm.objc.storeStrong(i8** [[CAPTURED]], i8* null) // CHECK-LP64-NOT: call void (...) 
@llvm.objc.clang.arc.use( // CHECK-LP64-OPT: [[D0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i64 0, i32 5 @@ -118,12 +117,11 @@ // CHECK-LP64-NEXT: [[T3:%.*]] = load i8*, i8** [[T2]] // CHECK-LP64-NEXT: call i8* @llvm.objc.initWeak(i8** [[X]], i8* [[T3]]) -// CHECK-LP64: [[D0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK-LP64: [[T0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK-LP64-NEXT: call void @llvm.objc.copyWeak(i8** [[T0]], i8** [[X]]) // CHECK-LP64-NEXT: [[T1:%.*]] = bitcast [[BLOCK_T]]* [[BLOCK]] to // CHECK-LP64: call void @use_block -// CHECK-LP64-NEXT: call void @llvm.objc.destroyWeak(i8** [[D0]]) +// CHECK-LP64-NEXT: call void @llvm.objc.destroyWeak(i8** [[T0]]) // CHECK-LP64-NEXT: call void @llvm.objc.destroyWeak(i8** [[X]]) // rdar://problem/9817306 @@ -207,7 +205,6 @@ // CHECK-LP64: [[SELF_ADDR:%.*]] = alloca [[TY:%.*]]*, // CHECK-LP64: [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [[TY]]* }>, // CHECK-LP64: store [[TY]]* %self, [[TY]]** [[SELF_ADDR]] -// CHECK-LP64: [[T0:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [[TY]]* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [[TY]]* }>* [[BLOCK]], i32 0, i32 5 // CHECK-LP64: [[BC:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [[TY]]* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, [[TY]]* }>* [[BLOCK]], i32 0, i32 5 // CHECK-LP64: [[T1:%.*]] = load [[TY]]*, [[TY]]** [[SELF_ADDR]] // CHECK-LP64: [[T2:%.*]] = bitcast [[TY]]* [[T1]] to i8* @@ -218,7 +215,7 @@ // CHECK-LP64-OPT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]], // CHECK-LP64-OPT: [[T0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i64 0, i32 5 -// CHECK-LP64: [[T5:%.*]] = bitcast [[TY]]** [[T0]] to i8** +// CHECK-LP64: [[T5:%.*]] = bitcast [[TY]]** [[BC]] to i8** // CHECK-LP64: call void 
@llvm.objc.storeStrong(i8** [[T5]], i8* null) // CHECK-LP64-NOT: call void (...) @llvm.objc.clang.arc.use([[TY]]* [[T5]]) // CHECK-LP64: switch i32 {{%.*}}, label %[[UNREACHABLE:.*]] [ diff --git a/clang/test/CodeGenObjC/noescape.m b/clang/test/CodeGenObjC/noescape.m --- a/clang/test/CodeGenObjC/noescape.m +++ b/clang/test/CodeGenObjC/noescape.m @@ -95,7 +95,6 @@ // CHECK-NOARC: store i8* %[[B]], i8** %[[B_ADDR]], align 8 // CHECK-ARC: store i8* null, i8** %[[B_ADDR]], align 8 // CHECK-ARC: call void @llvm.objc.storeStrong(i8** %[[B_ADDR]], i8* %[[B]]) -// CHECK-ARC: %[[V0:.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>, <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>* %[[BLOCK]], i32 0, i32 5 // CHECK: %[[BLOCK_ISA:.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>, <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>* %[[BLOCK]], i32 0, i32 0 // CHECK: store i8* bitcast (i8** @_NSConcreteGlobalBlock to i8*), i8** %[[BLOCK_ISA]], align 8 // CHECK: %[[BLOCK_FLAGS:.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>, <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>* %[[BLOCK]], i32 0, i32 1 @@ -109,7 +108,7 @@ // CHECK-ARC: %[[V3:.*]] = call i8* @llvm.objc.retain(i8* %[[V2]]) // CHECK-ARC: store i8* %[[V3]], i8** %[[BLOCK_CAPTURED]], align 8 // CHECK: call void @noescapeFunc0( -// CHECK-ARC: call void @llvm.objc.storeStrong(i8** %[[V0]], i8* null) +// CHECK-ARC: call void @llvm.objc.storeStrong(i8** %[[BLOCK_CAPTURED]], i8* null) // CHECK-ARC: call void @llvm.objc.storeStrong(i8** %[[B_ADDR]], i8* null) // Non-escaping blocks don't need copy/dispose helper functions. 
diff --git a/clang/test/CodeGenOpenCL/cl20-device-side-enqueue.cl b/clang/test/CodeGenOpenCL/cl20-device-side-enqueue.cl --- a/clang/test/CodeGenOpenCL/cl20-device-side-enqueue.cl +++ b/clang/test/CodeGenOpenCL/cl20-device-side-enqueue.cl @@ -95,7 +95,7 @@ // COMMON: [[WAIT_EVNT:%[0-9]+]] = addrspacecast %opencl.clk_event_t{{.*}}** %event_wait_list to %opencl.clk_event_t{{.*}}* addrspace(4)* // COMMON: [[EVNT:%[0-9]+]] = addrspacecast %opencl.clk_event_t{{.*}}** %clk_event to %opencl.clk_event_t{{.*}}* addrspace(4)* // COMMON: store i8 addrspace(4)* addrspacecast (i8* bitcast (void (i8 addrspace(4)*)* [[INVL2:@__device_side_enqueue_block_invoke[^ ]*]] to i8*) to i8 addrspace(4)*), i8 addrspace(4)** %block.invoke - // COMMON: [[BL:%[0-9]+]] = bitcast <{ i32, i32, i8 addrspace(4)*, i32{{.*}}, i32{{.*}}, i32{{.*}} }>* %block3 to %struct.__opencl_block_literal_generic* + // COMMON: [[BL:%[0-9]+]] = bitcast <{ i32, i32, i8 addrspace(4)*, i32{{.*}}, i32{{.*}}, i32{{.*}} }>* %block4 to %struct.__opencl_block_literal_generic* // COMMON: [[BL_I8:%[0-9]+]] = addrspacecast %struct.__opencl_block_literal_generic* [[BL]] to i8 addrspace(4)* // COMMON-LABEL: call i32 @__enqueue_kernel_basic_events // COMMON-SAME: (%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* {{.*}}, i32 2, %opencl.clk_event_t{{.*}}* addrspace(4)* [[WAIT_EVNT]], %opencl.clk_event_t{{.*}}* addrspace(4)* [[EVNT]],