Index: include/clang/Basic/Attr.td =================================================================== --- include/clang/Basic/Attr.td +++ include/clang/Basic/Attr.td @@ -1383,6 +1383,12 @@ let Documentation = [Undocumented]; } +def NoEscape : InheritableParamAttr { + let Spellings = [GNU<"noescape">, CXX11<"clang", "noescape">]; + let Subjects = SubjectList<[ParmVar]>; + let Documentation = [NoEscapeDocs]; +} + def AssumeAligned : InheritableAttr { let Spellings = [GCC<"assume_aligned">]; let Subjects = SubjectList<[ObjCMethod, Function]>; Index: include/clang/Basic/AttrDocs.td =================================================================== --- include/clang/Basic/AttrDocs.td +++ include/clang/Basic/AttrDocs.td @@ -112,6 +112,46 @@ }]; } +def NoEscapeDocs : Documentation { + let Category = DocCatVariable; + let Content = [{ +``noescape`` placed on a function parameter of a pointer type is used to inform +the compiler that the pointer cannot escape: that is, no reference to the object +the pointer points to that is derived from the parameter value will survive +after the function returns. Users are responsible for making sure parameters +annotated with ``noescape`` do not actually escape. + +For example: + +.. code-block:: c + int *gp; + + void nonescapingFunc(__attribute__((noescape)) int *p) { + *p += 100; // OK. + } + + void escapingFunc(__attribute__((noescape)) int *p) { + gp = p; // Not OK. + } + +Additionally, for block pointers, the same restrictions apply to copies of +blocks. For example: + + typedef void (^BlockTy)(); + BlockTy g0, g1; + + void nonescapingFunc(__attribute__((noescape)) BlockTy block) { + block(); // OK. + } + + void escapingFunc(__attribute__((noescape)) BlockTy block) { + g0 = block; // Not OK. + g1 = Block_copy(block); // Not OK either. 
+ } + + }]; +} + def CarriesDependencyDocs : Documentation { let Category = DocCatFunction; let Content = [{ Index: include/clang/CodeGen/CGFunctionInfo.h =================================================================== --- include/clang/CodeGen/CGFunctionInfo.h +++ include/clang/CodeGen/CGFunctionInfo.h @@ -438,8 +438,14 @@ // Implementation detail of CGFunctionInfo, factored out so it can be named // in the TrailingObjects base class of CGFunctionInfo. -struct CGFunctionInfoArgInfo { +struct ArgTypeInfo { CanQualType type; + bool isNoEscape; + ArgTypeInfo(CanQualType t, bool ne = false) : type(t), isNoEscape(ne) {} +}; + +struct CGFunctionInfoArgInfo { + ArgTypeInfo typeInfo; ABIArgInfo info; }; @@ -515,7 +521,7 @@ const FunctionType::ExtInfo &extInfo, ArrayRef paramInfos, CanQualType resultType, - ArrayRef argTypes, + ArrayRef argTypes, RequiredArgs required); void operator delete(void *p) { ::operator delete(p); } @@ -594,7 +600,9 @@ isNoCallerSavedRegs()); } - CanQualType getReturnType() const { return getArgsBuffer()[0].type; } + CanQualType getReturnType() const { + return getArgsBuffer()[0].typeInfo.type; + } ABIArgInfo &getReturnInfo() { return getArgsBuffer()[0].info; } const ABIArgInfo &getReturnInfo() const { return getArgsBuffer()[0].info; } @@ -638,8 +646,10 @@ ID.AddInteger(paramInfo.getOpaqueValue()); } getReturnType().Profile(ID); - for (const auto &I : arguments()) - I.type.Profile(ID); + for (const auto &I : arguments()) { + I.typeInfo.type.Profile(ID); + ID.AddBoolean(I.typeInfo.isNoEscape); + } } static void Profile(llvm::FoldingSetNodeID &ID, bool InstanceMethod, @@ -648,7 +658,7 @@ ArrayRef paramInfos, RequiredArgs required, CanQualType resultType, - ArrayRef argTypes) { + ArrayRef argTypes) { ID.AddInteger(info.getCC()); ID.AddBoolean(InstanceMethod); ID.AddBoolean(ChainCall); @@ -664,9 +674,10 @@ ID.AddInteger(paramInfo.getOpaqueValue()); } resultType.Profile(ID); - for (ArrayRef::iterator + for (ArrayRef::iterator i = argTypes.begin(), 
e = argTypes.end(); i != e; ++i) { - i->Profile(ID); + i->type.Profile(ID); + ID.AddBoolean(i->isNoEscape); } } }; Index: include/clang/CodeGen/CodeGenABITypes.h =================================================================== --- include/clang/CodeGen/CodeGenABITypes.h +++ include/clang/CodeGen/CodeGenABITypes.h @@ -68,7 +68,7 @@ const CGFunctionInfo &arrangeFreeFunctionCall(CodeGenModule &CGM, CanQualType returnType, - ArrayRef argTypes, + ArrayRef argTypes, FunctionType::ExtInfo info, RequiredArgs args); Index: lib/CodeGen/CGBlocks.cpp =================================================================== --- lib/CodeGen/CGBlocks.cpp +++ lib/CodeGen/CGBlocks.cpp @@ -1039,7 +1039,9 @@ QualType FnType = BPT->getPointeeType(); // And the rest of the arguments. - EmitCallArgs(Args, FnType->getAs(), E->arguments()); + const auto *BE = dyn_cast(E->getCallee()); + EmitCallArgs(Args, FnType->getAs(), E->arguments(), + BE ? BE->getBlockDecl() : nullptr); // Load the function. llvm::Value *Func = Builder.CreateAlignedLoad(FuncPtr, getPointerAlign()); Index: lib/CodeGen/CGCXXABI.h =================================================================== --- lib/CodeGen/CGCXXABI.h +++ lib/CodeGen/CGCXXABI.h @@ -310,7 +310,7 @@ /// initialized with the type of 'this'. 
virtual AddedStructorArgs buildStructorSignature(const CXXMethodDecl *MD, StructorType T, - SmallVectorImpl &ArgTys) = 0; + SmallVectorImpl &ArgTys) = 0; /// Returns true if the given destructor type should be emitted as a linkonce /// delegating thunk, regardless of whether the dtor is defined in this TU or Index: lib/CodeGen/CGCall.h =================================================================== --- lib/CodeGen/CGCall.h +++ lib/CodeGen/CGCall.h @@ -172,8 +172,9 @@ RValue RV; QualType Ty; bool NeedsCopy; - CallArg(RValue rv, QualType ty, bool needscopy) - : RV(rv), Ty(ty), NeedsCopy(needscopy) + bool IsNoEscape; + CallArg(RValue rv, QualType ty, bool needscopy, bool isNoEscape) + : RV(rv), Ty(ty), NeedsCopy(needscopy), IsNoEscape(isNoEscape) { } }; @@ -204,8 +205,9 @@ llvm::Instruction *IsActiveIP; }; - void add(RValue rvalue, QualType type, bool needscopy = false) { - push_back(CallArg(rvalue, type, needscopy)); + void add(RValue rvalue, QualType type, bool needscopy = false, + bool isNoEscape = false) { + push_back(CallArg(rvalue, type, needscopy, isNoEscape)); } /// Add all the arguments from another CallArgList to this one. After doing Index: lib/CodeGen/CGCall.cpp =================================================================== --- lib/CodeGen/CGCall.cpp +++ lib/CodeGen/CGCall.cpp @@ -132,14 +132,19 @@ /// Adds the formal parameters in FPT to the given prefix. If any parameter in /// FPT has pass_object_size attrs, then we'll add parameters for those, too. static void appendParameterTypes(const CodeGenTypes &CGT, - SmallVectorImpl &prefix, + SmallVectorImpl &prefix, SmallVectorImpl ¶mInfos, + const FunctionDecl *FD, CanQual FPT) { // Fast path: don't touch param info if we don't need to. if (!FPT->hasExtParameterInfos()) { assert(paramInfos.empty() && "We have paramInfos, but the prototype doesn't?"); - prefix.append(FPT->param_type_begin(), FPT->param_type_end()); + for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) { + bool IsNoEscape = FD ? 
FD->getParamDecl(I)->hasAttr() : false; + prefix.push_back({FPT->getParamType(I), IsNoEscape}); + } + return; } @@ -152,7 +157,8 @@ auto ExtInfos = FPT->getExtParameterInfos(); assert(ExtInfos.size() == FPT->getNumParams()); for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) { - prefix.push_back(FPT->getParamType(I)); + bool IsNoEscape = FD ? FD->getParamDecl(I)->hasAttr() : false; + prefix.push_back({FPT->getParamType(I), IsNoEscape}); if (ExtInfos[I].hasPassObjectSize()) prefix.push_back(CGT.getContext().getSizeType()); } @@ -165,14 +171,14 @@ /// type, on top of any implicit parameters already stored. static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, - SmallVectorImpl &prefix, + SmallVectorImpl &prefix, CanQual FTP, const FunctionDecl *FD) { SmallVector paramInfos; RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD); // FIXME: Kill copy. - appendParameterTypes(CGT, prefix, paramInfos, FTP); + appendParameterTypes(CGT, prefix, paramInfos, FD, FTP); CanQualType resultType = FTP->getReturnType().getUnqualifiedType(); return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod, @@ -186,7 +192,7 @@ const CGFunctionInfo & CodeGenTypes::arrangeFreeFunctionType(CanQual FTP, const FunctionDecl *FD) { - SmallVector argTypes; + SmallVector argTypes; return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes, FTP, FD); } @@ -242,7 +248,7 @@ CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP, const CXXMethodDecl *MD) { - SmallVector argTypes; + SmallVector argTypes; // Add the 'this' pointer. if (RD) @@ -288,7 +294,7 @@ CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD, StructorType Type) { - SmallVector argTypes; + SmallVector argTypes; SmallVector paramInfos; argTypes.push_back(GetThisType(Context, MD->getParent())); @@ -311,7 +317,7 @@ // Add the formal parameters. 
if (PassParams) - appendParameterTypes(*this, argTypes, paramInfos, FTP); + appendParameterTypes(*this, argTypes, paramInfos, MD, FTP); CGCXXABI::AddedStructorArgs AddedArgs = TheCXXABI.buildStructorSignature(MD, Type, argTypes); @@ -331,7 +337,7 @@ FunctionType::ExtInfo extInfo = FTP->getExtInfo(); CanQualType resultType = TheCXXABI.HasThisReturn(GD) - ? argTypes.front() + ? argTypes.front().type : TheCXXABI.hasMostDerivedReturn(GD) ? CGM.getContext().VoidPtrTy : Context.VoidTy; @@ -340,19 +346,20 @@ paramInfos, required); } -static SmallVector +static SmallVector getArgTypesForCall(ASTContext &ctx, const CallArgList &args) { - SmallVector argTypes; + SmallVector argTypes; for (auto &arg : args) - argTypes.push_back(ctx.getCanonicalParamType(arg.Ty)); + argTypes.push_back({ctx.getCanonicalParamType(arg.Ty), arg.IsNoEscape}); return argTypes; } -static SmallVector +static SmallVector getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) { - SmallVector argTypes; + SmallVector argTypes; for (auto &arg : args) - argTypes.push_back(ctx.getCanonicalParamType(arg->getType())); + argTypes.push_back({ctx.getCanonicalParamType(arg->getType()), + arg->hasAttr()}); return argTypes; } @@ -382,9 +389,9 @@ unsigned ExtraSuffixArgs, bool PassProtoArgs) { // FIXME: Kill copy. - SmallVector ArgTypes; + SmallVector ArgTypes; for (const auto &Arg : args) - ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); + ArgTypes.push_back({Context.getCanonicalParamType(Arg.Ty), Arg.IsNoEscape}); // +1 for implicit this, which should always be args[0]. unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs; @@ -394,7 +401,7 @@ RequiredArgs::forPrototypePlus(FPT, TotalPrefixArgs + ExtraSuffixArgs, D); GlobalDecl GD(D, CtorKind); CanQualType ResultType = TheCXXABI.HasThisReturn(GD) - ? ArgTypes.front() + ? ArgTypes.front().type : TheCXXABI.hasMostDerivedReturn(GD) ? 
CGM.getContext().VoidPtrTy : Context.VoidTy; @@ -454,12 +461,13 @@ const CGFunctionInfo & CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType) { - SmallVector argTys; + SmallVector argTys; argTys.push_back(Context.getCanonicalParamType(receiverType)); argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType())); // FIXME: Kill copy? for (const auto *I : MD->parameters()) { - argTys.push_back(Context.getCanonicalParamType(I->getType())); + argTys.push_back({Context.getCanonicalParamType(I->getType()), + I->hasAttr()}); } FunctionType::ExtInfo einfo; @@ -512,7 +520,7 @@ CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) { assert(MD->isVirtual() && "only virtual memptrs have thunks"); CanQual FTP = GetFormalType(MD); - CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) }; + ArgTypeInfo ArgTys[] = { GetThisType(Context, MD->getParent()) }; return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false, /*chainCall=*/false, ArgTys, FTP->getExtInfo(), {}, RequiredArgs(1)); @@ -524,7 +532,7 @@ assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure); CanQual FTP = GetFormalType(CD); - SmallVector ArgTys; + SmallVector ArgTys; const CXXRecordDecl *RD = CD->getParent(); ArgTys.push_back(GetThisType(Context, RD)); if (CT == Ctor_CopyingClosure) @@ -576,9 +584,10 @@ } // FIXME: Kill copy. - SmallVector argTypes; + SmallVector argTypes; for (const auto &arg : args) - argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty)); + argTypes.push_back( + {CGT.getContext().getCanonicalParamType(arg.Ty), arg.IsNoEscape}); return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()), /*instanceMethod=*/false, chainCall, argTypes, fnType->getExtInfo(), paramInfos, @@ -623,9 +632,9 @@ CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args) { // FIXME: Kill copy. 
- SmallVector argTypes; + SmallVector argTypes; for (const auto &Arg : args) - argTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); + argTypes.push_back({Context.getCanonicalParamType(Arg.Ty), Arg.IsNoEscape}); return arrangeLLVMFunctionInfo( GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false, argTypes, FunctionType::ExtInfo(), @@ -644,7 +653,7 @@ const CGFunctionInfo & CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType, - ArrayRef argTypes) { + ArrayRef argTypes) { return arrangeLLVMFunctionInfo( resultType, /*instanceMethod=*/false, /*chainCall=*/false, argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All); @@ -720,12 +729,12 @@ CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType, bool instanceMethod, bool chainCall, - ArrayRef argTypes, + ArrayRef argTypes, FunctionType::ExtInfo info, ArrayRef paramInfos, RequiredArgs required) { assert(std::all_of(argTypes.begin(), argTypes.end(), - [](CanQualType T) { return T.isCanonicalAsParam(); })); + [](ArgTypeInfo T) { return T.type.isCanonicalAsParam(); })); // Lookup or create unique function info. 
llvm::FoldingSetNodeID ID; @@ -768,7 +777,7 @@ for (auto &I : FI->arguments()) if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr) - I.info.setCoerceToType(ConvertType(I.type)); + I.info.setCoerceToType(ConvertType(I.typeInfo.type)); bool erased = FunctionsBeingProcessed.erase(FI); (void)erased; assert(erased && "Not in set?"); @@ -782,7 +791,7 @@ const FunctionType::ExtInfo &info, ArrayRef paramInfos, CanQualType resultType, - ArrayRef argTypes, + ArrayRef argTypes, RequiredArgs required) { assert(paramInfos.empty() || paramInfos.size() == argTypes.size()); @@ -806,9 +815,9 @@ FI->ArgStructAlign = 0; FI->NumArgs = argTypes.size(); FI->HasExtParameterInfos = !paramInfos.empty(); - FI->getArgsBuffer()[0].type = resultType; + FI->getArgsBuffer()[0].typeInfo.type = resultType; for (unsigned i = 0, e = argTypes.size(); i != e; ++i) - FI->getArgsBuffer()[i + 1].type = argTypes[i]; + FI->getArgsBuffer()[i + 1].typeInfo = argTypes[i]; for (unsigned i = 0, e = paramInfos.size(); i != e; ++i) FI->getExtParameterInfosBuffer()[i] = paramInfos[i]; return FI; @@ -1416,7 +1425,7 @@ for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs; ++I, ++ArgNo) { assert(I != FI.arg_end()); - QualType ArgType = I->type; + QualType ArgType = I->typeInfo.type; const ABIArgInfo &AI = I->info; // Collect data about IR arguments corresponding to Clang argument ArgNo. auto &IRArgs = ArgInfo[ArgNo]; @@ -1598,7 +1607,7 @@ case ABIArgInfo::Indirect: { assert(NumIRArgs == 1); // indirect arguments are always on the stack, which is alloca addr space. 
- llvm::Type *LTy = ConvertTypeForMem(it->type); + llvm::Type *LTy = ConvertTypeForMem(it->typeInfo.type); ArgTypes[FirstIRArg] = LTy->getPointerTo( CGM.getDataLayout().getAllocaAddrSpace()); break; @@ -1632,7 +1641,7 @@ case ABIArgInfo::Expand: auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; - getExpandedTypes(it->type, ArgTypesIter); + getExpandedTypes(it->typeInfo.type, ArgTypesIter); assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); break; } @@ -1970,7 +1979,7 @@ for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), E = FI.arg_end(); I != E; ++I, ++ArgNo) { - QualType ParamType = I->type; + QualType ParamType = I->typeInfo.type; const ABIArgInfo &AI = I->info; llvm::AttrBuilder Attrs; @@ -2092,6 +2101,9 @@ break; } + if (I->typeInfo.isNoEscape) + Attrs.addAttribute(llvm::Attribute::NoCapture); + if (Attrs.hasAttributes()) { unsigned FirstIRArg, NumIRArgs; std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); @@ -2235,7 +2247,7 @@ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); i != e; ++i, ++info_it, ++ArgNo) { const VarDecl *Arg = *i; - QualType Ty = info_it->type; + QualType Ty = info_it->typeInfo.type; const ABIArgInfo &ArgI = info_it->info; bool isPromoted = @@ -3006,6 +3018,7 @@ // local alloca. We need to turn that into an r-value suitable // for EmitCall. Address local = GetAddrOfLocalVar(param); + bool isNoEscape = param->hasAttr(); QualType type = param->getType(); @@ -3015,7 +3028,7 @@ // GetAddrOfLocalVar returns a pointer-to-pointer for references, // but the argument needs to be the original pointer. if (type->isReferenceType()) { - args.add(RValue::get(Builder.CreateLoad(local)), type); + args.add(RValue::get(Builder.CreateLoad(local)), type, false, isNoEscape); // In ARC, move out of consumed arguments so that the release cleanup // entered by StartFunction doesn't cause an over-release. 
This isn't @@ -3029,12 +3042,12 @@ auto null = llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType())); Builder.CreateStore(null, local); - args.add(RValue::get(ptr), type); + args.add(RValue::get(ptr), type, false, isNoEscape); // For the most part, we just need to load the alloca, except that // aggregate r-values are actually pointers to temporaries. } else { - args.add(convertTempToRValue(local, type, loc), type); + args.add(convertTempToRValue(local, type, loc), type, false, isNoEscape); } } @@ -3403,8 +3416,14 @@ "The code below depends on only adding one arg per EmitCallArg"); (void)InitialArgSize; RValue RVArg = Args.back().RV; + unsigned ParamIdx = ParamsToSkip + Idx; EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC, - ParamsToSkip + Idx); + ParamIdx); + + if (AC.getDecl() && ParamIdx < AC.getNumParams()) + Args.back().IsNoEscape = + AC.getParamDecl(ParamIdx)->hasAttr<NoEscapeAttr>(); + // @llvm.objectsize should never have side-effects and shouldn't need // destruction/cleanups, so we can safely "emit" it after its arg, // regardless of right-to-leftness @@ -3870,7 +3889,7 @@ case ABIArgInfo::Extend: case ABIArgInfo::Direct: { if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) && - ArgInfo.getCoerceToType() == ConvertType(info_it->type) && + ArgInfo.getCoerceToType() == ConvertType(info_it->typeInfo.type) && ArgInfo.getDirectOffset() == 0) { assert(NumIRArgs == 1); llvm::Value *V; Index: lib/CodeGen/CGClass.cpp =================================================================== --- lib/CodeGen/CGClass.cpp +++ lib/CodeGen/CGClass.cpp @@ -2083,7 +2083,7 @@ bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E) { CallArgList Args; CallArg ThisArg(RValue::get(This.getPointer()), D->getThisType(getContext()), - /*NeedsCopy=*/false); + /*NeedsCopy=*/false, /*IsNoEscape=*/false); // Forward the parameters. 
if (InheritedFromVBase && Index: lib/CodeGen/CGDecl.cpp =================================================================== --- lib/CodeGen/CGDecl.cpp +++ lib/CodeGen/CGDecl.cpp @@ -542,7 +542,7 @@ // __attribute__((cleanup(f))) void *g; // // To fix this we insert a bitcast here. - QualType ArgTy = FnInfo.arg_begin()->type; + QualType ArgTy = FnInfo.arg_begin()->typeInfo.type; llvm::Value *Arg = CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy)); Index: lib/CodeGen/CGExprCXX.cpp =================================================================== --- lib/CodeGen/CGExprCXX.cpp +++ lib/CodeGen/CGExprCXX.cpp @@ -1583,9 +1583,8 @@ AlignValT); } - // FIXME: Why do we not pass a CalleeDecl here? EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(), - /*AC*/AbstractCallee(), /*ParamsToSkip*/ParamsToSkip); + allocator, /*ParamsToSkip*/ParamsToSkip); RValue RV = EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs); Index: lib/CodeGen/CGObjCGNU.cpp =================================================================== --- lib/CodeGen/CGObjCGNU.cpp +++ lib/CodeGen/CGObjCGNU.cpp @@ -1456,7 +1456,7 @@ } // Reset the receiver in case the lookup modified it - ActualArgs[0] = CallArg(RValue::get(Receiver), ASTIdTy, false); + ActualArgs[0] = CallArg(RValue::get(Receiver), ASTIdTy, false, false); imp = EnforceType(Builder, imp, MSI.MessengerType); Index: lib/CodeGen/CGObjCMac.cpp =================================================================== --- lib/CodeGen/CGObjCMac.cpp +++ lib/CodeGen/CGObjCMac.cpp @@ -238,7 +238,7 @@ // id objc_getProperty (id, SEL, ptrdiff_t, bool) CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType()); CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType()); - CanQualType Params[] = { + ArgTypeInfo Params[] = { IdType, SelType, Ctx.getPointerDiffType()->getCanonicalTypeUnqualified(), Ctx.BoolTy}; llvm::FunctionType *FTy = @@ -253,7 +253,7 @@ // void objc_setProperty (id, SEL, ptrdiff_t, id, bool, 
bool) CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType()); CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType()); - CanQualType Params[] = { + ArgTypeInfo Params[] = { IdType, SelType, Ctx.getPointerDiffType()->getCanonicalTypeUnqualified(), @@ -278,7 +278,7 @@ // void objc_setProperty_nonatomic_copy(id self, SEL _cmd, // id newValue, ptrdiff_t offset); - SmallVector Params; + SmallVector Params; CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType()); CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType()); Params.push_back(IdType); @@ -305,7 +305,7 @@ CodeGen::CodeGenTypes &Types = CGM.getTypes(); ASTContext &Ctx = CGM.getContext(); // void objc_copyStruct (void *, const void *, size_t, bool, bool) - SmallVector Params; + SmallVector Params; Params.push_back(Ctx.VoidPtrTy); Params.push_back(Ctx.VoidPtrTy); Params.push_back(Ctx.getSizeType()); @@ -325,7 +325,7 @@ CodeGen::CodeGenTypes &Types = CGM.getTypes(); ASTContext &Ctx = CGM.getContext(); /// void objc_copyCppObjectAtomic(void *dest, const void *src, void *helper); - SmallVector Params; + SmallVector Params; Params.push_back(Ctx.VoidPtrTy); Params.push_back(Ctx.VoidPtrTy); Params.push_back(Ctx.VoidPtrTy); @@ -339,7 +339,7 @@ CodeGen::CodeGenTypes &Types = CGM.getTypes(); ASTContext &Ctx = CGM.getContext(); // void objc_enumerationMutation (id) - SmallVector Params; + SmallVector Params; Params.push_back(Ctx.getCanonicalParamType(Ctx.getObjCIdType())); llvm::FunctionType *FTy = Types.GetFunctionType( @@ -351,7 +351,7 @@ CodeGen::CodeGenTypes &Types = CGM.getTypes(); ASTContext &Ctx = CGM.getContext(); // Class objc_lookUpClass (const char *) - SmallVector Params; + SmallVector Params; Params.push_back( Ctx.getCanonicalType(Ctx.getPointerType(Ctx.CharTy.withConst()))); llvm::FunctionType *FTy = Index: lib/CodeGen/CGVTables.cpp =================================================================== --- lib/CodeGen/CGVTables.cpp +++ lib/CodeGen/CGVTables.cpp 
@@ -305,9 +305,9 @@ assert(CallFnInfo.arg_size() == CurFnInfo->arg_size()); for (unsigned i = 0, e = CurFnInfo->arg_size(); i != e; ++i) assert(similar(CallFnInfo.arg_begin()[i].info, - CallFnInfo.arg_begin()[i].type, + CallFnInfo.arg_begin()[i].typeInfo.type, CurFnInfo->arg_begin()[i].info, - CurFnInfo->arg_begin()[i].type)); + CurFnInfo->arg_begin()[i].typeInfo.type)); #endif // Determine whether we have a return value slot to use. Index: lib/CodeGen/CodeGenABITypes.cpp =================================================================== --- lib/CodeGen/CodeGenABITypes.cpp +++ lib/CodeGen/CodeGenABITypes.cpp @@ -57,7 +57,7 @@ const CGFunctionInfo & CodeGen::arrangeFreeFunctionCall(CodeGenModule &CGM, CanQualType returnType, - ArrayRef argTypes, + ArrayRef argTypes, FunctionType::ExtInfo info, RequiredArgs args) { return CGM.getTypes().arrangeLLVMFunctionInfo( Index: lib/CodeGen/CodeGenFunction.h =================================================================== --- lib/CodeGen/CodeGenFunction.h +++ lib/CodeGen/CodeGenFunction.h @@ -335,6 +335,7 @@ public: AbstractCallee() : CalleeDecl(nullptr) {} AbstractCallee(const FunctionDecl *FD) : CalleeDecl(FD) {} + AbstractCallee(const BlockDecl *BD) : CalleeDecl(BD) {} AbstractCallee(const ObjCMethodDecl *OMD) : CalleeDecl(OMD) {} bool hasFunctionDecl() const { return dyn_cast_or_null(CalleeDecl); @@ -343,11 +344,15 @@ unsigned getNumParams() const { if (const auto *FD = dyn_cast(CalleeDecl)) return FD->getNumParams(); + if (const auto *BD = dyn_cast(CalleeDecl)) + return BD->getNumParams(); return cast(CalleeDecl)->param_size(); } const ParmVarDecl *getParamDecl(unsigned I) const { if (const auto *FD = dyn_cast(CalleeDecl)) return FD->getParamDecl(I); + if (const auto *BD = dyn_cast(CalleeDecl)) + return BD->getParamDecl(I); return *(cast(CalleeDecl)->param_begin() + I); } }; Index: lib/CodeGen/CodeGenTypes.h =================================================================== --- lib/CodeGen/CodeGenTypes.h +++ 
lib/CodeGen/CodeGenTypes.h @@ -277,7 +277,7 @@ const FunctionArgList &args); const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(CanQualType resultType, - ArrayRef argTypes); + ArrayRef argTypes); const CGFunctionInfo &arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args); @@ -327,7 +327,7 @@ const CGFunctionInfo &arrangeLLVMFunctionInfo(CanQualType returnType, bool instanceMethod, bool chainCall, - ArrayRef argTypes, + ArrayRef argTypes, FunctionType::ExtInfo info, ArrayRef paramInfos, RequiredArgs args); Index: lib/CodeGen/ItaniumCXXABI.cpp =================================================================== --- lib/CodeGen/ItaniumCXXABI.cpp +++ lib/CodeGen/ItaniumCXXABI.cpp @@ -209,7 +209,7 @@ AddedStructorArgs buildStructorSignature(const CXXMethodDecl *MD, StructorType T, - SmallVectorImpl &ArgTys) override; + SmallVectorImpl &ArgTys) override; bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor, CXXDtorType DT) const override { @@ -1365,7 +1365,7 @@ CGCXXABI::AddedStructorArgs ItaniumCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T, - SmallVectorImpl &ArgTys) { + SmallVectorImpl &ArgTys) { ASTContext &Context = getContext(); // All parameters are already in place except VTT, which goes after 'this'. @@ -1453,7 +1453,8 @@ CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating); QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy); Args.insert(Args.begin() + 1, - CallArg(RValue::get(VTT), VTTTy, /*needscopy=*/false)); + CallArg(RValue::get(VTT), VTTTy, /*needscopy=*/false, + /*isNoEscape*/false)); return AddedStructorArgs::prefix(1); // Added one arg. 
} Index: lib/CodeGen/MicrosoftCXXABI.cpp =================================================================== --- lib/CodeGen/MicrosoftCXXABI.cpp +++ lib/CodeGen/MicrosoftCXXABI.cpp @@ -208,7 +208,7 @@ AddedStructorArgs buildStructorSignature(const CXXMethodDecl *MD, StructorType T, - SmallVectorImpl<CanQualType> &ArgTys) override; + SmallVectorImpl<ArgTypeInfo> &ArgTys) override; /// Non-base dtors should be emitted as delegating thunks in this ABI. bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor, @@ -1263,7 +1263,7 @@ CGCXXABI::AddedStructorArgs MicrosoftCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T, - SmallVectorImpl<CanQualType> &ArgTys) { + SmallVectorImpl<ArgTypeInfo> &ArgTys) { AddedStructorArgs Added; // TODO: 'for base' flag if (T == StructorType::Deleting) { @@ -1518,7 +1518,8 @@ RValue RV = RValue::get(MostDerivedArg); if (FPT->isVariadic()) { Args.insert(Args.begin() + 1, - CallArg(RV, getContext().IntTy, /*needscopy=*/false)); + CallArg(RV, getContext().IntTy, /*needscopy=*/false, + /*isNoEscape=*/false)); return AddedStructorArgs::prefix(1); } Args.add(RV, getContext().IntTy); Index: lib/CodeGen/SwiftCallingConv.cpp =================================================================== --- lib/CodeGen/SwiftCallingConv.cpp +++ lib/CodeGen/SwiftCallingConv.cpp @@ -831,7 +831,7 @@ for (unsigned i = 0, e = FI.arg_size(); i != e; ++i) { auto &argInfo = FI.arg_begin()[i]; - argInfo.info = classifyArgumentType(CGM, argInfo.type); + argInfo.info = classifyArgumentType(CGM, argInfo.typeInfo.type); } } Index: lib/CodeGen/TargetInfo.cpp =================================================================== --- lib/CodeGen/TargetInfo.cpp +++ lib/CodeGen/TargetInfo.cpp @@ -640,7 +640,7 @@ if (!getCXXABI().classifyReturnType(FI)) FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); for (auto &I : FI.arguments()) - I.info = classifyArgumentType(I.type); + I.info = classifyArgumentType(I.typeInfo.type); } Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, @@ -712,7 
@@ if (!getCXXABI().classifyReturnType(FI)) FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); for (auto &Arg : FI.arguments()) - Arg.info = classifyArgumentType(Arg.type); + Arg.info = classifyArgumentType(Arg.typeInfo.type); } Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, @@ -806,7 +806,7 @@ FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); for (auto &I : FI.arguments()) - I.info = classifyArgumentType(I.type); + I.info = classifyArgumentType(I.typeInfo.type); } Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, @@ -1681,7 +1681,7 @@ // First pass do all the vector types. const Type *Base = nullptr; uint64_t NumElts = 0; - const QualType& Ty = I.type; + const QualType& Ty = I.typeInfo.type; if ((Ty->isVectorType() || Ty->isBuiltinType()) && isHomogeneousAggregate(Ty, Base, NumElts)) { if (State.FreeSSERegs >= NumElts) { @@ -1698,7 +1698,7 @@ // Second pass, do the rest! const Type *Base = nullptr; uint64_t NumElts = 0; - const QualType& Ty = I.type; + const QualType& Ty = I.typeInfo.type; bool IsHva = isHomogeneousAggregate(Ty, Base, NumElts); if (IsHva && !Ty->isVectorType() && !Ty->isBuiltinType()) { @@ -1756,7 +1756,7 @@ } else { // If not vectorcall, revert to normal behavior. for (auto &I : FI.arguments()) { - I.info = classifyArgumentType(I.type, State); + I.info = classifyArgumentType(I.typeInfo.type, State); UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca); } } @@ -1832,7 +1832,7 @@ ABIArgInfo &Ret = FI.getReturnInfo(); if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall && isArgInAlloca(I->info)) { - addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); + addFieldToArgStruct(FrameFields, StackOffset, I->info, I->typeInfo.type); ++I; } @@ -1851,7 +1851,7 @@ // Put arguments passed in memory into the struct. 
for (; I != E; ++I) { if (isArgInAlloca(I->info)) - addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); + addFieldToArgStruct(FrameFields, StackOffset, I->info, I->typeInfo.type); } FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields, @@ -3535,10 +3535,10 @@ it != ie; ++it, ++ArgNo) { bool IsNamedArg = ArgNo < NumRequiredArgs; - if (IsRegCall && it->type->isStructureOrClassType()) - it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE); + if (IsRegCall && it->typeInfo.type->isStructureOrClassType()) + it->info = classifyRegCallStructType(it->typeInfo.type, NeededInt, NeededSSE); else - it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt, + it->info = classifyArgumentType(it->typeInfo.type, FreeIntRegs, NeededInt, NeededSSE, IsNamedArg); // AMD64-ABI 3.2.3p3: If there are no registers available for any @@ -3549,7 +3549,7 @@ FreeIntRegs -= NeededInt; FreeSSERegs -= NeededSSE; } else { - it->info = getIndirectResult(it->type, FreeIntRegs); + it->info = getIndirectResult(it->typeInfo.type, FreeIntRegs); } } } @@ -3909,19 +3909,19 @@ // Vectorcall in x64 only permits the first 6 arguments to be passed // as XMM/YMM registers. if (Count < VectorcallMaxParamNumAsReg) - I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall); + I.info = classify(I.typeInfo.type, FreeSSERegs, false, IsVectorCall, IsRegCall); else { // Since these cannot be passed in registers, pretend no registers // are left. 
      unsigned ZeroSSERegsAvail = 0;
-      I.info = classify(I.type, /*FreeSSERegs=*/ZeroSSERegsAvail, false,
+      I.info = classify(I.typeInfo.type, /*FreeSSERegs=*/ZeroSSERegsAvail, false,
                         IsVectorCall, IsRegCall);
     }
     ++Count;
   }
 
   for (auto &I : FI.arguments()) {
-    I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info);
+    I.info = reclassifyHvaArgType(I.typeInfo.type, FreeSSERegs, I.info);
   }
 }
 
@@ -3955,7 +3955,7 @@
     computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall);
   } else {
     for (auto &I : FI.arguments())
-      I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
+      I.info = classify(I.typeInfo.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
   }
 }
 
@@ -4269,7 +4269,7 @@
     // We rely on the default argument classification for the most part.
     // One exception: An aggregate containing a single floating-point
     // or vector item must be passed in a register if one is available.
-    const Type *T = isSingleElementStruct(I.type, getContext());
+    const Type *T = isSingleElementStruct(I.typeInfo.type, getContext());
     if (T) {
       const BuiltinType *BT = T->getAs<BuiltinType>();
       if (IsQPXVectorTy(T) ||
@@ -4280,7 +4280,7 @@
         continue;
       }
     }
-    I.info = classifyArgumentType(I.type);
+    I.info = classifyArgumentType(I.typeInfo.type);
   }
 }
 
@@ -4813,7 +4813,7 @@
     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
 
   for (auto &it : FI.arguments())
-    it.info = classifyArgumentType(it.type);
+    it.info = classifyArgumentType(it.typeInfo.type);
 }
 
 Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
@@ -5527,7 +5527,7 @@
     classifyReturnType(FI.getReturnType(), FI.isVariadic());
 
   for (auto &I : FI.arguments())
-    I.info = classifyArgumentType(I.type, FI.isVariadic());
+    I.info = classifyArgumentType(I.typeInfo.type, FI.isVariadic());
 
   // Always honor user-specified calling convention.
if (FI.getCallingConvention() != llvm::CallingConv::C) @@ -6085,7 +6085,7 @@ if (!getCXXABI().classifyReturnType(FI)) FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); for (auto &I : FI.arguments()) - I.info = classifyArgumentType(I.type); + I.info = classifyArgumentType(I.typeInfo.type); // Always honor user-specified calling convention. if (FI.getCallingConvention() != llvm::CallingConv::C) @@ -6193,7 +6193,7 @@ if (!getCXXABI().classifyReturnType(FI)) FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); for (auto &I : FI.arguments()) - I.info = classifyArgumentType(I.type); + I.info = classifyArgumentType(I.typeInfo.type); } Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, @@ -6873,7 +6873,7 @@ uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0; for (auto &I : FI.arguments()) - I.info = classifyArgumentType(I.type, Offset); + I.info = classifyArgumentType(I.typeInfo.type, Offset); } Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, @@ -7091,7 +7091,7 @@ if (!getCXXABI().classifyReturnType(FI)) FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); for (auto &I : FI.arguments()) - I.info = classifyArgumentType(I.type); + I.info = classifyArgumentType(I.typeInfo.type); } ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const { @@ -7195,7 +7195,7 @@ if (!getCXXABI().classifyReturnType(FI)) FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); for (auto &I : FI.arguments()) - I.info = classifyArgumentType(I.type, State); + I.info = classifyArgumentType(I.typeInfo.type, State); } ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const; @@ -7320,9 +7320,9 @@ unsigned CC = FI.getCallingConvention(); for (auto &Arg : FI.arguments()) if (CC == llvm::CallingConv::AMDGPU_KERNEL) - Arg.info = classifyArgumentType(Arg.type); + Arg.info = classifyArgumentType(Arg.typeInfo.type); else - Arg.info = DefaultABIInfo::classifyArgumentType(Arg.type); + Arg.info = 
DefaultABIInfo::classifyArgumentType(Arg.typeInfo.type); } /// \brief Classify argument of given type \p Ty. @@ -7500,7 +7500,7 @@ FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); for (auto &Arg : FI.arguments()) - Arg.info = classifyArgumentType(Arg.type); + Arg.info = classifyArgumentType(Arg.typeInfo.type); } namespace { @@ -7763,7 +7763,7 @@ void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const { FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8); for (auto &I : FI.arguments()) - I.info = classifyType(I.type, 16 * 8); + I.info = classifyType(I.typeInfo.type, 16 * 8); } namespace { Index: lib/Sema/SemaDeclAttr.cpp =================================================================== --- lib/Sema/SemaDeclAttr.cpp +++ lib/Sema/SemaDeclAttr.cpp @@ -1517,6 +1517,22 @@ Attr.getAttributeSpellingListIndex())); } +static void handleNoEscapeAttr(Sema &S, Decl *D, const AttributeList &Attr) { + if (D->isInvalidDecl()) + return; + + // noescape only applies to pointer types. 
+  QualType T = cast<ParmVarDecl>(D)->getType();
+  if (!S.isValidPointerAttrType(T, /* RefOkay */ true)) {
+    S.Diag(Attr.getLoc(), diag::warn_attribute_pointers_only)
+        << Attr.getName() << Attr.getRange() << 0;
+    return;
+  }
+
+  D->addAttr(::new (S.Context) NoEscapeAttr(
+      Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+}
+
 static void handleAssumeAlignedAttr(Sema &S, Decl *D,
                                     const AttributeList &Attr) {
   Expr *E = Attr.getArgAsExpr(0),
@@ -6120,6 +6136,9 @@
   case AttributeList::AT_ReturnsNonNull:
     handleReturnsNonNullAttr(S, D, Attr);
     break;
+  case AttributeList::AT_NoEscape:
+    handleNoEscapeAttr(S, D, Attr);
+    break;
   case AttributeList::AT_AssumeAligned:
     handleAssumeAlignedAttr(S, D, Attr);
     break;
Index: test/CodeGenCXX/noescape.cpp
===================================================================
--- /dev/null
+++ test/CodeGenCXX/noescape.cpp
@@ -0,0 +1,59 @@
+// RUN: %clang_cc1 -std=c++11 -emit-llvm -o - %s | FileCheck %s
+
+struct S {
+  int a[4];
+  S(int *, int * __attribute__((noescape)));
+  S &operator=(int * __attribute__((noescape)));
+  void m0(int *, int * __attribute__((noescape)));
+  virtual void vm1(int *, int * __attribute__((noescape)));
+};
+
+// CHECK: define void @_ZN1SC2EPiS0_(%struct.S* {{.*}}, {{.*}}, {{.*}} nocapture)
+// CHECK: define void @_ZN1SC1EPiS0_(%struct.S* {{.*}}, {{.*}}, {{.*}} nocapture) {{.*}} {
+// CHECK: call void @_ZN1SC2EPiS0_(%struct.S* {{.*}}, {{.*}}, {{.*}} nocapture {{.*}})
+
+S::S(int *, int * __attribute__((noescape))) {}
+
+// CHECK: define {{.*}} %struct.S* @_ZN1SaSEPi(%struct.S* {{.*}}, {{.*}} nocapture)
+S &S::operator=(int * __attribute__((noescape))) { return *this; }
+
+// CHECK: define void @_ZN1S2m0EPiS0_(%struct.S* {{.*}}, {{.*}} nocapture)
+void S::m0(int *, int * __attribute__((noescape))) {}
+
+// CHECK: define void @_ZN1S3vm1EPiS0_(%struct.S* {{.*}}, {{.*}} nocapture)
+void S::vm1(int *, int * __attribute__((noescape))) {}
+
+// CHECK-LABEL: define void @_Z5test0P1SPiS1_(
+// CHECK: call void
@_ZN1SC1EPiS0_(%struct.S* {{.*}}, {{.*}}, {{.*}} nocapture {{.*}})
+// CHECK: call {{.*}} %struct.S* @_ZN1SaSEPi(%struct.S* {{.*}}, {{.*}} nocapture {{.*}})
+// CHECK: call void @_ZN1S2m0EPiS0_(%struct.S* {{.*}}, {{.*}}, {{.*}} nocapture {{.*}})
+// CHECK: call void {{.*}}(%struct.S* {{.*}}, {{.*}}, {{.*}} nocapture {{.*}})
+void test0(S *s, int *p0, int *p1) {
+  S t(p0, p1);
+  t = p1;
+  s->m0(p0, p1);
+  s->vm1(p0, p1);
+}
+
+namespace std {
+  typedef decltype(sizeof(0)) size_t;
+}
+
+// CHECK: define {{.*}} @_ZnwmPv({{.*}}, {{.*}} nocapture {{.*}})
+void *operator new(std::size_t, void * __attribute__((noescape)) p) {
+  return p;
+}
+
+// CHECK-LABEL: define i8* @_Z5test1Pv(
+// CHECK: %call = call {{.*}} @_ZnwmPv({{.*}}, {{.*}} nocapture {{.*}})
+void *test1(void *p0) {
+  return ::operator new(16, p0);
+}
+
+// CHECK-LABEL: define void @_Z5test2PiS_(
+// CHECK: call void @"_ZZ5test2PiS_ENK3$_0clES_S_"({{.*}}, {{.*}}, {{.*}} nocapture {{.*}})
+// CHECK: define internal void @"_ZZ5test2PiS_ENK3$_0clES_S_"({{.*}}, {{.*}}, {{.*}} nocapture)
+void test2(int *p0, int *p1) {
+  auto t = [](int *, int * __attribute__((noescape))){};
+  t(p0, p1);
+}
Index: test/CodeGenObjC/noescape.m
===================================================================
--- /dev/null
+++ test/CodeGenObjC/noescape.m
@@ -0,0 +1,67 @@
+// RUN: %clang_cc1 -fblocks -emit-llvm -o - %s | FileCheck %s
+
+typedef void (^BlockTy)(void);
+
+union U {
+  int *i;
+  long long *ll;
+} __attribute__((transparent_union));
+
+void noescapeFunc0(id, __attribute__((noescape)) BlockTy);
+void noescapeFunc1(__attribute__((noescape)) int *);
+void noescapeFunc2(__attribute__((noescape)) id);
+void noescapeFunc3(__attribute__((noescape)) union U);
+
+// CHECK-LABEL: define void @test0(
+// CHECK: call void @noescapeFunc0({{.*}}, {{.*}} nocapture {{.*}})
+// CHECK: declare void @noescapeFunc0(i8*, {{.*}} nocapture)
+void test0(BlockTy b) {
+  noescapeFunc0(0, b);
+}
+
+// CHECK-LABEL: define void @test1(
+// CHECK:
call void @noescapeFunc1({{.*}} nocapture {{.*}}) +// CHECK: declare void @noescapeFunc1({{.*}} nocapture) +void test1(int *i) { + noescapeFunc1(i); +} + +// CHECK-LABEL: define void @test2( +// CHECK: call void @noescapeFunc2({{.*}} nocapture {{.*}}) +// CHECK: declare void @noescapeFunc2({{.*}} nocapture) +void test2(id i) { + noescapeFunc2(i); +} + +// CHECK-LABEL: define void @test3( +// CHECK: call void @noescapeFunc3({{.*}} nocapture {{.*}}) +// CHECK: declare void @noescapeFunc3({{.*}} nocapture) +void test3(union U u) { + noescapeFunc3(u); +} + +// CHECK: define internal void @"\01-[C0 m0:]"({{.*}}, {{.*}}, {{.*}} nocapture {{.*}}) + +// CHECK-LABEL: define void @test4( +// CHECK: call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i32*)*)(i8* {{.*}}, i8* {{.*}}, i32* nocapture {{.*}}) + +@interface C0 +-(void) m0:(int*)__attribute__((noescape)) p0; +@end + +@implementation C0 +-(void) m0:(int*)__attribute__((noescape)) p0 { +} +@end + +void test4(C0 *c0, int *p) { + [c0 m0:p]; +} + +// CHECK-LABEL: define void @test5( +// CHECK: call void {{.*}}(i8* bitcast ({ i8**, i32, i32, i8*, {{.*}} }* @{{.*}} to i8*), i32* nocapture {{.*}}) +// CHECK: define internal void @{{.*}}(i8* {{.*}}, i32* nocapture {{.*}}) + +void test5(int *p) { + ^(int *__attribute__((noescape)) p0){}(p); +} Index: test/Misc/pragma-attribute-supported-attributes-list.test =================================================================== --- test/Misc/pragma-attribute-supported-attributes-list.test +++ test/Misc/pragma-attribute-supported-attributes-list.test @@ -2,7 +2,7 @@ // The number of supported attributes should never go down! 
-// CHECK: #pragma clang attribute supports 62 attributes: +// CHECK: #pragma clang attribute supports 63 attributes: // CHECK-NEXT: AMDGPUFlatWorkGroupSize (SubjectMatchRule_function) // CHECK-NEXT: AMDGPUNumSGPR (SubjectMatchRule_function) // CHECK-NEXT: AMDGPUNumVGPR (SubjectMatchRule_function) @@ -33,6 +33,7 @@ // CHECK-NEXT: MicroMips (SubjectMatchRule_function) // CHECK-NEXT: NoDebug (SubjectMatchRule_hasType_functionType, SubjectMatchRule_objc_method, SubjectMatchRule_variable_not_is_parameter) // CHECK-NEXT: NoDuplicate (SubjectMatchRule_function) +// CHECK-NEXT: NoEscape (SubjectMatchRule_variable_is_parameter) // CHECK-NEXT: NoMicroMips (SubjectMatchRule_function) // CHECK-NEXT: NoSanitize (SubjectMatchRule_function, SubjectMatchRule_objc_method, SubjectMatchRule_variable_is_global) // CHECK-NEXT: NoSanitizeSpecific (SubjectMatchRule_function, SubjectMatchRule_variable_is_global) Index: test/SemaObjCXX/noescape.mm =================================================================== --- /dev/null +++ test/SemaObjCXX/noescape.mm @@ -0,0 +1,21 @@ +// RUN: %clang_cc1 -fsyntax-only -verify -fblocks -std=c++11 %s + +typedef void (^BlockTy)(); + +struct S { + int i; + void m(); +}; + +void noescapeFunc0(id, __attribute__((noescape)) BlockTy); +void noescapeFunc1(id, [[clang::noescape]] BlockTy); +void noescapeFunc2(__attribute__((noescape)) int *); +void noescapeFunc3(__attribute__((noescape)) id); +void noescapeFunc4(__attribute__((noescape)) int &); + +void invalidFunc0(int __attribute__((noescape))); // expected-warning {{'noescape' attribute only applies to pointer arguments}} +void invalidFunc1(int __attribute__((noescape(0)))); // expected-error {{'noescape' attribute takes no arguments}} +void invalidFunc2(int0 *__attribute__((noescape))); // expected-error {{use of undeclared identifier 'int0'; did you mean 'int'?}} +void invalidFunc3(__attribute__((noescape)) int (S::*Ty)); // expected-warning {{'noescape' attribute only applies to pointer arguments}} 
+void invalidFunc4(__attribute__((noescape)) void (S::*Ty)()); // expected-warning {{'noescape' attribute only applies to pointer arguments}} +int __attribute__((noescape)) g; // expected-warning {{'noescape' attribute only applies to parameters}}