diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index 9ccbe87fab66..dd3f85617522 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -1,5183 +1,5180 @@
//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
/// qualification. Either or both of RD and MD may be null. A null RD indicates
/// that there is no meaningful 'this' type, and a null MD can occur when
/// calling a method pointer.
CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
                                         const CXXMethodDecl *MD) {
  QualType RecTy;
  if (RD)
    RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  else
    RecTy = Context.VoidTy;

  if (MD)
    RecTy = Context.getAddrSpaceQualType(
        RecTy, MD->getMethodQualifiers().getAddressSpace());
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type.  Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

static void addExtParameterInfosForCall(
    llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // pass_object_size params have no parameter info.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(
    const CodeGenTypes &CGT, SmallVectorImpl<CanQualType> &prefix,
    SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
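#if 0
  // Editor's note -- illustrative sketch only, not part of this patch; the
  // function name below is hypothetical. A single pass_object_size parameter
  // lowers to two arranged parameters (the pointer plus a size_t), e.g.:
  void frob(void *buf __attribute__((pass_object_size(0))));
  // is arranged with the same CGFunctionInfo as
  //   void frob(void *buf, size_t buf_size);
  // which is why the loop below pushes CGT.getContext().getSizeType() after
  // the pointer, and why addExtParameterInfosForCall() emplaces an extra
  // default ExtParameterInfo for the synthesized size argument.
#endif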
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D,
                                               bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<AArch64VectorPcsAttr>())
    return CC_AArch64VectorCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
///  so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(DeriveThisType(RD, MD));

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Set calling convention for CUDA/HIP kernel.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
                                           const FunctionDecl *FD) {
  if (FD->hasAttr<CUDAGlobalAttr>()) {
    const FunctionType *FT = FTy->getAs<FunctionType>();
    CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
    FTy = FT->getCanonicalTypeUnqualified();
  }
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function.  The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo & CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) { assert(!isa(MD) && "wrong method for constructors!"); assert(!isa(MD) && "wrong method for destructors!"); CanQualType FT = GetFormalType(MD).getAs(); setCUDAKernelCallingConvention(FT, CGM, MD); auto prototype = FT.getAs(); if (MD->isInstance()) { // The abstract case is perfectly fine. const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD); return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD); } return arrangeFreeFunctionType(prototype); } bool CodeGenTypes::inheritingCtorHasParams( const InheritedConstructor &Inherited, CXXCtorType Type) { // Parameters are unnecessary if we're constructing a base class subobject // and the inherited constructor lives in a virtual base. return Type == Ctor_Complete || !Inherited.getShadowDecl()->constructsVirtualBase() || !Target.getCXXABI().hasConstructorVariants(); } const CGFunctionInfo & CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) { auto *MD = cast(GD.getDecl()); SmallVector argTypes; SmallVector paramInfos; argTypes.push_back(DeriveThisType(MD->getParent(), MD)); bool PassParams = true; if (auto *CD = dyn_cast(MD)) { // A base class inheriting constructor doesn't get forwarded arguments // needed to construct a virtual base (or base class thereof). if (auto Inherited = CD->getInheritedConstructor()) PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType()); } CanQual FTP = GetFormalType(MD); // Add the formal parameters. if (PassParams) appendParameterTypes(*this, argTypes, paramInfos, FTP); CGCXXABI::AddedStructorArgCounts AddedArgs = TheCXXABI.buildStructorSignature(GD, argTypes); if (!paramInfos.empty()) { // Note: prefix implies after the first param. if (AddedArgs.Prefix) paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix, FunctionProtoType::ExtParameterInfo{}); if (AddedArgs.Suffix) paramInfos.append(AddedArgs.Suffix, FunctionProtoType::ExtParameterInfo{}); } RequiredArgs required = (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All); FunctionType::ExtInfo extInfo = FTP->getExtInfo(); CanQualType resultType = TheCXXABI.HasThisReturn(GD) ? argTypes.front() : TheCXXABI.hasMostDerivedReturn(GD) ? CGM.getContext().VoidPtrTy : Context.VoidTy; return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true, /*chainCall=*/false, argTypes, extInfo, paramInfos, required); } static SmallVector getArgTypesForCall(ASTContext &ctx, const CallArgList &args) { SmallVector argTypes; for (auto &arg : args) argTypes.push_back(ctx.getCanonicalParamType(arg.Ty)); return argTypes; } static SmallVector getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) { SmallVector argTypes; for (auto &arg : args) argTypes.push_back(ctx.getCanonicalParamType(arg->getType())); return argTypes; } static llvm::SmallVector getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs) { llvm::SmallVector result; if (proto->hasExtParameterInfos()) { addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs); } return result; } /// Arrange a call to a C++ method, passing the given arguments. /// /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this` /// parameter. /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of /// args. /// PassProtoArgs indicates whether `args` has args for the parameters in the /// given CXXConstructorDecl. 
const CGFunctionInfo & CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs) { // FIXME: Kill copy. SmallVector ArgTypes; for (const auto &Arg : args) ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); // +1 for implicit this, which should always be args[0]. unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs; CanQual FPT = GetFormalType(D); RequiredArgs Required = PassProtoArgs ? RequiredArgs::forPrototypePlus( FPT, TotalPrefixArgs + ExtraSuffixArgs) : RequiredArgs::All; GlobalDecl GD(D, CtorKind); CanQualType ResultType = TheCXXABI.HasThisReturn(GD) ? ArgTypes.front() : TheCXXABI.hasMostDerivedReturn(GD) ? CGM.getContext().VoidPtrTy : Context.VoidTy; FunctionType::ExtInfo Info = FPT->getExtInfo(); llvm::SmallVector ParamInfos; // If the prototype args are elided, we should only have ABI-specific args, // which never have param info. if (PassProtoArgs && FPT->hasExtParameterInfos()) { // ABI-specific suffix arguments are treated the same as variadic arguments. addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs, ArgTypes.size()); } return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true, /*chainCall=*/false, ArgTypes, Info, ParamInfos, Required); } /// Arrange the argument and result information for the declaration or /// definition of the given function. const CGFunctionInfo & CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) { if (const CXXMethodDecl *MD = dyn_cast(FD)) if (MD->isInstance()) return arrangeCXXMethodDeclaration(MD); CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified(); assert(isa(FTy)); setCUDAKernelCallingConvention(FTy, CGM, FD); // When declaring a function without a prototype, always use a // non-variadic type. if (CanQual noProto = FTy.getAs()) { return arrangeLLVMFunctionInfo( noProto->getReturnType(), /*instanceMethod=*/false, /*chainCall=*/false, None, noProto->getExtInfo(), {},RequiredArgs::All); } return arrangeFreeFunctionType(FTy.castAs()); } /// Arrange the argument and result information for the declaration or /// definition of an Objective-C method. const CGFunctionInfo & CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) { // It happens that this is the same as a call with no optional // arguments, except also using the formal 'self' type. return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType()); } /// Arrange the argument and result information for the function type /// through which to perform a send to the given Objective-C method, /// using the given receiver type. The receiver type is not always /// the 'self' type of the method or even an Objective-C pointer type. /// This is *not* the right method for actually performing such a /// message send, due to the possibility of optional arguments. const CGFunctionInfo & CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType) { SmallVector argTys; SmallVector extParamInfos(2); argTys.push_back(Context.getCanonicalParamType(receiverType)); argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType())); // FIXME: Kill copy? 
for (const auto *I : MD->parameters()) { argTys.push_back(Context.getCanonicalParamType(I->getType())); auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape( I->hasAttr()); extParamInfos.push_back(extParamInfo); } FunctionType::ExtInfo einfo; bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows(); einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows)); if (getContext().getLangOpts().ObjCAutoRefCount && MD->hasAttr()) einfo = einfo.withProducesResult(true); RequiredArgs required = (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All); return arrangeLLVMFunctionInfo( GetReturnType(MD->getReturnType()), /*instanceMethod=*/false, /*chainCall=*/false, argTys, einfo, extParamInfos, required); } const CGFunctionInfo & CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args) { auto argTypes = getArgTypesForCall(Context, args); FunctionType::ExtInfo einfo; return arrangeLLVMFunctionInfo( GetReturnType(returnType), /*instanceMethod=*/false, /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All); } const CGFunctionInfo & CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) { // FIXME: Do we need to handle ObjCMethodDecl? const FunctionDecl *FD = cast(GD.getDecl()); if (isa(GD.getDecl()) || isa(GD.getDecl())) return arrangeCXXStructorDeclaration(GD); return arrangeFunctionDeclaration(FD); } /// Arrange a thunk that takes 'this' as the first parameter followed by /// varargs. Return a void pointer, regardless of the actual return type. /// The body of the thunk will end in a musttail call to a function of the /// correct type, and the caller will bitcast the function to the correct /// prototype. const CGFunctionInfo & CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) { assert(MD->isVirtual() && "only methods have thunks"); CanQual FTP = GetFormalType(MD); CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)}; return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false, /*chainCall=*/false, ArgTys, FTP->getExtInfo(), {}, RequiredArgs(1)); } const CGFunctionInfo & CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD, CXXCtorType CT) { assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure); CanQual FTP = GetFormalType(CD); SmallVector ArgTys; const CXXRecordDecl *RD = CD->getParent(); ArgTys.push_back(DeriveThisType(RD, CD)); if (CT == Ctor_CopyingClosure) ArgTys.push_back(*FTP->param_type_begin()); if (RD->getNumVBases() > 0) ArgTys.push_back(Context.IntTy); CallingConv CC = Context.getDefaultCallingConvention( /*IsVariadic=*/false, /*IsCXXMethod=*/true); return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true, /*chainCall=*/false, ArgTys, FunctionType::ExtInfo(CC), {}, RequiredArgs::All); } /// Arrange a call as unto a free function, except possibly with an /// additional number of formal parameters considered required. static const CGFunctionInfo & arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, CodeGenModule &CGM, const CallArgList &args, const FunctionType *fnType, unsigned numExtraRequiredArgs, bool chainCall) { assert(args.size() >= numExtraRequiredArgs); llvm::SmallVector paramInfos; // In most cases, there are no optional arguments. RequiredArgs required = RequiredArgs::All; // If we have a variadic prototype, the required arguments are the // extra prefix plus the arguments in the prototype. 
if (const FunctionProtoType *proto = dyn_cast(fnType)) { if (proto->isVariadic()) required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs); if (proto->hasExtParameterInfos()) addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs, args.size()); // If we don't have a prototype at all, but we're supposed to // explicitly use the variadic convention for unprototyped calls, // treat all of the arguments as required but preserve the nominal // possibility of variadics. } else if (CGM.getTargetCodeGenInfo() .isNoProtoCallVariadic(args, cast(fnType))) { required = RequiredArgs(args.size()); } // FIXME: Kill copy. SmallVector argTypes; for (const auto &arg : args) argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty)); return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()), /*instanceMethod=*/false, chainCall, argTypes, fnType->getExtInfo(), paramInfos, required); } /// Figure out the rules for calling a function with the given formal /// type using the given arguments. The arguments are necessary /// because the function might be unprototyped, in which case it's /// target-dependent in crazy ways. const CGFunctionInfo & CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args, const FunctionType *fnType, bool chainCall) { return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, chainCall ? 1 : 0, chainCall); } /// A block function is essentially a free function with an /// extra implicit argument. const CGFunctionInfo & CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *fnType) { return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1, /*chainCall=*/false); } const CGFunctionInfo & CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto, const FunctionArgList ¶ms) { auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size()); auto argTypes = getArgTypesForDeclaration(Context, params); return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()), /*instanceMethod*/ false, /*chainCall*/ false, argTypes, proto->getExtInfo(), paramInfos, RequiredArgs::forPrototypePlus(proto, 1)); } const CGFunctionInfo & CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args) { // FIXME: Kill copy. SmallVector argTypes; for (const auto &Arg : args) argTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); return arrangeLLVMFunctionInfo( GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false, argTypes, FunctionType::ExtInfo(), /*paramInfos=*/ {}, RequiredArgs::All); } const CGFunctionInfo & CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args) { auto argTypes = getArgTypesForDeclaration(Context, args); return arrangeLLVMFunctionInfo( GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false, argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All); } const CGFunctionInfo & CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType, ArrayRef argTypes) { return arrangeLLVMFunctionInfo( resultType, /*instanceMethod=*/false, /*chainCall=*/false, argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All); } /// Arrange a call to a C++ method, passing the given arguments. /// /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It /// does not count `this`. 
const CGFunctionInfo & CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args, const FunctionProtoType *proto, RequiredArgs required, unsigned numPrefixArgs) { assert(numPrefixArgs + 1 <= args.size() && "Emitting a call with less args than the required prefix?"); // Add one to account for `this`. It's a bit awkward here, but we don't count // `this` in similar places elsewhere. auto paramInfos = getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size()); // FIXME: Kill copy. auto argTypes = getArgTypesForCall(Context, args); FunctionType::ExtInfo info = proto->getExtInfo(); return arrangeLLVMFunctionInfo( GetReturnType(proto->getReturnType()), /*instanceMethod=*/true, /*chainCall=*/false, argTypes, info, paramInfos, required); } const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() { return arrangeLLVMFunctionInfo( getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false, None, FunctionType::ExtInfo(), {}, RequiredArgs::All); } const CGFunctionInfo & CodeGenTypes::arrangeCall(const CGFunctionInfo &signature, const CallArgList &args) { assert(signature.arg_size() <= args.size()); if (signature.arg_size() == args.size()) return signature; SmallVector paramInfos; auto sigParamInfos = signature.getExtParameterInfos(); if (!sigParamInfos.empty()) { paramInfos.append(sigParamInfos.begin(), sigParamInfos.end()); paramInfos.resize(args.size()); } auto argTypes = getArgTypesForCall(Context, args); assert(signature.getRequiredArgs().allowsOptionalArgs()); return arrangeLLVMFunctionInfo(signature.getReturnType(), signature.isInstanceMethod(), signature.isChainCall(), argTypes, signature.getExtInfo(), paramInfos, signature.getRequiredArgs()); } namespace clang { namespace CodeGen { void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI); } } /// Arrange the argument and result information for an abstract value /// of a given function type. This is the method which all of the /// above functions ultimately defer to. const CGFunctionInfo & CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType, bool instanceMethod, bool chainCall, ArrayRef argTypes, FunctionType::ExtInfo info, ArrayRef paramInfos, RequiredArgs required) { assert(llvm::all_of(argTypes, [](CanQualType T) { return T.isCanonicalAsParam(); })); // Lookup or create unique function info. llvm::FoldingSetNodeID ID; CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos, required, resultType, argTypes); void *insertPos = nullptr; CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos); if (FI) return *FI; unsigned CC = ClangCallConvToLLVMCallConv(info.getCC()); // Construct the function info. We co-allocate the ArgInfos. FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info, paramInfos, resultType, argTypes, required); FunctionInfos.InsertNode(FI, insertPos); bool inserted = FunctionsBeingProcessed.insert(FI).second; (void)inserted; assert(inserted && "Recursively being processed?"); // Compute ABI information. if (CC == llvm::CallingConv::SPIR_KERNEL) { // Force target independent argument handling for the host visible // kernel functions. computeSPIRKernelABIInfo(CGM, *FI); } else if (info.getCC() == CC_Swift) { swiftcall::computeABIInfo(CGM, *FI); } else { getABIInfo().computeInfo(*FI); } // Loop over all of the computed argument and return value info. If any of // them are direct or extend without a specified coerce type, specify the // default now. 
ABIArgInfo &retInfo = FI->getReturnInfo(); if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr) retInfo.setCoerceToType(ConvertType(FI->getReturnType())); for (auto &I : FI->arguments()) if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr) I.info.setCoerceToType(ConvertType(I.type)); bool erased = FunctionsBeingProcessed.erase(FI); (void)erased; assert(erased && "Not in set?"); return *FI; } CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC, bool instanceMethod, bool chainCall, const FunctionType::ExtInfo &info, ArrayRef paramInfos, CanQualType resultType, ArrayRef argTypes, RequiredArgs required) { assert(paramInfos.empty() || paramInfos.size() == argTypes.size()); assert(!required.allowsOptionalArgs() || required.getNumRequiredArgs() <= argTypes.size()); void *buffer = operator new(totalSizeToAlloc( argTypes.size() + 1, paramInfos.size())); CGFunctionInfo *FI = new(buffer) CGFunctionInfo(); FI->CallingConvention = llvmCC; FI->EffectiveCallingConvention = llvmCC; FI->ASTCallingConvention = info.getCC(); FI->InstanceMethod = instanceMethod; FI->ChainCall = chainCall; FI->CmseNSCall = info.getCmseNSCall(); FI->NoReturn = info.getNoReturn(); FI->ReturnsRetained = info.getProducesResult(); FI->NoCallerSavedRegs = info.getNoCallerSavedRegs(); FI->NoCfCheck = info.getNoCfCheck(); FI->Required = required; FI->HasRegParm = info.getHasRegParm(); FI->RegParm = info.getRegParm(); FI->ArgStruct = nullptr; FI->ArgStructAlign = 0; FI->NumArgs = argTypes.size(); FI->HasExtParameterInfos = !paramInfos.empty(); FI->getArgsBuffer()[0].type = resultType; for (unsigned i = 0, e = argTypes.size(); i != e; ++i) FI->getArgsBuffer()[i + 1].type = argTypes[i]; for (unsigned i = 0, e = paramInfos.size(); i != e; ++i) FI->getExtParameterInfosBuffer()[i] = paramInfos[i]; return FI; } /***/ namespace { // ABIArgInfo::Expand implementation. // Specifies the way QualType passed as ABIArgInfo::Expand is expanded. struct TypeExpansion { enum TypeExpansionKind { // Elements of constant arrays are expanded recursively. TEK_ConstantArray, // Record fields are expanded recursively (but if record is a union, only // the field with the largest size is expanded). TEK_Record, // For complex types, real and imaginary parts are expanded recursively. TEK_Complex, // All other types are not expandable. 
TEK_None }; const TypeExpansionKind Kind; TypeExpansion(TypeExpansionKind K) : Kind(K) {} virtual ~TypeExpansion() {} }; struct ConstantArrayExpansion : TypeExpansion { QualType EltTy; uint64_t NumElts; ConstantArrayExpansion(QualType EltTy, uint64_t NumElts) : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {} static bool classof(const TypeExpansion *TE) { return TE->Kind == TEK_ConstantArray; } }; struct RecordExpansion : TypeExpansion { SmallVector Bases; SmallVector Fields; RecordExpansion(SmallVector &&Bases, SmallVector &&Fields) : TypeExpansion(TEK_Record), Bases(std::move(Bases)), Fields(std::move(Fields)) {} static bool classof(const TypeExpansion *TE) { return TE->Kind == TEK_Record; } }; struct ComplexExpansion : TypeExpansion { QualType EltTy; ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {} static bool classof(const TypeExpansion *TE) { return TE->Kind == TEK_Complex; } }; struct NoExpansion : TypeExpansion { NoExpansion() : TypeExpansion(TEK_None) {} static bool classof(const TypeExpansion *TE) { return TE->Kind == TEK_None; } }; } // namespace static std::unique_ptr getTypeExpansion(QualType Ty, const ASTContext &Context) { if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { return std::make_unique( AT->getElementType(), AT->getSize().getZExtValue()); } if (const RecordType *RT = Ty->getAs()) { SmallVector Bases; SmallVector Fields; const RecordDecl *RD = RT->getDecl(); assert(!RD->hasFlexibleArrayMember() && "Cannot expand structure with flexible array."); if (RD->isUnion()) { // Unions can be here only in degenerative cases - all the fields are same // after flattening. Thus we have to use the "largest" field. const FieldDecl *LargestFD = nullptr; CharUnits UnionSize = CharUnits::Zero(); for (const auto *FD : RD->fields()) { if (FD->isZeroLengthBitField(Context)) continue; assert(!FD->isBitField() && "Cannot expand structure with bit-field members."); CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType()); if (UnionSize < FieldSize) { UnionSize = FieldSize; LargestFD = FD; } } if (LargestFD) Fields.push_back(LargestFD); } else { if (const auto *CXXRD = dyn_cast(RD)) { assert(!CXXRD->isDynamicClass() && "cannot expand vtable pointers in dynamic classes"); for (const CXXBaseSpecifier &BS : CXXRD->bases()) Bases.push_back(&BS); } for (const auto *FD : RD->fields()) { if (FD->isZeroLengthBitField(Context)) continue; assert(!FD->isBitField() && "Cannot expand structure with bit-field members."); Fields.push_back(FD); } } return std::make_unique(std::move(Bases), std::move(Fields)); } if (const ComplexType *CT = Ty->getAs()) { return std::make_unique(CT->getElementType()); } return std::make_unique(); } static int getExpansionSize(QualType Ty, const ASTContext &Context) { auto Exp = getTypeExpansion(Ty, Context); if (auto CAExp = dyn_cast(Exp.get())) { return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context); } if (auto RExp = dyn_cast(Exp.get())) { int Res = 0; for (auto BS : RExp->Bases) Res += getExpansionSize(BS->getType(), Context); for (auto FD : RExp->Fields) Res += getExpansionSize(FD->getType(), Context); return Res; } if (isa(Exp.get())) return 2; assert(isa(Exp.get())); return 1; } void CodeGenTypes::getExpandedTypes(QualType Ty, SmallVectorImpl::iterator &TI) { auto Exp = getTypeExpansion(Ty, Context); if (auto CAExp = dyn_cast(Exp.get())) { for (int i = 0, n = CAExp->NumElts; i < n; i++) { getExpandedTypes(CAExp->EltTy, TI); } } else if (auto RExp = dyn_cast(Exp.get())) { for (auto BS 
: RExp->Bases) getExpandedTypes(BS->getType(), TI); for (auto FD : RExp->Fields) getExpandedTypes(FD->getType(), TI); } else if (auto CExp = dyn_cast(Exp.get())) { llvm::Type *EltTy = ConvertType(CExp->EltTy); *TI++ = EltTy; *TI++ = EltTy; } else { assert(isa(Exp.get())); *TI++ = ConvertType(Ty); } } static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref Fn) { CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy); CharUnits EltAlign = BaseAddr.getAlignment().alignmentOfArrayElement(EltSize); for (int i = 0, n = CAE->NumElts; i < n; i++) { llvm::Value *EltAddr = CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i); Fn(Address(EltAddr, EltAlign)); } } void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV, llvm::Function::arg_iterator &AI) { assert(LV.isSimple() && "Unexpected non-simple lvalue during struct expansion."); auto Exp = getTypeExpansion(Ty, getContext()); if (auto CAExp = dyn_cast(Exp.get())) { forConstantArrayExpansion( *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) { LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy); ExpandTypeFromArgs(CAExp->EltTy, LV, AI); }); } else if (auto RExp = dyn_cast(Exp.get())) { Address This = LV.getAddress(*this); for (const CXXBaseSpecifier *BS : RExp->Bases) { // Perform a single step derived-to-base conversion. Address Base = GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1, /*NullCheckValue=*/false, SourceLocation()); LValue SubLV = MakeAddrLValue(Base, BS->getType()); // Recurse onto bases. ExpandTypeFromArgs(BS->getType(), SubLV, AI); } for (auto FD : RExp->Fields) { // FIXME: What are the right qualifiers here? LValue SubLV = EmitLValueForFieldInitialization(LV, FD); ExpandTypeFromArgs(FD->getType(), SubLV, AI); } } else if (isa(Exp.get())) { auto realValue = &*AI++; auto imagValue = &*AI++; EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true); } else { // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a // primitive store. assert(isa(Exp.get())); if (LV.isBitField()) EmitStoreThroughLValue(RValue::get(&*AI++), LV); else EmitStoreOfScalar(&*AI++, LV); } } void CodeGenFunction::ExpandTypeToArgs( QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy, SmallVectorImpl &IRCallArgs, unsigned &IRCallArgPos) { auto Exp = getTypeExpansion(Ty, getContext()); if (auto CAExp = dyn_cast(Exp.get())) { Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this) : Arg.getKnownRValue().getAggregateAddress(); forConstantArrayExpansion( *this, CAExp, Addr, [&](Address EltAddr) { CallArg EltArg = CallArg( convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()), CAExp->EltTy); ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs, IRCallArgPos); }); } else if (auto RExp = dyn_cast(Exp.get())) { Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this) : Arg.getKnownRValue().getAggregateAddress(); for (const CXXBaseSpecifier *BS : RExp->Bases) { // Perform a single step derived-to-base conversion. Address Base = GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1, /*NullCheckValue=*/false, SourceLocation()); CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType()); // Recurse onto bases. 
ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs, IRCallArgPos); } LValue LV = MakeAddrLValue(This, Ty); for (auto FD : RExp->Fields) { CallArg FldArg = CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType()); ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs, IRCallArgPos); } } else if (isa(Exp.get())) { ComplexPairTy CV = Arg.getKnownRValue().getComplexVal(); IRCallArgs[IRCallArgPos++] = CV.first; IRCallArgs[IRCallArgPos++] = CV.second; } else { assert(isa(Exp.get())); auto RV = Arg.getKnownRValue(); assert(RV.isScalar() && "Unexpected non-scalar rvalue during struct expansion."); // Insert a bitcast as needed. llvm::Value *V = RV.getScalarVal(); if (IRCallArgPos < IRFuncTy->getNumParams() && V->getType() != IRFuncTy->getParamType(IRCallArgPos)) V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos)); IRCallArgs[IRCallArgPos++] = V; } } /// Create a temporary allocation for the purposes of coercion. static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, CharUnits MinAlign, const Twine &Name = "tmp") { // Don't use an alignment that's worse than what LLVM would prefer. auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty); CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign)); return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce"); } /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are /// accessing some number of bytes out of it, try to gep into the struct to get /// at its inner goodness. Dive as deep as possible without entering an element /// with an in-memory size smaller than DstSize. static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF) { // We can't dive into a zero-element struct. if (SrcSTy->getNumElements() == 0) return SrcPtr; llvm::Type *FirstElt = SrcSTy->getElementType(0); // If the first elt is at least as large as what we're looking for, or if the // first element is the same size as the whole struct, we can enter it. The // comparison must be made on the store size and not the alloca size. Using // the alloca size may overstate the size of the load. uint64_t FirstEltSize = CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt); if (FirstEltSize < DstSize && FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy)) return SrcPtr; // GEP into the first element. SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive"); // If the first element is a struct, recurse. llvm::Type *SrcTy = SrcPtr.getElementType(); if (llvm::StructType *SrcSTy = dyn_cast(SrcTy)) return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF); return SrcPtr; } /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both /// are either integers or pointers. This does a truncation of the value if it /// is too large or a zero extension if it is too small. /// /// This behaves as if the value were coerced through memory, so on big-endian /// targets the high bits are preserved in a truncation, while little-endian /// targets preserve the low bits. static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF) { if (Val->getType() == Ty) return Val; if (isa(Val->getType())) { // If this is Pointer->Pointer avoid conversion to and from int. if (isa(Ty)) return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val"); // Convert the pointer to an integer so we can play with its width. 
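#if 0
  // Editor's note -- illustrative sketch only, not part of this patch. What
  // the "coerced through memory" rule above means for an i64 -> i32 narrowing
  // of the value 0xAABBCCDD11223344:
  //   big-endian target:    lshr by 32, then trunc  -> 0xAABBCCDD (high bits)
  //   little-endian target: plain trunc via IntCast -> 0x11223344 (low bits)
  // The same arithmetic, checked standalone (assuming 32-bit unsigned):
  static_assert((unsigned)(0xAABBCCDD11223344ull >> 32) == 0xAABBCCDDu,
                "big-endian coercion keeps the high bits");
  static_assert((unsigned)0xAABBCCDD11223344ull == 0x11223344u,
                "little-endian coercion keeps the low bits");
#endif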
Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi"); } llvm::Type *DestIntTy = Ty; if (isa(DestIntTy)) DestIntTy = CGF.IntPtrTy; if (Val->getType() != DestIntTy) { const llvm::DataLayout &DL = CGF.CGM.getDataLayout(); if (DL.isBigEndian()) { // Preserve the high bits on big-endian targets. // That is what memory coercion does. uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType()); uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy); if (SrcSize > DstSize) { Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits"); Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii"); } else { Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii"); Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits"); } } else { // Little-endian targets preserve the low bits. No shifts required. Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii"); } } if (isa(Ty)) Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip"); return Val; } /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as /// a pointer to an object of type \arg Ty, known to be aligned to /// \arg SrcAlign bytes. /// /// This safely handles the case when the src type is smaller than the /// destination type; in this situation the values of bits which not /// present in the src are undefined. static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty, CodeGenFunction &CGF) { llvm::Type *SrcTy = Src.getElementType(); // If SrcTy and Ty are the same, just do a load. if (SrcTy == Ty) return CGF.Builder.CreateLoad(Src); llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty); if (llvm::StructType *SrcSTy = dyn_cast(SrcTy)) { Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize.getFixedSize(), CGF); SrcTy = Src.getElementType(); } llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy); // If the source and destination are integer or pointer types, just do an // extension or truncation to the desired type. if ((isa(Ty) || isa(Ty)) && (isa(SrcTy) || isa(SrcTy))) { llvm::Value *Load = CGF.Builder.CreateLoad(Src); return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF); } // If load is legal, just bitcast the src pointer. if (!SrcSize.isScalable() && !DstSize.isScalable() && SrcSize.getFixedSize() >= DstSize.getFixedSize()) { // Generally SrcSize is never greater than DstSize, since this means we are // losing bits. However, this can happen in cases where the structure has // additional padding, for example due to a user specified alignment. // // FIXME: Assert that we aren't truncating non-padding bits when have access // to that information. Src = CGF.Builder.CreateBitCast(Src, Ty->getPointerTo(Src.getAddressSpace())); return CGF.Builder.CreateLoad(Src); } // Otherwise do coercion through memory. This is stupid, but simple. Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName()); CGF.Builder.CreateMemCpy( Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(), Src.getAlignment().getAsAlign(), llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinSize())); return CGF.Builder.CreateLoad(Tmp); } // Function to store a first-class aggregate into memory. We prefer to // store the elements rather than the aggregate to be more friendly to // fast-isel. // FIXME: Do we need to recurse here? void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest, bool DestIsVolatile) { // Prefer scalar stores to first-class aggregate stores. 
if (llvm::StructType *STy = dyn_cast(Val->getType())) { for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { Address EltPtr = Builder.CreateStructGEP(Dest, i); llvm::Value *Elt = Builder.CreateExtractValue(Val, i); Builder.CreateStore(Elt, EltPtr, DestIsVolatile); } } else { Builder.CreateStore(Val, Dest, DestIsVolatile); } } /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src, /// where the source and destination may have different types. The /// destination is known to be aligned to \arg DstAlign bytes. /// /// This safely handles the case when the src type is larger than the /// destination type; the upper bits of the src will be lost. static void CreateCoercedStore(llvm::Value *Src, Address Dst, bool DstIsVolatile, CodeGenFunction &CGF) { llvm::Type *SrcTy = Src->getType(); llvm::Type *DstTy = Dst.getElementType(); if (SrcTy == DstTy) { CGF.Builder.CreateStore(Src, Dst, DstIsVolatile); return; } llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy); if (llvm::StructType *DstSTy = dyn_cast(DstTy)) { Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize.getFixedSize(), CGF); DstTy = Dst.getElementType(); } llvm::PointerType *SrcPtrTy = llvm::dyn_cast(SrcTy); llvm::PointerType *DstPtrTy = llvm::dyn_cast(DstTy); if (SrcPtrTy && DstPtrTy && SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) { Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy); CGF.Builder.CreateStore(Src, Dst, DstIsVolatile); return; } // If the source and destination are integer or pointer types, just do an // extension or truncation to the desired type. if ((isa(SrcTy) || isa(SrcTy)) && (isa(DstTy) || isa(DstTy))) { Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF); CGF.Builder.CreateStore(Src, Dst, DstIsVolatile); return; } llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy); // If store is legal, just bitcast the src pointer. if (isa(SrcTy) || isa(DstTy) || SrcSize.getFixedSize() <= DstSize.getFixedSize()) { Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy); CGF.EmitAggregateStore(Src, Dst, DstIsVolatile); } else { // Otherwise do coercion through memory. This is stupid, but // simple. // Generally SrcSize is never greater than DstSize, since this means we are // losing bits. However, this can happen in cases where the structure has // additional padding, for example due to a user specified alignment. // // FIXME: Assert that we aren't truncating non-padding bits when have access // to that information. Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment()); CGF.Builder.CreateStore(Src, Tmp); CGF.Builder.CreateMemCpy( Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize())); } } static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info) { if (unsigned offset = info.getDirectOffset()) { addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty); addr = CGF.Builder.CreateConstInBoundsByteGEP(addr, CharUnits::fromQuantity(offset)); addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType()); } return addr; } namespace { /// Encapsulates information about the way function arguments from /// CGFunctionInfo should be passed to actual LLVM IR function. class ClangToLLVMArgMapping { static const unsigned InvalidIndex = ~0U; unsigned InallocaArgNo; unsigned SRetArgNo; unsigned TotalIRArgs; /// Arguments of LLVM IR function corresponding to single Clang argument. 
struct IRArgs { unsigned PaddingArgIndex; // Argument is expanded to IR arguments at positions // [FirstArgIndex, FirstArgIndex + NumberOfArgs). unsigned FirstArgIndex; unsigned NumberOfArgs; IRArgs() : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex), NumberOfArgs(0) {} }; SmallVector ArgInfo; public: ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI, bool OnlyRequiredArgs = false) : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0), ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) { construct(Context, FI, OnlyRequiredArgs); } bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; } unsigned getInallocaArgNo() const { assert(hasInallocaArg()); return InallocaArgNo; } bool hasSRetArg() const { return SRetArgNo != InvalidIndex; } unsigned getSRetArgNo() const { assert(hasSRetArg()); return SRetArgNo; } unsigned totalIRArgs() const { return TotalIRArgs; } bool hasPaddingArg(unsigned ArgNo) const { assert(ArgNo < ArgInfo.size()); return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex; } unsigned getPaddingArgNo(unsigned ArgNo) const { assert(hasPaddingArg(ArgNo)); return ArgInfo[ArgNo].PaddingArgIndex; } /// Returns index of first IR argument corresponding to ArgNo, and their /// quantity. std::pair getIRArgs(unsigned ArgNo) const { assert(ArgNo < ArgInfo.size()); return std::make_pair(ArgInfo[ArgNo].FirstArgIndex, ArgInfo[ArgNo].NumberOfArgs); } private: void construct(const ASTContext &Context, const CGFunctionInfo &FI, bool OnlyRequiredArgs); }; void ClangToLLVMArgMapping::construct(const ASTContext &Context, const CGFunctionInfo &FI, bool OnlyRequiredArgs) { unsigned IRArgNo = 0; bool SwapThisWithSRet = false; const ABIArgInfo &RetAI = FI.getReturnInfo(); if (RetAI.getKind() == ABIArgInfo::Indirect) { SwapThisWithSRet = RetAI.isSRetAfterThis(); SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++; } unsigned ArgNo = 0; unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size(); for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs; ++I, ++ArgNo) { assert(I != FI.arg_end()); QualType ArgType = I->type; const ABIArgInfo &AI = I->info; // Collect data about IR arguments corresponding to Clang argument ArgNo. auto &IRArgs = ArgInfo[ArgNo]; if (AI.getPaddingType()) IRArgs.PaddingArgIndex = IRArgNo++; switch (AI.getKind()) { case ABIArgInfo::Extend: case ABIArgInfo::Direct: { // FIXME: handle sseregparm someday... llvm::StructType *STy = dyn_cast(AI.getCoerceToType()); if (AI.isDirect() && AI.getCanBeFlattened() && STy) { IRArgs.NumberOfArgs = STy->getNumElements(); } else { IRArgs.NumberOfArgs = 1; } break; } case ABIArgInfo::Indirect: case ABIArgInfo::IndirectAliased: IRArgs.NumberOfArgs = 1; break; case ABIArgInfo::Ignore: case ABIArgInfo::InAlloca: // ignore and inalloca doesn't have matching LLVM parameters. IRArgs.NumberOfArgs = 0; break; case ABIArgInfo::CoerceAndExpand: IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size(); break; case ABIArgInfo::Expand: IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context); break; } if (IRArgs.NumberOfArgs > 0) { IRArgs.FirstArgIndex = IRArgNo; IRArgNo += IRArgs.NumberOfArgs; } // Skip over the sret parameter when it comes second. We already handled it // above. 
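#if 0
  // Editor's note -- illustrative sketch only, not part of this patch; the
  // class and method below are hypothetical. When the indirect return slot is
  // passed after 'this' (SwapThisWithSRet, e.g. in the Microsoft C++ ABI), the
  // mapping built by this loop for
  struct Big { int x[10]; };            // returned through a hidden pointer
  struct C { Big f(int a, int b); };
  // comes out as:
  //   IR arg 0: this          (CGFunctionInfo arg 0)
  //   IR arg 1: sret pointer  (SRetArgNo == 1, no corresponding Clang arg)
  //   IR arg 2: a             (CGFunctionInfo arg 1)
  //   IR arg 3: b             (CGFunctionInfo arg 2)
  // The check below bumps IRArgNo past slot 1 exactly once, right after 'this'
  // has been assigned slot 0, so no Clang argument lands on the reserved sret
  // slot.
#endif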
if (IRArgNo == 1 && SwapThisWithSRet) IRArgNo++; } assert(ArgNo == ArgInfo.size()); if (FI.usesInAlloca()) InallocaArgNo = IRArgNo++; TotalIRArgs = IRArgNo; } } // namespace /***/ bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) { const auto &RI = FI.getReturnInfo(); return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet()); } bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) { return ReturnTypeUsesSRet(FI) && getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs(); } bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) { if (const BuiltinType *BT = ResultType->getAs()) { switch (BT->getKind()) { default: return false; case BuiltinType::Float: return getTarget().useObjCFPRetForRealType(TargetInfo::Float); case BuiltinType::Double: return getTarget().useObjCFPRetForRealType(TargetInfo::Double); case BuiltinType::LongDouble: return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble); } } return false; } bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) { if (const ComplexType *CT = ResultType->getAs()) { if (const BuiltinType *BT = CT->getElementType()->getAs()) { if (BT->getKind() == BuiltinType::LongDouble) return getTarget().useObjCFP2RetForComplexLongDouble(); } } return false; } llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) { const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD); return GetFunctionType(FI); } llvm::FunctionType * CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) { bool Inserted = FunctionsBeingProcessed.insert(&FI).second; (void)Inserted; assert(Inserted && "Recursively being processed?"); llvm::Type *resultType = nullptr; const ABIArgInfo &retAI = FI.getReturnInfo(); switch (retAI.getKind()) { case ABIArgInfo::Expand: case ABIArgInfo::IndirectAliased: llvm_unreachable("Invalid ABI kind for return argument"); case ABIArgInfo::Extend: case ABIArgInfo::Direct: resultType = retAI.getCoerceToType(); break; case ABIArgInfo::InAlloca: if (retAI.getInAllocaSRet()) { // sret things on win32 aren't void, they return the sret pointer. QualType ret = FI.getReturnType(); llvm::Type *ty = ConvertType(ret); unsigned addressSpace = Context.getTargetAddressSpace(ret); resultType = llvm::PointerType::get(ty, addressSpace); } else { resultType = llvm::Type::getVoidTy(getLLVMContext()); } break; case ABIArgInfo::Indirect: case ABIArgInfo::Ignore: resultType = llvm::Type::getVoidTy(getLLVMContext()); break; case ABIArgInfo::CoerceAndExpand: resultType = retAI.getUnpaddedCoerceAndExpandType(); break; } ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true); SmallVector ArgTypes(IRFunctionArgs.totalIRArgs()); // Add type for sret argument. if (IRFunctionArgs.hasSRetArg()) { QualType Ret = FI.getReturnType(); llvm::Type *Ty = ConvertType(Ret); unsigned AddressSpace = Context.getTargetAddressSpace(Ret); ArgTypes[IRFunctionArgs.getSRetArgNo()] = llvm::PointerType::get(Ty, AddressSpace); } // Add type for inalloca argument. if (IRFunctionArgs.hasInallocaArg()) { auto ArgStruct = FI.getArgStruct(); assert(ArgStruct); ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo(); } // Add in all of the required arguments. unsigned ArgNo = 0; CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), ie = it + FI.getNumRequiredArgs(); for (; it != ie; ++it, ++ArgNo) { const ABIArgInfo &ArgInfo = it->info; // Insert a padding type to ensure proper alignment. 
if (IRFunctionArgs.hasPaddingArg(ArgNo)) ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] = ArgInfo.getPaddingType(); unsigned FirstIRArg, NumIRArgs; std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); switch (ArgInfo.getKind()) { case ABIArgInfo::Ignore: case ABIArgInfo::InAlloca: assert(NumIRArgs == 0); break; case ABIArgInfo::Indirect: { assert(NumIRArgs == 1); // indirect arguments are always on the stack, which is alloca addr space. llvm::Type *LTy = ConvertTypeForMem(it->type); ArgTypes[FirstIRArg] = LTy->getPointerTo( CGM.getDataLayout().getAllocaAddrSpace()); break; } case ABIArgInfo::IndirectAliased: { assert(NumIRArgs == 1); llvm::Type *LTy = ConvertTypeForMem(it->type); ArgTypes[FirstIRArg] = LTy->getPointerTo(ArgInfo.getIndirectAddrSpace()); break; } case ABIArgInfo::Extend: case ABIArgInfo::Direct: { // Fast-isel and the optimizer generally like scalar values better than // FCAs, so we flatten them if this is safe to do for this argument. llvm::Type *argType = ArgInfo.getCoerceToType(); llvm::StructType *st = dyn_cast(argType); if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { assert(NumIRArgs == st->getNumElements()); for (unsigned i = 0, e = st->getNumElements(); i != e; ++i) ArgTypes[FirstIRArg + i] = st->getElementType(i); } else { assert(NumIRArgs == 1); ArgTypes[FirstIRArg] = argType; } break; } case ABIArgInfo::CoerceAndExpand: { auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) { *ArgTypesIter++ = EltTy; } assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); break; } case ABIArgInfo::Expand: auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; getExpandedTypes(it->type, ArgTypesIter); assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); break; } } bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased; assert(Erased && "Not in set?"); return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic()); } llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { const CXXMethodDecl *MD = cast(GD.getDecl()); const FunctionProtoType *FPT = MD->getType()->getAs(); if (!isFuncTypeConvertible(FPT)) return llvm::StructType::get(getLLVMContext()); return GetFunctionType(GD); } static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT) { if (!FPT) return; if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) && FPT->isNothrow()) FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); } void CodeGenModule::getDefaultFunctionAttributes(StringRef Name, bool HasOptnone, bool AttrOnCallSite, llvm::AttrBuilder &FuncAttrs) { // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed. if (!HasOptnone) { if (CodeGenOpts.OptimizeSize) FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize); if (CodeGenOpts.OptimizeSize == 2) FuncAttrs.addAttribute(llvm::Attribute::MinSize); } if (CodeGenOpts.DisableRedZone) FuncAttrs.addAttribute(llvm::Attribute::NoRedZone); if (CodeGenOpts.IndirectTlsSegRefs) FuncAttrs.addAttribute("indirect-tls-seg-refs"); if (CodeGenOpts.NoImplicitFloat) FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat); if (AttrOnCallSite) { // Attributes that should go on the call site only. 
if (!CodeGenOpts.SimplifyLibCalls || CodeGenOpts.isNoBuiltinFunc(Name.data())) FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin); if (!CodeGenOpts.TrapFuncName.empty()) FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName); } else { StringRef FpKind; switch (CodeGenOpts.getFramePointer()) { case CodeGenOptions::FramePointerKind::None: FpKind = "none"; break; case CodeGenOptions::FramePointerKind::NonLeaf: FpKind = "non-leaf"; break; case CodeGenOptions::FramePointerKind::All: FpKind = "all"; break; } FuncAttrs.addAttribute("frame-pointer", FpKind); FuncAttrs.addAttribute("less-precise-fpmad", llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD)); if (CodeGenOpts.NullPointerIsValid) FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid); if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE()) FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode.str()); if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) { FuncAttrs.addAttribute( "denormal-fp-math-f32", CodeGenOpts.FP32DenormalMode.str()); } FuncAttrs.addAttribute("no-trapping-math", llvm::toStringRef(LangOpts.getFPExceptionMode() == LangOptions::FPE_Ignore)); // Strict (compliant) code is the default, so only add this attribute to // indicate that we are trying to workaround a problem case. if (!CodeGenOpts.StrictFloatCastOverflow) FuncAttrs.addAttribute("strict-float-cast-overflow", "false"); // TODO: Are these all needed? // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags. FuncAttrs.addAttribute("no-infs-fp-math", llvm::toStringRef(LangOpts.NoHonorInfs)); FuncAttrs.addAttribute("no-nans-fp-math", llvm::toStringRef(LangOpts.NoHonorNaNs)); FuncAttrs.addAttribute("unsafe-fp-math", llvm::toStringRef(LangOpts.UnsafeFPMath)); FuncAttrs.addAttribute("use-soft-float", llvm::toStringRef(CodeGenOpts.SoftFloat)); FuncAttrs.addAttribute("stack-protector-buffer-size", llvm::utostr(CodeGenOpts.SSPBufferSize)); FuncAttrs.addAttribute("no-signed-zeros-fp-math", llvm::toStringRef(LangOpts.NoSignedZero)); if (getLangOpts().OpenCL) { FuncAttrs.addAttribute( "correctly-rounded-divide-sqrt-fp-math", llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt)); } // TODO: Reciprocal estimate codegen options should apply to instructions? const std::vector &Recips = CodeGenOpts.Reciprocals; if (!Recips.empty()) FuncAttrs.addAttribute("reciprocal-estimates", llvm::join(Recips, ",")); if (!CodeGenOpts.PreferVectorWidth.empty() && CodeGenOpts.PreferVectorWidth != "none") FuncAttrs.addAttribute("prefer-vector-width", CodeGenOpts.PreferVectorWidth); if (CodeGenOpts.StackRealignment) FuncAttrs.addAttribute("stackrealign"); if (CodeGenOpts.Backchain) FuncAttrs.addAttribute("backchain"); if (CodeGenOpts.EnableSegmentedStacks) FuncAttrs.addAttribute("split-stack"); if (CodeGenOpts.SpeculativeLoadHardening) FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening); } if (getLangOpts().assumeFunctionsAreConvergent()) { // Conservatively, mark all functions and calls in CUDA and OpenCL as // convergent (meaning, they may call an intrinsically convergent op, such // as __syncthreads() / barrier(), and so can't have certain optimizations // applied around them). LLVM will remove this attribute where it safely // can. FuncAttrs.addAttribute(llvm::Attribute::Convergent); } if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) { // Exceptions aren't supported in CUDA device code. 
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); } for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) { StringRef Var, Value; std::tie(Var, Value) = Attr.split('='); FuncAttrs.addAttribute(Var, Value); } } void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) { llvm::AttrBuilder FuncAttrs; getDefaultFunctionAttributes(F.getName(), F.hasOptNone(), /* AttrOnCallSite = */ false, FuncAttrs); // TODO: call GetCPUAndFeaturesAttributes? F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs); } void CodeGenModule::addDefaultFunctionDefinitionAttributes( llvm::AttrBuilder &attrs) { getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false, /*for call*/ false, attrs); GetCPUAndFeaturesAttributes(GlobalDecl(), attrs); } static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs, const LangOptions &LangOpts, const NoBuiltinAttr *NBA = nullptr) { auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) { SmallString<32> AttributeName; AttributeName += "no-builtin-"; AttributeName += BuiltinName; FuncAttrs.addAttribute(AttributeName); }; // First, handle the language options passed through -fno-builtin. if (LangOpts.NoBuiltin) { // -fno-builtin disables them all. FuncAttrs.addAttribute("no-builtins"); return; } // Then, add attributes for builtins specified through -fno-builtin-. llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr); // Now, let's check the __attribute__((no_builtin("...")) attribute added to // the source. if (!NBA) return; // If there is a wildcard in the builtin names specified through the // attribute, disable them all. if (llvm::is_contained(NBA->builtinNames(), "*")) { FuncAttrs.addAttribute("no-builtins"); return; } // And last, add the rest of the builtin names. llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr); } /// Construct the IR attribute list of a function or call. /// /// When adding an attribute, please consider where it should be handled: /// /// - getDefaultFunctionAttributes is for attributes that are essentially /// part of the global target configuration (but perhaps can be /// overridden on a per-function basis). Adding attributes there /// will cause them to also be set in frontends that build on Clang's /// target-configuration logic, as well as for code defined in library /// modules such as CUDA's libdevice. /// /// - ConstructAttributeList builds on top of getDefaultFunctionAttributes /// and adds declaration-specific, convention-specific, and /// frontend-specific logic. The last is of particular importance: /// attributes that restrict how the frontend generates code must be /// added here rather than getDefaultFunctionAttributes. /// void CodeGenModule::ConstructAttributeList( StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo, llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) { llvm::AttrBuilder FuncAttrs; llvm::AttrBuilder RetAttrs; // Collect function IR attributes from the CC lowering. // We'll collect the paramete and result attributes later. CallingConv = FI.getEffectiveCallingConvention(); if (FI.isNoReturn()) FuncAttrs.addAttribute(llvm::Attribute::NoReturn); if (FI.isCmseNSCall()) FuncAttrs.addAttribute("cmse_nonsecure_call"); // Collect function IR attributes from the callee prototype if we have one. 
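  // For example, a callee whose prototype is `void f() noexcept` (or the
  // C++03 `throw()` form) contributes `nounwind` here through
  // AddAttributesFromFunctionProtoType above.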
AddAttributesFromFunctionProtoType(getContext(), FuncAttrs, CalleeInfo.getCalleeFunctionProtoType()); const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl(); bool HasOptnone = false; // The NoBuiltinAttr attached to the target FunctionDecl. const NoBuiltinAttr *NBA = nullptr; // Collect function IR attributes based on declaration-specific // information. // FIXME: handle sseregparm someday... if (TargetDecl) { if (TargetDecl->hasAttr()) FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice); if (TargetDecl->hasAttr()) FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); if (TargetDecl->hasAttr()) FuncAttrs.addAttribute(llvm::Attribute::NoReturn); if (TargetDecl->hasAttr()) FuncAttrs.addAttribute(llvm::Attribute::Cold); if (TargetDecl->hasAttr()) FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate); if (TargetDecl->hasAttr()) FuncAttrs.addAttribute(llvm::Attribute::Convergent); if (const FunctionDecl *Fn = dyn_cast(TargetDecl)) { AddAttributesFromFunctionProtoType( getContext(), FuncAttrs, Fn->getType()->getAs()); if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) { // A sane operator new returns a non-aliasing pointer. auto Kind = Fn->getDeclName().getCXXOverloadedOperator(); if (getCodeGenOpts().AssumeSaneOperatorNew && (Kind == OO_New || Kind == OO_Array_New)) RetAttrs.addAttribute(llvm::Attribute::NoAlias); } const CXXMethodDecl *MD = dyn_cast(Fn); const bool IsVirtualCall = MD && MD->isVirtual(); // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a // virtual function. These attributes are not inherited by overloads. if (!(AttrOnCallSite && IsVirtualCall)) { if (Fn->isNoReturn()) FuncAttrs.addAttribute(llvm::Attribute::NoReturn); NBA = Fn->getAttr(); } } // 'const', 'pure' and 'noalias' attributed functions are also nounwind. if (TargetDecl->hasAttr()) { FuncAttrs.addAttribute(llvm::Attribute::ReadNone); FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); } else if (TargetDecl->hasAttr()) { FuncAttrs.addAttribute(llvm::Attribute::ReadOnly); FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); } else if (TargetDecl->hasAttr()) { FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly); FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); } if (TargetDecl->hasAttr()) RetAttrs.addAttribute(llvm::Attribute::NoAlias); if (TargetDecl->hasAttr() && !CodeGenOpts.NullPointerIsValid) RetAttrs.addAttribute(llvm::Attribute::NonNull); if (TargetDecl->hasAttr()) FuncAttrs.addAttribute("no_caller_saved_registers"); if (TargetDecl->hasAttr()) FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck); HasOptnone = TargetDecl->hasAttr(); if (auto *AllocSize = TargetDecl->getAttr()) { Optional NumElemsParam; if (AllocSize->getNumElemsParam().isValid()) NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex(); FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(), NumElemsParam); } if (TargetDecl->hasAttr()) { if (getLangOpts().OpenCLVersion <= 120) { // OpenCL v1.2 Work groups are always uniform FuncAttrs.addAttribute("uniform-work-group-size", "true"); } else { // OpenCL v2.0 Work groups may be whether uniform or not. // '-cl-uniform-work-group-size' compile option gets a hint // to the compiler that the global work-size be a multiple of // the work-group size specified to clEnqueueNDRangeKernel // (i.e. work groups are uniform). FuncAttrs.addAttribute("uniform-work-group-size", llvm::toStringRef(CodeGenOpts.UniformWGSize)); } } } // Attach "no-builtins" attributes to: // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-". 
// * definitions: "no-builtins" or "no-builtin-" only. // The attributes can come from: // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin- // * FunctionDecl attributes: __attribute__((no_builtin(...))) addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA); // Collect function IR attributes based on global settiings. getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs); // Override some default IR attributes based on declaration-specific // information. if (TargetDecl) { if (TargetDecl->hasAttr()) FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening); if (TargetDecl->hasAttr()) FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening); if (TargetDecl->hasAttr()) FuncAttrs.removeAttribute("split-stack"); // Add NonLazyBind attribute to function declarations when -fno-plt // is used. // FIXME: what if we just haven't processed the function definition // yet, or if it's an external definition like C99 inline? if (CodeGenOpts.NoPLT) { if (auto *Fn = dyn_cast(TargetDecl)) { if (!Fn->isDefined() && !AttrOnCallSite) { FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind); } } } } // Collect non-call-site function IR attributes from declaration-specific // information. if (!AttrOnCallSite) { if (TargetDecl && TargetDecl->hasAttr()) FuncAttrs.addAttribute("cmse_nonsecure_entry"); // Whether tail calls are enabled. auto shouldDisableTailCalls = [&] { // Should this be honored in getDefaultFunctionAttributes? if (CodeGenOpts.DisableTailCalls) return true; if (!TargetDecl) return false; if (TargetDecl->hasAttr() || TargetDecl->hasAttr()) return true; if (CodeGenOpts.NoEscapingBlockTailCalls) { if (const auto *BD = dyn_cast(TargetDecl)) if (!BD->doesNotEscape()) return true; } return false; }; FuncAttrs.addAttribute("disable-tail-calls", llvm::toStringRef(shouldDisableTailCalls())); // CPU/feature overrides. addDefaultFunctionDefinitionAttributes // handles these separately to set them based on the global defaults. GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs); } // Collect attributes from arguments and return values. ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI); QualType RetTy = FI.getReturnType(); const ABIArgInfo &RetAI = FI.getReturnInfo(); switch (RetAI.getKind()) { case ABIArgInfo::Extend: if (RetAI.isSignExt()) RetAttrs.addAttribute(llvm::Attribute::SExt); else RetAttrs.addAttribute(llvm::Attribute::ZExt); LLVM_FALLTHROUGH; case ABIArgInfo::Direct: if (RetAI.getInReg()) RetAttrs.addAttribute(llvm::Attribute::InReg); break; case ABIArgInfo::Ignore: break; case ABIArgInfo::InAlloca: case ABIArgInfo::Indirect: { // inalloca and sret disable readnone and readonly FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) .removeAttribute(llvm::Attribute::ReadNone); break; } case ABIArgInfo::CoerceAndExpand: break; case ABIArgInfo::Expand: case ABIArgInfo::IndirectAliased: llvm_unreachable("Invalid ABI kind for return argument"); } if (const auto *RefTy = RetTy->getAs()) { QualType PTy = RefTy->getPointeeType(); if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) RetAttrs.addDereferenceableAttr( getMinimumObjectSize(PTy).getQuantity()); if (getContext().getTargetAddressSpace(PTy) == 0 && !CodeGenOpts.NullPointerIsValid) RetAttrs.addAttribute(llvm::Attribute::NonNull); if (PTy->isObjectType()) { llvm::Align Alignment = getNaturalPointeeTypeAlignment(RetTy).getAsAlign(); RetAttrs.addAlignmentAttr(Alignment); } } bool hasUsedSRet = false; SmallVector ArgAttrs(IRFunctionArgs.totalIRArgs()); // Attach attributes to sret. 
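  // The sret parameter carries the pointee type and its required alignment;
  // in the emitted IR this looks roughly like
  //   void @f(%struct.S* sret(%struct.S) align 8 %agg.result, ...)
  // (illustrative only; the exact form depends on the target ABI).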
if (IRFunctionArgs.hasSRetArg()) { llvm::AttrBuilder SRETAttrs; SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy)); hasUsedSRet = true; if (RetAI.getInReg()) SRETAttrs.addAttribute(llvm::Attribute::InReg); SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity()); ArgAttrs[IRFunctionArgs.getSRetArgNo()] = llvm::AttributeSet::get(getLLVMContext(), SRETAttrs); } // Attach attributes to inalloca argument. if (IRFunctionArgs.hasInallocaArg()) { llvm::AttrBuilder Attrs; Attrs.addAttribute(llvm::Attribute::InAlloca); ArgAttrs[IRFunctionArgs.getInallocaArgNo()] = llvm::AttributeSet::get(getLLVMContext(), Attrs); } unsigned ArgNo = 0; for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), E = FI.arg_end(); I != E; ++I, ++ArgNo) { QualType ParamType = I->type; const ABIArgInfo &AI = I->info; llvm::AttrBuilder Attrs; // Add attribute for padding argument, if necessary. if (IRFunctionArgs.hasPaddingArg(ArgNo)) { if (AI.getPaddingInReg()) { ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = llvm::AttributeSet::get( getLLVMContext(), llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg)); } } // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we // have the corresponding parameter variable. It doesn't make // sense to do it here because parameters are so messed up. switch (AI.getKind()) { case ABIArgInfo::Extend: if (AI.isSignExt()) Attrs.addAttribute(llvm::Attribute::SExt); else Attrs.addAttribute(llvm::Attribute::ZExt); LLVM_FALLTHROUGH; case ABIArgInfo::Direct: if (ArgNo == 0 && FI.isChainCall()) Attrs.addAttribute(llvm::Attribute::Nest); else if (AI.getInReg()) Attrs.addAttribute(llvm::Attribute::InReg); break; case ABIArgInfo::Indirect: { if (AI.getInReg()) Attrs.addAttribute(llvm::Attribute::InReg); if (AI.getIndirectByVal()) Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType)); auto *Decl = ParamType->getAsRecordDecl(); if (CodeGenOpts.PassByValueIsNoAlias && Decl && Decl->getArgPassingRestrictions() == RecordDecl::APK_CanPassInRegs) // When calling the function, the pointer passed in will be the only // reference to the underlying object. Mark it accordingly. Attrs.addAttribute(llvm::Attribute::NoAlias); // TODO: We could add the byref attribute if not byval, but it would // require updating many testcases. CharUnits Align = AI.getIndirectAlign(); // In a byval argument, it is important that the required // alignment of the type is honored, as LLVM might be creating a // *new* stack object, and needs to know what alignment to give // it. (Sometimes it can deduce a sensible alignment on its own, // but not if clang decides it must emit a packed struct, or the // user specifies increased alignment requirements.) // // This is different from indirect *not* byval, where the object // exists already, and the align attribute is purely // informative. assert(!Align.isZero()); // For now, only add this when we have a byval argument. // TODO: be less lazy about updating test cases. if (AI.getIndirectByVal()) Attrs.addAlignmentAttr(Align.getQuantity()); // byval disables readnone and readonly. FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) .removeAttribute(llvm::Attribute::ReadNone); break; } case ABIArgInfo::IndirectAliased: { CharUnits Align = AI.getIndirectAlign(); Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType)); Attrs.addAlignmentAttr(Align.getQuantity()); break; } case ABIArgInfo::Ignore: case ABIArgInfo::Expand: case ABIArgInfo::CoerceAndExpand: break; case ABIArgInfo::InAlloca: // inalloca disables readnone and readonly. 
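    // (An inalloca argument lives in caller-allocated stack memory that the
    // callee reads, and may modify, through the argument-block pointer, so
    // claiming readnone/readonly for the function would be unsound.)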
FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) .removeAttribute(llvm::Attribute::ReadNone); continue; } if (const auto *RefTy = ParamType->getAs()) { QualType PTy = RefTy->getPointeeType(); if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) Attrs.addDereferenceableAttr( getMinimumObjectSize(PTy).getQuantity()); if (getContext().getTargetAddressSpace(PTy) == 0 && !CodeGenOpts.NullPointerIsValid) Attrs.addAttribute(llvm::Attribute::NonNull); if (PTy->isObjectType()) { llvm::Align Alignment = getNaturalPointeeTypeAlignment(ParamType).getAsAlign(); Attrs.addAlignmentAttr(Alignment); } } switch (FI.getExtParameterInfo(ArgNo).getABI()) { case ParameterABI::Ordinary: break; case ParameterABI::SwiftIndirectResult: { // Add 'sret' if we haven't already used it for something, but // only if the result is void. if (!hasUsedSRet && RetTy->isVoidType()) { Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType)); hasUsedSRet = true; } // Add 'noalias' in either case. Attrs.addAttribute(llvm::Attribute::NoAlias); // Add 'dereferenceable' and 'alignment'. auto PTy = ParamType->getPointeeType(); if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { auto info = getContext().getTypeInfoInChars(PTy); Attrs.addDereferenceableAttr(info.first.getQuantity()); Attrs.addAlignmentAttr(info.second.getAsAlign()); } break; } case ParameterABI::SwiftErrorResult: Attrs.addAttribute(llvm::Attribute::SwiftError); break; case ParameterABI::SwiftContext: Attrs.addAttribute(llvm::Attribute::SwiftSelf); break; } if (FI.getExtParameterInfo(ArgNo).isNoEscape()) Attrs.addAttribute(llvm::Attribute::NoCapture); if (Attrs.hasAttributes()) { unsigned FirstIRArg, NumIRArgs; std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); for (unsigned i = 0; i < NumIRArgs; i++) ArgAttrs[FirstIRArg + i] = llvm::AttributeSet::get(getLLVMContext(), Attrs); } } assert(ArgNo == FI.arg_size()); AttrList = llvm::AttributeList::get( getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs), llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs); } /// An argument came in as a promoted argument; demote it back to its /// declared type. static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value) { llvm::Type *varType = CGF.ConvertType(var->getType()); // This can happen with promotions that actually don't change the // underlying type, like the enum promotions. if (value->getType() == varType) return value; assert((varType->isIntegerTy() || varType->isFloatingPointTy()) && "unexpected promotion type"); if (isa(varType)) return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote"); return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote"); } /// Returns the attribute (either parameter attribute, or function /// attribute), which declares argument ArgNo to be non-null. static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo) { // FIXME: __attribute__((nonnull)) can also be applied to: // - references to pointers, where the pointee is known to be // nonnull (apparently a Clang extension) // - transparent unions containing pointers // In the former case, LLVM IR cannot represent the constraint. In // the latter case, we have no guarantee that the transparent union // is in fact passed as a pointer. if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType()) return nullptr; // First, check attribute on parameter itself. 
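  // For example, `void f(int *p __attribute__((nonnull)))` attaches the
  // attribute to the ParmVarDecl and is found right here, while
  // `__attribute__((nonnull(1))) void f(int *p)` sits on the FunctionDecl
  // and is matched by the loop below.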
if (PVD) { if (auto ParmNNAttr = PVD->getAttr()) return ParmNNAttr; } // Check function attributes. if (!FD) return nullptr; for (const auto *NNAttr : FD->specific_attrs()) { if (NNAttr->isNonNull(ArgNo)) return NNAttr; } return nullptr; } namespace { struct CopyBackSwiftError final : EHScopeStack::Cleanup { Address Temp; Address Arg; CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {} void Emit(CodeGenFunction &CGF, Flags flags) override { llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp); CGF.Builder.CreateStore(errorValue, Arg); } }; } void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args) { if (CurCodeDecl && CurCodeDecl->hasAttr()) // Naked functions don't have prologues. return; // If this is an implicit-return-zero function, go ahead and // initialize the return value. TODO: it might be nice to have // a more general mechanism for this that didn't require synthesized // return statements. if (const FunctionDecl *FD = dyn_cast_or_null(CurCodeDecl)) { if (FD->hasImplicitReturnZero()) { QualType RetTy = FD->getReturnType().getUnqualifiedType(); llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy); llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy); Builder.CreateStore(Zero, ReturnValue); } } // FIXME: We no longer need the types from FunctionArgList; lift up and // simplify. ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI); assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs()); // If we're using inalloca, all the memory arguments are GEPs off of the last // parameter, which is a pointer to the complete memory area. Address ArgStruct = Address::invalid(); if (IRFunctionArgs.hasInallocaArg()) { ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()), FI.getArgStructAlignment()); assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo()); } // Name the struct return parameter. if (IRFunctionArgs.hasSRetArg()) { auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo()); AI->setName("agg.result"); AI->addAttr(llvm::Attribute::NoAlias); } // Track if we received the parameter as a pointer (indirect, byval, or // inalloca). If already have a pointer, EmitParmDecl doesn't need to copy it // into a local alloca for us. SmallVector ArgVals; ArgVals.reserve(Args.size()); // Create a pointer value for every parameter declaration. This usually // entails copying one or more LLVM IR arguments into an alloca. Don't push // any cleanups or do anything that might unwind. We do that separately, so // we can push the cleanups in the correct order for the ABI. assert(FI.arg_size() == Args.size() && "Mismatch between function signature & arguments."); unsigned ArgNo = 0; CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); i != e; ++i, ++info_it, ++ArgNo) { const VarDecl *Arg = *i; const ABIArgInfo &ArgI = info_it->info; bool isPromoted = isa(Arg) && cast(Arg)->isKNRPromoted(); // We are converting from ABIArgInfo type to VarDecl type directly, unless // the parameter is promoted. In this case we convert to // CGFunctionInfo::ArgInfo type with subsequent argument demotion. QualType Ty = isPromoted ? 
info_it->type : Arg->getType(); assert(hasScalarEvaluationKind(Ty) == hasScalarEvaluationKind(Arg->getType())); unsigned FirstIRArg, NumIRArgs; std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); switch (ArgI.getKind()) { case ABIArgInfo::InAlloca: { assert(NumIRArgs == 0); auto FieldIndex = ArgI.getInAllocaFieldIndex(); Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName()); if (ArgI.getInAllocaIndirect()) V = Address(Builder.CreateLoad(V), getContext().getTypeAlignInChars(Ty)); ArgVals.push_back(ParamValue::forIndirect(V)); break; } case ABIArgInfo::Indirect: case ABIArgInfo::IndirectAliased: { assert(NumIRArgs == 1); Address ParamAddr = Address(Fn->getArg(FirstIRArg), ArgI.getIndirectAlign()); if (!hasScalarEvaluationKind(Ty)) { // Aggregates and complex variables are accessed by reference. All we // need to do is realign the value, if requested. Also, if the address // may be aliased, copy it to ensure that the parameter variable is // mutable and has a unique adress, as C requires. Address V = ParamAddr; if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) { Address AlignedTemp = CreateMemTemp(Ty, "coerce"); // Copy from the incoming argument pointer to the temporary with the // appropriate alignment. // // FIXME: We should have a common utility for generating an aggregate // copy. CharUnits Size = getContext().getTypeSizeInChars(Ty); Builder.CreateMemCpy( AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(), ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(), llvm::ConstantInt::get(IntPtrTy, Size.getQuantity())); V = AlignedTemp; } ArgVals.push_back(ParamValue::forIndirect(V)); } else { // Load scalar value from indirect argument. llvm::Value *V = EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc()); if (isPromoted) V = emitArgumentDemotion(*this, Arg, V); ArgVals.push_back(ParamValue::forDirect(V)); } break; } case ABIArgInfo::Extend: case ABIArgInfo::Direct: { auto AI = Fn->getArg(FirstIRArg); llvm::Type *LTy = ConvertType(Arg->getType()); // Prepare parameter attributes. So far, only attributes for pointer // parameters are prepared. See // http://llvm.org/docs/LangRef.html#paramattrs. if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() && ArgI.getCoerceToType()->isPointerTy()) { assert(NumIRArgs == 1); if (const ParmVarDecl *PVD = dyn_cast(Arg)) { // Set `nonnull` attribute if any. if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(), PVD->getFunctionScopeIndex()) && !CGM.getCodeGenOpts().NullPointerIsValid) AI->addAttr(llvm::Attribute::NonNull); QualType OTy = PVD->getOriginalType(); if (const auto *ArrTy = getContext().getAsConstantArrayType(OTy)) { // A C99 array parameter declaration with the static keyword also // indicates dereferenceability, and if the size is constant we can // use the dereferenceable attribute (which requires the size in // bytes). 
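        // Illustrative example (assuming a 32-bit, 4-byte-aligned int):
        //   void f(int buf[static 8]);
        // lets the parameter be annotated roughly as
        //   i32* align 4 dereferenceable(32) %buf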
if (ArrTy->getSizeModifier() == ArrayType::Static) { QualType ETy = ArrTy->getElementType(); llvm::Align Alignment = CGM.getNaturalTypeAlignment(ETy).getAsAlign(); AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment)); uint64_t ArrSize = ArrTy->getSize().getZExtValue(); if (!ETy->isIncompleteType() && ETy->isConstantSizeType() && ArrSize) { llvm::AttrBuilder Attrs; Attrs.addDereferenceableAttr( getContext().getTypeSizeInChars(ETy).getQuantity() * ArrSize); AI->addAttrs(Attrs); } else if (getContext().getTargetInfo().getNullPointerValue( ETy.getAddressSpace()) == 0 && !CGM.getCodeGenOpts().NullPointerIsValid) { AI->addAttr(llvm::Attribute::NonNull); } } } else if (const auto *ArrTy = getContext().getAsVariableArrayType(OTy)) { // For C99 VLAs with the static keyword, we don't know the size so // we can't use the dereferenceable attribute, but in addrspace(0) // we know that it must be nonnull. if (ArrTy->getSizeModifier() == VariableArrayType::Static) { QualType ETy = ArrTy->getElementType(); llvm::Align Alignment = CGM.getNaturalTypeAlignment(ETy).getAsAlign(); AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment)); if (!getContext().getTargetAddressSpace(ETy) && !CGM.getCodeGenOpts().NullPointerIsValid) AI->addAttr(llvm::Attribute::NonNull); } } // Set `align` attribute if any. const auto *AVAttr = PVD->getAttr(); if (!AVAttr) if (const auto *TOTy = dyn_cast(OTy)) AVAttr = TOTy->getDecl()->getAttr(); if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) { // If alignment-assumption sanitizer is enabled, we do *not* add // alignment attribute here, but emit normal alignment assumption, // so the UBSAN check could function. llvm::ConstantInt *AlignmentCI = cast(EmitScalarExpr(AVAttr->getAlignment())); unsigned AlignmentInt = AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment); if (AI->getParamAlign().valueOrOne() < AlignmentInt) { AI->removeAttr(llvm::Attribute::AttrKind::Alignment); AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr( llvm::Align(AlignmentInt))); } } } // Set 'noalias' if an argument type has the `restrict` qualifier. if (Arg->getType().isRestrictQualified()) AI->addAttr(llvm::Attribute::NoAlias); } // Prepare the argument value. If we have the trivial case, handle it // with no muss and fuss. if (!isa(ArgI.getCoerceToType()) && ArgI.getCoerceToType() == ConvertType(Ty) && ArgI.getDirectOffset() == 0) { assert(NumIRArgs == 1); // LLVM expects swifterror parameters to be used in very restricted // ways. Copy the value into a less-restricted temporary. llvm::Value *V = AI; if (FI.getExtParameterInfo(ArgNo).getABI() == ParameterABI::SwiftErrorResult) { QualType pointeeTy = Ty->getPointeeType(); assert(pointeeTy->isPointerType()); Address temp = CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy)); llvm::Value *incomingErrorValue = Builder.CreateLoad(arg); Builder.CreateStore(incomingErrorValue, temp); V = temp.getPointer(); // Push a cleanup to copy the value back at the end of the function. // The convention does not guarantee that the value will be written // back if the function exits with an unwind exception. EHStack.pushCleanup(NormalCleanup, temp, arg); } // Ensure the argument is the correct type. 
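      // (The ABI coercion type and the Clang-converted parameter type can
      // differ in representation-only ways, e.g. pointer element types; the
      // bitcasts below reconcile them without changing the value.)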
if (V->getType() != ArgI.getCoerceToType()) V = Builder.CreateBitCast(V, ArgI.getCoerceToType()); if (isPromoted) V = emitArgumentDemotion(*this, Arg, V); // Because of merging of function types from multiple decls it is // possible for the type of an argument to not match the corresponding // type in the function type. Since we are codegening the callee // in here, add a cast to the argument type. llvm::Type *LTy = ConvertType(Arg->getType()); if (V->getType() != LTy) V = Builder.CreateBitCast(V, LTy); ArgVals.push_back(ParamValue::forDirect(V)); break; } Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg), Arg->getName()); // Pointer to store into. Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI); // Fast-isel and the optimizer generally like scalar values better than // FCAs, so we flatten them if this is safe to do for this argument. llvm::StructType *STy = dyn_cast(ArgI.getCoerceToType()); if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && STy->getNumElements() > 1) { uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy); llvm::Type *DstTy = Ptr.getElementType(); uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy); Address AddrToStoreInto = Address::invalid(); if (SrcSize <= DstSize) { AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy); } else { AddrToStoreInto = CreateTempAlloca(STy, Alloca.getAlignment(), "coerce"); } assert(STy->getNumElements() == NumIRArgs); for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { auto AI = Fn->getArg(FirstIRArg + i); AI->setName(Arg->getName() + ".coerce" + Twine(i)); Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i); Builder.CreateStore(AI, EltPtr); } if (SrcSize > DstSize) { Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize); } } else { // Simple case, just do a coerced store of the argument into the alloca. assert(NumIRArgs == 1); auto AI = Fn->getArg(FirstIRArg); AI->setName(Arg->getName() + ".coerce"); CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this); } // Match to what EmitParmDecl is expecting for this type. if (CodeGenFunction::hasScalarEvaluationKind(Ty)) { llvm::Value *V = EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc()); if (isPromoted) V = emitArgumentDemotion(*this, Arg, V); ArgVals.push_back(ParamValue::forDirect(V)); } else { ArgVals.push_back(ParamValue::forIndirect(Alloca)); } break; } case ABIArgInfo::CoerceAndExpand: { // Reconstruct into a temporary. Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); ArgVals.push_back(ParamValue::forIndirect(alloca)); auto coercionType = ArgI.getCoerceAndExpandType(); alloca = Builder.CreateElementBitCast(alloca, coercionType); unsigned argIndex = FirstIRArg; for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { llvm::Type *eltType = coercionType->getElementType(i); if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; auto eltAddr = Builder.CreateStructGEP(alloca, i); auto elt = Fn->getArg(argIndex++); Builder.CreateStore(elt, eltAddr); } assert(argIndex == FirstIRArg + NumIRArgs); break; } case ABIArgInfo::Expand: { // If this structure was expanded into multiple arguments then // we need to create a temporary and reconstruct it from the // arguments. 
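      // Illustrative only: a parameter of type `struct P { int a; float b; }`
      // lowered with the Expand ABI arrives as two IR arguments
      // (`i32 %p.0`, `float %p.1`) and is written back field-by-field into
      // the temporary by ExpandTypeFromArgs below.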
Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); LValue LV = MakeAddrLValue(Alloca, Ty); ArgVals.push_back(ParamValue::forIndirect(Alloca)); auto FnArgIter = Fn->arg_begin() + FirstIRArg; ExpandTypeFromArgs(Ty, LV, FnArgIter); assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs); for (unsigned i = 0, e = NumIRArgs; i != e; ++i) { auto AI = Fn->getArg(FirstIRArg + i); AI->setName(Arg->getName() + "." + Twine(i)); } break; } case ABIArgInfo::Ignore: assert(NumIRArgs == 0); // Initialize the local variable appropriately. if (!hasScalarEvaluationKind(Ty)) { ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty))); } else { llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType())); ArgVals.push_back(ParamValue::forDirect(U)); } break; } } if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { for (int I = Args.size() - 1; I >= 0; --I) EmitParmDecl(*Args[I], ArgVals[I], I + 1); } else { for (unsigned I = 0, E = Args.size(); I != E; ++I) EmitParmDecl(*Args[I], ArgVals[I], I + 1); } } static void eraseUnusedBitCasts(llvm::Instruction *insn) { while (insn->use_empty()) { llvm::BitCastInst *bitcast = dyn_cast(insn); if (!bitcast) return; // This is "safe" because we would have used a ConstantExpr otherwise. insn = cast(bitcast->getOperand(0)); bitcast->eraseFromParent(); } } /// Try to emit a fused autorelease of a return result. static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result) { // We must be immediately followed the cast. llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock(); if (BB->empty()) return nullptr; if (&BB->back() != result) return nullptr; llvm::Type *resultType = result->getType(); // result is in a BasicBlock and is therefore an Instruction. llvm::Instruction *generator = cast(result); SmallVector InstsToKill; // Look for: // %generator = bitcast %type1* %generator2 to %type2* while (llvm::BitCastInst *bitcast = dyn_cast(generator)) { // We would have emitted this as a constant if the operand weren't // an Instruction. generator = cast(bitcast->getOperand(0)); // Require the generator to be immediately followed by the cast. if (generator->getNextNode() != bitcast) return nullptr; InstsToKill.push_back(bitcast); } // Look for: // %generator = call i8* @objc_retain(i8* %originalResult) // or // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult) llvm::CallInst *call = dyn_cast(generator); if (!call) return nullptr; bool doRetainAutorelease; if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) { doRetainAutorelease = true; } else if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) { doRetainAutorelease = false; // If we emitted an assembly marker for this call (and the // ARCEntrypoints field should have been set if so), go looking // for that call. If we can't find it, we can't do this // optimization. But it should always be the immediately previous // instruction, unless we needed bitcasts around the call. if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) { llvm::Instruction *prev = call->getPrevNode(); assert(prev); if (isa(prev)) { prev = prev->getPrevNode(); assert(prev); } assert(isa(prev)); assert(cast(prev)->getCalledOperand() == CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker); InstsToKill.push_back(prev); } } else { return nullptr; } result = call->getArgOperand(0); InstsToKill.push_back(call); // Keep killing bitcasts, for sanity. 
Note that we no longer care // about precise ordering as long as there's exactly one use. while (llvm::BitCastInst *bitcast = dyn_cast(result)) { if (!bitcast->hasOneUse()) break; InstsToKill.push_back(bitcast); result = bitcast->getOperand(0); } // Delete all the unnecessary instructions, from latest to earliest. for (auto *I : InstsToKill) I->eraseFromParent(); // Do the fused retain/autorelease if we were asked to. if (doRetainAutorelease) result = CGF.EmitARCRetainAutoreleaseReturnValue(result); // Cast back to the result type. return CGF.Builder.CreateBitCast(result, resultType); } /// If this is a +1 of the value of an immutable 'self', remove it. static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result) { // This is only applicable to a method with an immutable 'self'. const ObjCMethodDecl *method = dyn_cast_or_null(CGF.CurCodeDecl); if (!method) return nullptr; const VarDecl *self = method->getSelfDecl(); if (!self->getType().isConstQualified()) return nullptr; // Look for a retain call. llvm::CallInst *retainCall = dyn_cast(result->stripPointerCasts()); if (!retainCall || retainCall->getCalledOperand() != CGF.CGM.getObjCEntrypoints().objc_retain) return nullptr; // Look for an ordinary load of 'self'. llvm::Value *retainedValue = retainCall->getArgOperand(0); llvm::LoadInst *load = dyn_cast(retainedValue->stripPointerCasts()); if (!load || load->isAtomic() || load->isVolatile() || load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer()) return nullptr; // Okay! Burn it all down. This relies for correctness on the // assumption that the retain is emitted as part of the return and // that thereafter everything is used "linearly". llvm::Type *resultType = result->getType(); eraseUnusedBitCasts(cast(result)); assert(retainCall->use_empty()); retainCall->eraseFromParent(); eraseUnusedBitCasts(cast(retainedValue)); return CGF.Builder.CreateBitCast(load, resultType); } /// Emit an ARC autorelease of the result of a function. /// /// \return the value to actually return from the function static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result) { // If we're returning 'self', kill the initial retain. This is a // heuristic attempt to "encourage correctness" in the really unfortunate // case where we have a return of self during a dealloc and we desperately // need to avoid the possible autorelease. if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result)) return self; // At -O0, try to emit a fused retain/autorelease. if (CGF.shouldUseFusedARCCalls()) if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result)) return fused; return CGF.EmitARCAutoreleaseReturnValue(result); } /// Heuristically search for a dominating store to the return-value slot. static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) { // Check if a User is a store which pointerOperand is the ReturnValue. // We are looking for stores to the ReturnValue, not for stores of the // ReturnValue to some other location. auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * { auto *SI = dyn_cast(U); if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer()) return nullptr; // These aren't actually possible for non-coerced returns, and we // only care about non-coerced returns on this code path. assert(!SI->isAtomic() && !SI->isVolatile()); return SI; }; // If there are multiple uses of the return-value slot, just check // for something immediately preceding the IP. 
Sometimes this can // happen with how we generate implicit-returns; it can also happen // with noreturn cleanups. if (!CGF.ReturnValue.getPointer()->hasOneUse()) { llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock(); if (IP->empty()) return nullptr; llvm::Instruction *I = &IP->back(); // Skip lifetime markers for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(), IE = IP->rend(); II != IE; ++II) { if (llvm::IntrinsicInst *Intrinsic = dyn_cast(&*II)) { if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) { const llvm::Value *CastAddr = Intrinsic->getArgOperand(1); ++II; if (II == IE) break; if (isa(&*II) && (CastAddr == &*II)) continue; } } I = &*II; break; } return GetStoreIfValid(I); } llvm::StoreInst *store = GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back()); if (!store) return nullptr; // Now do a first-and-dirty dominance check: just walk up the // single-predecessors chain from the current insertion point. llvm::BasicBlock *StoreBB = store->getParent(); llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock(); while (IP != StoreBB) { if (!(IP = IP->getSinglePredecessor())) return nullptr; } // Okay, the store's basic block dominates the insertion point; we // can do our thing. return store; } // Helper functions for EmitCMSEClearRecord // Set the bits corresponding to a field having width `BitWidth` and located at // offset `BitOffset` (from the least significant bit) within a storage unit of // `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte. // Use little-endian layout, i.e.`Bits[0]` is the LSB. static void setBitRange(SmallVectorImpl &Bits, int BitOffset, int BitWidth, int CharWidth) { assert(CharWidth <= 64); assert(static_cast(BitWidth) <= Bits.size() * CharWidth); int Pos = 0; if (BitOffset >= CharWidth) { Pos += BitOffset / CharWidth; BitOffset = BitOffset % CharWidth; } const uint64_t Used = (uint64_t(1) << CharWidth) - 1; if (BitOffset + BitWidth >= CharWidth) { Bits[Pos++] |= (Used << BitOffset) & Used; BitWidth -= CharWidth - BitOffset; BitOffset = 0; } while (BitWidth >= CharWidth) { Bits[Pos++] = Used; BitWidth -= CharWidth; } if (BitWidth > 0) Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset; } // Set the bits corresponding to a field having width `BitWidth` and located at // offset `BitOffset` (from the least significant bit) within a storage unit of // `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of // `Bits` corresponds to one target byte. Use target endian layout. static void setBitRange(SmallVectorImpl &Bits, int StorageOffset, int StorageSize, int BitOffset, int BitWidth, int CharWidth, bool BigEndian) { SmallVector TmpBits(StorageSize); setBitRange(TmpBits, BitOffset, BitWidth, CharWidth); if (BigEndian) std::reverse(TmpBits.begin(), TmpBits.end()); for (uint64_t V : TmpBits) Bits[StorageOffset++] |= V; } static void setUsedBits(CodeGenModule &, QualType, int, SmallVectorImpl &); // Set the bits in `Bits`, which correspond to the value representations of // the actual members of the record type `RTy`. Note that this function does // not handle base classes, virtual tables, etc, since they cannot happen in // CMSE function arguments or return. The bit mask corresponds to the target // memory layout, i.e. it's endian dependent. 
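// Illustrative example (little-endian, 8-bit char): for
//   struct S { unsigned char c; unsigned b : 4; };
// the mask keeps every bit of `c` and the low four bits of the byte holding
// `b`, while the remaining (padding) bits stay zero and thus get cleared.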
static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset, SmallVectorImpl &Bits) { ASTContext &Context = CGM.getContext(); int CharWidth = Context.getCharWidth(); const RecordDecl *RD = RTy->getDecl()->getDefinition(); const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD); const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD); int Idx = 0; for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) { const FieldDecl *F = *I; if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) || F->getType()->isIncompleteArrayType()) continue; if (F->isBitField()) { const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F); setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(), BFI.StorageSize / CharWidth, BFI.Offset, BFI.Size, CharWidth, CGM.getDataLayout().isBigEndian()); continue; } setUsedBits(CGM, F->getType(), Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits); } } // Set the bits in `Bits`, which correspond to the value representations of // the elements of an array type `ATy`. static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy, int Offset, SmallVectorImpl &Bits) { const ASTContext &Context = CGM.getContext(); QualType ETy = Context.getBaseElementType(ATy); int Size = Context.getTypeSizeInChars(ETy).getQuantity(); SmallVector TmpBits(Size); setUsedBits(CGM, ETy, 0, TmpBits); for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) { auto Src = TmpBits.begin(); auto Dst = Bits.begin() + Offset + I * Size; for (int J = 0; J < Size; ++J) *Dst++ |= *Src++; } } // Set the bits in `Bits`, which correspond to the value representations of // the type `QTy`. static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset, SmallVectorImpl &Bits) { if (const auto *RTy = QTy->getAs()) return setUsedBits(CGM, RTy, Offset, Bits); ASTContext &Context = CGM.getContext(); if (const auto *ATy = Context.getAsConstantArrayType(QTy)) return setUsedBits(CGM, ATy, Offset, Bits); int Size = Context.getTypeSizeInChars(QTy).getQuantity(); if (Size <= 0) return; std::fill_n(Bits.begin() + Offset, Size, (uint64_t(1) << Context.getCharWidth()) - 1); } static uint64_t buildMultiCharMask(const SmallVectorImpl &Bits, int Pos, int Size, int CharWidth, bool BigEndian) { assert(Size > 0); uint64_t Mask = 0; if (BigEndian) { for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E; ++P) Mask = (Mask << CharWidth) | *P; } else { auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos; do Mask = (Mask << CharWidth) | *--P; while (P != End); } return Mask; } // Emit code to clear the bits in a record, which aren't a part of any user // declared member, when the record is a function return. llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src, llvm::IntegerType *ITy, QualType QTy) { assert(Src->getType() == ITy); assert(ITy->getScalarSizeInBits() <= 64); const llvm::DataLayout &DataLayout = CGM.getDataLayout(); int Size = DataLayout.getTypeStoreSize(ITy); SmallVector Bits(Size); setUsedBits(CGM, QTy->getAs(), 0, Bits); int CharWidth = CGM.getContext().getCharWidth(); uint64_t Mask = buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian()); return Builder.CreateAnd(Src, Mask, "cmse.clear"); } // Emit code to clear the bits in a record, which aren't a part of any user // declared member, when the record is a function argument. 
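// This is part of the Armv8-M Security Extensions (CMSE) hardening: for
// cmse_nonsecure_entry returns and cmse_nonsecure_call arguments, padding
// bits could otherwise leak secure-state data across the security boundary,
// so they are masked to zero before the value crosses it.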
llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src, llvm::ArrayType *ATy, QualType QTy) { const llvm::DataLayout &DataLayout = CGM.getDataLayout(); int Size = DataLayout.getTypeStoreSize(ATy); SmallVector Bits(Size); setUsedBits(CGM, QTy->getAs(), 0, Bits); // Clear each element of the LLVM array. int CharWidth = CGM.getContext().getCharWidth(); int CharsPerElt = ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth; int MaskIndex = 0; llvm::Value *R = llvm::UndefValue::get(ATy); for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) { uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth, DataLayout.isBigEndian()); MaskIndex += CharsPerElt; llvm::Value *T0 = Builder.CreateExtractValue(Src, I); llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear"); R = Builder.CreateInsertValue(R, T1, I); } return R; } void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc) { if (FI.isNoReturn()) { // Noreturn functions don't return. EmitUnreachable(EndLoc); return; } if (CurCodeDecl && CurCodeDecl->hasAttr()) { // Naked functions don't have epilogues. Builder.CreateUnreachable(); return; } // Functions with no result always return void. if (!ReturnValue.isValid()) { Builder.CreateRetVoid(); return; } llvm::DebugLoc RetDbgLoc; llvm::Value *RV = nullptr; QualType RetTy = FI.getReturnType(); const ABIArgInfo &RetAI = FI.getReturnInfo(); switch (RetAI.getKind()) { case ABIArgInfo::InAlloca: // Aggregrates get evaluated directly into the destination. Sometimes we // need to return the sret value in a register, though. assert(hasAggregateEvaluationKind(RetTy)); if (RetAI.getInAllocaSRet()) { llvm::Function::arg_iterator EI = CurFn->arg_end(); --EI; llvm::Value *ArgStruct = &*EI; llvm::Value *SRet = Builder.CreateStructGEP( nullptr, ArgStruct, RetAI.getInAllocaFieldIndex()); RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret"); } break; case ABIArgInfo::Indirect: { auto AI = CurFn->arg_begin(); if (RetAI.isSRetAfterThis()) ++AI; switch (getEvaluationKind(RetTy)) { case TEK_Complex: { ComplexPairTy RT = EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc); EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy), /*isInit*/ true); break; } case TEK_Aggregate: // Do nothing; aggregrates get evaluated directly into the destination. break; case TEK_Scalar: EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), MakeNaturalAlignAddrLValue(&*AI, RetTy), /*isInit*/ true); break; } break; } case ABIArgInfo::Extend: case ABIArgInfo::Direct: if (RetAI.getCoerceToType() == ConvertType(RetTy) && RetAI.getDirectOffset() == 0) { // The internal return value temp always will have pointer-to-return-type // type, just do a load. // If there is a dominating store to ReturnValue, we can elide // the load, zap the store, and usually zap the alloca. if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) { // Reuse the debug location from the store unless there is // cleanup code to be emitted between the store and return // instruction. if (EmitRetDbgLoc && !AutoreleaseResult) RetDbgLoc = SI->getDebugLoc(); // Get the stored value and nuke the now-dead store. RV = SI->getValueOperand(); SI->eraseFromParent(); // Otherwise, we have to do a simple load. } else { RV = Builder.CreateLoad(ReturnValue); } } else { // If the value is offset in memory, apply the offset now. 
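      // (Slow path for coerced returns: reload the value from the return
      // slot through the ABI coercion type, honoring any direct offset,
      // instead of reusing a dominating store.)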
Address V = emitAddressAtOffset(*this, ReturnValue, RetAI); RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this); } // In ARC, end functions that return a retainable type with a call // to objc_autoreleaseReturnValue. if (AutoreleaseResult) { #ifndef NDEBUG // Type::isObjCRetainabletype has to be called on a QualType that hasn't // been stripped of the typedefs, so we cannot use RetTy here. Get the // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from // CurCodeDecl or BlockInfo. QualType RT; if (auto *FD = dyn_cast(CurCodeDecl)) RT = FD->getReturnType(); else if (auto *MD = dyn_cast(CurCodeDecl)) RT = MD->getReturnType(); else if (isa(CurCodeDecl)) RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType(); else llvm_unreachable("Unexpected function/method type"); assert(getLangOpts().ObjCAutoRefCount && !FI.isReturnsRetained() && RT->isObjCRetainableType()); #endif RV = emitAutoreleaseOfResult(*this, RV); } break; case ABIArgInfo::Ignore: break; case ABIArgInfo::CoerceAndExpand: { auto coercionType = RetAI.getCoerceAndExpandType(); // Load all of the coerced elements out into results. llvm::SmallVector results; Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType); for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { auto coercedEltType = coercionType->getElementType(i); if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType)) continue; auto eltAddr = Builder.CreateStructGEP(addr, i); auto elt = Builder.CreateLoad(eltAddr); results.push_back(elt); } // If we have one result, it's the single direct result type. if (results.size() == 1) { RV = results[0]; // Otherwise, we need to make a first-class aggregate. } else { // Construct a return type that lacks padding elements. llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType(); RV = llvm::UndefValue::get(returnType); for (unsigned i = 0, e = results.size(); i != e; ++i) { RV = Builder.CreateInsertValue(RV, results[i], i); } } break; } case ABIArgInfo::Expand: case ABIArgInfo::IndirectAliased: llvm_unreachable("Invalid ABI kind for return argument"); } llvm::Instruction *Ret; if (RV) { if (CurFuncDecl && CurFuncDecl->hasAttr()) { // For certain return types, clear padding bits, as they may reveal // sensitive information. // Small struct/union types are passed as integers. auto *ITy = dyn_cast(RV->getType()); if (ITy != nullptr && isa(RetTy.getCanonicalType())) RV = EmitCMSEClearRecord(RV, ITy, RetTy); } EmitReturnValueCheck(RV); Ret = Builder.CreateRet(RV); } else { Ret = Builder.CreateRetVoid(); } if (RetDbgLoc) Ret->setDebugLoc(std::move(RetDbgLoc)); } void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) { // A current decl may not be available when emitting vtable thunks. if (!CurCodeDecl) return; // If the return block isn't reachable, neither is this check, so don't emit // it. if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) return; ReturnsNonNullAttr *RetNNAttr = nullptr; if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) RetNNAttr = CurCodeDecl->getAttr(); if (!RetNNAttr && !requiresReturnValueNullabilityCheck()) return; // Prefer the returns_nonnull attribute if it's present. 
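  // For example, `__attribute__((returns_nonnull)) char *get(void)` is
  // checked under SanitizerKind::ReturnsNonnullAttribute, whereas a
  // `_Nonnull` return type with no attribute falls back to the
  // NullabilityReturn handler selected below.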
SourceLocation AttrLoc; SanitizerMask CheckKind; SanitizerHandler Handler; if (RetNNAttr) { assert(!requiresReturnValueNullabilityCheck() && "Cannot check nullability and the nonnull attribute"); AttrLoc = RetNNAttr->getLocation(); CheckKind = SanitizerKind::ReturnsNonnullAttribute; Handler = SanitizerHandler::NonnullReturn; } else { if (auto *DD = dyn_cast(CurCodeDecl)) if (auto *TSI = DD->getTypeSourceInfo()) if (auto FTL = TSI->getTypeLoc().getAsAdjusted()) AttrLoc = FTL.getReturnLoc().findNullabilityLoc(); CheckKind = SanitizerKind::NullabilityReturn; Handler = SanitizerHandler::NullabilityReturn; } SanitizerScope SanScope(this); // Make sure the "return" source location is valid. If we're checking a // nullability annotation, make sure the preconditions for the check are met. llvm::BasicBlock *Check = createBasicBlock("nullcheck"); llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck"); llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load"); llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr); if (requiresReturnValueNullabilityCheck()) CanNullCheck = Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition); Builder.CreateCondBr(CanNullCheck, Check, NoCheck); EmitBlock(Check); // Now do the null check. llvm::Value *Cond = Builder.CreateIsNotNull(RV); llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)}; llvm::Value *DynamicData[] = {SLocPtr}; EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData); EmitBlock(NoCheck); #ifndef NDEBUG // The return location should not be used after the check has been emitted. ReturnLocation = Address::invalid(); #endif } static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory; } static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty) { // FIXME: Generate IR in one pass, rather than going back and fixing up these // placeholders. llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty); llvm::Type *IRPtrTy = IRTy->getPointerTo(); llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo()); // FIXME: When we generate this IR in one pass, we shouldn't need // this win32-specific alignment hack. CharUnits Align = CharUnits::fromQuantity(4); Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align); return AggValueSlot::forAddr(Address(Placeholder, Align), Ty.getQualifiers(), AggValueSlot::IsNotDestructed, AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap); } void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc) { // StartFunction converted the ABI-lowered parameter(s) into a // local alloca. We need to turn that into an r-value suitable // for EmitCall. Address local = GetAddrOfLocalVar(param); QualType type = param->getType(); if (isInAllocaArgument(CGM.getCXXABI(), type)) { CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter"); } // GetAddrOfLocalVar returns a pointer-to-pointer for references, // but the argument needs to be the original pointer. if (type->isReferenceType()) { args.add(RValue::get(Builder.CreateLoad(local)), type); // In ARC, move out of consumed arguments so that the release cleanup // entered by StartFunction doesn't cause an over-release. This isn't // optimal -O0 code generation, but it should get cleaned up when // optimization is enabled. 
This also assumes that delegate calls are // performed exactly once for a set of arguments, but that should be safe. } else if (getLangOpts().ObjCAutoRefCount && param->hasAttr() && type->isObjCRetainableType()) { llvm::Value *ptr = Builder.CreateLoad(local); auto null = llvm::ConstantPointerNull::get(cast(ptr->getType())); Builder.CreateStore(null, local); args.add(RValue::get(ptr), type); // For the most part, we just need to load the alloca, except that // aggregate r-values are actually pointers to temporaries. } else { args.add(convertTempToRValue(local, type, loc), type); } // Deactivate the cleanup for the callee-destructed param that was pushed. if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk && type->castAs()->getDecl()->isParamDestroyedInCallee() && param->needsDestruction(getContext())) { EHScopeStack::stable_iterator cleanup = CalleeDestructedParamCleanups.lookup(cast(param)); assert(cleanup.isValid() && "cleanup for callee-destructed param not recorded"); // This unreachable is a temporary marker which will be removed later. llvm::Instruction *isActive = Builder.CreateUnreachable(); args.addArgCleanupDeactivation(cleanup, isActive); } } static bool isProvablyNull(llvm::Value *addr) { return isa(addr); } /// Emit the actual writing-back of a writeback. static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback) { const LValue &srcLV = writeback.Source; Address srcAddr = srcLV.getAddress(CGF); assert(!isProvablyNull(srcAddr.getPointer()) && "shouldn't have writeback for provably null argument"); llvm::BasicBlock *contBB = nullptr; // If the argument wasn't provably non-null, we need to null check // before doing the store. bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), CGF.CGM.getDataLayout()); if (!provablyNonNull) { llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback"); contBB = CGF.createBasicBlock("icr.done"); llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); CGF.Builder.CreateCondBr(isNull, contBB, writebackBB); CGF.EmitBlock(writebackBB); } // Load the value to writeback. llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary); // Cast it back, in case we're writing an id to a Foo* or something. value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(), "icr.writeback-cast"); // Perform the writeback. // If we have a "to use" value, it's something we need to emit a use // of. This has to be carefully threaded in: if it's done after the // release it's potentially undefined behavior (and the optimizer // will ignore it), and if it happens before the retain then the // optimizer could move the release there. if (writeback.ToUse) { assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong); // Retain the new value. No need to block-copy here: the block's // being passed up the stack. value = CGF.EmitARCRetainNonBlock(value); // Emit the intrinsic use here. CGF.EmitARCIntrinsicUse(writeback.ToUse); // Load the old value (primitively). llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation()); // Put the new value in place (primitively). CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false); // Release the old value. CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime()); // Otherwise, we can just do a normal lvalue store. } else { CGF.EmitStoreThroughLValue(RValue::get(value), srcLV); } // Jump to the continuation block. 
if (!provablyNonNull) CGF.EmitBlock(contBB); } static void emitWritebacks(CodeGenFunction &CGF, const CallArgList &args) { for (const auto &I : args.writebacks()) emitWriteback(CGF, I); } static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, const CallArgList &CallArgs) { ArrayRef Cleanups = CallArgs.getCleanupsToDeactivate(); // Iterate in reverse to increase the likelihood of popping the cleanup. for (const auto &I : llvm::reverse(Cleanups)) { CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP); I.IsActiveIP->eraseFromParent(); } } static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) { if (const UnaryOperator *uop = dyn_cast(E->IgnoreParens())) if (uop->getOpcode() == UO_AddrOf) return uop->getSubExpr(); return nullptr; } /// Emit an argument that's being passed call-by-writeback. That is, /// we are passing the address of an __autoreleased temporary; it /// might be copy-initialized with the current value of the given /// address, but it will definitely be copied out of after the call. static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE) { LValue srcLV; // Make an optimistic effort to emit the address as an l-value. // This can fail if the argument expression is more complicated. if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) { srcLV = CGF.EmitLValue(lvExpr); // Otherwise, just emit it as a scalar. } else { Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr()); QualType srcAddrType = CRE->getSubExpr()->getType()->castAs()->getPointeeType(); srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType); } Address srcAddr = srcLV.getAddress(CGF); // The dest and src types don't necessarily match in LLVM terms // because of the crazy ObjC compatibility rules. llvm::PointerType *destType = cast(CGF.ConvertType(CRE->getType())); // If the address is a constant null, just pass the appropriate null. if (isProvablyNull(srcAddr.getPointer())) { args.add(RValue::get(llvm::ConstantPointerNull::get(destType)), CRE->getType()); return; } // Create the temporary. Address temp = CGF.CreateTempAlloca(destType->getElementType(), CGF.getPointerAlign(), "icr.temp"); // Loading an l-value can introduce a cleanup if the l-value is __weak, // and that cleanup will be conditional if we can't prove that the l-value // isn't null, so we need to register a dominating point so that the cleanups // system will make valid IR. CodeGenFunction::ConditionalEvaluation condEval(CGF); // Zero-initialize it if we're not doing a copy-initialization. bool shouldCopy = CRE->shouldCopy(); if (!shouldCopy) { llvm::Value *null = llvm::ConstantPointerNull::get( cast(destType->getElementType())); CGF.Builder.CreateStore(null, temp); } llvm::BasicBlock *contBB = nullptr; llvm::BasicBlock *originBB = nullptr; // If the address is *not* known to be non-null, we need to switch. llvm::Value *finalArgument; bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), CGF.CGM.getDataLayout()); if (provablyNonNull) { finalArgument = temp.getPointer(); } else { llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); finalArgument = CGF.Builder.CreateSelect(isNull, llvm::ConstantPointerNull::get(destType), temp.getPointer(), "icr.argument"); // If we need to copy, then the load has to be conditional, which // means we need control flow. 
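// [Editorial note, not part of this patch] A hedged sketch of the source
// pattern that reaches this call-by-writeback path under ARC:
//
//   NSError *err;                          // __strong local
//   [obj doSomethingAndReturnError:&err];  // parameter is NSError *__autoreleasing *
//
// The frontend builds an ObjCIndirectCopyRestoreExpr: the call receives the
// address of a fresh __autoreleasing temporary, and emitWriteback() above
// copies that temporary back into 'err' once the call returns.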
if (shouldCopy) { originBB = CGF.Builder.GetInsertBlock(); contBB = CGF.createBasicBlock("icr.cont"); llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy"); CGF.Builder.CreateCondBr(isNull, contBB, copyBB); CGF.EmitBlock(copyBB); condEval.begin(CGF); } } llvm::Value *valueToUse = nullptr; // Perform a copy if necessary. if (shouldCopy) { RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation()); assert(srcRV.isScalar()); llvm::Value *src = srcRV.getScalarVal(); src = CGF.Builder.CreateBitCast(src, destType->getElementType(), "icr.cast"); // Use an ordinary store, not a store-to-lvalue. CGF.Builder.CreateStore(src, temp); // If optimization is enabled, and the value was held in a // __strong variable, we need to tell the optimizer that this // value has to stay alive until we're doing the store back. // This is because the temporary is effectively unretained, // and so otherwise we can violate the high-level semantics. if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { valueToUse = src; } } // Finish the control flow if we needed it. if (shouldCopy && !provablyNonNull) { llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); CGF.EmitBlock(contBB); // Make a phi for the value to intrinsically use. if (valueToUse) { llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, "icr.to-use"); phiToUse->addIncoming(valueToUse, copyBB); phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), originBB); valueToUse = phiToUse; } condEval.end(CGF); } args.addWriteback(srcLV, temp, valueToUse); args.add(RValue::get(finalArgument), CRE->getType()); } void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { assert(!StackBase); // Save the stack. llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave); StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save"); } void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { if (StackBase) { // Restore the stack after the call. llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); CGF.Builder.CreateCall(F, StackBase); } } void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum) { if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) || SanOpts.has(SanitizerKind::NullabilityArg))) return; // The param decl may be missing in a variadic function. auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr; unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum; // Prefer the nonnull attribute if it's present. 
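// [Editorial note, not part of this patch] A hedged example of the call sites
// this check instruments (under -fsanitize=nonnull-attribute or
// -fsanitize=nullability-arg):
//
//   void takes_ptr(int *p) __attribute__((nonnull));   // -> NonnullAttribute
//   void takes_nonnull(int *_Nonnull p);                // -> NullabilityArg
//
// Passing a possibly-null pointer to either declaration funnels into the
// EmitCheck call at the end of this function.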
const NonNullAttr *NNAttr = nullptr; if (SanOpts.has(SanitizerKind::NonnullAttribute)) NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo); bool CanCheckNullability = false; if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) { auto Nullability = PVD->getType()->getNullability(getContext()); CanCheckNullability = Nullability && *Nullability == NullabilityKind::NonNull && PVD->getTypeSourceInfo(); } if (!NNAttr && !CanCheckNullability) return; SourceLocation AttrLoc; SanitizerMask CheckKind; SanitizerHandler Handler; if (NNAttr) { AttrLoc = NNAttr->getLocation(); CheckKind = SanitizerKind::NonnullAttribute; Handler = SanitizerHandler::NonnullArg; } else { AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc(); CheckKind = SanitizerKind::NullabilityArg; Handler = SanitizerHandler::NullabilityArg; } SanitizerScope SanScope(this);
- assert(RV.isScalar());
- llvm::Value *V = RV.getScalarVal();
- llvm::Value *Cond =
-     Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
+ llvm::Value *Cond = EmitNonNullRValueCheck(RV, ArgType);
llvm::Constant *StaticData[] = { EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc), llvm::ConstantInt::get(Int32Ty, ArgNo + 1), }; EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None); } void CodeGenFunction::EmitCallArgs( CallArgList &Args, ArrayRef<QualType> ArgTypes, llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange, AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) { assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin())); // We *have* to evaluate arguments from right to left in the MS C++ ABI, // because arguments are destroyed left to right in the callee. As a special // case, there are certain language constructs that require left-to-right // evaluation, and in those cases we consider the evaluation order requirement // to trump the "destruction order is reverse construction order" guarantee. bool LeftToRight = CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee() ? Order == EvaluationOrder::ForceLeftToRight : Order != EvaluationOrder::ForceRightToLeft; auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg, RValue EmittedArg) { if (!AC.hasFunctionDecl() || I >= AC.getNumParams()) return; auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>(); if (PS == nullptr) return; const auto &Context = getContext(); auto SizeTy = Context.getSizeType(); auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy)); assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?"); llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T, EmittedArg.getScalarVal(), PS->isDynamic()); Args.add(RValue::get(V), SizeTy); // If we're emitting args in reverse, be sure to do so with // pass_object_size, as well. if (!LeftToRight) std::swap(Args.back(), *(&Args.back() - 1)); }; // Insert a stack save if we're going to need any inalloca args. bool HasInAllocaArgs = false; if (CGM.getTarget().getCXXABI().isMicrosoft()) { for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end(); I != E && !HasInAllocaArgs; ++I) HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I); if (HasInAllocaArgs) { assert(getTarget().getTriple().getArch() == llvm::Triple::x86); Args.allocateArgumentMemory(*this); } } // Evaluate each argument in the appropriate order. size_t CallArgsStart = Args.size(); for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) { unsigned Idx = LeftToRight ?
I : E - I - 1; CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx; unsigned InitialArgSize = Args.size(); // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of // the argument and parameter match or the objc method is parameterized. assert((!isa(*Arg) || getContext().hasSameUnqualifiedType((*Arg)->getType(), ArgTypes[Idx]) || (isa(AC.getDecl()) && isObjCMethodWithTypeParams(cast(AC.getDecl())))) && "Argument and parameter types don't match"); EmitCallArg(Args, *Arg, ArgTypes[Idx]); // In particular, we depend on it being the last arg in Args, and the // objectsize bits depend on there only being one arg if !LeftToRight. assert(InitialArgSize + 1 == Args.size() && "The code below depends on only adding one arg per EmitCallArg"); (void)InitialArgSize; // Since pointer argument are never emitted as LValue, it is safe to emit // non-null argument check for r-value only. if (!Args.back().hasLValue()) { RValue RVArg = Args.back().getKnownRValue(); EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC, ParamsToSkip + Idx); // @llvm.objectsize should never have side-effects and shouldn't need // destruction/cleanups, so we can safely "emit" it after its arg, // regardless of right-to-leftness MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg); } } if (!LeftToRight) { // Un-reverse the arguments we just evaluated so they match up with the LLVM // IR function. std::reverse(Args.begin() + CallArgsStart, Args.end()); } } namespace { struct DestroyUnpassedArg final : EHScopeStack::Cleanup { DestroyUnpassedArg(Address Addr, QualType Ty) : Addr(Addr), Ty(Ty) {} Address Addr; QualType Ty; void Emit(CodeGenFunction &CGF, Flags flags) override { QualType::DestructionKind DtorKind = Ty.isDestructedType(); if (DtorKind == QualType::DK_cxx_destructor) { const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); assert(!Dtor->isTrivial()); CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, /*Delegating=*/false, Addr, Ty); } else { CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty)); } } }; struct DisableDebugLocationUpdates { CodeGenFunction &CGF; bool disabledDebugInfo; DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) { if ((disabledDebugInfo = isa(E) && CGF.getDebugInfo())) CGF.disableDebugInfo(); } ~DisableDebugLocationUpdates() { if (disabledDebugInfo) CGF.enableDebugInfo(); } }; } // end anonymous namespace RValue CallArg::getRValue(CodeGenFunction &CGF) const { if (!HasLV) return RV; LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty); CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap, LV.isVolatile()); IsUsed = true; return RValue::getAggregate(Copy.getAddress(CGF)); } void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const { LValue Dst = CGF.MakeAddrLValue(Addr, Ty); if (!HasLV && RV.isScalar()) CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true); else if (!HasLV && RV.isComplex()) CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true); else { auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress(); LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty); // We assume that call args are never copied into subobjects. CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap, HasLV ? 
LV.isVolatileQualified() : RV.isVolatileQualified()); } IsUsed = true; } void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, QualType type) { DisableDebugLocationUpdates Dis(*this, E); if (const ObjCIndirectCopyRestoreExpr *CRE = dyn_cast(E)) { assert(getLangOpts().ObjCAutoRefCount); return emitWritebackArg(*this, args, CRE); } assert(type->isReferenceType() == E->isGLValue() && "reference binding to unmaterialized r-value!"); if (E->isGLValue()) { assert(E->getObjectKind() == OK_Ordinary); return args.add(EmitReferenceBindingToExpr(E), type); } bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. // However, we still have to push an EH-only cleanup in case we unwind before // we make it to the call. if (HasAggregateEvalKind && type->castAs()->getDecl()->isParamDestroyedInCallee()) { // If we're using inalloca, use the argument memory. Otherwise, use a // temporary. AggValueSlot Slot; if (args.isUsingInAlloca()) Slot = createPlaceholderSlot(*this, type); else Slot = CreateAggTemp(type, "agg.tmp"); bool DestroyedInCallee = true, NeedsEHCleanup = true; if (const auto *RD = type->getAsCXXRecordDecl()) DestroyedInCallee = RD->hasNonTrivialDestructor(); else NeedsEHCleanup = needsEHCleanup(type.isDestructedType()); if (DestroyedInCallee) Slot.setExternallyDestructed(); EmitAggExpr(E, Slot); RValue RV = Slot.asRValue(); args.add(RV, type); if (DestroyedInCallee && NeedsEHCleanup) { // Create a no-op GEP between the placeholder and the cleanup so we can // RAUW it successfully. It also serves as a marker of the first // instruction where the cleanup is active. pushFullExprCleanup(EHCleanup, Slot.getAddress(), type); // This unreachable is a temporary marker which will be removed later. llvm::Instruction *IsActive = Builder.CreateUnreachable(); args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive); } return; } if (HasAggregateEvalKind && isa(E) && cast(E)->getCastKind() == CK_LValueToRValue) { LValue L = EmitLValue(cast(E)->getSubExpr()); assert(L.isSimple()); args.addUncopiedAggregate(L, type); return; } args.add(EmitAnyExprToTemp(E), type); } QualType CodeGenFunction::getVarArgType(const Expr *Arg) { // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC // implicitly widens null pointer constants that are arguments to varargs // functions to pointer-sized ints. if (!getTarget().getTriple().isOSWindows()) return Arg->getType(); if (Arg->getType()->isIntegerType() && getContext().getTypeSize(Arg->getType()) < getContext().getTargetInfo().getPointerWidth(0) && Arg->isNullPointerConstant(getContext(), Expr::NPC_ValueDependentIsNotNull)) { return getContext().getIntPtrType(); } return Arg->getType(); } // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC // optimizer it can aggressively ignore unwind edges. void CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { if (CGM.getCodeGenOpts().OptimizationLevel != 0 && !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) Inst->setMetadata("clang.arc.no_objc_arc_exceptions", CGM.getNoObjCARCExceptionsMetadata()); } /// Emits a call to the given no-arguments nounwind runtime function. llvm::CallInst * CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const llvm::Twine &name) { return EmitNounwindRuntimeCall(callee, None, name); } /// Emits a call to the given nounwind runtime function. 
llvm::CallInst * CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, ArrayRef args, const llvm::Twine &name) { llvm::CallInst *call = EmitRuntimeCall(callee, args, name); call->setDoesNotThrow(); return call; } /// Emits a simple call (never an invoke) to the given no-arguments /// runtime function. llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, const llvm::Twine &name) { return EmitRuntimeCall(callee, None, name); } // Calls which may throw must have operand bundles indicating which funclet // they are nested within. SmallVector CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) { SmallVector BundleList; // There is no need for a funclet operand bundle if we aren't inside a // funclet. if (!CurrentFuncletPad) return BundleList; // Skip intrinsics which cannot throw. auto *CalleeFn = dyn_cast(Callee->stripPointerCasts()); if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) return BundleList; BundleList.emplace_back("funclet", CurrentFuncletPad); return BundleList; } /// Emits a simple call (never an invoke) to the given runtime function. llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, ArrayRef args, const llvm::Twine &name) { llvm::CallInst *call = Builder.CreateCall( callee, args, getBundlesForFunclet(callee.getCallee()), name); call->setCallingConv(getRuntimeCC()); return call; } /// Emits a call or invoke to the given noreturn runtime function. void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke( llvm::FunctionCallee callee, ArrayRef args) { SmallVector BundleList = getBundlesForFunclet(callee.getCallee()); if (getInvokeDest()) { llvm::InvokeInst *invoke = Builder.CreateInvoke(callee, getUnreachableBlock(), getInvokeDest(), args, BundleList); invoke->setDoesNotReturn(); invoke->setCallingConv(getRuntimeCC()); } else { llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList); call->setDoesNotReturn(); call->setCallingConv(getRuntimeCC()); Builder.CreateUnreachable(); } } /// Emits a call or invoke instruction to the given nullary runtime function. llvm::CallBase * CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, const Twine &name) { return EmitRuntimeCallOrInvoke(callee, None, name); } /// Emits a call or invoke instruction to the given runtime function. llvm::CallBase * CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef args, const Twine &name) { llvm::CallBase *call = EmitCallOrInvoke(callee, args, name); call->setCallingConv(getRuntimeCC()); return call; } /// Emits a call or invoke instruction to the given function, depending /// on the current state of the EH stack. llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef Args, const Twine &Name) { llvm::BasicBlock *InvokeDest = getInvokeDest(); SmallVector BundleList = getBundlesForFunclet(Callee.getCallee()); llvm::CallBase *Inst; if (!InvokeDest) Inst = Builder.CreateCall(Callee, Args, BundleList, Name); else { llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList, Name); EmitBlock(ContBB); } // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC // optimizer it can aggressively ignore unwind edges. 
if (CGM.getLangOpts().ObjCAutoRefCount) AddObjCARCExceptionMetadata(Inst); return Inst; } void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old, llvm::Value *New) { DeferredReplacements.push_back(std::make_pair(Old, New)); } namespace { /// Specify given \p NewAlign as the alignment of return value attribute. If /// such attribute already exists, re-set it to the maximal one of two options. LLVM_NODISCARD llvm::AttributeList maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx, const llvm::AttributeList &Attrs, llvm::Align NewAlign) { llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne(); if (CurAlign >= NewAlign) return Attrs; llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign); return Attrs .removeAttribute(Ctx, llvm::AttributeList::ReturnIndex, llvm::Attribute::AttrKind::Alignment) .addAttribute(Ctx, llvm::AttributeList::ReturnIndex, AlignAttr); } template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter { protected: CodeGenFunction &CGF; /// We do nothing if this is, or becomes, nullptr. const AlignedAttrTy *AA = nullptr; llvm::Value *Alignment = nullptr; // May or may not be a constant. llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero. AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl) : CGF(CGF_) { if (!FuncDecl) return; AA = FuncDecl->getAttr<AlignedAttrTy>(); } public: /// If we can, materialize the alignment as an attribute on return value. LLVM_NODISCARD llvm::AttributeList TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) { if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment)) return Attrs; const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment); if (!AlignmentCI) return Attrs; // We may legitimately have non-power-of-2 alignment here. // If so, this is UB land, emit it via `@llvm.assume` instead. if (!AlignmentCI->getValue().isPowerOf2()) return Attrs; llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute( CGF.getLLVMContext(), Attrs, llvm::Align( AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment))); AA = nullptr; // We're done. Disallow doing anything else. return NewAttrs; } /// Emit alignment assumption. /// This is a general fallback that we take if either there is an offset, /// or the alignment is variable or we are sanitizing for alignment. void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) { if (!AA) return; CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(), Alignment, OffsetCI); AA = nullptr; // We're done. Disallow doing anything else. } }; /// Helper data structure to emit `AssumeAlignedAttr`. class AssumeAlignedAttrEmitter final : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> { public: AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl) : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) { if (!AA) return; // It is guaranteed that the alignment/offset are constants. Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment())); if (Expr *Offset = AA->getOffset()) { OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset)); if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset. OffsetCI = nullptr; } } }; /// Helper data structure to emit `AllocAlignAttr`. class AllocAlignAttrEmitter final : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> { public: AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl, const CallArgList &CallArgs) : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) { if (!AA) return; // Alignment may or may not be a constant, and that is okay.
Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()] .getRValue(CGF) .getScalarVal(); } }; } // namespace RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &CallArgs, llvm::CallBase **callOrInvoke, SourceLocation Loc) { // FIXME: We no longer need the types from CallArgs; lift up and simplify. assert(Callee.isOrdinary() || Callee.isVirtual()); // Handle struct-return functions by passing a pointer to the // location that we would like to return into. QualType RetTy = CallInfo.getReturnType(); const ABIArgInfo &RetAI = CallInfo.getReturnInfo(); llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo); const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl(); if (const FunctionDecl *FD = dyn_cast_or_null(TargetDecl)) { // We can only guarantee that a function is called from the correct // context/function based on the appropriate target attributes, // so only check in the case where we have both always_inline and target // since otherwise we could be making a conditional call after a check for // the proper cpu features (and it won't cause code generation issues due to // function based code generation). if (TargetDecl->hasAttr() && TargetDecl->hasAttr()) checkTargetFeatures(Loc, FD); // Some architectures (such as x86-64) have the ABI changed based on // attribute-target/features. Give them a chance to diagnose. CGM.getTargetCodeGenInfo().checkFunctionCallABI( CGM, Loc, dyn_cast_or_null(CurCodeDecl), FD, CallArgs); } #ifndef NDEBUG if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) { // For an inalloca varargs function, we don't expect CallInfo to match the // function pointer's type, because the inalloca struct a will have extra // fields in it for the varargs parameters. Code later in this function // bitcasts the function pointer to the type derived from CallInfo. // // In other cases, we assert that the types match up (until pointers stop // having pointee types). llvm::Type *TypeFromVal; if (Callee.isVirtual()) TypeFromVal = Callee.getVirtualFunctionType(); else TypeFromVal = Callee.getFunctionPointer()->getType()->getPointerElementType(); assert(IRFuncTy == TypeFromVal); } #endif // 1. Set up the arguments. // If we're using inalloca, insert the allocation after the stack save. // FIXME: Do this earlier rather than hacking it in here! Address ArgMemory = Address::invalid(); if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) { const llvm::DataLayout &DL = CGM.getDataLayout(); llvm::Instruction *IP = CallArgs.getStackBase(); llvm::AllocaInst *AI; if (IP) { IP = IP->getNextNode(); AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(), "argmem", IP); } else { AI = CreateTempAlloca(ArgStruct, "argmem"); } auto Align = CallInfo.getArgStructAlignment(); AI->setAlignment(Align.getAsAlign()); AI->setUsedWithInAlloca(true); assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca()); ArgMemory = Address(AI, Align); } ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo); SmallVector IRCallArgs(IRFunctionArgs.totalIRArgs()); // If the call returns a temporary with struct return, create a temporary // alloca to hold the result, unless one is given to us. 
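// [Editorial note, not part of this patch] A hedged illustration of the
// struct-return handling below: for a return type lowered indirectly, e.g.
//
//   struct Big { int a[8]; };
//   Big make();                 // x86-64: returned via a hidden sret pointer
//
// the call is emitted roughly as 'call void @make(%struct.Big* sret %tmp)',
// and the code that follows either reuses the caller-provided ReturnValue
// slot or materializes a temporary alloca as SRetPtr.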
Address SRetPtr = Address::invalid(); Address SRetAlloca = Address::invalid(); llvm::Value *UnusedReturnSizePtr = nullptr; if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) { if (!ReturnValue.isNull()) { SRetPtr = ReturnValue.getValue(); } else { SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca); if (HaveInsertPoint() && ReturnValue.isUnused()) { uint64_t size = CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy)); UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer()); } } if (IRFunctionArgs.hasSRetArg()) { IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer(); } else if (RetAI.isInAlloca()) { Address Addr = Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex()); Builder.CreateStore(SRetPtr.getPointer(), Addr); } } Address swiftErrorTemp = Address::invalid(); Address swiftErrorArg = Address::invalid(); // When passing arguments using temporary allocas, we need to add the // appropriate lifetime markers. This vector keeps track of all the lifetime // markers that need to be ended right after the call. SmallVector CallLifetimeEndAfterCall; // Translate all of the arguments as necessary to match the IR lowering. assert(CallInfo.arg_size() == CallArgs.size() && "Mismatch between function signature & arguments."); unsigned ArgNo = 0; CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end(); I != E; ++I, ++info_it, ++ArgNo) { const ABIArgInfo &ArgInfo = info_it->info; // Insert a padding argument to ensure proper alignment. if (IRFunctionArgs.hasPaddingArg(ArgNo)) IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = llvm::UndefValue::get(ArgInfo.getPaddingType()); unsigned FirstIRArg, NumIRArgs; std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); switch (ArgInfo.getKind()) { case ABIArgInfo::InAlloca: { assert(NumIRArgs == 0); assert(getTarget().getTriple().getArch() == llvm::Triple::x86); if (I->isAggregate()) { Address Addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this) : I->getKnownRValue().getAggregateAddress(); llvm::Instruction *Placeholder = cast(Addr.getPointer()); if (!ArgInfo.getInAllocaIndirect()) { // Replace the placeholder with the appropriate argument slot GEP. CGBuilderTy::InsertPoint IP = Builder.saveIP(); Builder.SetInsertPoint(Placeholder); Addr = Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex()); Builder.restoreIP(IP); } else { // For indirect things such as overaligned structs, replace the // placeholder with a regular aggregate temporary alloca. Store the // address of this alloca into the struct. Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp"); Address ArgSlot = Builder.CreateStructGEP( ArgMemory, ArgInfo.getInAllocaFieldIndex()); Builder.CreateStore(Addr.getPointer(), ArgSlot); } deferPlaceholderReplacement(Placeholder, Addr.getPointer()); } else if (ArgInfo.getInAllocaIndirect()) { // Make a temporary alloca and store the address of it into the argument // struct. Address Addr = CreateMemTempWithoutCast( I->Ty, getContext().getTypeAlignInChars(I->Ty), "indirect-arg-temp"); I->copyInto(*this, Addr); Address ArgSlot = Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex()); Builder.CreateStore(Addr.getPointer(), ArgSlot); } else { // Store the RValue into the argument struct. 
Address Addr = Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex()); unsigned AS = Addr.getType()->getPointerAddressSpace(); llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS); // There are some cases where a trivial bitcast is not avoidable. The // definition of a type later in a translation unit may change it's type // from {}* to (%struct.foo*)*. if (Addr.getType() != MemType) Addr = Builder.CreateBitCast(Addr, MemType); I->copyInto(*this, Addr); } break; } case ABIArgInfo::Indirect: case ABIArgInfo::IndirectAliased: { assert(NumIRArgs == 1); if (!I->isAggregate()) { // Make a temporary alloca to pass the argument. Address Addr = CreateMemTempWithoutCast( I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp"); IRCallArgs[FirstIRArg] = Addr.getPointer(); I->copyInto(*this, Addr); } else { // We want to avoid creating an unnecessary temporary+copy here; // however, we need one in three cases: // 1. If the argument is not byval, and we are required to copy the // source. (This case doesn't occur on any common architecture.) // 2. If the argument is byval, RV is not sufficiently aligned, and // we cannot force it to be sufficiently aligned. // 3. If the argument is byval, but RV is not located in default // or alloca address space. Address Addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this) : I->getKnownRValue().getAggregateAddress(); llvm::Value *V = Addr.getPointer(); CharUnits Align = ArgInfo.getIndirectAlign(); const llvm::DataLayout *TD = &CGM.getDataLayout(); assert((FirstIRArg >= IRFuncTy->getNumParams() || IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() == TD->getAllocaAddrSpace()) && "indirect argument must be in alloca address space"); bool NeedCopy = false; if (Addr.getAlignment() < Align && llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) < Align.getAsAlign()) { NeedCopy = true; } else if (I->hasLValue()) { auto LV = I->getKnownLValue(); auto AS = LV.getAddressSpace(); if (!ArgInfo.getIndirectByVal() || (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) { NeedCopy = true; } if (!getLangOpts().OpenCL) { if ((ArgInfo.getIndirectByVal() && (AS != LangAS::Default && AS != CGM.getASTAllocaAddressSpace()))) { NeedCopy = true; } } // For OpenCL even if RV is located in default or alloca address space // we don't want to perform address space cast for it. else if ((ArgInfo.getIndirectByVal() && Addr.getType()->getAddressSpace() != IRFuncTy-> getParamType(FirstIRArg)->getPointerAddressSpace())) { NeedCopy = true; } } if (NeedCopy) { // Create an aligned temporary, and copy to it. Address AI = CreateMemTempWithoutCast( I->Ty, ArgInfo.getIndirectAlign(), "byval-temp"); IRCallArgs[FirstIRArg] = AI.getPointer(); // Emit lifetime markers for the temporary alloca. uint64_t ByvalTempElementSize = CGM.getDataLayout().getTypeAllocSize(AI.getElementType()); llvm::Value *LifetimeSize = EmitLifetimeStart(ByvalTempElementSize, AI.getPointer()); // Add cleanup code to emit the end lifetime marker after the call. if (LifetimeSize) // In case we disabled lifetime markers. CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize); // Generate the copy. I->copyInto(*this, AI); } else { // Skip the extra memcpy call. 
auto *T = V->getType()->getPointerElementType()->getPointerTo( CGM.getDataLayout().getAllocaAddrSpace()); IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast( *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T, true); } } break; } case ABIArgInfo::Ignore: assert(NumIRArgs == 0); break; case ABIArgInfo::Extend: case ABIArgInfo::Direct: { if (!isa(ArgInfo.getCoerceToType()) && ArgInfo.getCoerceToType() == ConvertType(info_it->type) && ArgInfo.getDirectOffset() == 0) { assert(NumIRArgs == 1); llvm::Value *V; if (!I->isAggregate()) V = I->getKnownRValue().getScalarVal(); else V = Builder.CreateLoad( I->hasLValue() ? I->getKnownLValue().getAddress(*this) : I->getKnownRValue().getAggregateAddress()); // Implement swifterror by copying into a new swifterror argument. // We'll write back in the normal path out of the call. if (CallInfo.getExtParameterInfo(ArgNo).getABI() == ParameterABI::SwiftErrorResult) { assert(!swiftErrorTemp.isValid() && "multiple swifterror args"); QualType pointeeTy = I->Ty->getPointeeType(); swiftErrorArg = Address(V, getContext().getTypeAlignInChars(pointeeTy)); swiftErrorTemp = CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); V = swiftErrorTemp.getPointer(); cast(V)->setSwiftError(true); llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg); Builder.CreateStore(errorValue, swiftErrorTemp); } // We might have to widen integers, but we should never truncate. if (ArgInfo.getCoerceToType() != V->getType() && V->getType()->isIntegerTy()) V = Builder.CreateZExt(V, ArgInfo.getCoerceToType()); // If the argument doesn't match, perform a bitcast to coerce it. This // can happen due to trivial type mismatches. if (FirstIRArg < IRFuncTy->getNumParams() && V->getType() != IRFuncTy->getParamType(FirstIRArg)) V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg)); IRCallArgs[FirstIRArg] = V; break; } // FIXME: Avoid the conversion through memory if possible. Address Src = Address::invalid(); if (!I->isAggregate()) { Src = CreateMemTemp(I->Ty, "coerce"); I->copyInto(*this, Src); } else { Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this) : I->getKnownRValue().getAggregateAddress(); } // If the value is offset in memory, apply the offset now. Src = emitAddressAtOffset(*this, Src, ArgInfo); // Fast-isel and the optimizer generally like scalar values better than // FCAs, so we flatten them if this is safe to do for this argument. llvm::StructType *STy = dyn_cast(ArgInfo.getCoerceToType()); if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { llvm::Type *SrcTy = Src.getElementType(); uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy); uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy); // If the source type is smaller than the destination type of the // coerce-to logic, copy the source value into a temp alloca the size // of the destination type to allow loading all of it. The bits past // the source value are left undef. if (SrcSize < DstSize) { Address TempAlloca = CreateTempAlloca(STy, Src.getAlignment(), Src.getName() + ".coerce"); Builder.CreateMemCpy(TempAlloca, Src, SrcSize); Src = TempAlloca; } else { Src = Builder.CreateBitCast(Src, STy->getPointerTo(Src.getAddressSpace())); } assert(NumIRArgs == STy->getNumElements()); for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { Address EltPtr = Builder.CreateStructGEP(Src, i); llvm::Value *LI = Builder.CreateLoad(EltPtr); IRCallArgs[FirstIRArg + i] = LI; } } else { // In the simple case, just pass the coerced loaded value. 
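// [Editorial note, not part of this patch] A hedged illustration of the
// flattening above versus this simple path: on x86-64,
//
//   struct P { double x, y; };   // coerced to { double, double }
//
// is flattened into two separate double arguments, whereas an argument whose
// coerce-to type is not a flattenable struct takes this branch and is passed
// as a single value produced by CreateCoercedLoad.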
assert(NumIRArgs == 1); llvm::Value *Load = CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this); if (CallInfo.isCmseNSCall()) { // For certain parameter types, clear padding bits, as they may reveal // sensitive information. // Small struct/union types are passed as integer arrays. auto *ATy = dyn_cast(Load->getType()); if (ATy != nullptr && isa(I->Ty.getCanonicalType())) Load = EmitCMSEClearRecord(Load, ATy, I->Ty); } IRCallArgs[FirstIRArg] = Load; } break; } case ABIArgInfo::CoerceAndExpand: { auto coercionType = ArgInfo.getCoerceAndExpandType(); auto layout = CGM.getDataLayout().getStructLayout(coercionType); llvm::Value *tempSize = nullptr; Address addr = Address::invalid(); Address AllocaAddr = Address::invalid(); if (I->isAggregate()) { addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this) : I->getKnownRValue().getAggregateAddress(); } else { RValue RV = I->getKnownRValue(); assert(RV.isScalar()); // complex should always just be direct llvm::Type *scalarType = RV.getScalarVal()->getType(); auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType); auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType); // Materialize to a temporary. addr = CreateTempAlloca( RV.getScalarVal()->getType(), CharUnits::fromQuantity(std::max( (unsigned)layout->getAlignment().value(), scalarAlign)), "tmp", /*ArraySize=*/nullptr, &AllocaAddr); tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer()); Builder.CreateStore(RV.getScalarVal(), addr); } addr = Builder.CreateElementBitCast(addr, coercionType); unsigned IRArgPos = FirstIRArg; for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { llvm::Type *eltType = coercionType->getElementType(i); if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; Address eltAddr = Builder.CreateStructGEP(addr, i); llvm::Value *elt = Builder.CreateLoad(eltAddr); IRCallArgs[IRArgPos++] = elt; } assert(IRArgPos == FirstIRArg + NumIRArgs); if (tempSize) { EmitLifetimeEnd(tempSize, AllocaAddr.getPointer()); } break; } case ABIArgInfo::Expand: { unsigned IRArgPos = FirstIRArg; ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos); assert(IRArgPos == FirstIRArg + NumIRArgs); break; } } } const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this); llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer(); // If we're using inalloca, set up that argument. if (ArgMemory.isValid()) { llvm::Value *Arg = ArgMemory.getPointer(); if (CallInfo.isVariadic()) { // When passing non-POD arguments by value to variadic functions, we will // end up with a variadic prototype and an inalloca call site. In such // cases, we can't do any parameter mismatch checks. Give up and bitcast // the callee. unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace(); CalleePtr = Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS)); } else { llvm::Type *LastParamTy = IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1); if (Arg->getType() != LastParamTy) { #ifndef NDEBUG // Assert that these structs have equivalent element types. 
llvm::StructType *FullTy = CallInfo.getArgStruct(); llvm::StructType *DeclaredTy = cast( cast(LastParamTy)->getElementType()); assert(DeclaredTy->getNumElements() == FullTy->getNumElements()); for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(), DE = DeclaredTy->element_end(), FI = FullTy->element_begin(); DI != DE; ++DI, ++FI) assert(*DI == *FI); #endif Arg = Builder.CreateBitCast(Arg, LastParamTy); } } assert(IRFunctionArgs.hasInallocaArg()); IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg; } // 2. Prepare the function pointer. // If the callee is a bitcast of a non-variadic function to have a // variadic function pointer type, check to see if we can remove the // bitcast. This comes up with unprototyped functions. // // This makes the IR nicer, but more importantly it ensures that we // can inline the function at -O0 if it is marked always_inline. auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT, llvm::Value *Ptr) -> llvm::Function * { if (!CalleeFT->isVarArg()) return nullptr; // Get underlying value if it's a bitcast if (llvm::ConstantExpr *CE = dyn_cast(Ptr)) { if (CE->getOpcode() == llvm::Instruction::BitCast) Ptr = CE->getOperand(0); } llvm::Function *OrigFn = dyn_cast(Ptr); if (!OrigFn) return nullptr; llvm::FunctionType *OrigFT = OrigFn->getFunctionType(); // If the original type is variadic, or if any of the component types // disagree, we cannot remove the cast. if (OrigFT->isVarArg() || OrigFT->getNumParams() != CalleeFT->getNumParams() || OrigFT->getReturnType() != CalleeFT->getReturnType()) return nullptr; for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i) if (OrigFT->getParamType(i) != CalleeFT->getParamType(i)) return nullptr; return OrigFn; }; if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) { CalleePtr = OrigFn; IRFuncTy = OrigFn->getFunctionType(); } // 3. Perform the actual call. // Deactivate any cleanups that we're supposed to do immediately before // the call. if (!CallArgs.getCleanupsToDeactivate().empty()) deactivateArgCleanupsBeforeCall(*this, CallArgs); // Assert that the arguments we computed match up. The IR verifier // will catch this, but this is a common enough source of problems // during IRGen changes that it's way better for debugging to catch // it ourselves here. #ifndef NDEBUG assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg()); for (unsigned i = 0; i < IRCallArgs.size(); ++i) { // Inalloca argument can have different type. if (IRFunctionArgs.hasInallocaArg() && i == IRFunctionArgs.getInallocaArgNo()) continue; if (i < IRFuncTy->getNumParams()) assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i)); } #endif // Update the largest vector width if any arguments have vector types. for (unsigned i = 0; i < IRCallArgs.size(); ++i) { if (auto *VT = dyn_cast(IRCallArgs[i]->getType())) LargestVectorWidth = std::max((uint64_t)LargestVectorWidth, VT->getPrimitiveSizeInBits().getKnownMinSize()); } // Compute the calling convention and attributes. unsigned CallingConv; llvm::AttributeList Attrs; CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo, Callee.getAbstractInfo(), Attrs, CallingConv, /*AttrOnCallSite=*/true); if (const FunctionDecl *FD = dyn_cast_or_null(CurFuncDecl)) if (FD->usesFPIntrin()) // All calls within a strictfp function are marked strictfp Attrs = Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex, llvm::Attribute::StrictFP); // Add call-site nomerge attribute if exists. 
if (InNoMergeAttributedStmt) Attrs = Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex, llvm::Attribute::NoMerge); // Apply some call-site-specific attributes. // TODO: work this into building the attribute set. // Apply always_inline to all calls within flatten functions. // FIXME: should this really take priority over __try, below? if (CurCodeDecl && CurCodeDecl->hasAttr() && !(TargetDecl && TargetDecl->hasAttr())) { Attrs = Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex, llvm::Attribute::AlwaysInline); } // Disable inlining inside SEH __try blocks. if (isSEHTryScope()) { Attrs = Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex, llvm::Attribute::NoInline); } // Decide whether to use a call or an invoke. bool CannotThrow; if (currentFunctionUsesSEHTry()) { // SEH cares about asynchronous exceptions, so everything can "throw." CannotThrow = false; } else if (isCleanupPadScope() && EHPersonality::get(*this).isMSVCXXPersonality()) { // The MSVC++ personality will implicitly terminate the program if an // exception is thrown during a cleanup outside of a try/catch. // We don't need to model anything in IR to get this behavior. CannotThrow = true; } else { // Otherwise, nounwind call sites will never throw. CannotThrow = Attrs.hasFnAttribute(llvm::Attribute::NoUnwind); if (auto *FPtr = dyn_cast(CalleePtr)) if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind)) CannotThrow = true; } // If we made a temporary, be sure to clean up after ourselves. Note that we // can't depend on being inside of an ExprWithCleanups, so we need to manually // pop this cleanup later on. Being eager about this is OK, since this // temporary is 'invisible' outside of the callee. if (UnusedReturnSizePtr) pushFullExprCleanup(NormalEHLifetimeMarker, SRetAlloca, UnusedReturnSizePtr); llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest(); SmallVector BundleList = getBundlesForFunclet(CalleePtr); if (const FunctionDecl *FD = dyn_cast_or_null(CurFuncDecl)) if (FD->usesFPIntrin()) // All calls within a strictfp function are marked strictfp Attrs = Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex, llvm::Attribute::StrictFP); AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl); Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs); AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs); Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs); // Emit the actual call/invoke instruction. llvm::CallBase *CI; if (!InvokeDest) { CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList); } else { llvm::BasicBlock *Cont = createBasicBlock("invoke.cont"); CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs, BundleList); EmitBlock(Cont); } if (callOrInvoke) *callOrInvoke = CI; // If this is within a function that has the guard(nocf) attribute and is an // indirect call, add the "guard_nocf" attribute to this call to indicate that // Control Flow Guard checks should not be added, even if the call is inlined. if (const auto *FD = dyn_cast_or_null(CurFuncDecl)) { if (const auto *A = FD->getAttr()) { if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction()) Attrs = Attrs.addAttribute( getLLVMContext(), llvm::AttributeList::FunctionIndex, "guard_nocf"); } } // Apply the attributes and calling convention. CI->setAttributes(Attrs); CI->setCallingConv(static_cast(CallingConv)); // Apply various metadata. 
if (!CI->getType()->isVoidTy()) CI->setName("call"); // Update largest vector width from the return type. if (auto *VT = dyn_cast(CI->getType())) LargestVectorWidth = std::max((uint64_t)LargestVectorWidth, VT->getPrimitiveSizeInBits().getKnownMinSize()); // Insert instrumentation or attach profile metadata at indirect call sites. // For more details, see the comment before the definition of // IPVK_IndirectCallTarget in InstrProfData.inc. if (!CI->getCalledFunction()) PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget, CI, CalleePtr); // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC // optimizer it can aggressively ignore unwind edges. if (CGM.getLangOpts().ObjCAutoRefCount) AddObjCARCExceptionMetadata(CI); // Suppress tail calls if requested. if (llvm::CallInst *Call = dyn_cast(CI)) { if (TargetDecl && TargetDecl->hasAttr()) Call->setTailCallKind(llvm::CallInst::TCK_NoTail); } // Add metadata for calls to MSAllocator functions if (getDebugInfo() && TargetDecl && TargetDecl->hasAttr()) getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc); // 4. Finish the call. // If the call doesn't return, finish the basic block and clear the // insertion point; this allows the rest of IRGen to discard // unreachable code. if (CI->doesNotReturn()) { if (UnusedReturnSizePtr) PopCleanupBlock(); // Strip away the noreturn attribute to better diagnose unreachable UB. if (SanOpts.has(SanitizerKind::Unreachable)) { // Also remove from function since CallBase::hasFnAttr additionally checks // attributes of the called function. if (auto *F = CI->getCalledFunction()) F->removeFnAttr(llvm::Attribute::NoReturn); CI->removeAttribute(llvm::AttributeList::FunctionIndex, llvm::Attribute::NoReturn); // Avoid incompatibility with ASan which relies on the `noreturn` // attribute to insert handler calls. if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress)) { SanitizerScope SanScope(this); llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder); Builder.SetInsertPoint(CI); auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return"); EmitNounwindRuntimeCall(Fn); } } EmitUnreachable(Loc); Builder.ClearInsertionPoint(); // FIXME: For now, emit a dummy basic block because expr emitters in // generally are not ready to handle emitting expressions at unreachable // points. EnsureInsertPoint(); // Return a reasonable RValue. return GetUndefRValue(RetTy); } // Perform the swifterror writeback. if (swiftErrorTemp.isValid()) { llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp); Builder.CreateStore(errorResult, swiftErrorArg); } // Emit any call-associated writebacks immediately. Arguably this // should happen after any return-value munging. if (CallArgs.hasWritebacks()) emitWritebacks(*this, CallArgs); // The stack cleanup for inalloca arguments has to run out of the normal // lexical order, so deactivate it and run it manually here. CallArgs.freeArgumentMemory(*this); // Extract the return value. 
RValue Ret = [&] { switch (RetAI.getKind()) { case ABIArgInfo::CoerceAndExpand: { auto coercionType = RetAI.getCoerceAndExpandType(); Address addr = SRetPtr; addr = Builder.CreateElementBitCast(addr, coercionType); assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType()); bool requiresExtract = isa(CI->getType()); unsigned unpaddedIndex = 0; for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { llvm::Type *eltType = coercionType->getElementType(i); if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; Address eltAddr = Builder.CreateStructGEP(addr, i); llvm::Value *elt = CI; if (requiresExtract) elt = Builder.CreateExtractValue(elt, unpaddedIndex++); else assert(unpaddedIndex == 0); Builder.CreateStore(elt, eltAddr); } // FALLTHROUGH LLVM_FALLTHROUGH; } case ABIArgInfo::InAlloca: case ABIArgInfo::Indirect: { RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation()); if (UnusedReturnSizePtr) PopCleanupBlock(); return ret; } case ABIArgInfo::Ignore: // If we are ignoring an argument that had a result, make sure to // construct the appropriate return value for our caller. return GetUndefRValue(RetTy); case ABIArgInfo::Extend: case ABIArgInfo::Direct: { llvm::Type *RetIRTy = ConvertType(RetTy); if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) { switch (getEvaluationKind(RetTy)) { case TEK_Complex: { llvm::Value *Real = Builder.CreateExtractValue(CI, 0); llvm::Value *Imag = Builder.CreateExtractValue(CI, 1); return RValue::getComplex(std::make_pair(Real, Imag)); } case TEK_Aggregate: { Address DestPtr = ReturnValue.getValue(); bool DestIsVolatile = ReturnValue.isVolatile(); if (!DestPtr.isValid()) { DestPtr = CreateMemTemp(RetTy, "agg.tmp"); DestIsVolatile = false; } EmitAggregateStore(CI, DestPtr, DestIsVolatile); return RValue::getAggregate(DestPtr); } case TEK_Scalar: { // If the argument doesn't match, perform a bitcast to coerce it. This // can happen due to trivial type mismatches. llvm::Value *V = CI; if (V->getType() != RetIRTy) V = Builder.CreateBitCast(V, RetIRTy); return RValue::get(V); } } llvm_unreachable("bad evaluation kind"); } Address DestPtr = ReturnValue.getValue(); bool DestIsVolatile = ReturnValue.isVolatile(); if (!DestPtr.isValid()) { DestPtr = CreateMemTemp(RetTy, "coerce"); DestIsVolatile = false; } // If the value is offset in memory, apply the offset now. Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI); CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this); return convertTempToRValue(DestPtr, RetTy, SourceLocation()); } case ABIArgInfo::Expand: case ABIArgInfo::IndirectAliased: llvm_unreachable("Invalid ABI kind for return argument"); } llvm_unreachable("Unhandled ABIArgInfo::Kind"); } (); // Emit the assume_aligned check on the return value. if (Ret.isScalar() && TargetDecl) { AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret); AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret); } // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though // we can't use the full cleanup mechanism. 
for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall) LifetimeEnd.Emit(*this, /*Flags=*/{}); if (!ReturnValue.isExternallyDestructed() && RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct) pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(), RetTy); return Ret; } CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const { if (isVirtual()) { const CallExpr *CE = getVirtualCallExpr(); return CGF.CGM.getCXXABI().getVirtualFunctionPointer( CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(), CE ? CE->getBeginLoc() : SourceLocation()); } return *this; } /* VarArg handling */ Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) { VAListAddr = VE->isMicrosoftABI() ? EmitMSVAListRef(VE->getSubExpr()) : EmitVAListRef(VE->getSubExpr()); QualType Ty = VE->getType(); if (VE->isMicrosoftABI()) return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty); return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty); }
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 7351926035e6..27cf066466ca 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -1,5345 +1,5352 @@
//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This contains code to emit Expr nodes as LLVM code. // //===----------------------------------------------------------------------===// #include "CGCXXABI.h" #include "CGCall.h" #include "CGCleanup.h" #include "CGDebugInfo.h" #include "CGObjCRuntime.h" #include "CGOpenMPRuntime.h" #include "CGRecordLayout.h" #include "CodeGenFunction.h" #include "CodeGenModule.h" #include "ConstantEmitter.h" #include "TargetInfo.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Attr.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/NSAPI.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/CodeGenOptions.h" #include "clang/Basic/SourceManager.h" #include "llvm/ADT/Hashing.h" #include "llvm/ADT/StringExtras.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/MDBuilder.h" #include "llvm/Support/ConvertUTF.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/Path.h" #include "llvm/Transforms/Utils/SanitizerStats.h" #include <string> using namespace clang; using namespace CodeGen; //===--------------------------------------------------------------------===// // Miscellaneous Helper Methods //===--------------------------------------------------------------------===// llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) { unsigned addressSpace = cast<llvm::PointerType>(value->getType())->getAddressSpace(); llvm::PointerType *destType = Int8PtrTy; if (addressSpace) destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace); if (value->getType() == destType) return value; return Builder.CreateBitCast(value, destType); } /// CreateTempAlloca - This creates an alloca and inserts it into the entry /// block.
Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align, const Twine &Name, llvm::Value *ArraySize) { auto Alloca = CreateTempAlloca(Ty, Name, ArraySize); Alloca->setAlignment(Align.getAsAlign()); return Address(Alloca, Align); } /// CreateTempAlloca - This creates a alloca and inserts it into the entry /// block. The alloca is casted to default address space if necessary. Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align, const Twine &Name, llvm::Value *ArraySize, Address *AllocaAddr) { auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize); if (AllocaAddr) *AllocaAddr = Alloca; llvm::Value *V = Alloca.getPointer(); // Alloca always returns a pointer in alloca address space, which may // be different from the type defined by the language. For example, // in C++ the auto variables are in the default address space. Therefore // cast alloca to the default address space when necessary. if (getASTAllocaAddressSpace() != LangAS::Default) { auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default); llvm::IRBuilderBase::InsertPointGuard IPG(Builder); // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt, // otherwise alloca is inserted at the current insertion point of the // builder. if (!ArraySize) Builder.SetInsertPoint(AllocaInsertPt); V = getTargetHooks().performAddrSpaceCast( *this, V, getASTAllocaAddressSpace(), LangAS::Default, Ty->getPointerTo(DestAddrSpace), /*non-null*/ true); } return Address(V, Align); } /// CreateTempAlloca - This creates an alloca and inserts it into the entry /// block if \p ArraySize is nullptr, otherwise inserts it at the current /// insertion point of the builder. llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, const Twine &Name, llvm::Value *ArraySize) { if (ArraySize) return Builder.CreateAlloca(Ty, ArraySize, Name); return new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(), ArraySize, Name, AllocaInsertPt); } /// CreateDefaultAlignTempAlloca - This creates an alloca with the /// default alignment of the corresponding LLVM type, which is *not* /// guaranteed to be related in any way to the expected alignment of /// an AST type that might have been lowered to Ty. Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name) { CharUnits Align = CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlignment(Ty)); return CreateTempAlloca(Ty, Align, Name); } void CodeGenFunction::InitTempAlloca(Address Var, llvm::Value *Init) { auto *Alloca = Var.getPointer(); assert(isa(Alloca) || (isa(Alloca) && isa( cast(Alloca)->getPointerOperand()))); auto *Store = new llvm::StoreInst(Init, Alloca, /*volatile*/ false, Var.getAlignment().getAsAlign()); llvm::BasicBlock *Block = AllocaInsertPt->getParent(); Block->getInstList().insertAfter(AllocaInsertPt->getIterator(), Store); } Address CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) { CharUnits Align = getContext().getTypeAlignInChars(Ty); return CreateTempAlloca(ConvertType(Ty), Align, Name); } Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name, Address *Alloca) { // FIXME: Should we prefer the preferred type alignment here? 
return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca); } Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align, const Twine &Name, Address *Alloca) { Address Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name, /*ArraySize=*/nullptr, Alloca); if (Ty->isConstantMatrixType()) { auto *ArrayTy = cast(Result.getType()->getElementType()); auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(), ArrayTy->getNumElements()); Result = Address( Builder.CreateBitCast(Result.getPointer(), VectorTy->getPointerTo()), Result.getAlignment()); } return Result; } Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, CharUnits Align, const Twine &Name) { return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name); } Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, const Twine &Name) { return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty), Name); } /// EvaluateExprAsBool - Perform the usual unary conversions on the specified /// expression and compare the result against zero, returning an Int1Ty value. llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) { PGO.setCurrentStmt(E); if (const MemberPointerType *MPT = E->getType()->getAs()) { llvm::Value *MemPtr = EmitScalarExpr(E); return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT); } QualType BoolTy = getContext().BoolTy; SourceLocation Loc = E->getExprLoc(); if (!E->getType()->isAnyComplexType()) return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc); return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy, Loc); } /// EmitIgnoredExpr - Emit code to compute the specified expression, /// ignoring the result. void CodeGenFunction::EmitIgnoredExpr(const Expr *E) { if (E->isRValue()) return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true); // Just emit it as an l-value and drop the result. EmitLValue(E); } /// EmitAnyExpr - Emit code to compute the specified expression which /// can have any type. The result is returned as an RValue struct. /// If this is an aggregate expression, AggSlot indicates where the /// result should be returned. RValue CodeGenFunction::EmitAnyExpr(const Expr *E, AggValueSlot aggSlot, bool ignoreResult) { switch (getEvaluationKind(E->getType())) { case TEK_Scalar: return RValue::get(EmitScalarExpr(E, ignoreResult)); case TEK_Complex: return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult)); case TEK_Aggregate: if (!ignoreResult && aggSlot.isIgnored()) aggSlot = CreateAggTemp(E->getType(), "agg-temp"); EmitAggExpr(E, aggSlot); return aggSlot.asRValue(); } llvm_unreachable("bad evaluation kind"); } /// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will /// always be accessible even if no aggregate location is provided. RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) { AggValueSlot AggSlot = AggValueSlot::ignored(); if (hasAggregateEvaluationKind(E->getType())) AggSlot = CreateAggTemp(E->getType(), "agg.tmp"); return EmitAnyExpr(E, AggSlot); } /// EmitAnyExprToMem - Evaluate an expression into a given memory /// location. void CodeGenFunction::EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInit) { // FIXME: This function should take an LValue as an argument. 
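  // For reference, getEvaluationKind() classifies 'int', pointers and other
  // single-value types as TEK_Scalar, '_Complex float'/'_Complex double' as
  // TEK_Complex, and class/struct/union and array types as TEK_Aggregate; the
  // switch below dispatches on that classification.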
switch (getEvaluationKind(E->getType())) { case TEK_Complex: EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()), /*isInit*/ false); return; case TEK_Aggregate: { EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals, AggValueSlot::IsDestructed_t(IsInit), AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsAliased_t(!IsInit), AggValueSlot::MayOverlap)); return; } case TEK_Scalar: { RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false)); LValue LV = MakeAddrLValue(Location, E->getType()); EmitStoreThroughLValue(RV, LV); return; } } llvm_unreachable("bad evaluation kind"); } static void pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M, const Expr *E, Address ReferenceTemporary) { // Objective-C++ ARC: // If we are binding a reference to a temporary that has ownership, we // need to perform retain/release operations on the temporary. // // FIXME: This should be looking at E, not M. if (auto Lifetime = M->getType().getObjCLifetime()) { switch (Lifetime) { case Qualifiers::OCL_None: case Qualifiers::OCL_ExplicitNone: // Carry on to normal cleanup handling. break; case Qualifiers::OCL_Autoreleasing: // Nothing to do; cleaned up by an autorelease pool. return; case Qualifiers::OCL_Strong: case Qualifiers::OCL_Weak: switch (StorageDuration Duration = M->getStorageDuration()) { case SD_Static: // Note: we intentionally do not register a cleanup to release // the object on program termination. return; case SD_Thread: // FIXME: We should probably register a cleanup in this case. return; case SD_Automatic: case SD_FullExpression: CodeGenFunction::Destroyer *Destroy; CleanupKind CleanupKind; if (Lifetime == Qualifiers::OCL_Strong) { const ValueDecl *VD = M->getExtendingDecl(); bool Precise = VD && isa(VD) && VD->hasAttr(); CleanupKind = CGF.getARCCleanupKind(); Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise : &CodeGenFunction::destroyARCStrongImprecise; } else { // __weak objects always get EH cleanups; otherwise, exceptions // could cause really nasty crashes instead of mere leaks. CleanupKind = NormalAndEHCleanup; Destroy = &CodeGenFunction::destroyARCWeak; } if (Duration == SD_FullExpression) CGF.pushDestroy(CleanupKind, ReferenceTemporary, M->getType(), *Destroy, CleanupKind & EHCleanup); else CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary, M->getType(), *Destroy, CleanupKind & EHCleanup); return; case SD_Dynamic: llvm_unreachable("temporary cannot have dynamic storage duration"); } llvm_unreachable("unknown storage duration"); } } CXXDestructorDecl *ReferenceTemporaryDtor = nullptr; if (const RecordType *RT = E->getType()->getBaseElementTypeUnsafe()->getAs()) { // Get the destructor for the reference temporary. auto *ClassDecl = cast(RT->getDecl()); if (!ClassDecl->hasTrivialDestructor()) ReferenceTemporaryDtor = ClassDecl->getDestructor(); } if (!ReferenceTemporaryDtor) return; // Call the destructor for the temporary. 
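  // Illustrative examples (hypothetical type 'Widget'):
  //   const Widget &r = Widget();         // block scope: SD_Automatic, the
  //                                       // destructor becomes a cleanup.
  //   static const Widget &s = Widget();  // SD_Static: the destructor is
  //                                       // registered as a global dtor below
  //                                       // (e.g. via __cxa_atexit on Itanium).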
switch (M->getStorageDuration()) { case SD_Static: case SD_Thread: { llvm::FunctionCallee CleanupFn; llvm::Constant *CleanupArg; if (E->getType()->isArrayType()) { CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper( ReferenceTemporary, E->getType(), CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions, dyn_cast_or_null(M->getExtendingDecl())); CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy); } else { CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor( GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete)); CleanupArg = cast(ReferenceTemporary.getPointer()); } CGF.CGM.getCXXABI().registerGlobalDtor( CGF, *cast(M->getExtendingDecl()), CleanupFn, CleanupArg); break; } case SD_FullExpression: CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(), CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions); break; case SD_Automatic: CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(), CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions); break; case SD_Dynamic: llvm_unreachable("temporary cannot have dynamic storage duration"); } } static Address createReferenceTemporary(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M, const Expr *Inner, Address *Alloca = nullptr) { auto &TCG = CGF.getTargetHooks(); switch (M->getStorageDuration()) { case SD_FullExpression: case SD_Automatic: { // If we have a constant temporary array or record try to promote it into a // constant global under the same rules a normal constant would've been // promoted. This is easier on the optimizer and generally emits fewer // instructions. QualType Ty = Inner->getType(); if (CGF.CGM.getCodeGenOpts().MergeAllConstants && (Ty->isArrayType() || Ty->isRecordType()) && CGF.CGM.isTypeConstant(Ty, true)) if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) { if (auto AddrSpace = CGF.getTarget().getConstantAddressSpace()) { auto AS = AddrSpace.getValue(); auto *GV = new llvm::GlobalVariable( CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr, llvm::GlobalValue::NotThreadLocal, CGF.getContext().getTargetAddressSpace(AS)); CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty); GV->setAlignment(alignment.getAsAlign()); llvm::Constant *C = GV; if (AS != LangAS::Default) C = TCG.performAddrSpaceCast( CGF.CGM, GV, AS, LangAS::Default, GV->getValueType()->getPointerTo( CGF.getContext().getTargetAddressSpace(LangAS::Default))); // FIXME: Should we put the new global into a COMDAT? 
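  // Illustrative example (assumes -fmerge-all-constants): for
  //   const int (&r)[3] = {1, 2, 3};
  // the temporary is emitted as a private constant global instead of an
  // alloca plus stores, roughly:
  //   @.ref.tmp = private constant [3 x i32] [i32 1, i32 2, i32 3]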
return Address(C, alignment); } } return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca); } case SD_Thread: case SD_Static: return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner); case SD_Dynamic: llvm_unreachable("temporary can't have dynamic storage duration"); } llvm_unreachable("unknown storage duration"); } /// Helper method to check if the underlying ABI is AAPCS static bool isAAPCS(const TargetInfo &TargetInfo) { return TargetInfo.getABI().startswith("aapcs"); } LValue CodeGenFunction:: EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) { const Expr *E = M->getSubExpr(); assert((!M->getExtendingDecl() || !isa(M->getExtendingDecl()) || !cast(M->getExtendingDecl())->isARCPseudoStrong()) && "Reference should never be pseudo-strong!"); // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so // as that will cause the lifetime adjustment to be lost for ARC auto ownership = M->getType().getObjCLifetime(); if (ownership != Qualifiers::OCL_None && ownership != Qualifiers::OCL_ExplicitNone) { Address Object = createReferenceTemporary(*this, M, E); if (auto *Var = dyn_cast(Object.getPointer())) { Object = Address(llvm::ConstantExpr::getBitCast(Var, ConvertTypeForMem(E->getType()) ->getPointerTo(Object.getAddressSpace())), Object.getAlignment()); // createReferenceTemporary will promote the temporary to a global with a // constant initializer if it can. It can only do this to a value of // ARC-manageable type if the value is global and therefore "immune" to // ref-counting operations. Therefore we have no need to emit either a // dynamic initialization or a cleanup and we can just return the address // of the temporary. if (Var->hasInitializer()) return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl); Var->setInitializer(CGM.EmitNullConstant(E->getType())); } LValue RefTempDst = MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl); switch (getEvaluationKind(E->getType())) { default: llvm_unreachable("expected scalar or aggregate expression"); case TEK_Scalar: EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false); break; case TEK_Aggregate: { EmitAggExpr(E, AggValueSlot::forAddr(Object, E->getType().getQualifiers(), AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap)); break; } } pushTemporaryCleanup(*this, M, E, Object); return RefTempDst; } SmallVector CommaLHSs; SmallVector Adjustments; E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments); for (const auto &Ignored : CommaLHSs) EmitIgnoredExpr(Ignored); if (const auto *opaque = dyn_cast(E)) { if (opaque->getType()->isRecordType()) { assert(Adjustments.empty()); return EmitOpaqueValueLValue(opaque); } } // Create and initialize the reference temporary. Address Alloca = Address::invalid(); Address Object = createReferenceTemporary(*this, M, E, &Alloca); if (auto *Var = dyn_cast( Object.getPointer()->stripPointerCasts())) { Object = Address(llvm::ConstantExpr::getBitCast( cast(Object.getPointer()), ConvertTypeForMem(E->getType())->getPointerTo()), Object.getAlignment()); // If the temporary is a global and has a constant initializer or is a // constant temporary that we promoted to a global, we may have already // initialized it. 
if (!Var->hasInitializer()) { Var->setInitializer(CGM.EmitNullConstant(E->getType())); EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true); } } else { switch (M->getStorageDuration()) { case SD_Automatic: if (auto *Size = EmitLifetimeStart( CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()), Alloca.getPointer())) { pushCleanupAfterFullExpr(NormalEHLifetimeMarker, Alloca, Size); } break; case SD_FullExpression: { if (!ShouldEmitLifetimeMarkers) break; // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end // marker. Instead, start the lifetime of a conditional temporary earlier // so that it's unconditional. Don't do this with sanitizers which need // more precise lifetime marks. ConditionalEvaluation *OldConditional = nullptr; CGBuilderTy::InsertPoint OldIP; if (isInConditionalBranch() && !E->getType().isDestructedType() && !SanOpts.has(SanitizerKind::HWAddress) && !SanOpts.has(SanitizerKind::Memory) && !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) { OldConditional = OutermostConditional; OutermostConditional = nullptr; OldIP = Builder.saveIP(); llvm::BasicBlock *Block = OldConditional->getStartingBlock(); Builder.restoreIP(CGBuilderTy::InsertPoint( Block, llvm::BasicBlock::iterator(Block->back()))); } if (auto *Size = EmitLifetimeStart( CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()), Alloca.getPointer())) { pushFullExprCleanup(NormalEHLifetimeMarker, Alloca, Size); } if (OldConditional) { OutermostConditional = OldConditional; Builder.restoreIP(OldIP); } break; } default: break; } EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true); } pushTemporaryCleanup(*this, M, E, Object); // Perform derived-to-base casts and/or field accesses, to get from the // temporary object we created (and, potentially, for which we extended // the lifetime) to the subobject we're binding the reference to. for (unsigned I = Adjustments.size(); I != 0; --I) { SubobjectAdjustment &Adjustment = Adjustments[I-1]; switch (Adjustment.Kind) { case SubobjectAdjustment::DerivedToBaseAdjustment: Object = GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass, Adjustment.DerivedToBase.BasePath->path_begin(), Adjustment.DerivedToBase.BasePath->path_end(), /*NullCheckValue=*/ false, E->getExprLoc()); break; case SubobjectAdjustment::FieldAdjustment: { LValue LV = MakeAddrLValue(Object, E->getType(), AlignmentSource::Decl); LV = EmitLValueForField(LV, Adjustment.Field); assert(LV.isSimple() && "materialized temporary field is not a simple lvalue"); Object = LV.getAddress(*this); break; } case SubobjectAdjustment::MemberPointerAdjustment: { llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS); Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr, Adjustment.Ptr.MPT); break; } } } return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl); } RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) { // Emit the expression as an lvalue. LValue LV = EmitLValue(E); assert(LV.isSimple()); llvm::Value *Value = LV.getPointer(*this); if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) { // C++11 [dcl.ref]p5 (as amended by core issue 453): // If a glvalue to which a reference is directly bound designates neither // an existing object or function of an appropriate type nor a region of // storage of suitable size and alignment to contain an object of the // reference's type, the behavior is undefined. 
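  // Illustrative example: with -fsanitize=null, code such as
  //   int *p = nullptr;
  //   int &r = *p;
  // reaches the EmitTypeCheck call below with TCK_ReferenceBinding and is
  // diagnosed at run time rather than silently binding a null reference.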
QualType Ty = E->getType(); EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty); } return RValue::get(Value); } /// getAccessedFieldNo - Given an encoded value and a result number, return the /// input field number being accessed. unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts) { return cast(Elts->getAggregateElement(Idx)) ->getZExtValue(); } /// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h. static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low, llvm::Value *High) { llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL); llvm::Value *K47 = Builder.getInt64(47); llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul); llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0); llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul); llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0); return Builder.CreateMul(B1, KMul); } bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) { return TCK == TCK_DowncastPointer || TCK == TCK_Upcast || TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation; } bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) { CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); return (RD && RD->hasDefinition() && RD->isDynamicClass()) && (TCK == TCK_MemberAccess || TCK == TCK_MemberCall || TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference || TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation); } bool CodeGenFunction::sanitizePerformTypeCheck() const { return SanOpts.has(SanitizerKind::Null) | SanOpts.has(SanitizerKind::Alignment) | SanOpts.has(SanitizerKind::ObjectSize) | SanOpts.has(SanitizerKind::Vptr); } void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *Ptr, QualType Ty, CharUnits Alignment, SanitizerSet SkippedChecks, llvm::Value *ArraySize) { if (!sanitizePerformTypeCheck()) return; // Don't check pointers outside the default address space. The null check // isn't correct, the object-size check isn't supported by LLVM, and we can't // communicate the addresses to the runtime handler for the vptr check. if (Ptr->getType()->getPointerAddressSpace()) return; // Don't check pointers to volatile data. The behavior here is implementation- // defined. if (Ty.isVolatileQualified()) return; SanitizerScope SanScope(this); SmallVector, 3> Checks; llvm::BasicBlock *Done = nullptr; // Quickly determine whether we have a pointer to an alloca. It's possible // to skip null checks, and some alignment checks, for these pointers. This // can reduce compile-time significantly. auto PtrToAlloca = dyn_cast(Ptr->stripPointerCasts()); llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext()); llvm::Value *IsNonNull = nullptr; bool IsGuaranteedNonNull = SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca; bool AllowNullPointers = isNullPointerAllowed(TCK); if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) && !IsGuaranteedNonNull) { // The glvalue must not be an empty glvalue. IsNonNull = Builder.CreateIsNotNull(Ptr); // The IR builder can constant-fold the null check if the pointer points to // a constant. IsGuaranteedNonNull = IsNonNull == True; // Skip the null check if the pointer is known to be non-null. if (!IsGuaranteedNonNull) { if (AllowNullPointers) { // When performing pointer casts, it's OK if the value is null. // Skip the remaining checks in that case. 
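  // The branch emitted below has roughly this shape (value names are invented
  // for illustration):
  //   %nonnull = icmp ne i8* %ptr, null
  //   br i1 %nonnull, label %not.null, label %null
  // so a pointer that may legitimately be null for this check kind (e.g. a
  // pointer downcast or dynamic_cast) skips the remaining
  // size/alignment/vptr checks.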
Done = createBasicBlock("null"); llvm::BasicBlock *Rest = createBasicBlock("not.null"); Builder.CreateCondBr(IsNonNull, Rest, Done); EmitBlock(Rest); } else { Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null)); } } } if (SanOpts.has(SanitizerKind::ObjectSize) && !SkippedChecks.has(SanitizerKind::ObjectSize) && !Ty->isIncompleteType()) { uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity(); llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize); if (ArraySize) Size = Builder.CreateMul(Size, ArraySize); // Degenerate case: new X[0] does not need an objectsize check. llvm::Constant *ConstantSize = dyn_cast(Size); if (!ConstantSize || !ConstantSize->isNullValue()) { // The glvalue must refer to a large enough storage region. // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation // to check this. // FIXME: Get object address space llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy }; llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys); llvm::Value *Min = Builder.getFalse(); llvm::Value *NullIsUnknown = Builder.getFalse(); llvm::Value *Dynamic = Builder.getFalse(); llvm::Value *CastAddr = Builder.CreateBitCast(Ptr, Int8PtrTy); llvm::Value *LargeEnough = Builder.CreateICmpUGE( Builder.CreateCall(F, {CastAddr, Min, NullIsUnknown, Dynamic}), Size); Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize)); } } uint64_t AlignVal = 0; llvm::Value *PtrAsInt = nullptr; if (SanOpts.has(SanitizerKind::Alignment) && !SkippedChecks.has(SanitizerKind::Alignment)) { AlignVal = Alignment.getQuantity(); if (!Ty->isIncompleteType() && !AlignVal) AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr, /*ForPointeeType=*/true) .getQuantity(); // The glvalue must be suitably aligned. if (AlignVal > 1 && (!PtrToAlloca || PtrToAlloca->getAlignment() < AlignVal)) { PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy); llvm::Value *Align = Builder.CreateAnd( PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal - 1)); llvm::Value *Aligned = Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0)); if (Aligned != True) Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment)); } } if (Checks.size() > 0) { // Make sure we're not losing information. Alignment needs to be a power of // 2 assert(!AlignVal || (uint64_t)1 << llvm::Log2_64(AlignVal) == AlignVal); llvm::Constant *StaticData[] = { EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty), llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2_64(AlignVal) : 1), llvm::ConstantInt::get(Int8Ty, TCK)}; EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData, PtrAsInt ? PtrAsInt : Ptr); } // If possible, check that the vptr indicates that there is a subobject of // type Ty at offset zero within this object. // // C++11 [basic.life]p5,6: // [For storage which does not refer to an object within its lifetime] // The program has undefined behavior if: // -- the [pointer or glvalue] is used to access a non-static data member // or call a non-static member function if (SanOpts.has(SanitizerKind::Vptr) && !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) { // Ensure that the pointer is non-null before loading it. If there is no // compile-time guarantee, reuse the run-time null check or emit a new one. 
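  // Illustrative example: with -fsanitize=vptr, calling a member function
  // through a 'Base *' that actually points to an unrelated object fails the
  // hash-cache lookup below and reaches __ubsan_handle_dynamic_type_cache_miss,
  // which reports the dynamic type mismatch.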
if (!IsGuaranteedNonNull) { if (!IsNonNull) IsNonNull = Builder.CreateIsNotNull(Ptr); if (!Done) Done = createBasicBlock("vptr.null"); llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null"); Builder.CreateCondBr(IsNonNull, VptrNotNull, Done); EmitBlock(VptrNotNull); } // Compute a hash of the mangled name of the type. // // FIXME: This is not guaranteed to be deterministic! Move to a // fingerprinting mechanism once LLVM provides one. For the time // being the implementation happens to be deterministic. SmallString<64> MangledName; llvm::raw_svector_ostream Out(MangledName); CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(), Out); // Blacklist based on the mangled type. if (!CGM.getContext().getSanitizerBlacklist().isBlacklistedType( SanitizerKind::Vptr, Out.str())) { llvm::hash_code TypeHash = hash_value(Out.str()); // Load the vptr, and compute hash_16_bytes(TypeHash, vptr). llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash); llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0); Address VPtrAddr(Builder.CreateBitCast(Ptr, VPtrTy), getPointerAlign()); llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr); llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty); llvm::Value *Hash = emitHash16Bytes(Builder, Low, High); Hash = Builder.CreateTrunc(Hash, IntPtrTy); // Look the hash up in our cache. const int CacheSize = 128; llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize); llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable, "__ubsan_vptr_type_cache"); llvm::Value *Slot = Builder.CreateAnd(Hash, llvm::ConstantInt::get(IntPtrTy, CacheSize-1)); llvm::Value *Indices[] = { Builder.getInt32(0), Slot }; llvm::Value *CacheVal = Builder.CreateAlignedLoad(Builder.CreateInBoundsGEP(Cache, Indices), getPointerAlign()); // If the hash isn't in the cache, call a runtime handler to perform the // hard work of checking whether the vptr is for an object of the right // type. This will either fill in the cache and return, or produce a // diagnostic. llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash); llvm::Constant *StaticData[] = { EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty), CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()), llvm::ConstantInt::get(Int8Ty, TCK) }; llvm::Value *DynamicData[] = { Ptr, Hash }; EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr), SanitizerHandler::DynamicTypeCacheMiss, StaticData, DynamicData); } } if (Done) { Builder.CreateBr(Done); EmitBlock(Done); } } /// Determine whether this expression refers to a flexible array member in a /// struct. We disable array bounds checks for such members. static bool isFlexibleArrayMemberExpr(const Expr *E) { // For compatibility with existing code, we treat arrays of length 0 or // 1 as flexible array members. // FIXME: This is inconsistent with the warning code in SemaChecking. Unify // the two mechanisms. const ArrayType *AT = E->getType()->castAsArrayTypeUnsafe(); if (const auto *CAT = dyn_cast(AT)) { // FIXME: Sema doesn't treat [1] as a flexible array member if the bound // was produced by macro expansion. if (CAT->getSize().ugt(1)) return false; } else if (!isa(AT)) return false; E = E->IgnoreParens(); // A flexible array member must be the last member in the class. if (const auto *ME = dyn_cast(E)) { // FIXME: If the base type of the member expr is not FD->getParent(), // this should not be treated as a flexible array member access. 
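  // Illustrative examples: both of the following are treated as flexible array
  // members here, so indexing their trailing array is exempt from bounds checks:
  //   struct A { int n; int data[]; };   // C99 flexible array member
  //   struct B { int n; int data[1]; };  // pre-C99 idiom (length 0 or 1)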
if (const auto *FD = dyn_cast(ME->getMemberDecl())) { // FIXME: Sema doesn't treat a T[1] union member as a flexible array // member, only a T[0] or T[] member gets that treatment. if (FD->getParent()->isUnion()) return true; RecordDecl::field_iterator FI( DeclContext::decl_iterator(const_cast(FD))); return ++FI == FD->getParent()->field_end(); } } else if (const auto *IRE = dyn_cast(E)) { return IRE->getDecl()->getNextIvar() == nullptr; } return false; } llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E, QualType EltTy) { ASTContext &C = getContext(); uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity(); if (!EltSize) return nullptr; auto *ArrayDeclRef = dyn_cast(E->IgnoreParenImpCasts()); if (!ArrayDeclRef) return nullptr; auto *ParamDecl = dyn_cast(ArrayDeclRef->getDecl()); if (!ParamDecl) return nullptr; auto *POSAttr = ParamDecl->getAttr(); if (!POSAttr) return nullptr; // Don't load the size if it's a lower bound. int POSType = POSAttr->getType(); if (POSType != 0 && POSType != 1) return nullptr; // Find the implicit size parameter. auto PassedSizeIt = SizeArguments.find(ParamDecl); if (PassedSizeIt == SizeArguments.end()) return nullptr; const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second; assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable"); Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second; llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false, C.getSizeType(), E->getExprLoc()); llvm::Value *SizeOfElement = llvm::ConstantInt::get(SizeInBytes->getType(), EltSize); return Builder.CreateUDiv(SizeInBytes, SizeOfElement); } /// If Base is known to point to the start of an array, return the length of /// that array. Return 0 if the length cannot be determined. static llvm::Value *getArrayIndexingBound( CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType) { // For the vector indexing extension, the bound is the number of elements. if (const VectorType *VT = Base->getType()->getAs()) { IndexedType = Base->getType(); return CGF.Builder.getInt32(VT->getNumElements()); } Base = Base->IgnoreParens(); if (const auto *CE = dyn_cast(Base)) { if (CE->getCastKind() == CK_ArrayToPointerDecay && !isFlexibleArrayMemberExpr(CE->getSubExpr())) { IndexedType = CE->getSubExpr()->getType(); const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe(); if (const auto *CAT = dyn_cast(AT)) return CGF.Builder.getInt(CAT->getSize()); else if (const auto *VAT = dyn_cast(AT)) return CGF.getVLASize(VAT).NumElts; // Ignore pass_object_size here. It's not applicable on decayed pointers. 
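  // Illustrative example:
  //   void fill(int *buf __attribute__((pass_object_size(0))));
  // gives 'fill' an implicit size-in-bytes argument; LoadPassedObjectSize
  // (defined above, used just below) divides it by sizeof(int) so the result
  // can serve as an element bound for -fsanitize=array-bounds.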
} } QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0}; if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) { IndexedType = Base->getType(); return POS; } return nullptr; } void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index, QualType IndexType, bool Accessed) { assert(SanOpts.has(SanitizerKind::ArrayBounds) && "should not be called unless adding bounds checks"); SanitizerScope SanScope(this); QualType IndexedType; llvm::Value *Bound = getArrayIndexingBound(*this, Base, IndexedType); if (!Bound) return; bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType(); llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned); llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false); llvm::Constant *StaticData[] = { EmitCheckSourceLocation(E->getExprLoc()), EmitCheckTypeDescriptor(IndexedType), EmitCheckTypeDescriptor(IndexType) }; llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal) : Builder.CreateICmpULE(IndexVal, BoundVal); EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds), SanitizerHandler::OutOfBounds, StaticData, Index); } CodeGenFunction::ComplexPairTy CodeGenFunction:: EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre) { ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc()); llvm::Value *NextVal; if (isa(InVal.first->getType())) { uint64_t AmountVal = isInc ? 1 : -1; NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true); // Add the inc/dec to the real part. NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec"); } else { QualType ElemTy = E->getType()->castAs()->getElementType(); llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1); if (!isInc) FVal.changeSign(); NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal); // Add the inc/dec to the real part. NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec"); } ComplexPairTy IncVal(NextVal, InVal.second); // Store the updated result through the lvalue. EmitStoreOfComplex(IncVal, LV, /*init*/ false); if (getLangOpts().OpenMP) CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this, E->getSubExpr()); // If this is a postinc, return the value read from memory, otherwise use the // updated value. return isPre ? IncVal : InVal; } void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF) { // Bind VLAs in the cast type. if (CGF && E->getType()->isVariablyModifiedType()) CGF->EmitVariablyModifiedType(E->getType()); if (CGDebugInfo *DI = getModuleDebugInfo()) DI->EmitExplicitCastType(E->getType()); } //===----------------------------------------------------------------------===// // LValue Expression Emission //===----------------------------------------------------------------------===// /// EmitPointerWithAlignment - Given an expression of pointer type, try to /// derive a more accurate bound on the alignment of the pointer. Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo) { // We allow this with ObjC object pointers because of fragile ABIs. assert(E->getType()->isPointerType() || E->getType()->isObjCObjectPointerType()); E = E->IgnoreParens(); // Casts: if (const CastExpr *CE = dyn_cast(E)) { if (const auto *ECE = dyn_cast(CE)) CGM.EmitExplicitCastExprType(ECE, this); switch (CE->getCastKind()) { // Non-converting casts (but not C's implicit conversion from void*). 
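    // Illustrative example for the bit-cast case handled below:
    //   double d; int *p = (int *)&d;
    // '&d' carries declaration-based alignment 8, and because its alignment
    // source is the declaration, that alignment is kept rather than being
    // replaced by the 4-byte natural alignment of 'int *'.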
case CK_BitCast: case CK_NoOp: case CK_AddressSpaceConversion: if (auto PtrTy = CE->getSubExpr()->getType()->getAs()) { if (PtrTy->getPointeeType()->isVoidType()) break; LValueBaseInfo InnerBaseInfo; TBAAAccessInfo InnerTBAAInfo; Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo); if (BaseInfo) *BaseInfo = InnerBaseInfo; if (TBAAInfo) *TBAAInfo = InnerTBAAInfo; if (isa(CE)) { LValueBaseInfo TargetTypeBaseInfo; TBAAAccessInfo TargetTypeTBAAInfo; CharUnits Align = CGM.getNaturalPointeeTypeAlignment( E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo); if (TBAAInfo) *TBAAInfo = CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo); // If the source l-value is opaque, honor the alignment of the // casted-to type. if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) { if (BaseInfo) BaseInfo->mergeForCast(TargetTypeBaseInfo); Addr = Address(Addr.getPointer(), Align); } } if (SanOpts.has(SanitizerKind::CFIUnrelatedCast) && CE->getCastKind() == CK_BitCast) { if (auto PT = E->getType()->getAs()) EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr.getPointer(), /*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast, CE->getBeginLoc()); } return CE->getCastKind() != CK_AddressSpaceConversion ? Builder.CreateBitCast(Addr, ConvertType(E->getType())) : Builder.CreateAddrSpaceCast(Addr, ConvertType(E->getType())); } break; // Array-to-pointer decay. case CK_ArrayToPointerDecay: return EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo); // Derived-to-base conversions. case CK_UncheckedDerivedToBase: case CK_DerivedToBase: { // TODO: Support accesses to members of base classes in TBAA. For now, we // conservatively pretend that the complete object is of the base class // type. if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(E->getType()); Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), BaseInfo); auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl(); return GetAddressOfBaseClass(Addr, Derived, CE->path_begin(), CE->path_end(), ShouldNullCheckClassCastValue(CE), CE->getExprLoc()); } // TODO: Is there any reason to treat base-to-derived conversions // specially? default: break; } } // Unary &. if (const UnaryOperator *UO = dyn_cast(E)) { if (UO->getOpcode() == UO_AddrOf) { LValue LV = EmitLValue(UO->getSubExpr()); if (BaseInfo) *BaseInfo = LV.getBaseInfo(); if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo(); return LV.getAddress(*this); } } // TODO: conditional operators, comma. // Otherwise, use the alignment of the type. CharUnits Align = CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo); return Address(EmitScalarExpr(E), Align); } +llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) { + llvm::Value *V = RV.getScalarVal(); + if (auto MPT = T->getAs()) + return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT); + return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType())); +} + RValue CodeGenFunction::GetUndefRValue(QualType Ty) { if (Ty->isVoidType()) return RValue::get(nullptr); switch (getEvaluationKind(Ty)) { case TEK_Complex: { llvm::Type *EltTy = ConvertType(Ty->castAs()->getElementType()); llvm::Value *U = llvm::UndefValue::get(EltTy); return RValue::getComplex(std::make_pair(U, U)); } // If this is a use of an undefined aggregate type, the aggregate must have an // identifiable address. Just because the contents of the value are undefined // doesn't mean that the address can't be taken and compared. 
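// Context for EmitNonNullRValueCheck above (illustrative): member pointers
// cannot be tested with a plain comparison against zero; under the Itanium
// C++ ABI a null pointer-to-data-member is encoded as -1, so the test is
// delegated to CGCXXABI::EmitMemberPointerIsNotNull, while ordinary scalars
// reduce to an 'icmp ne %v, null'-style comparison.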
case TEK_Aggregate: { Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp"); return RValue::getAggregate(DestPtr); } case TEK_Scalar: return RValue::get(llvm::UndefValue::get(ConvertType(Ty))); } llvm_unreachable("bad evaluation kind"); } RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E, const char *Name) { ErrorUnsupported(E, Name); return GetUndefRValue(E->getType()); } LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E, const char *Name) { ErrorUnsupported(E, Name); llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType())); return MakeAddrLValue(Address(llvm::UndefValue::get(Ty), CharUnits::One()), E->getType()); } bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) { const Expr *Base = Obj; while (!isa(Base)) { // The result of a dynamic_cast can be null. if (isa(Base)) return false; if (const auto *CE = dyn_cast(Base)) { Base = CE->getSubExpr(); } else if (const auto *PE = dyn_cast(Base)) { Base = PE->getSubExpr(); } else if (const auto *UO = dyn_cast(Base)) { if (UO->getOpcode() == UO_Extension) Base = UO->getSubExpr(); else return false; } else { return false; } } return true; } LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) { LValue LV; if (SanOpts.has(SanitizerKind::ArrayBounds) && isa(E)) LV = EmitArraySubscriptExpr(cast(E), /*Accessed*/true); else LV = EmitLValue(E); if (!isa(E) && !LV.isBitField() && LV.isSimple()) { SanitizerSet SkippedChecks; if (const auto *ME = dyn_cast(E)) { bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase()); if (IsBaseCXXThis) SkippedChecks.set(SanitizerKind::Alignment, true); if (IsBaseCXXThis || isa(ME->getBase())) SkippedChecks.set(SanitizerKind::Null, true); } EmitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(*this), E->getType(), LV.getAlignment(), SkippedChecks); } return LV; } /// EmitLValue - Emit code to compute a designator that specifies the location /// of the expression. /// /// This can return one of two things: a simple address or a bitfield reference. /// In either case, the LLVM Value* in the LValue structure is guaranteed to be /// an LLVM pointer type. /// /// If this returns a bitfield reference, nothing about the pointee type of the /// LLVM value is known: For example, it may not be a pointer to an integer. /// /// If this returns a normal address, and if the lvalue's C type is fixed size, /// this method guarantees that the returned pointer type will point to an LLVM /// type of the same size of the lvalue's type. If the lvalue has a variable /// length type, this is not possible. 
/// LValue CodeGenFunction::EmitLValue(const Expr *E) { ApplyDebugLocation DL(*this, E); switch (E->getStmtClass()) { default: return EmitUnsupportedLValue(E, "l-value expression"); case Expr::ObjCPropertyRefExprClass: llvm_unreachable("cannot emit a property reference directly"); case Expr::ObjCSelectorExprClass: return EmitObjCSelectorLValue(cast(E)); case Expr::ObjCIsaExprClass: return EmitObjCIsaExpr(cast(E)); case Expr::BinaryOperatorClass: return EmitBinaryOperatorLValue(cast(E)); case Expr::CompoundAssignOperatorClass: { QualType Ty = E->getType(); if (const AtomicType *AT = Ty->getAs()) Ty = AT->getValueType(); if (!Ty->isAnyComplexType()) return EmitCompoundAssignmentLValue(cast(E)); return EmitComplexCompoundAssignmentLValue(cast(E)); } case Expr::CallExprClass: case Expr::CXXMemberCallExprClass: case Expr::CXXOperatorCallExprClass: case Expr::UserDefinedLiteralClass: return EmitCallExprLValue(cast(E)); case Expr::CXXRewrittenBinaryOperatorClass: return EmitLValue(cast(E)->getSemanticForm()); case Expr::VAArgExprClass: return EmitVAArgExprLValue(cast(E)); case Expr::DeclRefExprClass: return EmitDeclRefLValue(cast(E)); case Expr::ConstantExprClass: { const ConstantExpr *CE = cast(E); if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) { QualType RetType = cast(CE->getSubExpr()->IgnoreImplicit()) ->getCallReturnType(getContext()); return MakeNaturalAlignAddrLValue(Result, RetType); } return EmitLValue(cast(E)->getSubExpr()); } case Expr::ParenExprClass: return EmitLValue(cast(E)->getSubExpr()); case Expr::GenericSelectionExprClass: return EmitLValue(cast(E)->getResultExpr()); case Expr::PredefinedExprClass: return EmitPredefinedLValue(cast(E)); case Expr::StringLiteralClass: return EmitStringLiteralLValue(cast(E)); case Expr::ObjCEncodeExprClass: return EmitObjCEncodeExprLValue(cast(E)); case Expr::PseudoObjectExprClass: return EmitPseudoObjectLValue(cast(E)); case Expr::InitListExprClass: return EmitInitListLValue(cast(E)); case Expr::CXXTemporaryObjectExprClass: case Expr::CXXConstructExprClass: return EmitCXXConstructLValue(cast(E)); case Expr::CXXBindTemporaryExprClass: return EmitCXXBindTemporaryLValue(cast(E)); case Expr::CXXUuidofExprClass: return EmitCXXUuidofLValue(cast(E)); case Expr::LambdaExprClass: return EmitAggExprToLValue(E); case Expr::ExprWithCleanupsClass: { const auto *cleanups = cast(E); RunCleanupsScope Scope(*this); LValue LV = EmitLValue(cleanups->getSubExpr()); if (LV.isSimple()) { // Defend against branches out of gnu statement expressions surrounded by // cleanups. llvm::Value *V = LV.getPointer(*this); Scope.ForceCleanup({&V}); return LValue::MakeAddr(Address(V, LV.getAlignment()), LV.getType(), getContext(), LV.getBaseInfo(), LV.getTBAAInfo()); } // FIXME: Is it possible to create an ExprWithCleanups that produces a // bitfield lvalue or some other non-simple lvalue? 
return LV; } case Expr::CXXDefaultArgExprClass: { auto *DAE = cast(E); CXXDefaultArgExprScope Scope(*this, DAE); return EmitLValue(DAE->getExpr()); } case Expr::CXXDefaultInitExprClass: { auto *DIE = cast(E); CXXDefaultInitExprScope Scope(*this, DIE); return EmitLValue(DIE->getExpr()); } case Expr::CXXTypeidExprClass: return EmitCXXTypeidLValue(cast(E)); case Expr::ObjCMessageExprClass: return EmitObjCMessageExprLValue(cast(E)); case Expr::ObjCIvarRefExprClass: return EmitObjCIvarRefLValue(cast(E)); case Expr::StmtExprClass: return EmitStmtExprLValue(cast(E)); case Expr::UnaryOperatorClass: return EmitUnaryOpLValue(cast(E)); case Expr::ArraySubscriptExprClass: return EmitArraySubscriptExpr(cast(E)); case Expr::MatrixSubscriptExprClass: return EmitMatrixSubscriptExpr(cast(E)); case Expr::OMPArraySectionExprClass: return EmitOMPArraySectionExpr(cast(E)); case Expr::ExtVectorElementExprClass: return EmitExtVectorElementExpr(cast(E)); case Expr::MemberExprClass: return EmitMemberExpr(cast(E)); case Expr::CompoundLiteralExprClass: return EmitCompoundLiteralLValue(cast(E)); case Expr::ConditionalOperatorClass: return EmitConditionalOperatorLValue(cast(E)); case Expr::BinaryConditionalOperatorClass: return EmitConditionalOperatorLValue(cast(E)); case Expr::ChooseExprClass: return EmitLValue(cast(E)->getChosenSubExpr()); case Expr::OpaqueValueExprClass: return EmitOpaqueValueLValue(cast(E)); case Expr::SubstNonTypeTemplateParmExprClass: return EmitLValue(cast(E)->getReplacement()); case Expr::ImplicitCastExprClass: case Expr::CStyleCastExprClass: case Expr::CXXFunctionalCastExprClass: case Expr::CXXStaticCastExprClass: case Expr::CXXDynamicCastExprClass: case Expr::CXXReinterpretCastExprClass: case Expr::CXXConstCastExprClass: case Expr::CXXAddrspaceCastExprClass: case Expr::ObjCBridgedCastExprClass: return EmitCastLValue(cast(E)); case Expr::MaterializeTemporaryExprClass: return EmitMaterializeTemporaryExpr(cast(E)); case Expr::CoawaitExprClass: return EmitCoawaitLValue(cast(E)); case Expr::CoyieldExprClass: return EmitCoyieldLValue(cast(E)); } } /// Given an object of the given canonical type, can we safely copy a /// value out of it based on its initializer? static bool isConstantEmittableObjectType(QualType type) { assert(type.isCanonical()); assert(!type->isReferenceType()); // Must be const-qualified but non-volatile. Qualifiers qs = type.getLocalQualifiers(); if (!qs.hasConst() || qs.hasVolatile()) return false; // Otherwise, all object types satisfy this except C++ classes with // mutable subobjects or non-trivial copy/destroy behavior. if (const auto *RT = dyn_cast(type)) if (const auto *RD = dyn_cast(RT->getDecl())) if (RD->hasMutableFields() || !RD->isTrivial()) return false; return true; } /// Can we constant-emit a load of a reference to a variable of the /// given type? This is different from predicates like /// Decl::mightBeUsableInConstantExpressions because we do want it to apply /// in situations that don't necessarily satisfy the language's rules /// for this (e.g. C++'s ODR-use rules). For example, we want to able /// to do this with const float variables even if those variables /// aren't marked 'constexpr'. 
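// Illustrative example of the situation described above: in
//   void f() { const int N = 4; auto g = [] { return N; }; }
// the lambda names 'N' without odr-using it, so codegen can fold the
// reference to the constant 4 instead of requiring a capture or a global
// copy of 'N'.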
enum ConstantEmissionKind { CEK_None, CEK_AsReferenceOnly, CEK_AsValueOrReference, CEK_AsValueOnly }; static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) { type = type.getCanonicalType(); if (const auto *ref = dyn_cast(type)) { if (isConstantEmittableObjectType(ref->getPointeeType())) return CEK_AsValueOrReference; return CEK_AsReferenceOnly; } if (isConstantEmittableObjectType(type)) return CEK_AsValueOnly; return CEK_None; } /// Try to emit a reference to the given value without producing it as /// an l-value. This is just an optimization, but it avoids us needing /// to emit global copies of variables if they're named without triggering /// a formal use in a context where we can't emit a direct reference to them, /// for instance if a block or lambda or a member of a local class uses a /// const int variable or constexpr variable from an enclosing function. CodeGenFunction::ConstantEmission CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) { ValueDecl *value = refExpr->getDecl(); // The value needs to be an enum constant or a constant variable. ConstantEmissionKind CEK; if (isa(value)) { CEK = CEK_None; } else if (auto *var = dyn_cast(value)) { CEK = checkVarTypeForConstantEmission(var->getType()); } else if (isa(value)) { CEK = CEK_AsValueOnly; } else { CEK = CEK_None; } if (CEK == CEK_None) return ConstantEmission(); Expr::EvalResult result; bool resultIsReference; QualType resultType; // It's best to evaluate all the way as an r-value if that's permitted. if (CEK != CEK_AsReferenceOnly && refExpr->EvaluateAsRValue(result, getContext())) { resultIsReference = false; resultType = refExpr->getType(); // Otherwise, try to evaluate as an l-value. } else if (CEK != CEK_AsValueOnly && refExpr->EvaluateAsLValue(result, getContext())) { resultIsReference = true; resultType = value->getType(); // Failure. } else { return ConstantEmission(); } // In any case, if the initializer has side-effects, abandon ship. if (result.HasSideEffects) return ConstantEmission(); // Emit as a constant. auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(), result.Val, resultType); // Make sure we emit a debug reference to the global variable. // This should probably fire even for if (isa(value)) { if (!getContext().DeclMustBeEmitted(cast(value))) EmitDeclRefExprDbgValue(refExpr, result.Val); } else { assert(isa(value)); EmitDeclRefExprDbgValue(refExpr, result.Val); } // If we emitted a reference constant, we need to dereference that. if (resultIsReference) return ConstantEmission::forReference(C); return ConstantEmission::forValue(C); } static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF, const MemberExpr *ME) { if (auto *VD = dyn_cast(ME->getMemberDecl())) { // Try to emit static variable member expressions as DREs. 
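    // Illustrative example:
    //   struct S { static const int N = 4; };
    //   int f(S s) { return s.N; }
    // 's.N' is a MemberExpr, but it names a static data member, so it can be
    // rewritten as a DeclRefExpr to S::N and then folded to the constant 4 by
    // tryEmitAsConstant.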
return DeclRefExpr::Create( CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD, /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(), ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse()); } return nullptr; } CodeGenFunction::ConstantEmission CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) { if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME)) return tryEmitAsConstant(DRE); return ConstantEmission(); } llvm::Value *CodeGenFunction::emitScalarConstant( const CodeGenFunction::ConstantEmission &Constant, Expr *E) { assert(Constant && "not a constant"); if (Constant.isReference()) return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E), E->getExprLoc()) .getScalarVal(); return Constant.getValue(); } llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue, SourceLocation Loc) { return EmitLoadOfScalar(lvalue.getAddress(*this), lvalue.isVolatile(), lvalue.getType(), Loc, lvalue.getBaseInfo(), lvalue.getTBAAInfo(), lvalue.isNontemporal()); } static bool hasBooleanRepresentation(QualType Ty) { if (Ty->isBooleanType()) return true; if (const EnumType *ET = Ty->getAs()) return ET->getDecl()->getIntegerType()->isBooleanType(); if (const AtomicType *AT = Ty->getAs()) return hasBooleanRepresentation(AT->getValueType()); return false; } static bool getRangeForType(CodeGenFunction &CGF, QualType Ty, llvm::APInt &Min, llvm::APInt &End, bool StrictEnums, bool IsBool) { const EnumType *ET = Ty->getAs(); bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums && ET && !ET->getDecl()->isFixed(); if (!IsBool && !IsRegularCPlusPlusEnum) return false; if (IsBool) { Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0); End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2); } else { const EnumDecl *ED = ET->getDecl(); llvm::Type *LTy = CGF.ConvertTypeForMem(ED->getIntegerType()); unsigned Bitwidth = LTy->getScalarSizeInBits(); unsigned NumNegativeBits = ED->getNumNegativeBits(); unsigned NumPositiveBits = ED->getNumPositiveBits(); if (NumNegativeBits) { unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1); assert(NumBits <= Bitwidth); End = llvm::APInt(Bitwidth, 1) << (NumBits - 1); Min = -End; } else { assert(NumPositiveBits <= Bitwidth); End = llvm::APInt(Bitwidth, 1) << NumPositiveBits; Min = llvm::APInt(Bitwidth, 0); } } return true; } llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) { llvm::APInt Min, End; if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums, hasBooleanRepresentation(Ty))) return nullptr; llvm::MDBuilder MDHelper(getLLVMContext()); return MDHelper.createRange(Min, End); } bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty, SourceLocation Loc) { bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool); bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum); if (!HasBoolCheck && !HasEnumCheck) return false; bool IsBool = hasBooleanRepresentation(Ty) || NSAPI(CGM.getContext()).isObjCBOOLType(Ty); bool NeedsBoolCheck = HasBoolCheck && IsBool; bool NeedsEnumCheck = HasEnumCheck && Ty->getAs(); if (!NeedsBoolCheck && !NeedsEnumCheck) return false; // Single-bit booleans don't need to be checked. Special-case this to avoid // a bit width mismatch when handling bitfield values. This is handled by // EmitFromMemory for the non-bitfield case. 
if (IsBool && cast(Value->getType())->getBitWidth() == 1) return false; llvm::APInt Min, End; if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool)) return true; auto &Ctx = getLLVMContext(); SanitizerScope SanScope(this); llvm::Value *Check; --End; if (!Min) { Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End)); } else { llvm::Value *Upper = Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End)); llvm::Value *Lower = Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min)); Check = Builder.CreateAnd(Upper, Lower); } llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty)}; SanitizerMask Kind = NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool; EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue, StaticArgs, EmitCheckValue(Value)); return true; } llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo, bool isNontemporal) { if (!CGM.getCodeGenOpts().PreserveVec3Type) { // For better performance, handle vector loads differently. if (Ty->isVectorType()) { const llvm::Type *EltTy = Addr.getElementType(); const auto *VTy = cast(EltTy); // Handle vectors of size 3 like size 4 for better performance. if (VTy->getNumElements() == 3) { // Bitcast to vec4 type. auto *vec4Ty = llvm::FixedVectorType::get(VTy->getElementType(), 4); Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4"); // Now load value. llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4"); // Shuffle vector to get vec3. V = Builder.CreateShuffleVector(V, llvm::UndefValue::get(vec4Ty), ArrayRef{0, 1, 2}, "extractVec"); return EmitFromMemory(V, Ty); } } } // Atomic operations have to be done on integral types. LValue AtomicLValue = LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo); if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) { return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal(); } llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile); if (isNontemporal) { llvm::MDNode *Node = llvm::MDNode::get( Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1))); Load->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node); } CGM.DecorateInstructionWithTBAA(Load, TBAAInfo); if (EmitScalarRangeCheck(Load, Ty, Loc)) { // In order to prevent the optimizer from throwing away the check, don't // attach range metadata to the load. } else if (CGM.getCodeGenOpts().OptimizationLevel > 0) if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo); return EmitFromMemory(Load, Ty); } llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) { // Bool has a different representation in memory than in registers. if (hasBooleanRepresentation(Ty)) { // This should really always be an i1, but sometimes it's already // an i8, and it's awkward to track those cases down. if (Value->getType()->isIntegerTy(1)) return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool"); assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) && "wrong value rep of bool"); } return Value; } llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) { // Bool has a different representation in memory than in registers. 
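  // For reference: a C/C++ 'bool' is modelled as i1 in registers but occupies
  // a whole byte in memory, so EmitToMemory above emits 'zext i1 ... to i8'
  // ("frombool") and the code below emits the matching 'trunc ... to i1'
  // ("tobool").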
if (hasBooleanRepresentation(Ty)) { assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) && "wrong value rep of bool"); return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool"); } return Value; } // Convert the pointer of \p Addr to a pointer to a vector (the value type of // MatrixType), if it points to a array (the memory type of MatrixType). static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF, bool IsVector = true) { auto *ArrayTy = dyn_cast( cast(Addr.getPointer()->getType())->getElementType()); if (ArrayTy && IsVector) { auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(), ArrayTy->getNumElements()); return Address(CGF.Builder.CreateElementBitCast(Addr, VectorTy)); } auto *VectorTy = dyn_cast( cast(Addr.getPointer()->getType())->getElementType()); if (VectorTy && !IsVector) { auto *ArrayTy = llvm::ArrayType::get( VectorTy->getElementType(), cast(VectorTy)->getNumElements()); return Address(CGF.Builder.CreateElementBitCast(Addr, ArrayTy)); } return Addr; } // Emit a store of a matrix LValue. This may require casting the original // pointer to memory address (ArrayType) to a pointer to the value type // (VectorType). static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue, bool isInit, CodeGenFunction &CGF) { Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(CGF), CGF, value->getType()->isVectorTy()); CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(), lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal()); } void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo, bool isInit, bool isNontemporal) { if (!CGM.getCodeGenOpts().PreserveVec3Type) { // Handle vectors differently to get better performance. if (Ty->isVectorType()) { llvm::Type *SrcTy = Value->getType(); auto *VecTy = dyn_cast(SrcTy); // Handle vec3 special. if (VecTy && cast(VecTy)->getNumElements() == 3) { // Our source is a vec3, do a shuffle vector to make it a vec4. Value = Builder.CreateShuffleVector(Value, llvm::UndefValue::get(VecTy), ArrayRef{0, 1, 2, -1}, "extractVec"); SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4); } if (Addr.getElementType() != SrcTy) { Addr = Builder.CreateElementBitCast(Addr, SrcTy, "storetmp"); } } } Value = EmitToMemory(Value, Ty); LValue AtomicLValue = LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo); if (Ty->isAtomicType() || (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) { EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit); return; } llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile); if (isNontemporal) { llvm::MDNode *Node = llvm::MDNode::get(Store->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1))); Store->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node); } CGM.DecorateInstructionWithTBAA(Store, TBAAInfo); } void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit) { if (lvalue.getType()->isConstantMatrixType()) { EmitStoreOfMatrixScalar(value, lvalue, isInit, *this); return; } EmitStoreOfScalar(value, lvalue.getAddress(*this), lvalue.isVolatile(), lvalue.getType(), lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal()); } // Emit a load of a LValue of matrix type. This may require casting the pointer // to memory address (ArrayType) to a pointer to the value type (VectorType). 
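// Illustrative example: a value of type 'float __attribute__((matrix_type(2, 2)))'
// is stored in memory as [4 x float] but operated on as <4 x float>, which is
// why these helpers bitcast between the array and vector pointer types.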
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc, CodeGenFunction &CGF) { assert(LV.getType()->isConstantMatrixType()); Address Addr = MaybeConvertMatrixAddress(LV.getAddress(CGF), CGF); LV.setAddress(Addr); return RValue::get(CGF.EmitLoadOfScalar(LV, Loc)); } /// EmitLoadOfLValue - Given an expression that represents a value lvalue, this /// method emits the address of the lvalue, then loads the result as an rvalue, /// returning the rvalue. RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) { if (LV.isObjCWeak()) { // load of a __weak object. Address AddrWeakObj = LV.getAddress(*this); return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this, AddrWeakObj)); } if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) { // In MRC mode, we do a load+autorelease. if (!getLangOpts().ObjCAutoRefCount) { return RValue::get(EmitARCLoadWeak(LV.getAddress(*this))); } // In ARC mode, we load retained and then consume the value. llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress(*this)); Object = EmitObjCConsumeObject(LV.getType(), Object); return RValue::get(Object); } if (LV.isSimple()) { assert(!LV.getType()->isFunctionType()); if (LV.getType()->isConstantMatrixType()) return EmitLoadOfMatrixLValue(LV, Loc, *this); // Everything needs a load. return RValue::get(EmitLoadOfScalar(LV, Loc)); } if (LV.isVectorElt()) { llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(), LV.isVolatileQualified()); return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(), "vecext")); } // If this is a reference to a subset of the elements of a vector, either // shuffle the input or extract/insert them as appropriate. if (LV.isExtVectorElt()) { return EmitLoadOfExtVectorElementLValue(LV); } // Global Register variables always invoke intrinsics if (LV.isGlobalReg()) return EmitLoadOfGlobalRegLValue(LV); if (LV.isMatrixElt()) { llvm::LoadInst *Load = Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified()); return RValue::get( Builder.CreateExtractElement(Load, LV.getMatrixIdx(), "matrixext")); } assert(LV.isBitField() && "Unknown LValue type!"); return EmitLoadOfBitfieldLValue(LV, Loc); } RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc) { const CGBitFieldInfo &Info = LV.getBitFieldInfo(); // Get the output type. llvm::Type *ResLTy = ConvertType(LV.getType()); Address Ptr = LV.getBitFieldAddress(); llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load"); if (Info.IsSigned) { assert(static_cast(Info.Offset + Info.Size) <= Info.StorageSize); unsigned HighBits = Info.StorageSize - Info.Offset - Info.Size; if (HighBits) Val = Builder.CreateShl(Val, HighBits, "bf.shl"); if (Info.Offset + HighBits) Val = Builder.CreateAShr(Val, Info.Offset + HighBits, "bf.ashr"); } else { if (Info.Offset) Val = Builder.CreateLShr(Val, Info.Offset, "bf.lshr"); if (static_cast(Info.Offset) + Info.Size < Info.StorageSize) Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(Info.StorageSize, Info.Size), "bf.clear"); } Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast"); EmitScalarRangeCheck(Val, LV.getType(), Loc); return RValue::get(Val); } // If this is a reference to a subset of the elements of a vector, create an // appropriate shufflevector. 
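// For example, given 'typedef float float4 __attribute__((ext_vector_type(4)))',
// an access such as 'v.xz' loads the whole <4 x float> and shuffles it with
// mask <0, 2>, while a single-element access like 'v.y' becomes a plain
// extractelement with index 1.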
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
                                        LV.isVolatileQualified());

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be extracting
  // a single element.  Just codegen as an extractelement.
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
  }

  // Always use shuffle vector to try to retain the original program structure
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<int, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i)
    Mask.push_back(getAccessedFieldNo(i, Elts));

  Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
                                    Mask);
  return RValue::get(Vec);
}

/// Generates lvalue for partial ext_vector access.
Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
  Address VectorAddress = LV.getExtVectorAddress();
  QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
  llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);

  Address CastToPointerElement =
    Builder.CreateElementBitCast(VectorAddress, VectorElementTy,
                                 "conv.ptr.element");

  const llvm::Constant *Elts = LV.getExtVectorElts();
  unsigned ix = getAccessedFieldNo(0, Elts);

  Address VectorBasePtrPlusIx =
    Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
                                   "vector.elt");

  return VectorBasePtrPlusIx;
}

/// Loads of global named registers are always calls to intrinsics.
RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
  assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
         "Bad type for register variable");
  llvm::MDNode *RegName = cast<llvm::MDNode>(
      cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());

  // We accept integer and pointer types only
  llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
  llvm::Type *Ty = OrigTy;
  if (OrigTy->isPointerTy())
    Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
  llvm::Type *Types[] = { Ty };

  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
  llvm::Value *Call = Builder.CreateCall(
      F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
  if (OrigTy->isPointerTy())
    Call = Builder.CreateIntToPtr(Call, OrigTy);
  return RValue::get(Call);
}

/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             bool isInit) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
                                            Dst.isVolatileQualified());
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      Builder.CreateStore(Vec, Dst.getVectorAddress(),
                          Dst.isVolatileQualified());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
if (Dst.isExtVectorElt()) return EmitStoreThroughExtVectorComponentLValue(Src, Dst); if (Dst.isGlobalReg()) return EmitStoreThroughGlobalRegLValue(Src, Dst); if (Dst.isMatrixElt()) { llvm::Value *Vec = Builder.CreateLoad(Dst.getMatrixAddress()); Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(), Dst.getMatrixIdx(), "matins"); Builder.CreateStore(Vec, Dst.getMatrixAddress(), Dst.isVolatileQualified()); return; } assert(Dst.isBitField() && "Unknown LValue type"); return EmitStoreThroughBitfieldLValue(Src, Dst); } // There's special magic for assigning into an ARC-qualified l-value. if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) { switch (Lifetime) { case Qualifiers::OCL_None: llvm_unreachable("present but none"); case Qualifiers::OCL_ExplicitNone: // nothing special break; case Qualifiers::OCL_Strong: if (isInit) { Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal())); break; } EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true); return; case Qualifiers::OCL_Weak: if (isInit) // Initialize and then skip the primitive store. EmitARCInitWeak(Dst.getAddress(*this), Src.getScalarVal()); else EmitARCStoreWeak(Dst.getAddress(*this), Src.getScalarVal(), /*ignore*/ true); return; case Qualifiers::OCL_Autoreleasing: Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(), Src.getScalarVal())); // fall into the normal path break; } } if (Dst.isObjCWeak() && !Dst.isNonGC()) { // load of a __weak object. Address LvalueDst = Dst.getAddress(*this); llvm::Value *src = Src.getScalarVal(); CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst); return; } if (Dst.isObjCStrong() && !Dst.isNonGC()) { // load of a __strong object. Address LvalueDst = Dst.getAddress(*this); llvm::Value *src = Src.getScalarVal(); if (Dst.isObjCIvar()) { assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL"); llvm::Type *ResultType = IntPtrTy; Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp()); llvm::Value *RHS = dst.getPointer(); RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast"); llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.getPointer(), ResultType, "sub.ptr.lhs.cast"); llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset"); CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween); } else if (Dst.isGlobalObjCRef()) { CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst, Dst.isThreadLocalRef()); } else CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst); return; } assert(Src.isScalar() && "Can't emit an agg store with this method"); EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit); } void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result) { const CGBitFieldInfo &Info = Dst.getBitFieldInfo(); llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType()); Address Ptr = Dst.getBitFieldAddress(); // Get the source value, truncated to the width of the bit-field. llvm::Value *SrcVal = Src.getScalarVal(); // Cast the source to the storage type and shift it into place. SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(), /*isSigned=*/false); llvm::Value *MaskedVal = SrcVal; // See if there are other bits in the bitfield's storage we'll need to load // and mask together with source before storing. if (Info.StorageSize != Info.Size) { assert(Info.StorageSize > Info.Size && "Invalid bitfield size."); llvm::Value *Val = Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load"); // Mask the source value as needed. 
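    // For example, for 'unsigned f : 5' at bit offset 3 within a 32-bit
    // storage unit, the new value is masked to its low 5 bits, shifted left
    // by 3, the old storage value is ANDed with ~0xF8 to clear bits [3, 8),
    // and the two halves are ORed together before the single store below.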
if (!hasBooleanRepresentation(Dst.getType())) SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(Info.StorageSize, Info.Size), "bf.value"); MaskedVal = SrcVal; if (Info.Offset) SrcVal = Builder.CreateShl(SrcVal, Info.Offset, "bf.shl"); // Mask out the original value. Val = Builder.CreateAnd(Val, ~llvm::APInt::getBitsSet(Info.StorageSize, Info.Offset, Info.Offset + Info.Size), "bf.clear"); // Or together the unchanged values and the source value. SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set"); } else { assert(Info.Offset == 0); // According to the AACPS: // When a volatile bit-field is written, and its container does not overlap // with any non-bit-field member, its container must be read exactly once and // written exactly once using the access width appropriate to the type of the // container. The two accesses are not atomic. if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) && CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad) Builder.CreateLoad(Ptr, true, "bf.load"); } // Write the new value back out. Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified()); // Return the new value of the bit-field, if requested. if (Result) { llvm::Value *ResultVal = MaskedVal; // Sign extend the value if needed. if (Info.IsSigned) { assert(Info.Size <= Info.StorageSize); unsigned HighBits = Info.StorageSize - Info.Size; if (HighBits) { ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl"); ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr"); } } ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned, "bf.result.cast"); *Result = EmitFromMemory(ResultVal, Dst.getType()); } } void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst) { // This access turns into a read/modify/write of the vector. Load the input // value now. llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddress(), Dst.isVolatileQualified()); const llvm::Constant *Elts = Dst.getExtVectorElts(); llvm::Value *SrcVal = Src.getScalarVal(); if (const VectorType *VTy = Dst.getType()->getAs()) { unsigned NumSrcElts = VTy->getNumElements(); unsigned NumDstElts = cast(Vec->getType())->getNumElements(); if (NumDstElts == NumSrcElts) { // Use shuffle vector is the src and destination are the same number of // elements and restore the vector mask since it is on the side it will be // stored. SmallVector Mask(NumDstElts); for (unsigned i = 0; i != NumSrcElts; ++i) Mask[getAccessedFieldNo(i, Elts)] = i; Vec = Builder.CreateShuffleVector( SrcVal, llvm::UndefValue::get(Vec->getType()), Mask); } else if (NumDstElts > NumSrcElts) { // Extended the source vector to the same length and then shuffle it // into the destination. // FIXME: since we're shuffling with undef, can we just use the indices // into that? This could be simpler. SmallVector ExtMask; for (unsigned i = 0; i != NumSrcElts; ++i) ExtMask.push_back(i); ExtMask.resize(NumDstElts, -1); llvm::Value *ExtSrcVal = Builder.CreateShuffleVector( SrcVal, llvm::UndefValue::get(SrcVal->getType()), ExtMask); // build identity SmallVector Mask; for (unsigned i = 0; i != NumDstElts; ++i) Mask.push_back(i); // When the vector size is odd and .odd or .hi is used, the last element // of the Elts constant array will be one past the size of the vector. // Ignore the last element here, if it is greater than the mask size. 
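      // E.g. for a 3-element ext_vector V, 'V.hi' nominally names elements
      // {2, 3}; element 3 is one past the end of the real vector, so it gets
      // dropped from the mask below.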
if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size()) NumSrcElts--; // modify when what gets shuffled in for (unsigned i = 0; i != NumSrcElts; ++i) Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts; Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask); } else { // We should never shorten the vector llvm_unreachable("unexpected shorten vector length"); } } else { // If the Src is a scalar (not a vector) it must be updating one element. unsigned InIdx = getAccessedFieldNo(0, Elts); llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx); Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt); } Builder.CreateStore(Vec, Dst.getExtVectorAddress(), Dst.isVolatileQualified()); } /// Store of global named registers are always calls to intrinsics. void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) { assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) && "Bad type for register variable"); llvm::MDNode *RegName = cast( cast(Dst.getGlobalReg())->getMetadata()); assert(RegName && "Register LValue is not metadata"); // We accept integer and pointer types only llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType()); llvm::Type *Ty = OrigTy; if (OrigTy->isPointerTy()) Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy); llvm::Type *Types[] = { Ty }; llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types); llvm::Value *Value = Src.getScalarVal(); if (OrigTy->isPointerTy()) Value = Builder.CreatePtrToInt(Value, Ty); Builder.CreateCall( F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value}); } // setObjCGCLValueClass - sets class of the lvalue for the purpose of // generating write-barries API. It is currently a global, ivar, // or neither. static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, LValue &LV, bool IsMemberAccess=false) { if (Ctx.getLangOpts().getGC() == LangOptions::NonGC) return; if (isa(E)) { QualType ExpTy = E->getType(); if (IsMemberAccess && ExpTy->isPointerType()) { // If ivar is a structure pointer, assigning to field of // this struct follows gcc's behavior and makes it a non-ivar // writer-barrier conservatively. ExpTy = ExpTy->castAs()->getPointeeType(); if (ExpTy->isRecordType()) { LV.setObjCIvar(false); return; } } LV.setObjCIvar(true); auto *Exp = cast(const_cast(E)); LV.setBaseIvarExp(Exp->getBase()); LV.setObjCArray(E->getType()->isArrayType()); return; } if (const auto *Exp = dyn_cast(E)) { if (const auto *VD = dyn_cast(Exp->getDecl())) { if (VD->hasGlobalStorage()) { LV.setGlobalObjCRef(true); LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None); } } LV.setObjCArray(E->getType()->isArrayType()); return; } if (const auto *Exp = dyn_cast(E)) { setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess); return; } if (const auto *Exp = dyn_cast(E)) { setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess); if (LV.isObjCIvar()) { // If cast is to a structure pointer, follow gcc's behavior and make it // a non-ivar write-barrier. 
QualType ExpTy = E->getType(); if (ExpTy->isPointerType()) ExpTy = ExpTy->castAs()->getPointeeType(); if (ExpTy->isRecordType()) LV.setObjCIvar(false); } return; } if (const auto *Exp = dyn_cast(E)) { setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV); return; } if (const auto *Exp = dyn_cast(E)) { setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess); return; } if (const auto *Exp = dyn_cast(E)) { setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess); return; } if (const auto *Exp = dyn_cast(E)) { setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess); return; } if (const auto *Exp = dyn_cast(E)) { setObjCGCLValueClass(Ctx, Exp->getBase(), LV); if (LV.isObjCIvar() && !LV.isObjCArray()) // Using array syntax to assigning to what an ivar points to is not // same as assigning to the ivar itself. {id *Names;} Names[i] = 0; LV.setObjCIvar(false); else if (LV.isGlobalObjCRef() && !LV.isObjCArray()) // Using array syntax to assigning to what global points to is not // same as assigning to the global itself. {id *G;} G[i] = 0; LV.setGlobalObjCRef(false); return; } if (const auto *Exp = dyn_cast(E)) { setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true); // We don't know if member is an 'ivar', but this flag is looked at // only in the context of LV.isObjCIvar(). LV.setObjCArray(E->getType()->isArrayType()); return; } } static llvm::Value * EmitBitCastOfLValueToProperType(CodeGenFunction &CGF, llvm::Value *V, llvm::Type *IRType, StringRef Name = StringRef()) { unsigned AS = cast(V->getType())->getAddressSpace(); return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name); } static LValue EmitThreadPrivateVarDeclLValue( CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr, llvm::Type *RealVarTy, SourceLocation Loc) { if (CGF.CGM.getLangOpts().OpenMPIRBuilder) Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate( CGF, VD, Addr, Loc); else Addr = CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc); Addr = CGF.Builder.CreateElementBitCast(Addr, RealVarTy); return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl); } static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType T) { llvm::Optional Res = OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD); // Return an invalid address if variable is MT_To and unified // memory is not enabled. For all other cases: MT_Link and // MT_To with unified memory, return a valid address. 
if (!Res || (*Res == OMPDeclareTargetDeclAttr::MT_To && !CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) return Address::invalid(); assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) || (*Res == OMPDeclareTargetDeclAttr::MT_To && CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) && "Expected link clause OR to clause with unified memory enabled."); QualType PtrTy = CGF.getContext().getPointerType(VD->getType()); Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD); return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs()); } Address CodeGenFunction::EmitLoadOfReference(LValue RefLVal, LValueBaseInfo *PointeeBaseInfo, TBAAAccessInfo *PointeeTBAAInfo) { llvm::LoadInst *Load = Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile()); CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo()); CharUnits Align = CGM.getNaturalTypeAlignment( RefLVal.getType()->getPointeeType(), PointeeBaseInfo, PointeeTBAAInfo, /* forPointeeType= */ true); return Address(Load, Align); } LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) { LValueBaseInfo PointeeBaseInfo; TBAAAccessInfo PointeeTBAAInfo; Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo, &PointeeTBAAInfo); return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(), PointeeBaseInfo, PointeeTBAAInfo); } Address CodeGenFunction::EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo) { llvm::Value *Addr = Builder.CreateLoad(Ptr); return Address(Addr, CGM.getNaturalTypeAlignment(PtrTy->getPointeeType(), BaseInfo, TBAAInfo, /*forPointeeType=*/true)); } LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr, const PointerType *PtrTy) { LValueBaseInfo BaseInfo; TBAAAccessInfo TBAAInfo; Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo); return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo); } static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, const Expr *E, const VarDecl *VD) { QualType T = E->getType(); // If it's thread_local, emit a call to its wrapper function instead. if (VD->getTLSKind() == VarDecl::TLS_Dynamic && CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD)) return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T); // Check if the variable is marked as declare target with link clause in // device codegen. if (CGF.getLangOpts().OpenMPIsDevice) { Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T); if (Addr.isValid()) return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl); } llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD); llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType()); V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy); CharUnits Alignment = CGF.getContext().getDeclAlign(VD); Address Addr(V, Alignment); // Emit reference to the private copy of the variable if it is an OpenMP // threadprivate variable. if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd && VD->hasAttr()) { return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy, E->getExprLoc()); } LValue LV = VD->getType()->isReferenceType() ? 
CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(), AlignmentSource::Decl) : CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl); setObjCGCLValueClass(CGF.getContext(), E, LV); return LV; } static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM, GlobalDecl GD) { const FunctionDecl *FD = cast(GD.getDecl()); if (FD->hasAttr()) { ConstantAddress aliasee = CGM.GetWeakRefReference(FD); return aliasee.getPointer(); } llvm::Constant *V = CGM.GetAddrOfFunction(GD); if (!FD->hasPrototype()) { if (const FunctionProtoType *Proto = FD->getType()->getAs()) { // Ugly case: for a K&R-style definition, the type of the definition // isn't the same as the type of a use. Correct for this with a // bitcast. QualType NoProtoType = CGM.getContext().getFunctionNoProtoType(Proto->getReturnType()); NoProtoType = CGM.getContext().getPointerType(NoProtoType); V = llvm::ConstantExpr::getBitCast(V, CGM.getTypes().ConvertType(NoProtoType)); } } return V; } static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E, GlobalDecl GD) { const FunctionDecl *FD = cast(GD.getDecl()); llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, GD); CharUnits Alignment = CGF.getContext().getDeclAlign(FD); return CGF.MakeAddrLValue(V, E->getType(), Alignment, AlignmentSource::Decl); } static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD, llvm::Value *ThisValue) { QualType TagType = CGF.getContext().getTagDeclType(FD->getParent()); LValue LV = CGF.MakeNaturalAlignAddrLValue(ThisValue, TagType); return CGF.EmitLValueForField(LV, FD); } /// Named Registers are named metadata pointing to the register name /// which will be read from/written to as an argument to the intrinsic /// @llvm.read/write_register. /// So far, only the name is being passed down, but other options such as /// register type, allocation type or even optimization options could be /// passed down via the metadata node. static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) { SmallString<64> Name("llvm.named.register."); AsmLabelAttr *Asm = VD->getAttr(); assert(Asm->getLabel().size() < 64-Name.size() && "Register name too big"); Name.append(Asm->getLabel()); llvm::NamedMDNode *M = CGM.getModule().getOrInsertNamedMetadata(Name); if (M->getNumOperands() == 0) { llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(), Asm->getLabel()); llvm::Metadata *Ops[] = {Str}; M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops)); } CharUnits Alignment = CGM.getContext().getDeclAlign(VD); llvm::Value *Ptr = llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0)); return LValue::MakeGlobalReg(Address(Ptr, Alignment), VD->getType()); } /// Determine whether we can emit a reference to \p VD from the current /// context, despite not necessarily having seen an odr-use of the variable in /// this context. static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF, const DeclRefExpr *E, const VarDecl *VD, bool IsConstant) { // For a variable declared in an enclosing scope, do not emit a spurious // reference even if we have a capture, as that will emit an unwarranted // reference to our capture state, and will likely generate worse code than // emitting a local copy. if (E->refersToEnclosingVariableOrCapture()) return false; // For a local declaration declared in this function, we can always reference // it even if we don't have an odr-use. 
if (VD->hasLocalStorage()) { return VD->getDeclContext() == dyn_cast_or_null(CGF.CurCodeDecl); } // For a global declaration, we can emit a reference to it if we know // for sure that we are able to emit a definition of it. VD = VD->getDefinition(CGF.getContext()); if (!VD) return false; // Don't emit a spurious reference if it might be to a variable that only // exists on a different device / target. // FIXME: This is unnecessarily broad. Check whether this would actually be a // cross-target reference. if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA || CGF.getLangOpts().OpenCL) { return false; } // We can emit a spurious reference only if the linkage implies that we'll // be emitting a non-interposable symbol that will be retained until link // time. switch (CGF.CGM.getLLVMLinkageVarDefinition(VD, IsConstant)) { case llvm::GlobalValue::ExternalLinkage: case llvm::GlobalValue::LinkOnceODRLinkage: case llvm::GlobalValue::WeakODRLinkage: case llvm::GlobalValue::InternalLinkage: case llvm::GlobalValue::PrivateLinkage: return true; default: return false; } } LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { const NamedDecl *ND = E->getDecl(); QualType T = E->getType(); assert(E->isNonOdrUse() != NOUR_Unevaluated && "should not emit an unevaluated operand"); if (const auto *VD = dyn_cast(ND)) { // Global Named registers access via intrinsics only if (VD->getStorageClass() == SC_Register && VD->hasAttr() && !VD->isLocalVarDecl()) return EmitGlobalNamedRegister(VD, CGM); // If this DeclRefExpr does not constitute an odr-use of the variable, // we're not permitted to emit a reference to it in general, and it might // not be captured if capture would be necessary for a use. Emit the // constant value directly instead. if (E->isNonOdrUse() == NOUR_Constant && (VD->getType()->isReferenceType() || !canEmitSpuriousReferenceToVariable(*this, E, VD, true))) { VD->getAnyInitializer(VD); llvm::Constant *Val = ConstantEmitter(*this).emitAbstract( E->getLocation(), *VD->evaluateValue(), VD->getType()); assert(Val && "failed to emit constant expression"); Address Addr = Address::invalid(); if (!VD->getType()->isReferenceType()) { // Spill the constant value to a global. Addr = CGM.createUnnamedGlobalFrom(*VD, Val, getContext().getDeclAlign(VD)); llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType()); auto *PTy = llvm::PointerType::get( VarTy, getContext().getTargetAddressSpace(VD->getType())); if (PTy != Addr.getType()) Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy); } else { // Should we be using the alignment of the constant pointer we emitted? CharUnits Alignment = CGM.getNaturalTypeAlignment(E->getType(), /* BaseInfo= */ nullptr, /* TBAAInfo= */ nullptr, /* forPointeeType= */ true); Addr = Address(Val, Alignment); } return MakeAddrLValue(Addr, T, AlignmentSource::Decl); } // FIXME: Handle other kinds of non-odr-use DeclRefExprs. // Check for captured variables. if (E->refersToEnclosingVariableOrCapture()) { VD = VD->getCanonicalDecl(); if (auto *FD = LambdaCaptureFields.lookup(VD)) return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue); if (CapturedStmtInfo) { auto I = LocalDeclMap.find(VD); if (I != LocalDeclMap.end()) { LValue CapLVal; if (VD->getType()->isReferenceType()) CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(), AlignmentSource::Decl); else CapLVal = MakeAddrLValue(I->second, T); // Mark lvalue as nontemporal if the variable is marked as nontemporal // in simd context. 
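          // (That is, the variable appears in a 'nontemporal' clause of an
          // enclosing '#pragma omp simd'; its loads and stores are then
          // emitted with !nontemporal metadata.)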
if (getLangOpts().OpenMP && CGM.getOpenMPRuntime().isNontemporalDecl(VD)) CapLVal.setNontemporal(/*Value=*/true); return CapLVal; } LValue CapLVal = EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD), CapturedStmtInfo->getContextValue()); CapLVal = MakeAddrLValue( Address(CapLVal.getPointer(*this), getContext().getDeclAlign(VD)), CapLVal.getType(), LValueBaseInfo(AlignmentSource::Decl), CapLVal.getTBAAInfo()); // Mark lvalue as nontemporal if the variable is marked as nontemporal // in simd context. if (getLangOpts().OpenMP && CGM.getOpenMPRuntime().isNontemporalDecl(VD)) CapLVal.setNontemporal(/*Value=*/true); return CapLVal; } assert(isa(CurCodeDecl)); Address addr = GetAddrOfBlockDecl(VD); return MakeAddrLValue(addr, T, AlignmentSource::Decl); } } // FIXME: We should be able to assert this for FunctionDecls as well! // FIXME: We should be able to assert this for all DeclRefExprs, not just // those with a valid source location. assert((ND->isUsed(false) || !isa(ND) || E->isNonOdrUse() || !E->getLocation().isValid()) && "Should not use decl without marking it used!"); if (ND->hasAttr()) { const auto *VD = cast(ND); ConstantAddress Aliasee = CGM.GetWeakRefReference(VD); return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl); } if (const auto *VD = dyn_cast(ND)) { // Check if this is a global variable. if (VD->hasLinkage() || VD->isStaticDataMember()) return EmitGlobalVarDeclLValue(*this, E, VD); Address addr = Address::invalid(); // The variable should generally be present in the local decl map. auto iter = LocalDeclMap.find(VD); if (iter != LocalDeclMap.end()) { addr = iter->second; // Otherwise, it might be static local we haven't emitted yet for // some reason; most likely, because it's in an outer function. } else if (VD->isStaticLocal()) { addr = Address(CGM.getOrCreateStaticVarDecl( *VD, CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false)), getContext().getDeclAlign(VD)); // No other cases for now. } else { llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?"); } // Check for OpenMP threadprivate variables. if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd && VD->hasAttr()) { return EmitThreadPrivateVarDeclLValue( *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()), E->getExprLoc()); } // Drill into block byref variables. bool isBlockByref = VD->isEscapingByref(); if (isBlockByref) { addr = emitBlockByrefAddress(addr, VD); } // Drill into reference types. LValue LV = VD->getType()->isReferenceType() ? EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) : MakeAddrLValue(addr, T, AlignmentSource::Decl); bool isLocalStorage = VD->hasLocalStorage(); bool NonGCable = isLocalStorage && !VD->getType()->isReferenceType() && !isBlockByref; if (NonGCable) { LV.getQuals().removeObjCGCAttr(); LV.setNonGC(true); } bool isImpreciseLifetime = (isLocalStorage && !VD->hasAttr()); if (isImpreciseLifetime) LV.setARCPreciseLifetime(ARCImpreciseLifetime); setObjCGCLValueClass(getContext(), E, LV); return LV; } if (const auto *FD = dyn_cast(ND)) return EmitFunctionDeclLValue(*this, E, FD); // FIXME: While we're emitting a binding from an enclosing scope, all other // DeclRefExprs we see should be implicitly treated as if they also refer to // an enclosing scope. if (const auto *BD = dyn_cast(ND)) return EmitLValue(BD->getBinding()); // We can form DeclRefExprs naming GUID declarations when reconstituting // non-type template parameters into expressions. 
if (const auto *GD = dyn_cast(ND)) return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T, AlignmentSource::Decl); llvm_unreachable("Unhandled DeclRefExpr"); } LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) { // __extension__ doesn't affect lvalue-ness. if (E->getOpcode() == UO_Extension) return EmitLValue(E->getSubExpr()); QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType()); switch (E->getOpcode()) { default: llvm_unreachable("Unknown unary operator lvalue!"); case UO_Deref: { QualType T = E->getSubExpr()->getType()->getPointeeType(); assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type"); LValueBaseInfo BaseInfo; TBAAAccessInfo TBAAInfo; Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo, &TBAAInfo); LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo); LV.getQuals().setAddressSpace(ExprTy.getAddressSpace()); // We should not generate __weak write barrier on indirect reference // of a pointer to object; as in void foo (__weak id *param); *param = 0; // But, we continue to generate __strong write barrier on indirect write // into a pointer to object. if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC && LV.isObjCWeak()) LV.setNonGC(!E->isOBJCGCCandidate(getContext())); return LV; } case UO_Real: case UO_Imag: { LValue LV = EmitLValue(E->getSubExpr()); assert(LV.isSimple() && "real/imag on non-ordinary l-value"); // __real is valid on scalars. This is a faster way of testing that. // __imag can only produce an rvalue on scalars. if (E->getOpcode() == UO_Real && !LV.getAddress(*this).getElementType()->isStructTy()) { assert(E->getSubExpr()->getType()->isArithmeticType()); return LV; } QualType T = ExprTy->castAs()->getElementType(); Address Component = (E->getOpcode() == UO_Real ? 
emitAddrOfRealComponent(LV.getAddress(*this), LV.getType()) : emitAddrOfImagComponent(LV.getAddress(*this), LV.getType())); LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(), CGM.getTBAAInfoForSubobject(LV, T)); ElemLV.getQuals().addQualifiers(LV.getQuals()); return ElemLV; } case UO_PreInc: case UO_PreDec: { LValue LV = EmitLValue(E->getSubExpr()); bool isInc = E->getOpcode() == UO_PreInc; if (E->getType()->isAnyComplexType()) EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/); else EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/); return LV; } } } LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) { return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E), E->getType(), AlignmentSource::Decl); } LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) { return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E), E->getType(), AlignmentSource::Decl); } LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) { auto SL = E->getFunctionName(); assert(SL != nullptr && "No StringLiteral name in PredefinedExpr"); StringRef FnName = CurFn->getName(); if (FnName.startswith("\01")) FnName = FnName.substr(1); StringRef NameItems[] = { PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName}; std::string GVName = llvm::join(NameItems, NameItems + 2, "."); if (auto *BD = dyn_cast_or_null(CurCodeDecl)) { std::string Name = std::string(SL->getString()); if (!Name.empty()) { unsigned Discriminator = CGM.getCXXABI().getMangleContext().getBlockId(BD, true); if (Discriminator) Name += "_" + Twine(Discriminator + 1).str(); auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str()); return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl); } else { auto C = CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str()); return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl); } } auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName); return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl); } /// Emit a type description suitable for use by a runtime sanitizer library. The /// format of a type descriptor is /// /// \code /// { i16 TypeKind, i16 TypeInfo } /// \endcode /// /// followed by an array of i8 containing the type name. TypeKind is 0 for an /// integer, 1 for a floating point value, and -1 for anything else. llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) { // Only emit each type's descriptor once. if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T)) return C; uint16_t TypeKind = -1; uint16_t TypeInfo = 0; if (T->isIntegerType()) { TypeKind = 0; TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) | (T->isSignedIntegerType() ? 1 : 0); } else if (T->isFloatingType()) { TypeKind = 1; TypeInfo = getContext().getTypeSize(T); } // Format the type name as if for a diagnostic, including quotes and // optionally an 'aka'. 
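  // E.g. for 'typedef int myint;' the descriptor name reads "'myint' (aka
  // 'int')", with TypeKind 0 (integer) and TypeInfo (log2(32) << 1) | 1 == 11
  // for a signed 32-bit integer.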
SmallString<32> Buffer; CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype, (intptr_t)T.getAsOpaquePtr(), StringRef(), StringRef(), None, Buffer, None); llvm::Constant *Components[] = { Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo), llvm::ConstantDataArray::getString(getLLVMContext(), Buffer) }; llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components); auto *GV = new llvm::GlobalVariable( CGM.getModule(), Descriptor->getType(), /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor); GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV); // Remember the descriptor for this type. CGM.setTypeDescriptorInMap(T, GV); return GV; } llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) { llvm::Type *TargetTy = IntPtrTy; if (V->getType() == TargetTy) return V; // Floating-point types which fit into intptr_t are bitcast to integers // and then passed directly (after zero-extension, if necessary). if (V->getType()->isFloatingPointTy()) { unsigned Bits = V->getType()->getPrimitiveSizeInBits(); if (Bits <= TargetTy->getIntegerBitWidth()) V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(), Bits)); } // Integers which fit in intptr_t are zero-extended and passed directly. if (V->getType()->isIntegerTy() && V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth()) return Builder.CreateZExt(V, TargetTy); // Pointers are passed directly, everything else is passed by address. if (!V->getType()->isPointerTy()) { Address Ptr = CreateDefaultAlignTempAlloca(V->getType()); Builder.CreateStore(V, Ptr); V = Ptr.getPointer(); } return Builder.CreatePtrToInt(V, TargetTy); } /// Emit a representation of a SourceLocation for passing to a handler /// in a sanitizer runtime library. The format for this data is: /// \code /// struct SourceLocation { /// const char *Filename; /// int32_t Line, Column; /// }; /// \endcode /// For an invalid SourceLocation, the Filename pointer is null. 
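/// The reported file name honours -fsanitize-undefined-strip-path-components=N:
/// a positive N drops the first N path components, while a negative N keeps
/// only the last |N| components.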
llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) { llvm::Constant *Filename; int Line, Column; PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc); if (PLoc.isValid()) { StringRef FilenameString = PLoc.getFilename(); int PathComponentsToStrip = CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip; if (PathComponentsToStrip < 0) { assert(PathComponentsToStrip != INT_MIN); int PathComponentsToKeep = -PathComponentsToStrip; auto I = llvm::sys::path::rbegin(FilenameString); auto E = llvm::sys::path::rend(FilenameString); while (I != E && --PathComponentsToKeep) ++I; FilenameString = FilenameString.substr(I - E); } else if (PathComponentsToStrip > 0) { auto I = llvm::sys::path::begin(FilenameString); auto E = llvm::sys::path::end(FilenameString); while (I != E && PathComponentsToStrip--) ++I; if (I != E) FilenameString = FilenameString.substr(I - llvm::sys::path::begin(FilenameString)); else FilenameString = llvm::sys::path::filename(FilenameString); } auto FilenameGV = CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src"); CGM.getSanitizerMetadata()->disableSanitizerForGlobal( cast(FilenameGV.getPointer())); Filename = FilenameGV.getPointer(); Line = PLoc.getLine(); Column = PLoc.getColumn(); } else { Filename = llvm::Constant::getNullValue(Int8PtrTy); Line = Column = 0; } llvm::Constant *Data[] = {Filename, Builder.getInt32(Line), Builder.getInt32(Column)}; return llvm::ConstantStruct::getAnon(Data); } namespace { /// Specify under what conditions this check can be recovered enum class CheckRecoverableKind { /// Always terminate program execution if this check fails. Unrecoverable, /// Check supports recovering, runtime has both fatal (noreturn) and /// non-fatal handlers for this check. Recoverable, /// Runtime conditionally aborts, always need to support recovery. AlwaysRecoverable }; } static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) { assert(Kind.countPopulation() == 1); if (Kind == SanitizerKind::Function || Kind == SanitizerKind::Vptr) return CheckRecoverableKind::AlwaysRecoverable; else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable) return CheckRecoverableKind::Unrecoverable; else return CheckRecoverableKind::Recoverable; } namespace { struct SanitizerHandlerInfo { char const *const Name; unsigned Version; }; } const SanitizerHandlerInfo SanitizerHandlers[] = { #define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version}, LIST_SANITIZER_CHECKS #undef SANITIZER_CHECK }; static void emitCheckHandlerCall(CodeGenFunction &CGF, llvm::FunctionType *FnType, ArrayRef FnArgs, SanitizerHandler CheckHandler, CheckRecoverableKind RecoverKind, bool IsFatal, llvm::BasicBlock *ContBB) { assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable); Optional DL; if (!CGF.Builder.getCurrentDebugLocation()) { // Ensure that the call has at least an artificial debug location. 
DL.emplace(CGF, SourceLocation()); } bool NeedsAbortSuffix = IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable; bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime; const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler]; const StringRef CheckName = CheckInfo.Name; std::string FnName = "__ubsan_handle_" + CheckName.str(); if (CheckInfo.Version && !MinimalRuntime) FnName += "_v" + llvm::utostr(CheckInfo.Version); if (MinimalRuntime) FnName += "_minimal"; if (NeedsAbortSuffix) FnName += "_abort"; bool MayReturn = !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable; llvm::AttrBuilder B; if (!MayReturn) { B.addAttribute(llvm::Attribute::NoReturn) .addAttribute(llvm::Attribute::NoUnwind); } B.addAttribute(llvm::Attribute::UWTable); llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction( FnType, FnName, llvm::AttributeList::get(CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, B), /*Local=*/true); llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs); if (!MayReturn) { HandlerCall->setDoesNotReturn(); CGF.Builder.CreateUnreachable(); } else { CGF.Builder.CreateBr(ContBB); } } void CodeGenFunction::EmitCheck( ArrayRef> Checked, SanitizerHandler CheckHandler, ArrayRef StaticArgs, ArrayRef DynamicArgs) { assert(IsSanitizerScope); assert(Checked.size() > 0); assert(CheckHandler >= 0 && size_t(CheckHandler) < llvm::array_lengthof(SanitizerHandlers)); const StringRef CheckName = SanitizerHandlers[CheckHandler].Name; llvm::Value *FatalCond = nullptr; llvm::Value *RecoverableCond = nullptr; llvm::Value *TrapCond = nullptr; for (int i = 0, n = Checked.size(); i < n; ++i) { llvm::Value *Check = Checked[i].first; // -fsanitize-trap= overrides -fsanitize-recover=. llvm::Value *&Cond = CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second) ? TrapCond : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second) ? RecoverableCond : FatalCond; Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check; } if (TrapCond) EmitTrapCheck(TrapCond); if (!FatalCond && !RecoverableCond) return; llvm::Value *JointCond; if (FatalCond && RecoverableCond) JointCond = Builder.CreateAnd(FatalCond, RecoverableCond); else JointCond = FatalCond ? FatalCond : RecoverableCond; assert(JointCond); CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second); assert(SanOpts.has(Checked[0].second)); #ifndef NDEBUG for (int i = 1, n = Checked.size(); i < n; ++i) { assert(RecoverKind == getRecoverableKind(Checked[i].second) && "All recoverable kinds in a single check must be same!"); assert(SanOpts.has(Checked[i].second)); } #endif llvm::BasicBlock *Cont = createBasicBlock("cont"); llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName); llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers); // Give hint that we very much don't expect to execute the handler // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp llvm::MDBuilder MDHelper(getLLVMContext()); llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1); Branch->setMetadata(llvm::LLVMContext::MD_prof, Node); EmitBlock(Handlers); // Handler functions take an i8* pointing to the (handler-specific) static // information block, followed by a sequence of intptr_t arguments // representing operand values. 
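  // E.g. a fatal signed-overflow check ends up emitting something like
  //   call void @__ubsan_handle_add_overflow_abort(i8* <static data>,
  //                                                i64 %lhs, i64 %rhs)
  // whereas the minimal-runtime handlers are called with no arguments at all.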
SmallVector Args; SmallVector ArgTypes; if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) { Args.reserve(DynamicArgs.size() + 1); ArgTypes.reserve(DynamicArgs.size() + 1); // Emit handler arguments and create handler function type. if (!StaticArgs.empty()) { llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs); auto *InfoPtr = new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false, llvm::GlobalVariable::PrivateLinkage, Info); InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr); Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy)); ArgTypes.push_back(Int8PtrTy); } for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) { Args.push_back(EmitCheckValue(DynamicArgs[i])); ArgTypes.push_back(IntPtrTy); } } llvm::FunctionType *FnType = llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false); if (!FatalCond || !RecoverableCond) { // Simple case: we need to generate a single handler call, either // fatal, or non-fatal. emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, (FatalCond != nullptr), Cont); } else { // Emit two handler calls: first one for set of unrecoverable checks, // another one for recoverable. llvm::BasicBlock *NonFatalHandlerBB = createBasicBlock("non_fatal." + CheckName); llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName); Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB); EmitBlock(FatalHandlerBB); emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true, NonFatalHandlerBB); EmitBlock(NonFatalHandlerBB); emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false, Cont); } EmitBlock(Cont); } void CodeGenFunction::EmitCfiSlowPathCheck( SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId, llvm::Value *Ptr, ArrayRef StaticArgs) { llvm::BasicBlock *Cont = createBasicBlock("cfi.cont"); llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath"); llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB); llvm::MDBuilder MDHelper(getLLVMContext()); llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1); BI->setMetadata(llvm::LLVMContext::MD_prof, Node); EmitBlock(CheckBB); bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind); llvm::CallInst *CheckCall; llvm::FunctionCallee SlowPathFn; if (WithDiag) { llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs); auto *InfoPtr = new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false, llvm::GlobalVariable::PrivateLinkage, Info); InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr); SlowPathFn = CGM.getModule().getOrInsertFunction( "__cfi_slowpath_diag", llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, false)); CheckCall = Builder.CreateCall( SlowPathFn, {TypeId, Ptr, Builder.CreateBitCast(InfoPtr, Int8PtrTy)}); } else { SlowPathFn = CGM.getModule().getOrInsertFunction( "__cfi_slowpath", llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false)); CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr}); } CGM.setDSOLocal( cast(SlowPathFn.getCallee()->stripPointerCasts())); CheckCall->setDoesNotThrow(); EmitBlock(Cont); } // Emit a stub for __cfi_check function so that the linker knows about this // symbol in LTO mode. 
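// In cross-DSO CFI mode, calls coming from other DSOs reach this module
// through the runtime's __cfi_slowpath, which invokes
// __cfi_check(CallSiteTypeId, Addr, DiagData) in the target module. The stub
// below only traps; the real body is expected to be filled in by the
// CrossDSOCFI pass at LTO time.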
void CodeGenFunction::EmitCfiCheckStub() { llvm::Module *M = &CGM.getModule(); auto &Ctx = M->getContext(); llvm::Function *F = llvm::Function::Create( llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, false), llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M); CGM.setDSOLocal(F); llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F); // FIXME: consider emitting an intrinsic call like // call void @llvm.cfi_check(i64 %0, i8* %1, i8* %2) // which can be lowered in CrossDSOCFI pass to the actual contents of // __cfi_check. This would allow inlining of __cfi_check calls. llvm::CallInst::Create( llvm::Intrinsic::getDeclaration(M, llvm::Intrinsic::trap), "", BB); llvm::ReturnInst::Create(Ctx, nullptr, BB); } // This function is basically a switch over the CFI failure kind, which is // extracted from CFICheckFailData (1st function argument). Each case is either // llvm.trap or a call to one of the two runtime handlers, based on // -fsanitize-trap and -fsanitize-recover settings. Default case (invalid // failure kind) traps, but this should really never happen. CFICheckFailData // can be nullptr if the calling module has -fsanitize-trap behavior for this // check kind; in this case __cfi_check_fail traps as well. void CodeGenFunction::EmitCfiCheckFail() { SanitizerScope SanScope(this); FunctionArgList Args; ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy, ImplicitParamDecl::Other); ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy, ImplicitParamDecl::Other); Args.push_back(&ArgData); Args.push_back(&ArgAddr); const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, Args); llvm::Function *F = llvm::Function::Create( llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false), llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule()); CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F); CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F); F->setVisibility(llvm::GlobalValue::HiddenVisibility); StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args, SourceLocation()); // This function should not be affected by blacklist. This function does // not have a source location, but "src:*" would still apply. Revert any // changes to SanOpts made in StartFunction. SanOpts = CGM.getLangOpts().Sanitize; llvm::Value *Data = EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false, CGM.getContext().VoidPtrTy, ArgData.getLocation()); llvm::Value *Addr = EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false, CGM.getContext().VoidPtrTy, ArgAddr.getLocation()); // Data == nullptr means the calling module has trap behaviour for this check. 
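  // Note that EmitTrapCheck branches to the trap block when its condition is
  // *false*, so passing "Data != null" below traps exactly when Data is null.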
llvm::Value *DataIsNotNullPtr = Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy)); EmitTrapCheck(DataIsNotNullPtr); llvm::StructType *SourceLocationTy = llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty); llvm::StructType *CfiCheckFailDataTy = llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy); llvm::Value *V = Builder.CreateConstGEP2_32( CfiCheckFailDataTy, Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0, 0); Address CheckKindAddr(V, getIntAlign()); llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr); llvm::Value *AllVtables = llvm::MetadataAsValue::get( CGM.getLLVMContext(), llvm::MDString::get(CGM.getLLVMContext(), "all-vtables")); llvm::Value *ValidVtable = Builder.CreateZExt( Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test), {Addr, AllVtables}), IntPtrTy); const std::pair CheckKinds[] = { {CFITCK_VCall, SanitizerKind::CFIVCall}, {CFITCK_NVCall, SanitizerKind::CFINVCall}, {CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast}, {CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast}, {CFITCK_ICall, SanitizerKind::CFIICall}}; SmallVector, 5> Checks; for (auto CheckKindMaskPair : CheckKinds) { int Kind = CheckKindMaskPair.first; SanitizerMask Mask = CheckKindMaskPair.second; llvm::Value *Cond = Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind)); if (CGM.getLangOpts().Sanitize.has(Mask)) EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {}, {Data, Addr, ValidVtable}); else EmitTrapCheck(Cond); } FinishFunction(); // The only reference to this function will be created during LTO link. // Make sure it survives until then. CGM.addUsedGlobal(F); } void CodeGenFunction::EmitUnreachable(SourceLocation Loc) { if (SanOpts.has(SanitizerKind::Unreachable)) { SanitizerScope SanScope(this); EmitCheck(std::make_pair(static_cast(Builder.getFalse()), SanitizerKind::Unreachable), SanitizerHandler::BuiltinUnreachable, EmitCheckSourceLocation(Loc), None); } Builder.CreateUnreachable(); } void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked) { llvm::BasicBlock *Cont = createBasicBlock("cont"); // If we're optimizing, collapse all calls to trap down to just one per // function to save on code size. if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) { TrapBB = createBasicBlock("trap"); Builder.CreateCondBr(Checked, Cont, TrapBB); EmitBlock(TrapBB); llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap); TrapCall->setDoesNotReturn(); TrapCall->setDoesNotThrow(); Builder.CreateUnreachable(); } else { Builder.CreateCondBr(Checked, Cont, TrapBB); } EmitBlock(Cont); } llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) { llvm::CallInst *TrapCall = Builder.CreateCall(CGM.getIntrinsic(IntrID)); if (!CGM.getCodeGenOpts().TrapFuncName.empty()) { auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name", CGM.getCodeGenOpts().TrapFuncName); TrapCall->addAttribute(llvm::AttributeList::FunctionIndex, A); } return TrapCall; } Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo) { assert(E->getType()->isArrayType() && "Array to pointer decay must have array source type!"); // Expressions of array type can't be bitfields or vector elements. LValue LV = EmitLValue(E); Address Addr = LV.getAddress(*this); // If the array type was an incomplete type, we need to make sure // the decay ends up being the right type. 
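  // E.g. with 'extern int A[];' the emitted global's IR type may not match
  // the expression's array type, so normalize the element type before doing
  // the decay GEP.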
llvm::Type *NewTy = ConvertType(E->getType()); Addr = Builder.CreateElementBitCast(Addr, NewTy); // Note that VLA pointers are always decayed, so we don't need to do // anything here. if (!E->getType()->isVariableArrayType()) { assert(isa(Addr.getElementType()) && "Expected pointer to array"); Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay"); } // The result of this decay conversion points to an array element within the // base lvalue. However, since TBAA currently does not support representing // accesses to elements of member arrays, we conservatively represent accesses // to the pointee object as if it had no any base lvalue specified. // TODO: Support TBAA for member arrays. QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType(); if (BaseInfo) *BaseInfo = LV.getBaseInfo(); if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType); return Builder.CreateElementBitCast(Addr, ConvertTypeForMem(EltType)); } /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an /// array to pointer, return the array subexpression. static const Expr *isSimpleArrayDecayOperand(const Expr *E) { // If this isn't just an array->pointer decay, bail out. const auto *CE = dyn_cast(E); if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay) return nullptr; // If this is a decay from variable width array, bail out. const Expr *SubExpr = CE->getSubExpr(); if (SubExpr->getType()->isVariableArrayType()) return nullptr; return SubExpr; } static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF, llvm::Value *ptr, ArrayRef indices, bool inbounds, bool signedIndices, SourceLocation loc, const llvm::Twine &name = "arrayidx") { if (inbounds) { return CGF.EmitCheckedInBoundsGEP(ptr, indices, signedIndices, CodeGenFunction::NotSubtraction, loc, name); } else { return CGF.Builder.CreateGEP(ptr, indices, name); } } static CharUnits getArrayElementAlign(CharUnits arrayAlign, llvm::Value *idx, CharUnits eltSize) { // If we have a constant index, we can use the exact offset of the // element we're accessing. if (auto constantIdx = dyn_cast(idx)) { CharUnits offset = constantIdx->getZExtValue() * eltSize; return arrayAlign.alignmentAtOffset(offset); // Otherwise, use the worst-case alignment for any element. } else { return arrayAlign.alignmentOfArrayElement(eltSize); } } static QualType getFixedSizeElementType(const ASTContext &ctx, const VariableArrayType *vla) { QualType eltType; do { eltType = vla->getElementType(); } while ((vla = ctx.getAsVariableArrayType(eltType))); return eltType; } /// Given an array base, check whether its member access belongs to a record /// with preserve_access_index attribute or not. static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) { if (!ArrayBase || !CGF.getDebugInfo()) return false; // Only support base as either a MemberExpr or DeclRefExpr. // DeclRefExpr to cover cases like: // struct s { int a; int b[10]; }; // struct s *p; // p[1].a // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr. // p->b[5] is a MemberExpr example. 
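  // (The attribute drives BPF CO-RE: qualifying accesses are emitted through
  // the llvm.preserve.array.access.index intrinsic so the BPF backend can
  // record relocatable access indices.)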
const Expr *E = ArrayBase->IgnoreImpCasts(); if (const auto *ME = dyn_cast(E)) return ME->getMemberDecl()->hasAttr(); if (const auto *DRE = dyn_cast(E)) { const auto *VarDef = dyn_cast(DRE->getDecl()); if (!VarDef) return false; const auto *PtrT = VarDef->getType()->getAs(); if (!PtrT) return false; const auto *PointeeT = PtrT->getPointeeType() ->getUnqualifiedDesugaredType(); if (const auto *RecT = dyn_cast(PointeeT)) return RecT->getDecl()->hasAttr(); return false; } return false; } static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr, ArrayRef indices, QualType eltType, bool inbounds, bool signedIndices, SourceLocation loc, QualType *arrayType = nullptr, const Expr *Base = nullptr, const llvm::Twine &name = "arrayidx") { // All the indices except that last must be zero. #ifndef NDEBUG for (auto idx : indices.drop_back()) assert(isa(idx) && cast(idx)->isZero()); #endif // Determine the element size of the statically-sized base. This is // the thing that the indices are expressed in terms of. if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) { eltType = getFixedSizeElementType(CGF.getContext(), vla); } // We can use that to compute the best alignment of the element. CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType); CharUnits eltAlign = getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize); llvm::Value *eltPtr; auto LastIndex = dyn_cast(indices.back()); if (!LastIndex || (!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, Base))) { eltPtr = emitArraySubscriptGEP( CGF, addr.getPointer(), indices, inbounds, signedIndices, loc, name); } else { // Remember the original array subscript for bpf target unsigned idx = LastIndex->getZExtValue(); llvm::DIType *DbgInfo = nullptr; if (arrayType) DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc); eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(addr.getElementType(), addr.getPointer(), indices.size() - 1, idx, DbgInfo); } return Address(eltPtr, eltAlign); } LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E, bool Accessed) { // The index must always be an integer, which is not an aggregate. Emit it // in lexical order (this complexity is, sadly, required by C++17). llvm::Value *IdxPre = (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr; bool SignedIndices = false; auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * { auto *Idx = IdxPre; if (E->getLHS() != E->getIdx()) { assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS"); Idx = EmitScalarExpr(E->getIdx()); } QualType IdxTy = E->getIdx()->getType(); bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType(); SignedIndices |= IdxSigned; if (SanOpts.has(SanitizerKind::ArrayBounds)) EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed); // Extend or truncate the index type to 32 or 64-bits. if (Promote && Idx->getType() != IntPtrTy) Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom"); return Idx; }; IdxPre = nullptr; // If the base is a vector type, then we are forming a vector element lvalue // with this subscript. if (E->getBase()->getType()->isVectorType() && !isa(E->getBase())) { // Emit the vector as an lvalue to get its address. 
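// Illustrative: with "typedef int v4 __attribute__((vector_size(16)));"
//   v4 v; v[1] = 7;
// forms a vector-element lvalue, so the store is done by loading the vector
// and using insertelement rather than GEPing to a scalar slot.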
LValue LHS = EmitLValue(E->getBase()); auto *Idx = EmitIdxAfterBase(/*Promote*/false); assert(LHS.isSimple() && "Can only subscript lvalue vectors here!"); return LValue::MakeVectorElt(LHS.getAddress(*this), Idx, E->getBase()->getType(), LHS.getBaseInfo(), TBAAAccessInfo()); } // All the other cases basically behave like simple offsetting. // Handle the extvector case we ignored above. if (isa(E->getBase())) { LValue LV = EmitLValue(E->getBase()); auto *Idx = EmitIdxAfterBase(/*Promote*/true); Address Addr = EmitExtVectorElementLValue(LV); QualType EltType = LV.getType()->castAs()->getElementType(); Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true, SignedIndices, E->getExprLoc()); return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(), CGM.getTBAAInfoForSubobject(LV, EltType)); } LValueBaseInfo EltBaseInfo; TBAAAccessInfo EltTBAAInfo; Address Addr = Address::invalid(); if (const VariableArrayType *vla = getContext().getAsVariableArrayType(E->getType())) { // The base must be a pointer, which is not an aggregate. Emit // it. It needs to be emitted first in case it's what captures // the VLA bounds. Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo); auto *Idx = EmitIdxAfterBase(/*Promote*/true); // The element count here is the total number of non-VLA elements. llvm::Value *numElements = getVLASize(vla).NumElts; // Effectively, the multiply by the VLA size is part of the GEP. // GEP indexes are signed, and scaling an index isn't permitted to // signed-overflow, so we use the same semantics for our explicit // multiply. We suppress this if overflow is not undefined behavior. if (getLangOpts().isSignedOverflowDefined()) { Idx = Builder.CreateMul(Idx, numElements); } else { Idx = Builder.CreateNSWMul(Idx, numElements); } Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices, E->getExprLoc()); } else if (const ObjCObjectType *OIT = E->getType()->getAs()){ // Indexing over an interface, as in "NSString *P; P[4];" // Emit the base pointer. Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo); auto *Idx = EmitIdxAfterBase(/*Promote*/true); CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT); llvm::Value *InterfaceSizeVal = llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity()); llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal); // We don't necessarily build correct LLVM struct types for ObjC // interfaces, so we can't rely on GEP to do this scaling // correctly, so we need to cast to i8*. FIXME: is this actually // true? A lot of other things in the fragile ABI would break... llvm::Type *OrigBaseTy = Addr.getType(); Addr = Builder.CreateElementBitCast(Addr, Int8Ty); // Do the GEP. CharUnits EltAlign = getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize); llvm::Value *EltPtr = emitArraySubscriptGEP(*this, Addr.getPointer(), ScaledIdx, false, SignedIndices, E->getExprLoc()); Addr = Address(EltPtr, EltAlign); // Cast back. Addr = Builder.CreateBitCast(Addr, OrigBaseTy); } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) { // If this is A[i] where A is an array, the frontend will have decayed the // base to be a ArrayToPointerDecay implicit cast. While correct, it is // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a // "gep x, i" here. Emit one "gep A, 0, i". 
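// Illustrative IR for "int A[10]; ... A[i]" with this folding:
//   %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* %A, i64 0, i64 %i
// instead of a separate decay "gep [10 x i32]* %A, 0, 0" followed by a second
// "gep i32* %decay, %i".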
assert(Array->getType()->isArrayType() && "Array to pointer decay must have array source type!"); LValue ArrayLV; // For simple multidimensional array indexing, set the 'accessed' flag for // better bounds-checking of the base expression. if (const auto *ASE = dyn_cast(Array)) ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true); else ArrayLV = EmitLValue(Array); auto *Idx = EmitIdxAfterBase(/*Promote*/true); // Propagate the alignment from the array itself to the result. QualType arrayType = Array->getType(); Addr = emitArraySubscriptGEP( *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx}, E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices, E->getExprLoc(), &arrayType, E->getBase()); EltBaseInfo = ArrayLV.getBaseInfo(); EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType()); } else { // The base must be a pointer; emit it with an estimate of its alignment. Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo); auto *Idx = EmitIdxAfterBase(/*Promote*/true); QualType ptrType = E->getBase()->getType(); Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices, E->getExprLoc(), &ptrType, E->getBase()); } LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo); if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) { LV.setNonGC(!E->isOBJCGCCandidate(getContext())); setObjCGCLValueClass(getContext(), E, LV); } return LV; } LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) { assert( !E->isIncomplete() && "incomplete matrix subscript expressions should be rejected during Sema"); LValue Base = EmitLValue(E->getBase()); llvm::Value *RowIdx = EmitScalarExpr(E->getRowIdx()); llvm::Value *ColIdx = EmitScalarExpr(E->getColumnIdx()); llvm::Value *NumRows = Builder.getIntN( RowIdx->getType()->getScalarSizeInBits(), E->getBase()->getType()->getAs()->getNumRows()); llvm::Value *FinalIdx = Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx); return LValue::MakeMatrixElt( MaybeConvertMatrixAddress(Base.getAddress(*this), *this), FinalIdx, E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo()); } static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base, LValueBaseInfo &BaseInfo, TBAAAccessInfo &TBAAInfo, QualType BaseTy, QualType ElTy, bool IsLowerBound) { LValue BaseLVal; if (auto *ASE = dyn_cast(Base->IgnoreParenImpCasts())) { BaseLVal = CGF.EmitOMPArraySectionExpr(ASE, IsLowerBound); if (BaseTy->isArrayType()) { Address Addr = BaseLVal.getAddress(CGF); BaseInfo = BaseLVal.getBaseInfo(); // If the array type was an incomplete type, we need to make sure // the decay ends up being the right type. llvm::Type *NewTy = CGF.ConvertType(BaseTy); Addr = CGF.Builder.CreateElementBitCast(Addr, NewTy); // Note that VLA pointers are always decayed, so we don't need to do // anything here. 
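// The sections handled by this helper and by EmitOMPArraySectionExpr come from
// clauses such as (illustrative):
//   #pragma omp target map(a[2:8])   // lower bound 2, length 8
//   #pragma omp target map(p[:n])    // lower bound defaults to 0, length n
//   #pragma omp target map(b[1:])    // length may be omitted when b's size is known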
if (!BaseTy->isVariableArrayType()) { assert(isa(Addr.getElementType()) && "Expected pointer to array"); Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay"); } return CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(ElTy)); } LValueBaseInfo TypeBaseInfo; TBAAAccessInfo TypeTBAAInfo; CharUnits Align = CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo); BaseInfo.mergeForCast(TypeBaseInfo); TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo); return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)), Align); } return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo); } LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E, bool IsLowerBound) { QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(E->getBase()); QualType ResultExprTy; if (auto *AT = getContext().getAsArrayType(BaseTy)) ResultExprTy = AT->getElementType(); else ResultExprTy = BaseTy->getPointeeType(); llvm::Value *Idx = nullptr; if (IsLowerBound || E->getColonLocFirst().isInvalid()) { // Requesting lower bound or upper bound, but without provided length and // without ':' symbol for the default length -> length = 1. // Idx = LowerBound ?: 0; if (auto *LowerBound = E->getLowerBound()) { Idx = Builder.CreateIntCast( EmitScalarExpr(LowerBound), IntPtrTy, LowerBound->getType()->hasSignedIntegerRepresentation()); } else Idx = llvm::ConstantInt::getNullValue(IntPtrTy); } else { // Try to emit length or lower bound as constant. If this is possible, 1 // is subtracted from constant length or lower bound. Otherwise, emit LLVM // IR (LB + Len) - 1. auto &C = CGM.getContext(); auto *Length = E->getLength(); llvm::APSInt ConstLength; if (Length) { // Idx = LowerBound + Length - 1; if (Optional CL = Length->getIntegerConstantExpr(C)) { ConstLength = CL->zextOrTrunc(PointerWidthInBits); Length = nullptr; } auto *LowerBound = E->getLowerBound(); llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false); if (LowerBound) { if (Optional LB = LowerBound->getIntegerConstantExpr(C)) { ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits); LowerBound = nullptr; } } if (!Length) --ConstLength; else if (!LowerBound) --ConstLowerBound; if (Length || LowerBound) { auto *LowerBoundVal = LowerBound ? Builder.CreateIntCast( EmitScalarExpr(LowerBound), IntPtrTy, LowerBound->getType()->hasSignedIntegerRepresentation()) : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound); auto *LengthVal = Length ? Builder.CreateIntCast( EmitScalarExpr(Length), IntPtrTy, Length->getType()->hasSignedIntegerRepresentation()) : llvm::ConstantInt::get(IntPtrTy, ConstLength); Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len", /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined()); if (Length && LowerBound) { Idx = Builder.CreateSub( Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1", /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined()); } } else Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound); } else { // Idx = ArraySize - 1; QualType ArrayTy = BaseTy->isPointerType() ? 
E->getBase()->IgnoreParenImpCasts()->getType() : BaseTy; if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) { Length = VAT->getSizeExpr(); if (Optional L = Length->getIntegerConstantExpr(C)) { ConstLength = *L; Length = nullptr; } } else { auto *CAT = C.getAsConstantArrayType(ArrayTy); ConstLength = CAT->getSize(); } if (Length) { auto *LengthVal = Builder.CreateIntCast( EmitScalarExpr(Length), IntPtrTy, Length->getType()->hasSignedIntegerRepresentation()); Idx = Builder.CreateSub( LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1", /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined()); } else { ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits); --ConstLength; Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength); } } } assert(Idx); Address EltPtr = Address::invalid(); LValueBaseInfo BaseInfo; TBAAAccessInfo TBAAInfo; if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) { // The base must be a pointer, which is not an aggregate. Emit // it. It needs to be emitted first in case it's what captures // the VLA bounds. Address Base = emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy, VLA->getElementType(), IsLowerBound); // The element count here is the total number of non-VLA elements. llvm::Value *NumElements = getVLASize(VLA).NumElts; // Effectively, the multiply by the VLA size is part of the GEP. // GEP indexes are signed, and scaling an index isn't permitted to // signed-overflow, so we use the same semantics for our explicit // multiply. We suppress this if overflow is not undefined behavior. if (getLangOpts().isSignedOverflowDefined()) Idx = Builder.CreateMul(Idx, NumElements); else Idx = Builder.CreateNSWMul(Idx, NumElements); EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(), !getLangOpts().isSignedOverflowDefined(), /*signedIndices=*/false, E->getExprLoc()); } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) { // If this is A[i] where A is an array, the frontend will have decayed the // base to be a ArrayToPointerDecay implicit cast. While correct, it is // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a // "gep x, i" here. Emit one "gep A, 0, i". assert(Array->getType()->isArrayType() && "Array to pointer decay must have array source type!"); LValue ArrayLV; // For simple multidimensional array indexing, set the 'accessed' flag for // better bounds-checking of the base expression. if (const auto *ASE = dyn_cast(Array)) ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true); else ArrayLV = EmitLValue(Array); // Propagate the alignment from the array itself to the result. EltPtr = emitArraySubscriptGEP( *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx}, ResultExprTy, !getLangOpts().isSignedOverflowDefined(), /*signedIndices=*/false, E->getExprLoc()); BaseInfo = ArrayLV.getBaseInfo(); TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy); } else { Address Base = emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy, ResultExprTy, IsLowerBound); EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy, !getLangOpts().isSignedOverflowDefined(), /*signedIndices=*/false, E->getExprLoc()); } return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo); } LValue CodeGenFunction:: EmitExtVectorElementExpr(const ExtVectorElementExpr *E) { // Emit the base vector as an l-value. LValue Base; // ExtVectorElementExpr's base can either be a vector or pointer to vector. 
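// Illustrative, with "typedef float float4 __attribute__((ext_vector_type(4)));":
//   float4 v;  v.xy = v.wz;   // base is a vector glvalue
//   float4 *p; p->lo = v.hi;  // base is a pointer to a vector (the "arrow" form)
//   (v + v).odd;              // base is an rvalue, spilled to a temporary below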
if (E->isArrow()) { // If it is a pointer to a vector, emit the address and form an lvalue with // it. LValueBaseInfo BaseInfo; TBAAAccessInfo TBAAInfo; Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo); const auto *PT = E->getBase()->getType()->castAs(); Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo); Base.getQuals().removeObjCGCAttr(); } else if (E->getBase()->isGLValue()) { // Otherwise, if the base is an lvalue ( as in the case of foo.x.x), // emit the base as an lvalue. assert(E->getBase()->getType()->isVectorType()); Base = EmitLValue(E->getBase()); } else { // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such. assert(E->getBase()->getType()->isVectorType() && "Result must be a vector"); llvm::Value *Vec = EmitScalarExpr(E->getBase()); // Store the vector to memory (because LValue wants an address). Address VecMem = CreateMemTemp(E->getBase()->getType()); Builder.CreateStore(Vec, VecMem); Base = MakeAddrLValue(VecMem, E->getBase()->getType(), AlignmentSource::Decl); } QualType type = E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers()); // Encode the element access list into a vector of unsigned indices. SmallVector Indices; E->getEncodedElementAccess(Indices); if (Base.isSimple()) { llvm::Constant *CV = llvm::ConstantDataVector::get(getLLVMContext(), Indices); return LValue::MakeExtVectorElt(Base.getAddress(*this), CV, type, Base.getBaseInfo(), TBAAAccessInfo()); } assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!"); llvm::Constant *BaseElts = Base.getExtVectorElts(); SmallVector CElts; for (unsigned i = 0, e = Indices.size(); i != e; ++i) CElts.push_back(BaseElts->getAggregateElement(Indices[i])); llvm::Constant *CV = llvm::ConstantVector::get(CElts); return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type, Base.getBaseInfo(), TBAAAccessInfo()); } LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) { EmitIgnoredExpr(E->getBase()); return EmitDeclRefLValue(DRE); } Expr *BaseExpr = E->getBase(); // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. LValue BaseLV; if (E->isArrow()) { LValueBaseInfo BaseInfo; TBAAAccessInfo TBAAInfo; Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo); QualType PtrTy = BaseExpr->getType()->getPointeeType(); SanitizerSet SkippedChecks; bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr); if (IsBaseCXXThis) SkippedChecks.set(SanitizerKind::Alignment, true); if (IsBaseCXXThis || isa(BaseExpr)) SkippedChecks.set(SanitizerKind::Null, true); EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy, /*Alignment=*/CharUnits::Zero(), SkippedChecks); BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo); } else BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess); NamedDecl *ND = E->getMemberDecl(); if (auto *Field = dyn_cast(ND)) { LValue LV = EmitLValueForField(BaseLV, Field); setObjCGCLValueClass(getContext(), E, LV); if (getLangOpts().OpenMP) { // If the member was explicitly marked as nontemporal, mark it as // nontemporal. If the base lvalue is marked as nontemporal, mark access // to children as nontemporal too. 
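      // Illustrative: under "#pragma omp simd nontemporal(a)", accesses to 'a'
      // (and, via the propagation here, to members reached through it) are
      // emitted with !nontemporal metadata on the loads and stores.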
      if ((IsWrappedCXXThis(BaseExpr) &&
           CGM.getOpenMPRuntime().isNontemporalDecl(Field)) ||
          BaseLV.isNontemporal())
        LV.setNontemporal(/*Value=*/true);
    }
    return LV;
  }

  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  llvm_unreachable("Unhandled member declaration!");
}

/// Given that we are currently emitting a lambda, emit an l-value for
/// one of its members.
LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
  assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent()->isLambda());
  assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent() == Field->getParent());
  QualType LambdaTagType =
    getContext().getTagDeclType(Field->getParent());
  LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue, LambdaTagType);
  return EmitLValueForField(LambdaLV, Field);
}

/// Get the field index in the debug info. The debug info structure/union
/// will ignore the unnamed bitfields.
unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec,
                                             unsigned FieldIndex) {
  unsigned I = 0, Skipped = 0;

  for (auto F : Rec->getDefinition()->fields()) {
    if (I == FieldIndex)
      break;
    if (F->isUnnamedBitfield())
      Skipped++;
    I++;
  }

  return FieldIndex - Skipped;
}

/// Get the address of a zero-sized field within a record. The resulting
/// address doesn't necessarily have the right type.
static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
                                       const FieldDecl *Field) {
  CharUnits Offset = CGF.getContext().toCharUnitsFromBits(
      CGF.getContext().getFieldOffset(Field));
  if (Offset.isZero())
    return Base;
  Base = CGF.Builder.CreateElementBitCast(Base, CGF.Int8Ty);
  return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
}

/// Drill down to the storage of a field without walking into
/// reference types.
///
/// The resulting address doesn't necessarily have the right type.
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base, const FieldDecl *field) { if (field->isZeroSize(CGF.getContext())) return emitAddrOfZeroSizeField(CGF, base, field); const RecordDecl *rec = field->getParent(); unsigned idx = CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field); return CGF.Builder.CreateStructGEP(base, idx, field->getName()); } static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base, Address addr, const FieldDecl *field) { const RecordDecl *rec = field->getParent(); llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType( base.getType(), rec->getLocation()); unsigned idx = CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field); return CGF.Builder.CreatePreserveStructAccessIndex( addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo); } static bool hasAnyVptr(const QualType Type, const ASTContext &Context) { const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl(); if (!RD) return false; if (RD->isDynamicClass()) return true; for (const auto &Base : RD->bases()) if (hasAnyVptr(Base.getType(), Context)) return true; for (const FieldDecl *Field : RD->fields()) if (hasAnyVptr(Field->getType(), Context)) return true; return false; } LValue CodeGenFunction::EmitLValueForField(LValue base, const FieldDecl *field) { LValueBaseInfo BaseInfo = base.getBaseInfo(); if (field->isBitField()) { const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(field->getParent()); const CGBitFieldInfo &Info = RL.getBitFieldInfo(field); Address Addr = base.getAddress(*this); unsigned Idx = RL.getLLVMFieldNo(field); const RecordDecl *rec = field->getParent(); if (!IsInPreservedAIRegion && (!getDebugInfo() || !rec->hasAttr())) { if (Idx != 0) // For structs, we GEP to the field that the record layout suggests. Addr = Builder.CreateStructGEP(Addr, Idx, field->getName()); } else { llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType( getContext().getRecordType(rec), rec->getLocation()); Addr = Builder.CreatePreserveStructAccessIndex(Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo); } // Get the access type. llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), Info.StorageSize); if (Addr.getElementType() != FieldIntTy) Addr = Builder.CreateElementBitCast(Addr, FieldIntTy); QualType fieldType = field->getType().withCVRQualifiers(base.getVRQualifiers()); // TODO: Support TBAA for bit fields. LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource()); return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo, TBAAAccessInfo()); } // Fields of may-alias structures are may-alias themselves. // FIXME: this should get propagated down through anonymous structs // and unions. QualType FieldType = field->getType(); const RecordDecl *rec = field->getParent(); AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource(); LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource)); TBAAAccessInfo FieldTBAAInfo; if (base.getTBAAInfo().isMayAlias() || rec->hasAttr() || FieldType->isVectorType()) { FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo(); } else if (rec->isUnion()) { // TODO: Support TBAA for unions. FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo(); } else { // If no base type been assigned for the base access, then try to generate // one for this base lvalue. 
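// Illustrative: for "struct S { int a; float b; } s; s.b = 1;" the access is
// described by a struct-path TBAA tag with base type 'S', access type 'float'
// and (on a typical target with 4-byte int) offset 4 -- exactly the
// BaseType/Offset/AccessType/Size fields filled in below.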
FieldTBAAInfo = base.getTBAAInfo(); if (!FieldTBAAInfo.BaseType) { FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType()); assert(!FieldTBAAInfo.Offset && "Nonzero offset for an access with no base type!"); } // Adjust offset to be relative to the base type. const ASTRecordLayout &Layout = getContext().getASTRecordLayout(field->getParent()); unsigned CharWidth = getContext().getCharWidth(); if (FieldTBAAInfo.BaseType) FieldTBAAInfo.Offset += Layout.getFieldOffset(field->getFieldIndex()) / CharWidth; // Update the final access type and size. FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType); FieldTBAAInfo.Size = getContext().getTypeSizeInChars(FieldType).getQuantity(); } Address addr = base.getAddress(*this); if (auto *ClassDef = dyn_cast(rec)) { if (CGM.getCodeGenOpts().StrictVTablePointers && ClassDef->isDynamicClass()) { // Getting to any field of dynamic object requires stripping dynamic // information provided by invariant.group. This is because accessing // fields may leak the real address of dynamic object, which could result // in miscompilation when leaked pointer would be compared. auto *stripped = Builder.CreateStripInvariantGroup(addr.getPointer()); addr = Address(stripped, addr.getAlignment()); } } unsigned RecordCVR = base.getVRQualifiers(); if (rec->isUnion()) { // For unions, there is no pointer adjustment. if (CGM.getCodeGenOpts().StrictVTablePointers && hasAnyVptr(FieldType, getContext())) // Because unions can easily skip invariant.barriers, we need to add // a barrier every time CXXRecord field with vptr is referenced. addr = Address(Builder.CreateLaunderInvariantGroup(addr.getPointer()), addr.getAlignment()); if (IsInPreservedAIRegion || (getDebugInfo() && rec->hasAttr())) { // Remember the original union field index llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(), rec->getLocation()); addr = Address( Builder.CreatePreserveUnionAccessIndex( addr.getPointer(), getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo), addr.getAlignment()); } if (FieldType->isReferenceType()) addr = Builder.CreateElementBitCast( addr, CGM.getTypes().ConvertTypeForMem(FieldType), field->getName()); } else { if (!IsInPreservedAIRegion && (!getDebugInfo() || !rec->hasAttr())) // For structs, we GEP to the field that the record layout suggests. addr = emitAddrOfFieldStorage(*this, addr, field); else // Remember the original struct field index addr = emitPreserveStructAccess(*this, base, addr, field); } // If this is a reference field, load the reference right now. if (FieldType->isReferenceType()) { LValue RefLVal = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo); if (RecordCVR & Qualifiers::Volatile) RefLVal.getQuals().addVolatile(); addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo); // Qualifiers on the struct don't apply to the referencee. RecordCVR = 0; FieldType = FieldType->getPointeeType(); } // Make sure that the address is pointing to the right type. This is critical // for both unions and structs. A union needs a bitcast, a struct element // will need a bitcast if the LLVM type laid out doesn't match the desired // type. addr = Builder.CreateElementBitCast( addr, CGM.getTypes().ConvertTypeForMem(FieldType), field->getName()); if (field->hasAttr()) addr = EmitFieldAnnotations(field, addr); LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo); LV.getQuals().addCVRQualifiers(RecordCVR); // __weak attribute on a field is ignored. 
if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak) LV.getQuals().removeObjCGCAttr(); return LV; } LValue CodeGenFunction::EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field) { QualType FieldType = Field->getType(); if (!FieldType->isReferenceType()) return EmitLValueForField(Base, Field); Address V = emitAddrOfFieldStorage(*this, Base.getAddress(*this), Field); // Make sure that the address is pointing to the right type. llvm::Type *llvmType = ConvertTypeForMem(FieldType); V = Builder.CreateElementBitCast(V, llvmType, Field->getName()); // TODO: Generate TBAA information that describes this access as a structure // member access and not just an access to an object of the field's type. This // should be similar to what we do in EmitLValueForField(). LValueBaseInfo BaseInfo = Base.getBaseInfo(); AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource(); LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource)); return MakeAddrLValue(V, FieldType, FieldBaseInfo, CGM.getTBAAInfoForSubobject(Base, FieldType)); } LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){ if (E->isFileScope()) { ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E); return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl); } if (E->getType()->isVariablyModifiedType()) // make sure to emit the VLA size. EmitVariablyModifiedType(E->getType()); Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral"); const Expr *InitExpr = E->getInitializer(); LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl); EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(), /*Init*/ true); // Block-scope compound literals are destroyed at the end of the enclosing // scope in C. if (!getLangOpts().CPlusPlus) if (QualType::DestructionKind DtorKind = E->getType().isDestructedType()) pushLifetimeExtendedDestroy(getCleanupKind(DtorKind), DeclPtr, E->getType(), getDestroyer(DtorKind), DtorKind & EHCleanup); return Result; } LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) { if (!E->isGLValue()) // Initializing an aggregate temporary in C++11: T{...}. return EmitAggExprToLValue(E); // An lvalue initializer list must be initializing a reference. assert(E->isTransparent() && "non-transparent glvalue init list"); return EmitLValue(E->getInit(0)); } /// Emit the operand of a glvalue conditional operator. This is either a glvalue /// or a (possibly-parenthesized) throw-expression. If this is a throw, no /// LValue is returned and the current block has been terminated. static Optional EmitLValueOrThrowExpression(CodeGenFunction &CGF, const Expr *Operand) { if (auto *ThrowExpr = dyn_cast(Operand->IgnoreParens())) { CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false); return None; } return CGF.EmitLValue(Operand); } LValue CodeGenFunction:: EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) { if (!expr->isGLValue()) { // ?: here should be an aggregate. assert(hasAggregateEvaluationKind(expr->getType()) && "Unexpected conditional operator!"); return EmitAggExprToLValue(expr); } OpaqueValueMapping binding(*this, expr); const Expr *condExpr = expr->getCond(); bool CondExprBool; if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) { const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr(); if (!CondExprBool) std::swap(live, dead); if (!ContainsLabel(dead)) { // If the true case is live, we need to track its region. 
if (CondExprBool) incrementProfileCounter(expr); // If a throw expression we emit it and return an undefined lvalue // because it can't be used. if (auto *ThrowExpr = dyn_cast(live->IgnoreParens())) { EmitCXXThrowExpr(ThrowExpr); llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(dead->getType())); return MakeAddrLValue( Address(llvm::UndefValue::get(Ty), CharUnits::One()), dead->getType()); } return EmitLValue(live); } } llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true"); llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false"); llvm::BasicBlock *contBlock = createBasicBlock("cond.end"); ConditionalEvaluation eval(*this); EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock, getProfileCount(expr)); // Any temporaries created here are conditional. EmitBlock(lhsBlock); incrementProfileCounter(expr); eval.begin(*this); Optional lhs = EmitLValueOrThrowExpression(*this, expr->getTrueExpr()); eval.end(*this); if (lhs && !lhs->isSimple()) return EmitUnsupportedLValue(expr, "conditional operator"); lhsBlock = Builder.GetInsertBlock(); if (lhs) Builder.CreateBr(contBlock); // Any temporaries created here are conditional. EmitBlock(rhsBlock); eval.begin(*this); Optional rhs = EmitLValueOrThrowExpression(*this, expr->getFalseExpr()); eval.end(*this); if (rhs && !rhs->isSimple()) return EmitUnsupportedLValue(expr, "conditional operator"); rhsBlock = Builder.GetInsertBlock(); EmitBlock(contBlock); if (lhs && rhs) { llvm::PHINode *phi = Builder.CreatePHI(lhs->getPointer(*this)->getType(), 2, "cond-lvalue"); phi->addIncoming(lhs->getPointer(*this), lhsBlock); phi->addIncoming(rhs->getPointer(*this), rhsBlock); Address result(phi, std::min(lhs->getAlignment(), rhs->getAlignment())); AlignmentSource alignSource = std::max(lhs->getBaseInfo().getAlignmentSource(), rhs->getBaseInfo().getAlignmentSource()); TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator( lhs->getTBAAInfo(), rhs->getTBAAInfo()); return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource), TBAAInfo); } else { assert((lhs || rhs) && "both operands of glvalue conditional are throw-expressions?"); return lhs ? *lhs : *rhs; } } /// EmitCastLValue - Casts are never lvalues unless that cast is to a reference /// type. If the cast is to a reference, we can have the usual lvalue result, /// otherwise if a cast is needed by the code generator in an lvalue context, /// then it must mean that we need the address of an aggregate in order to /// access one of its members. This can happen for all the reasons that casts /// are permitted with aggregate result, including noop aggregate casts, and /// cast from scalar to union. 
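/// For example (illustrative): binding "const Base &b = derived;" needs the
/// derived-to-base adjusted address of the object, and the GNU extension
/// "(union U)x" yields an aggregate lvalue via the scalar-to-union path.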
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { switch (E->getCastKind()) { case CK_ToVoid: case CK_BitCast: case CK_LValueToRValueBitCast: case CK_ArrayToPointerDecay: case CK_FunctionToPointerDecay: case CK_NullToMemberPointer: case CK_NullToPointer: case CK_IntegralToPointer: case CK_PointerToIntegral: case CK_PointerToBoolean: case CK_VectorSplat: case CK_IntegralCast: case CK_BooleanToSignedIntegral: case CK_IntegralToBoolean: case CK_IntegralToFloating: case CK_FloatingToIntegral: case CK_FloatingToBoolean: case CK_FloatingCast: case CK_FloatingRealToComplex: case CK_FloatingComplexToReal: case CK_FloatingComplexToBoolean: case CK_FloatingComplexCast: case CK_FloatingComplexToIntegralComplex: case CK_IntegralRealToComplex: case CK_IntegralComplexToReal: case CK_IntegralComplexToBoolean: case CK_IntegralComplexCast: case CK_IntegralComplexToFloatingComplex: case CK_DerivedToBaseMemberPointer: case CK_BaseToDerivedMemberPointer: case CK_MemberPointerToBoolean: case CK_ReinterpretMemberPointer: case CK_AnyPointerToBlockPointerCast: case CK_ARCProduceObject: case CK_ARCConsumeObject: case CK_ARCReclaimReturnedObject: case CK_ARCExtendBlockObject: case CK_CopyAndAutoreleaseBlockObject: case CK_IntToOCLSampler: case CK_FixedPointCast: case CK_FixedPointToBoolean: case CK_FixedPointToIntegral: case CK_IntegralToFixedPoint: return EmitUnsupportedLValue(E, "unexpected cast lvalue"); case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!"); case CK_BuiltinFnToFnPtr: llvm_unreachable("builtin functions are handled elsewhere"); // These are never l-values; just use the aggregate emission code. case CK_NonAtomicToAtomic: case CK_AtomicToNonAtomic: return EmitAggExprToLValue(E); case CK_Dynamic: { LValue LV = EmitLValue(E->getSubExpr()); Address V = LV.getAddress(*this); const auto *DCE = cast(E); return MakeNaturalAlignAddrLValue(EmitDynamicCast(V, DCE), E->getType()); } case CK_ConstructorConversion: case CK_UserDefinedConversion: case CK_CPointerToObjCPointerCast: case CK_BlockPointerToObjCPointerCast: case CK_NoOp: case CK_LValueToRValue: return EmitLValue(E->getSubExpr()); case CK_UncheckedDerivedToBase: case CK_DerivedToBase: { const auto *DerivedClassTy = E->getSubExpr()->getType()->castAs(); auto *DerivedClassDecl = cast(DerivedClassTy->getDecl()); LValue LV = EmitLValue(E->getSubExpr()); Address This = LV.getAddress(*this); // Perform the derived-to-base conversion Address Base = GetAddressOfBaseClass( This, DerivedClassDecl, E->path_begin(), E->path_end(), /*NullCheckValue=*/false, E->getExprLoc()); // TODO: Support accesses to members of base classes in TBAA. For now, we // conservatively pretend that the complete object is of the base class // type. return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(), CGM.getTBAAInfoForSubobject(LV, E->getType())); } case CK_ToUnion: return EmitAggExprToLValue(E); case CK_BaseToDerived: { const auto *DerivedClassTy = E->getType()->castAs(); auto *DerivedClassDecl = cast(DerivedClassTy->getDecl()); LValue LV = EmitLValue(E->getSubExpr()); // Perform the base-to-derived conversion Address Derived = GetAddressOfDerivedClass( LV.getAddress(*this), DerivedClassDecl, E->path_begin(), E->path_end(), /*NullCheckValue=*/false); // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is // performed and the object is not of the derived type. 
if (sanitizePerformTypeCheck()) EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(), Derived.getPointer(), E->getType()); if (SanOpts.has(SanitizerKind::CFIDerivedCast)) EmitVTablePtrCheckForCast(E->getType(), Derived.getPointer(), /*MayBeNull=*/false, CFITCK_DerivedCast, E->getBeginLoc()); return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(), CGM.getTBAAInfoForSubobject(LV, E->getType())); } case CK_LValueBitCast: { // This must be a reinterpret_cast (or c-style equivalent). const auto *CE = cast(E); CGM.EmitExplicitCastExprType(CE, this); LValue LV = EmitLValue(E->getSubExpr()); Address V = Builder.CreateBitCast(LV.getAddress(*this), ConvertType(CE->getTypeAsWritten())); if (SanOpts.has(SanitizerKind::CFIUnrelatedCast)) EmitVTablePtrCheckForCast(E->getType(), V.getPointer(), /*MayBeNull=*/false, CFITCK_UnrelatedCast, E->getBeginLoc()); return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(), CGM.getTBAAInfoForSubobject(LV, E->getType())); } case CK_AddressSpaceConversion: { LValue LV = EmitLValue(E->getSubExpr()); QualType DestTy = getContext().getPointerType(E->getType()); llvm::Value *V = getTargetHooks().performAddrSpaceCast( *this, LV.getPointer(*this), E->getSubExpr()->getType().getAddressSpace(), E->getType().getAddressSpace(), ConvertType(DestTy)); return MakeAddrLValue(Address(V, LV.getAddress(*this).getAlignment()), E->getType(), LV.getBaseInfo(), LV.getTBAAInfo()); } case CK_ObjCObjectLValueCast: { LValue LV = EmitLValue(E->getSubExpr()); Address V = Builder.CreateElementBitCast(LV.getAddress(*this), ConvertType(E->getType())); return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(), CGM.getTBAAInfoForSubobject(LV, E->getType())); } case CK_ZeroToOCLOpaqueType: llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid"); } llvm_unreachable("Unhandled lvalue cast kind?"); } LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) { assert(OpaqueValueMappingData::shouldBindAsLValue(e)); return getOrCreateOpaqueLValueMapping(e); } LValue CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) { assert(OpaqueValueMapping::shouldBindAsLValue(e)); llvm::DenseMap::iterator it = OpaqueLValues.find(e); if (it != OpaqueLValues.end()) return it->second; assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted"); return EmitLValue(e->getSourceExpr()); } RValue CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) { assert(!OpaqueValueMapping::shouldBindAsLValue(e)); llvm::DenseMap::iterator it = OpaqueRValues.find(e); if (it != OpaqueRValues.end()) return it->second; assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted"); return EmitAnyExpr(e->getSourceExpr()); } RValue CodeGenFunction::EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc) { QualType FT = FD->getType(); LValue FieldLV = EmitLValueForField(LV, FD); switch (getEvaluationKind(FT)) { case TEK_Complex: return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc)); case TEK_Aggregate: return FieldLV.asAggregateRValue(*this); case TEK_Scalar: // This routine is used to load fields one-by-one to perform a copy, so // don't load reference fields. if (FD->getType()->isReferenceType()) return RValue::get(FieldLV.getPointer(*this)); // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a // primitive load. 
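    // Illustrative: for "struct S { unsigned x : 3; } s;" copying s.x loads the
    // whole bit-field storage unit and extracts the bits, so the generic
    // EmitLoadOfLValue path is used instead of a plain scalar load.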
if (FieldLV.isBitField()) return EmitLoadOfLValue(FieldLV, Loc); return RValue::get(EmitLoadOfScalar(FieldLV, Loc)); } llvm_unreachable("bad evaluation kind"); } //===--------------------------------------------------------------------===// // Expression Emission //===--------------------------------------------------------------------===// RValue CodeGenFunction::EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue) { // Builtins never have block type. if (E->getCallee()->getType()->isBlockPointerType()) return EmitBlockCallExpr(E, ReturnValue); if (const auto *CE = dyn_cast(E)) return EmitCXXMemberCallExpr(CE, ReturnValue); if (const auto *CE = dyn_cast(E)) return EmitCUDAKernelCallExpr(CE, ReturnValue); if (const auto *CE = dyn_cast(E)) if (const CXXMethodDecl *MD = dyn_cast_or_null(CE->getCalleeDecl())) return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue); CGCallee callee = EmitCallee(E->getCallee()); if (callee.isBuiltin()) { return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), E, ReturnValue); } if (callee.isPseudoDestructor()) { return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr()); } return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue); } /// Emit a CallExpr without considering whether it might be a subclass. RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue) { CGCallee Callee = EmitCallee(E->getCallee()); return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue); } static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) { const FunctionDecl *FD = cast(GD.getDecl()); if (auto builtinID = FD->getBuiltinID()) { // Replaceable builtin provide their own implementation of a builtin. Unless // we are in the builtin implementation itself, don't call the actual // builtin. If we are in the builtin implementation, avoid trivial infinite // recursion. if (!FD->isInlineBuiltinDeclaration() || CGF.CurFn->getName() == FD->getName()) return CGCallee::forBuiltin(builtinID, FD); } llvm::Constant *calleePtr = EmitFunctionDeclPointer(CGF.CGM, GD); return CGCallee::forDirect(calleePtr, GD); } CGCallee CodeGenFunction::EmitCallee(const Expr *E) { E = E->IgnoreParens(); // Look through function-to-pointer decay. if (auto ICE = dyn_cast(E)) { if (ICE->getCastKind() == CK_FunctionToPointerDecay || ICE->getCastKind() == CK_BuiltinFnToFnPtr) { return EmitCallee(ICE->getSubExpr()); } // Resolve direct calls. } else if (auto DRE = dyn_cast(E)) { if (auto FD = dyn_cast(DRE->getDecl())) { return EmitDirectCallee(*this, FD); } } else if (auto ME = dyn_cast(E)) { if (auto FD = dyn_cast(ME->getMemberDecl())) { EmitIgnoredExpr(ME->getBase()); return EmitDirectCallee(*this, FD); } // Look through template substitutions. } else if (auto NTTP = dyn_cast(E)) { return EmitCallee(NTTP->getReplacement()); // Treat pseudo-destructor calls differently. } else if (auto PDE = dyn_cast(E)) { return CGCallee::forPseudoDestructor(PDE); } // Otherwise, we have an indirect reference. 
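// Illustrative: "void (*fp)(int); ... fp(1);" lands here -- the callee is an
// arbitrary pointer-valued expression rather than a resolvable declaration, so
// we just evaluate it and record the function type for the call lowering.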
llvm::Value *calleePtr; QualType functionType; if (auto ptrType = E->getType()->getAs()) { calleePtr = EmitScalarExpr(E); functionType = ptrType->getPointeeType(); } else { functionType = E->getType(); calleePtr = EmitLValue(E).getPointer(*this); } assert(functionType->isFunctionType()); GlobalDecl GD; if (const auto *VD = dyn_cast_or_null(E->getReferencedDeclOfCallee())) GD = GlobalDecl(VD); CGCalleeInfo calleeInfo(functionType->getAs(), GD); CGCallee callee(calleeInfo, calleePtr); return callee; } LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) { // Comma expressions just emit their LHS then their RHS as an l-value. if (E->getOpcode() == BO_Comma) { EmitIgnoredExpr(E->getLHS()); EnsureInsertPoint(); return EmitLValue(E->getRHS()); } if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI) return EmitPointerToDataMemberBinaryExpr(E); assert(E->getOpcode() == BO_Assign && "unexpected binary l-value"); // Note that in all of these cases, __block variables need the RHS // evaluated first just in case the variable gets moved by the RHS. switch (getEvaluationKind(E->getType())) { case TEK_Scalar: { switch (E->getLHS()->getType().getObjCLifetime()) { case Qualifiers::OCL_Strong: return EmitARCStoreStrong(E, /*ignored*/ false).first; case Qualifiers::OCL_Autoreleasing: return EmitARCStoreAutoreleasing(E).first; // No reason to do any of these differently. case Qualifiers::OCL_None: case Qualifiers::OCL_ExplicitNone: case Qualifiers::OCL_Weak: break; } RValue RV = EmitAnyExpr(E->getRHS()); LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store); if (RV.isScalar()) EmitNullabilityCheck(LV, RV.getScalarVal(), E->getExprLoc()); EmitStoreThroughLValue(RV, LV); if (getLangOpts().OpenMP) CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this, E->getLHS()); return LV; } case TEK_Complex: return EmitComplexAssignmentLValue(E); case TEK_Aggregate: return EmitAggExprToLValue(E); } llvm_unreachable("bad evaluation kind"); } LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) { RValue RV = EmitCallExpr(E); if (!RV.isScalar()) return MakeAddrLValue(RV.getAggregateAddress(), E->getType(), AlignmentSource::Decl); assert(E->getCallReturnType(getContext())->isReferenceType() && "Can't have a scalar return unless the return type is a " "reference type!"); return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType()); } LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) { // FIXME: This shouldn't require another copy. 
return EmitAggExprToLValue(E); } LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) { assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor() && "binding l-value to type which needs a temporary"); AggValueSlot Slot = CreateAggTemp(E->getType()); EmitCXXConstructExpr(E, Slot); return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl); } LValue CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) { return MakeNaturalAlignAddrLValue(EmitCXXTypeidExpr(E), E->getType()); } Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) { return Builder.CreateElementBitCast(CGM.GetAddrOfMSGuidDecl(E->getGuidDecl()), ConvertType(E->getType())); } LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) { return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(), AlignmentSource::Decl); } LValue CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) { AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue"); Slot.setExternallyDestructed(); EmitAggExpr(E->getSubExpr(), Slot); EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress()); return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl); } LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) { RValue RV = EmitObjCMessageExpr(E); if (!RV.isScalar()) return MakeAddrLValue(RV.getAggregateAddress(), E->getType(), AlignmentSource::Decl); assert(E->getMethodDecl()->getReturnType()->isReferenceType() && "Can't have a scalar return unless the return type is a " "reference type!"); return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType()); } LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) { Address V = CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector()); return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl); } llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar) { return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar); } LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy, llvm::Value *BaseValue, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers) { return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue, Ivar, CVRQualifiers); } LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) { // FIXME: A lot of the code below could be shared with EmitMemberExpr. llvm::Value *BaseValue = nullptr; const Expr *BaseExpr = E->getBase(); Qualifiers BaseQuals; QualType ObjectTy; if (E->isArrow()) { BaseValue = EmitScalarExpr(BaseExpr); ObjectTy = BaseExpr->getType()->getPointeeType(); BaseQuals = ObjectTy.getQualifiers(); } else { LValue BaseLV = EmitLValue(BaseExpr); BaseValue = BaseLV.getPointer(*this); ObjectTy = BaseExpr->getType(); BaseQuals = ObjectTy.getQualifiers(); } LValue LV = EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(), BaseQuals.getCVRQualifiers()); setObjCGCLValueClass(getContext(), E, LV); return LV; } LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) { // Can only get l-value for message expression returning aggregate type RValue RV = EmitAnyExprToTemp(E); return MakeAddrLValue(RV.getAggregateAddress(), E->getType(), AlignmentSource::Decl); } RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Value *Chain) { // Get the actual function type. The callee type will always be a pointer to // function type or a block pointer type. 
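// Illustrative: with -fsanitize=function, a call such as
//   void f();  auto *fp = reinterpret_cast<void (*)(int)>(f);  fp(0);
// is diagnosed at the call site by comparing the RTTI pointer stored in the
// callee's prologue data against the RTTI for the static callee type, which is
// what the check emitted below does (assuming the callee was built with the
// same sanitizer so the prologue data is present).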
assert(CalleeType->isFunctionPointerType() && "Call must have function pointer type!"); const Decl *TargetDecl = OrigCallee.getAbstractInfo().getCalleeDecl().getDecl(); CalleeType = getContext().getCanonicalType(CalleeType); auto PointeeType = cast(CalleeType)->getPointeeType(); CGCallee Callee = OrigCallee; if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function) && (!TargetDecl || !isa(TargetDecl))) { if (llvm::Constant *PrefixSig = CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) { SanitizerScope SanScope(this); // Remove any (C++17) exception specifications, to allow calling e.g. a // noexcept function through a non-noexcept pointer. auto ProtoTy = getContext().getFunctionTypeWithExceptionSpec(PointeeType, EST_None); llvm::Constant *FTRTTIConst = CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true); llvm::Type *PrefixStructTyElems[] = {PrefixSig->getType(), Int32Ty}; llvm::StructType *PrefixStructTy = llvm::StructType::get( CGM.getLLVMContext(), PrefixStructTyElems, /*isPacked=*/true); llvm::Value *CalleePtr = Callee.getFunctionPointer(); llvm::Value *CalleePrefixStruct = Builder.CreateBitCast( CalleePtr, llvm::PointerType::getUnqual(PrefixStructTy)); llvm::Value *CalleeSigPtr = Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 0); llvm::Value *CalleeSig = Builder.CreateAlignedLoad(CalleeSigPtr, getIntAlign()); llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig); llvm::BasicBlock *Cont = createBasicBlock("cont"); llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck"); Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont); EmitBlock(TypeCheck); llvm::Value *CalleeRTTIPtr = Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 1); llvm::Value *CalleeRTTIEncoded = Builder.CreateAlignedLoad(CalleeRTTIPtr, getPointerAlign()); llvm::Value *CalleeRTTI = DecodeAddrUsedInPrologue(CalleePtr, CalleeRTTIEncoded); llvm::Value *CalleeRTTIMatch = Builder.CreateICmpEQ(CalleeRTTI, FTRTTIConst); llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()), EmitCheckTypeDescriptor(CalleeType)}; EmitCheck(std::make_pair(CalleeRTTIMatch, SanitizerKind::Function), SanitizerHandler::FunctionTypeMismatch, StaticData, {CalleePtr, CalleeRTTI, FTRTTIConst}); Builder.CreateBr(Cont); EmitBlock(Cont); } } const auto *FnType = cast(PointeeType); // If we are checking indirect calls and this call is indirect, check that the // function pointer is a member of the bit set for the function type. 
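// Illustrative: under -fsanitize=cfi-icall, "void (*fp)(int); fp(1);" lowers
// roughly to
//   %ok = call i1 @llvm.type.test(i8* %fp.i8, metadata !"_ZTSFviE")
//   br i1 %ok, label %cont, label %cfi.fail
// with the failure path reporting through the cfi_check_fail handler (or a
// trap / cross-DSO slow path, depending on the CFI mode).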
if (SanOpts.has(SanitizerKind::CFIICall) && (!TargetDecl || !isa(TargetDecl))) { SanitizerScope SanScope(this); EmitSanitizerStatReport(llvm::SanStat_CFI_ICall); llvm::Metadata *MD; if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers) MD = CGM.CreateMetadataIdentifierGeneralized(QualType(FnType, 0)); else MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0)); llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD); llvm::Value *CalleePtr = Callee.getFunctionPointer(); llvm::Value *CastedCallee = Builder.CreateBitCast(CalleePtr, Int8PtrTy); llvm::Value *TypeTest = Builder.CreateCall( CGM.getIntrinsic(llvm::Intrinsic::type_test), {CastedCallee, TypeId}); auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD); llvm::Constant *StaticData[] = { llvm::ConstantInt::get(Int8Ty, CFITCK_ICall), EmitCheckSourceLocation(E->getBeginLoc()), EmitCheckTypeDescriptor(QualType(FnType, 0)), }; if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) { EmitCfiSlowPathCheck(SanitizerKind::CFIICall, TypeTest, CrossDsoTypeId, CastedCallee, StaticData); } else { EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIICall), SanitizerHandler::CFICheckFail, StaticData, {CastedCallee, llvm::UndefValue::get(IntPtrTy)}); } } CallArgList Args; if (Chain) Args.add(RValue::get(Builder.CreateBitCast(Chain, CGM.VoidPtrTy)), CGM.getContext().VoidPtrTy); // C++17 requires that we evaluate arguments to a call using assignment syntax // right-to-left, and that we evaluate arguments to certain other operators // left-to-right. Note that we allow this to override the order dictated by // the calling convention on the MS ABI, which means that parameter // destruction order is not necessarily reverse construction order. // FIXME: Revisit this based on C++ committee response to unimplementability. EvaluationOrder Order = EvaluationOrder::Default; if (auto *OCE = dyn_cast(E)) { if (OCE->isAssignmentOp()) Order = EvaluationOrder::ForceRightToLeft; else { switch (OCE->getOperator()) { case OO_LessLess: case OO_GreaterGreater: case OO_AmpAmp: case OO_PipePipe: case OO_Comma: case OO_ArrowStar: Order = EvaluationOrder::ForceLeftToRight; break; default: break; } } } EmitCallArgs(Args, dyn_cast(FnType), E->arguments(), E->getDirectCallee(), /*ParamsToSkip*/ 0, Order); const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall( Args, FnType, /*ChainCall=*/Chain); // C99 6.5.2.2p6: // If the expression that denotes the called function has a type // that does not include a prototype, [the default argument // promotions are performed]. If the number of arguments does not // equal the number of parameters, the behavior is undefined. If // the function is defined with a type that includes a prototype, // and either the prototype ends with an ellipsis (, ...) or the // types of the arguments after promotion are not compatible with // the types of the parameters, the behavior is undefined. If the // function is defined with a type that does not include a // prototype, and the types of the arguments after promotion are // not compatible with those of the parameters after promotion, // the behavior is undefined [except in some trivial cases]. // That is, in the general case, we should assume that a call // through an unprototyped function type works like a *non-variadic* // call. The way we make this work is to cast to the exact type // of the promoted arguments. // // Chain calls use this same code path to add the invisible chain parameter // to the function type. 
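// Illustrative (C): given "int f(); ... f(1, 2.0);" the arguments are emitted
// with the default promotions and the callee is bitcast to "i32 (i32, double)",
// i.e. the unprototyped call is lowered as a non-variadic call of the promoted
// argument types, as described in the comment above.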
if (isa(FnType) || Chain) { llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo); int AS = Callee.getFunctionPointer()->getType()->getPointerAddressSpace(); CalleeTy = CalleeTy->getPointerTo(AS); llvm::Value *CalleePtr = Callee.getFunctionPointer(); CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast"); Callee.setFunctionPointer(CalleePtr); } llvm::CallBase *CallOrInvoke = nullptr; RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &CallOrInvoke, E->getExprLoc()); // Generate function declaration DISuprogram in order to be used // in debug info about call sites. if (CGDebugInfo *DI = getDebugInfo()) { if (auto *CalleeDecl = dyn_cast_or_null(TargetDecl)) DI->EmitFuncDeclForCallSite(CallOrInvoke, QualType(FnType, 0), CalleeDecl); } return Call; } LValue CodeGenFunction:: EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) { Address BaseAddr = Address::invalid(); if (E->getOpcode() == BO_PtrMemI) { BaseAddr = EmitPointerWithAlignment(E->getLHS()); } else { BaseAddr = EmitLValue(E->getLHS()).getAddress(*this); } llvm::Value *OffsetV = EmitScalarExpr(E->getRHS()); const auto *MPT = E->getRHS()->getType()->castAs(); LValueBaseInfo BaseInfo; TBAAAccessInfo TBAAInfo; Address MemberAddr = EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT, &BaseInfo, &TBAAInfo); return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo); } /// Given the address of a temporary variable, produce an r-value of /// its type. RValue CodeGenFunction::convertTempToRValue(Address addr, QualType type, SourceLocation loc) { LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl); switch (getEvaluationKind(type)) { case TEK_Complex: return RValue::getComplex(EmitLoadOfComplex(lvalue, loc)); case TEK_Aggregate: return lvalue.asAggregateRValue(*this); case TEK_Scalar: return RValue::get(EmitLoadOfScalar(lvalue, loc)); } llvm_unreachable("bad evaluation kind"); } void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) { assert(Val->getType()->isFPOrFPVectorTy()); if (Accuracy == 0.0 || !isa(Val)) return; llvm::MDBuilder MDHelper(getLLVMContext()); llvm::MDNode *Node = MDHelper.createFPMath(Accuracy); cast(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node); } namespace { struct LValueOrRValue { LValue LV; RValue RV; }; } static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF, const PseudoObjectExpr *E, bool forLValue, AggValueSlot slot) { SmallVector opaques; // Find the result expression, if any. const Expr *resultExpr = E->getResultExpr(); LValueOrRValue result; for (PseudoObjectExpr::const_semantics_iterator i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) { const Expr *semantic = *i; // If this semantic expression is an opaque value, bind it // to the result of its source expression. if (const auto *ov = dyn_cast(semantic)) { // Skip unique OVEs. if (ov->isUnique()) { assert(ov != resultExpr && "A unique OVE cannot be used as the result expression"); continue; } // If this is the result expression, we may need to evaluate // directly into the slot. typedef CodeGenFunction::OpaqueValueMappingData OVMA; OVMA opaqueData; if (ov == resultExpr && ov->isRValue() && !forLValue && CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) { CGF.EmitAggExpr(ov->getSourceExpr(), slot); LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(), AlignmentSource::Decl); opaqueData = OVMA::bind(CGF, ov, LV); result.RV = slot.asRValue(); // Otherwise, emit as normal. 
} else { opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr()); // If this is the result, also evaluate the result now. if (ov == resultExpr) { if (forLValue) result.LV = CGF.EmitLValue(ov); else result.RV = CGF.EmitAnyExpr(ov, slot); } } opaques.push_back(opaqueData); // Otherwise, if the expression is the result, evaluate it // and remember the result. } else if (semantic == resultExpr) { if (forLValue) result.LV = CGF.EmitLValue(semantic); else result.RV = CGF.EmitAnyExpr(semantic, slot); // Otherwise, evaluate the expression in an ignored context. } else { CGF.EmitIgnoredExpr(semantic); } } // Unbind all the opaques now. for (unsigned i = 0, e = opaques.size(); i != e; ++i) opaques[i].unbind(CGF); return result; } RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E, AggValueSlot slot) { return emitPseudoObjectExpr(*this, E, false, slot).RV; } LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) { return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV; } diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h index eb8a1125c7b6..16656de4e8f7 100644 --- a/clang/lib/CodeGen/CodeGenFunction.h +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -1,4712 +1,4715 @@ //===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This is the internal per-function state used for llvm translation. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H #define LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H #include "CGBuilder.h" #include "CGDebugInfo.h" #include "CGLoopInfo.h" #include "CGValue.h" #include "CodeGenModule.h" #include "CodeGenPGO.h" #include "EHScopeStack.h" #include "VarBypassDetector.h" #include "clang/AST/CharUnits.h" #include "clang/AST/CurrentSourceLocExprScope.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExprOpenMP.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/Type.h" #include "clang/Basic/ABI.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/CodeGenOptions.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/TargetInfo.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Frontend/OpenMP/OMPIRBuilder.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Support/Debug.h" #include "llvm/Transforms/Utils/SanitizerStats.h" namespace llvm { class BasicBlock; class LLVMContext; class MDNode; class Module; class SwitchInst; class Twine; class Value; } namespace clang { class ASTContext; class BlockDecl; class CXXDestructorDecl; class CXXForRangeStmt; class CXXTryStmt; class Decl; class LabelDecl; class EnumConstantDecl; class FunctionDecl; class FunctionProtoType; class LabelStmt; class ObjCContainerDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; class ObjCMethodDecl; class ObjCImplementationDecl; class ObjCPropertyImplDecl; class TargetInfo; class VarDecl; class ObjCForCollectionStmt; class ObjCAtTryStmt; class ObjCAtThrowStmt; class ObjCAtSynchronizedStmt; class ObjCAutoreleasePoolStmt; class OMPUseDevicePtrClause; class OMPUseDeviceAddrClause; class 
ReturnsNonNullAttr; class SVETypeFlags; class OMPExecutableDirective; namespace analyze_os_log { class OSLogBufferLayout; } namespace CodeGen { class CodeGenTypes; class CGCallee; class CGFunctionInfo; class CGRecordLayout; class CGBlockInfo; class CGCXXABI; class BlockByrefHelpers; class BlockByrefInfo; class BlockFlags; class BlockFieldFlags; class RegionCodeGenTy; class TargetCodeGenInfo; struct OMPTaskDataTy; struct CGCoroData; /// The kind of evaluation to perform on values of a particular /// type. Basically, is the code in CGExprScalar, CGExprComplex, or /// CGExprAgg? /// /// TODO: should vectors maybe be split out into their own thing? enum TypeEvaluationKind { TEK_Scalar, TEK_Complex, TEK_Aggregate }; #define LIST_SANITIZER_CHECKS \ SANITIZER_CHECK(AddOverflow, add_overflow, 0) \ SANITIZER_CHECK(BuiltinUnreachable, builtin_unreachable, 0) \ SANITIZER_CHECK(CFICheckFail, cfi_check_fail, 0) \ SANITIZER_CHECK(DivremOverflow, divrem_overflow, 0) \ SANITIZER_CHECK(DynamicTypeCacheMiss, dynamic_type_cache_miss, 0) \ SANITIZER_CHECK(FloatCastOverflow, float_cast_overflow, 0) \ SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 1) \ SANITIZER_CHECK(ImplicitConversion, implicit_conversion, 0) \ SANITIZER_CHECK(InvalidBuiltin, invalid_builtin, 0) \ SANITIZER_CHECK(InvalidObjCCast, invalid_objc_cast, 0) \ SANITIZER_CHECK(LoadInvalidValue, load_invalid_value, 0) \ SANITIZER_CHECK(MissingReturn, missing_return, 0) \ SANITIZER_CHECK(MulOverflow, mul_overflow, 0) \ SANITIZER_CHECK(NegateOverflow, negate_overflow, 0) \ SANITIZER_CHECK(NullabilityArg, nullability_arg, 0) \ SANITIZER_CHECK(NullabilityReturn, nullability_return, 1) \ SANITIZER_CHECK(NonnullArg, nonnull_arg, 0) \ SANITIZER_CHECK(NonnullReturn, nonnull_return, 1) \ SANITIZER_CHECK(OutOfBounds, out_of_bounds, 0) \ SANITIZER_CHECK(PointerOverflow, pointer_overflow, 0) \ SANITIZER_CHECK(ShiftOutOfBounds, shift_out_of_bounds, 0) \ SANITIZER_CHECK(SubOverflow, sub_overflow, 0) \ SANITIZER_CHECK(TypeMismatch, type_mismatch, 1) \ SANITIZER_CHECK(AlignmentAssumption, alignment_assumption, 0) \ SANITIZER_CHECK(VLABoundNotPositive, vla_bound_not_positive, 0) enum SanitizerHandler { #define SANITIZER_CHECK(Enum, Name, Version) Enum, LIST_SANITIZER_CHECKS #undef SANITIZER_CHECK }; /// Helper class with most of the code for saving a value for a /// conditional expression cleanup. struct DominatingLLVMValue { typedef llvm::PointerIntPair saved_type; /// Answer whether the given value needs extra work to be saved. static bool needsSaving(llvm::Value *value) { // If it's not an instruction, we don't need to save. if (!isa(value)) return false; // If it's an instruction in the entry block, we don't need to save. llvm::BasicBlock *block = cast(value)->getParent(); return (block != &block->getParent()->getEntryBlock()); } static saved_type save(CodeGenFunction &CGF, llvm::Value *value); static llvm::Value *restore(CodeGenFunction &CGF, saved_type value); }; /// A partial specialization of DominatingValue for llvm::Values that /// might be llvm::Instructions. template struct DominatingPointer : DominatingLLVMValue { typedef T *type; static type restore(CodeGenFunction &CGF, saved_type value) { return static_cast(DominatingLLVMValue::restore(CGF, value)); } }; /// A specialization of DominatingValue for Address. template <> struct DominatingValue
{ typedef Address type; struct saved_type { DominatingLLVMValue::saved_type SavedValue; CharUnits Alignment; }; static bool needsSaving(type value) { return DominatingLLVMValue::needsSaving(value.getPointer()); } static saved_type save(CodeGenFunction &CGF, type value) { return { DominatingLLVMValue::save(CGF, value.getPointer()), value.getAlignment() }; } static type restore(CodeGenFunction &CGF, saved_type value) { return Address(DominatingLLVMValue::restore(CGF, value.SavedValue), value.Alignment); } }; /// A specialization of DominatingValue for RValue. template <> struct DominatingValue { typedef RValue type; class saved_type { enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral, AggregateAddress, ComplexAddress }; llvm::Value *Value; unsigned K : 3; unsigned Align : 29; saved_type(llvm::Value *v, Kind k, unsigned a = 0) : Value(v), K(k), Align(a) {} public: static bool needsSaving(RValue value); static saved_type save(CodeGenFunction &CGF, RValue value); RValue restore(CodeGenFunction &CGF); // implementations in CGCleanup.cpp }; static bool needsSaving(type value) { return saved_type::needsSaving(value); } static saved_type save(CodeGenFunction &CGF, type value) { return saved_type::save(CGF, value); } static type restore(CodeGenFunction &CGF, saved_type value) { return value.restore(CGF); } }; /// CodeGenFunction - This class organizes the per-function state that is used /// while generating LLVM code. class CodeGenFunction : public CodeGenTypeCache { CodeGenFunction(const CodeGenFunction &) = delete; void operator=(const CodeGenFunction &) = delete; friend class CGCXXABI; public: /// A jump destination is an abstract label, branching to which may /// require a jump out through normal cleanups. struct JumpDest { JumpDest() : Block(nullptr), ScopeDepth(), Index(0) {} JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth, unsigned Index) : Block(Block), ScopeDepth(Depth), Index(Index) {} bool isValid() const { return Block != nullptr; } llvm::BasicBlock *getBlock() const { return Block; } EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; } unsigned getDestIndex() const { return Index; } // This should be used cautiously. void setScopeDepth(EHScopeStack::stable_iterator depth) { ScopeDepth = depth; } private: llvm::BasicBlock *Block; EHScopeStack::stable_iterator ScopeDepth; unsigned Index; }; CodeGenModule &CGM; // Per-module state. const TargetInfo &Target; // For EH/SEH outlined funclets, this field points to parent's CGF CodeGenFunction *ParentCGF = nullptr; typedef std::pair ComplexPairTy; LoopInfoStack LoopStack; CGBuilderTy Builder; // Stores variables for which we can't generate correct lifetime markers // because of jumps. VarBypassDetector Bypasses; // CodeGen lambda for loops and support for ordered clause typedef llvm::function_ref CodeGenLoopTy; typedef llvm::function_ref CodeGenOrderedTy; // Codegen lambda for loop bounds in worksharing loop constructs typedef llvm::function_ref( CodeGenFunction &, const OMPExecutableDirective &S)> CodeGenLoopBoundsTy; // Codegen lambda for loop bounds in dispatch-based loop implementation typedef llvm::function_ref( CodeGenFunction &, const OMPExecutableDirective &S, Address LB, Address UB)> CodeGenDispatchBoundsTy; /// CGBuilder insert helper. This function is called after an /// instruction is created using Builder. 
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB, llvm::BasicBlock::iterator InsertPt) const; /// CurFuncDecl - Holds the Decl for the current outermost /// non-closure context. const Decl *CurFuncDecl; /// CurCodeDecl - This is the inner-most code context, which includes blocks. const Decl *CurCodeDecl; const CGFunctionInfo *CurFnInfo; QualType FnRetTy; llvm::Function *CurFn = nullptr; // Holds coroutine data if the current function is a coroutine. We use a // wrapper to manage its lifetime, so that we don't have to define CGCoroData // in this header. struct CGCoroInfo { std::unique_ptr Data; CGCoroInfo(); ~CGCoroInfo(); }; CGCoroInfo CurCoro; bool isCoroutine() const { return CurCoro.Data != nullptr; } /// CurGD - The GlobalDecl for the current function being compiled. GlobalDecl CurGD; /// PrologueCleanupDepth - The cleanup depth enclosing all the /// cleanups associated with the parameters. EHScopeStack::stable_iterator PrologueCleanupDepth; /// ReturnBlock - Unified return block. JumpDest ReturnBlock; /// ReturnValue - The temporary alloca to hold the return /// value. This is invalid iff the function has no return value. Address ReturnValue = Address::invalid(); /// ReturnValuePointer - The temporary alloca to hold a pointer to sret. /// This is invalid if sret is not in use. Address ReturnValuePointer = Address::invalid(); /// If a return statement is being visited, this holds the return statment's /// result expression. const Expr *RetExpr = nullptr; /// Return true if a label was seen in the current scope. bool hasLabelBeenSeenInCurrentScope() const { if (CurLexicalScope) return CurLexicalScope->hasLabels(); return !LabelMap.empty(); } /// AllocaInsertPoint - This is an instruction in the entry block before which /// we prefer to insert allocas. llvm::AssertingVH AllocaInsertPt; /// API for captured statement code generation. class CGCapturedStmtInfo { public: explicit CGCapturedStmtInfo(CapturedRegionKind K = CR_Default) : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {} explicit CGCapturedStmtInfo(const CapturedStmt &S, CapturedRegionKind K = CR_Default) : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) { RecordDecl::field_iterator Field = S.getCapturedRecordDecl()->field_begin(); for (CapturedStmt::const_capture_iterator I = S.capture_begin(), E = S.capture_end(); I != E; ++I, ++Field) { if (I->capturesThis()) CXXThisFieldDecl = *Field; else if (I->capturesVariable()) CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field; else if (I->capturesVariableByCopy()) CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field; } } virtual ~CGCapturedStmtInfo(); CapturedRegionKind getKind() const { return Kind; } virtual void setContextValue(llvm::Value *V) { ThisValue = V; } // Retrieve the value of the context parameter. virtual llvm::Value *getContextValue() const { return ThisValue; } /// Lookup the captured field decl for a variable. virtual const FieldDecl *lookup(const VarDecl *VD) const { return CaptureFields.lookup(VD->getCanonicalDecl()); } bool isCXXThisExprCaptured() const { return getThisFieldDecl() != nullptr; } virtual FieldDecl *getThisFieldDecl() const { return CXXThisFieldDecl; } static bool classof(const CGCapturedStmtInfo *) { return true; } /// Emit the captured statement body. virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) { CGF.incrementProfileCounter(S); CGF.EmitStmt(S); } /// Get the name of the capture helper. 
virtual StringRef getHelperName() const { return "__captured_stmt"; } private: /// The kind of captured statement being generated. CapturedRegionKind Kind; /// Keep the map between VarDecl and FieldDecl. llvm::SmallDenseMap CaptureFields; /// The base address of the captured record, passed in as the first /// argument of the parallel region function. llvm::Value *ThisValue; /// Captured 'this' type. FieldDecl *CXXThisFieldDecl; }; CGCapturedStmtInfo *CapturedStmtInfo = nullptr; /// RAII for correct setting/restoring of CapturedStmtInfo. class CGCapturedStmtRAII { private: CodeGenFunction &CGF; CGCapturedStmtInfo *PrevCapturedStmtInfo; public: CGCapturedStmtRAII(CodeGenFunction &CGF, CGCapturedStmtInfo *NewCapturedStmtInfo) : CGF(CGF), PrevCapturedStmtInfo(CGF.CapturedStmtInfo) { CGF.CapturedStmtInfo = NewCapturedStmtInfo; } ~CGCapturedStmtRAII() { CGF.CapturedStmtInfo = PrevCapturedStmtInfo; } }; /// An abstract representation of regular/ObjC call/message targets. class AbstractCallee { /// The function declaration of the callee. const Decl *CalleeDecl; public: AbstractCallee() : CalleeDecl(nullptr) {} AbstractCallee(const FunctionDecl *FD) : CalleeDecl(FD) {} AbstractCallee(const ObjCMethodDecl *OMD) : CalleeDecl(OMD) {} bool hasFunctionDecl() const { return dyn_cast_or_null(CalleeDecl); } const Decl *getDecl() const { return CalleeDecl; } unsigned getNumParams() const { if (const auto *FD = dyn_cast(CalleeDecl)) return FD->getNumParams(); return cast(CalleeDecl)->param_size(); } const ParmVarDecl *getParamDecl(unsigned I) const { if (const auto *FD = dyn_cast(CalleeDecl)) return FD->getParamDecl(I); return *(cast(CalleeDecl)->param_begin() + I); } }; /// Sanitizers enabled for this function. SanitizerSet SanOpts; /// True if CodeGen currently emits code implementing sanitizer checks. bool IsSanitizerScope = false; /// RAII object to set/unset CodeGenFunction::IsSanitizerScope. class SanitizerScope { CodeGenFunction *CGF; public: SanitizerScope(CodeGenFunction *CGF); ~SanitizerScope(); }; /// In C++, whether we are code generating a thunk. This controls whether we /// should emit cleanups. bool CurFuncIsThunk = false; /// In ARC, whether we should autorelease the return value. bool AutoreleaseResult = false; /// Whether we processed a Microsoft-style asm block during CodeGen. These can /// potentially set the return value. bool SawAsmBlock = false; const NamedDecl *CurSEHParent = nullptr; /// True if the current function is an outlined SEH helper. This can be a /// finally block or filter expression. bool IsOutlinedSEHHelper = false; /// True if CodeGen currently emits code inside presereved access index /// region. bool IsInPreservedAIRegion = false; /// True if the current statement has nomerge attribute. bool InNoMergeAttributedStmt = false; const CodeGen::CGBlockInfo *BlockInfo = nullptr; llvm::Value *BlockPointer = nullptr; llvm::DenseMap LambdaCaptureFields; FieldDecl *LambdaThisCaptureField = nullptr; /// A mapping from NRVO variables to the flags used to indicate /// when the NRVO has been applied to this variable. 
llvm::DenseMap NRVOFlags; EHScopeStack EHStack; llvm::SmallVector LifetimeExtendedCleanupStack; llvm::SmallVector SEHTryEpilogueStack; llvm::Instruction *CurrentFuncletPad = nullptr; class CallLifetimeEnd final : public EHScopeStack::Cleanup { llvm::Value *Addr; llvm::Value *Size; public: CallLifetimeEnd(Address addr, llvm::Value *size) : Addr(addr.getPointer()), Size(size) {} void Emit(CodeGenFunction &CGF, Flags flags) override { CGF.EmitLifetimeEnd(Size, Addr); } }; /// Header for data within LifetimeExtendedCleanupStack. struct LifetimeExtendedCleanupHeader { /// The size of the following cleanup object. unsigned Size; /// The kind of cleanup to push: a value from the CleanupKind enumeration. unsigned Kind : 31; /// Whether this is a conditional cleanup. unsigned IsConditional : 1; size_t getSize() const { return Size; } CleanupKind getKind() const { return (CleanupKind)Kind; } bool isConditional() const { return IsConditional; } }; /// i32s containing the indexes of the cleanup destinations. Address NormalCleanupDest = Address::invalid(); unsigned NextCleanupDestIndex = 1; /// EHResumeBlock - Unified block containing a call to llvm.eh.resume. llvm::BasicBlock *EHResumeBlock = nullptr; /// The exception slot. All landing pads write the current exception pointer /// into this alloca. llvm::Value *ExceptionSlot = nullptr; /// The selector slot. Under the MandatoryCleanup model, all landing pads /// write the current selector value into this alloca. llvm::AllocaInst *EHSelectorSlot = nullptr; /// A stack of exception code slots. Entering an __except block pushes a slot /// on the stack and leaving pops one. The __exception_code() intrinsic loads /// a value from the top of the stack. SmallVector SEHCodeSlotStack; /// Value returned by __exception_info intrinsic. llvm::Value *SEHInfo = nullptr; /// Emits a landing pad for the current EH stack. llvm::BasicBlock *EmitLandingPad(); llvm::BasicBlock *getInvokeDestImpl(); /// Parent loop-based directive for scan directive. const OMPExecutableDirective *OMPParentLoopDirectiveForScan = nullptr; llvm::BasicBlock *OMPBeforeScanBlock = nullptr; llvm::BasicBlock *OMPAfterScanBlock = nullptr; llvm::BasicBlock *OMPScanExitBlock = nullptr; llvm::BasicBlock *OMPScanDispatch = nullptr; bool OMPFirstScanLoop = false; /// Manages parent directive for scan directives. class ParentLoopDirectiveForScanRegion { CodeGenFunction &CGF; const OMPExecutableDirective *ParentLoopDirectiveForScan; public: ParentLoopDirectiveForScanRegion( CodeGenFunction &CGF, const OMPExecutableDirective &ParentLoopDirectiveForScan) : CGF(CGF), ParentLoopDirectiveForScan(CGF.OMPParentLoopDirectiveForScan) { CGF.OMPParentLoopDirectiveForScan = &ParentLoopDirectiveForScan; } ~ParentLoopDirectiveForScanRegion() { CGF.OMPParentLoopDirectiveForScan = ParentLoopDirectiveForScan; } }; template typename DominatingValue::saved_type saveValueInCond(T value) { return DominatingValue::save(*this, value); } class CGFPOptionsRAII { public: CGFPOptionsRAII(CodeGenFunction &CGF, FPOptions FPFeatures); ~CGFPOptionsRAII(); private: CodeGenFunction &CGF; FPOptions OldFPFeatures; Optional FMFGuard; }; FPOptions CurFPFeatures; public: /// ObjCEHValueStack - Stack of Objective-C exception values, used for /// rethrows. SmallVector ObjCEHValueStack; /// A class controlling the emission of a finally block. class FinallyInfo { /// Where the catchall's edge through the cleanup should go. JumpDest RethrowDest; /// A function to call to enter the catch. 
llvm::FunctionCallee BeginCatchFn; /// An i1 variable indicating whether or not the @finally is /// running for an exception. llvm::AllocaInst *ForEHVar; /// An i8* variable into which the exception pointer to rethrow /// has been saved. llvm::AllocaInst *SavedExnVar; public: void enter(CodeGenFunction &CGF, const Stmt *Finally, llvm::FunctionCallee beginCatchFn, llvm::FunctionCallee endCatchFn, llvm::FunctionCallee rethrowFn); void exit(CodeGenFunction &CGF); }; /// Returns true inside SEH __try blocks. bool isSEHTryScope() const { return !SEHTryEpilogueStack.empty(); } /// Returns true while emitting a cleanuppad. bool isCleanupPadScope() const { return CurrentFuncletPad && isa(CurrentFuncletPad); } /// pushFullExprCleanup - Push a cleanup to be run at the end of the /// current full-expression. Safe against the possibility that /// we're currently inside a conditionally-evaluated expression. template void pushFullExprCleanup(CleanupKind kind, As... A) { // If we're not in a conditional branch, or if none of the // arguments requires saving, then use the unconditional cleanup. if (!isInConditionalBranch()) return EHStack.pushCleanup(kind, A...); // Stash values in a tuple so we can guarantee the order of saves. typedef std::tuple::saved_type...> SavedTuple; SavedTuple Saved{saveValueInCond(A)...}; typedef EHScopeStack::ConditionalCleanup CleanupType; EHStack.pushCleanupTuple(kind, Saved); initFullExprCleanup(); } /// Queue a cleanup to be pushed after finishing the current full-expression, /// potentially with an active flag. template void pushCleanupAfterFullExpr(CleanupKind Kind, As... A) { if (!isInConditionalBranch()) return pushCleanupAfterFullExprWithActiveFlag(Kind, Address::invalid(), A...); Address ActiveFlag = createCleanupActiveFlag(); assert(!DominatingValue
::needsSaving(ActiveFlag) && "cleanup active flag should never need saving"); typedef std::tuple::saved_type...> SavedTuple; SavedTuple Saved{saveValueInCond(A)...}; typedef EHScopeStack::ConditionalCleanup CleanupType; pushCleanupAfterFullExprWithActiveFlag(Kind, ActiveFlag, Saved); } template void pushCleanupAfterFullExprWithActiveFlag(CleanupKind Kind, Address ActiveFlag, As... A) { LifetimeExtendedCleanupHeader Header = {sizeof(T), Kind, ActiveFlag.isValid()}; size_t OldSize = LifetimeExtendedCleanupStack.size(); LifetimeExtendedCleanupStack.resize( LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size + (Header.IsConditional ? sizeof(ActiveFlag) : 0)); static_assert(sizeof(Header) % alignof(T) == 0, "Cleanup will be allocated on misaligned address"); char *Buffer = &LifetimeExtendedCleanupStack[OldSize]; new (Buffer) LifetimeExtendedCleanupHeader(Header); new (Buffer + sizeof(Header)) T(A...); if (Header.IsConditional) new (Buffer + sizeof(Header) + sizeof(T)) Address(ActiveFlag); } /// Set up the last cleanup that was pushed as a conditional /// full-expression cleanup. void initFullExprCleanup() { initFullExprCleanupWithFlag(createCleanupActiveFlag()); } void initFullExprCleanupWithFlag(Address ActiveFlag); Address createCleanupActiveFlag(); /// PushDestructorCleanup - Push a cleanup to call the /// complete-object destructor of an object of the given type at the /// given address. Does nothing if T is not a C++ class type with a /// non-trivial destructor. void PushDestructorCleanup(QualType T, Address Addr); /// PushDestructorCleanup - Push a cleanup to call the /// complete-object variant of the given destructor on the object at /// the given address. void PushDestructorCleanup(const CXXDestructorDecl *Dtor, QualType T, Address Addr); /// PopCleanupBlock - Will pop the cleanup entry on the stack and /// process all branch fixups. void PopCleanupBlock(bool FallThroughIsBranchThrough = false); /// DeactivateCleanupBlock - Deactivates the given cleanup block. /// The block cannot be reactivated. Pops it if it's the top of the /// stack. /// /// \param DominatingIP - An instruction which is known to /// dominate the current IP (if set) and which lies along /// all paths of execution between the current IP and the /// the point at which the cleanup comes into scope. void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP); /// ActivateCleanupBlock - Activates an initially-inactive cleanup. /// Cannot be used to resurrect a deactivated cleanup. /// /// \param DominatingIP - An instruction which is known to /// dominate the current IP (if set) and which lies along /// all paths of execution between the current IP and the /// the point at which the cleanup comes into scope. void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP); /// Enters a new scope for capturing cleanups, all of which /// will be executed once the scope is exited. class RunCleanupsScope { EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth; size_t LifetimeExtendedCleanupStackSize; bool OldDidCallStackSave; protected: bool PerformCleanup; private: RunCleanupsScope(const RunCleanupsScope &) = delete; void operator=(const RunCleanupsScope &) = delete; protected: CodeGenFunction& CGF; public: /// Enter a new cleanup scope. 
explicit RunCleanupsScope(CodeGenFunction &CGF) : PerformCleanup(true), CGF(CGF) { CleanupStackDepth = CGF.EHStack.stable_begin(); LifetimeExtendedCleanupStackSize = CGF.LifetimeExtendedCleanupStack.size(); OldDidCallStackSave = CGF.DidCallStackSave; CGF.DidCallStackSave = false; OldCleanupScopeDepth = CGF.CurrentCleanupScopeDepth; CGF.CurrentCleanupScopeDepth = CleanupStackDepth; } /// Exit this cleanup scope, emitting any accumulated cleanups. ~RunCleanupsScope() { if (PerformCleanup) ForceCleanup(); } /// Determine whether this scope requires any cleanups. bool requiresCleanups() const { return CGF.EHStack.stable_begin() != CleanupStackDepth; } /// Force the emission of cleanups now, instead of waiting /// until this object is destroyed. /// \param ValuesToReload - A list of values that need to be available at /// the insertion point after cleanup emission. If cleanup emission created /// a shared cleanup block, these value pointers will be rewritten. /// Otherwise, they not will be modified. void ForceCleanup(std::initializer_list ValuesToReload = {}) { assert(PerformCleanup && "Already forced cleanup"); CGF.DidCallStackSave = OldDidCallStackSave; CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize, ValuesToReload); PerformCleanup = false; CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth; } }; // Cleanup stack depth of the RunCleanupsScope that was pushed most recently. EHScopeStack::stable_iterator CurrentCleanupScopeDepth = EHScopeStack::stable_end(); class LexicalScope : public RunCleanupsScope { SourceRange Range; SmallVector Labels; LexicalScope *ParentScope; LexicalScope(const LexicalScope &) = delete; void operator=(const LexicalScope &) = delete; public: /// Enter a new cleanup scope. explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range) : RunCleanupsScope(CGF), Range(Range), ParentScope(CGF.CurLexicalScope) { CGF.CurLexicalScope = this; if (CGDebugInfo *DI = CGF.getDebugInfo()) DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin()); } void addLabel(const LabelDecl *label) { assert(PerformCleanup && "adding label to dead scope?"); Labels.push_back(label); } /// Exit this cleanup scope, emitting any accumulated /// cleanups. ~LexicalScope() { if (CGDebugInfo *DI = CGF.getDebugInfo()) DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd()); // If we should perform a cleanup, force them now. Note that // this ends the cleanup scope before rescoping any labels. if (PerformCleanup) { ApplyDebugLocation DL(CGF, Range.getEnd()); ForceCleanup(); } } /// Force the emission of cleanups now, instead of waiting /// until this object is destroyed. void ForceCleanup() { CGF.CurLexicalScope = ParentScope; RunCleanupsScope::ForceCleanup(); if (!Labels.empty()) rescopeLabels(); } bool hasLabels() const { return !Labels.empty(); } void rescopeLabels(); }; typedef llvm::DenseMap DeclMapTy; /// The class used to assign some variables some temporarily addresses. class OMPMapVars { DeclMapTy SavedLocals; DeclMapTy SavedTempAddresses; OMPMapVars(const OMPMapVars &) = delete; void operator=(const OMPMapVars &) = delete; public: explicit OMPMapVars() = default; ~OMPMapVars() { assert(SavedLocals.empty() && "Did not restored original addresses."); }; /// Sets the address of the variable \p LocalVD to be \p TempAddr in /// function \p CGF. /// \return true if at least one variable was set already, false otherwise. bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD, Address TempAddr) { LocalVD = LocalVD->getCanonicalDecl(); // Only save it once. 
if (SavedLocals.count(LocalVD)) return false; // Copy the existing local entry to SavedLocals. auto it = CGF.LocalDeclMap.find(LocalVD); if (it != CGF.LocalDeclMap.end()) SavedLocals.try_emplace(LocalVD, it->second); else SavedLocals.try_emplace(LocalVD, Address::invalid()); // Generate the private entry. QualType VarTy = LocalVD->getType(); if (VarTy->isReferenceType()) { Address Temp = CGF.CreateMemTemp(VarTy); CGF.Builder.CreateStore(TempAddr.getPointer(), Temp); TempAddr = Temp; } SavedTempAddresses.try_emplace(LocalVD, TempAddr); return true; } /// Applies new addresses to the list of the variables. /// \return true if at least one variable is using new address, false /// otherwise. bool apply(CodeGenFunction &CGF) { copyInto(SavedTempAddresses, CGF.LocalDeclMap); SavedTempAddresses.clear(); return !SavedLocals.empty(); } /// Restores original addresses of the variables. void restore(CodeGenFunction &CGF) { if (!SavedLocals.empty()) { copyInto(SavedLocals, CGF.LocalDeclMap); SavedLocals.clear(); } } private: /// Copy all the entries in the source map over the corresponding /// entries in the destination, which must exist. static void copyInto(const DeclMapTy &Src, DeclMapTy &Dest) { for (auto &Pair : Src) { if (!Pair.second.isValid()) { Dest.erase(Pair.first); continue; } auto I = Dest.find(Pair.first); if (I != Dest.end()) I->second = Pair.second; else Dest.insert(Pair); } } }; /// The scope used to remap some variables as private in the OpenMP loop body /// (or other captured region emitted without outlining), and to restore old /// vars back on exit. class OMPPrivateScope : public RunCleanupsScope { OMPMapVars MappedVars; OMPPrivateScope(const OMPPrivateScope &) = delete; void operator=(const OMPPrivateScope &) = delete; public: /// Enter a new OpenMP private scope. explicit OMPPrivateScope(CodeGenFunction &CGF) : RunCleanupsScope(CGF) {} /// Registers \p LocalVD variable as a private and apply \p PrivateGen /// function for it to generate corresponding private variable. \p /// PrivateGen returns an address of the generated private variable. /// \return true if the variable is registered as private, false if it has /// been privatized already. bool addPrivate(const VarDecl *LocalVD, const llvm::function_ref PrivateGen) { assert(PerformCleanup && "adding private to dead scope"); return MappedVars.setVarAddr(CGF, LocalVD, PrivateGen()); } /// Privatizes local variables previously registered as private. /// Registration is separate from the actual privatization to allow /// initializers use values of the original variables, not the private one. /// This is important, for example, if the private variable is a class /// variable initialized by a constructor that references other private /// variables. But at initialization original variables must be used, not /// private copies. /// \return true if at least one variable was privatized, false otherwise. bool Privatize() { return MappedVars.apply(CGF); } void ForceCleanup() { RunCleanupsScope::ForceCleanup(); MappedVars.restore(CGF); } /// Exit scope - all the mapped variables are restored. ~OMPPrivateScope() { if (PerformCleanup) ForceCleanup(); } /// Checks if the global variable is captured in current function. 
bool isGlobalVarCaptured(const VarDecl *VD) const { VD = VD->getCanonicalDecl(); return !VD->isLocalVarDeclOrParm() && CGF.LocalDeclMap.count(VD) > 0; } }; /// Save/restore original map of previously emitted local vars in case when we /// need to duplicate emission of the same code several times in the same /// function for OpenMP code. class OMPLocalDeclMapRAII { CodeGenFunction &CGF; DeclMapTy SavedMap; public: OMPLocalDeclMapRAII(CodeGenFunction &CGF) : CGF(CGF), SavedMap(CGF.LocalDeclMap) {} ~OMPLocalDeclMapRAII() { SavedMap.swap(CGF.LocalDeclMap); } }; /// Takes the old cleanup stack size and emits the cleanup blocks /// that have been added. void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize, std::initializer_list ValuesToReload = {}); /// Takes the old cleanup stack size and emits the cleanup blocks /// that have been added, then adds all lifetime-extended cleanups from /// the given position to the stack. void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize, size_t OldLifetimeExtendedStackSize, std::initializer_list ValuesToReload = {}); void ResolveBranchFixups(llvm::BasicBlock *Target); /// The given basic block lies in the current EH scope, but may be a /// target of a potentially scope-crossing jump; get a stable handle /// to which we can perform this jump later. JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) { return JumpDest(Target, EHStack.getInnermostNormalCleanup(), NextCleanupDestIndex++); } /// The given basic block lies in the current EH scope, but may be a /// target of a potentially scope-crossing jump; get a stable handle /// to which we can perform this jump later. JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) { return getJumpDestInCurrentScope(createBasicBlock(Name)); } /// EmitBranchThroughCleanup - Emit a branch from the current insert /// block through the normal cleanup handling code (if any) and then /// on to \arg Dest. void EmitBranchThroughCleanup(JumpDest Dest); /// isObviouslyBranchWithoutCleanups - Return true if a branch to the /// specified destination obviously has no cleanups to run. 'false' is always /// a conservatively correct answer for this method. bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const; /// popCatchScope - Pops the catch scope at the top of the EHScope /// stack, emitting any required code (other than the catch handlers /// themselves). void popCatchScope(); llvm::BasicBlock *getEHResumeBlock(bool isCleanup); llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope); llvm::BasicBlock * getFuncletEHDispatchBlock(EHScopeStack::stable_iterator scope); /// An object to manage conditionally-evaluated expressions. class ConditionalEvaluation { llvm::BasicBlock *StartBB; public: ConditionalEvaluation(CodeGenFunction &CGF) : StartBB(CGF.Builder.GetInsertBlock()) {} void begin(CodeGenFunction &CGF) { assert(CGF.OutermostConditional != this); if (!CGF.OutermostConditional) CGF.OutermostConditional = this; } void end(CodeGenFunction &CGF) { assert(CGF.OutermostConditional != nullptr); if (CGF.OutermostConditional == this) CGF.OutermostConditional = nullptr; } /// Returns a block which will be executed prior to each /// evaluation of the conditional code. llvm::BasicBlock *getStartingBlock() const { return StartBB; } }; /// isInConditionalBranch - Return true if we're currently emitting /// one branch or the other of a conditional expression. 
bool isInConditionalBranch() const { return OutermostConditional != nullptr; } void setBeforeOutermostConditional(llvm::Value *value, Address addr) { assert(isInConditionalBranch()); llvm::BasicBlock *block = OutermostConditional->getStartingBlock(); auto store = new llvm::StoreInst(value, addr.getPointer(), &block->back()); store->setAlignment(addr.getAlignment().getAsAlign()); } /// An RAII object to record that we're evaluating a statement /// expression. class StmtExprEvaluation { CodeGenFunction &CGF; /// We have to save the outermost conditional: cleanups in a /// statement expression aren't conditional just because the /// StmtExpr is. ConditionalEvaluation *SavedOutermostConditional; public: StmtExprEvaluation(CodeGenFunction &CGF) : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) { CGF.OutermostConditional = nullptr; } ~StmtExprEvaluation() { CGF.OutermostConditional = SavedOutermostConditional; CGF.EnsureInsertPoint(); } }; /// An object which temporarily prevents a value from being /// destroyed by aggressive peephole optimizations that assume that /// all uses of a value have been realized in the IR. class PeepholeProtection { llvm::Instruction *Inst; friend class CodeGenFunction; public: PeepholeProtection() : Inst(nullptr) {} }; /// A non-RAII class containing all the information about a bound /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for /// this which makes individual mappings very simple; using this /// class directly is useful when you have a variable number of /// opaque values or don't want the RAII functionality for some /// reason. class OpaqueValueMappingData { const OpaqueValueExpr *OpaqueValue; bool BoundLValue; CodeGenFunction::PeepholeProtection Protection; OpaqueValueMappingData(const OpaqueValueExpr *ov, bool boundLValue) : OpaqueValue(ov), BoundLValue(boundLValue) {} public: OpaqueValueMappingData() : OpaqueValue(nullptr) {} static bool shouldBindAsLValue(const Expr *expr) { // gl-values should be bound as l-values for obvious reasons. // Records should be bound as l-values because IR generation // always keeps them in memory. Expressions of function type // act exactly like l-values but are formally required to be // r-values in C. return expr->isGLValue() || expr->getType()->isFunctionType() || hasAggregateEvaluationKind(expr->getType()); } static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const Expr *e) { if (shouldBindAsLValue(ov)) return bind(CGF, ov, CGF.EmitLValue(e)); return bind(CGF, ov, CGF.EmitAnyExpr(e)); } static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const LValue &lv) { assert(shouldBindAsLValue(ov)); CGF.OpaqueLValues.insert(std::make_pair(ov, lv)); return OpaqueValueMappingData(ov, true); } static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const RValue &rv) { assert(!shouldBindAsLValue(ov)); CGF.OpaqueRValues.insert(std::make_pair(ov, rv)); OpaqueValueMappingData data(ov, false); // Work around an extremely aggressive peephole optimization in // EmitScalarConversion which assumes that all other uses of a // value are extant. 
data.Protection = CGF.protectFromPeepholes(rv); return data; } bool isValid() const { return OpaqueValue != nullptr; } void clear() { OpaqueValue = nullptr; } void unbind(CodeGenFunction &CGF) { assert(OpaqueValue && "no data to unbind!"); if (BoundLValue) { CGF.OpaqueLValues.erase(OpaqueValue); } else { CGF.OpaqueRValues.erase(OpaqueValue); CGF.unprotectFromPeepholes(Protection); } } }; /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr. class OpaqueValueMapping { CodeGenFunction &CGF; OpaqueValueMappingData Data; public: static bool shouldBindAsLValue(const Expr *expr) { return OpaqueValueMappingData::shouldBindAsLValue(expr); } /// Build the opaque value mapping for the given conditional /// operator if it's the GNU ?: extension. This is a common /// enough pattern that the convenience operator is really /// helpful. /// OpaqueValueMapping(CodeGenFunction &CGF, const AbstractConditionalOperator *op) : CGF(CGF) { if (isa(op)) // Leave Data empty. return; const BinaryConditionalOperator *e = cast(op); Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(), e->getCommon()); } /// Build the opaque value mapping for an OpaqueValueExpr whose source /// expression is set to the expression the OVE represents. OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *OV) : CGF(CGF) { if (OV) { assert(OV->getSourceExpr() && "wrong form of OpaqueValueMapping used " "for OVE with no source expression"); Data = OpaqueValueMappingData::bind(CGF, OV, OV->getSourceExpr()); } } OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *opaqueValue, LValue lvalue) : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) { } OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *opaqueValue, RValue rvalue) : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) { } void pop() { Data.unbind(CGF); Data.clear(); } ~OpaqueValueMapping() { if (Data.isValid()) Data.unbind(CGF); } }; private: CGDebugInfo *DebugInfo; /// Used to create unique names for artificial VLA size debug info variables. unsigned VLAExprCounter = 0; bool DisableDebugInfo = false; /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid /// calling llvm.stacksave for multiple VLAs in the same scope. bool DidCallStackSave = false; /// IndirectBranch - The first time an indirect goto is seen we create a block /// with an indirect branch. Every time we see the address of a label taken, /// we add the label to the indirect goto. Every subsequent indirect goto is /// codegen'd as a jump to the IndirectBranch's basic block. llvm::IndirectBrInst *IndirectBranch = nullptr; /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C /// decls. DeclMapTy LocalDeclMap; // Keep track of the cleanups for callee-destructed parameters pushed to the // cleanup stack so that they can be deactivated later. llvm::DenseMap CalleeDestructedParamCleanups; /// SizeArguments - If a ParmVarDecl had the pass_object_size attribute, this /// will contain a mapping from said ParmVarDecl to its implicit "object_size" /// parameter. llvm::SmallDenseMap SizeArguments; /// Track escaped local variables with auto storage. Used during SEH /// outlining to produce a call to llvm.localescape. llvm::DenseMap EscapedLocals; /// LabelMap - This keeps track of the LLVM basic block for each C label. llvm::DenseMap LabelMap; // BreakContinueStack - This keeps track of where break and continue // statements should jump to. 
struct BreakContinue { BreakContinue(JumpDest Break, JumpDest Continue) : BreakBlock(Break), ContinueBlock(Continue) {} JumpDest BreakBlock; JumpDest ContinueBlock; }; SmallVector BreakContinueStack; /// Handles cancellation exit points in OpenMP-related constructs. class OpenMPCancelExitStack { /// Tracks cancellation exit point and join point for cancel-related exit /// and normal exit. struct CancelExit { CancelExit() = default; CancelExit(OpenMPDirectiveKind Kind, JumpDest ExitBlock, JumpDest ContBlock) : Kind(Kind), ExitBlock(ExitBlock), ContBlock(ContBlock) {} OpenMPDirectiveKind Kind = llvm::omp::OMPD_unknown; /// true if the exit block has been emitted already by the special /// emitExit() call, false if the default codegen is used. bool HasBeenEmitted = false; JumpDest ExitBlock; JumpDest ContBlock; }; SmallVector Stack; public: OpenMPCancelExitStack() : Stack(1) {} ~OpenMPCancelExitStack() = default; /// Fetches the exit block for the current OpenMP construct. JumpDest getExitBlock() const { return Stack.back().ExitBlock; } /// Emits exit block with special codegen procedure specific for the related /// OpenMP construct + emits code for normal construct cleanup. void emitExit(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, const llvm::function_ref CodeGen) { if (Stack.back().Kind == Kind && getExitBlock().isValid()) { assert(CGF.getOMPCancelDestination(Kind).isValid()); assert(CGF.HaveInsertPoint()); assert(!Stack.back().HasBeenEmitted); auto IP = CGF.Builder.saveAndClearIP(); CGF.EmitBlock(Stack.back().ExitBlock.getBlock()); CodeGen(CGF); CGF.EmitBranch(Stack.back().ContBlock.getBlock()); CGF.Builder.restoreIP(IP); Stack.back().HasBeenEmitted = true; } CodeGen(CGF); } /// Enter the cancel supporting \a Kind construct. /// \param Kind OpenMP directive that supports cancel constructs. /// \param HasCancel true, if the construct has inner cancel directive, /// false otherwise. void enter(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, bool HasCancel) { Stack.push_back({Kind, HasCancel ? CGF.getJumpDestInCurrentScope("cancel.exit") : JumpDest(), HasCancel ? CGF.getJumpDestInCurrentScope("cancel.cont") : JumpDest()}); } /// Emits default exit point for the cancel construct (if the special one /// has not be used) + join point for cancel/normal exits. void exit(CodeGenFunction &CGF) { if (getExitBlock().isValid()) { assert(CGF.getOMPCancelDestination(Stack.back().Kind).isValid()); bool HaveIP = CGF.HaveInsertPoint(); if (!Stack.back().HasBeenEmitted) { if (HaveIP) CGF.EmitBranchThroughCleanup(Stack.back().ContBlock); CGF.EmitBlock(Stack.back().ExitBlock.getBlock()); CGF.EmitBranchThroughCleanup(Stack.back().ContBlock); } CGF.EmitBlock(Stack.back().ContBlock.getBlock()); if (!HaveIP) { CGF.Builder.CreateUnreachable(); CGF.Builder.ClearInsertionPoint(); } } Stack.pop_back(); } }; OpenMPCancelExitStack OMPCancelStack; CodeGenPGO PGO; /// Calculate branch weights appropriate for PGO data llvm::MDNode *createProfileWeights(uint64_t TrueCount, uint64_t FalseCount); llvm::MDNode *createProfileWeights(ArrayRef Weights); llvm::MDNode *createProfileWeightsForLoop(const Stmt *Cond, uint64_t LoopCount); public: /// Increment the profiler's counter for the given statement by \p StepV. /// If \p StepV is null, the default increment is 1. void incrementProfileCounter(const Stmt *S, llvm::Value *StepV = nullptr) { if (CGM.getCodeGenOpts().hasProfileClangInstr()) PGO.emitCounterIncrement(Builder, S, StepV); PGO.setCurrentStmt(S); } /// Get the profiler's count for the given statement. 
uint64_t getProfileCount(const Stmt *S) { Optional Count = PGO.getStmtCount(S); if (!Count.hasValue()) return 0; return *Count; } /// Set the profiler's current count. void setCurrentProfileCount(uint64_t Count) { PGO.setCurrentRegionCount(Count); } /// Get the profiler's current count. This is generally the count for the most /// recently incremented counter. uint64_t getCurrentProfileCount() { return PGO.getCurrentRegionCount(); } private: /// SwitchInsn - This is nearest current switch instruction. It is null if /// current context is not in a switch. llvm::SwitchInst *SwitchInsn = nullptr; /// The branch weights of SwitchInsn when doing instrumentation based PGO. SmallVector *SwitchWeights = nullptr; /// CaseRangeBlock - This block holds if condition check for last case /// statement range in current switch instruction. llvm::BasicBlock *CaseRangeBlock = nullptr; /// OpaqueLValues - Keeps track of the current set of opaque value /// expressions. llvm::DenseMap OpaqueLValues; llvm::DenseMap OpaqueRValues; // VLASizeMap - This keeps track of the associated size for each VLA type. // We track this by the size expression rather than the type itself because // in certain situations, like a const qualifier applied to an VLA typedef, // multiple VLA types can share the same size expression. // FIXME: Maybe this could be a stack of maps that is pushed/popped as we // enter/leave scopes. llvm::DenseMap VLASizeMap; /// A block containing a single 'unreachable' instruction. Created /// lazily by getUnreachableBlock(). llvm::BasicBlock *UnreachableBlock = nullptr; /// Counts of the number return expressions in the function. unsigned NumReturnExprs = 0; /// Count the number of simple (constant) return expressions in the function. unsigned NumSimpleReturnExprs = 0; /// The last regular (non-return) debug location (breakpoint) in the function. SourceLocation LastStopPoint; public: /// Source location information about the default argument or member /// initializer expression we're evaluating, if any. CurrentSourceLocExprScope CurSourceLocExprScope; using SourceLocExprScopeGuard = CurrentSourceLocExprScope::SourceLocExprScopeGuard; /// A scope within which we are constructing the fields of an object which /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use /// if we need to evaluate a CXXDefaultInitExpr within the evaluation. class FieldConstructionScope { public: FieldConstructionScope(CodeGenFunction &CGF, Address This) : CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) { CGF.CXXDefaultInitExprThis = This; } ~FieldConstructionScope() { CGF.CXXDefaultInitExprThis = OldCXXDefaultInitExprThis; } private: CodeGenFunction &CGF; Address OldCXXDefaultInitExprThis; }; /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this' /// is overridden to be the object under construction. 
class CXXDefaultInitExprScope { public: CXXDefaultInitExprScope(CodeGenFunction &CGF, const CXXDefaultInitExpr *E) : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue), OldCXXThisAlignment(CGF.CXXThisAlignment), SourceLocScope(E, CGF.CurSourceLocExprScope) { CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getPointer(); CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment(); } ~CXXDefaultInitExprScope() { CGF.CXXThisValue = OldCXXThisValue; CGF.CXXThisAlignment = OldCXXThisAlignment; } public: CodeGenFunction &CGF; llvm::Value *OldCXXThisValue; CharUnits OldCXXThisAlignment; SourceLocExprScopeGuard SourceLocScope; }; struct CXXDefaultArgExprScope : SourceLocExprScopeGuard { CXXDefaultArgExprScope(CodeGenFunction &CGF, const CXXDefaultArgExpr *E) : SourceLocExprScopeGuard(E, CGF.CurSourceLocExprScope) {} }; /// The scope of an ArrayInitLoopExpr. Within this scope, the value of the /// current loop index is overridden. class ArrayInitLoopExprScope { public: ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index) : CGF(CGF), OldArrayInitIndex(CGF.ArrayInitIndex) { CGF.ArrayInitIndex = Index; } ~ArrayInitLoopExprScope() { CGF.ArrayInitIndex = OldArrayInitIndex; } private: CodeGenFunction &CGF; llvm::Value *OldArrayInitIndex; }; class InlinedInheritingConstructorScope { public: InlinedInheritingConstructorScope(CodeGenFunction &CGF, GlobalDecl GD) : CGF(CGF), OldCurGD(CGF.CurGD), OldCurFuncDecl(CGF.CurFuncDecl), OldCurCodeDecl(CGF.CurCodeDecl), OldCXXABIThisDecl(CGF.CXXABIThisDecl), OldCXXABIThisValue(CGF.CXXABIThisValue), OldCXXThisValue(CGF.CXXThisValue), OldCXXABIThisAlignment(CGF.CXXABIThisAlignment), OldCXXThisAlignment(CGF.CXXThisAlignment), OldReturnValue(CGF.ReturnValue), OldFnRetTy(CGF.FnRetTy), OldCXXInheritedCtorInitExprArgs( std::move(CGF.CXXInheritedCtorInitExprArgs)) { CGF.CurGD = GD; CGF.CurFuncDecl = CGF.CurCodeDecl = cast(GD.getDecl()); CGF.CXXABIThisDecl = nullptr; CGF.CXXABIThisValue = nullptr; CGF.CXXThisValue = nullptr; CGF.CXXABIThisAlignment = CharUnits(); CGF.CXXThisAlignment = CharUnits(); CGF.ReturnValue = Address::invalid(); CGF.FnRetTy = QualType(); CGF.CXXInheritedCtorInitExprArgs.clear(); } ~InlinedInheritingConstructorScope() { CGF.CurGD = OldCurGD; CGF.CurFuncDecl = OldCurFuncDecl; CGF.CurCodeDecl = OldCurCodeDecl; CGF.CXXABIThisDecl = OldCXXABIThisDecl; CGF.CXXABIThisValue = OldCXXABIThisValue; CGF.CXXThisValue = OldCXXThisValue; CGF.CXXABIThisAlignment = OldCXXABIThisAlignment; CGF.CXXThisAlignment = OldCXXThisAlignment; CGF.ReturnValue = OldReturnValue; CGF.FnRetTy = OldFnRetTy; CGF.CXXInheritedCtorInitExprArgs = std::move(OldCXXInheritedCtorInitExprArgs); } private: CodeGenFunction &CGF; GlobalDecl OldCurGD; const Decl *OldCurFuncDecl; const Decl *OldCurCodeDecl; ImplicitParamDecl *OldCXXABIThisDecl; llvm::Value *OldCXXABIThisValue; llvm::Value *OldCXXThisValue; CharUnits OldCXXABIThisAlignment; CharUnits OldCXXThisAlignment; Address OldReturnValue; QualType OldFnRetTy; CallArgList OldCXXInheritedCtorInitExprArgs; }; // Helper class for the OpenMP IR Builder. Allows reusability of code used for // region body, and finalization codegen callbacks. This will class will also // contain privatization functions used by the privatization call backs // // TODO: this is temporary class for things that are being moved out of // CGOpenMPRuntime, new versions of current CodeGenFunction methods, or // utility function for use with the OMPBuilder. Once that move to use the // OMPBuilder is done, everything here will either become part of CodeGenFunc. 
// directly, or a new helper class that will contain functions used by both // this and the OMPBuilder struct OMPBuilderCBHelpers { OMPBuilderCBHelpers() = delete; OMPBuilderCBHelpers(const OMPBuilderCBHelpers &) = delete; OMPBuilderCBHelpers &operator=(const OMPBuilderCBHelpers &) = delete; using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; /// Cleanup action for allocate support. class OMPAllocateCleanupTy final : public EHScopeStack::Cleanup { private: llvm::CallInst *RTLFnCI; public: OMPAllocateCleanupTy(llvm::CallInst *RLFnCI) : RTLFnCI(RLFnCI) { RLFnCI->removeFromParent(); } void Emit(CodeGenFunction &CGF, Flags /*flags*/) override { if (!CGF.HaveInsertPoint()) return; CGF.Builder.Insert(RTLFnCI); } }; /// Returns address of the threadprivate variable for the current /// thread. This Also create any necessary OMP runtime calls. /// /// \param VD VarDecl for Threadprivate variable. /// \param VDAddr Address of the Vardecl /// \param Loc The location where the barrier directive was encountered static Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc); /// Gets the OpenMP-specific address of the local variable /p VD. static Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD); /// Get the platform-specific name separator. /// \param Parts different parts of the final name that needs separation /// \param FirstSeparator First separator used between the initial two /// parts of the name. /// \param Separator separator used between all of the rest consecutinve /// parts of the name static std::string getNameWithSeparators(ArrayRef Parts, StringRef FirstSeparator = ".", StringRef Separator = "."); /// Emit the Finalization for an OMP region /// \param CGF The Codegen function this belongs to /// \param IP Insertion point for generating the finalization code. static void FinalizeOMPRegion(CodeGenFunction &CGF, InsertPointTy IP) { CGBuilderTy::InsertPointGuard IPG(CGF.Builder); assert(IP.getBlock()->end() != IP.getPoint() && "OpenMP IR Builder should cause terminated block!"); llvm::BasicBlock *IPBB = IP.getBlock(); llvm::BasicBlock *DestBB = IPBB->getUniqueSuccessor(); assert(DestBB && "Finalization block should have one successor!"); // erase and replace with cleanup branch. IPBB->getTerminator()->eraseFromParent(); CGF.Builder.SetInsertPoint(IPBB); CodeGenFunction::JumpDest Dest = CGF.getJumpDestInCurrentScope(DestBB); CGF.EmitBranchThroughCleanup(Dest); } /// Emit the body of an OMP region /// \param CGF The Codegen function this belongs to /// \param RegionBodyStmt The body statement for the OpenMP region being /// generated /// \param CodeGenIP Insertion point for generating the body code. /// \param FiniBB The finalization basic block static void EmitOMPRegionBody(CodeGenFunction &CGF, const Stmt *RegionBodyStmt, InsertPointTy CodeGenIP, llvm::BasicBlock &FiniBB) { llvm::BasicBlock *CodeGenIPBB = CodeGenIP.getBlock(); if (llvm::Instruction *CodeGenIPBBTI = CodeGenIPBB->getTerminator()) CodeGenIPBBTI->eraseFromParent(); CGF.Builder.SetInsertPoint(CodeGenIPBB); CGF.EmitStmt(RegionBodyStmt); if (CGF.Builder.saveIP().isSet()) CGF.Builder.CreateBr(&FiniBB); } /// RAII for preserving necessary info during Outlined region body codegen. 
class OutlinedRegionBodyRAII { llvm::AssertingVH OldAllocaIP; CodeGenFunction::JumpDest OldReturnBlock; CGBuilderTy::InsertPoint IP; CodeGenFunction &CGF; public: OutlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP, llvm::BasicBlock &RetBB) : CGF(cgf) { assert(AllocaIP.isSet() && "Must specify Insertion point for allocas of outlined function"); OldAllocaIP = CGF.AllocaInsertPt; CGF.AllocaInsertPt = &*AllocaIP.getPoint(); IP = CGF.Builder.saveIP(); OldReturnBlock = CGF.ReturnBlock; CGF.ReturnBlock = CGF.getJumpDestInCurrentScope(&RetBB); } ~OutlinedRegionBodyRAII() { CGF.AllocaInsertPt = OldAllocaIP; CGF.ReturnBlock = OldReturnBlock; CGF.Builder.restoreIP(IP); } }; /// RAII for preserving necessary info during inlined region body codegen. class InlinedRegionBodyRAII { llvm::AssertingVH OldAllocaIP; CodeGenFunction &CGF; public: InlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP, llvm::BasicBlock &FiniBB) : CGF(cgf) { // Alloca insertion block should be in the entry block of the containing // function so it expects an empty AllocaIP in which case will reuse the // old alloca insertion point, or a new AllocaIP in the same block as // the old one assert((!AllocaIP.isSet() || CGF.AllocaInsertPt->getParent() == AllocaIP.getBlock()) && "Insertion point should be in the entry block of containing " "function!"); OldAllocaIP = CGF.AllocaInsertPt; if (AllocaIP.isSet()) CGF.AllocaInsertPt = &*AllocaIP.getPoint(); // TODO: Remove the call, after making sure the counter is not used by // the EHStack. // Since this is an inlined region, it should not modify the // ReturnBlock, and should reuse the one for the enclosing outlined // region. So, the JumpDest being return by the function is discarded (void)CGF.getJumpDestInCurrentScope(&FiniBB); } ~InlinedRegionBodyRAII() { CGF.AllocaInsertPt = OldAllocaIP; } }; }; private: /// CXXThisDecl - When generating code for a C++ member function, /// this will hold the implicit 'this' declaration. ImplicitParamDecl *CXXABIThisDecl = nullptr; llvm::Value *CXXABIThisValue = nullptr; llvm::Value *CXXThisValue = nullptr; CharUnits CXXABIThisAlignment; CharUnits CXXThisAlignment; /// The value of 'this' to use when evaluating CXXDefaultInitExprs within /// this expression. Address CXXDefaultInitExprThis = Address::invalid(); /// The current array initialization index when evaluating an /// ArrayInitIndexExpr within an ArrayInitLoopExpr. llvm::Value *ArrayInitIndex = nullptr; /// The values of function arguments to use when evaluating /// CXXInheritedCtorInitExprs within this context. CallArgList CXXInheritedCtorInitExprArgs; /// CXXStructorImplicitParamDecl - When generating code for a constructor or /// destructor, this will hold the implicit argument (e.g. VTT). ImplicitParamDecl *CXXStructorImplicitParamDecl = nullptr; llvm::Value *CXXStructorImplicitParamValue = nullptr; /// OutermostConditional - Points to the outermost active /// conditional control. This is used so that we know if a /// temporary should be destroyed conditionally. ConditionalEvaluation *OutermostConditional = nullptr; /// The current lexical scope. LexicalScope *CurLexicalScope = nullptr; /// The current source location that should be used for exception /// handling code. SourceLocation CurEHLocation; /// BlockByrefInfos - For each __block variable, contains /// information about the layout of the variable. llvm::DenseMap BlockByrefInfos; /// Used by -fsanitize=nullability-return to determine whether the return /// value can be checked. 
llvm::Value *RetValNullabilityPrecondition = nullptr; /// Check if -fsanitize=nullability-return instrumentation is required for /// this function. bool requiresReturnValueNullabilityCheck() const { return RetValNullabilityPrecondition; } /// Used to store precise source locations for return statements by the /// runtime return value checks. Address ReturnLocation = Address::invalid(); /// Check if the return value of this function requires sanitization. bool requiresReturnValueCheck() const; llvm::BasicBlock *TerminateLandingPad = nullptr; llvm::BasicBlock *TerminateHandler = nullptr; llvm::BasicBlock *TrapBB = nullptr; /// Terminate funclets keyed by parent funclet pad. llvm::MapVector TerminateFunclets; /// Largest vector width used in this function. Will be used to create a /// function attribute. unsigned LargestVectorWidth = 0; /// True if we need to emit the life-time markers. const bool ShouldEmitLifetimeMarkers; /// Add OpenCL kernel arg metadata and the kernel attribute metadata to /// the function metadata. void EmitOpenCLKernelMetadata(const FunctionDecl *FD, llvm::Function *Fn); public: CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false); ~CodeGenFunction(); CodeGenTypes &getTypes() const { return CGM.getTypes(); } ASTContext &getContext() const { return CGM.getContext(); } CGDebugInfo *getDebugInfo() { if (DisableDebugInfo) return nullptr; return DebugInfo; } void disableDebugInfo() { DisableDebugInfo = true; } void enableDebugInfo() { DisableDebugInfo = false; } bool shouldUseFusedARCCalls() { return CGM.getCodeGenOpts().OptimizationLevel == 0; } const LangOptions &getLangOpts() const { return CGM.getLangOpts(); } /// Returns a pointer to the function's exception object and selector slot, /// which is assigned in every landing pad. Address getExceptionSlot(); Address getEHSelectorSlot(); /// Returns the contents of the function's exception object and selector /// slots.
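/// For illustration, in a landing pad the two are typically read back
/// together (a sketch only; \c CGF stands for the current CodeGenFunction):
/// \code
///   llvm::Value *Exn = CGF.getExceptionFromSlot();
///   llvm::Value *Sel = CGF.getSelectorFromSlot();
/// \endcode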
llvm::Value *getExceptionFromSlot(); llvm::Value *getSelectorFromSlot(); Address getNormalCleanupDestSlot(); llvm::BasicBlock *getUnreachableBlock() { if (!UnreachableBlock) { UnreachableBlock = createBasicBlock("unreachable"); new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock); } return UnreachableBlock; } llvm::BasicBlock *getInvokeDest() { if (!EHStack.requiresLandingPad()) return nullptr; return getInvokeDestImpl(); } bool currentFunctionUsesSEHTry() const { return CurSEHParent != nullptr; } const TargetInfo &getTarget() const { return Target; } llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); } const TargetCodeGenInfo &getTargetHooks() const { return CGM.getTargetCodeGenInfo(); } //===--------------------------------------------------------------------===// // Cleanups //===--------------------------------------------------------------------===// typedef void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty); void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin, Address arrayEndPointer, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer); void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin, llvm::Value *arrayEnd, QualType elementType, CharUnits elementAlignment, Destroyer *destroyer); void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type); void pushEHDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type); void pushDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray); void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray); void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete, llvm::Value *CompletePtr, QualType ElementType); void pushStackRestore(CleanupKind kind, Address SPMem); void emitDestroy(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray); llvm::Function *generateDestroyHelper(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray, const VarDecl *VD); void emitArrayDestroy(llvm::Value *begin, llvm::Value *end, QualType elementType, CharUnits elementAlign, Destroyer *destroyer, bool checkZeroLength, bool useEHCleanup); Destroyer *getDestroyer(QualType::DestructionKind destructionKind); /// Determines whether an EH cleanup is required to destroy a type /// with the given destruction kind. bool needsEHCleanup(QualType::DestructionKind kind) { switch (kind) { case QualType::DK_none: return false; case QualType::DK_cxx_destructor: case QualType::DK_objc_weak_lifetime: case QualType::DK_nontrivial_c_struct: return getLangOpts().Exceptions; case QualType::DK_objc_strong_lifetime: return getLangOpts().Exceptions && CGM.getCodeGenOpts().ObjCAutoRefCountExceptions; } llvm_unreachable("bad destruction kind"); } CleanupKind getCleanupKind(QualType::DestructionKind kind) { return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup); } //===--------------------------------------------------------------------===// // Objective-C //===--------------------------------------------------------------------===// void GenerateObjCMethod(const ObjCMethodDecl *OMD); void StartObjCMethod(const ObjCMethodDecl *MD, const ObjCContainerDecl *CD); /// GenerateObjCGetter - Synthesize an Objective-C property getter function. 
void GenerateObjCGetter(ObjCImplementationDecl *IMP, const ObjCPropertyImplDecl *PID); void generateObjCGetterBody(const ObjCImplementationDecl *classImpl, const ObjCPropertyImplDecl *propImpl, const ObjCMethodDecl *GetterMothodDecl, llvm::Constant *AtomicHelperFn); void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP, ObjCMethodDecl *MD, bool ctor); /// GenerateObjCSetter - Synthesize an Objective-C property setter function /// for the given property. void GenerateObjCSetter(ObjCImplementationDecl *IMP, const ObjCPropertyImplDecl *PID); void generateObjCSetterBody(const ObjCImplementationDecl *classImpl, const ObjCPropertyImplDecl *propImpl, llvm::Constant *AtomicHelperFn); //===--------------------------------------------------------------------===// // Block Bits //===--------------------------------------------------------------------===// /// Emit block literal. /// \return an LLVM value which is a pointer to a struct which contains /// information about the block, including the block invoke function, the /// captured variables, etc. llvm::Value *EmitBlockLiteral(const BlockExpr *); llvm::Function *GenerateBlockFunction(GlobalDecl GD, const CGBlockInfo &Info, const DeclMapTy &ldm, bool IsLambdaConversionToBlock, bool BuildGlobalBlock); /// Check if \p T is a C++ class that has a destructor that can throw. static bool cxxDestructorCanThrow(QualType T); llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo); llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo); llvm::Constant *GenerateObjCAtomicSetterCopyHelperFunction( const ObjCPropertyImplDecl *PID); llvm::Constant *GenerateObjCAtomicGetterCopyHelperFunction( const ObjCPropertyImplDecl *PID); llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty); void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags, bool CanThrow); class AutoVarEmission; void emitByrefStructureInit(const AutoVarEmission &emission); /// Enter a cleanup to destroy a __block variable. Note that this /// cleanup should be a no-op if the variable hasn't left the stack /// yet; if a cleanup is required for the variable itself, that needs /// to be done externally. /// /// \param Kind Cleanup kind. /// /// \param Addr When \p LoadBlockVarAddr is false, the address of the __block /// structure that will be passed to _Block_object_dispose. When /// \p LoadBlockVarAddr is true, the address of the field of the block /// structure that holds the address of the __block structure. /// /// \param Flags The flag that will be passed to _Block_object_dispose. /// /// \param LoadBlockVarAddr Indicates whether we need to emit a load from /// \p Addr to get the address of the __block structure. void enterByrefCleanup(CleanupKind Kind, Address Addr, BlockFieldFlags Flags, bool LoadBlockVarAddr, bool CanThrow); void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum, llvm::Value *ptr); Address LoadBlockStruct(); Address GetAddrOfBlockDecl(const VarDecl *var); /// BuildBlockByrefAddress - Computes the location of the /// data in a variable which is declared as __block. 
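/// An illustrative sketch only, assuming a CodeGenFunction \c CGF, the local
/// address \c VarAddr of a __block variable, and its VarDecl \c VD:
/// \code
///   Address DataAddr =
///       CGF.emitBlockByrefAddress(VarAddr, VD, /*followForward=*/true);
/// \endcode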
Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V, bool followForward = true); Address emitBlockByrefAddress(Address baseAddr, const BlockByrefInfo &info, bool followForward, const llvm::Twine &name); const BlockByrefInfo &getBlockByrefInfo(const VarDecl *var); QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args); void GenerateCode(GlobalDecl GD, llvm::Function *Fn, const CGFunctionInfo &FnInfo); /// Annotate the function with an attribute that disables TSan checking at /// runtime. void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn); /// Emit code for the start of a function. /// \param Loc The location to be associated with the function. /// \param StartLoc The location of the function body. void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc = SourceLocation(), SourceLocation StartLoc = SourceLocation()); static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor); void EmitConstructorBody(FunctionArgList &Args); void EmitDestructorBody(FunctionArgList &Args); void emitImplicitAssignmentOperatorBody(FunctionArgList &Args); void EmitFunctionBody(const Stmt *Body); void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S); void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator, CallArgList &CallArgs); void EmitLambdaBlockInvokeBody(); void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD); void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD); void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV) { EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV); } void EmitAsanPrologueOrEpilogue(bool Prologue); /// Emit the unified return block, trying to avoid its emission when /// possible. /// \return The debug location of the user written return statement if the /// return block is is avoided. llvm::DebugLoc EmitReturnBlock(); /// FinishFunction - Complete IR generation of the current function. It is /// legal to call this function even if there is no current insertion point. void FinishFunction(SourceLocation EndLoc=SourceLocation()); void StartThunk(llvm::Function *Fn, GlobalDecl GD, const CGFunctionInfo &FnInfo, bool IsUnprototyped); void EmitCallAndReturnForThunk(llvm::FunctionCallee Callee, const ThunkInfo *Thunk, bool IsUnprototyped); void FinishThunk(); /// Emit a musttail call for a thunk with a potentially adjusted this pointer. void EmitMustTailThunk(GlobalDecl GD, llvm::Value *AdjustedThisPtr, llvm::FunctionCallee Callee); /// Generate a thunk for the given method. void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo, GlobalDecl GD, const ThunkInfo &Thunk, bool IsUnprototyped); llvm::Function *GenerateVarArgsThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo, GlobalDecl GD, const ThunkInfo &Thunk); void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type, FunctionArgList &Args); void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init); /// Struct with all information about dynamic [sub]class needed to set vptr. struct VPtr { BaseSubobject Base; const CXXRecordDecl *NearestVBase; CharUnits OffsetFromNearestVBase; const CXXRecordDecl *VTableClass; }; /// Initialize the vtable pointer of the given subobject. 
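/// A hedged sketch of how the vptr helpers below compose (\c CGF and
/// \c ClassDecl are assumed to be in scope; this mirrors, but is not copied
/// from, InitializeVTablePointers):
/// \code
///   for (const VPtr &Vptr : CGF.getVTablePointers(ClassDecl))
///     CGF.InitializeVTablePointer(Vptr);
/// \endcode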
void InitializeVTablePointer(const VPtr &vptr); typedef llvm::SmallVector VPtrsVector; typedef llvm::SmallPtrSet VisitedVirtualBasesSetTy; VPtrsVector getVTablePointers(const CXXRecordDecl *VTableClass); void getVTablePointers(BaseSubobject Base, const CXXRecordDecl *NearestVBase, CharUnits OffsetFromNearestVBase, bool BaseIsNonVirtualPrimaryBase, const CXXRecordDecl *VTableClass, VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs); void InitializeVTablePointers(const CXXRecordDecl *ClassDecl); /// GetVTablePtr - Return the Value of the vtable pointer member pointed /// to by This. llvm::Value *GetVTablePtr(Address This, llvm::Type *VTableTy, const CXXRecordDecl *VTableClass); enum CFITypeCheckKind { CFITCK_VCall, CFITCK_NVCall, CFITCK_DerivedCast, CFITCK_UnrelatedCast, CFITCK_ICall, CFITCK_NVMFCall, CFITCK_VMFCall, }; /// Derived is the presumed address of an object of type T after a /// cast. If T is a polymorphic class type, emit a check that the virtual /// table for Derived belongs to a class derived from T. void EmitVTablePtrCheckForCast(QualType T, llvm::Value *Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc); /// EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable. /// If vptr CFI is enabled, emit a check that VTable is valid. void EmitVTablePtrCheckForCall(const CXXRecordDecl *RD, llvm::Value *VTable, CFITypeCheckKind TCK, SourceLocation Loc); /// EmitVTablePtrCheck - Emit a check that VTable is a valid virtual table for /// RD using llvm.type.test. void EmitVTablePtrCheck(const CXXRecordDecl *RD, llvm::Value *VTable, CFITypeCheckKind TCK, SourceLocation Loc); /// If whole-program virtual table optimization is enabled, emit an assumption /// that VTable is a member of RD's type identifier. Or, if vptr CFI is /// enabled, emit a check that VTable is a member of RD's type identifier. void EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD, llvm::Value *VTable, SourceLocation Loc); /// Returns whether we should perform a type checked load when loading a /// virtual function for virtual calls to members of RD. This is generally /// true when both vcall CFI and whole-program-vtables are enabled. bool ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD); /// Emit a type checked load from the given vtable. llvm::Value *EmitVTableTypeCheckedLoad(const CXXRecordDecl *RD, llvm::Value *VTable, uint64_t VTableByteOffset); /// EnterDtorCleanups - Enter the cleanups necessary to complete the /// given phase of destruction for a destructor. The end result /// should call destructors on members and base classes in reverse /// order of their construction. void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type); /// ShouldInstrumentFunction - Return true if the current function should be /// instrumented with __cyg_profile_func_* calls bool ShouldInstrumentFunction(); /// ShouldXRayInstrument - Return true if the current function should be /// instrumented with XRay nop sleds. bool ShouldXRayInstrumentFunction() const; /// AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit /// XRay custom event handling calls. bool AlwaysEmitXRayCustomEvents() const; /// AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit /// XRay typed event handling calls. bool AlwaysEmitXRayTypedEvents() const; /// Encode an address into a form suitable for use in a function prologue. 
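/// Illustrative pairing of the two prologue-address helpers (a sketch only;
/// \c Fn, \c Addr, \c FnPtr, and \c EncodedAddr are assumed values):
/// \code
///   llvm::Constant *Encoded = CGF.EncodeAddrForUseInPrologue(Fn, Addr);
///   llvm::Value *Decoded = CGF.DecodeAddrUsedInPrologue(FnPtr, EncodedAddr);
/// \endcode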
llvm::Constant *EncodeAddrForUseInPrologue(llvm::Function *F, llvm::Constant *Addr); /// Decode an address used in a function prologue, encoded by \c /// EncodeAddrForUseInPrologue. llvm::Value *DecodeAddrUsedInPrologue(llvm::Value *F, llvm::Value *EncodedAddr); /// EmitFunctionProlog - Emit the target-specific LLVM code to load the /// arguments for the given function. This is also responsible for naming the /// LLVM function arguments. void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args); /// EmitFunctionEpilog - Emit the target-specific LLVM code to return the /// given temporary. void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc); /// Emit a test that checks if the return value \p RV is nonnull. void EmitReturnValueCheck(llvm::Value *RV); /// EmitStartEHSpec - Emit the start of the exception spec. void EmitStartEHSpec(const Decl *D); /// EmitEndEHSpec - Emit the end of the exception spec. void EmitEndEHSpec(const Decl *D); /// getTerminateLandingPad - Return a landing pad that just calls terminate. llvm::BasicBlock *getTerminateLandingPad(); /// getTerminateFunclet - Return a cleanup funclet that just calls /// terminate. llvm::BasicBlock *getTerminateFunclet(); /// getTerminateHandler - Return a handler (not a landing pad, just /// a catch handler) that just calls terminate. This is used when /// a terminate scope encloses a try. llvm::BasicBlock *getTerminateHandler(); llvm::Type *ConvertTypeForMem(QualType T); llvm::Type *ConvertType(QualType T); llvm::Type *ConvertType(const TypeDecl *T) { return ConvertType(getContext().getTypeDeclType(T)); } /// LoadObjCSelf - Load the value of self. This function is only valid while /// generating code for an Objective-C method. llvm::Value *LoadObjCSelf(); /// TypeOfSelfObject - Return type of object that this self represents. QualType TypeOfSelfObject(); /// getEvaluationKind - Return the TypeEvaluationKind of QualType \c T. static TypeEvaluationKind getEvaluationKind(QualType T); static bool hasScalarEvaluationKind(QualType T) { return getEvaluationKind(T) == TEK_Scalar; } static bool hasAggregateEvaluationKind(QualType T) { return getEvaluationKind(T) == TEK_Aggregate; } /// createBasicBlock - Create an LLVM basic block. llvm::BasicBlock *createBasicBlock(const Twine &name = "", llvm::Function *parent = nullptr, llvm::BasicBlock *before = nullptr) { return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before); } /// getJumpDestForLabel - Return the JumpDest that the specified /// label maps to. JumpDest getJumpDestForLabel(const LabelDecl *S); /// SimplifyForwardingBlocks - If the given basic block is only a branch to /// another basic block, simplify it. This assumes that no other code could /// potentially reference the basic block. void SimplifyForwardingBlocks(llvm::BasicBlock *BB); /// EmitBlock - Emit the given block \arg BB and set it as the insert point, /// adding a fall-through branch from the current insert block if /// necessary. It is legal to call this function even if there is no current /// insertion point. /// /// IsFinished - If true, indicates that the caller has finished emitting /// branches to the given block and does not expect to emit code into it. This /// means the block can be ignored if it is unreachable. void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false); /// EmitBlockAfterUses - Emit the given block somewhere hopefully /// near its uses, and leave the insertion point in it.
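/// The block helpers above (and EmitBranch below) are commonly combined in
/// the following pattern; an illustrative sketch only, with \c CGF assumed:
/// \code
///   llvm::BasicBlock *ContBB = CGF.createBasicBlock("cont");
///   CGF.EmitBranch(ContBB);  // leaves the builder without an insert point
///   CGF.EmitBlock(ContBB);   // makes ContBB the new insert point
/// \endcode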
void EmitBlockAfterUses(llvm::BasicBlock *BB); /// EmitBranch - Emit a branch to the specified basic block from the current /// insert block, taking care to avoid creation of branches from dummy /// blocks. It is legal to call this function even if there is no current /// insertion point. /// /// This function clears the current insertion point. The caller should follow /// calls to this function with calls to Emit*Block prior to generation new /// code. void EmitBranch(llvm::BasicBlock *Block); /// HaveInsertPoint - True if an insertion point is defined. If not, this /// indicates that the current code being emitted is unreachable. bool HaveInsertPoint() const { return Builder.GetInsertBlock() != nullptr; } /// EnsureInsertPoint - Ensure that an insertion point is defined so that /// emitted IR has a place to go. Note that by definition, if this function /// creates a block then that block is unreachable; callers may do better to /// detect when no insertion point is defined and simply skip IR generation. void EnsureInsertPoint() { if (!HaveInsertPoint()) EmitBlock(createBasicBlock()); } /// ErrorUnsupported - Print out an error that codegen doesn't support the /// specified stmt yet. void ErrorUnsupported(const Stmt *S, const char *Type); //===--------------------------------------------------------------------===// // Helpers //===--------------------------------------------------------------------===// LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source = AlignmentSource::Type) { return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source), CGM.getTBAAAccessInfo(T)); } LValue MakeAddrLValue(Address Addr, QualType T, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo) { return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo); } LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment, AlignmentSource Source = AlignmentSource::Type) { return LValue::MakeAddr(Address(V, Alignment), T, getContext(), LValueBaseInfo(Source), CGM.getTBAAAccessInfo(T)); } LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo) { return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo, TBAAInfo); } LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T); LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T); Address EmitLoadOfReference(LValue RefLVal, LValueBaseInfo *PointeeBaseInfo = nullptr, TBAAAccessInfo *PointeeTBAAInfo = nullptr); LValue EmitLoadOfReferenceLValue(LValue RefLVal); LValue EmitLoadOfReferenceLValue(Address RefAddr, QualType RefTy, AlignmentSource Source = AlignmentSource::Type) { LValue RefLVal = MakeAddrLValue(RefAddr, RefTy, LValueBaseInfo(Source), CGM.getTBAAAccessInfo(RefTy)); return EmitLoadOfReferenceLValue(RefLVal); } Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy, LValueBaseInfo *BaseInfo = nullptr, TBAAAccessInfo *TBAAInfo = nullptr); LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy); /// CreateTempAlloca - This creates an alloca and inserts it into the entry /// block if \p ArraySize is nullptr, otherwise inserts it at the current /// insertion point of the builder. The caller is responsible for setting an /// appropriate alignment on /// the alloca. /// /// \p ArraySize is the number of array elements to be allocated if it /// is not nullptr. /// /// LangAS::Default is the address space of pointers to local variables and /// temporaries, as exposed in the source language. 
In certain /// configurations, this is not the same as the alloca address space, and a /// cast is needed to lift the pointer from the alloca AS into /// LangAS::Default. This can happen when the target uses a restricted /// address space for the stack but the source language requires /// LangAS::Default to be a generic address space. The latter condition is /// common for most programming languages; OpenCL is an exception in that /// LangAS::Default is the private address space, which naturally maps /// to the stack. /// /// Because the address of a temporary is often exposed to the program in /// various ways, this function will perform the cast. The original alloca /// instruction is returned through \p Alloca if it is not nullptr. /// /// The cast is not performed in CreateTempAllocaWithoutCast. This is /// more efficient if the caller knows that the address will not be exposed. llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp", llvm::Value *ArraySize = nullptr); Address CreateTempAlloca(llvm::Type *Ty, CharUnits align, const Twine &Name = "tmp", llvm::Value *ArraySize = nullptr, Address *Alloca = nullptr); Address CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align, const Twine &Name = "tmp", llvm::Value *ArraySize = nullptr); /// CreateDefaultAlignTempAlloca - This creates an alloca with the /// default ABI alignment of the given LLVM type. /// /// IMPORTANT NOTE: This is *not* generally the right alignment for /// any given AST type that happens to have been lowered to the /// given IR type. This should only ever be used for function-local, /// IR-driven manipulations like saving and restoring a value. Do /// not hand this address off to arbitrary IRGen routines, and especially /// do not pass it as an argument to a function that might expect a /// properly ABI-aligned value. Address CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp"); /// InitTempAlloca - Provide an initial value for the given alloca which /// will be observable at all locations in the function. /// /// The address should be something that was returned from one of /// the CreateTempAlloca or CreateMemTemp routines, and the /// initializer must be valid in the entry block (i.e. it must /// either be a constant or an argument value). void InitTempAlloca(Address Alloca, llvm::Value *Value); /// CreateIRTemp - Create a temporary IR object of the given type, with /// appropriate alignment. This routine should only be used when a temporary /// value needs to be stored into an alloca (for example, to avoid explicit /// PHI construction), but the type is the IR type, not the type appropriate /// for storing in memory. /// /// That is, this is exactly equivalent to CreateMemTemp, but calling /// ConvertType instead of ConvertTypeForMem. Address CreateIRTemp(QualType T, const Twine &Name = "tmp"); /// CreateMemTemp - Create a temporary memory object of the given type, with /// appropriate alignment and cast it to the default address space. Returns /// the original alloca instruction by \p Alloca if it is not nullptr. Address CreateMemTemp(QualType T, const Twine &Name = "tmp", Address *Alloca = nullptr); Address CreateMemTemp(QualType T, CharUnits Align, const Twine &Name = "tmp", Address *Alloca = nullptr); /// CreateMemTempWithoutCast - Create a temporary memory object of the given /// type, with appropriate alignment without casting it to the default /// address space.
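/// A minimal usage sketch for the CreateMemTemp family above (illustrative;
/// \c CGF and the expression \c E are assumptions of the example):
/// \code
///   Address Tmp = CGF.CreateMemTemp(E->getType(), "agg.tmp");
///   CGF.EmitAnyExprToMem(E, Tmp, E->getType().getQualifiers(),
///                        /*IsInitializer=*/true);
/// \endcode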
Address CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp"); Address CreateMemTempWithoutCast(QualType T, CharUnits Align, const Twine &Name = "tmp"); /// CreateAggTemp - Create a temporary memory object for the given /// aggregate type. AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp", Address *Alloca = nullptr) { return AggValueSlot::forAddr(CreateMemTemp(T, Name, Alloca), T.getQualifiers(), AggValueSlot::IsNotDestructed, AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap); } /// Emit a cast to void* in the appropriate address space. llvm::Value *EmitCastToVoidPtr(llvm::Value *value); /// EvaluateExprAsBool - Perform the usual unary conversions on the specified /// expression and compare the result against zero, returning an Int1Ty value. llvm::Value *EvaluateExprAsBool(const Expr *E); /// EmitIgnoredExpr - Emit an expression in a context which ignores the result. void EmitIgnoredExpr(const Expr *E); /// EmitAnyExpr - Emit code to compute the specified expression which can have /// any type. The result is returned as an RValue struct. If this is an /// aggregate expression, the aggloc/agglocvolatile arguments indicate where /// the result should be returned. /// /// \param ignoreResult True if the resulting value isn't used. RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot = AggValueSlot::ignored(), bool ignoreResult = false); // EmitVAListRef - Emit a "reference" to a va_list; this is either the address // or the value of the expression, depending on how va_list is defined. Address EmitVAListRef(const Expr *E); /// Emit a "reference" to a __builtin_ms_va_list; this is /// always the value of the expression, because a __builtin_ms_va_list is a /// pointer to a char. Address EmitMSVAListRef(const Expr *E); /// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will /// always be accessible even if no aggregate location is provided. RValue EmitAnyExprToTemp(const Expr *E); /// EmitAnyExprToMem - Emits the code necessary to evaluate an /// arbitrary expression into the given memory location. void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer); void EmitAnyExprToExn(const Expr *E, Address Addr); /// EmitExprAsInit - Emits the code necessary to initialize a /// location in memory with the given initializer. void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit); /// hasVolatileMember - returns true if aggregate type has a volatile /// member. bool hasVolatileMember(QualType T) { if (const RecordType *RT = T->getAs()) { const RecordDecl *RD = cast(RT->getDecl()); return RD->hasVolatileMember(); } return false; } /// Determine whether a return value slot may overlap some other object. AggValueSlot::Overlap_t getOverlapForReturnValue() { // FIXME: Assuming no overlap here breaks guaranteed copy elision for base // class subobjects. These cases may need to be revisited depending on the // resolution of the relevant core issue. return AggValueSlot::DoesNotOverlap; } /// Determine whether a field initialization may overlap some other object. AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD); /// Determine whether a base class initialization may overlap some other /// object. AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual); /// Emit an aggregate assignment. 
void EmitAggregateAssign(LValue Dest, LValue Src, QualType EltTy) { bool IsVolatile = hasVolatileMember(EltTy); EmitAggregateCopy(Dest, Src, EltTy, AggValueSlot::MayOverlap, IsVolatile); } void EmitAggregateCopyCtor(LValue Dest, LValue Src, AggValueSlot::Overlap_t MayOverlap) { EmitAggregateCopy(Dest, Src, Src.getType(), MayOverlap); } /// EmitAggregateCopy - Emit an aggregate copy. /// /// \param isVolatile \c true iff either the source or the destination is /// volatile. /// \param MayOverlap Whether the tail padding of the destination might be /// occupied by some other object. More efficient code can often be /// generated if not. void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile = false); /// GetAddrOfLocalVar - Return the address of a local variable. Address GetAddrOfLocalVar(const VarDecl *VD) { auto it = LocalDeclMap.find(VD); assert(it != LocalDeclMap.end() && "Invalid argument to GetAddrOfLocalVar(), no decl!"); return it->second; } /// Given an opaque value expression, return its LValue mapping if it exists, /// otherwise create one. LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e); /// Given an opaque value expression, return its RValue mapping if it exists, /// otherwise create one. RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e); /// Get the index of the current ArrayInitLoopExpr, if any. llvm::Value *getArrayInitIndex() { return ArrayInitIndex; } /// getAccessedFieldNo - Given an encoded value and a result number, return /// the input field number being accessed. static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts); llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L); llvm::BasicBlock *GetIndirectGotoBlock(); /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts. static bool IsWrappedCXXThis(const Expr *E); /// EmitNullInitialization - Generate code to set a value of the given type to /// null, If the type contains data member pointers, they will be initialized /// to -1 in accordance with the Itanium C++ ABI. void EmitNullInitialization(Address DestPtr, QualType Ty); /// Emits a call to an LLVM variable-argument intrinsic, either /// \c llvm.va_start or \c llvm.va_end. /// \param ArgValue A reference to the \c va_list as emitted by either /// \c EmitVAListRef or \c EmitMSVAListRef. /// \param IsStart If \c true, emits a call to \c llvm.va_start; otherwise, /// calls \c llvm.va_end. llvm::Value *EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart); /// Generate code to get an argument from the passed in pointer /// and update it accordingly. /// \param VE The \c VAArgExpr for which to generate code. /// \param VAListAddr Receives a reference to the \c va_list as emitted by /// either \c EmitVAListRef or \c EmitMSVAListRef. /// \returns A pointer to the argument. // FIXME: We should be able to get rid of this method and use the va_arg // instruction in LLVM instead once it works well enough. Address EmitVAArg(VAArgExpr *VE, Address &VAListAddr); /// emitArrayLength - Compute the length of an array, even if it's a /// VLA, and drill down to the base element type. llvm::Value *emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr); /// EmitVLASize - Capture all the sizes for the VLA expressions in /// the given variably-modified type and store them in the VLASizeMap. /// /// This function can be called with a null (unreachable) insert point. 
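/// Sketch of the usual call sequence (illustrative only; \c CGF and the
/// variably-modified type \c VlaTy are assumed):
/// \code
///   CGF.EmitVariablyModifiedType(VlaTy);   // populates VLASizeMap
///   auto VlaSize = CGF.getVLASize(VlaTy);  // requires the line above
///   llvm::Value *NumElts = VlaSize.NumElts;
/// \endcode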
void EmitVariablyModifiedType(QualType Ty); struct VlaSizePair { llvm::Value *NumElts; QualType Type; VlaSizePair(llvm::Value *NE, QualType T) : NumElts(NE), Type(T) {} }; /// Return the number of elements for a single dimension /// for the given array type. VlaSizePair getVLAElements1D(const VariableArrayType *vla); VlaSizePair getVLAElements1D(QualType vla); /// Returns an LLVM value that corresponds to the size, /// in non-variably-sized elements, of a variable length array type, /// plus the largest non-variably-sized element type. Assumes that /// the type has already been emitted with EmitVariablyModifiedType. VlaSizePair getVLASize(const VariableArrayType *vla); VlaSizePair getVLASize(QualType vla); /// LoadCXXThis - Load the value of 'this'. This function is only valid while /// generating code for a C++ member function. llvm::Value *LoadCXXThis() { assert(CXXThisValue && "no 'this' value for this function"); return CXXThisValue; } Address LoadCXXThisAddress(); /// LoadCXXVTT - Load the VTT parameter to base constructors/destructors that /// have virtual bases. // FIXME: Every place that calls LoadCXXVTT is something // that needs to be abstracted properly. llvm::Value *LoadCXXVTT() { assert(CXXStructorImplicitParamValue && "no VTT value for this function"); return CXXStructorImplicitParamValue; } /// GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a /// complete class to the given direct base. Address GetAddressOfDirectBaseInCompleteClass(Address Value, const CXXRecordDecl *Derived, const CXXRecordDecl *Base, bool BaseIsVirtual); static bool ShouldNullCheckClassCastValue(const CastExpr *Cast); /// GetAddressOfBaseClass - This function will add the necessary delta to the /// load of 'this' and returns the address of the base class. Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc); Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue); /// GetVTTParameter - Return the VTT parameter that should be passed to a /// base constructor/destructor with virtual bases. /// FIXME: VTTs are Itanium ABI-specific, so the definition should move /// to ItaniumCXXABI.cpp together with all the references to VTT. llvm::Value *GetVTTParameter(GlobalDecl GD, bool ForVirtualBase, bool Delegating); void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor, CXXCtorType CtorType, const FunctionArgList &Args, SourceLocation Loc); // It's important not to confuse this and the previous function. Delegating // constructors are the C++11 feature. The constructor delegate optimization // is used to reduce duplication in the base and complete constructors where // they are substantially the same. void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor, const FunctionArgList &Args); /// Emit a call to an inheriting constructor (that is, one that invokes a /// constructor inherited from a base class) by inlining its definition. This /// is necessary if the ABI does not support forwarding the arguments to the /// base class constructor (because they're variadic or similar).
void EmitInlinedInheritingCXXConstructorCall(const CXXConstructorDecl *Ctor, CXXCtorType CtorType, bool ForVirtualBase, bool Delegating, CallArgList &Args); /// Emit a call to a constructor inherited from a base class, passing the /// current constructor's arguments along unmodified (without even making /// a copy). void EmitInheritedCXXConstructorCall(const CXXConstructorDecl *D, bool ForVirtualBase, Address This, bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E); void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase, bool Delegating, AggValueSlot ThisAVS, const CXXConstructExpr *E); void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase, bool Delegating, Address This, CallArgList &Args, AggValueSlot::Overlap_t Overlap, SourceLocation Loc, bool NewPointerIsChecked); /// Emit assumption load for all bases. Requires to be be called only on /// most-derived class and not under construction of the object. void EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl, Address This); /// Emit assumption that vptr load == global vtable. void EmitVTableAssumptionLoad(const VPtr &vptr, Address This); void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D, Address This, Address Src, const CXXConstructExpr *E); void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D, const ArrayType *ArrayTy, Address ArrayPtr, const CXXConstructExpr *E, bool NewPointerIsChecked, bool ZeroInitialization = false); void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D, llvm::Value *NumElements, Address ArrayPtr, const CXXConstructExpr *E, bool NewPointerIsChecked, bool ZeroInitialization = false); static Destroyer destroyCXXObject; void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy); void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType, llvm::Type *ElementTy, Address NewPtr, llvm::Value *NumElements, llvm::Value *AllocSizeWithoutCookie); void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr); llvm::Value *EmitLifetimeStart(uint64_t Size, llvm::Value *Addr); void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr); llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E); void EmitCXXDeleteExpr(const CXXDeleteExpr *E); void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr, QualType DeleteTy, llvm::Value *NumElements = nullptr, CharUnits CookieSize = CharUnits()); RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type, const CallExpr *TheCallExpr, bool IsDelete); llvm::Value *EmitCXXTypeidExpr(const CXXTypeidExpr *E); llvm::Value *EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE); Address EmitCXXUuidofExpr(const CXXUuidofExpr *E); /// Situations in which we might emit a check for the suitability of a /// pointer or glvalue. Needs to be kept in sync with ubsan_handlers.cpp in /// compiler-rt. enum TypeCheckKind { /// Checking the operand of a load. Must be suitably sized and aligned. TCK_Load, /// Checking the destination of a store. Must be suitably sized and aligned. TCK_Store, /// Checking the bound value in a reference binding. Must be suitably sized /// and aligned, but is not required to refer to an object (until the /// reference is used), per core issue 453. TCK_ReferenceBinding, /// Checking the object expression in a non-static data member access. Must /// be an object within its lifetime. 
TCK_MemberAccess, /// Checking the 'this' pointer for a call to a non-static member function. /// Must be an object within its lifetime. TCK_MemberCall, /// Checking the 'this' pointer for a constructor call. TCK_ConstructorCall, /// Checking the operand of a static_cast to a derived pointer type. Must be /// null or an object within its lifetime. TCK_DowncastPointer, /// Checking the operand of a static_cast to a derived reference type. Must /// be an object within its lifetime. TCK_DowncastReference, /// Checking the operand of a cast to a base object. Must be suitably sized /// and aligned. TCK_Upcast, /// Checking the operand of a cast to a virtual base object. Must be an /// object within its lifetime. TCK_UpcastToVirtualBase, /// Checking the value assigned to a _Nonnull pointer. Must not be null. TCK_NonnullAssign, /// Checking the operand of a dynamic_cast or a typeid expression. Must be /// null or an object within its lifetime. TCK_DynamicOperation }; /// Determine whether the pointer type check \p TCK permits null pointers. static bool isNullPointerAllowed(TypeCheckKind TCK); /// Determine whether the pointer type check \p TCK requires a vptr check. static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty); /// Whether any type-checking sanitizers are enabled. If \c false, /// calls to EmitTypeCheck can be skipped. bool sanitizePerformTypeCheck() const; /// Emit a check that \p V is the address of storage of the /// appropriate size and alignment for an object of type \p Type /// (or if ArraySize is provided, for an array of that bound). void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V, QualType Type, CharUnits Alignment = CharUnits::Zero(), SanitizerSet SkippedChecks = SanitizerSet(), llvm::Value *ArraySize = nullptr); /// Emit a check that \p Base points into an array object, which /// we can access at index \p Index. \p Accessed should be \c false if we /// this expression is used as an lvalue, for instance in "&Arr[Idx]". void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index, QualType IndexType, bool Accessed); llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre); ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre); /// Converts Location to a DebugLoc, if debug information is enabled. llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location); /// Get the record field index as represented in debug info. unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex); //===--------------------------------------------------------------------===// // Declaration Emission //===--------------------------------------------------------------------===// /// EmitDecl - Emit a declaration. /// /// This function can be called with a null (unreachable) insert point. void EmitDecl(const Decl &D); /// EmitVarDecl - Emit a local variable declaration. /// /// This function can be called with a null (unreachable) insert point. void EmitVarDecl(const VarDecl &D); void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit); typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D, llvm::Value *Address); /// Determine whether the given initializer is trivial in the sense /// that it requires no code to be generated. bool isTrivialInitializer(const Expr *Init); /// EmitAutoVarDecl - Emit an auto variable declaration. /// /// This function can be called with a null (unreachable) insert point. 
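/// EmitAutoVarDecl is, in essence, the composition of the three phases
/// declared below; a hedged sketch (not a verbatim copy of the
/// implementation), with \c CGF and the variable \c D assumed:
/// \code
///   AutoVarEmission Emission = CGF.EmitAutoVarAlloca(D);
///   CGF.EmitAutoVarInit(Emission);
///   CGF.EmitAutoVarCleanups(Emission);
/// \endcode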
void EmitAutoVarDecl(const VarDecl &D); class AutoVarEmission { friend class CodeGenFunction; const VarDecl *Variable; /// The address of the alloca for languages with explicit address space /// (e.g. OpenCL) or alloca casted to generic pointer for address space /// agnostic languages (e.g. C++). Invalid if the variable was emitted /// as a global constant. Address Addr; llvm::Value *NRVOFlag; /// True if the variable is a __block variable that is captured by an /// escaping block. bool IsEscapingByRef; /// True if the variable is of aggregate type and has a constant /// initializer. bool IsConstantAggregate; /// Non-null if we should use lifetime annotations. llvm::Value *SizeForLifetimeMarkers; /// Address with original alloca instruction. Invalid if the variable was /// emitted as a global constant. Address AllocaAddr; struct Invalid {}; AutoVarEmission(Invalid) : Variable(nullptr), Addr(Address::invalid()), AllocaAddr(Address::invalid()) {} AutoVarEmission(const VarDecl &variable) : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr), IsEscapingByRef(false), IsConstantAggregate(false), SizeForLifetimeMarkers(nullptr), AllocaAddr(Address::invalid()) {} bool wasEmittedAsGlobal() const { return !Addr.isValid(); } public: static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); } bool useLifetimeMarkers() const { return SizeForLifetimeMarkers != nullptr; } llvm::Value *getSizeForLifetimeMarkers() const { assert(useLifetimeMarkers()); return SizeForLifetimeMarkers; } /// Returns the raw, allocated address, which is not necessarily /// the address of the object itself. It is casted to default /// address space for address space agnostic languages. Address getAllocatedAddress() const { return Addr; } /// Returns the address for the original alloca instruction. Address getOriginalAllocatedAddress() const { return AllocaAddr; } /// Returns the address of the object within this declaration. /// Note that this does not chase the forwarding pointer for /// __block decls. Address getObjectAddress(CodeGenFunction &CGF) const { if (!IsEscapingByRef) return Addr; return CGF.emitBlockByrefAddress(Addr, Variable, /*forward*/ false); } }; AutoVarEmission EmitAutoVarAlloca(const VarDecl &var); void EmitAutoVarInit(const AutoVarEmission &emission); void EmitAutoVarCleanups(const AutoVarEmission &emission); void emitAutoVarTypeCleanup(const AutoVarEmission &emission, QualType::DestructionKind dtorKind); /// Emits the alloca and debug information for the size expressions for each /// dimension of an array. It registers the association of its (1-dimensional) /// QualTypes and size expression's debug node, so that CGDebugInfo can /// reference this node when creating the DISubrange object to describe the /// array types. 
void EmitAndRegisterVariableArrayDimensions(CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo); void EmitStaticVarDecl(const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage); class ParamValue { llvm::Value *Value; unsigned Alignment; ParamValue(llvm::Value *V, unsigned A) : Value(V), Alignment(A) {} public: static ParamValue forDirect(llvm::Value *value) { return ParamValue(value, 0); } static ParamValue forIndirect(Address addr) { assert(!addr.getAlignment().isZero()); return ParamValue(addr.getPointer(), addr.getAlignment().getQuantity()); } bool isIndirect() const { return Alignment != 0; } llvm::Value *getAnyValue() const { return Value; } llvm::Value *getDirectValue() const { assert(!isIndirect()); return Value; } Address getIndirectAddress() const { assert(isIndirect()); return Address(Value, CharUnits::fromQuantity(Alignment)); } }; /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl. void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo); /// protectFromPeepholes - Protect a value that we're intending to /// store to the side, but which will probably be used later, from /// aggressive peepholing optimizations that might delete it. /// /// Pass the result to unprotectFromPeepholes to declare that /// protection is no longer required. /// /// There's no particular reason why this shouldn't apply to /// l-values, it's just that no existing peepholes work on pointers. PeepholeProtection protectFromPeepholes(RValue rvalue); void unprotectFromPeepholes(PeepholeProtection protection); void emitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue, llvm::Value *TheCheck, llvm::Instruction *Assumption); void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue = nullptr); void emitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue = nullptr); //===--------------------------------------------------------------------===// // Statement Emission //===--------------------------------------------------------------------===// /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info. void EmitStopPoint(const Stmt *S); /// EmitStmt - Emit the code for the statement \arg S. It is legal to call /// this function even if there is no current insertion point. /// /// This function may clear the current insertion point; callers should use /// EnsureInsertPoint if they wish to subsequently generate code without first /// calling EmitBlock, EmitBranch, or EmitStmt. void EmitStmt(const Stmt *S, ArrayRef Attrs = None); /// EmitSimpleStmt - Try to emit a "simple" statement which does not /// necessarily require an insertion point or debug information; typically /// because the statement amounts to a jump or a container of other /// statements. /// /// \return True if the statement was handled. bool EmitSimpleStmt(const Stmt *S); Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false, AggValueSlot AVS = AggValueSlot::ignored()); Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast = false, AggValueSlot AVS = AggValueSlot::ignored()); /// EmitLabel - Emit the block for the given label. It is legal to call this /// function even if there is no current insertion point. void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt. 
void EmitLabelStmt(const LabelStmt &S); void EmitAttributedStmt(const AttributedStmt &S); void EmitGotoStmt(const GotoStmt &S); void EmitIndirectGotoStmt(const IndirectGotoStmt &S); void EmitIfStmt(const IfStmt &S); void EmitWhileStmt(const WhileStmt &S, ArrayRef Attrs = None); void EmitDoStmt(const DoStmt &S, ArrayRef Attrs = None); void EmitForStmt(const ForStmt &S, ArrayRef Attrs = None); void EmitReturnStmt(const ReturnStmt &S); void EmitDeclStmt(const DeclStmt &S); void EmitBreakStmt(const BreakStmt &S); void EmitContinueStmt(const ContinueStmt &S); void EmitSwitchStmt(const SwitchStmt &S); void EmitDefaultStmt(const DefaultStmt &S); void EmitCaseStmt(const CaseStmt &S); void EmitCaseStmtRange(const CaseStmt &S); void EmitAsmStmt(const AsmStmt &S); void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S); void EmitObjCAtTryStmt(const ObjCAtTryStmt &S); void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S); void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S); void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S); void EmitCoroutineBody(const CoroutineBodyStmt &S); void EmitCoreturnStmt(const CoreturnStmt &S); RValue EmitCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot = AggValueSlot::ignored(), bool ignoreResult = false); LValue EmitCoawaitLValue(const CoawaitExpr *E); RValue EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot = AggValueSlot::ignored(), bool ignoreResult = false); LValue EmitCoyieldLValue(const CoyieldExpr *E); RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID); void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false); void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false); void EmitCXXTryStmt(const CXXTryStmt &S); void EmitSEHTryStmt(const SEHTryStmt &S); void EmitSEHLeaveStmt(const SEHLeaveStmt &S); void EnterSEHTryStmt(const SEHTryStmt &S); void ExitSEHTryStmt(const SEHTryStmt &S); void pushSEHCleanup(CleanupKind kind, llvm::Function *FinallyFunc); void startOutlinedSEHHelper(CodeGenFunction &ParentCGF, bool IsFilter, const Stmt *OutlinedStmt); llvm::Function *GenerateSEHFilterFunction(CodeGenFunction &ParentCGF, const SEHExceptStmt &Except); llvm::Function *GenerateSEHFinallyFunction(CodeGenFunction &ParentCGF, const SEHFinallyStmt &Finally); void EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF, llvm::Value *ParentFP, llvm::Value *EntryEBP); llvm::Value *EmitSEHExceptionCode(); llvm::Value *EmitSEHExceptionInfo(); llvm::Value *EmitSEHAbnormalTermination(); /// Emit simple code for OpenMP directives in Simd-only mode. void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D); /// Scan the outlined statement for captures from the parent function. For /// each capture, mark the capture as escaped and emit a call to /// llvm.localrecover. Insert the localrecover result into the LocalDeclMap. void EmitCapturedLocals(CodeGenFunction &ParentCGF, const Stmt *OutlinedStmt, bool IsFilter); /// Recovers the address of a local in a parent function. ParentVar is the /// address of the variable used in the immediate parent function. It can /// either be an alloca or a call to llvm.localrecover if there are nested /// outlined functions. ParentFP is the frame pointer of the outermost parent /// frame. Address recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF, Address ParentVar, llvm::Value *ParentFP); void EmitCXXForRangeStmt(const CXXForRangeStmt &S, ArrayRef Attrs = None); /// Controls insertion of cancellation exit blocks in worksharing constructs. 
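/// Illustrative use while emitting a worksharing directive body (a sketch
/// only; \c CGF, the directive \c S, its \c hasCancel() accessor, and the
/// directive kind are assumptions of the example):
/// \code
///   {
///     OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel());
///     // ... emit the loop body under the cancel region ...
///   }
/// \endcode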
class OMPCancelStackRAII { CodeGenFunction &CGF; public: OMPCancelStackRAII(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, bool HasCancel) : CGF(CGF) { CGF.OMPCancelStack.enter(CGF, Kind, HasCancel); } ~OMPCancelStackRAII() { CGF.OMPCancelStack.exit(CGF); } }; /// Returns calculated size of the specified type. llvm::Value *getTypeSize(QualType Ty); LValue InitCapturedStruct(const CapturedStmt &S); llvm::Function *EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K); llvm::Function *GenerateCapturedStmtFunction(const CapturedStmt &S); Address GenerateCapturedStmtArgument(const CapturedStmt &S); llvm::Function *GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S, SourceLocation Loc); void GenerateOpenMPCapturedVars(const CapturedStmt &S, SmallVectorImpl &CapturedVars); void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy, SourceLocation Loc); /// Perform element by element copying of arrays with type \a /// OriginalType from \a SrcAddr to \a DestAddr using copying procedure /// generated by \a CopyGen. /// /// \param DestAddr Address of the destination array. /// \param SrcAddr Address of the source array. /// \param OriginalType Type of destination and source arrays. /// \param CopyGen Copying procedure that copies value of single array element /// to another single array element. void EmitOMPAggregateAssign( Address DestAddr, Address SrcAddr, QualType OriginalType, const llvm::function_ref CopyGen); /// Emit proper copying of data from one variable to another. /// /// \param OriginalType Original type of the copied variables. /// \param DestAddr Destination address. /// \param SrcAddr Source address. /// \param DestVD Destination variable used in \a CopyExpr (for arrays, has /// type of the base array element). /// \param SrcVD Source variable used in \a CopyExpr (for arrays, has type of /// the base array element). /// \param Copy Actual copying expression for copying data from \a SrcVD to \a /// DestVD. void EmitOMPCopy(QualType OriginalType, Address DestAddr, Address SrcAddr, const VarDecl *DestVD, const VarDecl *SrcVD, const Expr *Copy); /// Emit atomic update code for constructs: \a X = \a X \a BO \a E or /// \a X = \a E \a BO \a E. /// /// \param X Value to be updated. /// \param E Update value. /// \param BO Binary operation for update operation. /// \param IsXLHSInRHSPart true if \a X is LHS in RHS part of the update /// expression, false otherwise. /// \param AO Atomic ordering of the generated atomic instructions. /// \param CommonGen Code generator for complex expressions that cannot be /// expressed through atomicrmw instruction. /// \returns a pair of 'true' and the old value of \a X if a simple 'atomicrmw' /// instruction was generated, and 'false' with a null RValue otherwise. std::pair EmitOMPAtomicSimpleUpdateExpr( LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart, llvm::AtomicOrdering AO, SourceLocation Loc, const llvm::function_ref CommonGen); bool EmitOMPFirstprivateClause(const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope); void EmitOMPPrivateClause(const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope); void EmitOMPUseDevicePtrClause( const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope, const llvm::DenseMap &CaptureDeviceAddrMap); void EmitOMPUseDeviceAddrClause( const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope, const llvm::DenseMap &CaptureDeviceAddrMap); /// Emit code for copyin clause in \a D directive.
The next code is /// generated at the start of outlined functions for directives: /// \code /// threadprivate_var1 = master_threadprivate_var1; /// operator=(threadprivate_var2, master_threadprivate_var2); /// ... /// __kmpc_barrier(&loc, global_tid); /// \endcode /// /// \param D OpenMP directive possibly with 'copyin' clause(s). /// \returns true if at least one copyin variable is found, false otherwise. bool EmitOMPCopyinClause(const OMPExecutableDirective &D); /// Emit initial code for lastprivate variables. If some variable is /// not also firstprivate, then the default initialization is used. Otherwise /// initialization of this variable is performed by EmitOMPFirstprivateClause /// method. /// /// \param D Directive that may have 'lastprivate' directives. /// \param PrivateScope Private scope for capturing lastprivate variables for /// proper codegen in internal captured statement. /// /// \returns true if there is at least one lastprivate variable, false /// otherwise. bool EmitOMPLastprivateClauseInit(const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope); /// Emit final copying of lastprivate values to original variables at /// the end of the worksharing or simd directive. /// /// \param D Directive that has at least one 'lastprivate' directives. /// \param IsLastIterCond Boolean condition that must be set to 'i1 true' if /// it is the last iteration of the loop code in associated directive, or to /// 'i1 false' otherwise. If this item is nullptr, no final check is required. void EmitOMPLastprivateClauseFinal(const OMPExecutableDirective &D, bool NoFinals, llvm::Value *IsLastIterCond = nullptr); /// Emit initial code for linear clauses. void EmitOMPLinearClause(const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope); /// Emit final code for linear clauses. /// \param CondGen Optional conditional code for final part of codegen for /// linear clause. void EmitOMPLinearClauseFinal( const OMPLoopDirective &D, const llvm::function_ref CondGen); /// Emit initial code for reduction variables. Creates reduction copies /// and initializes them with the values according to OpenMP standard. /// /// \param D Directive (possibly) with the 'reduction' clause. /// \param PrivateScope Private scope for capturing reduction variables for /// proper codegen in internal captured statement. /// void EmitOMPReductionClauseInit(const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope, bool ForInscan = false); /// Emit final update of reduction values to original variables at /// the end of the directive. /// /// \param D Directive that has at least one 'reduction' directives. /// \param ReductionKind The kind of reduction to perform. void EmitOMPReductionClauseFinal(const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind); /// Emit initial code for linear variables. Creates private copies /// and initializes them with the values according to OpenMP standard. /// /// \param D Directive (possibly) with the 'linear' clause. /// \return true if at least one linear variable is found that should be /// initialized with the value of the original variable, false otherwise. 
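/// A hedged sketch of how the linear-clause helpers are typically sequenced
/// when lowering a loop directive (\c CGF, the directive \c D, and the
/// OMPPrivateScope constructor taking a CodeGenFunction are assumptions of
/// this example):
/// \code
///   bool HasLinears = CGF.EmitOMPLinearClauseInit(D);
///   (void)HasLinears;
///   CodeGenFunction::OMPPrivateScope LoopScope(CGF);
///   CGF.EmitOMPLinearClause(D, LoopScope);
/// \endcode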
bool EmitOMPLinearClauseInit(const OMPLoopDirective &D); typedef const llvm::function_ref TaskGenTy; void EmitOMPTaskBasedDirective(const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion, const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen, OMPTaskDataTy &Data); struct OMPTargetDataInfo { Address BasePointersArray = Address::invalid(); Address PointersArray = Address::invalid(); Address SizesArray = Address::invalid(); Address MappersArray = Address::invalid(); unsigned NumberOfTargetItems = 0; explicit OMPTargetDataInfo() = default; OMPTargetDataInfo(Address BasePointersArray, Address PointersArray, Address SizesArray, Address MappersArray, unsigned NumberOfTargetItems) : BasePointersArray(BasePointersArray), PointersArray(PointersArray), SizesArray(SizesArray), MappersArray(MappersArray), NumberOfTargetItems(NumberOfTargetItems) {} }; void EmitOMPTargetTaskBasedDirective(const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen, OMPTargetDataInfo &InputInfo); void EmitOMPParallelDirective(const OMPParallelDirective &S); void EmitOMPSimdDirective(const OMPSimdDirective &S); void EmitOMPForDirective(const OMPForDirective &S); void EmitOMPForSimdDirective(const OMPForSimdDirective &S); void EmitOMPSectionsDirective(const OMPSectionsDirective &S); void EmitOMPSectionDirective(const OMPSectionDirective &S); void EmitOMPSingleDirective(const OMPSingleDirective &S); void EmitOMPMasterDirective(const OMPMasterDirective &S); void EmitOMPCriticalDirective(const OMPCriticalDirective &S); void EmitOMPParallelForDirective(const OMPParallelForDirective &S); void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S); void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S); void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S); void EmitOMPTaskDirective(const OMPTaskDirective &S); void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S); void EmitOMPBarrierDirective(const OMPBarrierDirective &S); void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S); void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S); void EmitOMPFlushDirective(const OMPFlushDirective &S); void EmitOMPDepobjDirective(const OMPDepobjDirective &S); void EmitOMPScanDirective(const OMPScanDirective &S); void EmitOMPOrderedDirective(const OMPOrderedDirective &S); void EmitOMPAtomicDirective(const OMPAtomicDirective &S); void EmitOMPTargetDirective(const OMPTargetDirective &S); void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S); void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S); void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S); void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S); void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S); void EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S); void EmitOMPTeamsDirective(const OMPTeamsDirective &S); void EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S); void EmitOMPCancelDirective(const OMPCancelDirective &S); void EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S); void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S); void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S); void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &S); void EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &S); void EmitOMPParallelMasterTaskLoopDirective( const OMPParallelMasterTaskLoopDirective &S); 
void EmitOMPParallelMasterTaskLoopSimdDirective( const OMPParallelMasterTaskLoopSimdDirective &S); void EmitOMPDistributeDirective(const OMPDistributeDirective &S); void EmitOMPDistributeParallelForDirective( const OMPDistributeParallelForDirective &S); void EmitOMPDistributeParallelForSimdDirective( const OMPDistributeParallelForSimdDirective &S); void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S); void EmitOMPTargetParallelForSimdDirective( const OMPTargetParallelForSimdDirective &S); void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S); void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S); void EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S); void EmitOMPTeamsDistributeParallelForSimdDirective( const OMPTeamsDistributeParallelForSimdDirective &S); void EmitOMPTeamsDistributeParallelForDirective( const OMPTeamsDistributeParallelForDirective &S); void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S); void EmitOMPTargetTeamsDistributeDirective( const OMPTargetTeamsDistributeDirective &S); void EmitOMPTargetTeamsDistributeParallelForDirective( const OMPTargetTeamsDistributeParallelForDirective &S); void EmitOMPTargetTeamsDistributeParallelForSimdDirective( const OMPTargetTeamsDistributeParallelForSimdDirective &S); void EmitOMPTargetTeamsDistributeSimdDirective( const OMPTargetTeamsDistributeSimdDirective &S); /// Emit device code for the target directive. static void EmitOMPTargetDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetDirective &S); static void EmitOMPTargetParallelDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetParallelDirective &S); /// Emit device code for the target parallel for directive. static void EmitOMPTargetParallelForDeviceFunction( CodeGenModule &CGM, StringRef ParentName, const OMPTargetParallelForDirective &S); /// Emit device code for the target parallel for simd directive. static void EmitOMPTargetParallelForSimdDeviceFunction( CodeGenModule &CGM, StringRef ParentName, const OMPTargetParallelForSimdDirective &S); /// Emit device code for the target teams directive. static void EmitOMPTargetTeamsDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDirective &S); /// Emit device code for the target teams distribute directive. static void EmitOMPTargetTeamsDistributeDeviceFunction( CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeDirective &S); /// Emit device code for the target teams distribute simd directive. static void EmitOMPTargetTeamsDistributeSimdDeviceFunction( CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeSimdDirective &S); /// Emit device code for the target simd directive. static void EmitOMPTargetSimdDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S); /// Emit device code for the target teams distribute parallel for simd /// directive. static void EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction( CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeParallelForSimdDirective &S); static void EmitOMPTargetTeamsDistributeParallelForDeviceFunction( CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeParallelForDirective &S); /// Emit inner loop of the worksharing/simd construct. /// /// \param S Directive, for which the inner loop must be emitted. /// \param RequiresCleanup true, if directive has some associated private /// variables. 
  /// \param LoopCond Boolean condition for loop continuation.
  /// \param IncExpr Increment expression for loop control variable.
  /// \param BodyGen Generator for the inner body of the inner loop.
  /// \param PostIncGen Generator for post-increment code (required for ordered
  /// loop directives).
  void EmitOMPInnerLoop(
      const OMPExecutableDirective &S, bool RequiresCleanup,
      const Expr *LoopCond, const Expr *IncExpr,
      const llvm::function_ref BodyGen,
      const llvm::function_ref PostIncGen);

  JumpDest getOMPCancelDestination(OpenMPDirectiveKind Kind);

  /// Emit initial code for loop counters of loop-based directives.
  void EmitOMPPrivateLoopCounters(const OMPLoopDirective &S,
                                  OMPPrivateScope &LoopScope);

  /// Helper for the OpenMP loop directives.
  void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit);

  /// Emit code for the worksharing loop-based directive.
  /// \return true if this construct has any lastprivate clause, false
  /// otherwise.
  bool EmitOMPWorksharingLoop(const OMPLoopDirective &S, Expr *EUB,
                              const CodeGenLoopBoundsTy &CodeGenLoopBounds,
                              const CodeGenDispatchBoundsTy &CGDispatchBounds);

  /// Emit code for the distribute loop-based directive.
  void EmitOMPDistributeLoop(const OMPLoopDirective &S,
                             const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr);

  /// Helpers for the OpenMP loop directives.
  void EmitOMPSimdInit(const OMPLoopDirective &D, bool IsMonotonic = false);
  void EmitOMPSimdFinal(
      const OMPLoopDirective &D,
      const llvm::function_ref CondGen);

  /// Emits the lvalue for the expression with possibly captured variable.
  LValue EmitOMPSharedLValue(const Expr *E);

private:
  /// Helpers for blocks.
  llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);

  /// struct with the values to be passed to the OpenMP loop-related functions
  struct OMPLoopArguments {
    /// loop lower bound
    Address LB = Address::invalid();
    /// loop upper bound
    Address UB = Address::invalid();
    /// loop stride
    Address ST = Address::invalid();
    /// isLastIteration argument for runtime functions
    Address IL = Address::invalid();
    /// Chunk value generated by sema
    llvm::Value *Chunk = nullptr;
    /// EnsureUpperBound
    Expr *EUB = nullptr;
    /// IncrementExpression
    Expr *IncExpr = nullptr;
    /// Loop initialization
    Expr *Init = nullptr;
    /// Loop exit condition
    Expr *Cond = nullptr;
    /// Update of LB after a whole chunk has been executed
    Expr *NextLB = nullptr;
    /// Update of UB after a whole chunk has been executed
    Expr *NextUB = nullptr;
    OMPLoopArguments() = default;
    OMPLoopArguments(Address LB, Address UB, Address ST, Address IL,
                     llvm::Value *Chunk = nullptr, Expr *EUB = nullptr,
                     Expr *IncExpr = nullptr, Expr *Init = nullptr,
                     Expr *Cond = nullptr, Expr *NextLB = nullptr,
                     Expr *NextUB = nullptr)
        : LB(LB), UB(UB), ST(ST), IL(IL), Chunk(Chunk), EUB(EUB),
          IncExpr(IncExpr), Init(Init), Cond(Cond), NextLB(NextLB),
          NextUB(NextUB) {}
  };
  void EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
                        const OMPLoopDirective &S, OMPPrivateScope &LoopScope,
                        const OMPLoopArguments &LoopArgs,
                        const CodeGenLoopTy &CodeGenLoop,
                        const CodeGenOrderedTy &CodeGenOrdered);
  void EmitOMPForOuterLoop(const OpenMPScheduleTy &ScheduleKind,
                           bool IsMonotonic, const OMPLoopDirective &S,
                           OMPPrivateScope &LoopScope, bool Ordered,
                           const OMPLoopArguments &LoopArgs,
                           const CodeGenDispatchBoundsTy &CGDispatchBounds);
  void EmitOMPDistributeOuterLoop(OpenMPDistScheduleClauseKind ScheduleKind,
                                  const OMPLoopDirective &S,
                                  OMPPrivateScope &LoopScope,
                                  const OMPLoopArguments &LoopArgs,
                                  const CodeGenLoopTy &CodeGenLoopContent);
  /// Emit code for sections directive.
  void EmitSections(const OMPExecutableDirective &S);

public:
  //===--------------------------------------------------------------------===//
  //                         LValue Expression Emission
  //===--------------------------------------------------------------------===//

+  /// Create a check that a scalar RValue is non-null.
+  llvm::Value *EmitNonNullRValueCheck(RValue RV, QualType T);
+
  /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
  RValue GetUndefRValue(QualType Ty);

  /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
  /// and issue an ErrorUnsupported style diagnostic (using the
  /// provided Name).
  RValue EmitUnsupportedRValue(const Expr *E, const char *Name);

  /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
  /// an ErrorUnsupported style diagnostic (using the provided Name).
  LValue EmitUnsupportedLValue(const Expr *E, const char *Name);

  /// EmitLValue - Emit code to compute a designator that specifies the location
  /// of the expression.
  ///
  /// This can return one of two things: a simple address or a bitfield
  /// reference.  In either case, the LLVM Value* in the LValue structure is
  /// guaranteed to be an LLVM pointer type.
  ///
  /// If this returns a bitfield reference, nothing about the pointee type of
  /// the LLVM value is known: For example, it may not be a pointer to an
  /// integer.
  ///
  /// If this returns a normal address, and if the lvalue's C type is fixed
  /// size, this method guarantees that the returned pointer type will point to
  /// an LLVM type of the same size of the lvalue's type.  If the lvalue has a
  /// variable length type, this is not possible.
  ///
  LValue EmitLValue(const Expr *E);

  /// Same as EmitLValue but additionally we generate checking code to
  /// guard against undefined behavior.  This is only suitable when we know
  /// that the address will be used to access the object.
  LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);

  RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc);

  void EmitAtomicInit(Expr *E, LValue lvalue);

  bool LValueIsSuitableForInlineAtomic(LValue Src);

  RValue EmitAtomicLoad(LValue LV, SourceLocation SL,
                        AggValueSlot Slot = AggValueSlot::ignored());

  RValue EmitAtomicLoad(LValue lvalue, SourceLocation loc,
                        llvm::AtomicOrdering AO, bool IsVolatile = false,
                        AggValueSlot slot = AggValueSlot::ignored());

  void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit);

  void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO,
                       bool IsVolatile, bool isInit);

  std::pair EmitAtomicCompareExchange(
      LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
      llvm::AtomicOrdering Success =
          llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering Failure =
          llvm::AtomicOrdering::SequentiallyConsistent,
      bool IsWeak = false, AggValueSlot Slot = AggValueSlot::ignored());

  void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO,
                        const llvm::function_ref &UpdateOp,
                        bool IsVolatile);

  /// EmitToMemory - Change a scalar value from its value
  /// representation to its in-memory representation.
  llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);

  /// EmitFromMemory - Change a scalar value from its memory
  /// representation to its value representation.
  llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);

  /// Check if the scalar \p Value is within the valid range for the given
  /// type \p Ty.
  ///
  /// Returns true if a check is needed (even if the range is unknown).
bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty, SourceLocation Loc); /// EmitLoadOfScalar - Load a scalar value from an address, taking /// care to appropriately convert from the memory representation to /// the LLVM value representation. llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source = AlignmentSource::Type, bool isNontemporal = false) { return EmitLoadOfScalar(Addr, Volatile, Ty, Loc, LValueBaseInfo(Source), CGM.getTBAAAccessInfo(Ty), isNontemporal); } llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo, bool isNontemporal = false); /// EmitLoadOfScalar - Load a scalar value from an address, taking /// care to appropriately convert from the memory representation to /// the LLVM value representation. The l-value must be a simple /// l-value. llvm::Value *EmitLoadOfScalar(LValue lvalue, SourceLocation Loc); /// EmitStoreOfScalar - Store a scalar value to an address, taking /// care to appropriately convert from the memory representation to /// the LLVM value representation. void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source = AlignmentSource::Type, bool isInit = false, bool isNontemporal = false) { EmitStoreOfScalar(Value, Addr, Volatile, Ty, LValueBaseInfo(Source), CGM.getTBAAAccessInfo(Ty), isInit, isNontemporal); } void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo, bool isInit = false, bool isNontemporal = false); /// EmitStoreOfScalar - Store a scalar value to an address, taking /// care to appropriately convert from the memory representation to /// the LLVM value representation. The l-value must be a simple /// l-value. The isInit flag indicates whether this is an initialization. /// If so, atomic qualifiers are ignored and the store is always non-atomic. void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false); /// EmitLoadOfLValue - Given an expression that represents a value lvalue, /// this method emits the address of the lvalue, then loads the result as an /// rvalue, returning the rvalue. RValue EmitLoadOfLValue(LValue V, SourceLocation Loc); RValue EmitLoadOfExtVectorElementLValue(LValue V); RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc); RValue EmitLoadOfGlobalRegLValue(LValue LV); /// EmitStoreThroughLValue - Store the specified rvalue into the specified /// lvalue, where both are guaranteed to the have the same type, and that type /// is 'Ty'. void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit = false); void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst); void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst); /// EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints /// as EmitStoreThroughLValue. /// /// \param Result [out] - If non-null, this will be set to a Value* for the /// bit-field contents after the store, appropriate for use as the result of /// an assignment to the bit-field. void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr); /// Emit an l-value for an assignment (simple or compound) of complex type. 
LValue EmitComplexAssignmentLValue(const BinaryOperator *E); LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E); LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E, llvm::Value *&Result); // Note: only available for agg return types LValue EmitBinaryOperatorLValue(const BinaryOperator *E); LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E); // Note: only available for agg return types LValue EmitCallExprLValue(const CallExpr *E); // Note: only available for agg return types LValue EmitVAArgExprLValue(const VAArgExpr *E); LValue EmitDeclRefLValue(const DeclRefExpr *E); LValue EmitStringLiteralLValue(const StringLiteral *E); LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E); LValue EmitPredefinedLValue(const PredefinedExpr *E); LValue EmitUnaryOpLValue(const UnaryOperator *E); LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E, bool Accessed = false); LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E); LValue EmitOMPArraySectionExpr(const OMPArraySectionExpr *E, bool IsLowerBound = true); LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E); LValue EmitMemberExpr(const MemberExpr *E); LValue EmitObjCIsaExpr(const ObjCIsaExpr *E); LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E); LValue EmitInitListLValue(const InitListExpr *E); LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E); LValue EmitCastLValue(const CastExpr *E); LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E); LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e); Address EmitExtVectorElementLValue(LValue V); RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc); Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo = nullptr, TBAAAccessInfo *TBAAInfo = nullptr); class ConstantEmission { llvm::PointerIntPair ValueAndIsReference; ConstantEmission(llvm::Constant *C, bool isReference) : ValueAndIsReference(C, isReference) {} public: ConstantEmission() {} static ConstantEmission forReference(llvm::Constant *C) { return ConstantEmission(C, true); } static ConstantEmission forValue(llvm::Constant *C) { return ConstantEmission(C, false); } explicit operator bool() const { return ValueAndIsReference.getOpaqueValue() != nullptr; } bool isReference() const { return ValueAndIsReference.getInt(); } LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const { assert(isReference()); return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(), refExpr->getType()); } llvm::Constant *getValue() const { assert(!isReference()); return ValueAndIsReference.getPointer(); } }; ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr); ConstantEmission tryEmitAsConstant(const MemberExpr *ME); llvm::Value *emitScalarConstant(const ConstantEmission &Constant, Expr *E); RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot = AggValueSlot::ignored()); LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e); llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar); LValue EmitLValueForField(LValue Base, const FieldDecl* Field); LValue EmitLValueForLambdaField(const FieldDecl *Field); /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that /// if the Field is a reference, this will return the address of the reference /// and not the address of the value stored in the reference. 
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl* Field); LValue EmitLValueForIvar(QualType ObjectTy, llvm::Value* Base, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers); LValue EmitCXXConstructLValue(const CXXConstructExpr *E); LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E); LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E); LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E); LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E); LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E); LValue EmitStmtExprLValue(const StmtExpr *E); LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E); LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E); void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init); //===--------------------------------------------------------------------===// // Scalar Expression Emission //===--------------------------------------------------------------------===// /// EmitCall - Generate a call of the given function, expecting the given /// result type, and using the given argument list which specifies both the /// LLVM arguments and the types they were derived from. RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **callOrInvoke, SourceLocation Loc); RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **callOrInvoke = nullptr) { return EmitCall(CallInfo, Callee, ReturnValue, Args, callOrInvoke, SourceLocation()); } RValue EmitCall(QualType FnType, const CGCallee &Callee, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Value *Chain = nullptr); RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue = ReturnValueSlot()); RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue); CGCallee EmitCallee(const Expr *E); void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl); void checkTargetFeatures(SourceLocation Loc, const FunctionDecl *TargetDecl); llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name = ""); llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee, ArrayRef args, const Twine &name = ""); llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name = ""); llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee, ArrayRef args, const Twine &name = ""); SmallVector getBundlesForFunclet(llvm::Value *Callee); llvm::CallBase *EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef Args, const Twine &Name = ""); llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef args, const Twine &name = ""); llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, const Twine &name = ""); void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef args); CGCallee BuildAppleKextVirtualCall(const CXXMethodDecl *MD, NestedNameSpecifier *Qual, llvm::Type *Ty); CGCallee BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD, CXXDtorType Type, const CXXRecordDecl *RD); // Return the copy constructor name with the prefix "__copy_constructor_" // removed. static std::string getNonTrivialCopyConstructorStr(QualType QT, CharUnits Alignment, bool IsVolatile, ASTContext &Ctx); // Return the destructor name with the prefix "__destructor_" removed. 
static std::string getNonTrivialDestructorStr(QualType QT, CharUnits Alignment, bool IsVolatile, ASTContext &Ctx); // These functions emit calls to the special functions of non-trivial C // structs. void defaultInitNonTrivialCStructVar(LValue Dst); void callCStructDefaultConstructor(LValue Dst); void callCStructDestructor(LValue Dst); void callCStructCopyConstructor(LValue Dst, LValue Src); void callCStructMoveConstructor(LValue Dst, LValue Src); void callCStructCopyAssignmentOperator(LValue Dst, LValue Src); void callCStructMoveAssignmentOperator(LValue Dst, LValue Src); RValue EmitCXXMemberOrOperatorCall(const CXXMethodDecl *Method, const CGCallee &Callee, ReturnValueSlot ReturnValue, llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *E, CallArgList *RtlArgs); RValue EmitCXXDestructorCall(GlobalDecl Dtor, const CGCallee &Callee, llvm::Value *This, QualType ThisTy, llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *E); RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E, ReturnValueSlot ReturnValue); RValue EmitCXXMemberOrOperatorMemberCallExpr(const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow, const Expr *Base); // Compute the object pointer. Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base, llvm::Value *memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *BaseInfo = nullptr, TBAAAccessInfo *TBAAInfo = nullptr); RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E, ReturnValueSlot ReturnValue); RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue); RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E); RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E, ReturnValueSlot ReturnValue); RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue); RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue); RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue); RValue emitRotate(const CallExpr *E, bool IsRotateRight); /// Emit IR for __builtin_os_log_format. RValue emitBuiltinOSLogFormat(const CallExpr &E); /// Emit IR for __builtin_is_aligned. RValue EmitBuiltinIsAligned(const CallExpr *E); /// Emit IR for __builtin_align_up/__builtin_align_down. RValue EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp); llvm::Function *generateBuiltinOSLogHelperFunction( const analyze_os_log::OSLogBufferLayout &Layout, CharUnits BufferAlignment); RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue); /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call /// is unhandled by the current target. 
llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue); llvm::Value *EmitAArch64CompareBuiltinExpr(llvm::Value *Op, llvm::Type *Ty, const llvm::CmpInst::Predicate Fp, const llvm::CmpInst::Predicate Ip, const llvm::Twine &Name = ""); llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch); llvm::Value *EmitARMMVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch); llvm::Value *EmitARMCDEBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch); llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy, QualType RTy); llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::ArrayType *ATy, QualType RTy); llvm::Value *EmitCommonNeonBuiltinExpr(unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic, const char *NameHint, unsigned Modifier, const CallExpr *E, SmallVectorImpl &Ops, Address PtrOp0, Address PtrOp1, llvm::Triple::ArchType Arch); llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID, unsigned Modifier, llvm::Type *ArgTy, const CallExpr *E); llvm::Value *EmitNeonCall(llvm::Function *F, SmallVectorImpl &O, const char *name, unsigned shift = 0, bool rightshift = false); llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx, const llvm::ElementCount &Count); llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx); llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty, bool negateForRightShift); llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt, llvm::Type *Ty, bool usgn, const char *name); llvm::Value *vectorWrapScalar16(llvm::Value *Op); /// SVEBuiltinMemEltTy - Returns the memory element type for this memory /// access builtin. Only required if it can't be inferred from the base /// pointer operand. 
llvm::Type *SVEBuiltinMemEltTy(SVETypeFlags TypeFlags); SmallVector getSVEOverloadTypes(SVETypeFlags TypeFlags, llvm::Type *ReturnType, ArrayRef Ops); llvm::Type *getEltType(SVETypeFlags TypeFlags); llvm::ScalableVectorType *getSVEType(const SVETypeFlags &TypeFlags); llvm::ScalableVectorType *getSVEPredType(SVETypeFlags TypeFlags); llvm::Value *EmitSVEAllTruePred(SVETypeFlags TypeFlags); llvm::Value *EmitSVEDupX(llvm::Value *Scalar); llvm::Value *EmitSVEDupX(llvm::Value *Scalar, llvm::Type *Ty); llvm::Value *EmitSVEReinterpret(llvm::Value *Val, llvm::Type *Ty); llvm::Value *EmitSVEPMull(SVETypeFlags TypeFlags, llvm::SmallVectorImpl &Ops, unsigned BuiltinID); llvm::Value *EmitSVEMovl(SVETypeFlags TypeFlags, llvm::ArrayRef Ops, unsigned BuiltinID); llvm::Value *EmitSVEPredicateCast(llvm::Value *Pred, llvm::ScalableVectorType *VTy); llvm::Value *EmitSVEGatherLoad(SVETypeFlags TypeFlags, llvm::SmallVectorImpl &Ops, unsigned IntID); llvm::Value *EmitSVEScatterStore(SVETypeFlags TypeFlags, llvm::SmallVectorImpl &Ops, unsigned IntID); llvm::Value *EmitSVEMaskedLoad(const CallExpr *, llvm::Type *ReturnTy, SmallVectorImpl &Ops, unsigned BuiltinID, bool IsZExtReturn); llvm::Value *EmitSVEMaskedStore(const CallExpr *, SmallVectorImpl &Ops, unsigned BuiltinID); llvm::Value *EmitSVEPrefetchLoad(SVETypeFlags TypeFlags, SmallVectorImpl &Ops, unsigned BuiltinID); llvm::Value *EmitSVEGatherPrefetch(SVETypeFlags TypeFlags, SmallVectorImpl &Ops, unsigned IntID); llvm::Value *EmitSVEStructLoad(SVETypeFlags TypeFlags, SmallVectorImpl &Ops, unsigned IntID); llvm::Value *EmitSVEStructStore(SVETypeFlags TypeFlags, SmallVectorImpl &Ops, unsigned IntID); llvm::Value *EmitAArch64SVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E); llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm::Triple::ArchType Arch); llvm::Value *EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E); llvm::Value *BuildVector(ArrayRef Ops); llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E); llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E); llvm::Value *EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E); llvm::Value *EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E); llvm::Value *EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E); llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, const CallExpr *E); llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E); bool ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope, llvm::AtomicOrdering &AO, llvm::SyncScope::ID &SSID); private: enum class MSVCIntrin; public: llvm::Value *EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E); llvm::Value *EmitBuiltinAvailable(ArrayRef Args); llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E); llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E); llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E); llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E); llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E); llvm::Value *EmitObjCCollectionLiteral(const Expr *E, const ObjCMethodDecl *MethodWithObjects); llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E); RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return = ReturnValueSlot()); /// Retrieves the default cleanup kind for an ARC cleanup. /// Except under -fobjc-arc-eh, ARC cleanups are normal-only. CleanupKind getARCCleanupKind() { return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions ? 
NormalAndEHCleanup : NormalCleanup; } // ARC primitives. void EmitARCInitWeak(Address addr, llvm::Value *value); void EmitARCDestroyWeak(Address addr); llvm::Value *EmitARCLoadWeak(Address addr); llvm::Value *EmitARCLoadWeakRetained(Address addr); llvm::Value *EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored); void emitARCCopyAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr); void emitARCMoveAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr); void EmitARCCopyWeak(Address dst, Address src); void EmitARCMoveWeak(Address dst, Address src); llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value); llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value); llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored); llvm::Value *EmitARCStoreStrongCall(Address addr, llvm::Value *value, bool resultIgnored); llvm::Value *EmitARCRetain(QualType type, llvm::Value *value); llvm::Value *EmitARCRetainNonBlock(llvm::Value *value); llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory); void EmitARCDestroyStrong(Address addr, ARCPreciseLifetime_t precise); void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise); llvm::Value *EmitARCAutorelease(llvm::Value *value); llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value); llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value); llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value); llvm::Value *EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value); llvm::Value *EmitObjCAutorelease(llvm::Value *value, llvm::Type *returnType); llvm::Value *EmitObjCRetainNonBlock(llvm::Value *value, llvm::Type *returnType); void EmitObjCRelease(llvm::Value *value, ARCPreciseLifetime_t precise); std::pair EmitARCStoreAutoreleasing(const BinaryOperator *e); std::pair EmitARCStoreStrong(const BinaryOperator *e, bool ignored); std::pair EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored); llvm::Value *EmitObjCAlloc(llvm::Value *value, llvm::Type *returnType); llvm::Value *EmitObjCAllocWithZone(llvm::Value *value, llvm::Type *returnType); llvm::Value *EmitObjCAllocInit(llvm::Value *value, llvm::Type *resultType); llvm::Value *EmitObjCThrowOperand(const Expr *expr); llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr); llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr); llvm::Value *EmitARCExtendBlockObject(const Expr *expr); llvm::Value *EmitARCReclaimReturnedObject(const Expr *e, bool allowUnsafeClaim); llvm::Value *EmitARCRetainScalarExpr(const Expr *expr); llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr); llvm::Value *EmitARCUnsafeUnretainedScalarExpr(const Expr *expr); void EmitARCIntrinsicUse(ArrayRef values); static Destroyer destroyARCStrongImprecise; static Destroyer destroyARCStrongPrecise; static Destroyer destroyARCWeak; static Destroyer emitARCIntrinsicUse; static Destroyer destroyNonTrivialCStruct; void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr); llvm::Value *EmitObjCAutoreleasePoolPush(); llvm::Value *EmitObjCMRRAutoreleasePoolPush(); void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr); void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr); /// Emits a reference binding to the passed in expression. 
RValue EmitReferenceBindingToExpr(const Expr *E); //===--------------------------------------------------------------------===// // Expression Emission //===--------------------------------------------------------------------===// // Expressions are broken into three classes: scalar, complex, aggregate. /// EmitScalarExpr - Emit the computation of the specified expression of LLVM /// scalar type, returning the result. llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false); /// Emit a conversion from the specified type to the specified destination /// type, both of which are LLVM scalar types. llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc); /// Emit a conversion from the specified complex type to the specified /// destination type, where the destination type is an LLVM scalar type. llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc); /// EmitAggExpr - Emit the computation of the specified expression /// of aggregate type. The result is computed into the given slot, /// which may be null to indicate that the value is not needed. void EmitAggExpr(const Expr *E, AggValueSlot AS); /// EmitAggExprToLValue - Emit the computation of the specified expression of /// aggregate type into a temporary LValue. LValue EmitAggExprToLValue(const Expr *E); /// Build all the stores needed to initialize an aggregate at Dest with the /// value Val. void EmitAggregateStore(llvm::Value *Val, Address Dest, bool DestIsVolatile); /// EmitExtendGCLifetime - Given a pointer to an Objective-C object, /// make sure it survives garbage collection until this point. void EmitExtendGCLifetime(llvm::Value *object); /// EmitComplexExpr - Emit the computation of the specified expression of /// complex type, returning the result. ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal = false, bool IgnoreImag = false); /// EmitComplexExprIntoLValue - Emit the given expression of complex /// type and place its result into the specified l-value. void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit); /// EmitStoreOfComplex - Store a complex number into the specified l-value. void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit); /// EmitLoadOfComplex - Load a complex number from the specified l-value. ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc); Address emitAddrOfRealComponent(Address complex, QualType complexType); Address emitAddrOfImagComponent(Address complex, QualType complexType); /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the /// global variable that has already been created for it. If the initializer /// has a different type than GV does, this may free GV and return a different /// one. Otherwise it just returns GV. llvm::GlobalVariable * AddInitializerToStaticVarDecl(const VarDecl &D, llvm::GlobalVariable *GV); // Emit an @llvm.invariant.start call for the given memory region. void EmitInvariantStart(llvm::Constant *Addr, CharUnits Size); /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++ /// variable with global storage. void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr, bool PerformInit); llvm::Function *createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor, llvm::Constant *Addr); /// Call atexit() with a function that passes the given argument to /// the given function. 
void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::FunctionCallee fn, llvm::Constant *addr); /// Call atexit() with function dtorStub. void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub); /// Call unatexit() with function dtorStub. llvm::Value *unregisterGlobalDtorWithUnAtExit(llvm::Function *dtorStub); /// Emit code in this function to perform a guarded variable /// initialization. Guarded initializations are used when it's not /// possible to prove that an initialization will be done exactly /// once, e.g. with a static local variable or a static data member /// of a class template. void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr, bool PerformInit); enum class GuardKind { VariableGuard, TlsGuard }; /// Emit a branch to select whether or not to perform guarded initialization. void EmitCXXGuardedInitBranch(llvm::Value *NeedsInit, llvm::BasicBlock *InitBlock, llvm::BasicBlock *NoInitBlock, GuardKind Kind, const VarDecl *D); /// GenerateCXXGlobalInitFunc - Generates code for initializing global /// variables. void GenerateCXXGlobalInitFunc(llvm::Function *Fn, ArrayRef CXXThreadLocals, ConstantAddress Guard = ConstantAddress::invalid()); /// GenerateCXXGlobalCleanUpFunc - Generates code for cleaning up global /// variables. void GenerateCXXGlobalCleanUpFunc( llvm::Function *Fn, const std::vector> &DtorsOrStermFinalizers); void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn, const VarDecl *D, llvm::GlobalVariable *Addr, bool PerformInit); void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest); void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp); void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true); RValue EmitAtomicExpr(AtomicExpr *E); //===--------------------------------------------------------------------===// // Annotations Emission //===--------------------------------------------------------------------===// /// Emit an annotation call (intrinsic). llvm::Value *EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location); /// Emit local annotations for the local variable V, declared by D. void EmitVarAnnotations(const VarDecl *D, llvm::Value *V); /// Emit field annotations for the given field & value. Returns the /// annotation result. Address EmitFieldAnnotations(const FieldDecl *D, Address V); //===--------------------------------------------------------------------===// // Internal Helpers //===--------------------------------------------------------------------===// /// ContainsLabel - Return true if the statement contains a label in it. If /// this statement is not executed normally, it not containing a label means /// that we can just remove the code. static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false); /// containsBreak - Return true if the statement contains a break out of it. /// If the statement (recursively) contains a switch or loop with a break /// inside of it, this is fine. static bool containsBreak(const Stmt *S); /// Determine if the given statement might introduce a declaration into the /// current scope, by being a (possibly-labelled) DeclStmt. static bool mightAddDeclToScope(const Stmt *S); /// ConstantFoldsToSimpleInteger - If the specified expression does not fold /// to a constant, or if it does but contains a label, return false. If it /// constant folds return true and set the boolean result in Result. 
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels = false); /// ConstantFoldsToSimpleInteger - If the specified expression does not fold /// to a constant, or if it does but contains a label, return false. If it /// constant folds return true and set the folded value. bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result, bool AllowLabels = false); /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an /// if statement) to the specified blocks. Based on the condition, this might /// try to simplify the codegen of the conditional based on the branch. /// TrueCount should be the number of times we expect the condition to /// evaluate to true based on PGO data. void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, llvm::MDNode *Weights = nullptr); /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is /// nonnull, if \p LHS is marked _Nonnull. void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc); /// An enumeration which makes it easier to specify whether or not an /// operation is a subtraction. enum { NotSubtraction = false, IsSubtraction = true }; /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to /// detect undefined behavior when the pointer overflow sanitizer is enabled. /// \p SignedIndices indicates whether any of the GEP indices are signed. /// \p IsSubtraction indicates whether the expression used to form the GEP /// is a subtraction. llvm::Value *EmitCheckedInBoundsGEP(llvm::Value *Ptr, ArrayRef IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name = ""); /// Specifies which type of sanitizer check to apply when handling a /// particular builtin. enum BuiltinCheckKind { BCK_CTZPassedZero, BCK_CLZPassedZero, }; /// Emits an argument for a call to a builtin. If the builtin sanitizer is /// enabled, a runtime check specified by \p Kind is also emitted. llvm::Value *EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind); /// Emit a description of a type in a format suitable for passing to /// a runtime sanitizer handler. llvm::Constant *EmitCheckTypeDescriptor(QualType T); /// Convert a value into a format suitable for passing to a runtime /// sanitizer handler. llvm::Value *EmitCheckValue(llvm::Value *V); /// Emit a description of a source location in a format suitable for /// passing to a runtime sanitizer handler. llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc); /// Create a basic block that will either trap or call a handler function in /// the UBSan runtime with the provided arguments, and create a conditional /// branch to it. void EmitCheck(ArrayRef> Checked, SanitizerHandler Check, ArrayRef StaticArgs, ArrayRef DynamicArgs); /// Emit a slow path cross-DSO CFI check which calls __cfi_slowpath /// if Cond if false. void EmitCfiSlowPathCheck(SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId, llvm::Value *Ptr, ArrayRef StaticArgs); /// Emit a reached-unreachable diagnostic if \p Loc is valid and runtime /// checking is enabled. Otherwise, just emit an unreachable instruction. void EmitUnreachable(SourceLocation Loc); /// Create a basic block that will call the trap intrinsic, and emit a /// conditional branch to it, for the -ftrapv checks. void EmitTrapCheck(llvm::Value *Checked); /// Emit a call to trap or debugtrap and attach function attribute /// "trap-func-name" if specified. 
llvm::CallInst *EmitTrapCall(llvm::Intrinsic::ID IntrID); /// Emit a stub for the cross-DSO CFI check function. void EmitCfiCheckStub(); /// Emit a cross-DSO CFI failure handling function. void EmitCfiCheckFail(); /// Create a check for a function parameter that may potentially be /// declared as non-null. void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum); /// EmitCallArg - Emit a single call argument. void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType); /// EmitDelegateCallArg - We are performing a delegate call; that /// is, the current function is delegating to another one. Produce /// a r-value suitable for passing the given parameter. void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc); /// SetFPAccuracy - Set the minimum required accuracy of the given floating /// point operation, expressed as the maximum relative error in ulp. void SetFPAccuracy(llvm::Value *Val, float Accuracy); /// SetFPModel - Control floating point behavior via fp-model settings. void SetFPModel(); /// Set the codegen fast-math flags. void SetFastMathFlags(FPOptions FPFeatures); private: llvm::MDNode *getRangeForLoadFromType(QualType Ty); void EmitReturnOfRValue(RValue RV, QualType Ty); void deferPlaceholderReplacement(llvm::Instruction *Old, llvm::Value *New); llvm::SmallVector, 4> DeferredReplacements; /// Set the address of a local variable. void setAddrOfLocalVar(const VarDecl *VD, Address Addr) { assert(!LocalDeclMap.count(VD) && "Decl already exists in LocalDeclMap!"); LocalDeclMap.insert({VD, Addr}); } /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty /// from function arguments into \arg Dst. See ABIArgInfo::Expand. /// /// \param AI - The first function argument of the expansion. void ExpandTypeFromArgs(QualType Ty, LValue Dst, llvm::Function::arg_iterator &AI); /// ExpandTypeToArgs - Expand an CallArg \arg Arg, with the LLVM type for \arg /// Ty, into individual arguments on the provided vector \arg IRCallArgs, /// starting at index \arg IRCallArgPos. See ABIArgInfo::Expand. void ExpandTypeToArgs(QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy, SmallVectorImpl &IRCallArgs, unsigned &IRCallArgPos); llvm::Value* EmitAsmInput(const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr, std::string &ConstraintStr); llvm::Value* EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info, LValue InputValue, QualType InputType, std::string &ConstraintStr, SourceLocation Loc); /// Attempts to statically evaluate the object size of E. If that /// fails, emits code to figure the size of E out for us. This is /// pass_object_size aware. /// /// If EmittedExpr is non-null, this will use that instead of re-emitting E. llvm::Value *evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type, llvm::IntegerType *ResType, llvm::Value *EmittedE, bool IsDynamic); /// Emits the size of E, as required by __builtin_object_size. This /// function is aware of pass_object_size parameters, and will act accordingly /// if E is a parameter with the pass_object_size attribute. llvm::Value *emitBuiltinObjectSize(const Expr *E, unsigned Type, llvm::IntegerType *ResType, llvm::Value *EmittedE, bool IsDynamic); void emitZeroOrPatternForAutoVarInit(QualType type, const VarDecl &D, Address Loc); public: #ifndef NDEBUG // Determine whether the given argument is an Objective-C method // that may have type parameters in its signature. 
static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) { const DeclContext *dc = method->getDeclContext(); if (const ObjCInterfaceDecl *classDecl= dyn_cast(dc)) { return classDecl->getTypeParamListAsWritten(); } if (const ObjCCategoryDecl *catDecl = dyn_cast(dc)) { return catDecl->getTypeParamList(); } return false; } template static bool isObjCMethodWithTypeParams(const T *) { return false; } #endif enum class EvaluationOrder { ///! No language constraints on evaluation order. Default, ///! Language semantics require left-to-right evaluation. ForceLeftToRight, ///! Language semantics require right-to-left evaluation. ForceRightToLeft }; /// EmitCallArgs - Emit call arguments for a function. template void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo, llvm::iterator_range ArgRange, AbstractCallee AC = AbstractCallee(), unsigned ParamsToSkip = 0, EvaluationOrder Order = EvaluationOrder::Default) { SmallVector ArgTypes; CallExpr::const_arg_iterator Arg = ArgRange.begin(); assert((ParamsToSkip == 0 || CallArgTypeInfo) && "Can't skip parameters if type info is not provided"); if (CallArgTypeInfo) { #ifndef NDEBUG bool isGenericMethod = isObjCMethodWithTypeParams(CallArgTypeInfo); #endif // First, use the argument types that the type info knows about for (auto I = CallArgTypeInfo->param_type_begin() + ParamsToSkip, E = CallArgTypeInfo->param_type_end(); I != E; ++I, ++Arg) { assert(Arg != ArgRange.end() && "Running over edge of argument list!"); assert((isGenericMethod || ((*I)->isVariablyModifiedType() || (*I).getNonReferenceType()->isObjCRetainableType() || getContext() .getCanonicalType((*I).getNonReferenceType()) .getTypePtr() == getContext() .getCanonicalType((*Arg)->getType()) .getTypePtr())) && "type mismatch in call argument!"); ArgTypes.push_back(*I); } } // Either we've emitted all the call args, or we have a call to variadic // function. assert((Arg == ArgRange.end() || !CallArgTypeInfo || CallArgTypeInfo->isVariadic()) && "Extra arguments in non-variadic function!"); // If we still have any arguments, emit them using the type of the argument. for (auto *A : llvm::make_range(Arg, ArgRange.end())) ArgTypes.push_back(CallArgTypeInfo ? getVarArgType(A) : A->getType()); EmitCallArgs(Args, ArgTypes, ArgRange, AC, ParamsToSkip, Order); } void EmitCallArgs(CallArgList &Args, ArrayRef ArgTypes, llvm::iterator_range ArgRange, AbstractCallee AC = AbstractCallee(), unsigned ParamsToSkip = 0, EvaluationOrder Order = EvaluationOrder::Default); /// EmitPointerWithAlignment - Given an expression with a pointer type, /// emit the value and compute our best estimate of the alignment of the /// pointee. /// /// \param BaseInfo - If non-null, this will be initialized with /// information about the source of the alignment and the may-alias /// attribute. Note that this function will conservatively fall back on /// the type when it doesn't recognize the expression and may-alias will /// be set to false. /// /// One reasonable way to use this information is when there's a language /// guarantee that the pointer must be aligned to some stricter value, and /// we're simply trying to ensure that sufficiently obvious uses of under- /// aligned objects don't get miscompiled; for example, a placement new /// into the address of a local variable. In such a case, it's quite /// reasonable to just ignore the returned alignment when it isn't from an /// explicit source. 
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo = nullptr, TBAAAccessInfo *TBAAInfo = nullptr); /// If \p E references a parameter with pass_object_size info or a constant /// array size modifier, emit the object size divided by the size of \p EltTy. /// Otherwise return null. llvm::Value *LoadPassedObjectSize(const Expr *E, QualType EltTy); void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK); struct MultiVersionResolverOption { llvm::Function *Function; FunctionDecl *FD; struct Conds { StringRef Architecture; llvm::SmallVector Features; Conds(StringRef Arch, ArrayRef Feats) : Architecture(Arch), Features(Feats.begin(), Feats.end()) {} } Conditions; MultiVersionResolverOption(llvm::Function *F, StringRef Arch, ArrayRef Feats) : Function(F), Conditions(Arch, Feats) {} }; // Emits the body of a multiversion function's resolver. Assumes that the // options are already sorted in the proper order, with the 'default' option // last (if it exists). void EmitMultiVersionResolver(llvm::Function *Resolver, ArrayRef Options); static uint64_t GetX86CpuSupportsMask(ArrayRef FeatureStrs); private: QualType getVarArgType(const Expr *Arg); void EmitDeclMetadata(); BlockByrefHelpers *buildByrefHelpers(llvm::StructType &byrefType, const AutoVarEmission &emission); void AddObjCARCExceptionMetadata(llvm::Instruction *Inst); llvm::Value *GetValueForARMHint(unsigned BuiltinID); llvm::Value *EmitX86CpuIs(const CallExpr *E); llvm::Value *EmitX86CpuIs(StringRef CPUStr); llvm::Value *EmitX86CpuSupports(const CallExpr *E); llvm::Value *EmitX86CpuSupports(ArrayRef FeatureStrs); llvm::Value *EmitX86CpuSupports(uint64_t Mask); llvm::Value *EmitX86CpuInit(); llvm::Value *FormResolverCondition(const MultiVersionResolverOption &RO); }; inline DominatingLLVMValue::saved_type DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) { if (!needsSaving(value)) return saved_type(value, false); // Otherwise, we need an alloca. auto align = CharUnits::fromQuantity( CGF.CGM.getDataLayout().getPrefTypeAlignment(value->getType())); Address alloca = CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save"); CGF.Builder.CreateStore(value, alloca); return saved_type(alloca.getPointer(), true); } inline llvm::Value *DominatingLLVMValue::restore(CodeGenFunction &CGF, saved_type value) { // If the value says it wasn't saved, trust that it's still dominating. if (!value.getInt()) return value.getPointer(); // Otherwise, it should be an alloca instruction, as set up in save(). auto alloca = cast(value.getPointer()); return CGF.Builder.CreateAlignedLoad(alloca, alloca->getAlign()); } } // end namespace CodeGen // Map the LangOption for floating point exception behavior into // the corresponding enum in the IR. 
llvm::fp::ExceptionBehavior
ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind);

}  // end namespace clang

#endif
diff --git a/clang/test/CodeGenCXX/ubsan-nullability-arg.cpp b/clang/test/CodeGenCXX/ubsan-nullability-arg.cpp
new file mode 100644
index 000000000000..fbebd153a9ea
--- /dev/null
+++ b/clang/test/CodeGenCXX/ubsan-nullability-arg.cpp
@@ -0,0 +1,51 @@
+// RUN: %clang_cc1 -x c++ -triple x86_64-apple-darwin10 -emit-llvm -o - %s -fsanitize=nullability-arg | FileCheck %s -check-prefixes=ITANIUM,ALL
+// RUN: %clang_cc1 -x c++ -triple x86_64-pc-windows-msvc -emit-llvm -o - %s -fsanitize=nullability-arg | FileCheck %s -check-prefixes=MSVC,ALL
+
+namespace method_ptr {
+
+struct S0 {
+  void foo1();
+};
+
+void foo1(void (S0::*_Nonnull f)());
+
+// ITANIUM-LABEL: @_ZN10method_ptr5test1Ev(){{.*}} {
+// ITANIUM: br i1 icmp ne (i64 ptrtoint (void (%"struct.method_ptr::S0"*)* @_ZN10method_ptr2S04foo1Ev to i64), i64 0), label %[[CONT:.*]], label %[[FAIL:[^,]*]]
+// ITANIUM-EMPTY:
+// ITANIUM-NEXT: [[FAIL]]:
+// ITANIUM-NEXT: call void @__ubsan_handle_nullability_arg
+
+// MSVC-LABEL: @"?test1@method_ptr@@YAXXZ"(){{.*}} {
+// MSVC: br i1 true, label %[[CONT:.*]], label %[[FAIL:[^,]*]]
+// MSVC-EMPTY:
+// MSVC-NEXT: [[FAIL]]:
+// MSVC-NEXT: call void @__ubsan_handle_nullability_arg
+void test1() {
+  foo1(&S0::foo1);
+}
+
+} // namespace method_ptr
+
+namespace data_ptr {
+
+struct S0 {
+  int field1;
+};
+
+using member_ptr = int S0::*;
+
+void foo1(member_ptr _Nonnull);
+
+// ITANIUM-LABEL: @_ZN8data_ptr5test1ENS_2S0E(
+// MSVC-LABEL: @"?test1@data_ptr@@YAXUS0@1@@Z"(
+// ALL: [[DATA_PTR_CHECK:%.*]] = icmp ne {{.*}}, -1, !nosanitize
+// ALL-NEXT: br i1 [[DATA_PTR_CHECK]], label %[[CONT:.*]], label %[[FAIL:[^,]+]]
+// ALL-EMPTY:
+// ALL-NEXT: [[FAIL]]:
+// ALL-NEXT: call void @__ubsan_handle_nullability_arg
+void test1(S0 s) {
+  int S0::*member = &S0::field1;
+  foo1(member);
+}
+
+} // namespace data_ptr
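
For reference, here is a minimal user-level sketch (not part of the patch; all names below are made up for illustration) of the situation the new nullability-arg coverage addresses: a null pointer-to-member passed to a parameter annotated _Nonnull. Built with -fsanitize=nullability-arg, the two calls in caller() are expected to reach __ubsan_handle_nullability_arg at run time, mirroring the FileCheck expectations in the test above.

// Illustration only; assumes Clang's _Nonnull extension and
// -fsanitize=nullability-arg.
struct S {
  int field;
  void method();
};

using data_member_ptr = int S::*;

void takes_fn(void (S::*_Nonnull f)()) {}    // requires a non-null method pointer
void takes_data(data_member_ptr _Nonnull m) {} // requires a non-null data member pointer

void caller() {
  void (S::*fp)() = nullptr; // null method pointer (zero pointer field in the Itanium ABI)
  int S::*dp = nullptr;      // null data member pointer (represented as -1, hence the icmp ne -1 check)
  takes_fn(fp);              // diagnosed by the nullability-arg check
  takes_data(dp);            // diagnosed by the nullability-arg check
}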