Index: clang/include/clang/Basic/TargetInfo.h
===================================================================
--- clang/include/clang/Basic/TargetInfo.h
+++ clang/include/clang/Basic/TargetInfo.h
@@ -161,6 +161,10 @@
   /// If non-zero, specifies a fixed alignment value for bitfields that follow
   /// zero length bitfield, regardless of the zero length bitfield type.
   unsigned ZeroLengthBitfieldBoundary;
+
+  /// If non-zero, the alignment requested by the aligned attribute on a
+  /// static variable is truncated to this maximum number of bits.
+  unsigned MaxAlignedAttribute;
 };

 /// OpenCL type kinds.
@@ -774,6 +778,10 @@
     return ZeroLengthBitfieldBoundary;
   }

+  /// Get the maximum alignment (in bits) for a static variable with the
+  /// aligned attribute.
+  unsigned getMaxAlignedAttribute() const { return MaxAlignedAttribute; }
+
   /// Check whether explicit bitfield alignment attributes should be
   //  honored, as in "__attribute__((aligned(2))) int b : 1;".
   bool useExplicitBitFieldAlignment() const {
Index: clang/lib/AST/ASTContext.cpp
===================================================================
--- clang/lib/AST/ASTContext.cpp
+++ clang/lib/AST/ASTContext.cpp
@@ -1783,6 +1783,13 @@
     }
   }

+  // Some targets place a hard limit on the maximum alignment that can be
+  // requested with the aligned attribute on static variables.
+  const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
+  const auto *VD = dyn_cast<VarDecl>(D);
+  if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
+    Align = std::min(Align, MaxAlignedAttr);
+
   return toCharUnitsFromBits(Align);
 }

Index: clang/lib/Basic/TargetInfo.cpp
===================================================================
--- clang/lib/Basic/TargetInfo.cpp
+++ clang/lib/Basic/TargetInfo.cpp
@@ -106,6 +106,7 @@
   UseZeroLengthBitfieldAlignment = false;
   UseExplicitBitFieldAlignment = true;
   ZeroLengthBitfieldBoundary = 0;
+  MaxAlignedAttribute = 0;
   HalfFormat = &llvm::APFloat::IEEEhalf();
   FloatFormat = &llvm::APFloat::IEEEsingle();
   DoubleFormat = &llvm::APFloat::IEEEdouble();
Index: clang/lib/Basic/Targets/OSTargets.h
===================================================================
--- clang/lib/Basic/Targets/OSTargets.h
+++ clang/lib/Basic/Targets/OSTargets.h
@@ -788,6 +788,7 @@
   ZOSTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
       : OSTargetInfo<Target>(Triple, Opts) {
     this->WCharType = TargetInfo::UnsignedInt;
+    this->MaxAlignedAttribute = 128;
     this->UseBitFieldTypeAlignment = false;
     this->UseZeroLengthBitfieldAlignment = true;
     this->ZeroLengthBitfieldBoundary = 32;
Index: clang/test/CodeGen/SystemZ/zos-alignment.c
===================================================================
--- clang/test/CodeGen/SystemZ/zos-alignment.c
+++ clang/test/CodeGen/SystemZ/zos-alignment.c
@@ -1,4 +1,16 @@
-// RUN: %clang_cc1 -emit-llvm-only -triple s390x-none-zos -fdump-record-layouts %s | FileCheck %s
+// RUN: %clang_cc1 -emit-llvm-only -triple s390x-none-zos -fdump-record-layouts %s | FileCheck %s --check-prefix=CHECK
+// RUN: %clang_cc1 -emit-llvm -triple s390x-none-zos %s -o - | FileCheck %s --check-prefix=DECL
+
+static int __attribute__((aligned(32))) v0;
+int __attribute__((aligned(32))) v1;
+typedef int __attribute__((aligned(32))) int32;
+static int32 v2;
+int32 v3;
+int f0() { return v0 + v1 + v2 + v3; }
+// DECL: @v0 {{.*}} align 16
+// DECL-NEXT: @v1 {{.*}} align 32
+// DECL-NEXT: @v2 {{.*}} align 16
+// DECL-NEXT: @v3 {{.*}} align 32

 struct s0 {
   short a:3;
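Note (editorial illustration, not part of the patch): MaxAlignedAttribute is expressed in bits, so the z/OS value of 128 corresponds to the 16-byte "align 16" values checked in the test above. A minimal standalone sketch of the intended semantics, assuming an s390x-none-zos target (the file name is illustrative):

    /* max-align.c -- compile with:
     *   clang --target=s390x-none-zos -S -emit-llvm max-align.c
     * The cap applies only to variables with static storage class (SC_Static);
     * the external-linkage variable keeps its requested 32-byte alignment. */
    static int s __attribute__((aligned(32))); /* emitted with "align 16" (capped) */
    int g __attribute__((aligned(32)));        /* emitted with "align 32" */
    int sum(void) { return s + g; }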
"llvm/Support/MD5.h" + #include "llvm/Support/MathExtras.h" + #include "llvm/Support/raw_ostream.h" + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + + using namespace clang; + + enum FloatingRank { + BFloat16Rank, Float16Rank, HalfRank, FloatRank, DoubleRank, LongDoubleRank, Float128Rank + }; + + /// \returns location that is relevant when searching for Doc comments related + /// to \p D. + static SourceLocation getDeclLocForCommentSearch(const Decl *D, + SourceManager &SourceMgr) { + assert(D); + + // User can not attach documentation to implicit declarations. + if (D->isImplicit()) + return {}; + + // User can not attach documentation to implicit instantiations. + if (const auto *FD = dyn_cast(D)) { + if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) + return {}; + } + + if (const auto *VD = dyn_cast(D)) { + if (VD->isStaticDataMember() && + VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) + return {}; + } + + if (const auto *CRD = dyn_cast(D)) { + if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) + return {}; + } + + if (const auto *CTSD = dyn_cast(D)) { + TemplateSpecializationKind TSK = CTSD->getSpecializationKind(); + if (TSK == TSK_ImplicitInstantiation || + TSK == TSK_Undeclared) + return {}; + } + + if (const auto *ED = dyn_cast(D)) { + if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) + return {}; + } + if (const auto *TD = dyn_cast(D)) { + // When tag declaration (but not definition!) is part of the + // decl-specifier-seq of some other declaration, it doesn't get comment + if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition()) + return {}; + } + // TODO: handle comments for function parameters properly. + if (isa(D)) + return {}; + + // TODO: we could look up template parameter documentation in the template + // documentation. + if (isa(D) || + isa(D) || + isa(D)) + return {}; + + // Find declaration location. + // For Objective-C declarations we generally don't expect to have multiple + // declarators, thus use declaration starting location as the "declaration + // location". + // For all other declarations multiple declarators are used quite frequently, + // so we use the location of the identifier as the "declaration location". + if (isa(D) || isa(D) || + isa(D) || + isa(D) || + isa(D) || + // Allow association with Y across {} in `typedef struct X {} Y`. + isa(D)) + return D->getBeginLoc(); + else { + const SourceLocation DeclLoc = D->getLocation(); + if (DeclLoc.isMacroID()) { + if (isa(D)) { + // If location of the typedef name is in a macro, it is because being + // declared via a macro. Try using declaration's starting location as + // the "declaration location". + return D->getBeginLoc(); + } else if (const auto *TD = dyn_cast(D)) { + // If location of the tag decl is inside a macro, but the spelling of + // the tag name comes from a macro argument, it looks like a special + // macro like NS_ENUM is being used to define the tag decl. In that + // case, adjust the source location to the expansion loc so that we can + // attach the comment to the tag decl. 
+ if (SourceMgr.isMacroArgExpansion(DeclLoc) && + TD->isCompleteDefinition()) + return SourceMgr.getExpansionLoc(DeclLoc); + } + } + return DeclLoc; + } + + return {}; + } + + RawComment *ASTContext::getRawCommentForDeclNoCacheImpl( + const Decl *D, const SourceLocation RepresentativeLocForDecl, + const std::map &CommentsInTheFile) const { + // If the declaration doesn't map directly to a location in a file, we + // can't find the comment. + if (RepresentativeLocForDecl.isInvalid() || + !RepresentativeLocForDecl.isFileID()) + return nullptr; + + // If there are no comments anywhere, we won't find anything. + if (CommentsInTheFile.empty()) + return nullptr; + + // Decompose the location for the declaration and find the beginning of the + // file buffer. + const std::pair DeclLocDecomp = + SourceMgr.getDecomposedLoc(RepresentativeLocForDecl); + + // Slow path. + auto OffsetCommentBehindDecl = + CommentsInTheFile.lower_bound(DeclLocDecomp.second); + + // First check whether we have a trailing comment. + if (OffsetCommentBehindDecl != CommentsInTheFile.end()) { + RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second; + if ((CommentBehindDecl->isDocumentation() || + LangOpts.CommentOpts.ParseAllComments) && + CommentBehindDecl->isTrailingComment() && + (isa(D) || isa(D) || isa(D) || + isa(D) || isa(D))) { + + // Check that Doxygen trailing comment comes after the declaration, starts + // on the same line and in the same file as the declaration. + if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) == + Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first, + OffsetCommentBehindDecl->first)) { + return CommentBehindDecl; + } + } + } + + // The comment just after the declaration was not a trailing comment. + // Let's look at the previous comment. + if (OffsetCommentBehindDecl == CommentsInTheFile.begin()) + return nullptr; + + auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl; + RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second; + + // Check that we actually have a non-member Doxygen comment. + if (!(CommentBeforeDecl->isDocumentation() || + LangOpts.CommentOpts.ParseAllComments) || + CommentBeforeDecl->isTrailingComment()) + return nullptr; + + // Decompose the end of the comment. + const unsigned CommentEndOffset = + Comments.getCommentEndOffset(CommentBeforeDecl); + + // Get the corresponding buffer. + bool Invalid = false; + const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first, + &Invalid).data(); + if (Invalid) + return nullptr; + + // Extract text between the comment and declaration. + StringRef Text(Buffer + CommentEndOffset, + DeclLocDecomp.second - CommentEndOffset); + + // There should be no other declarations or preprocessor directives between + // comment and declaration. + if (Text.find_first_of(";{}#@") != StringRef::npos) + return nullptr; + + return CommentBeforeDecl; + } + + RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const { + const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr); + + // If the declaration doesn't map directly to a location in a file, we + // can't find the comment. 
+ if (DeclLoc.isInvalid() || !DeclLoc.isFileID()) + return nullptr; + + if (ExternalSource && !CommentsLoaded) { + ExternalSource->ReadComments(); + CommentsLoaded = true; + } + + if (Comments.empty()) + return nullptr; + + const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first; + const auto CommentsInThisFile = Comments.getCommentsInFile(File); + if (!CommentsInThisFile || CommentsInThisFile->empty()) + return nullptr; + + return getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile); + } + + void ASTContext::addComment(const RawComment &RC) { + assert(LangOpts.RetainCommentsFromSystemHeaders || + !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin())); + Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc); + } + + /// If we have a 'templated' declaration for a template, adjust 'D' to + /// refer to the actual template. + /// If we have an implicit instantiation, adjust 'D' to refer to template. + static const Decl &adjustDeclToTemplate(const Decl &D) { + if (const auto *FD = dyn_cast(&D)) { + // Is this function declaration part of a function template? + if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate()) + return *FTD; + + // Nothing to do if function is not an implicit instantiation. + if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation) + return D; + + // Function is an implicit instantiation of a function template? + if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate()) + return *FTD; + + // Function is instantiated from a member definition of a class template? + if (const FunctionDecl *MemberDecl = + FD->getInstantiatedFromMemberFunction()) + return *MemberDecl; + + return D; + } + if (const auto *VD = dyn_cast(&D)) { + // Static data member is instantiated from a member definition of a class + // template? + if (VD->isStaticDataMember()) + if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember()) + return *MemberDecl; + + return D; + } + if (const auto *CRD = dyn_cast(&D)) { + // Is this class declaration part of a class template? + if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate()) + return *CTD; + + // Class is an implicit instantiation of a class template or partial + // specialization? + if (const auto *CTSD = dyn_cast(CRD)) { + if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation) + return D; + llvm::PointerUnion + PU = CTSD->getSpecializedTemplateOrPartial(); + return PU.is() + ? *static_cast(PU.get()) + : *static_cast( + PU.get()); + } + + // Class is instantiated from a member definition of a class template? + if (const MemberSpecializationInfo *Info = + CRD->getMemberSpecializationInfo()) + return *Info->getInstantiatedFrom(); + + return D; + } + if (const auto *ED = dyn_cast(&D)) { + // Enum is instantiated from a member definition of a class template? + if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum()) + return *MemberDecl; + + return D; + } + // FIXME: Adjust alias templates? + return D; + } + + const RawComment *ASTContext::getRawCommentForAnyRedecl( + const Decl *D, + const Decl **OriginalDecl) const { + if (!D) { + if (OriginalDecl) + OriginalDecl = nullptr; + return nullptr; + } + + D = &adjustDeclToTemplate(*D); + + // Any comment directly attached to D? + { + auto DeclComment = DeclRawComments.find(D); + if (DeclComment != DeclRawComments.end()) { + if (OriginalDecl) + *OriginalDecl = D; + return DeclComment->second; + } + } + + // Any comment attached to any redeclaration of D? 
+ const Decl *CanonicalD = D->getCanonicalDecl(); + if (!CanonicalD) + return nullptr; + + { + auto RedeclComment = RedeclChainComments.find(CanonicalD); + if (RedeclComment != RedeclChainComments.end()) { + if (OriginalDecl) + *OriginalDecl = RedeclComment->second; + auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second); + assert(CommentAtRedecl != DeclRawComments.end() && + "This decl is supposed to have comment attached."); + return CommentAtRedecl->second; + } + } + + // Any redeclarations of D that we haven't checked for comments yet? + // We can't use DenseMap::iterator directly since it'd get invalid. + auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * { + auto LookupRes = CommentlessRedeclChains.find(CanonicalD); + if (LookupRes != CommentlessRedeclChains.end()) + return LookupRes->second; + return nullptr; + }(); + + for (const auto Redecl : D->redecls()) { + assert(Redecl); + // Skip all redeclarations that have been checked previously. + if (LastCheckedRedecl) { + if (LastCheckedRedecl == Redecl) { + LastCheckedRedecl = nullptr; + } + continue; + } + const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl); + if (RedeclComment) { + cacheRawCommentForDecl(*Redecl, *RedeclComment); + if (OriginalDecl) + *OriginalDecl = Redecl; + return RedeclComment; + } + CommentlessRedeclChains[CanonicalD] = Redecl; + } + + if (OriginalDecl) + *OriginalDecl = nullptr; + return nullptr; + } + + void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD, + const RawComment &Comment) const { + assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments); + DeclRawComments.try_emplace(&OriginalD, &Comment); + const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl(); + RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD); + CommentlessRedeclChains.erase(CanonicalDecl); + } + + static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod, + SmallVectorImpl &Redeclared) { + const DeclContext *DC = ObjCMethod->getDeclContext(); + if (const auto *IMD = dyn_cast(DC)) { + const ObjCInterfaceDecl *ID = IMD->getClassInterface(); + if (!ID) + return; + // Add redeclared method here. + for (const auto *Ext : ID->known_extensions()) { + if (ObjCMethodDecl *RedeclaredMethod = + Ext->getMethod(ObjCMethod->getSelector(), + ObjCMethod->isInstanceMethod())) + Redeclared.push_back(RedeclaredMethod); + } + } + } + + void ASTContext::attachCommentsToJustParsedDecls(ArrayRef Decls, + const Preprocessor *PP) { + if (Comments.empty() || Decls.empty()) + return; + + FileID File; + for (Decl *D : Decls) { + SourceLocation Loc = D->getLocation(); + if (Loc.isValid()) { + // See if there are any new comments that are not attached to a decl. + // The location doesn't have to be precise - we care only about the file. + File = SourceMgr.getDecomposedLoc(Loc).first; + break; + } + } + + if (File.isInvalid()) + return; + + auto CommentsInThisFile = Comments.getCommentsInFile(File); + if (!CommentsInThisFile || CommentsInThisFile->empty() || + CommentsInThisFile->rbegin()->second->isAttached()) + return; + + // There is at least one comment not attached to a decl. + // Maybe it should be attached to one of Decls? + // + // Note that this way we pick up not only comments that precede the + // declaration, but also comments that *follow* the declaration -- thanks to + // the lookahead in the lexer: we've consumed the semicolon and looked + // ahead through comments. 
+ + for (const Decl *D : Decls) { + assert(D); + if (D->isInvalidDecl()) + continue; + + D = &adjustDeclToTemplate(*D); + + const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr); + + if (DeclLoc.isInvalid() || !DeclLoc.isFileID()) + continue; + + if (DeclRawComments.count(D) > 0) + continue; + + if (RawComment *const DocComment = + getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile)) { + cacheRawCommentForDecl(*D, *DocComment); + comments::FullComment *FC = DocComment->parse(*this, PP, D); + ParsedComments[D->getCanonicalDecl()] = FC; + } + } + } + + comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC, + const Decl *D) const { + auto *ThisDeclInfo = new (*this) comments::DeclInfo; + ThisDeclInfo->CommentDecl = D; + ThisDeclInfo->IsFilled = false; + ThisDeclInfo->fill(); + ThisDeclInfo->CommentDecl = FC->getDecl(); + if (!ThisDeclInfo->TemplateParameters) + ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters; + comments::FullComment *CFC = + new (*this) comments::FullComment(FC->getBlocks(), + ThisDeclInfo); + return CFC; + } + + comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const { + const RawComment *RC = getRawCommentForDeclNoCache(D); + return RC ? RC->parse(*this, nullptr, D) : nullptr; + } + + comments::FullComment *ASTContext::getCommentForDecl( + const Decl *D, + const Preprocessor *PP) const { + if (!D || D->isInvalidDecl()) + return nullptr; + D = &adjustDeclToTemplate(*D); + + const Decl *Canonical = D->getCanonicalDecl(); + llvm::DenseMap::iterator Pos = + ParsedComments.find(Canonical); + + if (Pos != ParsedComments.end()) { + if (Canonical != D) { + comments::FullComment *FC = Pos->second; + comments::FullComment *CFC = cloneFullComment(FC, D); + return CFC; + } + return Pos->second; + } + + const Decl *OriginalDecl = nullptr; + + const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl); + if (!RC) { + if (isa(D) || isa(D)) { + SmallVector Overridden; + const auto *OMD = dyn_cast(D); + if (OMD && OMD->isPropertyAccessor()) + if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl()) + if (comments::FullComment *FC = getCommentForDecl(PDecl, PP)) + return cloneFullComment(FC, D); + if (OMD) + addRedeclaredMethods(OMD, Overridden); + getOverriddenMethods(dyn_cast(D), Overridden); + for (unsigned i = 0, e = Overridden.size(); i < e; i++) + if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP)) + return cloneFullComment(FC, D); + } + else if (const auto *TD = dyn_cast(D)) { + // Attach any tag type's documentation to its typedef if latter + // does not have one of its own. + QualType QT = TD->getUnderlyingType(); + if (const auto *TT = QT->getAs()) + if (const Decl *TD = TT->getDecl()) + if (comments::FullComment *FC = getCommentForDecl(TD, PP)) + return cloneFullComment(FC, D); + } + else if (const auto *IC = dyn_cast(D)) { + while (IC->getSuperClass()) { + IC = IC->getSuperClass(); + if (comments::FullComment *FC = getCommentForDecl(IC, PP)) + return cloneFullComment(FC, D); + } + } + else if (const auto *CD = dyn_cast(D)) { + if (const ObjCInterfaceDecl *IC = CD->getClassInterface()) + if (comments::FullComment *FC = getCommentForDecl(IC, PP)) + return cloneFullComment(FC, D); + } + else if (const auto *RD = dyn_cast(D)) { + if (!(RD = RD->getDefinition())) + return nullptr; + // Check non-virtual bases. 
+ for (const auto &I : RD->bases()) { + if (I.isVirtual() || (I.getAccessSpecifier() != AS_public)) + continue; + QualType Ty = I.getType(); + if (Ty.isNull()) + continue; + if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) { + if (!(NonVirtualBase= NonVirtualBase->getDefinition())) + continue; + + if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP)) + return cloneFullComment(FC, D); + } + } + // Check virtual bases. + for (const auto &I : RD->vbases()) { + if (I.getAccessSpecifier() != AS_public) + continue; + QualType Ty = I.getType(); + if (Ty.isNull()) + continue; + if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) { + if (!(VirtualBase= VirtualBase->getDefinition())) + continue; + if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP)) + return cloneFullComment(FC, D); + } + } + } + return nullptr; + } + + // If the RawComment was attached to other redeclaration of this Decl, we + // should parse the comment in context of that other Decl. This is important + // because comments can contain references to parameter names which can be + // different across redeclarations. + if (D != OriginalDecl && OriginalDecl) + return getCommentForDecl(OriginalDecl, PP); + + comments::FullComment *FC = RC->parse(*this, PP, D); + ParsedComments[Canonical] = FC; + return FC; + } + + void + ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID, + const ASTContext &C, + TemplateTemplateParmDecl *Parm) { + ID.AddInteger(Parm->getDepth()); + ID.AddInteger(Parm->getPosition()); + ID.AddBoolean(Parm->isParameterPack()); + + TemplateParameterList *Params = Parm->getTemplateParameters(); + ID.AddInteger(Params->size()); + for (TemplateParameterList::const_iterator P = Params->begin(), + PEnd = Params->end(); + P != PEnd; ++P) { + if (const auto *TTP = dyn_cast(*P)) { + ID.AddInteger(0); + ID.AddBoolean(TTP->isParameterPack()); + const TypeConstraint *TC = TTP->getTypeConstraint(); + ID.AddBoolean(TC != nullptr); + if (TC) + TC->getImmediatelyDeclaredConstraint()->Profile(ID, C, + /*Canonical=*/true); + if (TTP->isExpandedParameterPack()) { + ID.AddBoolean(true); + ID.AddInteger(TTP->getNumExpansionParameters()); + } else + ID.AddBoolean(false); + continue; + } + + if (const auto *NTTP = dyn_cast(*P)) { + ID.AddInteger(1); + ID.AddBoolean(NTTP->isParameterPack()); + ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr()); + if (NTTP->isExpandedParameterPack()) { + ID.AddBoolean(true); + ID.AddInteger(NTTP->getNumExpansionTypes()); + for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) { + QualType T = NTTP->getExpansionType(I); + ID.AddPointer(T.getCanonicalType().getAsOpaquePtr()); + } + } else + ID.AddBoolean(false); + continue; + } + + auto *TTP = cast(*P); + ID.AddInteger(2); + Profile(ID, C, TTP); + } + Expr *RequiresClause = Parm->getTemplateParameters()->getRequiresClause(); + ID.AddBoolean(RequiresClause != nullptr); + if (RequiresClause) + RequiresClause->Profile(ID, C, /*Canonical=*/true); + } + + static Expr * + canonicalizeImmediatelyDeclaredConstraint(const ASTContext &C, Expr *IDC, + QualType ConstrainedType) { + // This is a bit ugly - we need to form a new immediately-declared + // constraint that references the new parameter; this would ideally + // require semantic analysis (e.g. template struct S {}; - the + // converted arguments of C could be an argument pack if C is + // declared as template concept C = ...). 
+ // We don't have semantic analysis here so we dig deep into the + // ready-made constraint expr and change the thing manually. + ConceptSpecializationExpr *CSE; + if (const auto *Fold = dyn_cast(IDC)) + CSE = cast(Fold->getLHS()); + else + CSE = cast(IDC); + ArrayRef OldConverted = CSE->getTemplateArguments(); + SmallVector NewConverted; + NewConverted.reserve(OldConverted.size()); + if (OldConverted.front().getKind() == TemplateArgument::Pack) { + // The case: + // template concept C = true; + // template T> struct S; -> constraint is C<{T, int}> + NewConverted.push_back(ConstrainedType); + for (auto &Arg : OldConverted.front().pack_elements().drop_front(1)) + NewConverted.push_back(Arg); + TemplateArgument NewPack(NewConverted); + + NewConverted.clear(); + NewConverted.push_back(NewPack); + assert(OldConverted.size() == 1 && + "Template parameter pack should be the last parameter"); + } else { + assert(OldConverted.front().getKind() == TemplateArgument::Type && + "Unexpected first argument kind for immediately-declared " + "constraint"); + NewConverted.push_back(ConstrainedType); + for (auto &Arg : OldConverted.drop_front(1)) + NewConverted.push_back(Arg); + } + Expr *NewIDC = ConceptSpecializationExpr::Create( + C, CSE->getNamedConcept(), NewConverted, nullptr, + CSE->isInstantiationDependent(), CSE->containsUnexpandedParameterPack()); + + if (auto *OrigFold = dyn_cast(IDC)) + NewIDC = new (C) CXXFoldExpr( + OrigFold->getType(), /*Callee*/nullptr, SourceLocation(), NewIDC, + BinaryOperatorKind::BO_LAnd, SourceLocation(), /*RHS=*/nullptr, + SourceLocation(), /*NumExpansions=*/None); + return NewIDC; + } + + TemplateTemplateParmDecl * + ASTContext::getCanonicalTemplateTemplateParmDecl( + TemplateTemplateParmDecl *TTP) const { + // Check if we already have a canonical template template parameter. + llvm::FoldingSetNodeID ID; + CanonicalTemplateTemplateParm::Profile(ID, *this, TTP); + void *InsertPos = nullptr; + CanonicalTemplateTemplateParm *Canonical + = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos); + if (Canonical) + return Canonical->getParam(); + + // Build a canonical template parameter list. + TemplateParameterList *Params = TTP->getTemplateParameters(); + SmallVector CanonParams; + CanonParams.reserve(Params->size()); + for (TemplateParameterList::const_iterator P = Params->begin(), + PEnd = Params->end(); + P != PEnd; ++P) { + if (const auto *TTP = dyn_cast(*P)) { + TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(*this, + getTranslationUnitDecl(), SourceLocation(), SourceLocation(), + TTP->getDepth(), TTP->getIndex(), nullptr, false, + TTP->isParameterPack(), TTP->hasTypeConstraint(), + TTP->isExpandedParameterPack() ? 
+ llvm::Optional(TTP->getNumExpansionParameters()) : None); + if (const auto *TC = TTP->getTypeConstraint()) { + QualType ParamAsArgument(NewTTP->getTypeForDecl(), 0); + Expr *NewIDC = canonicalizeImmediatelyDeclaredConstraint( + *this, TC->getImmediatelyDeclaredConstraint(), + ParamAsArgument); + TemplateArgumentListInfo CanonArgsAsWritten; + if (auto *Args = TC->getTemplateArgsAsWritten()) + for (const auto &ArgLoc : Args->arguments()) + CanonArgsAsWritten.addArgument( + TemplateArgumentLoc(ArgLoc.getArgument(), + TemplateArgumentLocInfo())); + NewTTP->setTypeConstraint( + NestedNameSpecifierLoc(), + DeclarationNameInfo(TC->getNamedConcept()->getDeclName(), + SourceLocation()), /*FoundDecl=*/nullptr, + // Actually canonicalizing a TemplateArgumentLoc is difficult so we + // simply omit the ArgsAsWritten + TC->getNamedConcept(), /*ArgsAsWritten=*/nullptr, NewIDC); + } + CanonParams.push_back(NewTTP); + } else if (const auto *NTTP = dyn_cast(*P)) { + QualType T = getCanonicalType(NTTP->getType()); + TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T); + NonTypeTemplateParmDecl *Param; + if (NTTP->isExpandedParameterPack()) { + SmallVector ExpandedTypes; + SmallVector ExpandedTInfos; + for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) { + ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I))); + ExpandedTInfos.push_back( + getTrivialTypeSourceInfo(ExpandedTypes.back())); + } + + Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(), + SourceLocation(), + SourceLocation(), + NTTP->getDepth(), + NTTP->getPosition(), nullptr, + T, + TInfo, + ExpandedTypes, + ExpandedTInfos); + } else { + Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(), + SourceLocation(), + SourceLocation(), + NTTP->getDepth(), + NTTP->getPosition(), nullptr, + T, + NTTP->isParameterPack(), + TInfo); + } + if (AutoType *AT = T->getContainedAutoType()) { + if (AT->isConstrained()) { + Param->setPlaceholderTypeConstraint( + canonicalizeImmediatelyDeclaredConstraint( + *this, NTTP->getPlaceholderTypeConstraint(), T)); + } + } + CanonParams.push_back(Param); + + } else + CanonParams.push_back(getCanonicalTemplateTemplateParmDecl( + cast(*P))); + } + + Expr *CanonRequiresClause = nullptr; + if (Expr *RequiresClause = TTP->getTemplateParameters()->getRequiresClause()) + CanonRequiresClause = RequiresClause; + + TemplateTemplateParmDecl *CanonTTP + = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(), + SourceLocation(), TTP->getDepth(), + TTP->getPosition(), + TTP->isParameterPack(), + nullptr, + TemplateParameterList::Create(*this, SourceLocation(), + SourceLocation(), + CanonParams, + SourceLocation(), + CanonRequiresClause)); + + // Get the new insert position for the node we care about. + Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos); + assert(!Canonical && "Shouldn't be in the map!"); + (void)Canonical; + + // Create the canonical template template parameter entry. 
+ Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP); + CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos); + return CanonTTP; + } + + CXXABI *ASTContext::createCXXABI(const TargetInfo &T) { + if (!LangOpts.CPlusPlus) return nullptr; + + switch (T.getCXXABI().getKind()) { + case TargetCXXABI::AppleARM64: + case TargetCXXABI::Fuchsia: + case TargetCXXABI::GenericARM: // Same as Itanium at this level + case TargetCXXABI::iOS: + case TargetCXXABI::WatchOS: + case TargetCXXABI::GenericAArch64: + case TargetCXXABI::GenericMIPS: + case TargetCXXABI::GenericItanium: + case TargetCXXABI::WebAssembly: + case TargetCXXABI::XL: + return CreateItaniumCXXABI(*this); + case TargetCXXABI::Microsoft: + return CreateMicrosoftCXXABI(*this); + } + llvm_unreachable("Invalid CXXABI type!"); + } + + interp::Context &ASTContext::getInterpContext() { + if (!InterpContext) { + InterpContext.reset(new interp::Context(*this)); + } + return *InterpContext.get(); + } + + ParentMapContext &ASTContext::getParentMapContext() { + if (!ParentMapCtx) + ParentMapCtx.reset(new ParentMapContext(*this)); + return *ParentMapCtx.get(); + } + + static const LangASMap *getAddressSpaceMap(const TargetInfo &T, + const LangOptions &LOpts) { + if (LOpts.FakeAddressSpaceMap) { + // The fake address space map must have a distinct entry for each + // language-specific address space. + static const unsigned FakeAddrSpaceMap[] = { + 0, // Default + 1, // opencl_global + 3, // opencl_local + 2, // opencl_constant + 0, // opencl_private + 4, // opencl_generic + 5, // opencl_global_device + 6, // opencl_global_host + 7, // cuda_device + 8, // cuda_constant + 9, // cuda_shared + 10, // ptr32_sptr + 11, // ptr32_uptr + 12 // ptr64 + }; + return &FakeAddrSpaceMap; + } else { + return &T.getAddressSpaceMap(); + } + } + + static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI, + const LangOptions &LangOpts) { + switch (LangOpts.getAddressSpaceMapMangling()) { + case LangOptions::ASMM_Target: + return TI.useAddressSpaceMapMangling(); + case LangOptions::ASMM_On: + return true; + case LangOptions::ASMM_Off: + return false; + } + llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything."); + } + + ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM, + IdentifierTable &idents, SelectorTable &sels, + Builtin::Context &builtins) + : ConstantArrayTypes(this_()), FunctionProtoTypes(this_()), + TemplateSpecializationTypes(this_()), + DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()), + SubstTemplateTemplateParmPacks(this_()), + CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts), + NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)), + XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles, + LangOpts.XRayNeverInstrumentFiles, + LangOpts.XRayAttrListFiles, SM)), + ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)), + PrintingPolicy(LOpts), Idents(idents), Selectors(sels), + BuiltinInfo(builtins), DeclarationNames(*this), Comments(SM), + CommentCommandTraits(BumpAlloc, LOpts.CommentOpts), + CompCategories(this_()), LastSDM(nullptr, 0) { + TUDecl = TranslationUnitDecl::Create(*this); + TraversalScope = {TUDecl}; + } + + ASTContext::~ASTContext() { + // Release the DenseMaps associated with DeclContext objects. + // FIXME: Is this the ideal solution? + ReleaseDeclContextMaps(); + + // Call all of the deallocation functions on all of their targets. 
+ for (auto &Pair : Deallocations) + (Pair.first)(Pair.second); + + // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed + // because they can contain DenseMaps. + for (llvm::DenseMap::iterator + I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; ) + // Increment in loop to prevent using deallocated memory. + if (auto *R = const_cast((I++)->second)) + R->Destroy(*this); + + for (llvm::DenseMap::iterator + I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) { + // Increment in loop to prevent using deallocated memory. + if (auto *R = const_cast((I++)->second)) + R->Destroy(*this); + } + + for (llvm::DenseMap::iterator A = DeclAttrs.begin(), + AEnd = DeclAttrs.end(); + A != AEnd; ++A) + A->second->~AttrVec(); + + for (const auto &Value : ModuleInitializers) + Value.second->~PerModuleInitializers(); + } + + void ASTContext::setTraversalScope(const std::vector &TopLevelDecls) { + TraversalScope = TopLevelDecls; + getParentMapContext().clear(); + } + + void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const { + Deallocations.push_back({Callback, Data}); + } + + void + ASTContext::setExternalSource(IntrusiveRefCntPtr Source) { + ExternalSource = std::move(Source); + } + + void ASTContext::PrintStats() const { + llvm::errs() << "\n*** AST Context Stats:\n"; + llvm::errs() << " " << Types.size() << " types total.\n"; + + unsigned counts[] = { + #define TYPE(Name, Parent) 0, + #define ABSTRACT_TYPE(Name, Parent) + #include "clang/AST/TypeNodes.inc" + 0 // Extra + }; + + for (unsigned i = 0, e = Types.size(); i != e; ++i) { + Type *T = Types[i]; + counts[(unsigned)T->getTypeClass()]++; + } + + unsigned Idx = 0; + unsigned TotalBytes = 0; + #define TYPE(Name, Parent) \ + if (counts[Idx]) \ + llvm::errs() << " " << counts[Idx] << " " << #Name \ + << " types, " << sizeof(Name##Type) << " each " \ + << "(" << counts[Idx] * sizeof(Name##Type) \ + << " bytes)\n"; \ + TotalBytes += counts[Idx] * sizeof(Name##Type); \ + ++Idx; + #define ABSTRACT_TYPE(Name, Parent) + #include "clang/AST/TypeNodes.inc" + + llvm::errs() << "Total bytes = " << TotalBytes << "\n"; + + // Implicit special member functions. 
+ llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/" + << NumImplicitDefaultConstructors + << " implicit default constructors created\n"; + llvm::errs() << NumImplicitCopyConstructorsDeclared << "/" + << NumImplicitCopyConstructors + << " implicit copy constructors created\n"; + if (getLangOpts().CPlusPlus) + llvm::errs() << NumImplicitMoveConstructorsDeclared << "/" + << NumImplicitMoveConstructors + << " implicit move constructors created\n"; + llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/" + << NumImplicitCopyAssignmentOperators + << " implicit copy assignment operators created\n"; + if (getLangOpts().CPlusPlus) + llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/" + << NumImplicitMoveAssignmentOperators + << " implicit move assignment operators created\n"; + llvm::errs() << NumImplicitDestructorsDeclared << "/" + << NumImplicitDestructors + << " implicit destructors created\n"; + + if (ExternalSource) { + llvm::errs() << "\n"; + ExternalSource->PrintStats(); + } + + BumpAlloc.PrintStats(); + } + + void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M, + bool NotifyListeners) { + if (NotifyListeners) + if (auto *Listener = getASTMutationListener()) + Listener->RedefinedHiddenDefinition(ND, M); + + MergedDefModules[cast(ND->getCanonicalDecl())].push_back(M); + } + + void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) { + auto It = MergedDefModules.find(cast(ND->getCanonicalDecl())); + if (It == MergedDefModules.end()) + return; + + auto &Merged = It->second; + llvm::DenseSet Found; + for (Module *&M : Merged) + if (!Found.insert(M).second) + M = nullptr; + Merged.erase(std::remove(Merged.begin(), Merged.end(), nullptr), Merged.end()); + } + + ArrayRef + ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) { + auto MergedIt = + MergedDefModules.find(cast(Def->getCanonicalDecl())); + if (MergedIt == MergedDefModules.end()) + return None; + return MergedIt->second; + } + + void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) { + if (LazyInitializers.empty()) + return; + + auto *Source = Ctx.getExternalSource(); + assert(Source && "lazy initializers but no external source"); + + auto LazyInits = std::move(LazyInitializers); + LazyInitializers.clear(); + + for (auto ID : LazyInits) + Initializers.push_back(Source->GetExternalDecl(ID)); + + assert(LazyInitializers.empty() && + "GetExternalDecl for lazy module initializer added more inits"); + } + + void ASTContext::addModuleInitializer(Module *M, Decl *D) { + // One special case: if we add a module initializer that imports another + // module, and that module's only initializer is an ImportDecl, simplify. + if (const auto *ID = dyn_cast(D)) { + auto It = ModuleInitializers.find(ID->getImportedModule()); + + // Maybe the ImportDecl does nothing at all. (Common case.) + if (It == ModuleInitializers.end()) + return; + + // Maybe the ImportDecl only imports another ImportDecl. 
+ auto &Imported = *It->second; + if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) { + Imported.resolve(*this); + auto *OnlyDecl = Imported.Initializers.front(); + if (isa(OnlyDecl)) + D = OnlyDecl; + } + } + + auto *&Inits = ModuleInitializers[M]; + if (!Inits) + Inits = new (*this) PerModuleInitializers; + Inits->Initializers.push_back(D); + } + + void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef IDs) { + auto *&Inits = ModuleInitializers[M]; + if (!Inits) + Inits = new (*this) PerModuleInitializers; + Inits->LazyInitializers.insert(Inits->LazyInitializers.end(), + IDs.begin(), IDs.end()); + } + + ArrayRef ASTContext::getModuleInitializers(Module *M) { + auto It = ModuleInitializers.find(M); + if (It == ModuleInitializers.end()) + return None; + + auto *Inits = It->second; + Inits->resolve(*this); + return Inits->Initializers; + } + + ExternCContextDecl *ASTContext::getExternCContextDecl() const { + if (!ExternCContext) + ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl()); + + return ExternCContext; + } + + BuiltinTemplateDecl * + ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK, + const IdentifierInfo *II) const { + auto *BuiltinTemplate = BuiltinTemplateDecl::Create(*this, TUDecl, II, BTK); + BuiltinTemplate->setImplicit(); + TUDecl->addDecl(BuiltinTemplate); + + return BuiltinTemplate; + } + + BuiltinTemplateDecl * + ASTContext::getMakeIntegerSeqDecl() const { + if (!MakeIntegerSeqDecl) + MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq, + getMakeIntegerSeqName()); + return MakeIntegerSeqDecl; + } + + BuiltinTemplateDecl * + ASTContext::getTypePackElementDecl() const { + if (!TypePackElementDecl) + TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element, + getTypePackElementName()); + return TypePackElementDecl; + } + + RecordDecl *ASTContext::buildImplicitRecord(StringRef Name, + RecordDecl::TagKind TK) const { + SourceLocation Loc; + RecordDecl *NewDecl; + if (getLangOpts().CPlusPlus) + NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, + Loc, &Idents.get(Name)); + else + NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc, + &Idents.get(Name)); + NewDecl->setImplicit(); + NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit( + const_cast(*this), TypeVisibilityAttr::Default)); + return NewDecl; + } + + TypedefDecl *ASTContext::buildImplicitTypedef(QualType T, + StringRef Name) const { + TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T); + TypedefDecl *NewDecl = TypedefDecl::Create( + const_cast(*this), getTranslationUnitDecl(), + SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo); + NewDecl->setImplicit(); + return NewDecl; + } + + TypedefDecl *ASTContext::getInt128Decl() const { + if (!Int128Decl) + Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t"); + return Int128Decl; + } + + TypedefDecl *ASTContext::getUInt128Decl() const { + if (!UInt128Decl) + UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t"); + return UInt128Decl; + } + + void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) { + auto *Ty = new (*this, TypeAlignment) BuiltinType(K); + R = CanQualType::CreateUnsafe(QualType(Ty, 0)); + Types.push_back(Ty); + } + + void ASTContext::InitBuiltinTypes(const TargetInfo &Target, + const TargetInfo *AuxTarget) { + assert((!this->Target || this->Target == &Target) && + "Incorrect target reinitialization"); + assert(VoidTy.isNull() && "Context reinitialized?"); + + this->Target = 
&Target; + this->AuxTarget = AuxTarget; + + ABI.reset(createCXXABI(Target)); + AddrSpaceMap = getAddressSpaceMap(Target, LangOpts); + AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts); + + // C99 6.2.5p19. + InitBuiltinType(VoidTy, BuiltinType::Void); + + // C99 6.2.5p2. + InitBuiltinType(BoolTy, BuiltinType::Bool); + // C99 6.2.5p3. + if (LangOpts.CharIsSigned) + InitBuiltinType(CharTy, BuiltinType::Char_S); + else + InitBuiltinType(CharTy, BuiltinType::Char_U); + // C99 6.2.5p4. + InitBuiltinType(SignedCharTy, BuiltinType::SChar); + InitBuiltinType(ShortTy, BuiltinType::Short); + InitBuiltinType(IntTy, BuiltinType::Int); + InitBuiltinType(LongTy, BuiltinType::Long); + InitBuiltinType(LongLongTy, BuiltinType::LongLong); + + // C99 6.2.5p6. + InitBuiltinType(UnsignedCharTy, BuiltinType::UChar); + InitBuiltinType(UnsignedShortTy, BuiltinType::UShort); + InitBuiltinType(UnsignedIntTy, BuiltinType::UInt); + InitBuiltinType(UnsignedLongTy, BuiltinType::ULong); + InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong); + + // C99 6.2.5p10. + InitBuiltinType(FloatTy, BuiltinType::Float); + InitBuiltinType(DoubleTy, BuiltinType::Double); + InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble); + + // GNU extension, __float128 for IEEE quadruple precision + InitBuiltinType(Float128Ty, BuiltinType::Float128); + + // C11 extension ISO/IEC TS 18661-3 + InitBuiltinType(Float16Ty, BuiltinType::Float16); + + // ISO/IEC JTC1 SC22 WG14 N1169 Extension + InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum); + InitBuiltinType(AccumTy, BuiltinType::Accum); + InitBuiltinType(LongAccumTy, BuiltinType::LongAccum); + InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum); + InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum); + InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum); + InitBuiltinType(ShortFractTy, BuiltinType::ShortFract); + InitBuiltinType(FractTy, BuiltinType::Fract); + InitBuiltinType(LongFractTy, BuiltinType::LongFract); + InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract); + InitBuiltinType(UnsignedFractTy, BuiltinType::UFract); + InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract); + InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum); + InitBuiltinType(SatAccumTy, BuiltinType::SatAccum); + InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum); + InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum); + InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum); + InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum); + InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract); + InitBuiltinType(SatFractTy, BuiltinType::SatFract); + InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract); + InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract); + InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract); + InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract); + + // GNU extension, 128-bit integers. + InitBuiltinType(Int128Ty, BuiltinType::Int128); + InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128); + + // C++ 3.9.1p5 + if (TargetInfo::isTypeSigned(Target.getWCharType())) + InitBuiltinType(WCharTy, BuiltinType::WChar_S); + else // -fshort-wchar makes wchar_t be unsigned. + InitBuiltinType(WCharTy, BuiltinType::WChar_U); + if (LangOpts.CPlusPlus && LangOpts.WChar) + WideCharTy = WCharTy; + else { + // C99 (or C++ using -fno-wchar). 
+ WideCharTy = getFromTargetType(Target.getWCharType()); + } + + WIntTy = getFromTargetType(Target.getWIntType()); + + // C++20 (proposed) + InitBuiltinType(Char8Ty, BuiltinType::Char8); + + if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++ + InitBuiltinType(Char16Ty, BuiltinType::Char16); + else // C99 + Char16Ty = getFromTargetType(Target.getChar16Type()); + + if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++ + InitBuiltinType(Char32Ty, BuiltinType::Char32); + else // C99 + Char32Ty = getFromTargetType(Target.getChar32Type()); + + // Placeholder type for type-dependent expressions whose type is + // completely unknown. No code should ever check a type against + // DependentTy and users should never see it; however, it is here to + // help diagnose failures to properly check for type-dependent + // expressions. + InitBuiltinType(DependentTy, BuiltinType::Dependent); + + // Placeholder type for functions. + InitBuiltinType(OverloadTy, BuiltinType::Overload); + + // Placeholder type for bound members. + InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember); + + // Placeholder type for pseudo-objects. + InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject); + + // "any" type; useful for debugger-like clients. + InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny); + + // Placeholder type for unbridged ARC casts. + InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast); + + // Placeholder type for builtin functions. + InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn); + + // Placeholder type for OMP array sections. + if (LangOpts.OpenMP) { + InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection); + InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping); + InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator); + } + if (LangOpts.MatrixTypes) + InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx); + + // C99 6.2.5p11. + FloatComplexTy = getComplexType(FloatTy); + DoubleComplexTy = getComplexType(DoubleTy); + LongDoubleComplexTy = getComplexType(LongDoubleTy); + Float128ComplexTy = getComplexType(Float128Ty); + + // Builtin types for 'id', 'Class', and 'SEL'. 
+ InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId); + InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass); + InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel); + + if (LangOpts.OpenCL) { + #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ + InitBuiltinType(SingletonId, BuiltinType::Id); + #include "clang/Basic/OpenCLImageTypes.def" + + InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler); + InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent); + InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent); + InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue); + InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID); + + #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ + InitBuiltinType(Id##Ty, BuiltinType::Id); + #include "clang/Basic/OpenCLExtensionTypes.def" + } + + if (Target.hasAArch64SVETypes()) { + #define SVE_TYPE(Name, Id, SingletonId) \ + InitBuiltinType(SingletonId, BuiltinType::Id); + #include "clang/Basic/AArch64SVEACLETypes.def" + } + + if (Target.getTriple().isPPC64() && + Target.hasFeature("paired-vector-memops")) { + if (Target.hasFeature("mma")) { + #define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \ + InitBuiltinType(Id##Ty, BuiltinType::Id); + #include "clang/Basic/PPCTypes.def" + } + #define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \ + InitBuiltinType(Id##Ty, BuiltinType::Id); + #include "clang/Basic/PPCTypes.def" + } + + if (Target.hasRISCVVTypes()) { + #define RVV_TYPE(Name, Id, SingletonId) \ + InitBuiltinType(SingletonId, BuiltinType::Id); + #include "clang/Basic/RISCVVTypes.def" + } + + // Builtin type for __objc_yes and __objc_no + ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ? + SignedCharTy : BoolTy); + + ObjCConstantStringType = QualType(); + + ObjCSuperType = QualType(); + + // void * type + if (LangOpts.OpenCLGenericAddressSpace) { + auto Q = VoidTy.getQualifiers(); + Q.setAddressSpace(LangAS::opencl_generic); + VoidPtrTy = getPointerType(getCanonicalType( + getQualifiedType(VoidTy.getUnqualifiedType(), Q))); + } else { + VoidPtrTy = getPointerType(VoidTy); + } + + // nullptr type (C++0x 2.14.7) + InitBuiltinType(NullPtrTy, BuiltinType::NullPtr); + + // half type (OpenCL 6.1.1.1) / ARM NEON __fp16 + InitBuiltinType(HalfTy, BuiltinType::Half); + + InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16); + + // Builtin type used to help define __builtin_va_list. + VaListTagDecl = nullptr; + + // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls. + if (LangOpts.MicrosoftExt || LangOpts.Borland) { + MSGuidTagDecl = buildImplicitRecord("_GUID"); + TUDecl->addDecl(MSGuidTagDecl); + } + } + + DiagnosticsEngine &ASTContext::getDiagnostics() const { + return SourceMgr.getDiagnostics(); + } + + AttrVec& ASTContext::getDeclAttrs(const Decl *D) { + AttrVec *&Result = DeclAttrs[D]; + if (!Result) { + void *Mem = Allocate(sizeof(AttrVec)); + Result = new (Mem) AttrVec; + } + + return *Result; + } + + /// Erase the attributes corresponding to the given declaration. + void ASTContext::eraseDeclAttrs(const Decl *D) { + llvm::DenseMap::iterator Pos = DeclAttrs.find(D); + if (Pos != DeclAttrs.end()) { + Pos->second->~AttrVec(); + DeclAttrs.erase(Pos); + } + } + + // FIXME: Remove ? 
+ MemberSpecializationInfo * + ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) { + assert(Var->isStaticDataMember() && "Not a static data member"); + return getTemplateOrSpecializationInfo(Var) + .dyn_cast(); + } + + ASTContext::TemplateOrSpecializationInfo + ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) { + llvm::DenseMap::iterator Pos = + TemplateOrInstantiation.find(Var); + if (Pos == TemplateOrInstantiation.end()) + return {}; + + return Pos->second; + } + + void + ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl, + TemplateSpecializationKind TSK, + SourceLocation PointOfInstantiation) { + assert(Inst->isStaticDataMember() && "Not a static data member"); + assert(Tmpl->isStaticDataMember() && "Not a static data member"); + setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo( + Tmpl, TSK, PointOfInstantiation)); + } + + void + ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst, + TemplateOrSpecializationInfo TSI) { + assert(!TemplateOrInstantiation[Inst] && + "Already noted what the variable was instantiated from"); + TemplateOrInstantiation[Inst] = TSI; + } + + NamedDecl * + ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) { + auto Pos = InstantiatedFromUsingDecl.find(UUD); + if (Pos == InstantiatedFromUsingDecl.end()) + return nullptr; + + return Pos->second; + } + + void + ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) { + assert((isa(Pattern) || + isa(Pattern) || + isa(Pattern)) && + "pattern decl is not a using decl"); + assert((isa(Inst) || + isa(Inst) || + isa(Inst)) && + "instantiation did not produce a using decl"); + assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists"); + InstantiatedFromUsingDecl[Inst] = Pattern; + } + + UsingShadowDecl * + ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) { + llvm::DenseMap::const_iterator Pos + = InstantiatedFromUsingShadowDecl.find(Inst); + if (Pos == InstantiatedFromUsingShadowDecl.end()) + return nullptr; + + return Pos->second; + } + + void + ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst, + UsingShadowDecl *Pattern) { + assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists"); + InstantiatedFromUsingShadowDecl[Inst] = Pattern; + } + + FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) { + llvm::DenseMap::iterator Pos + = InstantiatedFromUnnamedFieldDecl.find(Field); + if (Pos == InstantiatedFromUnnamedFieldDecl.end()) + return nullptr; + + return Pos->second; + } + + void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst, + FieldDecl *Tmpl) { + assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed"); + assert(!Tmpl->getDeclName() && "Template field decl is not unnamed"); + assert(!InstantiatedFromUnnamedFieldDecl[Inst] && + "Already noted what unnamed field was instantiated from"); + + InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl; + } + + ASTContext::overridden_cxx_method_iterator + ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const { + return overridden_methods(Method).begin(); + } + + ASTContext::overridden_cxx_method_iterator + ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const { + return overridden_methods(Method).end(); + } + + unsigned + ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const { + auto Range = overridden_methods(Method); + return Range.end() - Range.begin(); + } + + 
+ ASTContext::overridden_method_range
+ ASTContext::overridden_methods(const CXXMethodDecl *Method) const {
+   llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
+       OverriddenMethods.find(Method->getCanonicalDecl());
+   if (Pos == OverriddenMethods.end())
+     return overridden_method_range(nullptr, nullptr);
+   return overridden_method_range(Pos->second.begin(), Pos->second.end());
+ }
+
+ void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
+                                      const CXXMethodDecl *Overridden) {
+   assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
+   OverriddenMethods[Method].push_back(Overridden);
+ }
+
+ void ASTContext::getOverriddenMethods(
+     const NamedDecl *D,
+     SmallVectorImpl<const NamedDecl *> &Overridden) const {
+   assert(D);
+
+   if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
+     Overridden.append(overridden_methods_begin(CXXMethod),
+                       overridden_methods_end(CXXMethod));
+     return;
+   }
+
+   const auto *Method = dyn_cast<ObjCMethodDecl>(D);
+   if (!Method)
+     return;
+
+   SmallVector<const ObjCMethodDecl *, 8> OverDecls;
+   Method->getOverriddenMethods(OverDecls);
+   Overridden.append(OverDecls.begin(), OverDecls.end());
+ }
+
+ void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
+   assert(!Import->getNextLocalImport() &&
+          "Import declaration already in the chain");
+   assert(!Import->isFromASTFile() && "Non-local import declaration");
+   if (!FirstLocalImport) {
+     FirstLocalImport = Import;
+     LastLocalImport = Import;
+     return;
+   }
+
+   LastLocalImport->setNextLocalImport(Import);
+   LastLocalImport = Import;
+ }
+
+ //===----------------------------------------------------------------------===//
+ //                         Type Sizing and Analysis
+ //===----------------------------------------------------------------------===//
+
+ /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
+ /// scalar floating point type.
+ const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
+   switch (T->castAs<BuiltinType>()->getKind()) {
+   default:
+     llvm_unreachable("Not a floating point type!");
+   case BuiltinType::BFloat16:
+     return Target->getBFloat16Format();
+   case BuiltinType::Float16:
+   case BuiltinType::Half:
+     return Target->getHalfFormat();
+   case BuiltinType::Float:      return Target->getFloatFormat();
+   case BuiltinType::Double:     return Target->getDoubleFormat();
+   case BuiltinType::LongDouble:
+     if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
+       return AuxTarget->getLongDoubleFormat();
+     return Target->getLongDoubleFormat();
+   case BuiltinType::Float128:
+     if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
+       return AuxTarget->getFloat128Format();
+     return Target->getFloat128Format();
+   }
+ }
+
+ CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
+   unsigned Align = Target->getCharWidth();
+
+   bool UseAlignAttrOnly = false;
+   if (unsigned AlignFromAttr = D->getMaxAlignment()) {
+     Align = AlignFromAttr;
+
+     // __attribute__((aligned)) can increase or decrease alignment
+     // *except* on a struct or struct member, where it only increases
+     // alignment unless 'packed' is also specified.
+     //
+     // It is an error for alignas to decrease alignment, so we can
+     // ignore that possibility; Sema should diagnose it.
+     if (isa<FieldDecl>(D)) {
+       UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
+         cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
+     } else {
+       UseAlignAttrOnly = true;
+     }
+   }
+   else if (isa<FieldDecl>(D))
+     UseAlignAttrOnly =
+       D->hasAttr<PackedAttr>() ||
+       cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
+
+   // If we're using the align attribute only, just ignore everything
+   // else about the declaration and its type.
+   if (UseAlignAttrOnly) {
+     // do nothing
+   } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
+     QualType T = VD->getType();
+     if (const auto *RT = T->getAs<ReferenceType>()) {
+       if (ForAlignof)
+         T = RT->getPointeeType();
+       else
+         T = getPointerType(RT->getPointeeType());
+     }
+     QualType BaseT = getBaseElementType(T);
+     if (T->isFunctionType())
+       Align = getTypeInfoImpl(T.getTypePtr()).Align;
+     else if (!BaseT->isIncompleteType()) {
+       // Adjust alignments of declarations with array type by the
+       // large-array alignment on the target.
+       if (const ArrayType *arrayType = getAsArrayType(T)) {
+         unsigned MinWidth = Target->getLargeArrayMinWidth();
+         if (!ForAlignof && MinWidth) {
+           if (isa<VariableArrayType>(arrayType))
+             Align = std::max(Align, Target->getLargeArrayAlign());
+           else if (isa<ConstantArrayType>(arrayType) &&
+                    MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
+             Align = std::max(Align, Target->getLargeArrayAlign());
+         }
+       }
+       Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
+       if (BaseT.getQualifiers().hasUnaligned())
+         Align = Target->getCharWidth();
+       if (const auto *VD = dyn_cast<VarDecl>(D)) {
+         if (VD->hasGlobalStorage() && !ForAlignof) {
+           uint64_t TypeSize = getTypeSize(T.getTypePtr());
+           Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize));
+         }
+       }
+     }
+
+     // Fields can be subject to extra alignment constraints, like if
+     // the field is packed, the struct is packed, or the struct has a
+     // max-field-alignment constraint (#pragma pack). So calculate
+     // the actual alignment of the field within the struct, and then
+     // (as we're expected to) constrain that by the alignment of the type.
+     if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
+       const RecordDecl *Parent = Field->getParent();
+       // We can only produce a sensible answer if the record is valid.
+       if (!Parent->isInvalidDecl()) {
+         const ASTRecordLayout &Layout = getASTRecordLayout(Parent);
+
+         // Start with the record's overall alignment.
+         unsigned FieldAlign = toBits(Layout.getAlignment());
+
+         // Use the GCD of that and the offset within the record.
+         uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
+         if (Offset > 0) {
+           // Alignment is always a power of 2, so the GCD will be a power of 2,
+           // which means we get to do this crazy thing instead of Euclid's.
+           uint64_t LowBitOfOffset = Offset & (~Offset + 1);
+           if (LowBitOfOffset < FieldAlign)
+             FieldAlign = static_cast<unsigned>(LowBitOfOffset);
+         }
+
+         Align = std::min(Align, FieldAlign);
+       }
+     }
+   }
+
++  // Some targets place a hard limit on the maximum alignment that the
++  // aligned attribute may request for a static variable; clamp to it here.
++  const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
++  const auto *VD = dyn_cast<VarDecl>(D);
++  if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
++    Align = std::min(Align, MaxAlignedAttr);
++
+   return toCharUnitsFromBits(Align);
+ }
+
+ CharUnits ASTContext::getExnObjectAlignment() const {
+   return toCharUnitsFromBits(Target->getExnObjectAlignment());
+ }
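The clamp just added is the functional core of this patch. A self-contained sketch of its observable behavior, using the 128-bit limit that the OSTargets.h hunk sets for z/OS; the helper name and signature are invented for illustration, not clang API:

#include <algorithm>
#include <cassert>

// A static variable may not request a larger alignment than the target's
// MaxAlignedAttribute (in bits, 0 meaning "no limit"); other variables keep
// the full requested alignment.
unsigned declAlignInBits(unsigned RequestedAlign, bool IsStatic,
                         unsigned MaxAlignedAttribute) {
  unsigned Align = RequestedAlign;
  if (MaxAlignedAttribute && IsStatic)
    Align = std::min(Align, MaxAlignedAttribute);
  return Align;
}

int main() {
  // __attribute__((aligned(32))) requests 32 bytes = 256 bits.
  assert(declAlignInBits(256, /*IsStatic=*/true, 128) == 128);  // clamped
  assert(declAlignInBits(256, /*IsStatic=*/false, 128) == 256); // non-static
  assert(declAlignInBits(256, /*IsStatic=*/true, 0) == 256);    // other targets
}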
+ // getTypeInfoDataSizeInChars - Return the size of a type, in
+ // chars. If the type is a record, its data size is returned. This is
+ // the size of the memcpy that's performed when assigning this type
+ // using a trivial copy/move assignment operator.
+ TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
+   TypeInfoChars Info = getTypeInfoInChars(T);
+
+   // In C++, objects can sometimes be allocated into the tail padding
+   // of a base-class subobject. We decide whether that's possible
+   // during class layout, so here we can just trust the layout results.
+   if (getLangOpts().CPlusPlus) {
+     if (const auto *RT = T->getAs<RecordType>()) {
+       const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
+       Info.Width = layout.getDataSize();
+     }
+   }
+
+   return Info;
+ }
+
+ /// getConstantArrayInfoInChars - Performing the computation in CharUnits
+ /// instead of in bits prevents overflowing the uint64_t for some large arrays.
+ TypeInfoChars
+ static getConstantArrayInfoInChars(const ASTContext &Context,
+                                    const ConstantArrayType *CAT) {
+   TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType());
+   uint64_t Size = CAT->getSize().getZExtValue();
+   assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <=
+               (uint64_t)(-1) / Size) &&
+          "Overflow in array type char size evaluation");
+   uint64_t Width = EltInfo.Width.getQuantity() * Size;
+   unsigned Align = EltInfo.Align.getQuantity();
+   if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
+       Context.getTargetInfo().getPointerWidth(0) == 64)
+     Width = llvm::alignTo(Width, Align);
+   return TypeInfoChars(CharUnits::fromQuantity(Width),
+                        CharUnits::fromQuantity(Align),
+                        EltInfo.AlignIsRequired);
+ }
+
+ TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const {
+   if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
+     return getConstantArrayInfoInChars(*this, CAT);
+   TypeInfo Info = getTypeInfo(T);
+   return TypeInfoChars(toCharUnitsFromBits(Info.Width),
+                        toCharUnitsFromBits(Info.Align),
+                        Info.AlignIsRequired);
+ }
+
+ TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const {
+   return getTypeInfoInChars(T.getTypePtr());
+ }
+
+ bool ASTContext::isAlignmentRequired(const Type *T) const {
+   return getTypeInfo(T).AlignIsRequired;
+ }
+
+ bool ASTContext::isAlignmentRequired(QualType T) const {
+   return isAlignmentRequired(T.getTypePtr());
+ }
+
+ unsigned ASTContext::getTypeAlignIfKnown(QualType T,
+                                          bool NeedsPreferredAlignment) const {
+   // An alignment on a typedef overrides anything else.
+   if (const auto *TT = T->getAs<TypedefType>())
+     if (unsigned Align = TT->getDecl()->getMaxAlignment())
+       return Align;
+
+   // If we have an (array of) complete type, we're done.
+   T = getBaseElementType(T);
+   if (!T->isIncompleteType())
+     return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T);
+
+   // If we had an array type, its element type might be a typedef
+   // type with an alignment attribute.
+   if (const auto *TT = T->getAs<TypedefType>())
+     if (unsigned Align = TT->getDecl()->getMaxAlignment())
+       return Align;
+
+   // Otherwise, see if the declaration of the type had an attribute.
+   if (const auto *TT = T->getAs<TagType>())
+     return TT->getDecl()->getMaxAlignment();
+
+   return 0;
+ }
+
+ TypeInfo ASTContext::getTypeInfo(const Type *T) const {
+   TypeInfoMap::iterator I = MemoizedTypeInfo.find(T);
+   if (I != MemoizedTypeInfo.end())
+     return I->second;
+
+   // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
+   TypeInfo TI = getTypeInfoImpl(T);
+   MemoizedTypeInfo[T] = TI;
+   return TI;
+ }
+
+ /// getTypeInfoImpl - Return the size of the specified type, in bits. This
+ /// method does not work on incomplete types.
+ ///
+ /// FIXME: Pointers into different addr spaces could have different sizes and
+ /// alignment requirements: getPointerInfo should take an AddrSpace, this
+ /// should take a QualType, &c.
+ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { + uint64_t Width = 0; + unsigned Align = 8; + bool AlignIsRequired = false; + unsigned AS = 0; + switch (T->getTypeClass()) { + #define TYPE(Class, Base) + #define ABSTRACT_TYPE(Class, Base) + #define NON_CANONICAL_TYPE(Class, Base) + #define DEPENDENT_TYPE(Class, Base) case Type::Class: + #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \ + case Type::Class: \ + assert(!T->isDependentType() && "should not see dependent types here"); \ + return getTypeInfo(cast(T)->desugar().getTypePtr()); + #include "clang/AST/TypeNodes.inc" + llvm_unreachable("Should not see dependent types"); + + case Type::FunctionNoProto: + case Type::FunctionProto: + // GCC extension: alignof(function) = 32 bits + Width = 0; + Align = 32; + break; + + case Type::IncompleteArray: + case Type::VariableArray: + case Type::ConstantArray: { + // Model non-constant sized arrays as size zero, but track the alignment. + uint64_t Size = 0; + if (const auto *CAT = dyn_cast(T)) + Size = CAT->getSize().getZExtValue(); + + TypeInfo EltInfo = getTypeInfo(cast(T)->getElementType()); + assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) && + "Overflow in array type bit size evaluation"); + Width = EltInfo.Width * Size; + Align = EltInfo.Align; + AlignIsRequired = EltInfo.AlignIsRequired; + if (!getTargetInfo().getCXXABI().isMicrosoft() || + getTargetInfo().getPointerWidth(0) == 64) + Width = llvm::alignTo(Width, Align); + break; + } + + case Type::ExtVector: + case Type::Vector: { + const auto *VT = cast(T); + TypeInfo EltInfo = getTypeInfo(VT->getElementType()); + Width = EltInfo.Width * VT->getNumElements(); + Align = Width; + // If the alignment is not a power of 2, round up to the next power of 2. + // This happens for non-power-of-2 length vectors. + if (Align & (Align-1)) { + Align = llvm::NextPowerOf2(Align); + Width = llvm::alignTo(Width, Align); + } + // Adjust the alignment based on the target max. + uint64_t TargetVectorAlign = Target->getMaxVectorAlign(); + if (TargetVectorAlign && TargetVectorAlign < Align) + Align = TargetVectorAlign; + if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) + // Adjust the alignment for fixed-length SVE vectors. This is important + // for non-power-of-2 vector lengths. + Align = 128; + else if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) + // Adjust the alignment for fixed-length SVE predicates. + Align = 16; + break; + } + + case Type::ConstantMatrix: { + const auto *MT = cast(T); + TypeInfo ElementInfo = getTypeInfo(MT->getElementType()); + // The internal layout of a matrix value is implementation defined. + // Initially be ABI compatible with arrays with respect to alignment and + // size. + Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns(); + Align = ElementInfo.Align; + break; + } + + case Type::Builtin: + switch (cast(T)->getKind()) { + default: llvm_unreachable("Unknown builtin type!"); + case BuiltinType::Void: + // GCC extension: alignof(void) = 8 bits. 
+ Width = 0; + Align = 8; + break; + case BuiltinType::Bool: + Width = Target->getBoolWidth(); + Align = Target->getBoolAlign(); + break; + case BuiltinType::Char_S: + case BuiltinType::Char_U: + case BuiltinType::UChar: + case BuiltinType::SChar: + case BuiltinType::Char8: + Width = Target->getCharWidth(); + Align = Target->getCharAlign(); + break; + case BuiltinType::WChar_S: + case BuiltinType::WChar_U: + Width = Target->getWCharWidth(); + Align = Target->getWCharAlign(); + break; + case BuiltinType::Char16: + Width = Target->getChar16Width(); + Align = Target->getChar16Align(); + break; + case BuiltinType::Char32: + Width = Target->getChar32Width(); + Align = Target->getChar32Align(); + break; + case BuiltinType::UShort: + case BuiltinType::Short: + Width = Target->getShortWidth(); + Align = Target->getShortAlign(); + break; + case BuiltinType::UInt: + case BuiltinType::Int: + Width = Target->getIntWidth(); + Align = Target->getIntAlign(); + break; + case BuiltinType::ULong: + case BuiltinType::Long: + Width = Target->getLongWidth(); + Align = Target->getLongAlign(); + break; + case BuiltinType::ULongLong: + case BuiltinType::LongLong: + Width = Target->getLongLongWidth(); + Align = Target->getLongLongAlign(); + break; + case BuiltinType::Int128: + case BuiltinType::UInt128: + Width = 128; + Align = 128; // int128_t is 128-bit aligned on all targets. + break; + case BuiltinType::ShortAccum: + case BuiltinType::UShortAccum: + case BuiltinType::SatShortAccum: + case BuiltinType::SatUShortAccum: + Width = Target->getShortAccumWidth(); + Align = Target->getShortAccumAlign(); + break; + case BuiltinType::Accum: + case BuiltinType::UAccum: + case BuiltinType::SatAccum: + case BuiltinType::SatUAccum: + Width = Target->getAccumWidth(); + Align = Target->getAccumAlign(); + break; + case BuiltinType::LongAccum: + case BuiltinType::ULongAccum: + case BuiltinType::SatLongAccum: + case BuiltinType::SatULongAccum: + Width = Target->getLongAccumWidth(); + Align = Target->getLongAccumAlign(); + break; + case BuiltinType::ShortFract: + case BuiltinType::UShortFract: + case BuiltinType::SatShortFract: + case BuiltinType::SatUShortFract: + Width = Target->getShortFractWidth(); + Align = Target->getShortFractAlign(); + break; + case BuiltinType::Fract: + case BuiltinType::UFract: + case BuiltinType::SatFract: + case BuiltinType::SatUFract: + Width = Target->getFractWidth(); + Align = Target->getFractAlign(); + break; + case BuiltinType::LongFract: + case BuiltinType::ULongFract: + case BuiltinType::SatLongFract: + case BuiltinType::SatULongFract: + Width = Target->getLongFractWidth(); + Align = Target->getLongFractAlign(); + break; + case BuiltinType::BFloat16: + Width = Target->getBFloat16Width(); + Align = Target->getBFloat16Align(); + break; + case BuiltinType::Float16: + case BuiltinType::Half: + if (Target->hasFloat16Type() || !getLangOpts().OpenMP || + !getLangOpts().OpenMPIsDevice) { + Width = Target->getHalfWidth(); + Align = Target->getHalfAlign(); + } else { + assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && + "Expected OpenMP device compilation."); + Width = AuxTarget->getHalfWidth(); + Align = AuxTarget->getHalfAlign(); + } + break; + case BuiltinType::Float: + Width = Target->getFloatWidth(); + Align = Target->getFloatAlign(); + break; + case BuiltinType::Double: + Width = Target->getDoubleWidth(); + Align = Target->getDoubleAlign(); + break; + case BuiltinType::LongDouble: + if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && + (Target->getLongDoubleWidth() != 
AuxTarget->getLongDoubleWidth() || + Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) { + Width = AuxTarget->getLongDoubleWidth(); + Align = AuxTarget->getLongDoubleAlign(); + } else { + Width = Target->getLongDoubleWidth(); + Align = Target->getLongDoubleAlign(); + } + break; + case BuiltinType::Float128: + if (Target->hasFloat128Type() || !getLangOpts().OpenMP || + !getLangOpts().OpenMPIsDevice) { + Width = Target->getFloat128Width(); + Align = Target->getFloat128Align(); + } else { + assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && + "Expected OpenMP device compilation."); + Width = AuxTarget->getFloat128Width(); + Align = AuxTarget->getFloat128Align(); + } + break; + case BuiltinType::NullPtr: + Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t) + Align = Target->getPointerAlign(0); // == sizeof(void*) + break; + case BuiltinType::ObjCId: + case BuiltinType::ObjCClass: + case BuiltinType::ObjCSel: + Width = Target->getPointerWidth(0); + Align = Target->getPointerAlign(0); + break; + case BuiltinType::OCLSampler: + case BuiltinType::OCLEvent: + case BuiltinType::OCLClkEvent: + case BuiltinType::OCLQueue: + case BuiltinType::OCLReserveID: + #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ + case BuiltinType::Id: + #include "clang/Basic/OpenCLImageTypes.def" + #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ + case BuiltinType::Id: + #include "clang/Basic/OpenCLExtensionTypes.def" + AS = getTargetAddressSpace( + Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T))); + Width = Target->getPointerWidth(AS); + Align = Target->getPointerAlign(AS); + break; + // The SVE types are effectively target-specific. The length of an + // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple + // of 128 bits. There is one predicate bit for each vector byte, so the + // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits. + // + // Because the length is only known at runtime, we use a dummy value + // of 0 for the static length. The alignment values are those defined + // by the Procedure Call Standard for the Arm Architecture. + #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ + IsSigned, IsFP, IsBF) \ + case BuiltinType::Id: \ + Width = 0; \ + Align = 128; \ + break; + #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ + case BuiltinType::Id: \ + Width = 0; \ + Align = 16; \ + break; + #include "clang/Basic/AArch64SVEACLETypes.def" + #define PPC_VECTOR_TYPE(Name, Id, Size) \ + case BuiltinType::Id: \ + Width = Size; \ + Align = Size; \ + break; + #include "clang/Basic/PPCTypes.def" + #define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \ + IsFP) \ + case BuiltinType::Id: \ + Width = 0; \ + Align = ElBits; \ + break; + #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \ + case BuiltinType::Id: \ + Width = 0; \ + Align = 8; \ + break; + #include "clang/Basic/RISCVVTypes.def" + } + break; + case Type::ObjCObjectPointer: + Width = Target->getPointerWidth(0); + Align = Target->getPointerAlign(0); + break; + case Type::BlockPointer: + AS = getTargetAddressSpace(cast(T)->getPointeeType()); + Width = Target->getPointerWidth(AS); + Align = Target->getPointerAlign(AS); + break; + case Type::LValueReference: + case Type::RValueReference: + // alignof and sizeof should never enter this code path here, so we go + // the pointer route. 
+ AS = getTargetAddressSpace(cast(T)->getPointeeType()); + Width = Target->getPointerWidth(AS); + Align = Target->getPointerAlign(AS); + break; + case Type::Pointer: + AS = getTargetAddressSpace(cast(T)->getPointeeType()); + Width = Target->getPointerWidth(AS); + Align = Target->getPointerAlign(AS); + break; + case Type::MemberPointer: { + const auto *MPT = cast(T); + CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT); + Width = MPI.Width; + Align = MPI.Align; + break; + } + case Type::Complex: { + // Complex types have the same alignment as their elements, but twice the + // size. + TypeInfo EltInfo = getTypeInfo(cast(T)->getElementType()); + Width = EltInfo.Width * 2; + Align = EltInfo.Align; + break; + } + case Type::ObjCObject: + return getTypeInfo(cast(T)->getBaseType().getTypePtr()); + case Type::Adjusted: + case Type::Decayed: + return getTypeInfo(cast(T)->getAdjustedType().getTypePtr()); + case Type::ObjCInterface: { + const auto *ObjCI = cast(T); + if (ObjCI->getDecl()->isInvalidDecl()) { + Width = 8; + Align = 8; + break; + } + const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl()); + Width = toBits(Layout.getSize()); + Align = toBits(Layout.getAlignment()); + break; + } + case Type::ExtInt: { + const auto *EIT = cast(T); + Align = + std::min(static_cast(std::max( + getCharWidth(), llvm::PowerOf2Ceil(EIT->getNumBits()))), + Target->getLongLongAlign()); + Width = llvm::alignTo(EIT->getNumBits(), Align); + break; + } + case Type::Record: + case Type::Enum: { + const auto *TT = cast(T); + + if (TT->getDecl()->isInvalidDecl()) { + Width = 8; + Align = 8; + break; + } + + if (const auto *ET = dyn_cast(TT)) { + const EnumDecl *ED = ET->getDecl(); + TypeInfo Info = + getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType()); + if (unsigned AttrAlign = ED->getMaxAlignment()) { + Info.Align = AttrAlign; + Info.AlignIsRequired = true; + } + return Info; + } + + const auto *RT = cast(TT); + const RecordDecl *RD = RT->getDecl(); + const ASTRecordLayout &Layout = getASTRecordLayout(RD); + Width = toBits(Layout.getSize()); + Align = toBits(Layout.getAlignment()); + AlignIsRequired = RD->hasAttr(); + break; + } + + case Type::SubstTemplateTypeParm: + return getTypeInfo(cast(T)-> + getReplacementType().getTypePtr()); + + case Type::Auto: + case Type::DeducedTemplateSpecialization: { + const auto *A = cast(T); + assert(!A->getDeducedType().isNull() && + "cannot request the size of an undeduced or dependent auto type"); + return getTypeInfo(A->getDeducedType().getTypePtr()); + } + + case Type::Paren: + return getTypeInfo(cast(T)->getInnerType().getTypePtr()); + + case Type::MacroQualified: + return getTypeInfo( + cast(T)->getUnderlyingType().getTypePtr()); + + case Type::ObjCTypeParam: + return getTypeInfo(cast(T)->desugar().getTypePtr()); + + case Type::Typedef: { + const TypedefNameDecl *Typedef = cast(T)->getDecl(); + TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr()); + // If the typedef has an aligned attribute on it, it overrides any computed + // alignment we have. This violates the GCC documentation (which says that + // attribute(aligned) can only round up) but matches its implementation. 
+     if (unsigned AttrAlign = Typedef->getMaxAlignment()) {
+       Align = AttrAlign;
+       AlignIsRequired = true;
+     } else {
+       Align = Info.Align;
+       AlignIsRequired = Info.AlignIsRequired;
+     }
+     Width = Info.Width;
+     break;
+   }
+
+   case Type::Elaborated:
+     return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
+
+   case Type::Attributed:
+     return getTypeInfo(
+         cast<AttributedType>(T)->getEquivalentType().getTypePtr());
+
+   case Type::Atomic: {
+     // Start with the base type information.
+     TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
+     Width = Info.Width;
+     Align = Info.Align;
+
+     if (!Width) {
+       // An otherwise zero-sized type should still generate an
+       // atomic operation.
+       Width = Target->getCharWidth();
+       assert(Align);
+     } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
+       // If the size of the type doesn't exceed the platform's max
+       // atomic promotion width, make the size and alignment more
+       // favorable to atomic operations:
+
+       // Round the size up to a power of 2.
+       if (!llvm::isPowerOf2_64(Width))
+         Width = llvm::NextPowerOf2(Width);
+
+       // Set the alignment equal to the size.
+       Align = static_cast<unsigned>(Width);
+     }
+   }
+   break;
+
+   case Type::Pipe:
+     Width = Target->getPointerWidth(getTargetAddressSpace(LangAS::opencl_global));
+     Align = Target->getPointerAlign(getTargetAddressSpace(LangAS::opencl_global));
+     break;
+   }
+
+   assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
+   return TypeInfo(Width, Align, AlignIsRequired);
+ }
+
+ unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
+   UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
+   if (I != MemoizedUnadjustedAlign.end())
+     return I->second;
+
+   unsigned UnadjustedAlign;
+   if (const auto *RT = T->getAs<RecordType>()) {
+     const RecordDecl *RD = RT->getDecl();
+     const ASTRecordLayout &Layout = getASTRecordLayout(RD);
+     UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
+   } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
+     const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
+     UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
+   } else {
+     UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
+   }
+
+   MemoizedUnadjustedAlign[T] = UnadjustedAlign;
+   return UnadjustedAlign;
+ }
+
+ unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
+   unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign();
+   return SimdAlign;
+ }
+
+ /// toCharUnitsFromBits - Convert a size in bits to a size in characters.
+ CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
+   return CharUnits::fromQuantity(BitSize / getCharWidth());
+ }
+
+ /// toBits - Convert a size in characters to a size in bits.
+ int64_t ASTContext::toBits(CharUnits CharSize) const {
+   return CharSize.getQuantity() * getCharWidth();
+ }
+
+ /// getTypeSizeInChars - Return the size of the specified type, in characters.
+ /// This method does not work on incomplete types.
+ CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
+   return getTypeInfoInChars(T).Width;
+ }
+ CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
+   return getTypeInfoInChars(T).Width;
+ }
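toCharUnitsFromBits truncates, so the two conversions above are exact inverses only when the bit size is a multiple of the char width. A quick standalone check of the arithmetic, assuming the usual 8-bit char (the constants here are invented for illustration):

#include <cstdint>

constexpr unsigned CharWidth = 8; // what Target->getCharWidth() returns here

constexpr int64_t toCharUnitsFromBits(int64_t BitSize) {
  return BitSize / CharWidth; // truncating; callers pass multiples of 8
}
constexpr int64_t toBits(int64_t CharSize) { return CharSize * CharWidth; }

// The z/OS clamp value from this patch, 128 bits, is 16 chars:
static_assert(toCharUnitsFromBits(128) == 16, "128 bits is 16 chars");
static_assert(toBits(16) == 128, "and 16 chars is 128 bits again");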
+ /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
+ /// characters. This method does not work on incomplete types.
+ CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
+   return toCharUnitsFromBits(getTypeAlign(T));
+ }
+ CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
+   return toCharUnitsFromBits(getTypeAlign(T));
+ }
+
+ /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
+ /// type, in characters, before alignment adjustments. This method does
+ /// not work on incomplete types.
+ CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
+   return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
+ }
+ CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
+   return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
+ }
+
+ /// getPreferredTypeAlign - Return the "preferred" alignment of the specified
+ /// type for the current target in bits. This can be different than the ABI
+ /// alignment in cases where it is beneficial for performance or backwards
+ /// compatibility preserving to overalign a data type. (Note: despite the name,
+ /// the preferred alignment is ABI-impacting, and not an optimization.)
+ unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
+   TypeInfo TI = getTypeInfo(T);
+   unsigned ABIAlign = TI.Align;
+
+   T = T->getBaseElementTypeUnsafe();
+
+   // The preferred alignment of member pointers is that of a pointer.
+   if (T->isMemberPointerType())
+     return getPreferredTypeAlign(getPointerDiffType().getTypePtr());
+
+   if (!Target->allowsLargerPreferedTypeAlignment())
+     return ABIAlign;
+
+   if (const auto *RT = T->getAs<RecordType>()) {
+     if (TI.AlignIsRequired || RT->getDecl()->isInvalidDecl())
+       return ABIAlign;
+
+     unsigned PreferredAlign = static_cast<unsigned>(
+         toBits(getASTRecordLayout(RT->getDecl()).PreferredAlignment));
+     assert(PreferredAlign >= ABIAlign &&
+            "PreferredAlign should be at least as large as ABIAlign.");
+     return PreferredAlign;
+   }
+
+   // Double (and, for targets supporting AIX `power` alignment, long double)
+   // and long long should be naturally aligned (despite requiring less
+   // alignment) if possible.
+   if (const auto *CT = T->getAs<ComplexType>())
+     T = CT->getElementType().getTypePtr();
+   if (const auto *ET = T->getAs<EnumType>())
+     T = ET->getDecl()->getIntegerType().getTypePtr();
+   if (T->isSpecificBuiltinType(BuiltinType::Double) ||
+       T->isSpecificBuiltinType(BuiltinType::LongLong) ||
+       T->isSpecificBuiltinType(BuiltinType::ULongLong) ||
+       (T->isSpecificBuiltinType(BuiltinType::LongDouble) &&
+        Target->defaultsToAIXPowerAlignment()))
+     // Don't increase the alignment if an alignment attribute was specified on
+     // a typedef declaration.
+     if (!TI.AlignIsRequired)
+       return std::max(ABIAlign, (unsigned)getTypeSize(T));
+
+   return ABIAlign;
+ }
+
+ /// getTargetDefaultAlignForAttributeAligned - Return the default alignment
+ /// for __attribute__((aligned)) on this target, to be used if no alignment
+ /// value is specified.
+ unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
+   return getTargetInfo().getDefaultAlignForAttributeAligned();
+ }
+
+ /// getAlignOfGlobalVar - Return the alignment in bits that should be given
+ /// to a global variable of the specified type.
+ unsigned ASTContext::getAlignOfGlobalVar(QualType T) const {
+   uint64_t TypeSize = getTypeSize(T.getTypePtr());
+   return std::max(getPreferredTypeAlign(T),
+                   getTargetInfo().getMinGlobalAlign(TypeSize));
+ }
+
+ /// getAlignOfGlobalVarInChars - Return the alignment in characters that
+ /// should be given to a global variable of the specified type.
+ CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const { + return toCharUnitsFromBits(getAlignOfGlobalVar(T)); + } + + CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const { + CharUnits Offset = CharUnits::Zero(); + const ASTRecordLayout *Layout = &getASTRecordLayout(RD); + while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) { + Offset += Layout->getBaseClassOffset(Base); + Layout = &getASTRecordLayout(Base); + } + return Offset; + } + + CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const { + const ValueDecl *MPD = MP.getMemberPointerDecl(); + CharUnits ThisAdjustment = CharUnits::Zero(); + ArrayRef Path = MP.getMemberPointerPath(); + bool DerivedMember = MP.isMemberPointerToDerivedMember(); + const CXXRecordDecl *RD = cast(MPD->getDeclContext()); + for (unsigned I = 0, N = Path.size(); I != N; ++I) { + const CXXRecordDecl *Base = RD; + const CXXRecordDecl *Derived = Path[I]; + if (DerivedMember) + std::swap(Base, Derived); + ThisAdjustment += getASTRecordLayout(Derived).getBaseClassOffset(Base); + RD = Path[I]; + } + if (DerivedMember) + ThisAdjustment = -ThisAdjustment; + return ThisAdjustment; + } + + /// DeepCollectObjCIvars - + /// This routine first collects all declared, but not synthesized, ivars in + /// super class and then collects all ivars, including those synthesized for + /// current class. This routine is used for implementation of current class + /// when all ivars, declared and synthesized are known. + void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI, + bool leafClass, + SmallVectorImpl &Ivars) const { + if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass()) + DeepCollectObjCIvars(SuperClass, false, Ivars); + if (!leafClass) { + for (const auto *I : OI->ivars()) + Ivars.push_back(I); + } else { + auto *IDecl = const_cast(OI); + for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv; + Iv= Iv->getNextIvar()) + Ivars.push_back(Iv); + } + } + + /// CollectInheritedProtocols - Collect all protocols in current class and + /// those inherited by it. + void ASTContext::CollectInheritedProtocols(const Decl *CDecl, + llvm::SmallPtrSet &Protocols) { + if (const auto *OI = dyn_cast(CDecl)) { + // We can use protocol_iterator here instead of + // all_referenced_protocol_iterator since we are walking all categories. + for (auto *Proto : OI->all_referenced_protocols()) { + CollectInheritedProtocols(Proto, Protocols); + } + + // Categories of this Interface. + for (const auto *Cat : OI->visible_categories()) + CollectInheritedProtocols(Cat, Protocols); + + if (ObjCInterfaceDecl *SD = OI->getSuperClass()) + while (SD) { + CollectInheritedProtocols(SD, Protocols); + SD = SD->getSuperClass(); + } + } else if (const auto *OC = dyn_cast(CDecl)) { + for (auto *Proto : OC->protocols()) { + CollectInheritedProtocols(Proto, Protocols); + } + } else if (const auto *OP = dyn_cast(CDecl)) { + // Insert the protocol. 
+ if (!Protocols.insert( + const_cast(OP->getCanonicalDecl())).second) + return; + + for (auto *Proto : OP->protocols()) + CollectInheritedProtocols(Proto, Protocols); + } + } + + static bool unionHasUniqueObjectRepresentations(const ASTContext &Context, + const RecordDecl *RD) { + assert(RD->isUnion() && "Must be union type"); + CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl()); + + for (const auto *Field : RD->fields()) { + if (!Context.hasUniqueObjectRepresentations(Field->getType())) + return false; + CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType()); + if (FieldSize != UnionSize) + return false; + } + return !RD->field_empty(); + } + + static bool isStructEmpty(QualType Ty) { + const RecordDecl *RD = Ty->castAs()->getDecl(); + + if (!RD->field_empty()) + return false; + + if (const auto *ClassDecl = dyn_cast(RD)) + return ClassDecl->isEmpty(); + + return true; + } + + static llvm::Optional + structHasUniqueObjectRepresentations(const ASTContext &Context, + const RecordDecl *RD) { + assert(!RD->isUnion() && "Must be struct/class type"); + const auto &Layout = Context.getASTRecordLayout(RD); + + int64_t CurOffsetInBits = 0; + if (const auto *ClassDecl = dyn_cast(RD)) { + if (ClassDecl->isDynamicClass()) + return llvm::None; + + SmallVector, 4> Bases; + for (const auto &Base : ClassDecl->bases()) { + // Empty types can be inherited from, and non-empty types can potentially + // have tail padding, so just make sure there isn't an error. + if (!isStructEmpty(Base.getType())) { + llvm::Optional Size = structHasUniqueObjectRepresentations( + Context, Base.getType()->castAs()->getDecl()); + if (!Size) + return llvm::None; + Bases.emplace_back(Base.getType(), Size.getValue()); + } + } + + llvm::sort(Bases, [&](const std::pair &L, + const std::pair &R) { + return Layout.getBaseClassOffset(L.first->getAsCXXRecordDecl()) < + Layout.getBaseClassOffset(R.first->getAsCXXRecordDecl()); + }); + + for (const auto &Base : Bases) { + int64_t BaseOffset = Context.toBits( + Layout.getBaseClassOffset(Base.first->getAsCXXRecordDecl())); + int64_t BaseSize = Base.second; + if (BaseOffset != CurOffsetInBits) + return llvm::None; + CurOffsetInBits = BaseOffset + BaseSize; + } + } + + for (const auto *Field : RD->fields()) { + if (!Field->getType()->isReferenceType() && + !Context.hasUniqueObjectRepresentations(Field->getType())) + return llvm::None; + + int64_t FieldSizeInBits = + Context.toBits(Context.getTypeSizeInChars(Field->getType())); + if (Field->isBitField()) { + int64_t BitfieldSize = Field->getBitWidthValue(Context); + + if (BitfieldSize > FieldSizeInBits) + return llvm::None; + FieldSizeInBits = BitfieldSize; + } + + int64_t FieldOffsetInBits = Context.getFieldOffset(Field); + + if (FieldOffsetInBits != CurOffsetInBits) + return llvm::None; + + CurOffsetInBits = FieldSizeInBits + FieldOffsetInBits; + } + + return CurOffsetInBits; + } + + bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const { + // C++17 [meta.unary.prop]: + // The predicate condition for a template specialization + // has_unique_object_representations shall be + // satisfied if and only if: + // (9.1) - T is trivially copyable, and + // (9.2) - any two objects of type T with the same value have the same + // object representation, where two objects + // of array or non-union class type are considered to have the same value + // if their respective sequences of + // direct subobjects have the same values, and two objects of union type + // are considered to have the same + // value 
if they have the same active member and the corresponding members + // have the same value. + // The set of scalar types for which this condition holds is + // implementation-defined. [ Note: If a type has padding + // bits, the condition does not hold; otherwise, the condition holds true + // for unsigned integral types. -- end note ] + assert(!Ty.isNull() && "Null QualType sent to unique object rep check"); + + // Arrays are unique only if their element type is unique. + if (Ty->isArrayType()) + return hasUniqueObjectRepresentations(getBaseElementType(Ty)); + + // (9.1) - T is trivially copyable... + if (!Ty.isTriviallyCopyableType(*this)) + return false; + + // All integrals and enums are unique. + if (Ty->isIntegralOrEnumerationType()) + return true; + + // All other pointers are unique. + if (Ty->isPointerType()) + return true; + + if (Ty->isMemberPointerType()) { + const auto *MPT = Ty->getAs(); + return !ABI->getMemberPointerInfo(MPT).HasPadding; + } + + if (Ty->isRecordType()) { + const RecordDecl *Record = Ty->castAs()->getDecl(); + + if (Record->isInvalidDecl()) + return false; + + if (Record->isUnion()) + return unionHasUniqueObjectRepresentations(*this, Record); + + Optional StructSize = + structHasUniqueObjectRepresentations(*this, Record); + + return StructSize && + StructSize.getValue() == static_cast(getTypeSize(Ty)); + } + + // FIXME: More cases to handle here (list by rsmith): + // vectors (careful about, eg, vector of 3 foo) + // _Complex int and friends + // _Atomic T + // Obj-C block pointers + // Obj-C object pointers + // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t, + // clk_event_t, queue_t, reserve_id_t) + // There're also Obj-C class types and the Obj-C selector type, but I think it + // makes sense for those to return false here. + + return false; + } + + unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const { + unsigned count = 0; + // Count ivars declared in class extension. + for (const auto *Ext : OI->known_extensions()) + count += Ext->ivar_size(); + + // Count ivar defined in this class's implementation. This + // includes synthesized ivars. + if (ObjCImplementationDecl *ImplDecl = OI->getImplementation()) + count += ImplDecl->ivar_size(); + + return count; + } + + bool ASTContext::isSentinelNullExpr(const Expr *E) { + if (!E) + return false; + + // nullptr_t is always treated as null. + if (E->getType()->isNullPtrType()) return true; + + if (E->getType()->isAnyPointerType() && + E->IgnoreParenCasts()->isNullPointerConstant(*this, + Expr::NPC_ValueDependentIsNull)) + return true; + + // Unfortunately, __null has type 'int'. + if (isa(E)) return true; + + return false; + } + + /// Get the implementation of ObjCInterfaceDecl, or nullptr if none + /// exists. + ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) { + llvm::DenseMap::iterator + I = ObjCImpls.find(D); + if (I != ObjCImpls.end()) + return cast(I->second); + return nullptr; + } + + /// Get the implementation of ObjCCategoryDecl, or nullptr if none + /// exists. + ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) { + llvm::DenseMap::iterator + I = ObjCImpls.find(D); + if (I != ObjCImpls.end()) + return cast(I->second); + return nullptr; + } + + /// Set the implementation of ObjCInterfaceDecl. 
+ void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD, + ObjCImplementationDecl *ImplD) { + assert(IFaceD && ImplD && "Passed null params"); + ObjCImpls[IFaceD] = ImplD; + } + + /// Set the implementation of ObjCCategoryDecl. + void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD, + ObjCCategoryImplDecl *ImplD) { + assert(CatD && ImplD && "Passed null params"); + ObjCImpls[CatD] = ImplD; + } + + const ObjCMethodDecl * + ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const { + return ObjCMethodRedecls.lookup(MD); + } + + void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD, + const ObjCMethodDecl *Redecl) { + assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration"); + ObjCMethodRedecls[MD] = Redecl; + } + + const ObjCInterfaceDecl *ASTContext::getObjContainingInterface( + const NamedDecl *ND) const { + if (const auto *ID = dyn_cast(ND->getDeclContext())) + return ID; + if (const auto *CD = dyn_cast(ND->getDeclContext())) + return CD->getClassInterface(); + if (const auto *IMD = dyn_cast(ND->getDeclContext())) + return IMD->getClassInterface(); + + return nullptr; + } + + /// Get the copy initialization expression of VarDecl, or nullptr if + /// none exists. + BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const { + assert(VD && "Passed null params"); + assert(VD->hasAttr() && + "getBlockVarCopyInits - not __block var"); + auto I = BlockVarCopyInits.find(VD); + if (I != BlockVarCopyInits.end()) + return I->second; + return {nullptr, false}; + } + + /// Set the copy initialization expression of a block var decl. + void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr, + bool CanThrow) { + assert(VD && CopyExpr && "Passed null params"); + assert(VD->hasAttr() && + "setBlockVarCopyInits - not __block var"); + BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow); + } + + TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T, + unsigned DataSize) const { + if (!DataSize) + DataSize = TypeLoc::getFullDataSizeForType(T); + else + assert(DataSize == TypeLoc::getFullDataSizeForType(T) && + "incorrect data size provided to CreateTypeSourceInfo!"); + + auto *TInfo = + (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8); + new (TInfo) TypeSourceInfo(T); + return TInfo; + } + + TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T, + SourceLocation L) const { + TypeSourceInfo *DI = CreateTypeSourceInfo(T); + DI->getTypeLoc().initialize(const_cast(*this), L); + return DI; + } + + const ASTRecordLayout & + ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const { + return getObjCLayout(D, nullptr); + } + + const ASTRecordLayout & + ASTContext::getASTObjCImplementationLayout( + const ObjCImplementationDecl *D) const { + return getObjCLayout(D->getClassInterface(), D); + } + + //===----------------------------------------------------------------------===// + // Type creation/memoization methods + //===----------------------------------------------------------------------===// + + QualType + ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const { + unsigned fastQuals = quals.getFastQualifiers(); + quals.removeFastQualifiers(); + + // Check if we've already instantiated this type. 
+ llvm::FoldingSetNodeID ID; + ExtQuals::Profile(ID, baseType, quals); + void *insertPos = nullptr; + if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) { + assert(eq->getQualifiers() == quals); + return QualType(eq, fastQuals); + } + + // If the base type is not canonical, make the appropriate canonical type. + QualType canon; + if (!baseType->isCanonicalUnqualified()) { + SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split(); + canonSplit.Quals.addConsistentQualifiers(quals); + canon = getExtQualType(canonSplit.Ty, canonSplit.Quals); + + // Re-find the insert position. + (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos); + } + + auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals); + ExtQualNodes.InsertNode(eq, insertPos); + return QualType(eq, fastQuals); + } + + QualType ASTContext::getAddrSpaceQualType(QualType T, + LangAS AddressSpace) const { + QualType CanT = getCanonicalType(T); + if (CanT.getAddressSpace() == AddressSpace) + return T; + + // If we are composing extended qualifiers together, merge together + // into one ExtQuals node. + QualifierCollector Quals; + const Type *TypeNode = Quals.strip(T); + + // If this type already has an address space specified, it cannot get + // another one. + assert(!Quals.hasAddressSpace() && + "Type cannot be in multiple addr spaces!"); + Quals.addAddressSpace(AddressSpace); + + return getExtQualType(TypeNode, Quals); + } + + QualType ASTContext::removeAddrSpaceQualType(QualType T) const { + // If the type is not qualified with an address space, just return it + // immediately. + if (!T.hasAddressSpace()) + return T; + + // If we are composing extended qualifiers together, merge together + // into one ExtQuals node. + QualifierCollector Quals; + const Type *TypeNode; + + while (T.hasAddressSpace()) { + TypeNode = Quals.strip(T); + + // If the type no longer has an address space after stripping qualifiers, + // jump out. + if (!QualType(TypeNode, 0).hasAddressSpace()) + break; + + // There might be sugar in the way. Strip it and try again. + T = T.getSingleStepDesugaredType(*this); + } + + Quals.removeAddressSpace(); + + // Removal of the address space can mean there are no longer any + // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts) + // or required. + if (Quals.hasNonFastQualifiers()) + return getExtQualType(TypeNode, Quals); + else + return QualType(TypeNode, Quals.getFastQualifiers()); + } + + QualType ASTContext::getObjCGCQualType(QualType T, + Qualifiers::GC GCAttr) const { + QualType CanT = getCanonicalType(T); + if (CanT.getObjCGCAttr() == GCAttr) + return T; + + if (const auto *ptr = T->getAs()) { + QualType Pointee = ptr->getPointeeType(); + if (Pointee->isAnyPointerType()) { + QualType ResultType = getObjCGCQualType(Pointee, GCAttr); + return getPointerType(ResultType); + } + } + + // If we are composing extended qualifiers together, merge together + // into one ExtQuals node. + QualifierCollector Quals; + const Type *TypeNode = Quals.strip(T); + + // If this type already has an ObjCGC specified, it cannot get + // another one. 
+ assert(!Quals.hasObjCGCAttr() && + "Type cannot have multiple ObjCGCs!"); + Quals.addObjCGCAttr(GCAttr); + + return getExtQualType(TypeNode, Quals); + } + + QualType ASTContext::removePtrSizeAddrSpace(QualType T) const { + if (const PointerType *Ptr = T->getAs()) { + QualType Pointee = Ptr->getPointeeType(); + if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) { + return getPointerType(removeAddrSpaceQualType(Pointee)); + } + } + return T; + } + + const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T, + FunctionType::ExtInfo Info) { + if (T->getExtInfo() == Info) + return T; + + QualType Result; + if (const auto *FNPT = dyn_cast(T)) { + Result = getFunctionNoProtoType(FNPT->getReturnType(), Info); + } else { + const auto *FPT = cast(T); + FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); + EPI.ExtInfo = Info; + Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI); + } + + return cast(Result.getTypePtr()); + } + + void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD, + QualType ResultType) { + FD = FD->getMostRecentDecl(); + while (true) { + const auto *FPT = FD->getType()->castAs(); + FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); + FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI)); + if (FunctionDecl *Next = FD->getPreviousDecl()) + FD = Next; + else + break; + } + if (ASTMutationListener *L = getASTMutationListener()) + L->DeducedReturnType(FD, ResultType); + } + + /// Get a function type and produce the equivalent function type with the + /// specified exception specification. Type sugar that can be present on a + /// declaration of a function with an exception specification is permitted + /// and preserved. Other type sugar (for instance, typedefs) is not. + QualType ASTContext::getFunctionTypeWithExceptionSpec( + QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) { + // Might have some parens. + if (const auto *PT = dyn_cast(Orig)) + return getParenType( + getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI)); + + // Might be wrapped in a macro qualified type. + if (const auto *MQT = dyn_cast(Orig)) + return getMacroQualifiedType( + getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI), + MQT->getMacroIdentifier()); + + // Might have a calling-convention attribute. + if (const auto *AT = dyn_cast(Orig)) + return getAttributedType( + AT->getAttrKind(), + getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI), + getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI)); + + // Anything else must be a function type. Rebuild it with the new exception + // specification. 
+ const auto *Proto = Orig->castAs(); + return getFunctionType( + Proto->getReturnType(), Proto->getParamTypes(), + Proto->getExtProtoInfo().withExceptionSpec(ESI)); + } + + bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T, + QualType U) { + return hasSameType(T, U) || + (getLangOpts().CPlusPlus17 && + hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None), + getFunctionTypeWithExceptionSpec(U, EST_None))); + } + + QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) { + if (const auto *Proto = T->getAs()) { + QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); + SmallVector Args(Proto->param_types()); + for (unsigned i = 0, n = Args.size(); i != n; ++i) + Args[i] = removePtrSizeAddrSpace(Args[i]); + return getFunctionType(RetTy, Args, Proto->getExtProtoInfo()); + } + + if (const FunctionNoProtoType *Proto = T->getAs()) { + QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); + return getFunctionNoProtoType(RetTy, Proto->getExtInfo()); + } + + return T; + } + + bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) { + return hasSameType(T, U) || + hasSameType(getFunctionTypeWithoutPtrSizes(T), + getFunctionTypeWithoutPtrSizes(U)); + } + + void ASTContext::adjustExceptionSpec( + FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI, + bool AsWritten) { + // Update the type. + QualType Updated = + getFunctionTypeWithExceptionSpec(FD->getType(), ESI); + FD->setType(Updated); + + if (!AsWritten) + return; + + // Update the type in the type source information too. + if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) { + // If the type and the type-as-written differ, we may need to update + // the type-as-written too. + if (TSInfo->getType() != FD->getType()) + Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI); + + // FIXME: When we get proper type location information for exceptions, + // we'll also have to rebuild the TypeSourceInfo. For now, we just patch + // up the TypeSourceInfo; + assert(TypeLoc::getFullDataSizeForType(Updated) == + TypeLoc::getFullDataSizeForType(TSInfo->getType()) && + "TypeLoc size mismatch from updating exception specification"); + TSInfo->overrideType(Updated); + } + } + + /// getComplexType - Return the uniqued reference to the type for a complex + /// number with the specified element type. + QualType ASTContext::getComplexType(QualType T) const { + // Unique pointers, to guarantee there is only one pointer of a particular + // structure. + llvm::FoldingSetNodeID ID; + ComplexType::Profile(ID, T); + + void *InsertPos = nullptr; + if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos)) + return QualType(CT, 0); + + // If the pointee type isn't canonical, this won't be a canonical type either, + // so fill in the canonical type field. + QualType Canonical; + if (!T.isCanonical()) { + Canonical = getComplexType(getCanonicalType(T)); + + // Get the new insert position for the node we care about. + ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos); + assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; + } + auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical); + Types.push_back(New); + ComplexTypes.InsertNode(New, InsertPos); + return QualType(New, 0); + } + + /// getPointerType - Return the uniqued reference to the type for a pointer to + /// the specified type. 
+ bool hasError; + Canonical = getCanonicalType(applyObjCProtocolQualifiers( + Canonical, protocols, hasError, true /*allowOnPointerType*/)); + assert(!hasError && "Error when apply protocol qualifier to bound type"); + } + + unsigned size = sizeof(ObjCTypeParamType); + size += protocols.size() * sizeof(ObjCProtocolDecl *); + void *mem = Allocate(size, TypeAlignment); + auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols); + + Types.push_back(newType); + ObjCTypeParamTypes.InsertNode(newType, InsertPos); + return QualType(newType, 0); + } + + void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig, + ObjCTypeParamDecl *New) const { + New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType())); + // Update TypeForDecl after updating TypeSourceInfo. + auto NewTypeParamTy = cast(New->getTypeForDecl()); + SmallVector protocols; + protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end()); + QualType UpdatedTy = getObjCTypeParamType(New, protocols); + New->setTypeForDecl(UpdatedTy.getTypePtr()); + } + + /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's + /// protocol list adopt all protocols in QT's qualified-id protocol + /// list. + bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT, + ObjCInterfaceDecl *IC) { + if (!QT->isObjCQualifiedIdType()) + return false; + + if (const auto *OPT = QT->getAs()) { + // If both the right and left sides have qualifiers. + for (auto *Proto : OPT->quals()) { + if (!IC->ClassImplementsProtocol(Proto, false)) + return false; + } + return true; + } + return false; + } + + /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in + /// QT's qualified-id protocol list adopt all protocols in IDecl's list + /// of protocols. + bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT, + ObjCInterfaceDecl *IDecl) { + if (!QT->isObjCQualifiedIdType()) + return false; + const auto *OPT = QT->getAs(); + if (!OPT) + return false; + if (!IDecl->hasDefinition()) + return false; + llvm::SmallPtrSet InheritedProtocols; + CollectInheritedProtocols(IDecl, InheritedProtocols); + if (InheritedProtocols.empty()) + return false; + // Check that if every protocol in list of id conforms to a protocol + // of IDecl's, then bridge casting is ok. + bool Conforms = false; + for (auto *Proto : OPT->quals()) { + Conforms = false; + for (auto *PI : InheritedProtocols) { + if (ProtocolCompatibleWithProtocol(Proto, PI)) { + Conforms = true; + break; + } + } + if (!Conforms) + break; + } + if (Conforms) + return true; + + for (auto *PI : InheritedProtocols) { + // If both the right and left sides have qualifiers. + bool Adopts = false; + for (auto *Proto : OPT->quals()) { + // return 'true' if 'PI' is in the inheritance hierarchy of Proto + if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto))) + break; + } + if (!Adopts) + return false; + } + return true; + } + + /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for + /// the given object type. + QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const { + llvm::FoldingSetNodeID ID; + ObjCObjectPointerType::Profile(ID, ObjectT); + + void *InsertPos = nullptr; + if (ObjCObjectPointerType *QT = + ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) + return QualType(QT, 0); + + // Find the canonical object type. + QualType Canonical; + if (!ObjectT.isCanonical()) { + Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT)); + + // Regenerate InsertPos. 
+ ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos); + } + + // No match. + void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment); + auto *QType = + new (Mem) ObjCObjectPointerType(Canonical, ObjectT); + + Types.push_back(QType); + ObjCObjectPointerTypes.InsertNode(QType, InsertPos); + return QualType(QType, 0); + } + + /// getObjCInterfaceType - Return the unique reference to the type for the + /// specified ObjC interface decl. The list of protocols is optional. + QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl, + ObjCInterfaceDecl *PrevDecl) const { + if (Decl->TypeForDecl) + return QualType(Decl->TypeForDecl, 0); + + if (PrevDecl) { + assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl"); + Decl->TypeForDecl = PrevDecl->TypeForDecl; + return QualType(PrevDecl->TypeForDecl, 0); + } + + // Prefer the definition, if there is one. + if (const ObjCInterfaceDecl *Def = Decl->getDefinition()) + Decl = Def; + + void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment); + auto *T = new (Mem) ObjCInterfaceType(Decl); + Decl->TypeForDecl = T; + Types.push_back(T); + return QualType(T, 0); + } + + /// getTypeOfExprType - Unlike many "get" functions, we can't unique + /// TypeOfExprType AST's (since expression's are never shared). For example, + /// multiple declarations that refer to "typeof(x)" all contain different + /// DeclRefExpr's. This doesn't effect the type checker, since it operates + /// on canonical type's (which are always unique). + QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const { + TypeOfExprType *toe; + if (tofExpr->isTypeDependent()) { + llvm::FoldingSetNodeID ID; + DependentTypeOfExprType::Profile(ID, *this, tofExpr); + + void *InsertPos = nullptr; + DependentTypeOfExprType *Canon + = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos); + if (Canon) { + // We already have a "canonical" version of an identical, dependent + // typeof(expr) type. Use that as our canonical type. + toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, + QualType((TypeOfExprType*)Canon, 0)); + } else { + // Build a new, canonical typeof(expr) type. + Canon + = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr); + DependentTypeOfExprTypes.InsertNode(Canon, InsertPos); + toe = Canon; + } + } else { + QualType Canonical = getCanonicalType(tofExpr->getType()); + toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical); + } + Types.push_back(toe); + return QualType(toe, 0); + } + + /// getTypeOfType - Unlike many "get" functions, we don't unique + /// TypeOfType nodes. The only motivation to unique these nodes would be + /// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be + /// an issue. This doesn't affect the type checker, since it operates + /// on canonical types (which are always unique). + QualType ASTContext::getTypeOfType(QualType tofType) const { + QualType Canonical = getCanonicalType(tofType); + auto *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical); + Types.push_back(tot); + return QualType(tot, 0); + } + + /// Unlike many "get" functions, we don't unique DecltypeType + /// nodes. This would never be helpful, since each such type has its own + /// expression, and would not give a significant memory saving, since there + /// is an Expr tree under each such type. 
+ QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const { + DecltypeType *dt; + + // C++11 [temp.type]p2: + // If an expression e involves a template parameter, decltype(e) denotes a + // unique dependent type. Two such decltype-specifiers refer to the same + // type only if their expressions are equivalent (14.5.6.1). + if (e->isInstantiationDependent()) { + llvm::FoldingSetNodeID ID; + DependentDecltypeType::Profile(ID, *this, e); + + void *InsertPos = nullptr; + DependentDecltypeType *Canon + = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos); + if (!Canon) { + // Build a new, canonical decltype(expr) type. + Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e); + DependentDecltypeTypes.InsertNode(Canon, InsertPos); + } + dt = new (*this, TypeAlignment) + DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0)); + } else { + dt = new (*this, TypeAlignment) + DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType)); + } + Types.push_back(dt); + return QualType(dt, 0); + } + + /// getUnaryTransformationType - We don't unique these, since the memory + /// savings are minimal and these are rare. + QualType ASTContext::getUnaryTransformType(QualType BaseType, + QualType UnderlyingType, + UnaryTransformType::UTTKind Kind) + const { + UnaryTransformType *ut = nullptr; + + if (BaseType->isDependentType()) { + // Look in the folding set for an existing type. + llvm::FoldingSetNodeID ID; + DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind); + + void *InsertPos = nullptr; + DependentUnaryTransformType *Canon + = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos); + + if (!Canon) { + // Build a new, canonical __underlying_type(type) type. + Canon = new (*this, TypeAlignment) + DependentUnaryTransformType(*this, getCanonicalType(BaseType), + Kind); + DependentUnaryTransformTypes.InsertNode(Canon, InsertPos); + } + ut = new (*this, TypeAlignment) UnaryTransformType (BaseType, + QualType(), Kind, + QualType(Canon, 0)); + } else { + QualType CanonType = getCanonicalType(UnderlyingType); + ut = new (*this, TypeAlignment) UnaryTransformType (BaseType, + UnderlyingType, Kind, + CanonType); + } + Types.push_back(ut); + return QualType(ut, 0); + } + + /// getAutoType - Return the uniqued reference to the 'auto' type which has been + /// deduced to the given type, or to the canonical undeduced 'auto' type, or the + /// canonical deduced-but-dependent 'auto' type. + QualType + ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword, + bool IsDependent, bool IsPack, + ConceptDecl *TypeConstraintConcept, + ArrayRef TypeConstraintArgs) const { + assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack"); + if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && + !TypeConstraintConcept && !IsDependent) + return getAutoDeductType(); + + // Look in the folding set for an existing type. + void *InsertPos = nullptr; + llvm::FoldingSetNodeID ID; + AutoType::Profile(ID, *this, DeducedType, Keyword, IsDependent, + TypeConstraintConcept, TypeConstraintArgs); + if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos)) + return QualType(AT, 0); + + void *Mem = Allocate(sizeof(AutoType) + + sizeof(TemplateArgument) * TypeConstraintArgs.size(), + TypeAlignment); + auto *AT = new (Mem) AutoType( + DeducedType, Keyword, + (IsDependent ? TypeDependence::DependentInstantiation + : TypeDependence::None) | + (IsPack ? 
TypeDependence::UnexpandedPack : TypeDependence::None), + TypeConstraintConcept, TypeConstraintArgs); + Types.push_back(AT); + if (InsertPos) + AutoTypes.InsertNode(AT, InsertPos); + return QualType(AT, 0); + } + + /// Return the uniqued reference to the deduced template specialization type + /// which has been deduced to the given type, or to the canonical undeduced + /// such type, or the canonical deduced-but-dependent such type. + QualType ASTContext::getDeducedTemplateSpecializationType( + TemplateName Template, QualType DeducedType, bool IsDependent) const { + // Look in the folding set for an existing type. + void *InsertPos = nullptr; + llvm::FoldingSetNodeID ID; + DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType, + IsDependent); + if (DeducedTemplateSpecializationType *DTST = + DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos)) + return QualType(DTST, 0); + + auto *DTST = new (*this, TypeAlignment) + DeducedTemplateSpecializationType(Template, DeducedType, IsDependent); + Types.push_back(DTST); + if (InsertPos) + DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos); + return QualType(DTST, 0); + } + + /// getAtomicType - Return the uniqued reference to the atomic type for + /// the given value type. + QualType ASTContext::getAtomicType(QualType T) const { + // Unique pointers, to guarantee there is only one pointer of a particular + // structure. + llvm::FoldingSetNodeID ID; + AtomicType::Profile(ID, T); + + void *InsertPos = nullptr; + if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos)) + return QualType(AT, 0); + + // If the atomic value type isn't canonical, this won't be a canonical type + // either, so fill in the canonical type field. + QualType Canonical; + if (!T.isCanonical()) { + Canonical = getAtomicType(getCanonicalType(T)); + + // Get the new insert position for the node we care about. + AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos); + assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; + } + auto *New = new (*this, TypeAlignment) AtomicType(T, Canonical); + Types.push_back(New); + AtomicTypes.InsertNode(New, InsertPos); + return QualType(New, 0); + } + + /// getAutoDeductType - Get type pattern for deducing against 'auto'. + QualType ASTContext::getAutoDeductType() const { + if (AutoDeductTy.isNull()) + AutoDeductTy = QualType(new (*this, TypeAlignment) + AutoType(QualType(), AutoTypeKeyword::Auto, + TypeDependence::None, + /*concept*/ nullptr, /*args*/ {}), + 0); + return AutoDeductTy; + } + + /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'. + QualType ASTContext::getAutoRRefDeductType() const { + if (AutoRRefDeductTy.isNull()) + AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType()); + assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern"); + return AutoRRefDeductTy; + } + + /// getTagDeclType - Return the unique reference to the type for the + /// specified TagDecl (struct/union/class/enum) decl. + QualType ASTContext::getTagDeclType(const TagDecl *Decl) const { + assert(Decl); + // FIXME: What is the design on getTagDeclType when it requires casting + // away const? mutable? + return getTypeDeclType(const_cast(Decl)); + } + + /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result + /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and + /// needs to agree with the definition in . 
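+ /// For example (a sketch, assuming an initialized ASTContext &Ctx for an
+ /// LP64 target such as x86_64-linux, where size_t is unsigned long):
+ ///
+ /// \code
+ ///   CanQualType SizeT = Ctx.getSizeType();
+ ///   assert(Ctx.hasSameType(SizeT, Ctx.UnsignedLongTy));
+ /// \endcode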
+ CanQualType ASTContext::getSizeType() const { + return getFromTargetType(Target->getSizeType()); + } + + /// Return the unique signed counterpart of the integer type + /// corresponding to size_t. + CanQualType ASTContext::getSignedSizeType() const { + return getFromTargetType(Target->getSignedSizeType()); + } + + /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5). + CanQualType ASTContext::getIntMaxType() const { + return getFromTargetType(Target->getIntMaxType()); + } + + /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5). + CanQualType ASTContext::getUIntMaxType() const { + return getFromTargetType(Target->getUIntMaxType()); + } + + /// getSignedWCharType - Return the type of "signed wchar_t". + /// Used when in C++, as a GCC extension. + QualType ASTContext::getSignedWCharType() const { + // FIXME: derive from "Target" ? + return WCharTy; + } + + /// getUnsignedWCharType - Return the type of "unsigned wchar_t". + /// Used when in C++, as a GCC extension. + QualType ASTContext::getUnsignedWCharType() const { + // FIXME: derive from "Target" ? + return UnsignedIntTy; + } + + QualType ASTContext::getIntPtrType() const { + return getFromTargetType(Target->getIntPtrType()); + } + + QualType ASTContext::getUIntPtrType() const { + return getCorrespondingUnsignedType(getIntPtrType()); + } + + /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17) + /// defined in . Pointer - pointer requires this (C99 6.5.6p9). + QualType ASTContext::getPointerDiffType() const { + return getFromTargetType(Target->getPtrDiffType(0)); + } + + /// Return the unique unsigned counterpart of "ptrdiff_t" + /// integer type. The standard (C11 7.21.6.1p7) refers to this type + /// in the definition of %tu format specifier. + QualType ASTContext::getUnsignedPointerDiffType() const { + return getFromTargetType(Target->getUnsignedPtrDiffType(0)); + } + + /// Return the unique type for "pid_t" defined in + /// . We need this to compute the correct type for vfork(). + QualType ASTContext::getProcessIDType() const { + return getFromTargetType(Target->getProcessIDType()); + } + + //===----------------------------------------------------------------------===// + // Type Operators + //===----------------------------------------------------------------------===// + + CanQualType ASTContext::getCanonicalParamType(QualType T) const { + // Push qualifiers into arrays, and then discard any remaining + // qualifiers. + T = getCanonicalType(T); + T = getVariableArrayDecayedType(T); + const Type *Ty = T.getTypePtr(); + QualType Result; + if (isa(Ty)) { + Result = getArrayDecayedType(QualType(Ty,0)); + } else if (isa(Ty)) { + Result = getPointerType(QualType(Ty, 0)); + } else { + Result = QualType(Ty, 0); + } + + return CanQualType::CreateUnsafe(Result); + } + + QualType ASTContext::getUnqualifiedArrayType(QualType type, + Qualifiers &quals) { + SplitQualType splitType = type.getSplitUnqualifiedType(); + + // FIXME: getSplitUnqualifiedType() actually walks all the way to + // the unqualified desugared type and then drops it on the floor. + // We then have to strip that sugar back off with + // getUnqualifiedDesugaredType(), which is silly. + const auto *AT = + dyn_cast(splitType.Ty->getUnqualifiedDesugaredType()); + + // If we don't have an array, just use the results in splitType. + if (!AT) { + quals = splitType.Quals; + return QualType(splitType.Ty, 0); + } + + // Otherwise, recurse on the array's element type. 
+ QualType elementType = AT->getElementType(); + QualType unqualElementType = getUnqualifiedArrayType(elementType, quals); + + // If that didn't change the element type, AT has no qualifiers, so we + // can just use the results in splitType. + if (elementType == unqualElementType) { + assert(quals.empty()); // from the recursive call + quals = splitType.Quals; + return QualType(splitType.Ty, 0); + } + + // Otherwise, add in the qualifiers from the outermost type, then + // build the type back up. + quals.addConsistentQualifiers(splitType.Quals); + + if (const auto *CAT = dyn_cast(AT)) { + return getConstantArrayType(unqualElementType, CAT->getSize(), + CAT->getSizeExpr(), CAT->getSizeModifier(), 0); + } + + if (const auto *IAT = dyn_cast(AT)) { + return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0); + } + + if (const auto *VAT = dyn_cast(AT)) { + return getVariableArrayType(unqualElementType, + VAT->getSizeExpr(), + VAT->getSizeModifier(), + VAT->getIndexTypeCVRQualifiers(), + VAT->getBracketsRange()); + } + + const auto *DSAT = cast(AT); + return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(), + DSAT->getSizeModifier(), 0, + SourceRange()); + } + + /// Attempt to unwrap two types that may both be array types with the same bound + /// (or both be array types of unknown bound) for the purpose of comparing the + /// cv-decomposition of two types per C++ [conv.qual]. + bool ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2) { + bool UnwrappedAny = false; + while (true) { + auto *AT1 = getAsArrayType(T1); + if (!AT1) return UnwrappedAny; + + auto *AT2 = getAsArrayType(T2); + if (!AT2) return UnwrappedAny; + + // If we don't have two array types with the same constant bound nor two + // incomplete array types, we've unwrapped everything we can. + if (auto *CAT1 = dyn_cast(AT1)) { + auto *CAT2 = dyn_cast(AT2); + if (!CAT2 || CAT1->getSize() != CAT2->getSize()) + return UnwrappedAny; + } else if (!isa(AT1) || + !isa(AT2)) { + return UnwrappedAny; + } + + T1 = AT1->getElementType(); + T2 = AT2->getElementType(); + UnwrappedAny = true; + } + } + + /// Attempt to unwrap two types that may be similar (C++ [conv.qual]). + /// + /// If T1 and T2 are both pointer types of the same kind, or both array types + /// with the same bound, unwraps layers from T1 and T2 until a pointer type is + /// unwrapped. Top-level qualifiers on T1 and T2 are ignored. + /// + /// This function will typically be called in a loop that successively + /// "unwraps" pointer and pointer-to-member types to compare them at each + /// level. + /// + /// \return \c true if a pointer type was unwrapped, \c false if we reached a + /// pair of types that can't be unwrapped further. 
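+ /// For example (illustrative): starting from T1 = 'const int **' and
+ /// T2 = 'int **', each call strips one pointer level until the loop
+ /// reaches 'const int' vs. 'int' and returns false:
+ ///
+ /// \code
+ ///   while (Ctx.UnwrapSimilarTypes(T1, T2)) {
+ ///     // compare the qualifiers of T1 and T2 at this level
+ ///     // (the cv-decomposition of C++ [conv.qual])
+ ///   }
+ /// \endcode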
+ bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2) { + UnwrapSimilarArrayTypes(T1, T2); + + const auto *T1PtrType = T1->getAs(); + const auto *T2PtrType = T2->getAs(); + if (T1PtrType && T2PtrType) { + T1 = T1PtrType->getPointeeType(); + T2 = T2PtrType->getPointeeType(); + return true; + } + + const auto *T1MPType = T1->getAs(); + const auto *T2MPType = T2->getAs(); + if (T1MPType && T2MPType && + hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0), + QualType(T2MPType->getClass(), 0))) { + T1 = T1MPType->getPointeeType(); + T2 = T2MPType->getPointeeType(); + return true; + } + + if (getLangOpts().ObjC) { + const auto *T1OPType = T1->getAs(); + const auto *T2OPType = T2->getAs(); + if (T1OPType && T2OPType) { + T1 = T1OPType->getPointeeType(); + T2 = T2OPType->getPointeeType(); + return true; + } + } + + // FIXME: Block pointers, too? + + return false; + } + + bool ASTContext::hasSimilarType(QualType T1, QualType T2) { + while (true) { + Qualifiers Quals; + T1 = getUnqualifiedArrayType(T1, Quals); + T2 = getUnqualifiedArrayType(T2, Quals); + if (hasSameType(T1, T2)) + return true; + if (!UnwrapSimilarTypes(T1, T2)) + return false; + } + } + + bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) { + while (true) { + Qualifiers Quals1, Quals2; + T1 = getUnqualifiedArrayType(T1, Quals1); + T2 = getUnqualifiedArrayType(T2, Quals2); + + Quals1.removeCVRQualifiers(); + Quals2.removeCVRQualifiers(); + if (Quals1 != Quals2) + return false; + + if (hasSameType(T1, T2)) + return true; + + if (!UnwrapSimilarTypes(T1, T2)) + return false; + } + } + + DeclarationNameInfo + ASTContext::getNameForTemplate(TemplateName Name, + SourceLocation NameLoc) const { + switch (Name.getKind()) { + case TemplateName::QualifiedTemplate: + case TemplateName::Template: + // DNInfo work in progress: CHECKME: what about DNLoc? + return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(), + NameLoc); + + case TemplateName::OverloadedTemplate: { + OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate(); + // DNInfo work in progress: CHECKME: what about DNLoc? + return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc); + } + + case TemplateName::AssumedTemplate: { + AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName(); + return DeclarationNameInfo(Storage->getDeclName(), NameLoc); + } + + case TemplateName::DependentTemplate: { + DependentTemplateName *DTN = Name.getAsDependentTemplateName(); + DeclarationName DName; + if (DTN->isIdentifier()) { + DName = DeclarationNames.getIdentifier(DTN->getIdentifier()); + return DeclarationNameInfo(DName, NameLoc); + } else { + DName = DeclarationNames.getCXXOperatorName(DTN->getOperator()); + // DNInfo work in progress: FIXME: source locations? 
+ DeclarationNameLoc DNLoc = + DeclarationNameLoc::makeCXXOperatorNameLoc(SourceRange()); + return DeclarationNameInfo(DName, NameLoc, DNLoc); + } + } + + case TemplateName::SubstTemplateTemplateParm: { + SubstTemplateTemplateParmStorage *subst + = Name.getAsSubstTemplateTemplateParm(); + return DeclarationNameInfo(subst->getParameter()->getDeclName(), + NameLoc); + } + + case TemplateName::SubstTemplateTemplateParmPack: { + SubstTemplateTemplateParmPackStorage *subst + = Name.getAsSubstTemplateTemplateParmPack(); + return DeclarationNameInfo(subst->getParameterPack()->getDeclName(), + NameLoc); + } + } + + llvm_unreachable("bad template name kind!"); + } + + TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const { + switch (Name.getKind()) { + case TemplateName::QualifiedTemplate: + case TemplateName::Template: { + TemplateDecl *Template = Name.getAsTemplateDecl(); + if (auto *TTP = dyn_cast(Template)) + Template = getCanonicalTemplateTemplateParmDecl(TTP); + + // The canonical template name is the canonical template declaration. + return TemplateName(cast(Template->getCanonicalDecl())); + } + + case TemplateName::OverloadedTemplate: + case TemplateName::AssumedTemplate: + llvm_unreachable("cannot canonicalize unresolved template"); + + case TemplateName::DependentTemplate: { + DependentTemplateName *DTN = Name.getAsDependentTemplateName(); + assert(DTN && "Non-dependent template names must refer to template decls."); + return DTN->CanonicalTemplateName; + } + + case TemplateName::SubstTemplateTemplateParm: { + SubstTemplateTemplateParmStorage *subst + = Name.getAsSubstTemplateTemplateParm(); + return getCanonicalTemplateName(subst->getReplacement()); + } + + case TemplateName::SubstTemplateTemplateParmPack: { + SubstTemplateTemplateParmPackStorage *subst + = Name.getAsSubstTemplateTemplateParmPack(); + TemplateTemplateParmDecl *canonParameter + = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack()); + TemplateArgument canonArgPack + = getCanonicalTemplateArgument(subst->getArgumentPack()); + return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack); + } + } + + llvm_unreachable("bad template name!"); + } + + bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) { + X = getCanonicalTemplateName(X); + Y = getCanonicalTemplateName(Y); + return X.getAsVoidPointer() == Y.getAsVoidPointer(); + } + + TemplateArgument + ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const { + switch (Arg.getKind()) { + case TemplateArgument::Null: + return Arg; + + case TemplateArgument::Expression: + return Arg; + + case TemplateArgument::Declaration: { + auto *D = cast(Arg.getAsDecl()->getCanonicalDecl()); + return TemplateArgument(D, Arg.getParamTypeForDecl()); + } + + case TemplateArgument::NullPtr: + return TemplateArgument(getCanonicalType(Arg.getNullPtrType()), + /*isNullPtr*/true); + + case TemplateArgument::Template: + return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate())); + + case TemplateArgument::TemplateExpansion: + return TemplateArgument(getCanonicalTemplateName( + Arg.getAsTemplateOrTemplatePattern()), + Arg.getNumTemplateExpansions()); + + case TemplateArgument::Integral: + return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType())); + + case TemplateArgument::Type: + return TemplateArgument(getCanonicalType(Arg.getAsType())); + + case TemplateArgument::Pack: { + if (Arg.pack_size() == 0) + return Arg; + + auto *CanonArgs = new (*this) TemplateArgument[Arg.pack_size()]; + unsigned Idx = 0; 
+     for (TemplateArgument::pack_iterator A = Arg.pack_begin(),
+                                          AEnd = Arg.pack_end();
+          A != AEnd; (void)++A, ++Idx)
+       CanonArgs[Idx] = getCanonicalTemplateArgument(*A);
+
+     return TemplateArgument(llvm::makeArrayRef(CanonArgs, Arg.pack_size()));
+   }
+   }
+
+   // Silence GCC warning
+   llvm_unreachable("Unhandled template argument kind");
+ }
+
+ NestedNameSpecifier *
+ ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
+   if (!NNS)
+     return nullptr;
+
+   switch (NNS->getKind()) {
+   case NestedNameSpecifier::Identifier:
+     // Canonicalize the prefix but keep the identifier the same.
+     return NestedNameSpecifier::Create(*this,
+                          getCanonicalNestedNameSpecifier(NNS->getPrefix()),
+                                        NNS->getAsIdentifier());
+
+   case NestedNameSpecifier::Namespace:
+     // A namespace is canonical; build a nested-name-specifier with
+     // this namespace and no prefix.
+     return NestedNameSpecifier::Create(*this, nullptr,
+                                NNS->getAsNamespace()->getOriginalNamespace());
+
+   case NestedNameSpecifier::NamespaceAlias:
+     // A namespace is canonical; build a nested-name-specifier with
+     // this namespace and no prefix.
+     return NestedNameSpecifier::Create(*this, nullptr,
+                                    NNS->getAsNamespaceAlias()->getNamespace()
+                                                     ->getOriginalNamespace());
+
+   case NestedNameSpecifier::TypeSpec:
+   case NestedNameSpecifier::TypeSpecWithTemplate: {
+     QualType T = getCanonicalType(QualType(NNS->getAsType(), 0));
+
+     // If we have some kind of dependent-named type (e.g., "typename
+     // T::type"), break it apart into its prefix and identifier, then
+     // reconstitute those as the canonical nested-name-specifier. This is
+     // required to canonicalize a dependent nested-name-specifier involving
+     // typedefs of dependent-name types, e.g.,
+     //   typedef typename T::type T1;
+     //   typedef typename T1::type T2;
+     if (const auto *DNT = T->getAs<DependentNameType>())
+       return NestedNameSpecifier::Create(
+           *this, DNT->getQualifier(),
+           const_cast<IdentifierInfo *>(DNT->getIdentifier()));
+
+     // Otherwise, just canonicalize the type, and force it to be a TypeSpec.
+     // FIXME: Why are TypeSpec and TypeSpecWithTemplate distinct in the
+     // first place?
+     return NestedNameSpecifier::Create(*this, nullptr, false,
+                                        const_cast<Type *>(T.getTypePtr()));
+   }
+
+   case NestedNameSpecifier::Global:
+   case NestedNameSpecifier::Super:
+     // The global specifier and __super specifier are canonical and unique.
+     return NNS;
+   }
+
+   llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
+ }
+
+ const ArrayType *ASTContext::getAsArrayType(QualType T) const {
+   // Handle the non-qualified case efficiently.
+   if (!T.hasLocalQualifiers()) {
+     // Handle the common positive case fast.
+     if (const auto *AT = dyn_cast<ArrayType>(T))
+       return AT;
+   }
+
+   // Handle the common negative case fast.
+   if (!isa<ArrayType>(T.getCanonicalType()))
+     return nullptr;
+
+   // Apply any qualifiers from the array type to the element type. This
+   // implements C99 6.7.3p8: "If the specification of an array type includes
+   // any type qualifiers, the element type is so qualified, not the array
+   // type."
+
+   // If we get here, we either have type qualifiers on the type, or we have
+   // sugar such as a typedef in the way. If we have type qualifiers on the
+   // type, we must propagate them down into the element type.
+
+   SplitQualType split = T.getSplitDesugaredType();
+   Qualifiers qs = split.Quals;
+
+   // If we have a simple case, just return now.
+   const auto *ATy = dyn_cast<ArrayType>(split.Ty);
+   if (!ATy || qs.empty())
+     return ATy;
+
+   // Otherwise, we have an array and we have qualifiers on it. Push the
+   // qualifiers into the array element type and return a new array type.
+   QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs);
+
+   if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy))
+     return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(),
+                                                 CAT->getSizeExpr(),
+                                                 CAT->getSizeModifier(),
+                                           CAT->getIndexTypeCVRQualifiers()));
+   if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy))
+     return cast<ArrayType>(getIncompleteArrayType(NewEltTy,
+                                                   IAT->getSizeModifier(),
+                                           IAT->getIndexTypeCVRQualifiers()));
+
+   if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy))
+     return cast<ArrayType>(
+         getDependentSizedArrayType(NewEltTy,
+                                    DSAT->getSizeExpr(),
+                                    DSAT->getSizeModifier(),
+                                    DSAT->getIndexTypeCVRQualifiers(),
+                                    DSAT->getBracketsRange()));
+
+   const auto *VAT = cast<VariableArrayType>(ATy);
+   return cast<ArrayType>(getVariableArrayType(NewEltTy,
+                                               VAT->getSizeExpr(),
+                                               VAT->getSizeModifier(),
+                                          VAT->getIndexTypeCVRQualifiers(),
+                                               VAT->getBracketsRange()));
+ }
+
+ QualType ASTContext::getAdjustedParameterType(QualType T) const {
+   if (T->isArrayType() || T->isFunctionType())
+     return getDecayedType(T);
+   return T;
+ }
+
+ QualType ASTContext::getSignatureParameterType(QualType T) const {
+   T = getVariableArrayDecayedType(T);
+   T = getAdjustedParameterType(T);
+   return T.getUnqualifiedType();
+ }
+
+ QualType ASTContext::getExceptionObjectType(QualType T) const {
+   // C++ [except.throw]p3:
+   //   A throw-expression initializes a temporary object, called the
+   //   exception object, the type of which is determined by removing any
+   //   top-level cv-qualifiers from the static type of the operand of throw
+   //   and adjusting the type from "array of T" or "function returning T" to
+   //   "pointer to T" or "pointer to function returning T", [...]
+   T = getVariableArrayDecayedType(T);
+   if (T->isArrayType() || T->isFunctionType())
+     T = getDecayedType(T);
+   return T.getUnqualifiedType();
+ }
+
+ /// getArrayDecayedType - Return the properly qualified result of decaying
+ /// the specified array type to a pointer. This operation is non-trivial when
+ /// handling typedefs etc. The canonical type of "T" must be an array type;
+ /// this returns a pointer to a properly qualified element of the array.
+ ///
+ /// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
+ QualType ASTContext::getArrayDecayedType(QualType Ty) const {
+   // Get the element type with 'getAsArrayType' so that we don't lose any
+   // typedefs in the element type of the array. This also handles propagation
+   // of type qualifiers from the array type into the element type if present
+   // (C99 6.7.3p8).
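+   // For example (illustrative only): given
+   //   typedef int A[4];
+   //   const A x;            // element type is 'const int' per C99 6.7.3p8
+   // the decayed type of 'x' is 'const int *', and 'int y[restrict 4]'
+   // decays to 'int *restrict' via the index-type qualifiers applied below.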
+ const ArrayType *PrettyArrayType = getAsArrayType(Ty); + assert(PrettyArrayType && "Not an array type!"); + + QualType PtrTy = getPointerType(PrettyArrayType->getElementType()); + + // int x[restrict 4] -> int *restrict + QualType Result = getQualifiedType(PtrTy, + PrettyArrayType->getIndexTypeQualifiers()); + + // int x[_Nullable] -> int * _Nullable + if (auto Nullability = Ty->getNullability(*this)) { + Result = const_cast(this)->getAttributedType( + AttributedType::getNullabilityAttrKind(*Nullability), Result, Result); + } + return Result; + } + + QualType ASTContext::getBaseElementType(const ArrayType *array) const { + return getBaseElementType(array->getElementType()); + } + + QualType ASTContext::getBaseElementType(QualType type) const { + Qualifiers qs; + while (true) { + SplitQualType split = type.getSplitDesugaredType(); + const ArrayType *array = split.Ty->getAsArrayTypeUnsafe(); + if (!array) break; + + type = array->getElementType(); + qs.addConsistentQualifiers(split.Quals); + } + + return getQualifiedType(type, qs); + } + + /// getConstantArrayElementCount - Returns number of constant array elements. + uint64_t + ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const { + uint64_t ElementCount = 1; + do { + ElementCount *= CA->getSize().getZExtValue(); + CA = dyn_cast_or_null( + CA->getElementType()->getAsArrayTypeUnsafe()); + } while (CA); + return ElementCount; + } + + /// getFloatingRank - Return a relative rank for floating point types. + /// This routine will assert if passed a built-in type that isn't a float. + static FloatingRank getFloatingRank(QualType T) { + if (const auto *CT = T->getAs()) + return getFloatingRank(CT->getElementType()); + + switch (T->castAs()->getKind()) { + default: llvm_unreachable("getFloatingRank(): not a floating type"); + case BuiltinType::Float16: return Float16Rank; + case BuiltinType::Half: return HalfRank; + case BuiltinType::Float: return FloatRank; + case BuiltinType::Double: return DoubleRank; + case BuiltinType::LongDouble: return LongDoubleRank; + case BuiltinType::Float128: return Float128Rank; + case BuiltinType::BFloat16: return BFloat16Rank; + } + } + + /// getFloatingTypeOfSizeWithinDomain - Returns a real floating + /// point or a complex type (based on typeDomain/typeSize). + /// 'typeDomain' is a real floating point or complex type. + /// 'typeSize' is a real floating point or complex type. + QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size, + QualType Domain) const { + FloatingRank EltRank = getFloatingRank(Size); + if (Domain->isComplexType()) { + switch (EltRank) { + case BFloat16Rank: llvm_unreachable("Complex bfloat16 is not supported"); + case Float16Rank: + case HalfRank: llvm_unreachable("Complex half is not supported"); + case FloatRank: return FloatComplexTy; + case DoubleRank: return DoubleComplexTy; + case LongDoubleRank: return LongDoubleComplexTy; + case Float128Rank: return Float128ComplexTy; + } + } + + assert(Domain->isRealFloatingType() && "Unknown domain!"); + switch (EltRank) { + case Float16Rank: return HalfTy; + case BFloat16Rank: return BFloat16Ty; + case HalfRank: return HalfTy; + case FloatRank: return FloatTy; + case DoubleRank: return DoubleTy; + case LongDoubleRank: return LongDoubleTy; + case Float128Rank: return Float128Ty; + } + llvm_unreachable("getFloatingRank(): illegal value for rank"); + } + + /// getFloatingTypeOrder - Compare the rank of the two specified floating + /// point types, ignoring the domain of the type (i.e. 
'double' == + /// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If + /// LHS < RHS, return -1. + int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const { + FloatingRank LHSR = getFloatingRank(LHS); + FloatingRank RHSR = getFloatingRank(RHS); + + if (LHSR == RHSR) + return 0; + if (LHSR > RHSR) + return 1; + return -1; + } + + int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const { + if (&getFloatTypeSemantics(LHS) == &getFloatTypeSemantics(RHS)) + return 0; + return getFloatingTypeOrder(LHS, RHS); + } + + /// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This + /// routine will assert if passed a built-in type that isn't an integer or enum, + /// or if it is not canonicalized. + unsigned ASTContext::getIntegerRank(const Type *T) const { + assert(T->isCanonicalUnqualified() && "T should be canonicalized"); + + // Results in this 'losing' to any type of the same size, but winning if + // larger. + if (const auto *EIT = dyn_cast(T)) + return 0 + (EIT->getNumBits() << 3); + + switch (cast(T)->getKind()) { + default: llvm_unreachable("getIntegerRank(): not a built-in integer"); + case BuiltinType::Bool: + return 1 + (getIntWidth(BoolTy) << 3); + case BuiltinType::Char_S: + case BuiltinType::Char_U: + case BuiltinType::SChar: + case BuiltinType::UChar: + return 2 + (getIntWidth(CharTy) << 3); + case BuiltinType::Short: + case BuiltinType::UShort: + return 3 + (getIntWidth(ShortTy) << 3); + case BuiltinType::Int: + case BuiltinType::UInt: + return 4 + (getIntWidth(IntTy) << 3); + case BuiltinType::Long: + case BuiltinType::ULong: + return 5 + (getIntWidth(LongTy) << 3); + case BuiltinType::LongLong: + case BuiltinType::ULongLong: + return 6 + (getIntWidth(LongLongTy) << 3); + case BuiltinType::Int128: + case BuiltinType::UInt128: + return 7 + (getIntWidth(Int128Ty) << 3); + } + } + + /// Whether this is a promotable bitfield reference according + /// to C99 6.3.1.1p2, bullet 2 (and GCC extensions). + /// + /// \returns the type this bit-field will promote to, or NULL if no + /// promotion occurs. + QualType ASTContext::isPromotableBitField(Expr *E) const { + if (E->isTypeDependent() || E->isValueDependent()) + return {}; + + // C++ [conv.prom]p5: + // If the bit-field has an enumerated type, it is treated as any other + // value of that type for promotion purposes. + if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType()) + return {}; + + // FIXME: We should not do this unless E->refersToBitField() is true. This + // matters in C where getSourceBitField() will find bit-fields for various + // cases where the source expression is not a bit-field designator. + + FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields? + if (!Field) + return {}; + + QualType FT = Field->getType(); + + uint64_t BitWidth = Field->getBitWidthValue(*this); + uint64_t IntSize = getTypeSize(IntTy); + // C++ [conv.prom]p5: + // A prvalue for an integral bit-field can be converted to a prvalue of type + // int if int can represent all the values of the bit-field; otherwise, it + // can be converted to unsigned int if unsigned int can represent all the + // values of the bit-field. If the bit-field is larger yet, no integral + // promotion applies to it. 
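+   // For example (assuming a 32-bit 'int'): given
+   //   struct S { unsigned u : 8; unsigned long long w : 40; } s;
+   // 's.u' promotes to 'int' (8 < 32, so int can represent every value),
+   // while 's.w' is wider than int and undergoes no integral promotion.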
+ // C11 6.3.1.1/2: + // [For a bit-field of type _Bool, int, signed int, or unsigned int:] + // If an int can represent all values of the original type (as restricted by + // the width, for a bit-field), the value is converted to an int; otherwise, + // it is converted to an unsigned int. + // + // FIXME: C does not permit promotion of a 'long : 3' bitfield to int. + // We perform that promotion here to match GCC and C++. + // FIXME: C does not permit promotion of an enum bit-field whose rank is + // greater than that of 'int'. We perform that promotion to match GCC. + if (BitWidth < IntSize) + return IntTy; + + if (BitWidth == IntSize) + return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy; + + // Bit-fields wider than int are not subject to promotions, and therefore act + // like the base type. GCC has some weird bugs in this area that we + // deliberately do not follow (GCC follows a pre-standard resolution to + // C's DR315 which treats bit-width as being part of the type, and this leaks + // into their semantics in some cases). + return {}; + } + + /// getPromotedIntegerType - Returns the type that Promotable will + /// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable + /// integer type. + QualType ASTContext::getPromotedIntegerType(QualType Promotable) const { + assert(!Promotable.isNull()); + assert(Promotable->isPromotableIntegerType()); + if (const auto *ET = Promotable->getAs()) + return ET->getDecl()->getPromotionType(); + + if (const auto *BT = Promotable->getAs()) { + // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t + // (3.9.1) can be converted to a prvalue of the first of the following + // types that can represent all the values of its underlying type: + // int, unsigned int, long int, unsigned long int, long long int, or + // unsigned long long int [...] + // FIXME: Is there some better way to compute this? + if (BT->getKind() == BuiltinType::WChar_S || + BT->getKind() == BuiltinType::WChar_U || + BT->getKind() == BuiltinType::Char8 || + BT->getKind() == BuiltinType::Char16 || + BT->getKind() == BuiltinType::Char32) { + bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S; + uint64_t FromSize = getTypeSize(BT); + QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy, + LongLongTy, UnsignedLongLongTy }; + for (size_t Idx = 0; Idx < llvm::array_lengthof(PromoteTypes); ++Idx) { + uint64_t ToSize = getTypeSize(PromoteTypes[Idx]); + if (FromSize < ToSize || + (FromSize == ToSize && + FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType())) + return PromoteTypes[Idx]; + } + llvm_unreachable("char type should fit into long long"); + } + } + + // At this point, we should have a signed or unsigned integer type. + if (Promotable->isSignedIntegerType()) + return IntTy; + uint64_t PromotableSize = getIntWidth(Promotable); + uint64_t IntSize = getIntWidth(IntTy); + assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize); + return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy; + } + + /// Recurses in pointer/array types until it finds an objc retainable + /// type and returns its ownership. 
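+ /// For instance (illustrative): '__strong id *' and '__weak id [4]' yield
+ /// OCL_Strong and OCL_Weak respectively, while 'int *' walks down to 'int'
+ /// without finding a lifetime qualifier and yields OCL_None.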
+ Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
+   while (!T.isNull()) {
+     if (T.getObjCLifetime() != Qualifiers::OCL_None)
+       return T.getObjCLifetime();
+     if (T->isArrayType())
+       T = getBaseElementType(T);
+     else if (const auto *PT = T->getAs<PointerType>())
+       T = PT->getPointeeType();
+     else if (const auto *RT = T->getAs<ReferenceType>())
+       T = RT->getPointeeType();
+     else
+       break;
+   }
+
+   return Qualifiers::OCL_None;
+ }
+
+ static const Type *getIntegerTypeForEnum(const EnumType *ET) {
+   // Incomplete enum types are not treated as integer types.
+   // FIXME: In C++, enum types are never integer types.
+   if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
+     return ET->getDecl()->getIntegerType().getTypePtr();
+   return nullptr;
+ }
+
+ /// getIntegerTypeOrder - Returns the highest ranked integer type:
+ /// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
+ /// LHS < RHS, return -1.
+ int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
+   const Type *LHSC = getCanonicalType(LHS).getTypePtr();
+   const Type *RHSC = getCanonicalType(RHS).getTypePtr();
+
+   // Unwrap enums to their underlying type.
+   if (const auto *ET = dyn_cast<EnumType>(LHSC))
+     LHSC = getIntegerTypeForEnum(ET);
+   if (const auto *ET = dyn_cast<EnumType>(RHSC))
+     RHSC = getIntegerTypeForEnum(ET);
+
+   if (LHSC == RHSC) return 0;
+
+   bool LHSUnsigned = LHSC->isUnsignedIntegerType();
+   bool RHSUnsigned = RHSC->isUnsignedIntegerType();
+
+   unsigned LHSRank = getIntegerRank(LHSC);
+   unsigned RHSRank = getIntegerRank(RHSC);
+
+   if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned.
+     if (LHSRank == RHSRank) return 0;
+     return LHSRank > RHSRank ? 1 : -1;
+   }
+
+   // Otherwise, the LHS is signed and the RHS is unsigned or vice versa.
+   if (LHSUnsigned) {
+     // If the unsigned [LHS] type is larger, return it.
+     if (LHSRank >= RHSRank)
+       return 1;
+
+     // If the signed type can represent all values of the unsigned type, it
+     // wins. Because we are dealing with 2's complement and types that are
+     // powers of two larger than each other, this is always safe.
+     return -1;
+   }
+
+   // If the unsigned [RHS] type is larger, return it.
+   if (RHSRank >= LHSRank)
+     return -1;
+
+   // If the signed type can represent all values of the unsigned type, it
+   // wins. Because we are dealing with 2's complement and types that are
+   // powers of two larger than each other, this is always safe.
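+   // For example (on an LP64 target): LHS = 'long' vs. RHS = 'unsigned int'
+   // reaches this point with the signed type outranking the unsigned one;
+   // 'long' can represent every 'unsigned int' value, so the LHS wins: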
+   return 1;
+ }
+
+ TypedefDecl *ASTContext::getCFConstantStringDecl() const {
+   if (CFConstantStringTypeDecl)
+     return CFConstantStringTypeDecl;
+
+   assert(!CFConstantStringTagDecl &&
+          "tag and typedef should be initialized together");
+   CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag");
+   CFConstantStringTagDecl->startDefinition();
+
+   struct {
+     QualType Type;
+     const char *Name;
+   } Fields[5];
+   unsigned Count = 0;
+
+   /// Objective-C ABI
+   ///
+   ///    typedef struct __NSConstantString_tag {
+   ///      const int *isa;
+   ///      int flags;
+   ///      const char *str;
+   ///      long length;
+   ///    } __NSConstantString;
+   ///
+   /// Swift ABI (4.1, 4.2)
+   ///
+   ///    typedef struct __NSConstantString_tag {
+   ///      uintptr_t _cfisa;
+   ///      uintptr_t _swift_rc;
+   ///      _Atomic(uint64_t) _cfinfoa;
+   ///      const char *_ptr;
+   ///      uint32_t _length;
+   ///    } __NSConstantString;
+   ///
+   /// Swift ABI (5.0)
+   ///
+   ///    typedef struct __NSConstantString_tag {
+   ///      uintptr_t _cfisa;
+   ///      uintptr_t _swift_rc;
+   ///      _Atomic(uint64_t) _cfinfoa;
+   ///      const char *_ptr;
+   ///      uintptr_t _length;
+   ///    } __NSConstantString;
+
+   const auto CFRuntime = getLangOpts().CFRuntime;
+   if (static_cast<unsigned>(CFRuntime) <
+       static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) {
+     Fields[Count++] = { getPointerType(IntTy.withConst()), "isa" };
+     Fields[Count++] = { IntTy, "flags" };
+     Fields[Count++] = { getPointerType(CharTy.withConst()), "str" };
+     Fields[Count++] = { LongTy, "length" };
+   } else {
+     Fields[Count++] = { getUIntPtrType(), "_cfisa" };
+     Fields[Count++] = { getUIntPtrType(), "_swift_rc" };
+     Fields[Count++] = { getFromTargetType(Target->getUInt64Type()),
+                         "_cfinfoa" };
+     Fields[Count++] = { getPointerType(CharTy.withConst()), "_ptr" };
+     if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
+         CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
+       Fields[Count++] = { IntTy, "_length" };
+     else
+       Fields[Count++] = { getUIntPtrType(), "_length" };
+   }
+
+   // Create fields
+   for (unsigned i = 0; i < Count; ++i) {
+     FieldDecl *Field =
+         FieldDecl::Create(*this, CFConstantStringTagDecl, SourceLocation(),
+                           SourceLocation(), &Idents.get(Fields[i].Name),
+                           Fields[i].Type, /*TInfo=*/nullptr,
+                           /*BitWidth=*/nullptr, /*Mutable=*/false,
+                           ICIS_NoInit);
+     Field->setAccess(AS_public);
+     CFConstantStringTagDecl->addDecl(Field);
+   }
+
+   CFConstantStringTagDecl->completeDefinition();
+   // This type is designed to be compatible with NSConstantString, but cannot
+   // use the same name, since NSConstantString is an interface.
+   auto tagType = getTagDeclType(CFConstantStringTagDecl);
+   CFConstantStringTypeDecl =
+       buildImplicitTypedef(tagType, "__NSConstantString");
+
+   return CFConstantStringTypeDecl;
+ }
+
+ RecordDecl *ASTContext::getCFConstantStringTagDecl() const {
+   if (!CFConstantStringTagDecl)
+     getCFConstantStringDecl(); // Build the tag and the typedef.
+   return CFConstantStringTagDecl;
+ }
+
+ // getCFConstantStringType - Return the type used for constant CFStrings.
+ QualType ASTContext::getCFConstantStringType() const { + return getTypedefType(getCFConstantStringDecl()); + } + + QualType ASTContext::getObjCSuperType() const { + if (ObjCSuperType.isNull()) { + RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super"); + TUDecl->addDecl(ObjCSuperTypeDecl); + ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl); + } + return ObjCSuperType; + } + + void ASTContext::setCFConstantStringType(QualType T) { + const auto *TD = T->castAs(); + CFConstantStringTypeDecl = cast(TD->getDecl()); + const auto *TagType = + CFConstantStringTypeDecl->getUnderlyingType()->castAs(); + CFConstantStringTagDecl = TagType->getDecl(); + } + + QualType ASTContext::getBlockDescriptorType() const { + if (BlockDescriptorType) + return getTagDeclType(BlockDescriptorType); + + RecordDecl *RD; + // FIXME: Needs the FlagAppleBlock bit. + RD = buildImplicitRecord("__block_descriptor"); + RD->startDefinition(); + + QualType FieldTypes[] = { + UnsignedLongTy, + UnsignedLongTy, + }; + + static const char *const FieldNames[] = { + "reserved", + "Size" + }; + + for (size_t i = 0; i < 2; ++i) { + FieldDecl *Field = FieldDecl::Create( + *this, RD, SourceLocation(), SourceLocation(), + &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, + /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); + Field->setAccess(AS_public); + RD->addDecl(Field); + } + + RD->completeDefinition(); + + BlockDescriptorType = RD; + + return getTagDeclType(BlockDescriptorType); + } + + QualType ASTContext::getBlockDescriptorExtendedType() const { + if (BlockDescriptorExtendedType) + return getTagDeclType(BlockDescriptorExtendedType); + + RecordDecl *RD; + // FIXME: Needs the FlagAppleBlock bit. + RD = buildImplicitRecord("__block_descriptor_withcopydispose"); + RD->startDefinition(); + + QualType FieldTypes[] = { + UnsignedLongTy, + UnsignedLongTy, + getPointerType(VoidPtrTy), + getPointerType(VoidPtrTy) + }; + + static const char *const FieldNames[] = { + "reserved", + "Size", + "CopyFuncPtr", + "DestroyFuncPtr" + }; + + for (size_t i = 0; i < 4; ++i) { + FieldDecl *Field = FieldDecl::Create( + *this, RD, SourceLocation(), SourceLocation(), + &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, + /*BitWidth=*/nullptr, + /*Mutable=*/false, ICIS_NoInit); + Field->setAccess(AS_public); + RD->addDecl(Field); + } + + RD->completeDefinition(); + + BlockDescriptorExtendedType = RD; + return getTagDeclType(BlockDescriptorExtendedType); + } + + OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const { + const auto *BT = dyn_cast(T); + + if (!BT) { + if (isa(T)) + return OCLTK_Pipe; + + return OCLTK_Default; + } + + switch (BT->getKind()) { + #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ + case BuiltinType::Id: \ + return OCLTK_Image; + #include "clang/Basic/OpenCLImageTypes.def" + + case BuiltinType::OCLClkEvent: + return OCLTK_ClkEvent; + + case BuiltinType::OCLEvent: + return OCLTK_Event; + + case BuiltinType::OCLQueue: + return OCLTK_Queue; + + case BuiltinType::OCLReserveID: + return OCLTK_ReserveID; + + case BuiltinType::OCLSampler: + return OCLTK_Sampler; + + default: + return OCLTK_Default; + } + } + + LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const { + return Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); + } + + /// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty" + /// requires copy/dispose. Note that this must match the logic + /// in buildByrefHelpers. 
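+ /// For instance (illustrative): a __block variable of C++ class type with
+ /// a non-trivial destructor needs helpers, as does a __block '__strong id'
+ /// under ARC; a __block 'int' or '__unsafe_unretained id' does not.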
+ bool ASTContext::BlockRequiresCopying(QualType Ty, + const VarDecl *D) { + if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) { + const Expr *copyExpr = getBlockVarCopyInit(D).getCopyExpr(); + if (!copyExpr && record->hasTrivialDestructor()) return false; + + return true; + } + + // The block needs copy/destroy helpers if Ty is non-trivial to destructively + // move or destroy. + if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType()) + return true; + + if (!Ty->isObjCRetainableType()) return false; + + Qualifiers qs = Ty.getQualifiers(); + + // If we have lifetime, that dominates. + if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) { + switch (lifetime) { + case Qualifiers::OCL_None: llvm_unreachable("impossible"); + + // These are just bits as far as the runtime is concerned. + case Qualifiers::OCL_ExplicitNone: + case Qualifiers::OCL_Autoreleasing: + return false; + + // These cases should have been taken care of when checking the type's + // non-triviality. + case Qualifiers::OCL_Weak: + case Qualifiers::OCL_Strong: + llvm_unreachable("impossible"); + } + llvm_unreachable("fell out of lifetime switch!"); + } + return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) || + Ty->isObjCObjectPointerType()); + } + + bool ASTContext::getByrefLifetime(QualType Ty, + Qualifiers::ObjCLifetime &LifeTime, + bool &HasByrefExtendedLayout) const { + if (!getLangOpts().ObjC || + getLangOpts().getGC() != LangOptions::NonGC) + return false; + + HasByrefExtendedLayout = false; + if (Ty->isRecordType()) { + HasByrefExtendedLayout = true; + LifeTime = Qualifiers::OCL_None; + } else if ((LifeTime = Ty.getObjCLifetime())) { + // Honor the ARC qualifiers. + } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) { + // The MRR rule. + LifeTime = Qualifiers::OCL_ExplicitNone; + } else { + LifeTime = Qualifiers::OCL_None; + } + return true; + } + + CanQualType ASTContext::getNSUIntegerType() const { + assert(Target && "Expected target to be initialized"); + const llvm::Triple &T = Target->getTriple(); + // Windows is LLP64 rather than LP64 + if (T.isOSWindows() && T.isArch64Bit()) + return UnsignedLongLongTy; + return UnsignedLongTy; + } + + CanQualType ASTContext::getNSIntegerType() const { + assert(Target && "Expected target to be initialized"); + const llvm::Triple &T = Target->getTriple(); + // Windows is LLP64 rather than LP64 + if (T.isOSWindows() && T.isArch64Bit()) + return LongLongTy; + return LongTy; + } + + TypedefDecl *ASTContext::getObjCInstanceTypeDecl() { + if (!ObjCInstanceTypeDecl) + ObjCInstanceTypeDecl = + buildImplicitTypedef(getObjCIdType(), "instancetype"); + return ObjCInstanceTypeDecl; + } + + // This returns true if a type has been typedefed to BOOL: + // typedef BOOL; + static bool isTypeTypedefedAsBOOL(QualType T) { + if (const auto *TT = dyn_cast(T)) + if (IdentifierInfo *II = TT->getDecl()->getIdentifier()) + return II->isStr("BOOL"); + + return false; + } + + /// getObjCEncodingTypeSize returns size of type for objective-c encoding + /// purpose. + CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const { + if (!type->isIncompleteArrayType() && type->isIncompleteType()) + return CharUnits::Zero(); + + CharUnits sz = getTypeSizeInChars(type); + + // Make all integer and enum types at least as large as an int + if (sz.isPositive() && type->isIntegralOrEnumerationType()) + sz = std::max(sz, getTypeSizeInChars(IntTy)); + // Treat arrays as pointers, since that's how they're passed in. 
+ else if (type->isArrayType()) + sz = getTypeSizeInChars(VoidPtrTy); + return sz; + } + + bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const { + return getTargetInfo().getCXXABI().isMicrosoft() && + VD->isStaticDataMember() && + VD->getType()->isIntegralOrEnumerationType() && + !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit(); + } + + ASTContext::InlineVariableDefinitionKind + ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const { + if (!VD->isInline()) + return InlineVariableDefinitionKind::None; + + // In almost all cases, it's a weak definition. + auto *First = VD->getFirstDecl(); + if (First->isInlineSpecified() || !First->isStaticDataMember()) + return InlineVariableDefinitionKind::Weak; + + // If there's a file-context declaration in this translation unit, it's a + // non-discardable definition. + for (auto *D : VD->redecls()) + if (D->getLexicalDeclContext()->isFileContext() && + !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr())) + return InlineVariableDefinitionKind::Strong; + + // If we've not seen one yet, we don't know. + return InlineVariableDefinitionKind::WeakUnknown; + } + + static std::string charUnitsToString(const CharUnits &CU) { + return llvm::itostr(CU.getQuantity()); + } + + /// getObjCEncodingForBlock - Return the encoded type for this block + /// declaration. + std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const { + std::string S; + + const BlockDecl *Decl = Expr->getBlockDecl(); + QualType BlockTy = + Expr->getType()->castAs()->getPointeeType(); + QualType BlockReturnTy = BlockTy->castAs()->getReturnType(); + // Encode result type. + if (getLangOpts().EncodeExtendedBlockSig) + getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, BlockReturnTy, S, + true /*Extended*/); + else + getObjCEncodingForType(BlockReturnTy, S); + // Compute size of all parameters. + // Start with computing size of a pointer in number of bytes. + // FIXME: There might(should) be a better way of doing this computation! + CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); + CharUnits ParmOffset = PtrSize; + for (auto PI : Decl->parameters()) { + QualType PType = PI->getType(); + CharUnits sz = getObjCEncodingTypeSize(PType); + if (sz.isZero()) + continue; + assert(sz.isPositive() && "BlockExpr - Incomplete param type"); + ParmOffset += sz; + } + // Size of the argument frame + S += charUnitsToString(ParmOffset); + // Block pointer and offset. + S += "@?0"; + + // Argument types. + ParmOffset = PtrSize; + for (auto PVDecl : Decl->parameters()) { + QualType PType = PVDecl->getOriginalType(); + if (const auto *AT = + dyn_cast(PType->getCanonicalTypeInternal())) { + // Use array's original type only if it has known number of + // elements. + if (!isa(AT)) + PType = PVDecl->getType(); + } else if (PType->isFunctionType()) + PType = PVDecl->getType(); + if (getLangOpts().EncodeExtendedBlockSig) + getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType, + S, true /*Extended*/); + else + getObjCEncodingForType(PType, S); + S += charUnitsToString(ParmOffset); + ParmOffset += getObjCEncodingTypeSize(PType); + } + + return S; + } + + std::string + ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const { + std::string S; + // Encode result type. + getObjCEncodingForType(Decl->getReturnType(), S); + CharUnits ParmOffset; + // Compute size of all parameters. 
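+   // Worked example (illustrative, assuming 4-byte ints and 8-byte pointers):
+   // for "int f(int, char *)" the loop below accumulates 4 + 8 = 12, so the
+   // final encoding comes out as "i12i0*4" -- result 'i', total frame size
+   // 12, then each parameter's encoding followed by its byte offset.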
+ for (auto PI : Decl->parameters()) { + QualType PType = PI->getType(); + CharUnits sz = getObjCEncodingTypeSize(PType); + if (sz.isZero()) + continue; + + assert(sz.isPositive() && + "getObjCEncodingForFunctionDecl - Incomplete param type"); + ParmOffset += sz; + } + S += charUnitsToString(ParmOffset); + ParmOffset = CharUnits::Zero(); + + // Argument types. + for (auto PVDecl : Decl->parameters()) { + QualType PType = PVDecl->getOriginalType(); + if (const auto *AT = + dyn_cast(PType->getCanonicalTypeInternal())) { + // Use array's original type only if it has known number of + // elements. + if (!isa(AT)) + PType = PVDecl->getType(); + } else if (PType->isFunctionType()) + PType = PVDecl->getType(); + getObjCEncodingForType(PType, S); + S += charUnitsToString(ParmOffset); + ParmOffset += getObjCEncodingTypeSize(PType); + } + + return S; + } + + /// getObjCEncodingForMethodParameter - Return the encoded type for a single + /// method parameter or return type. If Extended, include class names and + /// block object types. + void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT, + QualType T, std::string& S, + bool Extended) const { + // Encode type qualifer, 'in', 'inout', etc. for the parameter. + getObjCEncodingForTypeQualifier(QT, S); + // Encode parameter type. + ObjCEncOptions Options = ObjCEncOptions() + .setExpandPointedToStructures() + .setExpandStructures() + .setIsOutermostType(); + if (Extended) + Options.setEncodeBlockParameters().setEncodeClassNames(); + getObjCEncodingForTypeImpl(T, S, Options, /*Field=*/nullptr); + } + + /// getObjCEncodingForMethodDecl - Return the encoded type for this method + /// declaration. + std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, + bool Extended) const { + // FIXME: This is not very efficient. + // Encode return type. + std::string S; + getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(), + Decl->getReturnType(), S, Extended); + // Compute size of all parameters. + // Start with computing size of a pointer in number of bytes. + // FIXME: There might(should) be a better way of doing this computation! + CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); + // The first two arguments (self and _cmd) are pointers; account for + // their size. + CharUnits ParmOffset = 2 * PtrSize; + for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), + E = Decl->sel_param_end(); PI != E; ++PI) { + QualType PType = (*PI)->getType(); + CharUnits sz = getObjCEncodingTypeSize(PType); + if (sz.isZero()) + continue; + + assert(sz.isPositive() && + "getObjCEncodingForMethodDecl - Incomplete param type"); + ParmOffset += sz; + } + S += charUnitsToString(ParmOffset); + S += "@0:"; + S += charUnitsToString(PtrSize); + + // Argument types. + ParmOffset = 2 * PtrSize; + for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), + E = Decl->sel_param_end(); PI != E; ++PI) { + const ParmVarDecl *PVDecl = *PI; + QualType PType = PVDecl->getOriginalType(); + if (const auto *AT = + dyn_cast(PType->getCanonicalTypeInternal())) { + // Use array's original type only if it has known number of + // elements. 
+       if (!isa<ConstantArrayType>(AT))
+         PType = PVDecl->getType();
+     } else if (PType->isFunctionType())
+       PType = PVDecl->getType();
+     getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(),
+                                       PType, S, Extended);
+     S += charUnitsToString(ParmOffset);
+     ParmOffset += getObjCEncodingTypeSize(PType);
+   }
+
+   return S;
+ }
+
+ ObjCPropertyImplDecl *
+ ASTContext::getObjCPropertyImplDeclForPropertyDecl(
+     const ObjCPropertyDecl *PD,
+     const Decl *Container) const {
+   if (!Container)
+     return nullptr;
+   if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Container)) {
+     for (auto *PID : CID->property_impls())
+       if (PID->getPropertyDecl() == PD)
+         return PID;
+   } else {
+     const auto *OID = cast<ObjCImplementationDecl>(Container);
+     for (auto *PID : OID->property_impls())
+       if (PID->getPropertyDecl() == PD)
+         return PID;
+   }
+   return nullptr;
+ }
+
+ /// getObjCEncodingForPropertyDecl - Return the encoded type for this
+ /// property declaration. If non-NULL, Container must be either an
+ /// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be
+ /// NULL when getting encodings for protocol properties.
+ /// Property attributes are stored as a comma-delimited C string. The simple
+ /// attributes readonly and bycopy are encoded as single characters. The
+ /// parametrized attributes, getter=name, setter=name, and ivar=name, are
+ /// encoded as single characters, followed by an identifier. Property types
+ /// are also encoded as a parametrized attribute. The characters used to encode
+ /// these attributes are defined by the following enumeration:
+ /// @code
+ /// enum PropertyAttributes {
+ ///   kPropertyReadOnly = 'R',   // property is read-only.
+ ///   kPropertyBycopy = 'C',     // property is a copy of the value last assigned
+ ///   kPropertyByref = '&',      // property is a reference to the value last assigned
+ ///   kPropertyDynamic = 'D',    // property is dynamic
+ ///   kPropertyGetter = 'G',     // followed by getter selector name
+ ///   kPropertySetter = 'S',     // followed by setter selector name
+ ///   kPropertyInstanceVariable = 'V'  // followed by instance variable name
+ ///   kPropertyType = 'T'              // followed by old-style type encoding.
+ ///   kPropertyWeak = 'W'              // 'weak' property
+ ///   kPropertyStrong = 'P'            // property GC'able
+ ///   kPropertyNonAtomic = 'N'         // property non-atomic
+ /// };
+ /// @endcode
+ std::string
+ ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
+                                            const Decl *Container) const {
+   // Collect information from the property implementation decl(s).
+   bool Dynamic = false;
+   ObjCPropertyImplDecl *SynthesizePID = nullptr;
+
+   if (ObjCPropertyImplDecl *PropertyImpDecl =
+           getObjCPropertyImplDeclForPropertyDecl(PD, Container)) {
+     if (PropertyImpDecl->getPropertyImplementation() ==
+         ObjCPropertyImplDecl::Dynamic)
+       Dynamic = true;
+     else
+       SynthesizePID = PropertyImpDecl;
+   }
+
+   // FIXME: This is not very efficient.
+   std::string S = "T";
+
+   // Encode result type.
+   // GCC has some special rules regarding encoding of properties which
+   // closely resembles encoding of ivars.
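+   // For example (illustrative): a property declared as
+   //   @property (nonatomic, copy) NSString *name;
+   // typically yields T@"NSString",C,N,V_name once the attribute handling
+   // below has run (the V_name part only if an ivar was synthesized).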
+ getObjCEncodingForPropertyType(PD->getType(), S); + + if (PD->isReadOnly()) { + S += ",R"; + if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy) + S += ",C"; + if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain) + S += ",&"; + if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak) + S += ",W"; + } else { + switch (PD->getSetterKind()) { + case ObjCPropertyDecl::Assign: break; + case ObjCPropertyDecl::Copy: S += ",C"; break; + case ObjCPropertyDecl::Retain: S += ",&"; break; + case ObjCPropertyDecl::Weak: S += ",W"; break; + } + } + + // It really isn't clear at all what this means, since properties + // are "dynamic by default". + if (Dynamic) + S += ",D"; + + if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic) + S += ",N"; + + if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) { + S += ",G"; + S += PD->getGetterName().getAsString(); + } + + if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) { + S += ",S"; + S += PD->getSetterName().getAsString(); + } + + if (SynthesizePID) { + const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl(); + S += ",V"; + S += OID->getNameAsString(); + } + + // FIXME: OBJCGC: weak & strong + return S; + } + + /// getLegacyIntegralTypeEncoding - + /// Another legacy compatibility encoding: 32-bit longs are encoded as + /// 'l' or 'L' , but not always. For typedefs, we need to use + /// 'i' or 'I' instead if encoding a struct field, or a pointer! + void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const { + if (isa(PointeeTy.getTypePtr())) { + if (const auto *BT = PointeeTy->getAs()) { + if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32) + PointeeTy = UnsignedIntTy; + else + if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32) + PointeeTy = IntTy; + } + } + } + + void ASTContext::getObjCEncodingForType(QualType T, std::string& S, + const FieldDecl *Field, + QualType *NotEncodedT) const { + // We follow the behavior of gcc, expanding structures which are + // directly pointed to, and expanding embedded structures. Note that + // these rules are sufficient to prevent recursive encoding of the + // same type. + getObjCEncodingForTypeImpl(T, S, + ObjCEncOptions() + .setExpandPointedToStructures() + .setExpandStructures() + .setIsOutermostType(), + Field, NotEncodedT); + } + + void ASTContext::getObjCEncodingForPropertyType(QualType T, + std::string& S) const { + // Encode result type. + // GCC has some special rules regarding encoding of properties which + // closely resembles encoding of ivars. + getObjCEncodingForTypeImpl(T, S, + ObjCEncOptions() + .setExpandPointedToStructures() + .setExpandStructures() + .setIsOutermostType() + .setEncodingProperty(), + /*Field=*/nullptr); + } + + static char getObjCEncodingForPrimitiveType(const ASTContext *C, + const BuiltinType *BT) { + BuiltinType::Kind kind = BT->getKind(); + switch (kind) { + case BuiltinType::Void: return 'v'; + case BuiltinType::Bool: return 'B'; + case BuiltinType::Char8: + case BuiltinType::Char_U: + case BuiltinType::UChar: return 'C'; + case BuiltinType::Char16: + case BuiltinType::UShort: return 'S'; + case BuiltinType::Char32: + case BuiltinType::UInt: return 'I'; + case BuiltinType::ULong: + return C->getTargetInfo().getLongWidth() == 32 ? 
'L' : 'Q'; + case BuiltinType::UInt128: return 'T'; + case BuiltinType::ULongLong: return 'Q'; + case BuiltinType::Char_S: + case BuiltinType::SChar: return 'c'; + case BuiltinType::Short: return 's'; + case BuiltinType::WChar_S: + case BuiltinType::WChar_U: + case BuiltinType::Int: return 'i'; + case BuiltinType::Long: + return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q'; + case BuiltinType::LongLong: return 'q'; + case BuiltinType::Int128: return 't'; + case BuiltinType::Float: return 'f'; + case BuiltinType::Double: return 'd'; + case BuiltinType::LongDouble: return 'D'; + case BuiltinType::NullPtr: return '*'; // like char* + + case BuiltinType::BFloat16: + case BuiltinType::Float16: + case BuiltinType::Float128: + case BuiltinType::Half: + case BuiltinType::ShortAccum: + case BuiltinType::Accum: + case BuiltinType::LongAccum: + case BuiltinType::UShortAccum: + case BuiltinType::UAccum: + case BuiltinType::ULongAccum: + case BuiltinType::ShortFract: + case BuiltinType::Fract: + case BuiltinType::LongFract: + case BuiltinType::UShortFract: + case BuiltinType::UFract: + case BuiltinType::ULongFract: + case BuiltinType::SatShortAccum: + case BuiltinType::SatAccum: + case BuiltinType::SatLongAccum: + case BuiltinType::SatUShortAccum: + case BuiltinType::SatUAccum: + case BuiltinType::SatULongAccum: + case BuiltinType::SatShortFract: + case BuiltinType::SatFract: + case BuiltinType::SatLongFract: + case BuiltinType::SatUShortFract: + case BuiltinType::SatUFract: + case BuiltinType::SatULongFract: + // FIXME: potentially need @encodes for these! + return ' '; + + #define SVE_TYPE(Name, Id, SingletonId) \ + case BuiltinType::Id: + #include "clang/Basic/AArch64SVEACLETypes.def" + #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: + #include "clang/Basic/RISCVVTypes.def" + { + DiagnosticsEngine &Diags = C->getDiagnostics(); + unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot yet @encode type %0"); + Diags.Report(DiagID) << BT->getName(C->getPrintingPolicy()); + return ' '; + } + + case BuiltinType::ObjCId: + case BuiltinType::ObjCClass: + case BuiltinType::ObjCSel: + llvm_unreachable("@encoding ObjC primitive type"); + + // OpenCL and placeholder types don't need @encodings. + #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ + case BuiltinType::Id: + #include "clang/Basic/OpenCLImageTypes.def" + #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ + case BuiltinType::Id: + #include "clang/Basic/OpenCLExtensionTypes.def" + case BuiltinType::OCLEvent: + case BuiltinType::OCLClkEvent: + case BuiltinType::OCLQueue: + case BuiltinType::OCLReserveID: + case BuiltinType::OCLSampler: + case BuiltinType::Dependent: + #define PPC_VECTOR_TYPE(Name, Id, Size) \ + case BuiltinType::Id: + #include "clang/Basic/PPCTypes.def" + #define BUILTIN_TYPE(KIND, ID) + #define PLACEHOLDER_TYPE(KIND, ID) \ + case BuiltinType::KIND: + #include "clang/AST/BuiltinTypes.def" + llvm_unreachable("invalid builtin type for @encode"); + } + llvm_unreachable("invalid BuiltinType::Kind value"); + } + + static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) { + EnumDecl *Enum = ET->getDecl(); + + // The encoding of an non-fixed enum type is always 'i', regardless of size. + if (!Enum->isFixed()) + return 'i'; + + // The encoding of a fixed enum type matches its fixed underlying type. 
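+   // For example (illustrative): "enum E : unsigned short { ... }" encodes
+   // as 'S', the same as a bare unsigned short.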
+   const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>();
+   return getObjCEncodingForPrimitiveType(C, BT);
+ }
+
+ static void EncodeBitField(const ASTContext *Ctx, std::string& S,
+                            QualType T, const FieldDecl *FD) {
+   assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl");
+   S += 'b';
+   // The NeXT runtime encodes bit fields as b followed by the number of bits.
+   // The GNU runtime requires more information; bitfields are encoded as b,
+   // then the offset (in bits) of the first element, then the type of the
+   // bitfield, then the size in bits. For example, in this structure:
+   //
+   // struct
+   // {
+   //    int integer;
+   //    int flags:2;
+   // };
+   // On a 32-bit system, the encoding for flags would be b2 for the NeXT
+   // runtime, but b32i2 for the GNU runtime. The reason for this extra
+   // information is not especially sensible, but we're stuck with it for
+   // compatibility with GCC, although providing it breaks anything that
+   // actually uses runtime introspection and wants to work on both runtimes...
+   if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) {
+     uint64_t Offset;
+
+     if (const auto *IVD = dyn_cast<ObjCIvarDecl>(FD)) {
+       Offset = Ctx->lookupFieldBitOffset(IVD->getContainingInterface(), nullptr,
+                                          IVD);
+     } else {
+       const RecordDecl *RD = FD->getParent();
+       const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD);
+       Offset = RL.getFieldOffset(FD->getFieldIndex());
+     }
+
+     S += llvm::utostr(Offset);
+
+     if (const auto *ET = T->getAs<EnumType>())
+       S += ObjCEncodingForEnumType(Ctx, ET);
+     else {
+       const auto *BT = T->castAs<BuiltinType>();
+       S += getObjCEncodingForPrimitiveType(Ctx, BT);
+     }
+   }
+   S += llvm::utostr(FD->getBitWidthValue(*Ctx));
+ }
+
+ // Helper function for determining whether the encoded type string would include
+ // a template specialization type.
+ static bool hasTemplateSpecializationInEncodedString(const Type *T,
+                                                      bool VisitBasesAndFields) {
+   T = T->getBaseElementTypeUnsafe();
+
+   if (auto *PT = T->getAs<PointerType>())
+     return hasTemplateSpecializationInEncodedString(
+         PT->getPointeeType().getTypePtr(), false);
+
+   auto *CXXRD = T->getAsCXXRecordDecl();
+
+   if (!CXXRD)
+     return false;
+
+   if (isa<ClassTemplateSpecializationDecl>(CXXRD))
+     return true;
+
+   if (!CXXRD->hasDefinition() || !VisitBasesAndFields)
+     return false;
+
+   for (auto B : CXXRD->bases())
+     if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(),
+                                                  true))
+       return true;
+
+   for (auto *FD : CXXRD->fields())
+     if (hasTemplateSpecializationInEncodedString(FD->getType().getTypePtr(),
+                                                  true))
+       return true;
+
+   return false;
+ }
+
+ // FIXME: Use SmallString for accumulating string.
+ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
+                                             const ObjCEncOptions Options,
+                                             const FieldDecl *FD,
+                                             QualType *NotEncodedT) const {
+   CanQualType CT = getCanonicalType(T);
+   switch (CT->getTypeClass()) {
+   case Type::Builtin:
+   case Type::Enum:
+     if (FD && FD->isBitField())
+       return EncodeBitField(this, S, T, FD);
+     if (const auto *BT = dyn_cast<BuiltinType>(CT))
+       S += getObjCEncodingForPrimitiveType(this, BT);
+     else
+       S += ObjCEncodingForEnumType(this, cast<EnumType>(CT));
+     return;
+
+   case Type::Complex:
+     S += 'j';
+     getObjCEncodingForTypeImpl(T->castAs<ComplexType>()->getElementType(), S,
+                                ObjCEncOptions(),
+                                /*Field=*/nullptr);
+     return;
+
+   case Type::Atomic:
+     S += 'A';
+     getObjCEncodingForTypeImpl(T->castAs<AtomicType>()->getValueType(), S,
+                                ObjCEncOptions(),
+                                /*Field=*/nullptr);
+     return;
+
+   // encoding for pointer or reference types.
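+   // For example (illustrative): "int *" encodes as "^i", a const pointee at
+   // the outermost level adds an 'r' prefix ("r^i" for "const int *"), and
+   // "char *" collapses to just "*".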
+ case Type::Pointer: + case Type::LValueReference: + case Type::RValueReference: { + QualType PointeeTy; + if (isa(CT)) { + const auto *PT = T->castAs(); + if (PT->isObjCSelType()) { + S += ':'; + return; + } + PointeeTy = PT->getPointeeType(); + } else { + PointeeTy = T->castAs()->getPointeeType(); + } + + bool isReadOnly = false; + // For historical/compatibility reasons, the read-only qualifier of the + // pointee gets emitted _before_ the '^'. The read-only qualifier of + // the pointer itself gets ignored, _unless_ we are looking at a typedef! + // Also, do not emit the 'r' for anything but the outermost type! + if (isa(T.getTypePtr())) { + if (Options.IsOutermostType() && T.isConstQualified()) { + isReadOnly = true; + S += 'r'; + } + } else if (Options.IsOutermostType()) { + QualType P = PointeeTy; + while (auto PT = P->getAs()) + P = PT->getPointeeType(); + if (P.isConstQualified()) { + isReadOnly = true; + S += 'r'; + } + } + if (isReadOnly) { + // Another legacy compatibility encoding. Some ObjC qualifier and type + // combinations need to be rearranged. + // Rewrite "in const" from "nr" to "rn" + if (StringRef(S).endswith("nr")) + S.replace(S.end()-2, S.end(), "rn"); + } + + if (PointeeTy->isCharType()) { + // char pointer types should be encoded as '*' unless it is a + // type that has been typedef'd to 'BOOL'. + if (!isTypeTypedefedAsBOOL(PointeeTy)) { + S += '*'; + return; + } + } else if (const auto *RTy = PointeeTy->getAs()) { + // GCC binary compat: Need to convert "struct objc_class *" to "#". + if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) { + S += '#'; + return; + } + // GCC binary compat: Need to convert "struct objc_object *" to "@". + if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) { + S += '@'; + return; + } + // If the encoded string for the class includes template names, just emit + // "^v" for pointers to the class. + if (getLangOpts().CPlusPlus && + (!getLangOpts().EncodeCXXClassTemplateSpec && + hasTemplateSpecializationInEncodedString( + RTy, Options.ExpandPointedToStructures()))) { + S += "^v"; + return; + } + // fall through... + } + S += '^'; + getLegacyIntegralTypeEncoding(PointeeTy); + + ObjCEncOptions NewOptions; + if (Options.ExpandPointedToStructures()) + NewOptions.setExpandStructures(); + getObjCEncodingForTypeImpl(PointeeTy, S, NewOptions, + /*Field=*/nullptr, NotEncodedT); + return; + } + + case Type::ConstantArray: + case Type::IncompleteArray: + case Type::VariableArray: { + const auto *AT = cast(CT); + + if (isa(AT) && !Options.IsStructField()) { + // Incomplete arrays are encoded as a pointer to the array element. + S += '^'; + + getObjCEncodingForTypeImpl( + AT->getElementType(), S, + Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD); + } else { + S += '['; + + if (const auto *CAT = dyn_cast(AT)) + S += llvm::utostr(CAT->getSize().getZExtValue()); + else { + //Variable length arrays are encoded as a regular array with 0 elements. + assert((isa(AT) || isa(AT)) && + "Unknown array type!"); + S += '0'; + } + + getObjCEncodingForTypeImpl( + AT->getElementType(), S, + Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD, + NotEncodedT); + S += ']'; + } + return; + } + + case Type::FunctionNoProto: + case Type::FunctionProto: + S += '?'; + return; + + case Type::Record: { + RecordDecl *RDecl = cast(CT)->getDecl(); + S += RDecl->isUnion() ? '(' : '{'; + // Anonymous structures print as '?' 
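+     // For example (illustrative): "struct Point { int x, y; }" encodes as
+     // "{Point=ii}", while an unnamed struct with the same fields becomes
+     // "{?=ii}".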
+ if (const IdentifierInfo *II = RDecl->getIdentifier()) { + S += II->getName(); + if (const auto *Spec = dyn_cast(RDecl)) { + const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); + llvm::raw_string_ostream OS(S); + printTemplateArgumentList(OS, TemplateArgs.asArray(), + getPrintingPolicy()); + } + } else { + S += '?'; + } + if (Options.ExpandStructures()) { + S += '='; + if (!RDecl->isUnion()) { + getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT); + } else { + for (const auto *Field : RDecl->fields()) { + if (FD) { + S += '"'; + S += Field->getNameAsString(); + S += '"'; + } + + // Special case bit-fields. + if (Field->isBitField()) { + getObjCEncodingForTypeImpl(Field->getType(), S, + ObjCEncOptions().setExpandStructures(), + Field); + } else { + QualType qt = Field->getType(); + getLegacyIntegralTypeEncoding(qt); + getObjCEncodingForTypeImpl( + qt, S, + ObjCEncOptions().setExpandStructures().setIsStructField(), FD, + NotEncodedT); + } + } + } + } + S += RDecl->isUnion() ? ')' : '}'; + return; + } + + case Type::BlockPointer: { + const auto *BT = T->castAs(); + S += "@?"; // Unlike a pointer-to-function, which is "^?". + if (Options.EncodeBlockParameters()) { + const auto *FT = BT->getPointeeType()->castAs(); + + S += '<'; + // Block return type + getObjCEncodingForTypeImpl(FT->getReturnType(), S, + Options.forComponentType(), FD, NotEncodedT); + // Block self + S += "@?"; + // Block parameters + if (const auto *FPT = dyn_cast(FT)) { + for (const auto &I : FPT->param_types()) + getObjCEncodingForTypeImpl(I, S, Options.forComponentType(), FD, + NotEncodedT); + } + S += '>'; + } + return; + } + + case Type::ObjCObject: { + // hack to match legacy encoding of *id and *Class + QualType Ty = getObjCObjectPointerType(CT); + if (Ty->isObjCIdType()) { + S += "{objc_object=}"; + return; + } + else if (Ty->isObjCClassType()) { + S += "{objc_class=}"; + return; + } + // TODO: Double check to make sure this intentionally falls through. + LLVM_FALLTHROUGH; + } + + case Type::ObjCInterface: { + // Ignore protocol qualifiers when mangling at this level. + // @encode(class_name) + ObjCInterfaceDecl *OI = T->castAs()->getInterface(); + S += '{'; + S += OI->getObjCRuntimeNameAsString(); + if (Options.ExpandStructures()) { + S += '='; + SmallVector Ivars; + DeepCollectObjCIvars(OI, true, Ivars); + for (unsigned i = 0, e = Ivars.size(); i != e; ++i) { + const FieldDecl *Field = Ivars[i]; + if (Field->isBitField()) + getObjCEncodingForTypeImpl(Field->getType(), S, + ObjCEncOptions().setExpandStructures(), + Field); + else + getObjCEncodingForTypeImpl(Field->getType(), S, + ObjCEncOptions().setExpandStructures(), FD, + NotEncodedT); + } + } + S += '}'; + return; + } + + case Type::ObjCObjectPointer: { + const auto *OPT = T->castAs(); + if (OPT->isObjCIdType()) { + S += '@'; + return; + } + + if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) { + // FIXME: Consider if we need to output qualifiers for 'Class

'. + // Since this is a binary compatibility issue, need to consult with + // runtime folks. Fortunately, this is a *very* obscure construct. + S += '#'; + return; + } + + if (OPT->isObjCQualifiedIdType()) { + getObjCEncodingForTypeImpl( + getObjCIdType(), S, + Options.keepingOnly(ObjCEncOptions() + .setExpandPointedToStructures() + .setExpandStructures()), + FD); + if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) { + // Note that we do extended encoding of protocol qualifer list + // Only when doing ivar or property encoding. + S += '"'; + for (const auto *I : OPT->quals()) { + S += '<'; + S += I->getObjCRuntimeNameAsString(); + S += '>'; + } + S += '"'; + } + return; + } + + S += '@'; + if (OPT->getInterfaceDecl() && + (FD || Options.EncodingProperty() || Options.EncodeClassNames())) { + S += '"'; + S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString(); + for (const auto *I : OPT->quals()) { + S += '<'; + S += I->getObjCRuntimeNameAsString(); + S += '>'; + } + S += '"'; + } + return; + } + + // gcc just blithely ignores member pointers. + // FIXME: we should do better than that. 'M' is available. + case Type::MemberPointer: + // This matches gcc's encoding, even though technically it is insufficient. + //FIXME. We should do a better job than gcc. + case Type::Vector: + case Type::ExtVector: + // Until we have a coherent encoding of these three types, issue warning. + if (NotEncodedT) + *NotEncodedT = T; + return; + + case Type::ConstantMatrix: + if (NotEncodedT) + *NotEncodedT = T; + return; + + // We could see an undeduced auto type here during error recovery. + // Just ignore it. + case Type::Auto: + case Type::DeducedTemplateSpecialization: + return; + + case Type::Pipe: + case Type::ExtInt: + #define ABSTRACT_TYPE(KIND, BASE) + #define TYPE(KIND, BASE) + #define DEPENDENT_TYPE(KIND, BASE) \ + case Type::KIND: + #define NON_CANONICAL_TYPE(KIND, BASE) \ + case Type::KIND: + #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \ + case Type::KIND: + #include "clang/AST/TypeNodes.inc" + llvm_unreachable("@encode for dependent type!"); + } + llvm_unreachable("bad type kind!"); + } + + void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl, + std::string &S, + const FieldDecl *FD, + bool includeVBases, + QualType *NotEncodedT) const { + assert(RDecl && "Expected non-null RecordDecl"); + assert(!RDecl->isUnion() && "Should not be called for unions"); + if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl()) + return; + + const auto *CXXRec = dyn_cast(RDecl); + std::multimap FieldOrBaseOffsets; + const ASTRecordLayout &layout = getASTRecordLayout(RDecl); + + if (CXXRec) { + for (const auto &BI : CXXRec->bases()) { + if (!BI.isVirtual()) { + CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); + if (base->isEmpty()) + continue; + uint64_t offs = toBits(layout.getBaseClassOffset(base)); + FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), + std::make_pair(offs, base)); + } + } + } + + unsigned i = 0; + for (FieldDecl *Field : RDecl->fields()) { + if (!Field->isZeroLengthBitField(*this) && Field->isZeroSize(*this)) + continue; + uint64_t offs = layout.getFieldOffset(i); + FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), + std::make_pair(offs, Field)); + ++i; + } + + if (CXXRec && includeVBases) { + for (const auto &BI : CXXRec->vbases()) { + CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); + if (base->isEmpty()) + continue; + uint64_t offs = toBits(layout.getVBaseClassOffset(base)); + if (offs >= 
uint64_t(toBits(layout.getNonVirtualSize())) && + FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end()) + FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(), + std::make_pair(offs, base)); + } + } + + CharUnits size; + if (CXXRec) { + size = includeVBases ? layout.getSize() : layout.getNonVirtualSize(); + } else { + size = layout.getSize(); + } + + #ifndef NDEBUG + uint64_t CurOffs = 0; + #endif + std::multimap::iterator + CurLayObj = FieldOrBaseOffsets.begin(); + + if (CXXRec && CXXRec->isDynamicClass() && + (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) { + if (FD) { + S += "\"_vptr$"; + std::string recname = CXXRec->getNameAsString(); + if (recname.empty()) recname = "?"; + S += recname; + S += '"'; + } + S += "^^?"; + #ifndef NDEBUG + CurOffs += getTypeSize(VoidPtrTy); + #endif + } + + if (!RDecl->hasFlexibleArrayMember()) { + // Mark the end of the structure. + uint64_t offs = toBits(size); + FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), + std::make_pair(offs, nullptr)); + } + + for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) { + #ifndef NDEBUG + assert(CurOffs <= CurLayObj->first); + if (CurOffs < CurLayObj->first) { + uint64_t padding = CurLayObj->first - CurOffs; + // FIXME: There doesn't seem to be a way to indicate in the encoding that + // packing/alignment of members is different that normal, in which case + // the encoding will be out-of-sync with the real layout. + // If the runtime switches to just consider the size of types without + // taking into account alignment, we could make padding explicit in the + // encoding (e.g. using arrays of chars). The encoding strings would be + // longer then though. + CurOffs += padding; + } + #endif + + NamedDecl *dcl = CurLayObj->second; + if (!dcl) + break; // reached end of structure. + + if (auto *base = dyn_cast(dcl)) { + // We expand the bases without their virtual bases since those are going + // in the initial structure. Note that this differs from gcc which + // expands virtual bases each time one is encountered in the hierarchy, + // making the encoding type bigger than it really is. 
+ getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false, + NotEncodedT); + assert(!base->isEmpty()); + #ifndef NDEBUG + CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize()); + #endif + } else { + const auto *field = cast(dcl); + if (FD) { + S += '"'; + S += field->getNameAsString(); + S += '"'; + } + + if (field->isBitField()) { + EncodeBitField(this, S, field->getType(), field); + #ifndef NDEBUG + CurOffs += field->getBitWidthValue(*this); + #endif + } else { + QualType qt = field->getType(); + getLegacyIntegralTypeEncoding(qt); + getObjCEncodingForTypeImpl( + qt, S, ObjCEncOptions().setExpandStructures().setIsStructField(), + FD, NotEncodedT); + #ifndef NDEBUG + CurOffs += getTypeSize(field->getType()); + #endif + } + } + } + } + + void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT, + std::string& S) const { + if (QT & Decl::OBJC_TQ_In) + S += 'n'; + if (QT & Decl::OBJC_TQ_Inout) + S += 'N'; + if (QT & Decl::OBJC_TQ_Out) + S += 'o'; + if (QT & Decl::OBJC_TQ_Bycopy) + S += 'O'; + if (QT & Decl::OBJC_TQ_Byref) + S += 'R'; + if (QT & Decl::OBJC_TQ_Oneway) + S += 'V'; + } + + TypedefDecl *ASTContext::getObjCIdDecl() const { + if (!ObjCIdDecl) { + QualType T = getObjCObjectType(ObjCBuiltinIdTy, {}, {}); + T = getObjCObjectPointerType(T); + ObjCIdDecl = buildImplicitTypedef(T, "id"); + } + return ObjCIdDecl; + } + + TypedefDecl *ASTContext::getObjCSelDecl() const { + if (!ObjCSelDecl) { + QualType T = getPointerType(ObjCBuiltinSelTy); + ObjCSelDecl = buildImplicitTypedef(T, "SEL"); + } + return ObjCSelDecl; + } + + TypedefDecl *ASTContext::getObjCClassDecl() const { + if (!ObjCClassDecl) { + QualType T = getObjCObjectType(ObjCBuiltinClassTy, {}, {}); + T = getObjCObjectPointerType(T); + ObjCClassDecl = buildImplicitTypedef(T, "Class"); + } + return ObjCClassDecl; + } + + ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const { + if (!ObjCProtocolClassDecl) { + ObjCProtocolClassDecl + = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(), + SourceLocation(), + &Idents.get("Protocol"), + /*typeParamList=*/nullptr, + /*PrevDecl=*/nullptr, + SourceLocation(), true); + } + + return ObjCProtocolClassDecl; + } + + //===----------------------------------------------------------------------===// + // __builtin_va_list Construction Functions + //===----------------------------------------------------------------------===// + + static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context, + StringRef Name) { + // typedef char* __builtin[_ms]_va_list; + QualType T = Context->getPointerType(Context->CharTy); + return Context->buildImplicitTypedef(T, Name); + } + + static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) { + return CreateCharPtrNamedVaListDecl(Context, "__builtin_ms_va_list"); + } + + static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) { + return CreateCharPtrNamedVaListDecl(Context, "__builtin_va_list"); + } + + static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) { + // typedef void* __builtin_va_list; + QualType T = Context->getPointerType(Context->VoidTy); + return Context->buildImplicitTypedef(T, "__builtin_va_list"); + } + + static TypedefDecl * + CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) { + // struct __va_list + RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list"); + if (Context->getLangOpts().CPlusPlus) { + // namespace std { struct __va_list { + NamespaceDecl *NS; + NS = NamespaceDecl::Create(const_cast(*Context), 
+ Context->getTranslationUnitDecl(), + /*Inline*/ false, SourceLocation(), + SourceLocation(), &Context->Idents.get("std"), + /*PrevDecl*/ nullptr); + NS->setImplicit(); + VaListTagDecl->setDeclContext(NS); + } + + VaListTagDecl->startDefinition(); + + const size_t NumFields = 5; + QualType FieldTypes[NumFields]; + const char *FieldNames[NumFields]; + + // void *__stack; + FieldTypes[0] = Context->getPointerType(Context->VoidTy); + FieldNames[0] = "__stack"; + + // void *__gr_top; + FieldTypes[1] = Context->getPointerType(Context->VoidTy); + FieldNames[1] = "__gr_top"; + + // void *__vr_top; + FieldTypes[2] = Context->getPointerType(Context->VoidTy); + FieldNames[2] = "__vr_top"; + + // int __gr_offs; + FieldTypes[3] = Context->IntTy; + FieldNames[3] = "__gr_offs"; + + // int __vr_offs; + FieldTypes[4] = Context->IntTy; + FieldNames[4] = "__vr_offs"; + + // Create fields + for (unsigned i = 0; i < NumFields; ++i) { + FieldDecl *Field = FieldDecl::Create(const_cast(*Context), + VaListTagDecl, + SourceLocation(), + SourceLocation(), + &Context->Idents.get(FieldNames[i]), + FieldTypes[i], /*TInfo=*/nullptr, + /*BitWidth=*/nullptr, + /*Mutable=*/false, + ICIS_NoInit); + Field->setAccess(AS_public); + VaListTagDecl->addDecl(Field); + } + VaListTagDecl->completeDefinition(); + Context->VaListTagDecl = VaListTagDecl; + QualType VaListTagType = Context->getRecordType(VaListTagDecl); + + // } __builtin_va_list; + return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list"); + } + + static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) { + // typedef struct __va_list_tag { + RecordDecl *VaListTagDecl; + + VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); + VaListTagDecl->startDefinition(); + + const size_t NumFields = 5; + QualType FieldTypes[NumFields]; + const char *FieldNames[NumFields]; + + // unsigned char gpr; + FieldTypes[0] = Context->UnsignedCharTy; + FieldNames[0] = "gpr"; + + // unsigned char fpr; + FieldTypes[1] = Context->UnsignedCharTy; + FieldNames[1] = "fpr"; + + // unsigned short reserved; + FieldTypes[2] = Context->UnsignedShortTy; + FieldNames[2] = "reserved"; + + // void* overflow_arg_area; + FieldTypes[3] = Context->getPointerType(Context->VoidTy); + FieldNames[3] = "overflow_arg_area"; + + // void* reg_save_area; + FieldTypes[4] = Context->getPointerType(Context->VoidTy); + FieldNames[4] = "reg_save_area"; + + // Create fields + for (unsigned i = 0; i < NumFields; ++i) { + FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl, + SourceLocation(), + SourceLocation(), + &Context->Idents.get(FieldNames[i]), + FieldTypes[i], /*TInfo=*/nullptr, + /*BitWidth=*/nullptr, + /*Mutable=*/false, + ICIS_NoInit); + Field->setAccess(AS_public); + VaListTagDecl->addDecl(Field); + } + VaListTagDecl->completeDefinition(); + Context->VaListTagDecl = VaListTagDecl; + QualType VaListTagType = Context->getRecordType(VaListTagDecl); + + // } __va_list_tag; + TypedefDecl *VaListTagTypedefDecl = + Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); + + QualType VaListTagTypedefType = + Context->getTypedefType(VaListTagTypedefDecl); + + // typedef __va_list_tag __builtin_va_list[1]; + llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); + QualType VaListTagArrayType + = Context->getConstantArrayType(VaListTagTypedefType, + Size, nullptr, ArrayType::Normal, 0); + return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); + } + + static TypedefDecl * + CreateX86_64ABIBuiltinVaListDecl(const ASTContext 
*Context) { + // struct __va_list_tag { + RecordDecl *VaListTagDecl; + VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); + VaListTagDecl->startDefinition(); + + const size_t NumFields = 4; + QualType FieldTypes[NumFields]; + const char *FieldNames[NumFields]; + + // unsigned gp_offset; + FieldTypes[0] = Context->UnsignedIntTy; + FieldNames[0] = "gp_offset"; + + // unsigned fp_offset; + FieldTypes[1] = Context->UnsignedIntTy; + FieldNames[1] = "fp_offset"; + + // void* overflow_arg_area; + FieldTypes[2] = Context->getPointerType(Context->VoidTy); + FieldNames[2] = "overflow_arg_area"; + + // void* reg_save_area; + FieldTypes[3] = Context->getPointerType(Context->VoidTy); + FieldNames[3] = "reg_save_area"; + + // Create fields + for (unsigned i = 0; i < NumFields; ++i) { + FieldDecl *Field = FieldDecl::Create(const_cast(*Context), + VaListTagDecl, + SourceLocation(), + SourceLocation(), + &Context->Idents.get(FieldNames[i]), + FieldTypes[i], /*TInfo=*/nullptr, + /*BitWidth=*/nullptr, + /*Mutable=*/false, + ICIS_NoInit); + Field->setAccess(AS_public); + VaListTagDecl->addDecl(Field); + } + VaListTagDecl->completeDefinition(); + Context->VaListTagDecl = VaListTagDecl; + QualType VaListTagType = Context->getRecordType(VaListTagDecl); + + // }; + + // typedef struct __va_list_tag __builtin_va_list[1]; + llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); + QualType VaListTagArrayType = Context->getConstantArrayType( + VaListTagType, Size, nullptr, ArrayType::Normal, 0); + return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); + } + + static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) { + // typedef int __builtin_va_list[4]; + llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4); + QualType IntArrayType = Context->getConstantArrayType( + Context->IntTy, Size, nullptr, ArrayType::Normal, 0); + return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list"); + } + + static TypedefDecl * + CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) { + // struct __va_list + RecordDecl *VaListDecl = Context->buildImplicitRecord("__va_list"); + if (Context->getLangOpts().CPlusPlus) { + // namespace std { struct __va_list { + NamespaceDecl *NS; + NS = NamespaceDecl::Create(const_cast(*Context), + Context->getTranslationUnitDecl(), + /*Inline*/false, SourceLocation(), + SourceLocation(), &Context->Idents.get("std"), + /*PrevDecl*/ nullptr); + NS->setImplicit(); + VaListDecl->setDeclContext(NS); + } + + VaListDecl->startDefinition(); + + // void * __ap; + FieldDecl *Field = FieldDecl::Create(const_cast(*Context), + VaListDecl, + SourceLocation(), + SourceLocation(), + &Context->Idents.get("__ap"), + Context->getPointerType(Context->VoidTy), + /*TInfo=*/nullptr, + /*BitWidth=*/nullptr, + /*Mutable=*/false, + ICIS_NoInit); + Field->setAccess(AS_public); + VaListDecl->addDecl(Field); + + // }; + VaListDecl->completeDefinition(); + Context->VaListTagDecl = VaListDecl; + + // typedef struct __va_list __builtin_va_list; + QualType T = Context->getRecordType(VaListDecl); + return Context->buildImplicitTypedef(T, "__builtin_va_list"); + } + + static TypedefDecl * + CreateSystemZBuiltinVaListDecl(const ASTContext *Context) { + // struct __va_list_tag { + RecordDecl *VaListTagDecl; + VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); + VaListTagDecl->startDefinition(); + + const size_t NumFields = 4; + QualType FieldTypes[NumFields]; + const char *FieldNames[NumFields]; + + // long __gpr; + FieldTypes[0] 
= Context->LongTy; + FieldNames[0] = "__gpr"; + + // long __fpr; + FieldTypes[1] = Context->LongTy; + FieldNames[1] = "__fpr"; + + // void *__overflow_arg_area; + FieldTypes[2] = Context->getPointerType(Context->VoidTy); + FieldNames[2] = "__overflow_arg_area"; + + // void *__reg_save_area; + FieldTypes[3] = Context->getPointerType(Context->VoidTy); + FieldNames[3] = "__reg_save_area"; + + // Create fields + for (unsigned i = 0; i < NumFields; ++i) { + FieldDecl *Field = FieldDecl::Create(const_cast(*Context), + VaListTagDecl, + SourceLocation(), + SourceLocation(), + &Context->Idents.get(FieldNames[i]), + FieldTypes[i], /*TInfo=*/nullptr, + /*BitWidth=*/nullptr, + /*Mutable=*/false, + ICIS_NoInit); + Field->setAccess(AS_public); + VaListTagDecl->addDecl(Field); + } + VaListTagDecl->completeDefinition(); + Context->VaListTagDecl = VaListTagDecl; + QualType VaListTagType = Context->getRecordType(VaListTagDecl); + + // }; + + // typedef __va_list_tag __builtin_va_list[1]; + llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); + QualType VaListTagArrayType = Context->getConstantArrayType( + VaListTagType, Size, nullptr, ArrayType::Normal, 0); + + return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); + } + + static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) { + // typedef struct __va_list_tag { + RecordDecl *VaListTagDecl; + VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); + VaListTagDecl->startDefinition(); + + const size_t NumFields = 3; + QualType FieldTypes[NumFields]; + const char *FieldNames[NumFields]; + + // void *CurrentSavedRegisterArea; + FieldTypes[0] = Context->getPointerType(Context->VoidTy); + FieldNames[0] = "__current_saved_reg_area_pointer"; + + // void *SavedRegAreaEnd; + FieldTypes[1] = Context->getPointerType(Context->VoidTy); + FieldNames[1] = "__saved_reg_area_end_pointer"; + + // void *OverflowArea; + FieldTypes[2] = Context->getPointerType(Context->VoidTy); + FieldNames[2] = "__overflow_area_pointer"; + + // Create fields + for (unsigned i = 0; i < NumFields; ++i) { + FieldDecl *Field = FieldDecl::Create( + const_cast(*Context), VaListTagDecl, SourceLocation(), + SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i], + /*TInfo=*/0, + /*BitWidth=*/0, + /*Mutable=*/false, ICIS_NoInit); + Field->setAccess(AS_public); + VaListTagDecl->addDecl(Field); + } + VaListTagDecl->completeDefinition(); + Context->VaListTagDecl = VaListTagDecl; + QualType VaListTagType = Context->getRecordType(VaListTagDecl); + + // } __va_list_tag; + TypedefDecl *VaListTagTypedefDecl = + Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); + + QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl); + + // typedef __va_list_tag __builtin_va_list[1]; + llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); + QualType VaListTagArrayType = Context->getConstantArrayType( + VaListTagTypedefType, Size, nullptr, ArrayType::Normal, 0); + + return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); + } + + static TypedefDecl *CreateVaListDecl(const ASTContext *Context, + TargetInfo::BuiltinVaListKind Kind) { + switch (Kind) { + case TargetInfo::CharPtrBuiltinVaList: + return CreateCharPtrBuiltinVaListDecl(Context); + case TargetInfo::VoidPtrBuiltinVaList: + return CreateVoidPtrBuiltinVaListDecl(Context); + case TargetInfo::AArch64ABIBuiltinVaList: + return CreateAArch64ABIBuiltinVaListDecl(Context); + case TargetInfo::PowerABIBuiltinVaList: + return 
CreatePowerABIBuiltinVaListDecl(Context); + case TargetInfo::X86_64ABIBuiltinVaList: + return CreateX86_64ABIBuiltinVaListDecl(Context); + case TargetInfo::PNaClABIBuiltinVaList: + return CreatePNaClABIBuiltinVaListDecl(Context); + case TargetInfo::AAPCSABIBuiltinVaList: + return CreateAAPCSABIBuiltinVaListDecl(Context); + case TargetInfo::SystemZBuiltinVaList: + return CreateSystemZBuiltinVaListDecl(Context); + case TargetInfo::HexagonBuiltinVaList: + return CreateHexagonBuiltinVaListDecl(Context); + } + + llvm_unreachable("Unhandled __builtin_va_list type kind"); + } + + TypedefDecl *ASTContext::getBuiltinVaListDecl() const { + if (!BuiltinVaListDecl) { + BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind()); + assert(BuiltinVaListDecl->isImplicit()); + } + + return BuiltinVaListDecl; + } + + Decl *ASTContext::getVaListTagDecl() const { + // Force the creation of VaListTagDecl by building the __builtin_va_list + // declaration. + if (!VaListTagDecl) + (void)getBuiltinVaListDecl(); + + return VaListTagDecl; + } + + TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const { + if (!BuiltinMSVaListDecl) + BuiltinMSVaListDecl = CreateMSVaListDecl(this); + + return BuiltinMSVaListDecl; + } + + bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const { + return BuiltinInfo.canBeRedeclared(FD->getBuiltinID()); + } + + void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) { + assert(ObjCConstantStringType.isNull() && + "'NSConstantString' type already set!"); + + ObjCConstantStringType = getObjCInterfaceType(Decl); + } + + /// Retrieve the template name that corresponds to a non-empty + /// lookup. + TemplateName + ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin, + UnresolvedSetIterator End) const { + unsigned size = End - Begin; + assert(size > 1 && "set is not overloaded!"); + + void *memory = Allocate(sizeof(OverloadedTemplateStorage) + + size * sizeof(FunctionTemplateDecl*)); + auto *OT = new (memory) OverloadedTemplateStorage(size); + + NamedDecl **Storage = OT->getStorage(); + for (UnresolvedSetIterator I = Begin; I != End; ++I) { + NamedDecl *D = *I; + assert(isa(D) || + isa(D) || + (isa(D) && + isa(D->getUnderlyingDecl()))); + *Storage++ = D; + } + + return TemplateName(OT); + } + + /// Retrieve a template name representing an unqualified-id that has been + /// assumed to name a template for ADL purposes. + TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const { + auto *OT = new (*this) AssumedTemplateStorage(Name); + return TemplateName(OT); + } + + /// Retrieve the template name that represents a qualified + /// template name such as \c std::vector. + TemplateName + ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS, + bool TemplateKeyword, + TemplateDecl *Template) const { + assert(NNS && "Missing nested-name-specifier in qualified template name"); + + // FIXME: Canonicalization? + llvm::FoldingSetNodeID ID; + QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template); + + void *InsertPos = nullptr; + QualifiedTemplateName *QTN = + QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos); + if (!QTN) { + QTN = new (*this, alignof(QualifiedTemplateName)) + QualifiedTemplateName(NNS, TemplateKeyword, Template); + QualifiedTemplateNames.InsertNode(QTN, InsertPos); + } + + return TemplateName(QTN); + } + + /// Retrieve the template name that represents a dependent + /// template name such as \c MetaFun::template apply. 
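+ /// For example (illustrative):
+ /// @code
+ /// template <typename MetaFun>
+ /// struct Apply {
+ ///   typedef typename MetaFun::template apply<int>::type type;
+ /// };
+ /// @endcode
+ /// Here "MetaFun::template apply" is a dependent template name.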
+ TemplateName + ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, + const IdentifierInfo *Name) const { + assert((!NNS || NNS->isDependent()) && + "Nested name specifier must be dependent"); + + llvm::FoldingSetNodeID ID; + DependentTemplateName::Profile(ID, NNS, Name); + + void *InsertPos = nullptr; + DependentTemplateName *QTN = + DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); + + if (QTN) + return TemplateName(QTN); + + NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); + if (CanonNNS == NNS) { + QTN = new (*this, alignof(DependentTemplateName)) + DependentTemplateName(NNS, Name); + } else { + TemplateName Canon = getDependentTemplateName(CanonNNS, Name); + QTN = new (*this, alignof(DependentTemplateName)) + DependentTemplateName(NNS, Name, Canon); + DependentTemplateName *CheckQTN = + DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); + assert(!CheckQTN && "Dependent type name canonicalization broken"); + (void)CheckQTN; + } + + DependentTemplateNames.InsertNode(QTN, InsertPos); + return TemplateName(QTN); + } + + /// Retrieve the template name that represents a dependent + /// template name such as \c MetaFun::template operator+. + TemplateName + ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, + OverloadedOperatorKind Operator) const { + assert((!NNS || NNS->isDependent()) && + "Nested name specifier must be dependent"); + + llvm::FoldingSetNodeID ID; + DependentTemplateName::Profile(ID, NNS, Operator); + + void *InsertPos = nullptr; + DependentTemplateName *QTN + = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); + + if (QTN) + return TemplateName(QTN); + + NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); + if (CanonNNS == NNS) { + QTN = new (*this, alignof(DependentTemplateName)) + DependentTemplateName(NNS, Operator); + } else { + TemplateName Canon = getDependentTemplateName(CanonNNS, Operator); + QTN = new (*this, alignof(DependentTemplateName)) + DependentTemplateName(NNS, Operator, Canon); + + DependentTemplateName *CheckQTN + = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); + assert(!CheckQTN && "Dependent template name canonicalization broken"); + (void)CheckQTN; + } + + DependentTemplateNames.InsertNode(QTN, InsertPos); + return TemplateName(QTN); + } + + TemplateName + ASTContext::getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param, + TemplateName replacement) const { + llvm::FoldingSetNodeID ID; + SubstTemplateTemplateParmStorage::Profile(ID, param, replacement); + + void *insertPos = nullptr; + SubstTemplateTemplateParmStorage *subst + = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos); + + if (!subst) { + subst = new (*this) SubstTemplateTemplateParmStorage(param, replacement); + SubstTemplateTemplateParms.InsertNode(subst, insertPos); + } + + return TemplateName(subst); + } + + TemplateName + ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param, + const TemplateArgument &ArgPack) const { + auto &Self = const_cast(*this); + llvm::FoldingSetNodeID ID; + SubstTemplateTemplateParmPackStorage::Profile(ID, Self, Param, ArgPack); + + void *InsertPos = nullptr; + SubstTemplateTemplateParmPackStorage *Subst + = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos); + + if (!Subst) { + Subst = new (*this) SubstTemplateTemplateParmPackStorage(Param, + ArgPack.pack_size(), + ArgPack.pack_begin()); + SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos); + } + + return TemplateName(Subst); + } + + /// 
getFromTargetType - Given one of the integer types provided by + /// TargetInfo, produce the corresponding type. The unsigned @p Type + /// is actually a value of type @c TargetInfo::IntType. + CanQualType ASTContext::getFromTargetType(unsigned Type) const { + switch (Type) { + case TargetInfo::NoInt: return {}; + case TargetInfo::SignedChar: return SignedCharTy; + case TargetInfo::UnsignedChar: return UnsignedCharTy; + case TargetInfo::SignedShort: return ShortTy; + case TargetInfo::UnsignedShort: return UnsignedShortTy; + case TargetInfo::SignedInt: return IntTy; + case TargetInfo::UnsignedInt: return UnsignedIntTy; + case TargetInfo::SignedLong: return LongTy; + case TargetInfo::UnsignedLong: return UnsignedLongTy; + case TargetInfo::SignedLongLong: return LongLongTy; + case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy; + } + + llvm_unreachable("Unhandled TargetInfo::IntType value"); + } + + //===----------------------------------------------------------------------===// + // Type Predicates. + //===----------------------------------------------------------------------===// + + /// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's + /// garbage collection attribute. + /// + Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const { + if (getLangOpts().getGC() == LangOptions::NonGC) + return Qualifiers::GCNone; + + assert(getLangOpts().ObjC); + Qualifiers::GC GCAttrs = Ty.getObjCGCAttr(); + + // Default behaviour under objective-C's gc is for ObjC pointers + // (or pointers to them) be treated as though they were declared + // as __strong. + if (GCAttrs == Qualifiers::GCNone) { + if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) + return Qualifiers::Strong; + else if (Ty->isPointerType()) + return getObjCGCAttrKind(Ty->castAs()->getPointeeType()); + } else { + // It's not valid to set GC attributes on anything that isn't a + // pointer. + #ifndef NDEBUG + QualType CT = Ty->getCanonicalTypeInternal(); + while (const auto *AT = dyn_cast(CT)) + CT = AT->getElementType(); + assert(CT->isAnyPointerType() || CT->isBlockPointerType()); + #endif + } + return GCAttrs; + } + + //===----------------------------------------------------------------------===// + // Type Compatibility Testing + //===----------------------------------------------------------------------===// + + /// areCompatVectorTypes - Return true if the two specified vector types are + /// compatible. + static bool areCompatVectorTypes(const VectorType *LHS, + const VectorType *RHS) { + assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); + return LHS->getElementType() == RHS->getElementType() && + LHS->getNumElements() == RHS->getNumElements(); + } + + /// areCompatMatrixTypes - Return true if the two specified matrix types are + /// compatible. 
+ static bool areCompatMatrixTypes(const ConstantMatrixType *LHS, + const ConstantMatrixType *RHS) { + assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); + return LHS->getElementType() == RHS->getElementType() && + LHS->getNumRows() == RHS->getNumRows() && + LHS->getNumColumns() == RHS->getNumColumns(); + } + + bool ASTContext::areCompatibleVectorTypes(QualType FirstVec, + QualType SecondVec) { + assert(FirstVec->isVectorType() && "FirstVec should be a vector type"); + assert(SecondVec->isVectorType() && "SecondVec should be a vector type"); + + if (hasSameUnqualifiedType(FirstVec, SecondVec)) + return true; + + // Treat Neon vector types and most AltiVec vector types as if they are the + // equivalent GCC vector types. + const auto *First = FirstVec->castAs(); + const auto *Second = SecondVec->castAs(); + if (First->getNumElements() == Second->getNumElements() && + hasSameType(First->getElementType(), Second->getElementType()) && + First->getVectorKind() != VectorType::AltiVecPixel && + First->getVectorKind() != VectorType::AltiVecBool && + Second->getVectorKind() != VectorType::AltiVecPixel && + Second->getVectorKind() != VectorType::AltiVecBool && + First->getVectorKind() != VectorType::SveFixedLengthDataVector && + First->getVectorKind() != VectorType::SveFixedLengthPredicateVector && + Second->getVectorKind() != VectorType::SveFixedLengthDataVector && + Second->getVectorKind() != VectorType::SveFixedLengthPredicateVector) + return true; + + return false; + } + + bool ASTContext::areCompatibleSveTypes(QualType FirstType, + QualType SecondType) { + assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) || + (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) && + "Expected SVE builtin type and vector type!"); + + auto IsValidCast = [this](QualType FirstType, QualType SecondType) { + if (const auto *BT = FirstType->getAs()) { + if (const auto *VT = SecondType->getAs()) { + // Predicates have the same representation as uint8 so we also have to + // check the kind to make these types incompatible. + if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) + return BT->getKind() == BuiltinType::SveBool; + else if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) + return VT->getElementType().getCanonicalType() == + FirstType->getSveEltType(*this); + else if (VT->getVectorKind() == VectorType::GenericVector) + return getTypeSize(SecondType) == getLangOpts().ArmSveVectorBits && + hasSameType(VT->getElementType(), + getBuiltinVectorTypeInfo(BT).ElementType); + } + } + return false; + }; + + return IsValidCast(FirstType, SecondType) || + IsValidCast(SecondType, FirstType); + } + + bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType, + QualType SecondType) { + assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) || + (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) && + "Expected SVE builtin type and vector type!"); + + auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { + if (!FirstType->getAs()) + return false; + + const auto *VecTy = SecondType->getAs(); + if (VecTy && + (VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector || + VecTy->getVectorKind() == VectorType::GenericVector)) { + const LangOptions::LaxVectorConversionKind LVCKind = + getLangOpts().getLaxVectorConversions(); + + // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion. 
+ // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly + // converts to VLAT and VLAT implicitly converts to GNUT." + // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and + // predicates. + if (VecTy->getVectorKind() == VectorType::GenericVector && + getTypeSize(SecondType) != getLangOpts().ArmSveVectorBits) + return false; + + // If -flax-vector-conversions=all is specified, the types are + // certainly compatible. + if (LVCKind == LangOptions::LaxVectorConversionKind::All) + return true; + + // If -flax-vector-conversions=integer is specified, the types are + // compatible if the elements are integer types. + if (LVCKind == LangOptions::LaxVectorConversionKind::Integer) + return VecTy->getElementType().getCanonicalType()->isIntegerType() && + FirstType->getSveEltType(*this)->isIntegerType(); + } + + return false; + }; + + return IsLaxCompatible(FirstType, SecondType) || + IsLaxCompatible(SecondType, FirstType); + } + + bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const { + while (true) { + // __strong id + if (const AttributedType *Attr = dyn_cast(Ty)) { + if (Attr->getAttrKind() == attr::ObjCOwnership) + return true; + + Ty = Attr->getModifiedType(); + + // X *__strong (...) + } else if (const ParenType *Paren = dyn_cast(Ty)) { + Ty = Paren->getInnerType(); + + // We do not want to look through typedefs, typeof(expr), + // typeof(type), or any other way that the type is somehow + // abstracted. + } else { + return false; + } + } + } + + //===----------------------------------------------------------------------===// + // ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's. + //===----------------------------------------------------------------------===// + + /// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the + /// inheritance hierarchy of 'rProto'. + bool + ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto, + ObjCProtocolDecl *rProto) const { + if (declaresSameEntity(lProto, rProto)) + return true; + for (auto *PI : rProto->protocols()) + if (ProtocolCompatibleWithProtocol(lProto, PI)) + return true; + return false; + } + + /// ObjCQualifiedClassTypesAreCompatible - compare Class and + /// Class. + bool ASTContext::ObjCQualifiedClassTypesAreCompatible( + const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) { + for (auto *lhsProto : lhs->quals()) { + bool match = false; + for (auto *rhsProto : rhs->quals()) { + if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) { + match = true; + break; + } + } + if (!match) + return false; + } + return true; + } + + /// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an + /// ObjCQualifiedIDType. + bool ASTContext::ObjCQualifiedIdTypesAreCompatible( + const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs, + bool compare) { + // Allow id and an 'id' in all cases. + if (lhs->isObjCIdType() || rhs->isObjCIdType()) + return true; + + // Don't allow id to convert to Class or Class in either direction. + if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() || + rhs->isObjCClassType() || rhs->isObjCQualifiedClassType()) + return false; + + if (lhs->isObjCQualifiedIdType()) { + if (rhs->qual_empty()) { + // If the RHS is a unqualified interface pointer "NSString*", + // make sure we check the class hierarchy. + if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { + for (auto *I : lhs->quals()) { + // when comparing an id
<P>
on lhs with a static type on rhs, + // see if static class implements all of id's protocols, directly or + // through its super class and categories. + if (!rhsID->ClassImplementsProtocol(I, true)) + return false; + } + } + // If there are no qualifiers and no interface, we have an 'id'. + return true; + } + // Both the right and left sides have qualifiers. + for (auto *lhsProto : lhs->quals()) { + bool match = false; + + // when comparing an id
<P>
on lhs with a static type on rhs, + // see if static class implements all of id's protocols, directly or + // through its super class and categories. + for (auto *rhsProto : rhs->quals()) { + if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || + (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { + match = true; + break; + } + } + // If the RHS is a qualified interface pointer "NSString
<P>
*", + // make sure we check the class hierarchy. + if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { + for (auto *I : lhs->quals()) { + // when comparing an id
<P>
on lhs with a static type on rhs, + // see if static class implements all of id's protocols, directly or + // through its super class and categories. + if (rhsID->ClassImplementsProtocol(I, true)) { + match = true; + break; + } + } + } + if (!match) + return false; + } + + return true; + } + + assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id"); + + if (lhs->getInterfaceType()) { + // If both the right and left sides have qualifiers. + for (auto *lhsProto : lhs->quals()) { + bool match = false; + + // when comparing an id
<P>
on rhs with a static type on lhs, + // see if static class implements all of id's protocols, directly or + // through its super class and categories. + // First, lhs protocols in the qualifier list must be found, direct + // or indirect in rhs's qualifier list or it is a mismatch. + for (auto *rhsProto : rhs->quals()) { + if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || + (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { + match = true; + break; + } + } + if (!match) + return false; + } + + // Static class's protocols, or its super class or category protocols + // must be found, direct or indirect in rhs's qualifier list or it is a mismatch. + if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) { + llvm::SmallPtrSet LHSInheritedProtocols; + CollectInheritedProtocols(lhsID, LHSInheritedProtocols); + // This is rather dubious but matches gcc's behavior. If lhs has + // no type qualifier and its class has no static protocol(s) + // assume that it is mismatch. + if (LHSInheritedProtocols.empty() && lhs->qual_empty()) + return false; + for (auto *lhsProto : LHSInheritedProtocols) { + bool match = false; + for (auto *rhsProto : rhs->quals()) { + if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || + (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { + match = true; + break; + } + } + if (!match) + return false; + } + } + return true; + } + return false; + } + + /// canAssignObjCInterfaces - Return true if the two interface types are + /// compatible for assignment from RHS to LHS. This handles validation of any + /// protocol qualifiers on the LHS or RHS. + bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT, + const ObjCObjectPointerType *RHSOPT) { + const ObjCObjectType* LHS = LHSOPT->getObjectType(); + const ObjCObjectType* RHS = RHSOPT->getObjectType(); + + // If either type represents the built-in 'id' type, return true. + if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId()) + return true; + + // Function object that propagates a successful result or handles + // __kindof types. + auto finish = [&](bool succeeded) -> bool { + if (succeeded) + return true; + + if (!RHS->isKindOfType()) + return false; + + // Strip off __kindof and protocol qualifiers, then check whether + // we can assign the other way. + return canAssignObjCInterfaces(RHSOPT->stripObjCKindOfTypeAndQuals(*this), + LHSOPT->stripObjCKindOfTypeAndQuals(*this)); + }; + + // Casts from or to id
<P>
are allowed when the other side has compatible + // protocols. + if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) { + return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false)); + } + + // Verify protocol compatibility for casts from Class to Class. + if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) { + return finish(ObjCQualifiedClassTypesAreCompatible(LHSOPT, RHSOPT)); + } + + // Casts from Class to Class, or vice-versa, are allowed. + if (LHS->isObjCClass() && RHS->isObjCClass()) { + return true; + } + + // If we have 2 user-defined types, fall into that path. + if (LHS->getInterface() && RHS->getInterface()) { + return finish(canAssignObjCInterfaces(LHS, RHS)); + } + + return false; + } + + /// canAssignObjCInterfacesInBlockPointer - This routine is specifically written + /// for providing type-safety for objective-c pointers used to pass/return + /// arguments in block literals. When passed as arguments, passing 'A*' where + /// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is + /// not OK. For the return type, the opposite is not OK. + bool ASTContext::canAssignObjCInterfacesInBlockPointer( + const ObjCObjectPointerType *LHSOPT, + const ObjCObjectPointerType *RHSOPT, + bool BlockReturnType) { + + // Function object that propagates a successful result or handles + // __kindof types. + auto finish = [&](bool succeeded) -> bool { + if (succeeded) + return true; + + const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT; + if (!Expected->isKindOfType()) + return false; + + // Strip off __kindof and protocol qualifiers, then check whether + // we can assign the other way. + return canAssignObjCInterfacesInBlockPointer( + RHSOPT->stripObjCKindOfTypeAndQuals(*this), + LHSOPT->stripObjCKindOfTypeAndQuals(*this), + BlockReturnType); + }; + + if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType()) + return true; + + if (LHSOPT->isObjCBuiltinType()) { + return finish(RHSOPT->isObjCBuiltinType() || + RHSOPT->isObjCQualifiedIdType()); + } + + if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) { + if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking) + // Use for block parameters previous type checking for compatibility. + return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false) || + // Or corrected type checking as in non-compat mode. + (!BlockReturnType && + ObjCQualifiedIdTypesAreCompatible(RHSOPT, LHSOPT, false))); + else + return finish(ObjCQualifiedIdTypesAreCompatible( + (BlockReturnType ? LHSOPT : RHSOPT), + (BlockReturnType ? RHSOPT : LHSOPT), false)); + } + + const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType(); + const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType(); + if (LHS && RHS) { // We have 2 user-defined types. + if (LHS != RHS) { + if (LHS->getDecl()->isSuperClassOf(RHS->getDecl())) + return finish(BlockReturnType); + if (RHS->getDecl()->isSuperClassOf(LHS->getDecl())) + return finish(!BlockReturnType); + } + else + return true; + } + return false; + } + + /// Comparison routine for Objective-C protocols to be used with + /// llvm::array_pod_sort. + static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs, + ObjCProtocolDecl * const *rhs) { + return (*lhs)->getName().compare((*rhs)->getName()); + } + + /// getIntersectionOfProtocols - This routine finds the intersection of set + /// of protocols inherited from two distinct objective-c pointer objects with + /// the given common base. 
+ /// It is used to build composite qualifier list of the composite type of + /// the conditional expression involving two objective-c pointer objects. + static + void getIntersectionOfProtocols(ASTContext &Context, + const ObjCInterfaceDecl *CommonBase, + const ObjCObjectPointerType *LHSOPT, + const ObjCObjectPointerType *RHSOPT, + SmallVectorImpl &IntersectionSet) { + + const ObjCObjectType* LHS = LHSOPT->getObjectType(); + const ObjCObjectType* RHS = RHSOPT->getObjectType(); + assert(LHS->getInterface() && "LHS must have an interface base"); + assert(RHS->getInterface() && "RHS must have an interface base"); + + // Add all of the protocols for the LHS. + llvm::SmallPtrSet LHSProtocolSet; + + // Start with the protocol qualifiers. + for (auto proto : LHS->quals()) { + Context.CollectInheritedProtocols(proto, LHSProtocolSet); + } + + // Also add the protocols associated with the LHS interface. + Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet); + + // Add all of the protocols for the RHS. + llvm::SmallPtrSet RHSProtocolSet; + + // Start with the protocol qualifiers. + for (auto proto : RHS->quals()) { + Context.CollectInheritedProtocols(proto, RHSProtocolSet); + } + + // Also add the protocols associated with the RHS interface. + Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet); + + // Compute the intersection of the collected protocol sets. + for (auto proto : LHSProtocolSet) { + if (RHSProtocolSet.count(proto)) + IntersectionSet.push_back(proto); + } + + // Compute the set of protocols that is implied by either the common type or + // the protocols within the intersection. + llvm::SmallPtrSet ImpliedProtocols; + Context.CollectInheritedProtocols(CommonBase, ImpliedProtocols); + + // Remove any implied protocols from the list of inherited protocols. + if (!ImpliedProtocols.empty()) { + IntersectionSet.erase( + std::remove_if(IntersectionSet.begin(), + IntersectionSet.end(), + [&](ObjCProtocolDecl *proto) -> bool { + return ImpliedProtocols.count(proto) > 0; + }), + IntersectionSet.end()); + } + + // Sort the remaining protocols by name. + llvm::array_pod_sort(IntersectionSet.begin(), IntersectionSet.end(), + compareObjCProtocolsByName); + } + + /// Determine whether the first type is a subtype of the second. + static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs, + QualType rhs) { + // Common case: two object pointers. + const auto *lhsOPT = lhs->getAs(); + const auto *rhsOPT = rhs->getAs(); + if (lhsOPT && rhsOPT) + return ctx.canAssignObjCInterfaces(lhsOPT, rhsOPT); + + // Two block pointers. + const auto *lhsBlock = lhs->getAs(); + const auto *rhsBlock = rhs->getAs(); + if (lhsBlock && rhsBlock) + return ctx.typesAreBlockPointerCompatible(lhs, rhs); + + // If either is an unqualified 'id' and the other is a block, it's + // acceptable. + if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) || + (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock)) + return true; + + return false; + } + + // Check that the given Objective-C type argument lists are equivalent. 
+ static bool sameObjCTypeArgs(ASTContext &ctx, + const ObjCInterfaceDecl *iface, + ArrayRef lhsArgs, + ArrayRef rhsArgs, + bool stripKindOf) { + if (lhsArgs.size() != rhsArgs.size()) + return false; + + ObjCTypeParamList *typeParams = iface->getTypeParamList(); + for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) { + if (ctx.hasSameType(lhsArgs[i], rhsArgs[i])) + continue; + + switch (typeParams->begin()[i]->getVariance()) { + case ObjCTypeParamVariance::Invariant: + if (!stripKindOf || + !ctx.hasSameType(lhsArgs[i].stripObjCKindOfType(ctx), + rhsArgs[i].stripObjCKindOfType(ctx))) { + return false; + } + break; + + case ObjCTypeParamVariance::Covariant: + if (!canAssignObjCObjectTypes(ctx, lhsArgs[i], rhsArgs[i])) + return false; + break; + + case ObjCTypeParamVariance::Contravariant: + if (!canAssignObjCObjectTypes(ctx, rhsArgs[i], lhsArgs[i])) + return false; + break; + } + } + + return true; + } + + QualType ASTContext::areCommonBaseCompatible( + const ObjCObjectPointerType *Lptr, + const ObjCObjectPointerType *Rptr) { + const ObjCObjectType *LHS = Lptr->getObjectType(); + const ObjCObjectType *RHS = Rptr->getObjectType(); + const ObjCInterfaceDecl* LDecl = LHS->getInterface(); + const ObjCInterfaceDecl* RDecl = RHS->getInterface(); + + if (!LDecl || !RDecl) + return {}; + + // When either LHS or RHS is a kindof type, we should return a kindof type. + // For example, for common base of kindof(ASub1) and kindof(ASub2), we return + // kindof(A). + bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType(); + + // Follow the left-hand side up the class hierarchy until we either hit a + // root or find the RHS. Record the ancestors in case we don't find it. + llvm::SmallDenseMap + LHSAncestors; + while (true) { + // Record this ancestor. We'll need this if the common type isn't in the + // path from the LHS to the root. + LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS; + + if (declaresSameEntity(LHS->getInterface(), RDecl)) { + // Get the type arguments. + ArrayRef LHSTypeArgs = LHS->getTypeArgsAsWritten(); + bool anyChanges = false; + if (LHS->isSpecialized() && RHS->isSpecialized()) { + // Both have type arguments, compare them. + if (!sameObjCTypeArgs(*this, LHS->getInterface(), + LHS->getTypeArgs(), RHS->getTypeArgs(), + /*stripKindOf=*/true)) + return {}; + } else if (LHS->isSpecialized() != RHS->isSpecialized()) { + // If only one has type arguments, the result will not have type + // arguments. + LHSTypeArgs = {}; + anyChanges = true; + } + + // Compute the intersection of protocols. + SmallVector Protocols; + getIntersectionOfProtocols(*this, LHS->getInterface(), Lptr, Rptr, + Protocols); + if (!Protocols.empty()) + anyChanges = true; + + // If anything in the LHS will have changed, build a new result type. + // If we need to return a kindof type but LHS is not a kindof type, we + // build a new result type. + if (anyChanges || LHS->isKindOfType() != anyKindOf) { + QualType Result = getObjCInterfaceType(LHS->getInterface()); + Result = getObjCObjectType(Result, LHSTypeArgs, Protocols, + anyKindOf || LHS->isKindOfType()); + return getObjCObjectPointerType(Result); + } + + return getObjCObjectPointerType(QualType(LHS, 0)); + } + + // Find the superclass. + QualType LHSSuperType = LHS->getSuperClassType(); + if (LHSSuperType.isNull()) + break; + + LHS = LHSSuperType->castAs(); + } + + // We didn't find anything by following the LHS to its root; now check + // the RHS against the cached set of ancestors. 
+ while (true) { + auto KnownLHS = LHSAncestors.find(RHS->getInterface()->getCanonicalDecl()); + if (KnownLHS != LHSAncestors.end()) { + LHS = KnownLHS->second; + + // Get the type arguments. + ArrayRef RHSTypeArgs = RHS->getTypeArgsAsWritten(); + bool anyChanges = false; + if (LHS->isSpecialized() && RHS->isSpecialized()) { + // Both have type arguments, compare them. + if (!sameObjCTypeArgs(*this, LHS->getInterface(), + LHS->getTypeArgs(), RHS->getTypeArgs(), + /*stripKindOf=*/true)) + return {}; + } else if (LHS->isSpecialized() != RHS->isSpecialized()) { + // If only one has type arguments, the result will not have type + // arguments. + RHSTypeArgs = {}; + anyChanges = true; + } + + // Compute the intersection of protocols. + SmallVector Protocols; + getIntersectionOfProtocols(*this, RHS->getInterface(), Lptr, Rptr, + Protocols); + if (!Protocols.empty()) + anyChanges = true; + + // If we need to return a kindof type but RHS is not a kindof type, we + // build a new result type. + if (anyChanges || RHS->isKindOfType() != anyKindOf) { + QualType Result = getObjCInterfaceType(RHS->getInterface()); + Result = getObjCObjectType(Result, RHSTypeArgs, Protocols, + anyKindOf || RHS->isKindOfType()); + return getObjCObjectPointerType(Result); + } + + return getObjCObjectPointerType(QualType(RHS, 0)); + } + + // Find the superclass of the RHS. + QualType RHSSuperType = RHS->getSuperClassType(); + if (RHSSuperType.isNull()) + break; + + RHS = RHSSuperType->castAs(); + } + + return {}; + } + + bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS, + const ObjCObjectType *RHS) { + assert(LHS->getInterface() && "LHS is not an interface type"); + assert(RHS->getInterface() && "RHS is not an interface type"); + + // Verify that the base decls are compatible: the RHS must be a subclass of + // the LHS. + ObjCInterfaceDecl *LHSInterface = LHS->getInterface(); + bool IsSuperClass = LHSInterface->isSuperClassOf(RHS->getInterface()); + if (!IsSuperClass) + return false; + + // If the LHS has protocol qualifiers, determine whether all of them are + // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the + // LHS). + if (LHS->getNumProtocols() > 0) { + // OK if conversion of LHS to SuperClass results in narrowing of types + // ; i.e., SuperClass may implement at least one of the protocols + // in LHS's protocol list. Example, SuperObj = lhs is ok. + // But not SuperObj = lhs. + llvm::SmallPtrSet SuperClassInheritedProtocols; + CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols); + // Also, if RHS has explicit quelifiers, include them for comparing with LHS's + // qualifiers. + for (auto *RHSPI : RHS->quals()) + CollectInheritedProtocols(RHSPI, SuperClassInheritedProtocols); + // If there is no protocols associated with RHS, it is not a match. + if (SuperClassInheritedProtocols.empty()) + return false; + + for (const auto *LHSProto : LHS->quals()) { + bool SuperImplementsProtocol = false; + for (auto *SuperClassProto : SuperClassInheritedProtocols) + if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) { + SuperImplementsProtocol = true; + break; + } + if (!SuperImplementsProtocol) + return false; + } + } + + // If the LHS is specialized, we may need to check type arguments. + if (LHS->isSpecialized()) { + // Follow the superclass chain until we've matched the LHS class in the + // hierarchy. This substitutes type arguments through. 
+ const ObjCObjectType *RHSSuper = RHS; + while (!declaresSameEntity(RHSSuper->getInterface(), LHSInterface)) + RHSSuper = RHSSuper->getSuperClassType()->castAs(); + + // If the RHS is specializd, compare type arguments. + if (RHSSuper->isSpecialized() && + !sameObjCTypeArgs(*this, LHS->getInterface(), + LHS->getTypeArgs(), RHSSuper->getTypeArgs(), + /*stripKindOf=*/true)) { + return false; + } + } + + return true; + } + + bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) { + // get the "pointed to" types + const auto *LHSOPT = LHS->getAs(); + const auto *RHSOPT = RHS->getAs(); + + if (!LHSOPT || !RHSOPT) + return false; + + return canAssignObjCInterfaces(LHSOPT, RHSOPT) || + canAssignObjCInterfaces(RHSOPT, LHSOPT); + } + + bool ASTContext::canBindObjCObjectType(QualType To, QualType From) { + return canAssignObjCInterfaces( + getObjCObjectPointerType(To)->castAs(), + getObjCObjectPointerType(From)->castAs()); + } + + /// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible, + /// both shall have the identically qualified version of a compatible type. + /// C99 6.2.7p1: Two types have compatible types if their types are the + /// same. See 6.7.[2,3,5] for additional rules. + bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS, + bool CompareUnqualified) { + if (getLangOpts().CPlusPlus) + return hasSameType(LHS, RHS); + + return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull(); + } + + bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) { + return typesAreCompatible(LHS, RHS); + } + + bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) { + return !mergeTypes(LHS, RHS, true).isNull(); + } + + /// mergeTransparentUnionType - if T is a transparent union type and a member + /// of T is compatible with SubType, return the merged type, else return + /// QualType() + QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType, + bool OfBlockPointer, + bool Unqualified) { + if (const RecordType *UT = T->getAsUnionType()) { + RecordDecl *UD = UT->getDecl(); + if (UD->hasAttr()) { + for (const auto *I : UD->fields()) { + QualType ET = I->getType().getUnqualifiedType(); + QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified); + if (!MT.isNull()) + return MT; + } + } + } + + return {}; + } + + /// mergeFunctionParameterTypes - merge two types which appear as function + /// parameter types + QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs, + bool OfBlockPointer, + bool Unqualified) { + // GNU extension: two types are compatible if they appear as a function + // argument, one of the types is a transparent union type and the other + // type is compatible with a union member + QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer, + Unqualified); + if (!lmerge.isNull()) + return lmerge; + + QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer, + Unqualified); + if (!rmerge.isNull()) + return rmerge; + + return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified); + } + + QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, + bool OfBlockPointer, bool Unqualified, + bool AllowCXX) { + const auto *lbase = lhs->castAs(); + const auto *rbase = rhs->castAs(); + const auto *lproto = dyn_cast(lbase); + const auto *rproto = dyn_cast(rbase); + bool allLTypes = true; + bool allRTypes = true; + + // Check return type + QualType retType; + if (OfBlockPointer) { + QualType RHS = rbase->getReturnType(); + 
QualType LHS = lbase->getReturnType(); + bool UnqualifiedResult = Unqualified; + if (!UnqualifiedResult) + UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers()); + retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true); + } + else + retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false, + Unqualified); + if (retType.isNull()) + return {}; + + if (Unqualified) + retType = retType.getUnqualifiedType(); + + CanQualType LRetType = getCanonicalType(lbase->getReturnType()); + CanQualType RRetType = getCanonicalType(rbase->getReturnType()); + if (Unqualified) { + LRetType = LRetType.getUnqualifiedType(); + RRetType = RRetType.getUnqualifiedType(); + } + + if (getCanonicalType(retType) != LRetType) + allLTypes = false; + if (getCanonicalType(retType) != RRetType) + allRTypes = false; + + // FIXME: double check this + // FIXME: should we error if lbase->getRegParmAttr() != 0 && + // rbase->getRegParmAttr() != 0 && + // lbase->getRegParmAttr() != rbase->getRegParmAttr()? + FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo(); + FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo(); + + // Compatible functions must have compatible calling conventions + if (lbaseInfo.getCC() != rbaseInfo.getCC()) + return {}; + + // Regparm is part of the calling convention. + if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm()) + return {}; + if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm()) + return {}; + + if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult()) + return {}; + if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs()) + return {}; + if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck()) + return {}; + + // FIXME: some uses, e.g. conditional exprs, really want this to be 'both'. 
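// [Editor's aside; hedged illustration, not part of the patch.] Merging ORs
// the noreturn bits, so in C a redeclaration that drops the attribute still
// composes to a noreturn type (fatal is a hypothetical name):
//
//   __attribute__((noreturn)) void fatal(const char *);
//   void fatal(const char *);   // composite type is still noreturn
//
// Only the declaration whose written type already matches the composite keeps
// its allLTypes/allRTypes flag; here the second declaration loses it.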
+ bool NoReturn = lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn(); + + if (lbaseInfo.getNoReturn() != NoReturn) + allLTypes = false; + if (rbaseInfo.getNoReturn() != NoReturn) + allRTypes = false; + + FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn); + + if (lproto && rproto) { // two C99 style function prototypes + assert((AllowCXX || + (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) && + "C++ shouldn't be here"); + // Compatible functions must have the same number of parameters + if (lproto->getNumParams() != rproto->getNumParams()) + return {}; + + // Variadic and non-variadic functions aren't compatible + if (lproto->isVariadic() != rproto->isVariadic()) + return {}; + + if (lproto->getMethodQuals() != rproto->getMethodQuals()) + return {}; + + SmallVector newParamInfos; + bool canUseLeft, canUseRight; + if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight, + newParamInfos)) + return {}; + + if (!canUseLeft) + allLTypes = false; + if (!canUseRight) + allRTypes = false; + + // Check parameter type compatibility + SmallVector types; + for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) { + QualType lParamType = lproto->getParamType(i).getUnqualifiedType(); + QualType rParamType = rproto->getParamType(i).getUnqualifiedType(); + QualType paramType = mergeFunctionParameterTypes( + lParamType, rParamType, OfBlockPointer, Unqualified); + if (paramType.isNull()) + return {}; + + if (Unqualified) + paramType = paramType.getUnqualifiedType(); + + types.push_back(paramType); + if (Unqualified) { + lParamType = lParamType.getUnqualifiedType(); + rParamType = rParamType.getUnqualifiedType(); + } + + if (getCanonicalType(paramType) != getCanonicalType(lParamType)) + allLTypes = false; + if (getCanonicalType(paramType) != getCanonicalType(rParamType)) + allRTypes = false; + } + + if (allLTypes) return lhs; + if (allRTypes) return rhs; + + FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo(); + EPI.ExtInfo = einfo; + EPI.ExtParameterInfos = + newParamInfos.empty() ? nullptr : newParamInfos.data(); + return getFunctionType(retType, types, EPI); + } + + if (lproto) allRTypes = false; + if (rproto) allLTypes = false; + + const FunctionProtoType *proto = lproto ? lproto : rproto; + if (proto) { + assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here"); + if (proto->isVariadic()) + return {}; + // Check that the types are compatible with the types that + // would result from default argument promotions (C99 6.7.5.3p15). + // The only types actually affected are promotable integer + // types and floats, which would be passed as a different + // type depending on whether the prototype is visible. + for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) { + QualType paramTy = proto->getParamType(i); + + // Look at the converted type of enum types, since that is the type used + // to pass enum values. 
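// [Editor's aside; not part of the patch.] The promotion check a few lines
// below implements C99 6.7.5.3p15: an unprototyped declaration is
// incompatible with a prototype whose parameter types are changed by the
// default argument promotions (f and g are hypothetical):
//
//   int f();          // no prototype
//   int f(float x);   // error: float would be passed as double under ()
//   int g();
//   int g(int x);     // OK: int is unchanged by the promotions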
+ if (const auto *Enum = paramTy->getAs()) { + paramTy = Enum->getDecl()->getIntegerType(); + if (paramTy.isNull()) + return {}; + } + + if (paramTy->isPromotableIntegerType() || + getCanonicalType(paramTy).getUnqualifiedType() == FloatTy) + return {}; + } + + if (allLTypes) return lhs; + if (allRTypes) return rhs; + + FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo(); + EPI.ExtInfo = einfo; + return getFunctionType(retType, proto->getParamTypes(), EPI); + } + + if (allLTypes) return lhs; + if (allRTypes) return rhs; + return getFunctionNoProtoType(retType, einfo); + } + + /// Given that we have an enum type and a non-enum type, try to merge them. + static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET, + QualType other, bool isBlockReturnType) { + // C99 6.7.2.2p4: Each enumerated type shall be compatible with char, + // a signed integer type, or an unsigned integer type. + // Compatibility is based on the underlying type, not the promotion + // type. + QualType underlyingType = ET->getDecl()->getIntegerType(); + if (underlyingType.isNull()) + return {}; + if (Context.hasSameType(underlyingType, other)) + return other; + + // In block return types, we're more permissive and accept any + // integral type of the same size. + if (isBlockReturnType && other->isIntegerType() && + Context.getTypeSize(underlyingType) == Context.getTypeSize(other)) + return other; + + return {}; + } + + QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, + bool OfBlockPointer, + bool Unqualified, bool BlockReturnType) { + // C++ [expr]: If an expression initially has the type "reference to T", the + // type is adjusted to "T" prior to any further analysis, the expression + // designates the object or function denoted by the reference, and the + // expression is an lvalue unless the reference is an rvalue reference and + // the expression is a function call (possibly inside parentheses). + if (LHS->getAs() || RHS->getAs()) + return {}; + + if (Unqualified) { + LHS = LHS.getUnqualifiedType(); + RHS = RHS.getUnqualifiedType(); + } + + QualType LHSCan = getCanonicalType(LHS), + RHSCan = getCanonicalType(RHS); + + // If two types are identical, they are compatible. + if (LHSCan == RHSCan) + return LHS; + + // If the qualifiers are different, the types aren't compatible... mostly. + Qualifiers LQuals = LHSCan.getLocalQualifiers(); + Qualifiers RQuals = RHSCan.getLocalQualifiers(); + if (LQuals != RQuals) { + // If any of these qualifiers are different, we have a type + // mismatch. + if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || + LQuals.getAddressSpace() != RQuals.getAddressSpace() || + LQuals.getObjCLifetime() != RQuals.getObjCLifetime() || + LQuals.hasUnaligned() != RQuals.hasUnaligned()) + return {}; + + // Exactly one GC qualifier difference is allowed: __strong is + // okay if the other type has no GC qualifier but is an Objective + // C object pointer (i.e. implicitly strong by default). We fix + // this by pretending that the unqualified type was actually + // qualified __strong. 
+ Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); + Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); + assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); + + if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) + return {}; + + if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) { + return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong)); + } + if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) { + return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS); + } + return {}; + } + + // Okay, qualifiers are equal. + + Type::TypeClass LHSClass = LHSCan->getTypeClass(); + Type::TypeClass RHSClass = RHSCan->getTypeClass(); + + // We want to consider the two function types to be the same for these + // comparisons, just force one to the other. + if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto; + if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto; + + // Same as above for arrays + if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray) + LHSClass = Type::ConstantArray; + if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray) + RHSClass = Type::ConstantArray; + + // ObjCInterfaces are just specialized ObjCObjects. + if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject; + if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject; + + // Canonicalize ExtVector -> Vector. + if (LHSClass == Type::ExtVector) LHSClass = Type::Vector; + if (RHSClass == Type::ExtVector) RHSClass = Type::Vector; + + // If the canonical type classes don't match. + if (LHSClass != RHSClass) { + // Note that we only have special rules for turning block enum + // returns into block int returns, not vice-versa. + if (const auto *ETy = LHS->getAs()) { + return mergeEnumWithInteger(*this, ETy, RHS, false); + } + if (const EnumType* ETy = RHS->getAs()) { + return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType); + } + // allow block pointer type to match an 'id' type. + if (OfBlockPointer && !BlockReturnType) { + if (LHS->isObjCIdType() && RHS->isBlockPointerType()) + return LHS; + if (RHS->isObjCIdType() && LHS->isBlockPointerType()) + return RHS; + } + + return {}; + } + + // The canonical type classes match. 
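// [Editor's aside; not part of the patch.] Each case below first tries to
// return LHS or RHS unchanged so typedef sugar survives the merge, and only
// builds a fresh type node when neither written form equals the composite.
// For arrays this is the C11 6.2.7p3 composite-type rule, e.g.:
//
//   extern int a[];     // incomplete array type
//   extern int a[10];   // composite type of the two declarations: int[10]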
+ switch (LHSClass) { + #define TYPE(Class, Base) + #define ABSTRACT_TYPE(Class, Base) + #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: + #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: + #define DEPENDENT_TYPE(Class, Base) case Type::Class: + #include "clang/AST/TypeNodes.inc" + llvm_unreachable("Non-canonical and dependent types shouldn't get here"); + + case Type::Auto: + case Type::DeducedTemplateSpecialization: + case Type::LValueReference: + case Type::RValueReference: + case Type::MemberPointer: + llvm_unreachable("C++ should never be in mergeTypes"); + + case Type::ObjCInterface: + case Type::IncompleteArray: + case Type::VariableArray: + case Type::FunctionProto: + case Type::ExtVector: + llvm_unreachable("Types are eliminated above"); + + case Type::Pointer: + { + // Merge two pointer types, while trying to preserve typedef info + QualType LHSPointee = LHS->castAs()->getPointeeType(); + QualType RHSPointee = RHS->castAs()->getPointeeType(); + if (Unqualified) { + LHSPointee = LHSPointee.getUnqualifiedType(); + RHSPointee = RHSPointee.getUnqualifiedType(); + } + QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false, + Unqualified); + if (ResultType.isNull()) + return {}; + if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) + return LHS; + if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) + return RHS; + return getPointerType(ResultType); + } + case Type::BlockPointer: + { + // Merge two block pointer types, while trying to preserve typedef info + QualType LHSPointee = LHS->castAs()->getPointeeType(); + QualType RHSPointee = RHS->castAs()->getPointeeType(); + if (Unqualified) { + LHSPointee = LHSPointee.getUnqualifiedType(); + RHSPointee = RHSPointee.getUnqualifiedType(); + } + if (getLangOpts().OpenCL) { + Qualifiers LHSPteeQual = LHSPointee.getQualifiers(); + Qualifiers RHSPteeQual = RHSPointee.getQualifiers(); + // Blocks can't be an expression in a ternary operator (OpenCL v2.0 + // 6.12.5) thus the following check is asymmetric. 
+ if (!LHSPteeQual.isAddressSpaceSupersetOf(RHSPteeQual)) + return {}; + LHSPteeQual.removeAddressSpace(); + RHSPteeQual.removeAddressSpace(); + LHSPointee = + QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue()); + RHSPointee = + QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue()); + } + QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer, + Unqualified); + if (ResultType.isNull()) + return {}; + if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) + return LHS; + if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) + return RHS; + return getBlockPointerType(ResultType); + } + case Type::Atomic: + { + // Merge two pointer types, while trying to preserve typedef info + QualType LHSValue = LHS->castAs()->getValueType(); + QualType RHSValue = RHS->castAs()->getValueType(); + if (Unqualified) { + LHSValue = LHSValue.getUnqualifiedType(); + RHSValue = RHSValue.getUnqualifiedType(); + } + QualType ResultType = mergeTypes(LHSValue, RHSValue, false, + Unqualified); + if (ResultType.isNull()) + return {}; + if (getCanonicalType(LHSValue) == getCanonicalType(ResultType)) + return LHS; + if (getCanonicalType(RHSValue) == getCanonicalType(ResultType)) + return RHS; + return getAtomicType(ResultType); + } + case Type::ConstantArray: + { + const ConstantArrayType* LCAT = getAsConstantArrayType(LHS); + const ConstantArrayType* RCAT = getAsConstantArrayType(RHS); + if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize()) + return {}; + + QualType LHSElem = getAsArrayType(LHS)->getElementType(); + QualType RHSElem = getAsArrayType(RHS)->getElementType(); + if (Unqualified) { + LHSElem = LHSElem.getUnqualifiedType(); + RHSElem = RHSElem.getUnqualifiedType(); + } + + QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified); + if (ResultType.isNull()) + return {}; + + const VariableArrayType* LVAT = getAsVariableArrayType(LHS); + const VariableArrayType* RVAT = getAsVariableArrayType(RHS); + + // If either side is a variable array, and both are complete, check whether + // the current dimension is definite. + if (LVAT || RVAT) { + auto SizeFetch = [this](const VariableArrayType* VAT, + const ConstantArrayType* CAT) + -> std::pair { + if (VAT) { + Optional TheInt; + Expr *E = VAT->getSizeExpr(); + if (E && (TheInt = E->getIntegerConstantExpr(*this))) + return std::make_pair(true, *TheInt); + return std::make_pair(false, llvm::APSInt()); + } + if (CAT) + return std::make_pair(true, CAT->getSize()); + return std::make_pair(false, llvm::APInt()); + }; + + bool HaveLSize, HaveRSize; + llvm::APInt LSize, RSize; + std::tie(HaveLSize, LSize) = SizeFetch(LVAT, LCAT); + std::tie(HaveRSize, RSize) = SizeFetch(RVAT, RCAT); + if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(LSize, RSize)) + return {}; // Definite, but unequal, array dimension + } + + if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) + return LHS; + if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) + return RHS; + if (LCAT) + return getConstantArrayType(ResultType, LCAT->getSize(), + LCAT->getSizeExpr(), + ArrayType::ArraySizeModifier(), 0); + if (RCAT) + return getConstantArrayType(ResultType, RCAT->getSize(), + RCAT->getSizeExpr(), + ArrayType::ArraySizeModifier(), 0); + if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) + return LHS; + if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) + return RHS; + if (LVAT) { + // FIXME: This isn't correct! 
But tricky to implement because + // the array's size has to be the size of LHS, but the type + // has to be different. + return LHS; + } + if (RVAT) { + // FIXME: This isn't correct! But tricky to implement because + // the array's size has to be the size of RHS, but the type + // has to be different. + return RHS; + } + if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; + if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; + return getIncompleteArrayType(ResultType, + ArrayType::ArraySizeModifier(), 0); + } + case Type::FunctionNoProto: + return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified); + case Type::Record: + case Type::Enum: + return {}; + case Type::Builtin: + // Only exactly equal builtin types are compatible, which is tested above. + return {}; + case Type::Complex: + // Distinct complex types are incompatible. + return {}; + case Type::Vector: + // FIXME: The merged type should be an ExtVector! + if (areCompatVectorTypes(LHSCan->castAs(), + RHSCan->castAs())) + return LHS; + return {}; + case Type::ConstantMatrix: + if (areCompatMatrixTypes(LHSCan->castAs(), + RHSCan->castAs())) + return LHS; + return {}; + case Type::ObjCObject: { + // Check if the types are assignment compatible. + // FIXME: This should be type compatibility, e.g. whether + // "LHS x; RHS x;" at global scope is legal. + if (canAssignObjCInterfaces(LHS->castAs(), + RHS->castAs())) + return LHS; + return {}; + } + case Type::ObjCObjectPointer: + if (OfBlockPointer) { + if (canAssignObjCInterfacesInBlockPointer( + LHS->castAs(), + RHS->castAs(), BlockReturnType)) + return LHS; + return {}; + } + if (canAssignObjCInterfaces(LHS->castAs(), + RHS->castAs())) + return LHS; + return {}; + case Type::Pipe: + assert(LHS != RHS && + "Equivalent pipe types should have already been handled!"); + return {}; + case Type::ExtInt: { + // Merge two ext-int types, while trying to preserve typedef info. + bool LHSUnsigned = LHS->castAs()->isUnsigned(); + bool RHSUnsigned = RHS->castAs()->isUnsigned(); + unsigned LHSBits = LHS->castAs()->getNumBits(); + unsigned RHSBits = RHS->castAs()->getNumBits(); + + // Like unsigned/int, shouldn't have a type if they dont match. + if (LHSUnsigned != RHSUnsigned) + return {}; + + if (LHSBits != RHSBits) + return {}; + return LHS; + } + } + + llvm_unreachable("Invalid Type::Class!"); + } + + bool ASTContext::mergeExtParameterInfo( + const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType, + bool &CanUseFirst, bool &CanUseSecond, + SmallVectorImpl &NewParamInfos) { + assert(NewParamInfos.empty() && "param info list not empty"); + CanUseFirst = CanUseSecond = true; + bool FirstHasInfo = FirstFnType->hasExtParameterInfos(); + bool SecondHasInfo = SecondFnType->hasExtParameterInfos(); + + // Fast path: if the first type doesn't have ext parameter infos, + // we match if and only if the second type also doesn't have them. + if (!FirstHasInfo && !SecondHasInfo) + return true; + + bool NeedParamInfo = false; + size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size() + : SecondFnType->getExtParameterInfos().size(); + + for (size_t I = 0; I < E; ++I) { + FunctionProtoType::ExtParameterInfo FirstParam, SecondParam; + if (FirstHasInfo) + FirstParam = FirstFnType->getExtParameterInfo(I); + if (SecondHasInfo) + SecondParam = SecondFnType->getExtParameterInfo(I); + + // Cannot merge unless everything except the noescape flag matches. 
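// [Editor's aside; hedged, not part of the patch.] The loop below intersects
// the noescape bits: a parameter is noescape in the composite only when both
// declarations agree, and a declaration that claimed noescape where the
// composite does not is no longer usable verbatim, so its CanUseFirst or
// CanUseSecond flag is cleared.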
+ if (FirstParam.withIsNoEscape(false) != SecondParam.withIsNoEscape(false)) + return false; + + bool FirstNoEscape = FirstParam.isNoEscape(); + bool SecondNoEscape = SecondParam.isNoEscape(); + bool IsNoEscape = FirstNoEscape && SecondNoEscape; + NewParamInfos.push_back(FirstParam.withIsNoEscape(IsNoEscape)); + if (NewParamInfos.back().getOpaqueValue()) + NeedParamInfo = true; + if (FirstNoEscape != IsNoEscape) + CanUseFirst = false; + if (SecondNoEscape != IsNoEscape) + CanUseSecond = false; + } + + if (!NeedParamInfo) + NewParamInfos.clear(); + + return true; + } + + void ASTContext::ResetObjCLayout(const ObjCContainerDecl *CD) { + ObjCLayouts[CD] = nullptr; + } + + /// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and + /// 'RHS' attributes and returns the merged version; including for function + /// return types. + QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) { + QualType LHSCan = getCanonicalType(LHS), + RHSCan = getCanonicalType(RHS); + // If two types are identical, they are compatible. + if (LHSCan == RHSCan) + return LHS; + if (RHSCan->isFunctionType()) { + if (!LHSCan->isFunctionType()) + return {}; + QualType OldReturnType = + cast(RHSCan.getTypePtr())->getReturnType(); + QualType NewReturnType = + cast(LHSCan.getTypePtr())->getReturnType(); + QualType ResReturnType = + mergeObjCGCQualifiers(NewReturnType, OldReturnType); + if (ResReturnType.isNull()) + return {}; + if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) { + // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo(); + // In either case, use OldReturnType to build the new function type. + const auto *F = LHS->castAs(); + if (const auto *FPT = cast(F)) { + FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); + EPI.ExtInfo = getFunctionExtInfo(LHS); + QualType ResultType = + getFunctionType(OldReturnType, FPT->getParamTypes(), EPI); + return ResultType; + } + } + return {}; + } + + // If the qualifiers are different, the types can still be merged. + Qualifiers LQuals = LHSCan.getLocalQualifiers(); + Qualifiers RQuals = RHSCan.getLocalQualifiers(); + if (LQuals != RQuals) { + // If any of these qualifiers are different, we have a type mismatch. + if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || + LQuals.getAddressSpace() != RQuals.getAddressSpace()) + return {}; + + // Exactly one GC qualifier difference is allowed: __strong is + // okay if the other type has no GC qualifier but is an Objective + // C object pointer (i.e. implicitly strong by default). We fix + // this by pretending that the unqualified type was actually + // qualified __strong. 
+ Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); + Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); + assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); + + if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) + return {}; + + if (GC_L == Qualifiers::Strong) + return LHS; + if (GC_R == Qualifiers::Strong) + return RHS; + return {}; + } + + if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) { + QualType LHSBaseQT = LHS->castAs()->getPointeeType(); + QualType RHSBaseQT = RHS->castAs()->getPointeeType(); + QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT); + if (ResQT == LHSBaseQT) + return LHS; + if (ResQT == RHSBaseQT) + return RHS; + } + return {}; + } + + //===----------------------------------------------------------------------===// + // Integer Predicates + //===----------------------------------------------------------------------===// + + unsigned ASTContext::getIntWidth(QualType T) const { + if (const auto *ET = T->getAs()) + T = ET->getDecl()->getIntegerType(); + if (T->isBooleanType()) + return 1; + if(const auto *EIT = T->getAs()) + return EIT->getNumBits(); + // For builtin types, just use the standard type sizing method + return (unsigned)getTypeSize(T); + } + + QualType ASTContext::getCorrespondingUnsignedType(QualType T) const { + assert((T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) && + "Unexpected type"); + + // Turn <4 x signed int> -> <4 x unsigned int> + if (const auto *VTy = T->getAs()) + return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()), + VTy->getNumElements(), VTy->getVectorKind()); + + // For enums, we return the unsigned version of the base type. + if (const auto *ETy = T->getAs()) + T = ETy->getDecl()->getIntegerType(); + + switch (T->castAs()->getKind()) { + case BuiltinType::Char_S: + case BuiltinType::SChar: + return UnsignedCharTy; + case BuiltinType::Short: + return UnsignedShortTy; + case BuiltinType::Int: + return UnsignedIntTy; + case BuiltinType::Long: + return UnsignedLongTy; + case BuiltinType::LongLong: + return UnsignedLongLongTy; + case BuiltinType::Int128: + return UnsignedInt128Ty; + // wchar_t is special. It is either signed or not, but when it's signed, + // there's no matching "unsigned wchar_t". Therefore we return the unsigned + // version of it's underlying type instead. 
+ case BuiltinType::WChar_S: + return getUnsignedWCharType(); + + case BuiltinType::ShortAccum: + return UnsignedShortAccumTy; + case BuiltinType::Accum: + return UnsignedAccumTy; + case BuiltinType::LongAccum: + return UnsignedLongAccumTy; + case BuiltinType::SatShortAccum: + return SatUnsignedShortAccumTy; + case BuiltinType::SatAccum: + return SatUnsignedAccumTy; + case BuiltinType::SatLongAccum: + return SatUnsignedLongAccumTy; + case BuiltinType::ShortFract: + return UnsignedShortFractTy; + case BuiltinType::Fract: + return UnsignedFractTy; + case BuiltinType::LongFract: + return UnsignedLongFractTy; + case BuiltinType::SatShortFract: + return SatUnsignedShortFractTy; + case BuiltinType::SatFract: + return SatUnsignedFractTy; + case BuiltinType::SatLongFract: + return SatUnsignedLongFractTy; + default: + llvm_unreachable("Unexpected signed integer or fixed point type"); + } + } + + ASTMutationListener::~ASTMutationListener() = default; + + void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD, + QualType ReturnType) {} + + //===----------------------------------------------------------------------===// + // Builtin Type Computation + //===----------------------------------------------------------------------===// + + /// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the + /// pointer over the consumed characters. This returns the resultant type. If + /// AllowTypeModifiers is false then modifier like * are not parsed, just basic + /// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of + /// a vector of "i*". + /// + /// RequiresICE is filled in on return to indicate whether the value is required + /// to be an Integer Constant Expression. + static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context, + ASTContext::GetBuiltinTypeError &Error, + bool &RequiresICE, + bool AllowTypeModifiers) { + // Modifiers. + int HowLong = 0; + bool Signed = false, Unsigned = false; + RequiresICE = false; + + // Read the prefixed modifiers first. + bool Done = false; + #ifndef NDEBUG + bool IsSpecial = false; + #endif + while (!Done) { + switch (*Str++) { + default: Done = true; --Str; break; + case 'I': + RequiresICE = true; + break; + case 'S': + assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!"); + assert(!Signed && "Can't use 'S' modifier multiple times!"); + Signed = true; + break; + case 'U': + assert(!Signed && "Can't use both 'S' and 'U' modifiers!"); + assert(!Unsigned && "Can't use 'U' modifier multiple times!"); + Unsigned = true; + break; + case 'L': + assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers"); + assert(HowLong <= 2 && "Can't have LLLL modifier"); + ++HowLong; + break; + case 'N': + // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise. + assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); + assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!"); + #ifndef NDEBUG + IsSpecial = true; + #endif + if (Context.getTargetInfo().getLongWidth() == 32) + ++HowLong; + break; + case 'W': + // This modifier represents int64 type. 
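// [Editor's aside; worked examples, not part of the patch.] Reading the
// modifier cases together: "LLi" decodes to 'long long int'; "Zi" decodes to
// whichever of int, long, or long long is exactly 32 bits wide on the target;
// and "Wi" (this case) decodes to the target's int64 type, i.e. 'long' on
// typical LP64 targets and 'long long' on LLP64 targets such as 64-bit
// Windows.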
+ assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); + assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!"); + #ifndef NDEBUG + IsSpecial = true; + #endif + switch (Context.getTargetInfo().getInt64Type()) { + default: + llvm_unreachable("Unexpected integer type"); + case TargetInfo::SignedLong: + HowLong = 1; + break; + case TargetInfo::SignedLongLong: + HowLong = 2; + break; + } + break; + case 'Z': + // This modifier represents int32 type. + assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); + assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!"); + #ifndef NDEBUG + IsSpecial = true; + #endif + switch (Context.getTargetInfo().getIntTypeByWidth(32, true)) { + default: + llvm_unreachable("Unexpected integer type"); + case TargetInfo::SignedInt: + HowLong = 0; + break; + case TargetInfo::SignedLong: + HowLong = 1; + break; + case TargetInfo::SignedLongLong: + HowLong = 2; + break; + } + break; + case 'O': + assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); + assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!"); + #ifndef NDEBUG + IsSpecial = true; + #endif + if (Context.getLangOpts().OpenCL) + HowLong = 1; + else + HowLong = 2; + break; + } + } + + QualType Type; + + // Read the base type. + switch (*Str++) { + default: llvm_unreachable("Unknown builtin type letter!"); + case 'y': + assert(HowLong == 0 && !Signed && !Unsigned && + "Bad modifiers used with 'y'!"); + Type = Context.BFloat16Ty; + break; + case 'v': + assert(HowLong == 0 && !Signed && !Unsigned && + "Bad modifiers used with 'v'!"); + Type = Context.VoidTy; + break; + case 'h': + assert(HowLong == 0 && !Signed && !Unsigned && + "Bad modifiers used with 'h'!"); + Type = Context.HalfTy; + break; + case 'f': + assert(HowLong == 0 && !Signed && !Unsigned && + "Bad modifiers used with 'f'!"); + Type = Context.FloatTy; + break; + case 'd': + assert(HowLong < 3 && !Signed && !Unsigned && + "Bad modifiers used with 'd'!"); + if (HowLong == 1) + Type = Context.LongDoubleTy; + else if (HowLong == 2) + Type = Context.Float128Ty; + else + Type = Context.DoubleTy; + break; + case 's': + assert(HowLong == 0 && "Bad modifiers used with 's'!"); + if (Unsigned) + Type = Context.UnsignedShortTy; + else + Type = Context.ShortTy; + break; + case 'i': + if (HowLong == 3) + Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty; + else if (HowLong == 2) + Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy; + else if (HowLong == 1) + Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy; + else + Type = Unsigned ? Context.UnsignedIntTy : Context.IntTy; + break; + case 'c': + assert(HowLong == 0 && "Bad modifiers used with 'c'!"); + if (Signed) + Type = Context.SignedCharTy; + else if (Unsigned) + Type = Context.UnsignedCharTy; + else + Type = Context.CharTy; + break; + case 'b': // boolean + assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!"); + Type = Context.BoolTy; + break; + case 'z': // size_t. + assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!"); + Type = Context.getSizeType(); + break; + case 'w': // wchar_t. 
+ assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!"); + Type = Context.getWideCharType(); + break; + case 'F': + Type = Context.getCFConstantStringType(); + break; + case 'G': + Type = Context.getObjCIdType(); + break; + case 'H': + Type = Context.getObjCSelType(); + break; + case 'M': + Type = Context.getObjCSuperType(); + break; + case 'a': + Type = Context.getBuiltinVaListType(); + assert(!Type.isNull() && "builtin va list type not initialized!"); + break; + case 'A': + // This is a "reference" to a va_list; however, what exactly + // this means depends on how va_list is defined. There are two + // different kinds of va_list: ones passed by value, and ones + // passed by reference. An example of a by-value va_list is + // x86, where va_list is a char*. An example of by-ref va_list + // is x86-64, where va_list is a __va_list_tag[1]. For x86, + // we want this argument to be a char*&; for x86-64, we want + // it to be a __va_list_tag*. + Type = Context.getBuiltinVaListType(); + assert(!Type.isNull() && "builtin va list type not initialized!"); + if (Type->isArrayType()) + Type = Context.getArrayDecayedType(Type); + else + Type = Context.getLValueReferenceType(Type); + break; + case 'q': { + char *End; + unsigned NumElements = strtoul(Str, &End, 10); + assert(End != Str && "Missing vector size"); + Str = End; + + QualType ElementType = DecodeTypeFromStr(Str, Context, Error, + RequiresICE, false); + assert(!RequiresICE && "Can't require vector ICE"); + + Type = Context.getScalableVectorType(ElementType, NumElements); + break; + } + case 'V': { + char *End; + unsigned NumElements = strtoul(Str, &End, 10); + assert(End != Str && "Missing vector size"); + Str = End; + + QualType ElementType = DecodeTypeFromStr(Str, Context, Error, + RequiresICE, false); + assert(!RequiresICE && "Can't require vector ICE"); + + // TODO: No way to make AltiVec vectors in builtins yet. + Type = Context.getVectorType(ElementType, NumElements, + VectorType::GenericVector); + break; + } + case 'E': { + char *End; + + unsigned NumElements = strtoul(Str, &End, 10); + assert(End != Str && "Missing vector size"); + + Str = End; + + QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, + false); + Type = Context.getExtVectorType(ElementType, NumElements); + break; + } + case 'X': { + QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, + false); + assert(!RequiresICE && "Can't require complex ICE"); + Type = Context.getComplexType(ElementType); + break; + } + case 'Y': + Type = Context.getPointerDiffType(); + break; + case 'P': + Type = Context.getFILEType(); + if (Type.isNull()) { + Error = ASTContext::GE_Missing_stdio; + return {}; + } + break; + case 'J': + if (Signed) + Type = Context.getsigjmp_bufType(); + else + Type = Context.getjmp_bufType(); + + if (Type.isNull()) { + Error = ASTContext::GE_Missing_setjmp; + return {}; + } + break; + case 'K': + assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!"); + Type = Context.getucontext_tType(); + + if (Type.isNull()) { + Error = ASTContext::GE_Missing_ucontext; + return {}; + } + break; + case 'p': + Type = Context.getProcessIDType(); + break; + } + + // If there are modifiers and if we're allowed to parse them, go for it. + Done = !AllowTypeModifiers; + while (!Done) { + switch (char c = *Str++) { + default: Done = true; --Str; break; + case '*': + case '&': { + // Both pointers and references can have their pointee types + // qualified with an address space. 
+ char *End;
+ unsigned AddrSpace = strtoul(Str, &End, 10);
+ if (End != Str) {
+ // Note AddrSpace == 0 is not the same as an unspecified address space.
+ Type = Context.getAddrSpaceQualType(
+ Type,
+ Context.getLangASForBuiltinAddressSpace(AddrSpace));
+ Str = End;
+ }
+ if (c == '*')
+ Type = Context.getPointerType(Type);
+ else
+ Type = Context.getLValueReferenceType(Type);
+ break;
+ }
+ // FIXME: There's no way to have a built-in with an rvalue ref arg.
+ case 'C':
+ Type = Type.withConst();
+ break;
+ case 'D':
+ Type = Context.getVolatileType(Type);
+ break;
+ case 'R':
+ Type = Type.withRestrict();
+ break;
+ }
+ }
+
+ assert((!RequiresICE || Type->isIntegralOrEnumerationType()) &&
+ "Integer constant 'I' type must be an integer");
+
+ return Type;
+ }
+
+ // On some targets such as PowerPC, some of the builtins are defined with custom
+ // type descriptors for target-dependent types. These descriptors are decoded in
+ // other functions, but it may be useful to be able to fall back to default
+ // descriptor decoding to define builtins mixing target-dependent and target-
+ // independent types. This function allows decoding one type descriptor with
+ // default decoding.
+ QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context,
+ GetBuiltinTypeError &Error, bool &RequireICE,
+ bool AllowTypeModifiers) const {
+ return DecodeTypeFromStr(Str, Context, Error, RequireICE, AllowTypeModifiers);
+ }
+
+ /// GetBuiltinType - Return the type for the specified builtin.
+ QualType ASTContext::GetBuiltinType(unsigned Id,
+ GetBuiltinTypeError &Error,
+ unsigned *IntegerConstantArgs) const {
+ const char *TypeStr = BuiltinInfo.getTypeString(Id);
+ if (TypeStr[0] == '\0') {
+ Error = GE_Missing_type;
+ return {};
+ }
+
+ SmallVector<QualType, 8> ArgTypes;
+
+ bool RequiresICE = false;
+ Error = GE_None;
+ QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error,
+ RequiresICE, true);
+ if (Error != GE_None)
+ return {};
+
+ assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE");
+
+ while (TypeStr[0] && TypeStr[0] != '.') {
+ QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true);
+ if (Error != GE_None)
+ return {};
+
+ // If this argument is required to be an IntegerConstantExpression and the
+ // caller cares, fill in the bitmask we return.
+ if (RequiresICE && IntegerConstantArgs)
+ *IntegerConstantArgs |= 1 << ArgTypes.size();
+
+ // Do array -> pointer decay. The builtin should use the decayed type.
+ if (Ty->isArrayType())
+ Ty = getArrayDecayedType(Ty);
+
+ ArgTypes.push_back(Ty);
+ }
+
+ if (Id == Builtin::BI__GetExceptionInfo)
+ return {};
+
+ assert((TypeStr[0] != '.' || TypeStr[1] == 0) &&
+ "'.' should only occur at end of builtin type list!");
+
+ bool Variadic = (TypeStr[0] == '.');
+
+ FunctionType::ExtInfo EI(getDefaultCallingConvention(
+ Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true));
+ if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true);
+
+
+ // We really shouldn't be making a no-proto type here.
+ if (ArgTypes.empty() && Variadic && !getLangOpts().CPlusPlus)
+ return getFunctionNoProtoType(ResType, EI);
+
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExtInfo = EI;
+ EPI.Variadic = Variadic;
+ if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(Id))
+ EPI.ExceptionSpec.Type =
+ getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone;
+
+ return getFunctionType(ResType, ArgTypes, EPI);
+ }
+
+ static GVALinkage basicGVALinkageForFunction(const ASTContext &Context,
+ const FunctionDecl *FD) {
+ if (!FD->isExternallyVisible())
+ return GVA_Internal;
+
+ // Non-user-provided functions get emitted as weak definitions with every
+ // use, no matter whether they've been explicitly instantiated etc.
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
+ if (!MD->isUserProvided())
+ return GVA_DiscardableODR;
+
+ GVALinkage External;
+ switch (FD->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ External = GVA_StrongExternal;
+ break;
+
+ case TSK_ExplicitInstantiationDefinition:
+ return GVA_StrongODR;
+
+ // C++11 [temp.explicit]p10:
+ // [ Note: The intent is that an inline function that is the subject of
+ // an explicit instantiation declaration will still be implicitly
+ // instantiated when used so that the body can be considered for
+ // inlining, but that no out-of-line copy of the inline function would be
+ // generated in the translation unit. -- end note ]
+ case TSK_ExplicitInstantiationDeclaration:
+ return GVA_AvailableExternally;
+
+ case TSK_ImplicitInstantiation:
+ External = GVA_DiscardableODR;
+ break;
+ }
+
+ if (!FD->isInlined())
+ return External;
+
+ if ((!Context.getLangOpts().CPlusPlus &&
+ !Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ !FD->hasAttr<DLLExportAttr>()) ||
+ FD->hasAttr<GNUInlineAttr>()) {
+ // FIXME: This doesn't match gcc's behavior for dllexport inline functions.
+
+ // GNU or C99 inline semantics. Determine whether this symbol should be
+ // externally visible.
+ if (FD->isInlineDefinitionExternallyVisible())
+ return External;
+
+ // C99 inline semantics, where the symbol is not externally visible.
+ return GVA_AvailableExternally;
+ }
+
+ // Functions specified with extern and inline in -fms-compatibility mode
+ // forcibly get emitted. While the body of the function cannot be later
+ // replaced, the function definition cannot be discarded.
+ if (FD->isMSExternInline())
+ return GVA_StrongODR;
+
+ return GVA_DiscardableODR;
+ }
+
+ static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context,
+ const Decl *D, GVALinkage L) {
+ // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx
+ // dllexport/dllimport on inline functions.
+ if (D->hasAttr<DLLImportAttr>()) {
+ if (L == GVA_DiscardableODR || L == GVA_StrongODR)
+ return GVA_AvailableExternally;
+ } else if (D->hasAttr<DLLExportAttr>()) {
+ if (L == GVA_DiscardableODR)
+ return GVA_StrongODR;
+ } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) {
+ // Device-side functions with __global__ attribute must always be
+ // visible externally so they can be launched from host.
+ if (D->hasAttr<CUDAGlobalAttr>() &&
+ (L == GVA_DiscardableODR || L == GVA_Internal))
+ return GVA_StrongODR;
+ // Single source offloading languages like CUDA/HIP need to be able to
+ // access static device variables from host code of the same compilation
+ // unit. This is done by externalizing the static variable with a shared
+ // name between the host and device compilation which is the same for the
+ // same compilation unit whereas different among different compilation
+ // units.
+ if (Context.shouldExternalizeStaticVar(D))
+ return GVA_StrongExternal;
+ }
+ return L;
+ }
+
+ /// Adjust the GVALinkage for a declaration based on what an external AST source
+ /// knows about whether there can be other definitions of this declaration.
+ static GVALinkage
+ adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D,
+ GVALinkage L) {
+ ExternalASTSource *Source = Ctx.getExternalSource();
+ if (!Source)
+ return L;
+
+ switch (Source->hasExternalDefinitions(D)) {
+ case ExternalASTSource::EK_Never:
+ // Other translation units rely on us to provide the definition.
+ if (L == GVA_DiscardableODR)
+ return GVA_StrongODR;
+ break;
+
+ case ExternalASTSource::EK_Always:
+ return GVA_AvailableExternally;
+
+ case ExternalASTSource::EK_ReplyHazy:
+ break;
+ }
+ return L;
+ }
+
+ GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const {
+ return adjustGVALinkageForExternalDefinitionKind(*this, FD,
+ adjustGVALinkageForAttributes(*this, FD,
+ basicGVALinkageForFunction(*this, FD)));
+ }
+
+ static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
+ const VarDecl *VD) {
+ if (!VD->isExternallyVisible())
+ return GVA_Internal;
+
+ if (VD->isStaticLocal()) {
+ const DeclContext *LexicalContext = VD->getParentFunctionOrMethod();
+ while (LexicalContext && !isa<FunctionDecl>(LexicalContext))
+ LexicalContext = LexicalContext->getLexicalParent();
+
+ // ObjC Blocks can create local variables that don't have a FunctionDecl
+ // LexicalContext.
+ if (!LexicalContext)
+ return GVA_DiscardableODR;
+
+ // Otherwise, let the static local variable inherit its linkage from the
+ // nearest enclosing function.
+ auto StaticLocalLinkage =
+ Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext));
+
+ // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must
+ // be emitted in any object with references to the symbol for the object it
+ // contains, whether inline or out-of-line."
+ // Similar behavior is observed with MSVC. An alternative ABI could use
+ // StrongODR/AvailableExternally to match the function, but none are
+ // known/supported currently.
+ if (StaticLocalLinkage == GVA_StrongODR ||
+ StaticLocalLinkage == GVA_AvailableExternally)
+ return GVA_DiscardableODR;
+ return StaticLocalLinkage;
+ }
+
+ // MSVC treats in-class initialized static data members as definitions.
+ // By giving them non-strong linkage, out-of-line definitions won't
+ // cause link errors.
+ if (Context.isMSStaticDataMemberInlineDefinition(VD))
+ return GVA_DiscardableODR;
+
+ // Most non-template variables have strong linkage; inline variables are
+ // linkonce_odr or (occasionally, for compatibility) weak_odr.
+ GVALinkage StrongLinkage;
+ switch (Context.getInlineVariableDefinitionKind(VD)) {
+ case ASTContext::InlineVariableDefinitionKind::None:
+ StrongLinkage = GVA_StrongExternal;
+ break;
+ case ASTContext::InlineVariableDefinitionKind::Weak:
+ case ASTContext::InlineVariableDefinitionKind::WeakUnknown:
+ StrongLinkage = GVA_DiscardableODR;
+ break;
+ case ASTContext::InlineVariableDefinitionKind::Strong:
+ StrongLinkage = GVA_StrongODR;
+ break;
+ }
+
+ switch (VD->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ return StrongLinkage;
+
+ case TSK_ExplicitSpecialization:
+ return Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ VD->isStaticDataMember()
+ ? GVA_StrongODR
+ : StrongLinkage;
+
+ case TSK_ExplicitInstantiationDefinition:
+ return GVA_StrongODR;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ return GVA_AvailableExternally;
+
+ case TSK_ImplicitInstantiation:
+ return GVA_DiscardableODR;
+ }
+
+ llvm_unreachable("Invalid Linkage!");
+ }
+
+ GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) {
+ return adjustGVALinkageForExternalDefinitionKind(*this, VD,
+ adjustGVALinkageForAttributes(*this, VD,
+ basicGVALinkageForVariable(*this, VD)));
+ }
+
+ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
+ if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ if (!VD->isFileVarDecl())
+ return false;
+ // Global named register variables (GNU extension) are never emitted.
+ if (VD->getStorageClass() == SC_Register)
+ return false;
+ if (VD->getDescribedVarTemplate() ||
+ isa<VarTemplatePartialSpecializationDecl>(VD))
+ return false;
+ } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ // We never need to emit an uninstantiated function template.
+ if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
+ return false;
+ } else if (isa<PragmaCommentDecl>(D))
+ return true;
+ else if (isa<PragmaDetectMismatchDecl>(D))
+ return true;
+ else if (isa<OMPRequiresDecl>(D))
+ return true;
+ else if (isa<OMPThreadPrivateDecl>(D))
+ return !D->getDeclContext()->isDependentContext();
+ else if (isa<OMPAllocateDecl>(D))
+ return !D->getDeclContext()->isDependentContext();
+ else if (isa<OMPDeclareReductionDecl>(D) || isa<OMPDeclareMapperDecl>(D))
+ return !D->getDeclContext()->isDependentContext();
+ else if (isa<ImportDecl>(D))
+ return true;
+ else
+ return false;
+
+ // If this is a member of a class template, we do not need to emit it.
+ if (D->getDeclContext()->isDependentContext())
+ return false;
+
+ // Weak references don't produce any output by themselves.
+ if (D->hasAttr<WeakRefAttr>())
+ return false;
+
+ // Aliases and used decls are required.
+ if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>())
+ return true;
+
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ // Forward declarations aren't required.
+ if (!FD->doesThisDeclarationHaveABody())
+ return FD->doesDeclarationForceExternallyVisibleDefinition();
+
+ // Constructors and destructors are required.
+ if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
+ return true;
+
+ // The key function for a class is required. This rule only comes
+ // into play when inline functions can be key functions, though.
+ if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ const CXXRecordDecl *RD = MD->getParent();
+ if (MD->isOutOfLine() && RD->isDynamicClass()) {
+ const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD);
+ if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl())
+ return true;
+ }
+ }
+ }
+
+ GVALinkage Linkage = GetGVALinkageForFunction(FD);
+
+ // static, static inline, always_inline, and extern inline functions can
+ // always be deferred. Normal inline functions can be deferred in C99/C++.
+ // Implicit template instantiations can also be deferred in C++.
+ return !isDiscardableGVALinkage(Linkage);
+ }
+
+ const auto *VD = cast<VarDecl>(D);
+ assert(VD->isFileVarDecl() && "Expected file scoped var");
+
+ // If the decl is marked as `declare target to`, it should be emitted for the
+ // host and for the device.
+ if (LangOpts.OpenMP &&
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
+ return true;
+
+ if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly &&
+ !isMSStaticDataMemberInlineDefinition(VD))
+ return false;
+
+ // Variables that can be needed in other TUs are required.
+ auto Linkage = GetGVALinkageForVariable(VD);
+ if (!isDiscardableGVALinkage(Linkage))
+ return true;
+
+ // We never need to emit a variable that is available in another TU.
+ if (Linkage == GVA_AvailableExternally)
+ return false;
+
+ // Variables that have destruction with side-effects are required.
+ if (VD->needsDestruction(*this))
+ return true;
+
+ // Variables that have initialization with side-effects are required.
+ if (VD->getInit() && VD->getInit()->HasSideEffects(*this) &&
+ // We can get a value-dependent initializer during error recovery.
+ (VD->getInit()->isValueDependent() || !VD->evaluateValue()))
+ return true;
+
+ // Likewise, variables with tuple-like bindings are required if their
+ // bindings have side-effects.
+ if (const auto *DD = dyn_cast<DecompositionDecl>(VD))
+ for (const auto *BD : DD->bindings())
+ if (const auto *BindingVD = BD->getHoldingVar())
+ if (DeclMustBeEmitted(BindingVD))
+ return true;
+
+ return false;
+ }
+
+ void ASTContext::forEachMultiversionedFunctionVersion(
+ const FunctionDecl *FD,
+ llvm::function_ref<void(FunctionDecl *)> Pred) const {
+ assert(FD->isMultiVersion() && "Only valid for multiversioned functions");
+ llvm::SmallDenseSet<const FunctionDecl *, 4> SeenDecls;
+ FD = FD->getMostRecentDecl();
+ // FIXME: The order of traversal here matters and depends on the order of
+ // lookup results, which happens to be (mostly) oldest-to-newest, but we
+ // shouldn't rely on that.
+ for (auto *CurDecl :
+ FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) {
+ FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl();
+ if (CurFD && hasSameType(CurFD->getType(), FD->getType()) &&
+ std::end(SeenDecls) == llvm::find(SeenDecls, CurFD)) {
+ SeenDecls.insert(CurFD);
+ Pred(CurFD);
+ }
+ }
+ }
+
+ CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic,
+ bool IsCXXMethod,
+ bool IsBuiltin) const {
+ // Pass through to the C++ ABI object
+ if (IsCXXMethod)
+ return ABI->getDefaultMethodCallConv(IsVariadic);
+
+ // Builtins ignore user-specified default calling convention and remain the
+ // Target's default calling convention.
+ if (!IsBuiltin) {
+ switch (LangOpts.getDefaultCallingConv()) {
+ case LangOptions::DCC_None:
+ break;
+ case LangOptions::DCC_CDecl:
+ return CC_C;
+ case LangOptions::DCC_FastCall:
+ if (getTargetInfo().hasFeature("sse2") && !IsVariadic)
+ return CC_X86FastCall;
+ break;
+ case LangOptions::DCC_StdCall:
+ if (!IsVariadic)
+ return CC_X86StdCall;
+ break;
+ case LangOptions::DCC_VectorCall:
+ // __vectorcall cannot be applied to variadic functions.
+ if (!IsVariadic)
+ return CC_X86VectorCall;
+ break;
+ case LangOptions::DCC_RegCall:
+ // __regcall cannot be applied to variadic functions.
+ if (!IsVariadic)
+ return CC_X86RegCall;
+ break;
+ }
+ }
+ return Target->getDefaultCallingConv();
+ }
+
+ bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
+ // Pass through to the C++ ABI object
+ return ABI->isNearlyEmpty(RD);
+ }
+
+ VTableContextBase *ASTContext::getVTableContext() {
+ if (!VTContext.get()) {
+ auto ABI = Target->getCXXABI();
+ if (ABI.isMicrosoft())
+ VTContext.reset(new MicrosoftVTableContext(*this));
+ else {
+ auto ComponentLayout = getLangOpts().RelativeCXXABIVTables
+ ? 
ItaniumVTableContext::Relative + : ItaniumVTableContext::Pointer; + VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout)); + } + } + return VTContext.get(); + } + + MangleContext *ASTContext::createMangleContext(const TargetInfo *T) { + if (!T) + T = Target; + switch (T->getCXXABI().getKind()) { + case TargetCXXABI::AppleARM64: + case TargetCXXABI::Fuchsia: + case TargetCXXABI::GenericAArch64: + case TargetCXXABI::GenericItanium: + case TargetCXXABI::GenericARM: + case TargetCXXABI::GenericMIPS: + case TargetCXXABI::iOS: + case TargetCXXABI::WebAssembly: + case TargetCXXABI::WatchOS: + case TargetCXXABI::XL: + return ItaniumMangleContext::create(*this, getDiagnostics()); + case TargetCXXABI::Microsoft: + return MicrosoftMangleContext::create(*this, getDiagnostics()); + } + llvm_unreachable("Unsupported ABI"); + } + + CXXABI::~CXXABI() = default; + + size_t ASTContext::getSideTableAllocatedMemory() const { + return ASTRecordLayouts.getMemorySize() + + llvm::capacity_in_bytes(ObjCLayouts) + + llvm::capacity_in_bytes(KeyFunctions) + + llvm::capacity_in_bytes(ObjCImpls) + + llvm::capacity_in_bytes(BlockVarCopyInits) + + llvm::capacity_in_bytes(DeclAttrs) + + llvm::capacity_in_bytes(TemplateOrInstantiation) + + llvm::capacity_in_bytes(InstantiatedFromUsingDecl) + + llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) + + llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) + + llvm::capacity_in_bytes(OverriddenMethods) + + llvm::capacity_in_bytes(Types) + + llvm::capacity_in_bytes(VariableArrayTypes); + } + + /// getIntTypeForBitwidth - + /// sets integer QualTy according to specified details: + /// bitwidth, signed/unsigned. + /// Returns empty type if there is no appropriate target types. + QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth, + unsigned Signed) const { + TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed); + CanQualType QualTy = getFromTargetType(Ty); + if (!QualTy && DestWidth == 128) + return Signed ? Int128Ty : UnsignedInt128Ty; + return QualTy; + } + + /// getRealTypeForBitwidth - + /// sets floating point QualTy according to specified bitwidth. + /// Returns empty type if there is no appropriate target types. + QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth, + bool ExplicitIEEE) const { + TargetInfo::RealType Ty = + getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitIEEE); + switch (Ty) { + case TargetInfo::Float: + return FloatTy; + case TargetInfo::Double: + return DoubleTy; + case TargetInfo::LongDouble: + return LongDoubleTy; + case TargetInfo::Float128: + return Float128Ty; + case TargetInfo::NoFloat: + return {}; + } + + llvm_unreachable("Unhandled TargetInfo::RealType value"); + } + + void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) { + if (Number > 1) + MangleNumbers[ND] = Number; + } + + unsigned ASTContext::getManglingNumber(const NamedDecl *ND) const { + auto I = MangleNumbers.find(ND); + return I != MangleNumbers.end() ? I->second : 1; + } + + void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) { + if (Number > 1) + StaticLocalNumbers[VD] = Number; + } + + unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const { + auto I = StaticLocalNumbers.find(VD); + return I != StaticLocalNumbers.end() ? I->second : 1; + } + + MangleNumberingContext & + ASTContext::getManglingNumberContext(const DeclContext *DC) { + assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C. 
+ std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC];
+ if (!MCtx)
+ MCtx = createMangleNumberingContext();
+ return *MCtx;
+ }
+
+ MangleNumberingContext &
+ ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) {
+ assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
+ std::unique_ptr<MangleNumberingContext> &MCtx =
+ ExtraMangleNumberingContexts[D];
+ if (!MCtx)
+ MCtx = createMangleNumberingContext();
+ return *MCtx;
+ }
+
+ std::unique_ptr<MangleNumberingContext>
+ ASTContext::createMangleNumberingContext() const {
+ return ABI->createMangleNumberingContext();
+ }
+
+ const CXXConstructorDecl *
+ ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) {
+ return ABI->getCopyConstructorForExceptionObject(
+ cast<CXXRecordDecl>(RD->getFirstDecl()));
+ }
+
+ void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
+ CXXConstructorDecl *CD) {
+ return ABI->addCopyConstructorForExceptionObject(
+ cast<CXXRecordDecl>(RD->getFirstDecl()),
+ cast<CXXConstructorDecl>(CD->getFirstDecl()));
+ }
+
+ void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD,
+ TypedefNameDecl *DD) {
+ return ABI->addTypedefNameForUnnamedTagDecl(TD, DD);
+ }
+
+ TypedefNameDecl *
+ ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) {
+ return ABI->getTypedefNameForUnnamedTagDecl(TD);
+ }
+
+ void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD,
+ DeclaratorDecl *DD) {
+ return ABI->addDeclaratorForUnnamedTagDecl(TD, DD);
+ }
+
+ DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) {
+ return ABI->getDeclaratorForUnnamedTagDecl(TD);
+ }
+
+ void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
+ ParamIndices[D] = index;
+ }
+
+ unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const {
+ ParameterIndexTable::const_iterator I = ParamIndices.find(D);
+ assert(I != ParamIndices.end() &&
+ "ParmIndices lacks entry set by ParmVarDecl");
+ return I->second;
+ }
+
+ QualType ASTContext::getStringLiteralArrayType(QualType EltTy,
+ unsigned Length) const {
+ // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
+ if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
+ EltTy = EltTy.withConst();
+
+ EltTy = adjustStringLiteralBaseType(EltTy);
+
+ // Get an array type for the string, according to C99 6.4.5. This includes
+ // the null terminator character.
+ return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1), nullptr,
+ ArrayType::Normal, /*IndexTypeQuals*/ 0);
+ }
+
+ StringLiteral *
+ ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
+ StringLiteral *&Result = StringLiteralCache[Key];
+ if (!Result)
+ Result = StringLiteral::Create(
+ *this, Key, StringLiteral::Ascii,
+ /*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()),
+ SourceLocation());
+ return Result;
+ }
+
+ MSGuidDecl *
+ ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
+ assert(MSGuidTagDecl && "building MS GUID without MS extensions?");
+
+ llvm::FoldingSetNodeID ID;
+ MSGuidDecl::Profile(ID, Parts);
+
+ void *InsertPos;
+ if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos))
+ return Existing;
+
+ QualType GUIDType = getMSGuidType().withConst();
+ MSGuidDecl *New = MSGuidDecl::Create(*this, GUIDType, Parts);
+ MSGuidDecls.InsertNode(New, InsertPos);
+ return New;
+ }
+
+ TemplateParamObjectDecl *
+ ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const {
+ assert(T->isRecordType() && "template param object of unexpected type");
+
+ // C++ [temp.param]p8:
+ // [...] a static storage duration object of type 'const T' [...]
+ T.addConst();
+
+ llvm::FoldingSetNodeID ID;
+ TemplateParamObjectDecl::Profile(ID, T, V);
+
+ void *InsertPos;
+ if (TemplateParamObjectDecl *Existing =
+ TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos))
+ return Existing;
+
+ TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(*this, T, V);
+ TemplateParamObjectDecls.InsertNode(New, InsertPos);
+ return New;
+ }
+
+ bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
+ const llvm::Triple &T = getTargetInfo().getTriple();
+ if (!T.isOSDarwin())
+ return false;
+
+ if (!(T.isiOS() && T.isOSVersionLT(7)) &&
+ !(T.isMacOSX() && T.isOSVersionLT(10, 9)))
+ return false;
+
+ QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
+ CharUnits sizeChars = getTypeSizeInChars(AtomicTy);
+ uint64_t Size = sizeChars.getQuantity();
+ CharUnits alignChars = getTypeAlignInChars(AtomicTy);
+ unsigned Align = alignChars.getQuantity();
+ unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth();
+ return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits);
+ }
+
+ bool
+ ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
+ const ObjCMethodDecl *MethodImpl) {
+ // No point trying to match an unavailable/deprecated method.
+ if (MethodDecl->hasAttr<UnavailableAttr>()
+ || MethodDecl->hasAttr<DeprecatedAttr>())
+ return false;
+ if (MethodDecl->getObjCDeclQualifier() !=
+ MethodImpl->getObjCDeclQualifier())
+ return false;
+ if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType()))
+ return false;
+
+ if (MethodDecl->param_size() != MethodImpl->param_size())
+ return false;
+
+ for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(),
+ IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(),
+ EF = MethodDecl->param_end();
+ IM != EM && IF != EF; ++IM, ++IF) {
+ const ParmVarDecl *DeclVar = (*IF);
+ const ParmVarDecl *ImplVar = (*IM);
+ if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier())
+ return false;
+ if (!hasSameType(DeclVar->getType(), ImplVar->getType()))
+ return false;
+ }
+
+ return (MethodDecl->isVariadic() == MethodImpl->isVariadic());
+ }
+
+ uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const {
+ LangAS AS;
+ if (QT->getUnqualifiedDesugaredType()->isNullPtrType())
+ AS = LangAS::Default;
+ else
+ AS = QT->getPointeeType().getAddressSpace();
+
+ return getTargetInfo().getNullPointerValue(AS);
+ }
+
+ unsigned ASTContext::getTargetAddressSpace(LangAS AS) const {
+ if (isTargetAddressSpace(AS))
+ return toTargetAddressSpace(AS);
+ else
+ return (*AddrSpaceMap)[(unsigned)AS];
+ }
+
+ QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const {
+ assert(Ty->isFixedPointType());
+
+ if (Ty->isSaturatedFixedPointType()) return Ty;
+
+ switch (Ty->castAs<BuiltinType>()->getKind()) {
+ default:
+ llvm_unreachable("Not a fixed point type!");
+ case BuiltinType::ShortAccum:
+ return SatShortAccumTy;
+ case BuiltinType::Accum:
+ return SatAccumTy;
+ case BuiltinType::LongAccum:
+ return SatLongAccumTy;
+ case BuiltinType::UShortAccum:
+ return SatUnsignedShortAccumTy;
+ case BuiltinType::UAccum:
+ return SatUnsignedAccumTy;
+ case BuiltinType::ULongAccum:
+ return SatUnsignedLongAccumTy;
+ case BuiltinType::ShortFract:
+ return SatShortFractTy;
+ case BuiltinType::Fract:
+ return SatFractTy;
+ case BuiltinType::LongFract:
+ return SatLongFractTy;
+ case BuiltinType::UShortFract:
+ return SatUnsignedShortFractTy;
+ case BuiltinType::UFract:
+ return SatUnsignedFractTy;
+ case BuiltinType::ULongFract:
+ return SatUnsignedLongFractTy;
+ }
+ }
+
+ LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const {
+ if (LangOpts.OpenCL)
+ return getTargetInfo().getOpenCLBuiltinAddressSpace(AS);
+
+ if (LangOpts.CUDA)
+ return getTargetInfo().getCUDABuiltinAddressSpace(AS);
+
+ return getLangASFromTargetAS(AS);
+ }
+
+ // Explicitly instantiate this in case a Redeclarable is used from a TU that
+ // doesn't include ASTContext.h
+ template
+ clang::LazyGenerationalUpdatePtr<
+ const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType
+ clang::LazyGenerationalUpdatePtr<
+ const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue(
+ const clang::ASTContext &Ctx, Decl *Value);
+
+ unsigned char ASTContext::getFixedPointScale(QualType Ty) const {
+ assert(Ty->isFixedPointType());
+
+ const TargetInfo &Target = getTargetInfo();
+ switch (Ty->castAs<BuiltinType>()->getKind()) {
+ default:
+ llvm_unreachable("Not a fixed point type!");
+ case BuiltinType::ShortAccum:
+ case BuiltinType::SatShortAccum:
+ return Target.getShortAccumScale();
+ case BuiltinType::Accum:
+ case BuiltinType::SatAccum:
+ return Target.getAccumScale();
+ case BuiltinType::LongAccum:
+ case BuiltinType::SatLongAccum:
+ return Target.getLongAccumScale();
+ case BuiltinType::UShortAccum:
+ case BuiltinType::SatUShortAccum:
+ return Target.getUnsignedShortAccumScale();
+ case BuiltinType::UAccum:
+ case BuiltinType::SatUAccum:
+ return Target.getUnsignedAccumScale();
+ case BuiltinType::ULongAccum:
+ case BuiltinType::SatULongAccum:
+ return Target.getUnsignedLongAccumScale();
+ case BuiltinType::ShortFract:
+ case BuiltinType::SatShortFract:
+ return Target.getShortFractScale();
+ case BuiltinType::Fract:
+ case BuiltinType::SatFract:
+ return Target.getFractScale();
+ case BuiltinType::LongFract:
+ case BuiltinType::SatLongFract:
+ return Target.getLongFractScale();
+ case BuiltinType::UShortFract:
+ case BuiltinType::SatUShortFract:
+ return Target.getUnsignedShortFractScale();
+ case BuiltinType::UFract:
+ case BuiltinType::SatUFract:
+ return Target.getUnsignedFractScale();
+ case BuiltinType::ULongFract:
+ case BuiltinType::SatULongFract:
+ return Target.getUnsignedLongFractScale();
+ }
+ }
+
+ unsigned char ASTContext::getFixedPointIBits(QualType Ty) const {
+ assert(Ty->isFixedPointType());
+
+ const TargetInfo &Target = getTargetInfo();
+ switch (Ty->castAs<BuiltinType>()->getKind()) {
+ default:
+ llvm_unreachable("Not a fixed point type!");
+ case BuiltinType::ShortAccum:
+ case BuiltinType::SatShortAccum:
+ return Target.getShortAccumIBits();
+ case BuiltinType::Accum:
+ case BuiltinType::SatAccum:
+ return Target.getAccumIBits();
+ case BuiltinType::LongAccum:
+ case BuiltinType::SatLongAccum:
+ return Target.getLongAccumIBits();
+ case BuiltinType::UShortAccum:
+ case BuiltinType::SatUShortAccum:
+ return Target.getUnsignedShortAccumIBits();
+ case BuiltinType::UAccum:
+ case BuiltinType::SatUAccum:
+ return Target.getUnsignedAccumIBits();
+ case BuiltinType::ULongAccum:
+ case BuiltinType::SatULongAccum:
+ return Target.getUnsignedLongAccumIBits();
+ case BuiltinType::ShortFract:
+ case BuiltinType::SatShortFract:
+ case BuiltinType::Fract:
+ case BuiltinType::SatFract:
+ case BuiltinType::LongFract:
+ case BuiltinType::SatLongFract:
+ case BuiltinType::UShortFract:
+ case BuiltinType::SatUShortFract:
+ case BuiltinType::UFract:
+ case BuiltinType::SatUFract:
+ case BuiltinType::ULongFract:
+ case BuiltinType::SatULongFract:
+ return 0;
+ }
+ }
+
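The two accessors above forward to per-type TargetInfo getters, and the CheckFixedPointBits() asserts near the end of TargetInfo.cpp pin down the invariant they rely on: for a signed fixed-point type, the bit width covers one sign bit plus the integral bits plus the fractional bits (the scale); unsigned types drop the sign bit unless the target pads them. A minimal standalone sketch of that accounting, using the default AccumWidth = 32 and AccumScale = 15 that TargetInfo's constructor sets further down (the struct and its names are illustrative, not clang API):

    #include <cassert>

    // Illustrative only: mirrors the width = sign + integral + fractional
    // split that the fixed-point getters and asserts here rely on.
    struct FixedPointLayout {
      unsigned Width;   // total bits, e.g. AccumWidth = 32
      unsigned Scale;   // fractional bits, e.g. AccumScale = 15
      bool IsSigned;    // signed types reserve one sign bit

      unsigned integralBits() const {
        return Width - Scale - (IsSigned ? 1u : 0u);
      }
    };

    int main() {
      FixedPointLayout SignedAccum{32, 15, true};
      // 32 bits = 1 sign + 16 integral + 15 fractional.
      assert(SignedAccum.integralBits() == 16);
      return 0;
    }
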
+ llvm::FixedPointSemantics
+ ASTContext::getFixedPointSemantics(QualType Ty) const {
+ assert((Ty->isFixedPointType() || Ty->isIntegerType()) &&
+ "Can only get the fixed point semantics for a "
+ "fixed point or integer type.");
+ if (Ty->isIntegerType())
+ return llvm::FixedPointSemantics::GetIntegerSemantics(
+ getIntWidth(Ty), Ty->isSignedIntegerType());
+
+ bool isSigned = Ty->isSignedFixedPointType();
+ return llvm::FixedPointSemantics(
+ static_cast<unsigned>(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned,
+ Ty->isSaturatedFixedPointType(),
+ !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding());
+ }
+
+ llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const {
+ assert(Ty->isFixedPointType());
+ return llvm::APFixedPoint::getMax(getFixedPointSemantics(Ty));
+ }
+
+ llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const {
+ assert(Ty->isFixedPointType());
+ return llvm::APFixedPoint::getMin(getFixedPointSemantics(Ty));
+ }
+
+ QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const {
+ assert(Ty->isUnsignedFixedPointType() &&
+ "Expected unsigned fixed point type");
+
+ switch (Ty->castAs<BuiltinType>()->getKind()) {
+ case BuiltinType::UShortAccum:
+ return ShortAccumTy;
+ case BuiltinType::UAccum:
+ return AccumTy;
+ case BuiltinType::ULongAccum:
+ return LongAccumTy;
+ case BuiltinType::SatUShortAccum:
+ return SatShortAccumTy;
+ case BuiltinType::SatUAccum:
+ return SatAccumTy;
+ case BuiltinType::SatULongAccum:
+ return SatLongAccumTy;
+ case BuiltinType::UShortFract:
+ return ShortFractTy;
+ case BuiltinType::UFract:
+ return FractTy;
+ case BuiltinType::ULongFract:
+ return LongFractTy;
+ case BuiltinType::SatUShortFract:
+ return SatShortFractTy;
+ case BuiltinType::SatUFract:
+ return SatFractTy;
+ case BuiltinType::SatULongFract:
+ return SatLongFractTy;
+ default:
+ llvm_unreachable("Unexpected unsigned fixed point type");
+ }
+ }
+
+ ParsedTargetAttr
+ ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const {
+ assert(TD != nullptr);
+ ParsedTargetAttr ParsedAttr = TD->parse();
+
+ ParsedAttr.Features.erase(
+ llvm::remove_if(ParsedAttr.Features,
+ [&](const std::string &Feat) {
+ return !Target->isValidFeatureName(
+ StringRef{Feat}.substr(1));
+ }),
+ ParsedAttr.Features.end());
+ return ParsedAttr;
+ }
+
+ void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
+ const FunctionDecl *FD) const {
+ if (FD)
+ getFunctionFeatureMap(FeatureMap, GlobalDecl().getWithDecl(FD));
+ else
+ Target->initFeatureMap(FeatureMap, getDiagnostics(),
+ Target->getTargetOpts().CPU,
+ Target->getTargetOpts().Features);
+ }
+
+ // Fills in the supplied string map with the set of target features for the
+ // passed in function.
+ void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
+ GlobalDecl GD) const {
+ StringRef TargetCPU = Target->getTargetOpts().CPU;
+ const FunctionDecl *FD = GD.getDecl()->getAsFunction();
+ if (const auto *TD = FD->getAttr<TargetAttr>()) {
+ ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD);
+
+ // Make a copy of the features as passed on the command line into the
+ // beginning of the additional features from the function to override.
+ ParsedAttr.Features.insert(
+ ParsedAttr.Features.begin(),
+ Target->getTargetOpts().FeaturesAsWritten.begin(),
+ Target->getTargetOpts().FeaturesAsWritten.end());
+
+ if (ParsedAttr.Architecture != "" &&
+ Target->isValidCPUName(ParsedAttr.Architecture))
+ TargetCPU = ParsedAttr.Architecture;
+
+ // Now populate the feature map, first with the TargetCPU which is either
+ // the default or a new one from the target attribute string. Then we'll use
+ // the passed in features (FeaturesAsWritten) along with the new ones from
+ // the attribute.
+ Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU,
+ ParsedAttr.Features);
+ } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
+ llvm::SmallVector<StringRef, 32> FeaturesTmp;
+ Target->getCPUSpecificCPUDispatchFeatures(
+ SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp);
+ std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
+ Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
+ } else {
+ FeatureMap = Target->getTargetOpts().FeatureMap;
+ }
+ }
+
+ OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
+ OMPTraitInfoVector.emplace_back(new OMPTraitInfo());
+ return *OMPTraitInfoVector.back();
+ }
+
+ const StreamingDiagnostic &clang::
+ operator<<(const StreamingDiagnostic &DB,
+ const ASTContext::SectionInfo &Section) {
+ if (Section.Decl)
+ return DB << Section.Decl;
+ return DB << "a prior #pragma section";
+ }
+
+ bool ASTContext::mayExternalizeStaticVar(const Decl *D) const {
+ bool IsStaticVar =
+ isa<VarDecl>(D) && cast<VarDecl>(D)->getStorageClass() == SC_Static;
+ bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
+ !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
+ (D->hasAttr<CUDAConstantAttr>() &&
+ !D->getAttr<CUDAConstantAttr>()->isImplicit());
+ // CUDA/HIP: static managed variables need to be externalized since it is
+ // a declaration in IR, therefore cannot have internal linkage.
+ return IsStaticVar &&
+ (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar);
+ }
+
+ bool ASTContext::shouldExternalizeStaticVar(const Decl *D) const {
+ return mayExternalizeStaticVar(D) &&
+ (D->hasAttr<HIPManagedAttr>() ||
+ CUDAStaticDeviceVarReferencedByHost.count(cast<VarDecl>(D)));
+ }
+
+ StringRef ASTContext::getCUIDHash() const {
+ if (!CUIDHash.empty())
+ return CUIDHash;
+ if (LangOpts.CUID.empty())
+ return StringRef();
+ CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true);
+ return CUIDHash;
+ }
+diff --git a/clang/lib/Basic/TargetInfo.cpp b/clang/lib/Basic/TargetInfo.cpp
+index bc3c607dd74e..3e7c2a25a686 100644
+--- a/clang/lib/Basic/TargetInfo.cpp
++++ b/clang/lib/Basic/TargetInfo.cpp
+@@ -1,850 +1,851 @@
+ //===--- TargetInfo.cpp - Information about Target machine ----------------===//
+ //
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ // See https://llvm.org/LICENSE.txt for license information.
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ //
+ //===----------------------------------------------------------------------===//
+ //
+ // This file implements the TargetInfo and TargetInfoImpl interfaces.
+ //
+ //===----------------------------------------------------------------------===//
+
+ #include "clang/Basic/TargetInfo.h"
+ #include "clang/Basic/AddressSpaces.h"
+ #include "clang/Basic/CharInfo.h"
+ #include "clang/Basic/Diagnostic.h"
+ #include "clang/Basic/LangOptions.h"
+ #include "llvm/ADT/APFloat.h"
+ #include "llvm/ADT/STLExtras.h"
+ #include "llvm/IR/DataLayout.h"
+ #include "llvm/Support/ErrorHandling.h"
+ #include "llvm/Support/TargetParser.h"
+ #include <cstdlib>
+ using namespace clang;
+
+ static const LangASMap DefaultAddrSpaceMap = {0};
+
+ // TargetInfo Constructor.
+ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
+ // Set defaults. Defaults are set for a 32-bit RISC platform, like PPC or
+ // SPARC. These should be overridden by concrete targets as needed.
+ BigEndian = !T.isLittleEndian();
+ TLSSupported = true;
+ VLASupported = true;
+ NoAsmVariants = false;
+ HasLegalHalfType = false;
+ HasFloat128 = false;
+ HasFloat16 = false;
+ HasBFloat16 = false;
+ HasStrictFP = false;
+ PointerWidth = PointerAlign = 32;
+ BoolWidth = BoolAlign = 8;
+ IntWidth = IntAlign = 32;
+ LongWidth = LongAlign = 32;
+ LongLongWidth = LongLongAlign = 64;
+
+ // Fixed point default bit widths
+ ShortAccumWidth = ShortAccumAlign = 16;
+ AccumWidth = AccumAlign = 32;
+ LongAccumWidth = LongAccumAlign = 64;
+ ShortFractWidth = ShortFractAlign = 8;
+ FractWidth = FractAlign = 16;
+ LongFractWidth = LongFractAlign = 32;
+
+ // Fixed point default integral and fractional bit sizes
+ // We give the _Accum 1 fewer fractional bits than their corresponding _Fract
+ // types by default to have the same number of fractional bits between _Accum
+ // and _Fract types.
+ PaddingOnUnsignedFixedPoint = false;
+ ShortAccumScale = 7;
+ AccumScale = 15;
+ LongAccumScale = 31;
+
+ SuitableAlign = 64;
+ DefaultAlignForAttributeAligned = 128;
+ MinGlobalAlign = 0;
+ // From the glibc documentation, on GNU systems, malloc guarantees 16-byte
+ // alignment on 64-bit systems and 8-byte alignment on 32-bit systems. See
+ // https://www.gnu.org/software/libc/manual/html_node/Malloc-Examples.html.
+ // This alignment guarantee also applies to Windows and Android. On Darwin,
+ // the alignment is 16 bytes on both 64-bit and 32-bit systems.
+ if (T.isGNUEnvironment() || T.isWindowsMSVCEnvironment() || T.isAndroid())
+ NewAlign = Triple.isArch64Bit() ? 128 : Triple.isArch32Bit() ? 64 : 0;
+ else if (T.isOSDarwin())
+ NewAlign = 128;
+ else
+ NewAlign = 0; // Infer from basic type alignment.
+ HalfWidth = 16; + HalfAlign = 16; + FloatWidth = 32; + FloatAlign = 32; + DoubleWidth = 64; + DoubleAlign = 64; + LongDoubleWidth = 64; + LongDoubleAlign = 64; + Float128Align = 128; + LargeArrayMinWidth = 0; + LargeArrayAlign = 0; + MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 0; + MaxVectorAlign = 0; + MaxTLSAlign = 0; + SimdDefaultAlign = 0; + SizeType = UnsignedLong; + PtrDiffType = SignedLong; + IntMaxType = SignedLongLong; + IntPtrType = SignedLong; + WCharType = SignedInt; + WIntType = SignedInt; + Char16Type = UnsignedShort; + Char32Type = UnsignedInt; + Int64Type = SignedLongLong; + SigAtomicType = SignedInt; + ProcessIDType = SignedInt; + UseSignedCharForObjCBool = true; + UseBitFieldTypeAlignment = true; + UseZeroLengthBitfieldAlignment = false; + UseExplicitBitFieldAlignment = true; + ZeroLengthBitfieldBoundary = 0; ++ MaxAlignedAttribute = 0; + HalfFormat = &llvm::APFloat::IEEEhalf(); + FloatFormat = &llvm::APFloat::IEEEsingle(); + DoubleFormat = &llvm::APFloat::IEEEdouble(); + LongDoubleFormat = &llvm::APFloat::IEEEdouble(); + Float128Format = &llvm::APFloat::IEEEquad(); + MCountName = "mcount"; + RegParmMax = 0; + SSERegParmMax = 0; + HasAlignMac68kSupport = false; + HasBuiltinMSVaList = false; + IsRenderScriptTarget = false; + HasAArch64SVETypes = false; + HasRISCVVTypes = false; + AllowAMDGPUUnsafeFPAtomics = false; + ARMCDECoprocMask = 0; + + // Default to no types using fpret. + RealTypeUsesObjCFPRet = 0; + + // Default to not using fp2ret for __Complex long double + ComplexLongDoubleUsesFP2Ret = false; + + // Set the C++ ABI based on the triple. + TheCXXABI.set(Triple.isKnownWindowsMSVCEnvironment() + ? TargetCXXABI::Microsoft + : TargetCXXABI::GenericItanium); + + // Default to an empty address space map. + AddrSpaceMap = &DefaultAddrSpaceMap; + UseAddrSpaceMapMangling = false; + + // Default to an unknown platform name. + PlatformName = "unknown"; + PlatformMinVersion = VersionTuple(); + + MaxOpenCLWorkGroupSize = 1024; + } + + // Out of line virtual dtor for TargetInfo. + TargetInfo::~TargetInfo() {} + + void TargetInfo::resetDataLayout(StringRef DL) { + DataLayout.reset(new llvm::DataLayout(DL)); + } + + bool + TargetInfo::checkCFProtectionBranchSupported(DiagnosticsEngine &Diags) const { + Diags.Report(diag::err_opt_not_valid_on_target) << "cf-protection=branch"; + return false; + } + + bool + TargetInfo::checkCFProtectionReturnSupported(DiagnosticsEngine &Diags) const { + Diags.Report(diag::err_opt_not_valid_on_target) << "cf-protection=return"; + return false; + } + + /// getTypeName - Return the user string for the specified integer type enum. + /// For example, SignedShort -> "short". + const char *TargetInfo::getTypeName(IntType T) { + switch (T) { + default: llvm_unreachable("not an integer!"); + case SignedChar: return "signed char"; + case UnsignedChar: return "unsigned char"; + case SignedShort: return "short"; + case UnsignedShort: return "unsigned short"; + case SignedInt: return "int"; + case UnsignedInt: return "unsigned int"; + case SignedLong: return "long int"; + case UnsignedLong: return "long unsigned int"; + case SignedLongLong: return "long long int"; + case UnsignedLongLong: return "long long unsigned int"; + } + } + + /// getTypeConstantSuffix - Return the constant suffix for the specified + /// integer type enum. For example, SignedLong -> "L". 
+ const char *TargetInfo::getTypeConstantSuffix(IntType T) const { + switch (T) { + default: llvm_unreachable("not an integer!"); + case SignedChar: + case SignedShort: + case SignedInt: return ""; + case SignedLong: return "L"; + case SignedLongLong: return "LL"; + case UnsignedChar: + if (getCharWidth() < getIntWidth()) + return ""; + LLVM_FALLTHROUGH; + case UnsignedShort: + if (getShortWidth() < getIntWidth()) + return ""; + LLVM_FALLTHROUGH; + case UnsignedInt: return "U"; + case UnsignedLong: return "UL"; + case UnsignedLongLong: return "ULL"; + } + } + + /// getTypeFormatModifier - Return the printf format modifier for the + /// specified integer type enum. For example, SignedLong -> "l". + + const char *TargetInfo::getTypeFormatModifier(IntType T) { + switch (T) { + default: llvm_unreachable("not an integer!"); + case SignedChar: + case UnsignedChar: return "hh"; + case SignedShort: + case UnsignedShort: return "h"; + case SignedInt: + case UnsignedInt: return ""; + case SignedLong: + case UnsignedLong: return "l"; + case SignedLongLong: + case UnsignedLongLong: return "ll"; + } + } + + /// getTypeWidth - Return the width (in bits) of the specified integer type + /// enum. For example, SignedInt -> getIntWidth(). + unsigned TargetInfo::getTypeWidth(IntType T) const { + switch (T) { + default: llvm_unreachable("not an integer!"); + case SignedChar: + case UnsignedChar: return getCharWidth(); + case SignedShort: + case UnsignedShort: return getShortWidth(); + case SignedInt: + case UnsignedInt: return getIntWidth(); + case SignedLong: + case UnsignedLong: return getLongWidth(); + case SignedLongLong: + case UnsignedLongLong: return getLongLongWidth(); + }; + } + + TargetInfo::IntType TargetInfo::getIntTypeByWidth( + unsigned BitWidth, bool IsSigned) const { + if (getCharWidth() == BitWidth) + return IsSigned ? SignedChar : UnsignedChar; + if (getShortWidth() == BitWidth) + return IsSigned ? SignedShort : UnsignedShort; + if (getIntWidth() == BitWidth) + return IsSigned ? SignedInt : UnsignedInt; + if (getLongWidth() == BitWidth) + return IsSigned ? SignedLong : UnsignedLong; + if (getLongLongWidth() == BitWidth) + return IsSigned ? SignedLongLong : UnsignedLongLong; + return NoInt; + } + + TargetInfo::IntType TargetInfo::getLeastIntTypeByWidth(unsigned BitWidth, + bool IsSigned) const { + if (getCharWidth() >= BitWidth) + return IsSigned ? SignedChar : UnsignedChar; + if (getShortWidth() >= BitWidth) + return IsSigned ? SignedShort : UnsignedShort; + if (getIntWidth() >= BitWidth) + return IsSigned ? SignedInt : UnsignedInt; + if (getLongWidth() >= BitWidth) + return IsSigned ? SignedLong : UnsignedLong; + if (getLongLongWidth() >= BitWidth) + return IsSigned ? SignedLongLong : UnsignedLongLong; + return NoInt; + } + + TargetInfo::RealType TargetInfo::getRealTypeByWidth(unsigned BitWidth, + bool ExplicitIEEE) const { + if (getFloatWidth() == BitWidth) + return Float; + if (getDoubleWidth() == BitWidth) + return Double; + + switch (BitWidth) { + case 96: + if (&getLongDoubleFormat() == &llvm::APFloat::x87DoubleExtended()) + return LongDouble; + break; + case 128: + // The caller explicitly asked for an IEEE compliant type but we still + // have to check if the target supports it. + if (ExplicitIEEE) + return hasFloat128Type() ? 
Float128 : NoFloat; + if (&getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble() || + &getLongDoubleFormat() == &llvm::APFloat::IEEEquad()) + return LongDouble; + if (hasFloat128Type()) + return Float128; + break; + } + + return NoFloat; + } + + /// getTypeAlign - Return the alignment (in bits) of the specified integer type + /// enum. For example, SignedInt -> getIntAlign(). + unsigned TargetInfo::getTypeAlign(IntType T) const { + switch (T) { + default: llvm_unreachable("not an integer!"); + case SignedChar: + case UnsignedChar: return getCharAlign(); + case SignedShort: + case UnsignedShort: return getShortAlign(); + case SignedInt: + case UnsignedInt: return getIntAlign(); + case SignedLong: + case UnsignedLong: return getLongAlign(); + case SignedLongLong: + case UnsignedLongLong: return getLongLongAlign(); + }; + } + + /// isTypeSigned - Return whether an integer types is signed. Returns true if + /// the type is signed; false otherwise. + bool TargetInfo::isTypeSigned(IntType T) { + switch (T) { + default: llvm_unreachable("not an integer!"); + case SignedChar: + case SignedShort: + case SignedInt: + case SignedLong: + case SignedLongLong: + return true; + case UnsignedChar: + case UnsignedShort: + case UnsignedInt: + case UnsignedLong: + case UnsignedLongLong: + return false; + }; + } + + /// adjust - Set forced language options. + /// Apply changes to the target information with respect to certain + /// language options which change the target configuration and adjust + /// the language based on the target options where applicable. + void TargetInfo::adjust(LangOptions &Opts) { + if (Opts.NoBitFieldTypeAlign) + UseBitFieldTypeAlignment = false; + + switch (Opts.WCharSize) { + default: llvm_unreachable("invalid wchar_t width"); + case 0: break; + case 1: WCharType = Opts.WCharIsSigned ? SignedChar : UnsignedChar; break; + case 2: WCharType = Opts.WCharIsSigned ? SignedShort : UnsignedShort; break; + case 4: WCharType = Opts.WCharIsSigned ? SignedInt : UnsignedInt; break; + } + + if (Opts.AlignDouble) { + DoubleAlign = LongLongAlign = 64; + LongDoubleAlign = 64; + } + + if (Opts.OpenCL) { + // OpenCL C requires specific widths for types, irrespective of + // what these normally are for the target. + // We also define long long and long double here, although the + // OpenCL standard only mentions these as "reserved". + IntWidth = IntAlign = 32; + LongWidth = LongAlign = 64; + LongLongWidth = LongLongAlign = 128; + HalfWidth = HalfAlign = 16; + FloatWidth = FloatAlign = 32; + + // Embedded 32-bit targets (OpenCL EP) might have double C type + // defined as float. Let's not override this as it might lead + // to generating illegal code that uses 64bit doubles. + if (DoubleWidth != FloatWidth) { + DoubleWidth = DoubleAlign = 64; + DoubleFormat = &llvm::APFloat::IEEEdouble(); + } + LongDoubleWidth = LongDoubleAlign = 128; + + unsigned MaxPointerWidth = getMaxPointerWidth(); + assert(MaxPointerWidth == 32 || MaxPointerWidth == 64); + bool Is32BitArch = MaxPointerWidth == 32; + SizeType = Is32BitArch ? UnsignedInt : UnsignedLong; + PtrDiffType = Is32BitArch ? SignedInt : SignedLong; + IntPtrType = Is32BitArch ? 
SignedInt : SignedLong; + + IntMaxType = SignedLongLong; + Int64Type = SignedLong; + + HalfFormat = &llvm::APFloat::IEEEhalf(); + FloatFormat = &llvm::APFloat::IEEEsingle(); + LongDoubleFormat = &llvm::APFloat::IEEEquad(); + } + + if (Opts.DoubleSize) { + if (Opts.DoubleSize == 32) { + DoubleWidth = 32; + LongDoubleWidth = 32; + DoubleFormat = &llvm::APFloat::IEEEsingle(); + LongDoubleFormat = &llvm::APFloat::IEEEsingle(); + } else if (Opts.DoubleSize == 64) { + DoubleWidth = 64; + LongDoubleWidth = 64; + DoubleFormat = &llvm::APFloat::IEEEdouble(); + LongDoubleFormat = &llvm::APFloat::IEEEdouble(); + } + } + + if (Opts.LongDoubleSize) { + if (Opts.LongDoubleSize == DoubleWidth) { + LongDoubleWidth = DoubleWidth; + LongDoubleAlign = DoubleAlign; + LongDoubleFormat = DoubleFormat; + } else if (Opts.LongDoubleSize == 128) { + LongDoubleWidth = LongDoubleAlign = 128; + LongDoubleFormat = &llvm::APFloat::IEEEquad(); + } + } + + if (Opts.NewAlignOverride) + NewAlign = Opts.NewAlignOverride * getCharWidth(); + + // Each unsigned fixed point type has the same number of fractional bits as + // its corresponding signed type. + PaddingOnUnsignedFixedPoint |= Opts.PaddingOnUnsignedFixedPoint; + CheckFixedPointBits(); + } + + bool TargetInfo::initFeatureMap( + llvm::StringMap &Features, DiagnosticsEngine &Diags, StringRef CPU, + const std::vector &FeatureVec) const { + for (const auto &F : FeatureVec) { + StringRef Name = F; + // Apply the feature via the target. + bool Enabled = Name[0] == '+'; + setFeatureEnabled(Features, Name.substr(1), Enabled); + } + return true; + } + + TargetInfo::CallingConvKind + TargetInfo::getCallingConvKind(bool ClangABICompat4) const { + if (getCXXABI() != TargetCXXABI::Microsoft && + (ClangABICompat4 || getTriple().getOS() == llvm::Triple::PS4)) + return CCK_ClangABI4OrPS4; + return CCK_Default; + } + + LangAS TargetInfo::getOpenCLTypeAddrSpace(OpenCLTypeKind TK) const { + switch (TK) { + case OCLTK_Image: + case OCLTK_Pipe: + return LangAS::opencl_global; + + case OCLTK_Sampler: + return LangAS::opencl_constant; + + default: + return LangAS::Default; + } + } + + //===----------------------------------------------------------------------===// + + + static StringRef removeGCCRegisterPrefix(StringRef Name) { + if (Name[0] == '%' || Name[0] == '#') + Name = Name.substr(1); + + return Name; + } + + /// isValidClobber - Returns whether the passed in string is + /// a valid clobber in an inline asm statement. This is used by + /// Sema. + bool TargetInfo::isValidClobber(StringRef Name) const { + return (isValidGCCRegisterName(Name) || + Name == "memory" || Name == "cc"); + } + + /// isValidGCCRegisterName - Returns whether the passed in string + /// is a valid register name according to GCC. This is used by Sema for + /// inline asm statements. + bool TargetInfo::isValidGCCRegisterName(StringRef Name) const { + if (Name.empty()) + return false; + + // Get rid of any register prefix. + Name = removeGCCRegisterPrefix(Name); + if (Name.empty()) + return false; + + ArrayRef Names = getGCCRegNames(); + + // If we have a number it maps to an entry in the register name array. + if (isDigit(Name[0])) { + unsigned n; + if (!Name.getAsInteger(0, n)) + return n < Names.size(); + } + + // Check register names. + if (llvm::is_contained(Names, Name)) + return true; + + // Check any additional names that we have. 
+ for (const AddlRegName &ARN : getGCCAddlRegNames()) + for (const char *AN : ARN.Names) { + if (!AN) + break; + // Make sure the register that the additional name is for is within + // the bounds of the register names from above. + if (AN == Name && ARN.RegNum < Names.size()) + return true; + } + + // Now check aliases. + for (const GCCRegAlias &GRA : getGCCRegAliases()) + for (const char *A : GRA.Aliases) { + if (!A) + break; + if (A == Name) + return true; + } + + return false; + } + + StringRef TargetInfo::getNormalizedGCCRegisterName(StringRef Name, + bool ReturnCanonical) const { + assert(isValidGCCRegisterName(Name) && "Invalid register passed in"); + + // Get rid of any register prefix. + Name = removeGCCRegisterPrefix(Name); + + ArrayRef Names = getGCCRegNames(); + + // First, check if we have a number. + if (isDigit(Name[0])) { + unsigned n; + if (!Name.getAsInteger(0, n)) { + assert(n < Names.size() && "Out of bounds register number!"); + return Names[n]; + } + } + + // Check any additional names that we have. + for (const AddlRegName &ARN : getGCCAddlRegNames()) + for (const char *AN : ARN.Names) { + if (!AN) + break; + // Make sure the register that the additional name is for is within + // the bounds of the register names from above. + if (AN == Name && ARN.RegNum < Names.size()) + return ReturnCanonical ? Names[ARN.RegNum] : Name; + } + + // Now check aliases. + for (const GCCRegAlias &RA : getGCCRegAliases()) + for (const char *A : RA.Aliases) { + if (!A) + break; + if (A == Name) + return RA.Register; + } + + return Name; + } + + bool TargetInfo::validateOutputConstraint(ConstraintInfo &Info) const { + const char *Name = Info.getConstraintStr().c_str(); + // An output constraint must start with '=' or '+' + if (*Name != '=' && *Name != '+') + return false; + + if (*Name == '+') + Info.setIsReadWrite(); + + Name++; + while (*Name) { + switch (*Name) { + default: + if (!validateAsmConstraint(Name, Info)) { + // FIXME: We temporarily return false + // so we can add more constraints as we hit it. + // Eventually, an unknown constraint should just be treated as 'g'. + return false; + } + break; + case '&': // early clobber. + Info.setEarlyClobber(); + break; + case '%': // commutative. + // FIXME: Check that there is a another register after this one. + break; + case 'r': // general register. + Info.setAllowsRegister(); + break; + case 'm': // memory operand. + case 'o': // offsetable memory operand. + case 'V': // non-offsetable memory operand. + case '<': // autodecrement memory operand. + case '>': // autoincrement memory operand. + Info.setAllowsMemory(); + break; + case 'g': // general register, memory operand or immediate integer. + case 'X': // any operand. + Info.setAllowsRegister(); + Info.setAllowsMemory(); + break; + case ',': // multiple alternative constraint. Pass it. + // Handle additional optional '=' or '+' modifiers. + if (Name[1] == '=' || Name[1] == '+') + Name++; + break; + case '#': // Ignore as constraint. + while (Name[1] && Name[1] != ',') + Name++; + break; + case '?': // Disparage slightly code. + case '!': // Disparage severely. + case '*': // Ignore for choosing register preferences. + case 'i': // Ignore i,n,E,F as output constraints (match from the other + // chars) + case 'n': + case 'E': + case 'F': + break; // Pass them. + } + + Name++; + } + + // Early clobber with a read-write constraint which doesn't permit registers + // is invalid. 
+   if (Info.earlyClobber() && Info.isReadWrite() && !Info.allowsRegister())
+     return false;
+
+   // If a constraint allows neither memory nor register operands it contains
+   // only modifiers. Reject it.
+   return Info.allowsMemory() || Info.allowsRegister();
+ }
+
+ bool TargetInfo::resolveSymbolicName(const char *&Name,
+                                      ArrayRef<ConstraintInfo> OutputConstraints,
+                                      unsigned &Index) const {
+   assert(*Name == '[' && "Symbolic name did not start with '['");
+   Name++;
+   const char *Start = Name;
+   while (*Name && *Name != ']')
+     Name++;
+
+   if (!*Name) {
+     // Missing ']'
+     return false;
+   }
+
+   std::string SymbolicName(Start, Name - Start);
+
+   for (Index = 0; Index != OutputConstraints.size(); ++Index)
+     if (SymbolicName == OutputConstraints[Index].getName())
+       return true;
+
+   return false;
+ }
+
+ bool TargetInfo::validateInputConstraint(
+     MutableArrayRef<ConstraintInfo> OutputConstraints,
+     ConstraintInfo &Info) const {
+   const char *Name = Info.ConstraintStr.c_str();
+
+   if (!*Name)
+     return false;
+
+   while (*Name) {
+     switch (*Name) {
+     default:
+       // Check if we have a matching constraint
+       if (*Name >= '0' && *Name <= '9') {
+         const char *DigitStart = Name;
+         while (Name[1] >= '0' && Name[1] <= '9')
+           Name++;
+         const char *DigitEnd = Name;
+         unsigned i;
+         if (StringRef(DigitStart, DigitEnd - DigitStart + 1)
+                 .getAsInteger(10, i))
+           return false;
+
+         // Check if matching constraint is out of bounds.
+         if (i >= OutputConstraints.size()) return false;
+
+         // A number must refer to an output only operand.
+         if (OutputConstraints[i].isReadWrite())
+           return false;
+
+         // If the constraint is already tied, it must be tied to the
+         // same operand referenced to by the number.
+         if (Info.hasTiedOperand() && Info.getTiedOperand() != i)
+           return false;
+
+         // The constraint should have the same info as the respective
+         // output constraint.
+         Info.setTiedOperand(i, OutputConstraints[i]);
+       } else if (!validateAsmConstraint(Name, Info)) {
+         // FIXME: This error return is in place temporarily so we can
+         // add more constraints as we hit it. Eventually, an unknown
+         // constraint should just be treated as 'g'.
+         return false;
+       }
+       break;
+     case '[': {
+       unsigned Index = 0;
+       if (!resolveSymbolicName(Name, OutputConstraints, Index))
+         return false;
+
+       // If the constraint is already tied, it must be tied to the
+       // same operand referenced to by the number.
+       if (Info.hasTiedOperand() && Info.getTiedOperand() != Index)
+         return false;
+
+       // A number must refer to an output only operand.
+       if (OutputConstraints[Index].isReadWrite())
+         return false;
+
+       Info.setTiedOperand(Index, OutputConstraints[Index]);
+       break;
+     }
+     case '%': // commutative
+       // FIXME: Fail if % is used with the last operand.
+       break;
+     case 'i': // immediate integer.
+       break;
+     case 'n': // immediate integer with a known value.
+       Info.setRequiresImmediate();
+       break;
+     case 'I': // Various constant constraints with target-specific meanings.
+     case 'J':
+     case 'K':
+     case 'L':
+     case 'M':
+     case 'N':
+     case 'O':
+     case 'P':
+       if (!validateAsmConstraint(Name, Info))
+         return false;
+       break;
+     case 'r': // general register.
+       Info.setAllowsRegister();
+       break;
+     case 'm': // memory operand.
+     case 'o': // offsettable memory operand.
+     case 'V': // non-offsettable memory operand.
+     case '<': // autodecrement memory operand.
+     case '>': // autoincrement memory operand.
+       Info.setAllowsMemory();
+       break;
+     case 'g': // general register, memory operand or immediate integer.
+     case 'X': // any operand.
+       Info.setAllowsRegister();
+       Info.setAllowsMemory();
+       break;
+     case 'E': // immediate floating point.
+     case 'F': // immediate floating point.
+     case 'p': // address operand.
+       break;
+     case ',': // multiple alternative constraint. Ignore comma.
+       break;
+     case '#': // Ignore as constraint.
+       while (Name[1] && Name[1] != ',')
+         Name++;
+       break;
+     case '?': // Disparage slightly code.
+     case '!': // Disparage severely.
+     case '*': // Ignore for choosing register preferences.
+       break; // Pass them.
+     }
+
+     Name++;
+   }
+
+   return true;
+ }
+
+ void TargetInfo::CheckFixedPointBits() const {
+   // Check that the number of fractional and integral bits (and maybe sign) can
+   // fit into the bits given for a fixed point type.
+   assert(ShortAccumScale + getShortAccumIBits() + 1 <= ShortAccumWidth);
+   assert(AccumScale + getAccumIBits() + 1 <= AccumWidth);
+   assert(LongAccumScale + getLongAccumIBits() + 1 <= LongAccumWidth);
+   assert(getUnsignedShortAccumScale() + getUnsignedShortAccumIBits() <=
+          ShortAccumWidth);
+   assert(getUnsignedAccumScale() + getUnsignedAccumIBits() <= AccumWidth);
+   assert(getUnsignedLongAccumScale() + getUnsignedLongAccumIBits() <=
+          LongAccumWidth);
+
+   assert(getShortFractScale() + 1 <= ShortFractWidth);
+   assert(getFractScale() + 1 <= FractWidth);
+   assert(getLongFractScale() + 1 <= LongFractWidth);
+   assert(getUnsignedShortFractScale() <= ShortFractWidth);
+   assert(getUnsignedFractScale() <= FractWidth);
+   assert(getUnsignedLongFractScale() <= LongFractWidth);
+
+   // Each unsigned fract type has either the same number of fractional bits
+   // as, or one more fractional bit than, its corresponding signed fract type.
+   assert(getShortFractScale() == getUnsignedShortFractScale() ||
+          getShortFractScale() == getUnsignedShortFractScale() - 1);
+   assert(getFractScale() == getUnsignedFractScale() ||
+          getFractScale() == getUnsignedFractScale() - 1);
+   assert(getLongFractScale() == getUnsignedLongFractScale() ||
+          getLongFractScale() == getUnsignedLongFractScale() - 1);
+
+   // When arranged in order of increasing rank (see 6.3.1.3a), the number of
+   // fractional bits is nondecreasing for each of the following sets of
+   // fixed-point types:
+   // - signed fract types
+   // - unsigned fract types
+   // - signed accum types
+   // - unsigned accum types.
+   assert(getLongFractScale() >= getFractScale() &&
+          getFractScale() >= getShortFractScale());
+   assert(getUnsignedLongFractScale() >= getUnsignedFractScale() &&
+          getUnsignedFractScale() >= getUnsignedShortFractScale());
+   assert(LongAccumScale >= AccumScale && AccumScale >= ShortAccumScale);
+   assert(getUnsignedLongAccumScale() >= getUnsignedAccumScale() &&
+          getUnsignedAccumScale() >= getUnsignedShortAccumScale());
+
+   // When arranged in order of increasing rank (see 6.3.1.3a), the number of
+   // integral bits is nondecreasing for each of the following sets of
+   // fixed-point types:
+   // - signed accum types
+   // - unsigned accum types
+   assert(getLongAccumIBits() >= getAccumIBits() &&
+          getAccumIBits() >= getShortAccumIBits());
+   assert(getUnsignedLongAccumIBits() >= getUnsignedAccumIBits() &&
+          getUnsignedAccumIBits() >= getUnsignedShortAccumIBits());
+
+   // Each signed accum type has at least as many integral bits as its
+   // corresponding unsigned accum type.
+   assert(getShortAccumIBits() >= getUnsignedShortAccumIBits());
+   assert(getAccumIBits() >= getUnsignedAccumIBits());
+   assert(getLongAccumIBits() >= getUnsignedLongAccumIBits());
+ }
+
+ void TargetInfo::copyAuxTarget(const TargetInfo *Aux) {
+   auto *Target = static_cast<TransferrableTargetInfo *>(this);
+   auto *Src = static_cast<const TransferrableTargetInfo *>(Aux);
+   *Target = *Src;
+ }
+diff --git a/clang/lib/Basic/Targets/OSTargets.h b/clang/lib/Basic/Targets/OSTargets.h
+index 539466c4f678..d444af48db45 100644
+--- a/clang/lib/Basic/Targets/OSTargets.h
++++ b/clang/lib/Basic/Targets/OSTargets.h
+@@ -1,954 +1,955 @@
+ //===--- OSTargets.h - Declare OS target feature support --------*- C++ -*-===//
+ //
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ // See https://llvm.org/LICENSE.txt for license information.
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ //
+ //===----------------------------------------------------------------------===//
+ //
+ // This file declares OS specific TargetInfo types.
+ //===----------------------------------------------------------------------===//
+
+ #ifndef LLVM_CLANG_LIB_BASIC_TARGETS_OSTARGETS_H
+ #define LLVM_CLANG_LIB_BASIC_TARGETS_OSTARGETS_H
+
+ #include "Targets.h"
+ #include "llvm/MC/MCSectionMachO.h"
+
+ namespace clang {
+ namespace targets {
+
+ template <typename TgtInfo>
+ class LLVM_LIBRARY_VISIBILITY OSTargetInfo : public TgtInfo {
+ protected:
+   virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                             MacroBuilder &Builder) const = 0;
+
+ public:
+   OSTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+       : TgtInfo(Triple, Opts) {}
+
+   void getTargetDefines(const LangOptions &Opts,
+                         MacroBuilder &Builder) const override {
+     TgtInfo::getTargetDefines(Opts, Builder);
+     getOSDefines(Opts, TgtInfo::getTriple(), Builder);
+   }
+ };
+
+ // CloudABI Target
+ template <typename Target>
+ class LLVM_LIBRARY_VISIBILITY CloudABITargetInfo : public OSTargetInfo<Target> {
+ protected:
+   void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                     MacroBuilder &Builder) const override {
+     Builder.defineMacro("__CloudABI__");
+     Builder.defineMacro("__ELF__");
+
+     // CloudABI uses ISO/IEC 10646:2012 for wchar_t, char16_t and char32_t.
+ Builder.defineMacro("__STDC_ISO_10646__", "201206L"); + Builder.defineMacro("__STDC_UTF_16__"); + Builder.defineMacro("__STDC_UTF_32__"); + } + + public: + CloudABITargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts) + : OSTargetInfo(Triple, Opts) {} + }; + + // Ananas target + template + class LLVM_LIBRARY_VISIBILITY AnanasTargetInfo : public OSTargetInfo { + protected: + void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, + MacroBuilder &Builder) const override { + // Ananas defines + Builder.defineMacro("__Ananas__"); + Builder.defineMacro("__ELF__"); + } + + public: + AnanasTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts) + : OSTargetInfo(Triple, Opts) {} + }; + + void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts, + const llvm::Triple &Triple, StringRef &PlatformName, + VersionTuple &PlatformMinVersion); + + template + class LLVM_LIBRARY_VISIBILITY DarwinTargetInfo : public OSTargetInfo { + protected: + void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, + MacroBuilder &Builder) const override { + getDarwinDefines(Builder, Opts, Triple, this->PlatformName, + this->PlatformMinVersion); + } + + public: + DarwinTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts) + : OSTargetInfo(Triple, Opts) { + // By default, no TLS, and we list permitted architecture/OS + // combinations. + this->TLSSupported = false; + + if (Triple.isMacOSX()) + this->TLSSupported = !Triple.isMacOSXVersionLT(10, 7); + else if (Triple.isiOS()) { + // 64-bit iOS supported it from 8 onwards, 32-bit device from 9 onwards, + // 32-bit simulator from 10 onwards. + if (Triple.isArch64Bit()) + this->TLSSupported = !Triple.isOSVersionLT(8); + else if (Triple.isArch32Bit()) { + if (!Triple.isSimulatorEnvironment()) + this->TLSSupported = !Triple.isOSVersionLT(9); + else + this->TLSSupported = !Triple.isOSVersionLT(10); + } + } else if (Triple.isWatchOS()) { + if (!Triple.isSimulatorEnvironment()) + this->TLSSupported = !Triple.isOSVersionLT(2); + else + this->TLSSupported = !Triple.isOSVersionLT(3); + } + + this->MCountName = "\01mcount"; + } + + llvm::Error isValidSectionSpecifier(StringRef SR) const override { + // Let MCSectionMachO validate this. + StringRef Segment, Section; + unsigned TAA, StubSize; + bool HasTAA; + return llvm::MCSectionMachO::ParseSectionSpecifier(SR, Segment, Section, + TAA, HasTAA, StubSize); + } + + const char *getStaticInitSectionSpecifier() const override { + // FIXME: We should return 0 when building kexts. + return "__TEXT,__StaticInit,regular,pure_instructions"; + } + + /// Darwin does not support protected visibility. Darwin's "default" + /// is very similar to ELF's "protected"; Darwin requires a "weak" + /// attribute on declarations that can be dynamically replaced. + bool hasProtectedVisibility() const override { return false; } + + unsigned getExnObjectAlignment() const override { + // Older versions of libc++abi guarantee an alignment of only 8-bytes for + // exception objects because of a bug in __cxa_exception that was + // eventually fixed in r319123. + llvm::VersionTuple MinVersion; + const llvm::Triple &T = this->getTriple(); + + // Compute the earliest OS versions that have the fix to libc++abi. + switch (T.getOS()) { + case llvm::Triple::Darwin: + case llvm::Triple::MacOSX: // Earliest supporting version is 10.14. + MinVersion = llvm::VersionTuple(10U, 14U); + break; + case llvm::Triple::IOS: + case llvm::Triple::TvOS: // Earliest supporting version is 12.0.0. 
+       MinVersion = llvm::VersionTuple(12U);
+       break;
+     case llvm::Triple::WatchOS: // Earliest supporting version is 5.0.0.
+       MinVersion = llvm::VersionTuple(5U);
+       break;
+     default:
+       // Conservatively return 8 bytes if OS is unknown.
+       return 64;
+     }
+
+     unsigned Major, Minor, Micro;
+     T.getOSVersion(Major, Minor, Micro);
+     if (llvm::VersionTuple(Major, Minor, Micro) < MinVersion)
+       return 64;
+     return OSTargetInfo<Target>::getExnObjectAlignment();
+   }
+
+   TargetInfo::IntType getLeastIntTypeByWidth(unsigned BitWidth,
+                                              bool IsSigned) const final {
+     // Darwin uses `long long` for `int_least64_t` and `int_fast64_t`.
+     return BitWidth == 64
+                ? (IsSigned ? TargetInfo::SignedLongLong
+                            : TargetInfo::UnsignedLongLong)
+                : TargetInfo::getLeastIntTypeByWidth(BitWidth, IsSigned);
+   }
+ };
+
+ // DragonFlyBSD Target
+ template <typename Target>
+ class LLVM_LIBRARY_VISIBILITY DragonFlyBSDTargetInfo
+     : public OSTargetInfo<Target> {
+ protected:
+   void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                     MacroBuilder &Builder) const override {
+     // DragonFly defines; list based off of gcc output
+     Builder.defineMacro("__DragonFly__");
+     Builder.defineMacro("__DragonFly_cc_version", "100001");
+     Builder.defineMacro("__ELF__");
+     Builder.defineMacro("__KPRINTF_ATTRIBUTE__");
+     Builder.defineMacro("__tune_i386__");
+     DefineStd(Builder, "unix", Opts);
+   }
+
+ public:
+   DragonFlyBSDTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+       : OSTargetInfo<Target>(Triple, Opts) {
+     switch (Triple.getArch()) {
+     default:
+     case llvm::Triple::x86:
+     case llvm::Triple::x86_64:
+       this->MCountName = ".mcount";
+       break;
+     }
+   }
+ };
+
+ #ifndef FREEBSD_CC_VERSION
+ #define FREEBSD_CC_VERSION 0U
+ #endif
+
+ // FreeBSD Target
+ template <typename Target>
+ class LLVM_LIBRARY_VISIBILITY FreeBSDTargetInfo : public OSTargetInfo<Target> {
+ protected:
+   void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                     MacroBuilder &Builder) const override {
+     // FreeBSD defines; list based off of gcc output
+
+     unsigned Release = Triple.getOSMajorVersion();
+     if (Release == 0U)
+       Release = 8U;
+     unsigned CCVersion = FREEBSD_CC_VERSION;
+     if (CCVersion == 0U)
+       CCVersion = Release * 100000U + 1U;
+
+     Builder.defineMacro("__FreeBSD__", Twine(Release));
+     Builder.defineMacro("__FreeBSD_cc_version", Twine(CCVersion));
+     Builder.defineMacro("__KPRINTF_ATTRIBUTE__");
+     DefineStd(Builder, "unix", Opts);
+     Builder.defineMacro("__ELF__");
+
+     // On FreeBSD, wchar_t contains the number of the code point as
+     // used by the character set of the locale. These character sets are
+     // not necessarily a superset of ASCII.
+     //
+     // FIXME: This is wrong; the macro refers to the numerical values
+     // of wchar_t *literals*, which are not locale-dependent. However,
+     // FreeBSD systems apparently depend on us getting this wrong, and
+     // setting this to 1 is conforming even if all the basic source
+     // character literals have the same encoding as char and wchar_t.
+ Builder.defineMacro("__STDC_MB_MIGHT_NEQ_WC__", "1"); + } + + public: + FreeBSDTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts) + : OSTargetInfo(Triple, Opts) { + switch (Triple.getArch()) { + default: + case llvm::Triple::x86: + case llvm::Triple::x86_64: + this->MCountName = ".mcount"; + break; + case llvm::Triple::mips: + case llvm::Triple::mipsel: + case llvm::Triple::ppc: + case llvm::Triple::ppcle: + case llvm::Triple::ppc64: + case llvm::Triple::ppc64le: + this->MCountName = "_mcount"; + break; + case llvm::Triple::arm: + this->MCountName = "__mcount"; + break; + } + } + }; + + // GNU/kFreeBSD Target + template + class LLVM_LIBRARY_VISIBILITY KFreeBSDTargetInfo : public OSTargetInfo { + protected: + void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, + MacroBuilder &Builder) const override { + // GNU/kFreeBSD defines; list based off of gcc output + + DefineStd(Builder, "unix", Opts); + Builder.defineMacro("__FreeBSD_kernel__"); + Builder.defineMacro("__GLIBC__"); + Builder.defineMacro("__ELF__"); + if (Opts.POSIXThreads) + Builder.defineMacro("_REENTRANT"); + if (Opts.CPlusPlus) + Builder.defineMacro("_GNU_SOURCE"); + } + + public: + KFreeBSDTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts) + : OSTargetInfo(Triple, Opts) {} + }; + + // Haiku Target + template + class LLVM_LIBRARY_VISIBILITY HaikuTargetInfo : public OSTargetInfo { + protected: + void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, + MacroBuilder &Builder) const override { + // Haiku defines; list based off of gcc output + Builder.defineMacro("__HAIKU__"); + Builder.defineMacro("__ELF__"); + DefineStd(Builder, "unix", Opts); + if (this->HasFloat128) + Builder.defineMacro("__FLOAT128__"); + } + + public: + HaikuTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts) + : OSTargetInfo(Triple, Opts) { + this->SizeType = TargetInfo::UnsignedLong; + this->IntPtrType = TargetInfo::SignedLong; + this->PtrDiffType = TargetInfo::SignedLong; + this->ProcessIDType = TargetInfo::SignedLong; + this->TLSSupported = false; + switch (Triple.getArch()) { + default: + break; + case llvm::Triple::x86: + case llvm::Triple::x86_64: + this->HasFloat128 = true; + break; + } + } + }; + + // Hurd target + template + class LLVM_LIBRARY_VISIBILITY HurdTargetInfo : public OSTargetInfo { + protected: + void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, + MacroBuilder &Builder) const override { + // Hurd defines; list based off of gcc output. 
+ DefineStd(Builder, "unix", Opts); + Builder.defineMacro("__GNU__"); + Builder.defineMacro("__gnu_hurd__"); + Builder.defineMacro("__MACH__"); + Builder.defineMacro("__GLIBC__"); + Builder.defineMacro("__ELF__"); + if (Opts.POSIXThreads) + Builder.defineMacro("_REENTRANT"); + if (Opts.CPlusPlus) + Builder.defineMacro("_GNU_SOURCE"); + } + public: + HurdTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts) + : OSTargetInfo(Triple, Opts) {} + }; + + // Minix Target + template + class LLVM_LIBRARY_VISIBILITY MinixTargetInfo : public OSTargetInfo { + protected: + void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, + MacroBuilder &Builder) const override { + // Minix defines + + Builder.defineMacro("__minix", "3"); + Builder.defineMacro("_EM_WSIZE", "4"); + Builder.defineMacro("_EM_PSIZE", "4"); + Builder.defineMacro("_EM_SSIZE", "2"); + Builder.defineMacro("_EM_LSIZE", "4"); + Builder.defineMacro("_EM_FSIZE", "4"); + Builder.defineMacro("_EM_DSIZE", "8"); + Builder.defineMacro("__ELF__"); + DefineStd(Builder, "unix", Opts); + } + + public: + MinixTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts) + : OSTargetInfo(Triple, Opts) {} + }; + + // Linux target + template + class LLVM_LIBRARY_VISIBILITY LinuxTargetInfo : public OSTargetInfo { + protected: + void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, + MacroBuilder &Builder) const override { + // Linux defines; list based off of gcc output + DefineStd(Builder, "unix", Opts); + DefineStd(Builder, "linux", Opts); + Builder.defineMacro("__ELF__"); + if (Triple.isAndroid()) { + Builder.defineMacro("__ANDROID__", "1"); + unsigned Maj, Min, Rev; + Triple.getEnvironmentVersion(Maj, Min, Rev); + this->PlatformName = "android"; + this->PlatformMinVersion = VersionTuple(Maj, Min, Rev); + if (Maj) { + Builder.defineMacro("__ANDROID_MIN_SDK_VERSION__", Twine(Maj)); + // This historical but ambiguous name for the minSdkVersion macro. Keep + // defined for compatibility. 
+ Builder.defineMacro("__ANDROID_API__", "__ANDROID_MIN_SDK_VERSION__"); + } + } else { + Builder.defineMacro("__gnu_linux__"); + } + if (Opts.POSIXThreads) + Builder.defineMacro("_REENTRANT"); + if (Opts.CPlusPlus) + Builder.defineMacro("_GNU_SOURCE"); + if (this->HasFloat128) + Builder.defineMacro("__FLOAT128__"); + } + + public: + LinuxTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts) + : OSTargetInfo(Triple, Opts) { + this->WIntType = TargetInfo::UnsignedInt; + + switch (Triple.getArch()) { + default: + break; + case llvm::Triple::mips: + case llvm::Triple::mipsel: + case llvm::Triple::mips64: + case llvm::Triple::mips64el: + case llvm::Triple::ppc: + case llvm::Triple::ppcle: + case llvm::Triple::ppc64: + case llvm::Triple::ppc64le: + this->MCountName = "_mcount"; + break; + case llvm::Triple::x86: + case llvm::Triple::x86_64: + this->HasFloat128 = true; + break; + } + } + + const char *getStaticInitSectionSpecifier() const override { + return ".text.startup"; + } + }; + + // NetBSD Target + template + class LLVM_LIBRARY_VISIBILITY NetBSDTargetInfo : public OSTargetInfo { + protected: + void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, + MacroBuilder &Builder) const override { + // NetBSD defines; list based off of gcc output + Builder.defineMacro("__NetBSD__"); + Builder.defineMacro("__unix__"); + Builder.defineMacro("__ELF__"); + if (Opts.POSIXThreads) + Builder.defineMacro("_REENTRANT"); + } + + public: + NetBSDTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts) + : OSTargetInfo(Triple, Opts) { + this->MCountName = "__mcount"; + } + }; + + // OpenBSD Target + template + class LLVM_LIBRARY_VISIBILITY OpenBSDTargetInfo : public OSTargetInfo { + protected: + void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, + MacroBuilder &Builder) const override { + // OpenBSD defines; list based off of gcc output + + Builder.defineMacro("__OpenBSD__"); + DefineStd(Builder, "unix", Opts); + Builder.defineMacro("__ELF__"); + if (Opts.POSIXThreads) + Builder.defineMacro("_REENTRANT"); + if (this->HasFloat128) + Builder.defineMacro("__FLOAT128__"); + } + + public: + OpenBSDTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts) + : OSTargetInfo(Triple, Opts) { + this->WCharType = this->WIntType = this->SignedInt; + this->IntMaxType = TargetInfo::SignedLongLong; + this->Int64Type = TargetInfo::SignedLongLong; + switch (Triple.getArch()) { + case llvm::Triple::x86: + case llvm::Triple::x86_64: + this->HasFloat128 = true; + LLVM_FALLTHROUGH; + default: + this->MCountName = "__mcount"; + break; + case llvm::Triple::mips64: + case llvm::Triple::mips64el: + case llvm::Triple::ppc: + case llvm::Triple::ppc64: + case llvm::Triple::ppc64le: + case llvm::Triple::sparcv9: + this->MCountName = "_mcount"; + break; + } + } + }; + + // PSP Target + template + class LLVM_LIBRARY_VISIBILITY PSPTargetInfo : public OSTargetInfo { + protected: + void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, + MacroBuilder &Builder) const override { + // PSP defines; list based on the output of the pspdev gcc toolchain. 
+ Builder.defineMacro("PSP"); + Builder.defineMacro("_PSP"); + Builder.defineMacro("__psp__"); + Builder.defineMacro("__ELF__"); + } + + public: + PSPTargetInfo(const llvm::Triple &Triple) : OSTargetInfo(Triple) {} + }; + + // PS3 PPU Target + template + class LLVM_LIBRARY_VISIBILITY PS3PPUTargetInfo : public OSTargetInfo { + protected: + void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, + MacroBuilder &Builder) const override { + // PS3 PPU defines. + Builder.defineMacro("__PPC__"); + Builder.defineMacro("__PPU__"); + Builder.defineMacro("__CELLOS_LV2__"); + Builder.defineMacro("__ELF__"); + Builder.defineMacro("__LP32__"); + Builder.defineMacro("_ARCH_PPC64"); + Builder.defineMacro("__powerpc64__"); + } + + public: + PS3PPUTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts) + : OSTargetInfo(Triple, Opts) { + this->LongWidth = this->LongAlign = 32; + this->PointerWidth = this->PointerAlign = 32; + this->IntMaxType = TargetInfo::SignedLongLong; + this->Int64Type = TargetInfo::SignedLongLong; + this->SizeType = TargetInfo::UnsignedInt; + this->resetDataLayout("E-m:e-p:32:32-i64:64-n32:64"); + } + }; + + template + class LLVM_LIBRARY_VISIBILITY PS4OSTargetInfo : public OSTargetInfo { + protected: + void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, + MacroBuilder &Builder) const override { + Builder.defineMacro("__FreeBSD__", "9"); + Builder.defineMacro("__FreeBSD_cc_version", "900001"); + Builder.defineMacro("__KPRINTF_ATTRIBUTE__"); + DefineStd(Builder, "unix", Opts); + Builder.defineMacro("__ELF__"); + Builder.defineMacro("__SCE__"); + Builder.defineMacro("__ORBIS__"); + } + + public: + PS4OSTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts) + : OSTargetInfo(Triple, Opts) { + this->WCharType = TargetInfo::UnsignedShort; + + // On PS4, TLS variable cannot be aligned to more than 32 bytes (256 bits). + this->MaxTLSAlign = 256; + + // On PS4, do not honor explicit bit field alignment, + // as in "__attribute__((aligned(2))) int b : 1;". + this->UseExplicitBitFieldAlignment = false; + + switch (Triple.getArch()) { + default: + case llvm::Triple::x86_64: + this->MCountName = ".mcount"; + this->NewAlign = 256; + break; + } + } + TargetInfo::CallingConvCheckResult + checkCallingConvention(CallingConv CC) const override { + return (CC == CC_C) ? 
+         TargetInfo::CCCR_OK : TargetInfo::CCCR_Error;
+   }
+ };
+
+ // RTEMS Target
+ template <typename Target>
+ class LLVM_LIBRARY_VISIBILITY RTEMSTargetInfo : public OSTargetInfo<Target> {
+ protected:
+   void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                     MacroBuilder &Builder) const override {
+     // RTEMS defines; list based off of gcc output
+
+     Builder.defineMacro("__rtems__");
+     Builder.defineMacro("__ELF__");
+     if (Opts.CPlusPlus)
+       Builder.defineMacro("_GNU_SOURCE");
+   }
+
+ public:
+   RTEMSTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+       : OSTargetInfo<Target>(Triple, Opts) {
+     switch (Triple.getArch()) {
+     default:
+     case llvm::Triple::x86:
+       // this->MCountName = ".mcount";
+       break;
+     case llvm::Triple::mips:
+     case llvm::Triple::mipsel:
+     case llvm::Triple::ppc:
+     case llvm::Triple::ppc64:
+     case llvm::Triple::ppc64le:
+       // this->MCountName = "_mcount";
+       break;
+     case llvm::Triple::arm:
+       // this->MCountName = "__mcount";
+       break;
+     }
+   }
+ };
+
+ // Solaris target
+ template <typename Target>
+ class LLVM_LIBRARY_VISIBILITY SolarisTargetInfo : public OSTargetInfo<Target> {
+ protected:
+   void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                     MacroBuilder &Builder) const override {
+     DefineStd(Builder, "sun", Opts);
+     DefineStd(Builder, "unix", Opts);
+     Builder.defineMacro("__ELF__");
+     Builder.defineMacro("__svr4__");
+     Builder.defineMacro("__SVR4");
+     // Solaris headers require _XOPEN_SOURCE to be set to 600 for C99 and
+     // newer, but to 500 for everything else. feature_test.h has a check to
+     // ensure that you are not using C99 with an old version of X/Open or C89
+     // with a new version.
+     if (Opts.C99)
+       Builder.defineMacro("_XOPEN_SOURCE", "600");
+     else
+       Builder.defineMacro("_XOPEN_SOURCE", "500");
+     if (Opts.CPlusPlus) {
+       Builder.defineMacro("__C99FEATURES__");
+       Builder.defineMacro("_FILE_OFFSET_BITS", "64");
+     }
+     // GCC restricts the next two to C++.
+     Builder.defineMacro("_LARGEFILE_SOURCE");
+     Builder.defineMacro("_LARGEFILE64_SOURCE");
+     Builder.defineMacro("__EXTENSIONS__");
+     if (Opts.POSIXThreads)
+       Builder.defineMacro("_REENTRANT");
+     if (this->HasFloat128)
+       Builder.defineMacro("__FLOAT128__");
+   }
+
+ public:
+   SolarisTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+       : OSTargetInfo<Target>(Triple, Opts) {
+     if (this->PointerWidth == 64) {
+       this->WCharType = this->WIntType = this->SignedInt;
+     } else {
+       this->WCharType = this->WIntType = this->SignedLong;
+     }
+     switch (Triple.getArch()) {
+     default:
+       break;
+     case llvm::Triple::x86:
+     case llvm::Triple::x86_64:
+       this->HasFloat128 = true;
+       break;
+     }
+   }
+ };
+
+ // AIX Target
+ template <typename Target>
+ class AIXTargetInfo : public OSTargetInfo<Target> {
+ protected:
+   void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                     MacroBuilder &Builder) const override {
+     DefineStd(Builder, "unix", Opts);
+     Builder.defineMacro("_IBMR2");
+     Builder.defineMacro("_POWER");
+
+     Builder.defineMacro("_AIX");
+
+     if (Opts.EnableAIXExtendedAltivecABI)
+       Builder.defineMacro("__EXTABI__");
+
+     unsigned Major, Minor, Micro;
+     Triple.getOSVersion(Major, Minor, Micro);
+
+     // Define AIX OS-Version Macros.
+     // Includes logic for legacy versions of AIX; no specific intent to support.
+     std::pair<int, int> OsVersion = {Major, Minor};
+     if (OsVersion >= std::make_pair(3, 2)) Builder.defineMacro("_AIX32");
+     if (OsVersion >= std::make_pair(4, 1)) Builder.defineMacro("_AIX41");
+     if (OsVersion >= std::make_pair(4, 3)) Builder.defineMacro("_AIX43");
+     if (OsVersion >= std::make_pair(5, 0)) Builder.defineMacro("_AIX50");
+     if (OsVersion >= std::make_pair(5, 1)) Builder.defineMacro("_AIX51");
+     if (OsVersion >= std::make_pair(5, 2)) Builder.defineMacro("_AIX52");
+     if (OsVersion >= std::make_pair(5, 3)) Builder.defineMacro("_AIX53");
+     if (OsVersion >= std::make_pair(6, 1)) Builder.defineMacro("_AIX61");
+     if (OsVersion >= std::make_pair(7, 1)) Builder.defineMacro("_AIX71");
+     if (OsVersion >= std::make_pair(7, 2)) Builder.defineMacro("_AIX72");
+
+     // FIXME: Do not define _LONG_LONG when -fno-long-long is specified.
+     Builder.defineMacro("_LONG_LONG");
+
+     if (Opts.POSIXThreads) {
+       Builder.defineMacro("_THREAD_SAFE");
+     }
+
+     if (this->PointerWidth == 64) {
+       Builder.defineMacro("__64BIT__");
+     }
+
+     // Define _WCHAR_T when it is a fundamental type
+     // (i.e., for C++ without -fno-wchar).
+     if (Opts.CPlusPlus && Opts.WChar) {
+       Builder.defineMacro("_WCHAR_T");
+     }
+   }
+
+ public:
+   AIXTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+       : OSTargetInfo<Target>(Triple, Opts) {
+     this->TheCXXABI.set(TargetCXXABI::XL);
+
+     if (this->PointerWidth == 64) {
+       this->WCharType = this->UnsignedInt;
+     } else {
+       this->WCharType = this->UnsignedShort;
+     }
+     this->UseZeroLengthBitfieldAlignment = true;
+   }
+
+   // AIX sets FLT_EVAL_METHOD to be 1.
+   unsigned getFloatEvalMethod() const override { return 1; }
+   bool hasInt128Type() const override { return false; }
+
+   bool defaultsToAIXPowerAlignment() const override { return true; }
+ };
+
+ // z/OS target
+ template <typename Target>
+ class LLVM_LIBRARY_VISIBILITY ZOSTargetInfo : public OSTargetInfo<Target> {
+ protected:
+   void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                     MacroBuilder &Builder) const override {
+     // FIXME: _LONG_LONG should not be defined under -std=c89.
+     Builder.defineMacro("_LONG_LONG");
+     Builder.defineMacro("_OPEN_DEFAULT");
+     // _UNIX03_WITHDRAWN is required to build libcxx.
+     Builder.defineMacro("_UNIX03_WITHDRAWN");
+     Builder.defineMacro("__370__");
+     Builder.defineMacro("__BFP__");
+     // FIXME: __BOOL__ should not be defined under -std=c89.
+     Builder.defineMacro("__BOOL__");
+     Builder.defineMacro("__LONGNAME__");
+     Builder.defineMacro("__MVS__");
+     Builder.defineMacro("__THW_370__");
+     Builder.defineMacro("__THW_BIG_ENDIAN__");
+     Builder.defineMacro("__TOS_390__");
+     Builder.defineMacro("__TOS_MVS__");
+     Builder.defineMacro("__XPLINK__");
+
+     if (this->PointerWidth == 64)
+       Builder.defineMacro("__64BIT__");
+
+     if (Opts.CPlusPlus) {
+       Builder.defineMacro("__DLL__");
+       // _XOPEN_SOURCE=600 is required to build libcxx.
+       Builder.defineMacro("_XOPEN_SOURCE", "600");
+     }
+
+     if (Opts.GNUMode) {
+       Builder.defineMacro("_MI_BUILTIN");
+       Builder.defineMacro("_EXT");
+     }
+
+     if (Opts.CPlusPlus && Opts.WChar) {
+       // Macro __wchar_t is defined so that the wchar_t data
+       // type is not declared as a typedef in system headers.
+ Builder.defineMacro("__wchar_t"); + } + + this->PlatformName = llvm::Triple::getOSTypeName(Triple.getOS()); + } + + public: + ZOSTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts) + : OSTargetInfo(Triple, Opts) { + this->WCharType = TargetInfo::UnsignedInt; ++ this->MaxAlignedAttribute = 128; + this->UseBitFieldTypeAlignment = false; + this->UseZeroLengthBitfieldAlignment = true; + this->ZeroLengthBitfieldBoundary = 32; + this->MinGlobalAlign = 0; + this->DefaultAlignForAttributeAligned = 128; + } + }; + + void addWindowsDefines(const llvm::Triple &Triple, const LangOptions &Opts, + MacroBuilder &Builder); + + // Windows target + template + class LLVM_LIBRARY_VISIBILITY WindowsTargetInfo : public OSTargetInfo { + protected: + void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, + MacroBuilder &Builder) const override { + addWindowsDefines(Triple, Opts, Builder); + } + + public: + WindowsTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts) + : OSTargetInfo(Triple, Opts) { + this->WCharType = TargetInfo::UnsignedShort; + this->WIntType = TargetInfo::UnsignedShort; + } + }; + + template + class LLVM_LIBRARY_VISIBILITY NaClTargetInfo : public OSTargetInfo { + protected: + void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, + MacroBuilder &Builder) const override { + if (Opts.POSIXThreads) + Builder.defineMacro("_REENTRANT"); + if (Opts.CPlusPlus) + Builder.defineMacro("_GNU_SOURCE"); + + DefineStd(Builder, "unix", Opts); + Builder.defineMacro("__ELF__"); + Builder.defineMacro("__native_client__"); + } + + public: + NaClTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts) + : OSTargetInfo(Triple, Opts) { + this->LongAlign = 32; + this->LongWidth = 32; + this->PointerAlign = 32; + this->PointerWidth = 32; + this->IntMaxType = TargetInfo::SignedLongLong; + this->Int64Type = TargetInfo::SignedLongLong; + this->DoubleAlign = 64; + this->LongDoubleWidth = 64; + this->LongDoubleAlign = 64; + this->LongLongWidth = 64; + this->LongLongAlign = 64; + this->SizeType = TargetInfo::UnsignedInt; + this->PtrDiffType = TargetInfo::SignedInt; + this->IntPtrType = TargetInfo::SignedInt; + // RegParmMax is inherited from the underlying architecture. + this->LongDoubleFormat = &llvm::APFloat::IEEEdouble(); + if (Triple.getArch() == llvm::Triple::arm) { + // Handled in ARM's setABI(). + } else if (Triple.getArch() == llvm::Triple::x86) { + this->resetDataLayout("e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-" + "i64:64-n8:16:32-S128"); + } else if (Triple.getArch() == llvm::Triple::x86_64) { + this->resetDataLayout("e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-" + "i64:64-n8:16:32:64-S128"); + } else if (Triple.getArch() == llvm::Triple::mipsel) { + // Handled on mips' setDataLayout. + } else { + assert(Triple.getArch() == llvm::Triple::le32); + this->resetDataLayout("e-p:32:32-i64:64"); + } + } + }; + + // Fuchsia Target + template + class LLVM_LIBRARY_VISIBILITY FuchsiaTargetInfo : public OSTargetInfo { + protected: + void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, + MacroBuilder &Builder) const override { + Builder.defineMacro("__Fuchsia__"); + Builder.defineMacro("__ELF__"); + if (Opts.POSIXThreads) + Builder.defineMacro("_REENTRANT"); + // Required by the libc++ locale support. 
+     if (Opts.CPlusPlus)
+       Builder.defineMacro("_GNU_SOURCE");
+   }
+
+ public:
+   FuchsiaTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+       : OSTargetInfo<Target>(Triple, Opts) {
+     this->MCountName = "__mcount";
+     this->TheCXXABI.set(TargetCXXABI::Fuchsia);
+   }
+ };
+
+ // WebAssembly target
+ template <typename Target>
+ class LLVM_LIBRARY_VISIBILITY WebAssemblyOSTargetInfo
+     : public OSTargetInfo<Target> {
+ protected:
+   void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                     MacroBuilder &Builder) const override {
+     // A common platform macro.
+     if (Opts.POSIXThreads)
+       Builder.defineMacro("_REENTRANT");
+     // Follow g++ convention and predefine _GNU_SOURCE for C++.
+     if (Opts.CPlusPlus)
+       Builder.defineMacro("_GNU_SOURCE");
+     // Indicate that we have __float128.
+     Builder.defineMacro("__FLOAT128__");
+   }
+
+ public:
+   explicit WebAssemblyOSTargetInfo(const llvm::Triple &Triple,
+                                    const TargetOptions &Opts)
+       : OSTargetInfo<Target>(Triple, Opts) {
+     this->MCountName = "__mcount";
+     this->TheCXXABI.set(TargetCXXABI::WebAssembly);
+     this->HasFloat128 = true;
+   }
+ };
+
+ // WASI target
+ template <typename Target>
+ class LLVM_LIBRARY_VISIBILITY WASITargetInfo
+     : public WebAssemblyOSTargetInfo<Target> {
+   void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                     MacroBuilder &Builder) const final {
+     WebAssemblyOSTargetInfo<Target>::getOSDefines(Opts, Triple, Builder);
+     Builder.defineMacro("__wasi__");
+   }
+
+ public:
+   explicit WASITargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+       : WebAssemblyOSTargetInfo<Target>(Triple, Opts) {}
+ };
+
+ // Emscripten target
+ template <typename Target>
+ class LLVM_LIBRARY_VISIBILITY EmscriptenTargetInfo
+     : public WebAssemblyOSTargetInfo<Target> {
+   void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+                     MacroBuilder &Builder) const final {
+     WebAssemblyOSTargetInfo<Target>::getOSDefines(Opts, Triple, Builder);
+     Builder.defineMacro("__EMSCRIPTEN__");
+     if (Opts.POSIXThreads)
+       Builder.defineMacro("__EMSCRIPTEN_PTHREADS__");
+   }
+
+ public:
+   explicit EmscriptenTargetInfo(const llvm::Triple &Triple,
+                                 const TargetOptions &Opts)
+       : WebAssemblyOSTargetInfo<Target>(Triple, Opts) {}
+ };
+
+ } // namespace targets
+ } // namespace clang
+ #endif // LLVM_CLANG_LIB_BASIC_TARGETS_OSTARGETS_H
+diff --git a/clang/test/CodeGen/SystemZ/zos-alignment.c b/clang/test/CodeGen/SystemZ/zos-alignment.c
+index 4b572fcac5a9..0e8ee18fb7d0 100644
+--- a/clang/test/CodeGen/SystemZ/zos-alignment.c
++++ b/clang/test/CodeGen/SystemZ/zos-alignment.c
+@@ -1,140 +1,152 @@
+-// RUN: %clang_cc1 -emit-llvm-only -triple s390x-none-zos -fdump-record-layouts %s | FileCheck %s
++// RUN: %clang_cc1 -emit-llvm-only -triple s390x-none-zos -fdump-record-layouts %s | FileCheck %s --check-prefix=CHECK
++// RUN: %clang_cc1 -emit-llvm -triple s390x-none-zos %s -o - | FileCheck %s --check-prefix=DECL
++
++static int __attribute__((aligned(32))) v0;
++int __attribute__((aligned(32))) v1;
++typedef int __attribute__((aligned(32))) int32;
++static int32 v2;
++int32 v3;
++int f0() { return v0 + v1 + v2 + v3; }
++// DECL: @v0 {{.*}} align 16
++// DECL-NEXT: @v1 {{.*}} align 32
++// DECL-NEXT: @v2 {{.*}} align 16
++// DECL-NEXT: @v3 {{.*}} align 32
+
+ struct s0 {
+   short a:3;
+   long b:5;
+   int c:1;
+   long d:10;
+   char e:5;
+ } S0;
+ // CHECK: 0 | struct s0
+ // CHECK-NEXT: 0:0-2 | short a
+ // CHECK-NEXT: 0:3-7 | long b
+ // CHECK-NEXT: 1:0-0 | int c
+ // CHECK-NEXT: 1:1-10 | long d
+ // CHECK-NEXT: 2:3-7 | char e
+ // CHECK-NEXT: | [sizeof=3, align=1]
+
+ struct s1 {
+   char a:7;
+   long b:27;
+   int c:2;
+ } S1;
+ // CHECK: 0 | struct s1
+ // CHECK-NEXT: 0:0-6 | char a
+ // CHECK-NEXT: 0:7-33 | long b
+ // CHECK-NEXT: 4:2-3 | int c
+ // CHECK-NEXT: | [sizeof=5, align=1]
+
+ struct s2 {
+   char a:7;
+   char :0;
+   short :0;
+   short :0;
+ } S2;
+ // CHECK: 0 | struct s2
+ // CHECK-NEXT: 0:0-6 | char a
+ // CHECK-NEXT: 4:- | char
+ // CHECK-NEXT: 4:- | short
+ // CHECK-NEXT: 4:- | short
+ // CHECK-NEXT: | [sizeof=4, align=4]
+
+ struct s3 {
+   int a;
+   int b:16;
+   char :0;
+   char c:1;
+ } S3;
+ // CHECK: 0 | struct s3
+ // CHECK-NEXT: 0 | int a
+ // CHECK-NEXT: 4:0-15 | int b
+ // CHECK-NEXT: 8:- | char
+ // CHECK-NEXT: 8:0-0 | char c
+ // CHECK-NEXT: | [sizeof=12, align=4]
+
+ struct s4 {
+   unsigned int __attribute__((aligned(32))) a;
+ } S4;
+ // CHECK: 0 | struct s4
+ // CHECK-NEXT: 0 | unsigned int a
+ // CHECK-NEXT: | [sizeof=32, align=32]
+
+ struct s5 {
+   char a;
+   int b:19 __attribute__((aligned(4)));
+   int c:22 __attribute__((aligned(8)));
+   int :0;
+   int d:10;
+ } S5;
+ // CHECK: 0 | struct s5
+ // CHECK-NEXT: 0 | char a
+ // CHECK-NEXT: 4:0-18 | int b
+ // CHECK-NEXT: 8:0-21 | int c
+ // CHECK-NEXT: 12:- | int
+ // CHECK-NEXT: 12:0-9 | int d
+ // CHECK-NEXT: | [sizeof=16, align=8]
+
+ struct s6 {
+   char * a;
+   char * b[];
+ } S6;
+ // CHECK: 0 | struct s6
+ // CHECK-NEXT: 0 | char * a
+ // CHECK-NEXT: 8 | char *[] b
+ // CHECK-NEXT: | [sizeof=8, align=8]
+
+ struct s10 {
+   unsigned int __attribute__((aligned)) a;
+ } S10;
+ // CHECK: 0 | struct s10
+ // CHECK-NEXT: 0 | unsigned int a
+ // CHECK-NEXT: | [sizeof=16, align=16]
+
+ struct s11 {
+   char a;
+   long :0;
+   char b;
+ } S11;
+ // CHECK: 0 | struct s11
+ // CHECK-NEXT: 0 | char a
+ // CHECK-NEXT: 8:- | long
+ // CHECK-NEXT: 8 | char b
+ // CHECK-NEXT: | [sizeof=16, align=8]
+
+ union u0 {
+   unsigned short d1 __attribute__((packed));
+   int d2:10;
+   long d3;
+ } U0 __attribute__((aligned(8)));
+ // CHECK: 0 | union u0
+ // CHECK-NEXT: 0 | unsigned short d1
+ // CHECK-NEXT: 0:0-9 | int d2
+ // CHECK-NEXT: 0 | long d3
+ // CHECK-NEXT: | [sizeof=8, align=8]
+
+ union u1 {
+   unsigned int :0;
+   short a;
+ } U1;
+ // CHECK: 0 | union u1
+ // CHECK-NEXT: 0:- | unsigned int
+\ No newline at end of file
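
For reference, a minimal sketch of the user-visible behavior the test above exercises, assuming the patch is applied (the variable and function names here are illustrative only, not part of the patch). With MaxAlignedAttribute set to 128 bits (16 bytes) on z/OS, an aligned attribute on a variable declared with the static storage class is truncated, while a variable with external linkage keeps the full requested alignment:

// Compile with: clang -target s390x-none-zos -S -emit-llvm example.c
static int s __attribute__((aligned(64))); /* requested 64 bytes, emitted with align 16 */
int g __attribute__((aligned(64)));        /* keeps align 64 */
int use(void) { return s + g; }

Note that the truncation keys off SC_Static rather than off linkage in general, so automatic variables and non-static globals are unaffected; any other target could opt in by setting MaxAlignedAttribute in its constructor the same way ZOSTargetInfo does above.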