diff --git a/clang/docs/tools/clang-formatted-files.txt b/clang/docs/tools/clang-formatted-files.txt
--- a/clang/docs/tools/clang-formatted-files.txt
+++ b/clang/docs/tools/clang-formatted-files.txt
@@ -378,6 +378,9 @@
 clang/lib/Basic/Targets/WebAssembly.cpp
 clang/lib/Basic/Targets/WebAssembly.h
 clang/lib/Basic/Targets/XCore.cpp
+clang/lib/CodeGen/ABIInfo.cpp
+clang/lib/CodeGen/ABIInfoImpl.cpp
+clang/lib/CodeGen/ABIInfoImpl.h
 clang/lib/CodeGen/CGCUDARuntime.cpp
 clang/lib/CodeGen/CGLoopInfo.cpp
 clang/lib/CodeGen/CGLoopInfo.h
@@ -386,6 +389,31 @@
 clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
 clang/lib/CodeGen/PatternInit.cpp
 clang/lib/CodeGen/PatternInit.h
+clang/lib/CodeGen/Targets/AArch64.cpp
+clang/lib/CodeGen/Targets/AMDGPU.cpp
+clang/lib/CodeGen/Targets/ARC.cpp
+clang/lib/CodeGen/Targets/ARM.cpp
+clang/lib/CodeGen/Targets/AVR.cpp
+clang/lib/CodeGen/Targets/BPF.cpp
+clang/lib/CodeGen/Targets/CSKY.cpp
+clang/lib/CodeGen/Targets/Hexagon.cpp
+clang/lib/CodeGen/Targets/Lanai.cpp
+clang/lib/CodeGen/Targets/LoongArch.cpp
+clang/lib/CodeGen/Targets/M68k.cpp
+clang/lib/CodeGen/Targets/MSP430.cpp
+clang/lib/CodeGen/Targets/Mips.cpp
+clang/lib/CodeGen/Targets/NVPTX.cpp
+clang/lib/CodeGen/Targets/PNaCl.cpp
+clang/lib/CodeGen/Targets/PPC.cpp
+clang/lib/CodeGen/Targets/RISCV.cpp
+clang/lib/CodeGen/Targets/SPIR.cpp
+clang/lib/CodeGen/Targets/Sparc.cpp
+clang/lib/CodeGen/Targets/SystemZ.cpp
+clang/lib/CodeGen/Targets/TCE.cpp
+clang/lib/CodeGen/Targets/VE.cpp
+clang/lib/CodeGen/Targets/WebAssembly.cpp
+clang/lib/CodeGen/Targets/X86.cpp
+clang/lib/CodeGen/Targets/XCore.cpp
 clang/lib/CodeGen/VarBypassDetector.cpp
 clang/lib/DirectoryWatcher/DirectoryScanner.cpp
 clang/lib/DirectoryWatcher/DirectoryScanner.h
diff --git a/clang/lib/CodeGen/ABIInfoImpl.cpp b/clang/lib/CodeGen/ABIInfoImpl.cpp
--- a/clang/lib/CodeGen/ABIInfoImpl.cpp
+++ b/clang/lib/CodeGen/ABIInfoImpl.cpp
@@ -176,12 +176,10 @@
 /// \param ForceRightAdjust - Default is false. On big-endian platform and
 /// if the argument is smaller than a slot, set this flag will force
 /// right-adjust the argument in its slot irrespective of the type.
-static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
-                                      Address VAListAddr,
+static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                       llvm::Type *DirectTy,
                                       CharUnits DirectSize,
-                                      CharUnits DirectAlign,
-                                      CharUnits SlotSize,
+                                      CharUnits DirectAlign, CharUnits SlotSize,
                                       bool AllowHigherAlign,
                                       bool ForceRightAdjust = false) {
   // Cast the element type to i8* if necessary. Some platforms define
diff --git a/clang/lib/CodeGen/Targets/AArch64.cpp b/clang/lib/CodeGen/Targets/AArch64.cpp
--- a/clang/lib/CodeGen/Targets/AArch64.cpp
+++ b/clang/lib/CodeGen/Targets/AArch64.cpp
@@ -125,7 +125,8 @@
     auto *Fn = cast<llvm::Function>(GV);

     static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
-    Fn->addFnAttr("sign-return-address", SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);
+    Fn->addFnAttr("sign-return-address",
+                  SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);

     if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
       Fn->addFnAttr("sign-return-address-key",
@@ -179,7 +180,7 @@
       return;
     addStackProbeTargetAttributes(D, GV, CGM);
   }
-}
+} // namespace

 ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty) const {
   assert(Ty->isVectorType() && "expected vector type!");
@@ -292,7 +293,7 @@
   // copy constructor are always indirect.
   if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
     return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
-                                   CGCXXABI::RAA_DirectInMemory);
+                                           CGCXXABI::RAA_DirectInMemory);
   }

   // Empty records are always ignored on Darwin, but actually passed in C++ mode
@@ -659,8 +660,8 @@
     auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
     llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
     llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
-    Address Tmp = CGF.CreateTempAlloca(HFATy,
-                                       std::max(TyAlign, BaseTyInfo.Align));
+    Address Tmp =
+        CGF.CreateTempAlloca(HFATy, std::max(TyAlign, BaseTyInfo.Align));

     // On big-endian platforms, the value will be right-aligned in its slot.
     int Offset = 0;
@@ -671,7 +672,7 @@
     for (unsigned i = 0; i < NumMembers; ++i) {
       CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
       Address LoadAddr =
-        CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
+          CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
       LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);

       Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);
@@ -687,8 +688,7 @@
   // It might be right-aligned in its slot.
   CharUnits SlotSize = BaseAddr.getAlignment();
   if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
-      (IsHFA || !isAggregateTypeForABI(Ty)) &&
-      TySize < SlotSize) {
+      (IsHFA || !isAggregateTypeForABI(Ty)) && TySize < SlotSize) {
     CharUnits Offset = SlotSize - TySize;
     BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
   }
@@ -717,8 +717,7 @@
         OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
         "align_stack");
     OnStackPtr = CGF.Builder.CreateAnd(
-        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
-        "align_stack");
+        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align), "align_stack");
     OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
   }
@@ -797,8 +796,8 @@
     IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
   }

-  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
-                          TyInfo, SlotSize, /*AllowHigherAlign*/ true);
+  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, SlotSize,
+                          /*AllowHigherAlign*/ true);
 }

 Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
diff --git a/clang/lib/CodeGen/Targets/AMDGPU.cpp b/clang/lib/CodeGen/Targets/AMDGPU.cpp
--- a/clang/lib/CodeGen/Targets/AMDGPU.cpp
+++ b/clang/lib/CodeGen/Targets/AMDGPU.cpp
@@ -39,8 +39,7 @@
   }

 public:
-  explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) :
-    DefaultABIInfo(CGT) {}
+  explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

   ABIArgInfo classifyReturnType(QualType RetTy) const;
   ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
@@ -55,8 +54,8 @@
   return true;
 }

-bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
-  const Type *Base, uint64_t Members) const {
+bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
+                                                      uint64_t Members) const {
   uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;

   // Homogeneous Aggregates may occupy at most 16 registers.
@@ -273,7 +272,8 @@
   unsigned getOpenCLKernelCallingConv() const override;

   llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
-      llvm::PointerType *T, QualType QT) const override;
+                                 llvm::PointerType *T,
+                                 QualType QT) const override;

   LangAS getASTAllocaAddressSpace() const override {
     return getLangASFromTargetAS(
@@ -292,7 +292,7 @@
   bool shouldEmitDWARFBitFieldSeparators() const override;
   void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
 };
-}
+} // namespace

 static bool requiresAMDGPUProtectedVisibility(const Decl *D,
                                               llvm::GlobalValue *GV) {
@@ -424,9 +424,10 @@
 // emitting null pointers in private and local address spaces, a null
 // pointer in generic address space is emitted which is casted to a
 // pointer in local or private address space.
-llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
-    const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
-    QualType QT) const {
+llvm::Constant *
+AMDGPUTargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
+                                        llvm::PointerType *PT,
+                                        QualType QT) const {
   if (CGM.getContext().getTargetNullPointerValue(QT) == 0)
     return llvm::ConstantPointerNull::get(PT);

@@ -462,11 +463,9 @@
   return DefaultGlobalAS;
 }

-llvm::SyncScope::ID
-AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
-                                            SyncScope Scope,
-                                            llvm::AtomicOrdering Ordering,
-                                            llvm::LLVMContext &Ctx) const {
+llvm::SyncScope::ID AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(
+    const LangOptions &LangOpts, SyncScope Scope, llvm::AtomicOrdering Ordering,
+    llvm::LLVMContext &Ctx) const {
   std::string Name;
   switch (Scope) {
   case SyncScope::HIPSingleThread:
diff --git a/clang/lib/CodeGen/Targets/ARC.cpp b/clang/lib/CodeGen/Targets/ARC.cpp
--- a/clang/lib/CodeGen/Targets/ARC.cpp
+++ b/clang/lib/CodeGen/Targets/ARC.cpp
@@ -67,10 +67,9 @@
       : TargetCodeGenInfo(std::make_unique<ARCABIInfo>(CGT)) {}
 };

-
 ABIArgInfo ARCABIInfo::getIndirectByRef(QualType Ty, bool HasFreeRegs) const {
-  return HasFreeRegs ? getNaturalAlignIndirectInReg(Ty) :
-                       getNaturalAlignIndirect(Ty, false);
+  return HasFreeRegs ? getNaturalAlignIndirectInReg(Ty)
+                     : getNaturalAlignIndirect(Ty, false);
 }

 ABIArgInfo ARCABIInfo::getIndirectByValue(QualType Ty) const {
@@ -122,9 +121,9 @@
     SmallVector<llvm::Type *, 8> Elements(SizeInRegs, Int32);
     llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);

-    return FreeRegs >= SizeInRegs ?
-        ABIArgInfo::getDirectInReg(Result) :
-        ABIArgInfo::getDirect(Result, 0, nullptr, false);
+    return FreeRegs >= SizeInRegs
+               ? ABIArgInfo::getDirectInReg(Result)
+               : ABIArgInfo::getDirect(Result, 0, nullptr, false);
   }

   if (const auto *EIT = Ty->getAs<BitIntType>())
diff --git a/clang/lib/CodeGen/Targets/ARM.cpp b/clang/lib/CodeGen/Targets/ARM.cpp
--- a/clang/lib/CodeGen/Targets/ARM.cpp
+++ b/clang/lib/CodeGen/Targets/ARM.cpp
@@ -26,7 +26,7 @@
   ARMABIInfo(CodeGenTypes &CGT, ARMABIKind Kind) : ABIInfo(CGT), Kind(Kind) {
     setCCs();
     IsFloatABISoftFP = CGT.getCodeGenOpts().FloatABI == "softfp" ||
-        CGT.getCodeGenOpts().FloatABI == ""; // default
+                       CGT.getCodeGenOpts().FloatABI == ""; // default
   }

   bool isEABI() const {
@@ -181,12 +181,24 @@
     const char *Kind;
     switch (Attr->getInterrupt()) {
-    case ARMInterruptAttr::Generic: Kind = ""; break;
-    case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
-    case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
-    case ARMInterruptAttr::SWI: Kind = "SWI"; break;
-    case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
-    case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
+    case ARMInterruptAttr::Generic:
+      Kind = "";
+      break;
+    case ARMInterruptAttr::IRQ:
+      Kind = "IRQ";
+      break;
+    case ARMInterruptAttr::FIQ:
+      Kind = "FIQ";
+      break;
+    case ARMInterruptAttr::SWI:
+      Kind = "SWI";
+      break;
+    case ARMInterruptAttr::ABORT:
+      Kind = "ABORT";
+      break;
+    case ARMInterruptAttr::UNDEF:
+      Kind = "UNDEF";
+      break;
     }

     Fn->addFnAttr("interrupt", Kind);
@@ -230,7 +242,7 @@
       return;
     addStackProbeTargetAttributes(D, GV, CGM);
   }
-}
+} // namespace

 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
   if (!::classifyReturnType(getCXXABI(), FI, *this))
@@ -241,7 +253,6 @@
     I.info = classifyArgumentType(I.type, FI.isVariadic(),
                                   FI.getCallingConvention());

-
   // Always honor user-specified calling convention.
   if (FI.getCallingConvention() != llvm::CallingConv::C)
     return;
@@ -291,8 +302,7 @@
 ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const {
   uint64_t Size = getContext().getTypeSize(Ty);
   if (Size <= 32) {
-    llvm::Type *ResType =
-      llvm::Type::getInt32Ty(getVMContext());
+    llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
     return ABIArgInfo::getDirect(ResType);
   }
   if (Size == 64 || Size == 128) {
@@ -340,8 +350,8 @@
   // 64-bit containerized vectors or 128-bit containerized vectors with one
   // to four Elements.
   // Variadic functions should always marshal to the base standard.
-  bool IsAAPCS_VFP =
-      !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false);
+  bool IsAAPCS_VFP = !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv,
+                                                           /* AAPCS16 */ false);

   Ty = useFirstFieldIfTransparentUnion(Ty);

@@ -387,7 +397,7 @@
     if (isHomogeneousAggregate(Ty, Base, Members)) {
       assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
       llvm::Type *Ty =
-        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
+          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
       return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
     }
   }
@@ -428,7 +438,7 @@
   }

   // Otherwise, pass by coercing to a structure of the appropriate size.
-  llvm::Type* ElemTy;
+  llvm::Type *ElemTy;
   unsigned SizeRegs;
   // FIXME: Try to match the types of the arguments more accurately where
   // we can.
@@ -476,7 +486,8 @@

   // Otherwise, it must be a record type.
   const RecordType *RT = Ty->getAs<RecordType>();
-  if (!RT) return false;
+  if (!RT)
+    return false;

   // Ignore records with flexible arrays.
   const RecordDecl *RD = RT->getDecl();
@@ -532,8 +543,8 @@
                                           unsigned functionCallConv) const {
   // Variadic functions should always marshal to the base standard.
-  bool IsAAPCS_VFP =
-      !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true);
+  bool IsAAPCS_VFP = !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv,
+                                                           /* AAPCS16 */ true);

   if (RetTy->isVoidType())
     return ABIArgInfo::getIgnore();
@@ -545,10 +556,9 @@
     // TODO: FP16/BF16 vectors should be converted to integer vectors
     // This check is similar to isIllegalVectorType - refactor?
     if ((!getTarget().hasLegalHalfType() &&
-        (VT->getElementType()->isFloat16Type() ||
-         VT->getElementType()->isHalfType())) ||
-        (IsFloatABISoftFP &&
-         VT->getElementType()->isBFloat16Type()))
+         (VT->getElementType()->isFloat16Type() ||
+          VT->getElementType()->isHalfType())) ||
+        (IsFloatABISoftFP && VT->getElementType()->isBFloat16Type()))
       return coerceIllegalVector(RetTy);
   }

@@ -637,7 +647,7 @@
 /// isIllegalVector - check whether Ty is an illegal vector type.
 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
-  if (const VectorType *VT = Ty->getAs <VectorType> ()) {
+  if (const VectorType *VT = Ty->getAs<VectorType>()) {
     // On targets that don't support half, fp16 or bfloat, they are expanded
     // into float, and we don't want the ABI to depend on whether or not they
     // are supported in hardware. Thus return false to coerce vectors of these
@@ -645,10 +655,9 @@
     // We do not depend on hasLegalHalfType for bfloat as it is a
     // separate IR type.
     if ((!getTarget().hasLegalHalfType() &&
-        (VT->getElementType()->isFloat16Type() ||
-         VT->getElementType()->isHalfType())) ||
-        (IsFloatABISoftFP &&
-         VT->getElementType()->isBFloat16Type()))
+         (VT->getElementType()->isFloat16Type() ||
+          VT->getElementType()->isHalfType())) ||
+        (IsFloatABISoftFP && VT->getElementType()->isBFloat16Type()))
       return true;
     if (isAndroid()) {
       // Android shipped using Clang 3.1, which supported a slightly different
@@ -781,17 +790,17 @@

   if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
     IsIndirect = true;
-  // ARMv7k passes structs bigger than 16 bytes indirectly, in space
-  // allocated by the caller.
+    // ARMv7k passes structs bigger than 16 bytes indirectly, in space
+    // allocated by the caller.
   } else if (TySize > CharUnits::fromQuantity(16) &&
              getABIKind() == ARMABIKind::AAPCS16_VFP &&
              !isHomogeneousAggregate(Ty, Base, Members)) {
     IsIndirect = true;
-  // Otherwise, bound the type's ABI alignment.
-  // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
-  // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
-  // Our callers should be prepared to handle an under-aligned address.
+    // Otherwise, bound the type's ABI alignment.
+    // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
+    // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
+    // Our callers should be prepared to handle an under-aligned address.
   } else if (getABIKind() == ARMABIKind::AAPCS_VFP ||
              getABIKind() == ARMABIKind::AAPCS) {
     TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
@@ -805,8 +814,8 @@
   }

   TypeInfoChars TyInfo(TySize, TyAlignForABI, AlignRequirementKind::None);
-  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
-                          SlotSize, /*AllowHigherAlign*/ true);
+  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, SlotSize,
+                          /*AllowHigherAlign*/ true);
 }

 std::unique_ptr<TargetCodeGenInfo>
diff --git a/clang/lib/CodeGen/Targets/AVR.cpp b/clang/lib/CodeGen/Targets/AVR.cpp
--- a/clang/lib/CodeGen/Targets/AVR.cpp
+++ b/clang/lib/CodeGen/Targets/AVR.cpp
@@ -135,7 +135,8 @@
     if (GV->isDeclaration())
       return;
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
-    if (!FD) return;
+    if (!FD)
+      return;
     auto *Fn = cast<llvm::Function>(GV);

     if (FD->getAttr<AVRInterruptAttr>())
@@ -145,7 +146,7 @@
       Fn->addFnAttr("signal");
   }
 };
-}
+} // namespace

 std::unique_ptr<TargetCodeGenInfo>
 CodeGen::createAVRTargetCodeGenInfo(CodeGenModule &CGM, unsigned NPR,
diff --git a/clang/lib/CodeGen/Targets/BPF.cpp b/clang/lib/CodeGen/Targets/BPF.cpp
--- a/clang/lib/CodeGen/Targets/BPF.cpp
+++ b/clang/lib/CodeGen/Targets/BPF.cpp
@@ -83,7 +83,6 @@
     for (auto &I : FI.arguments())
       I.info = classifyArgumentType(I.type);
   }
-
 };

 class BPFTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -92,7 +91,7 @@
       : TargetCodeGenInfo(std::make_unique<BPFABIInfo>(CGT)) {}
 };

-}
+} // namespace

 std::unique_ptr<TargetCodeGenInfo>
 CodeGen::createBPFTargetCodeGenInfo(CodeGenModule &CGM) {
diff --git a/clang/lib/CodeGen/Targets/Hexagon.cpp b/clang/lib/CodeGen/Targets/Hexagon.cpp
--- a/clang/lib/CodeGen/Targets/Hexagon.cpp
+++ b/clang/lib/CodeGen/Targets/Hexagon.cpp
@@ -145,8 +145,8 @@
   // HVX vectors are returned in vector registers or register pairs.
   if (T.hasFeature("hvx")) {
     assert(T.hasFeature("hvx-length64b") || T.hasFeature("hvx-length128b"));
-    uint64_t VecSize = T.hasFeature("hvx-length64b") ? 64*8 : 128*8;
-    if (Size == VecSize || Size == 2*VecSize)
+    uint64_t VecSize = T.hasFeature("hvx-length64b") ? 64 * 8 : 128 * 8;
+    if (Size == VecSize || Size == 2 * VecSize)
       return ABIArgInfo::getDirectInReg();
   }
   // Large vector types should be returned via memory.
@@ -221,10 +221,10 @@
   // Round up to the minimum stack alignment for varargs which is 4 bytes.
   uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);

-  __overflow_area_pointer = CGF.Builder.CreateGEP(
-      CGF.Int8Ty, __overflow_area_pointer,
-      llvm::ConstantInt::get(CGF.Int32Ty, Offset),
-      "__overflow_area_pointer.next");
+  __overflow_area_pointer =
+      CGF.Builder.CreateGEP(CGF.Int8Ty, __overflow_area_pointer,
+                            llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+                            "__overflow_area_pointer.next");
   CGF.Builder.CreateStore(__overflow_area_pointer, __overflow_area_pointer_p);

   return AddrTyped;
@@ -380,10 +380,10 @@

   // Get the pointer for next argument in overflow area and store it
   // to overflow area pointer.
-  llvm::Value *__new_overflow_area_pointer = CGF.Builder.CreateGEP(
-      CGF.Int8Ty, __overflow_area_pointer,
-      llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
-      "__overflow_area_pointer.next");
+  llvm::Value *__new_overflow_area_pointer =
+      CGF.Builder.CreateGEP(CGF.Int8Ty, __overflow_area_pointer,
+                            llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
+                            "__overflow_area_pointer.next");

   CGF.Builder.CreateStore(__new_overflow_area_pointer,
                           __overflow_area_pointer_p);
diff --git a/clang/lib/CodeGen/Targets/Lanai.cpp b/clang/lib/CodeGen/Targets/Lanai.cpp
--- a/clang/lib/CodeGen/Targets/Lanai.cpp
+++ b/clang/lib/CodeGen/Targets/Lanai.cpp
@@ -146,7 +146,7 @@
   LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
       : TargetCodeGenInfo(std::make_unique<LanaiABIInfo>(CGT)) {}
 };
-}
+} // namespace

 std::unique_ptr<TargetCodeGenInfo>
 CodeGen::createLanaiTargetCodeGenInfo(CodeGenModule &CGM) {
diff --git a/clang/lib/CodeGen/Targets/MSP430.cpp b/clang/lib/CodeGen/Targets/MSP430.cpp
--- a/clang/lib/CodeGen/Targets/MSP430.cpp
+++ b/clang/lib/CodeGen/Targets/MSP430.cpp
@@ -65,7 +65,7 @@
                            CodeGen::CodeGenModule &M) const override;
 };

-}
+} // namespace

 void MSP430TargetCodeGenInfo::setTargetAttributes(
     const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
diff --git a/clang/lib/CodeGen/Targets/Mips.cpp b/clang/lib/CodeGen/Targets/Mips.cpp
--- a/clang/lib/CodeGen/Targets/Mips.cpp
+++ b/clang/lib/CodeGen/Targets/Mips.cpp
@@ -23,13 +23,14 @@
   const unsigned MinABIStackAlignInBytes, StackAlignInBytes;
   void CoerceToIntArgs(uint64_t TySize,
                        SmallVectorImpl<llvm::Type *> &ArgList) const;
-  llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
-  llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
-  llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
+  llvm::Type *HandleAggregates(QualType Ty, uint64_t TySize) const;
+  llvm::Type *returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
+  llvm::Type *getPaddingType(uint64_t Align, uint64_t Offset) const;
+
 public:
-  MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
-    ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
-    StackAlignInBytes(IsO32 ? 8 : 16) {}
+  MipsABIInfo(CodeGenTypes &CGT, bool _IsO32)
+      : ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
+        StackAlignInBytes(IsO32 ? 8 : 16) {}

   ABIArgInfo classifyReturnType(QualType RetTy) const;
   ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
@@ -41,6 +42,7 @@

 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
   unsigned SizeOfUnwindException;
+
 public:
   MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
       : TargetCodeGenInfo(std::make_unique<MipsABIInfo>(CGT, IsO32)),
@@ -53,7 +55,8 @@
   void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                            CodeGen::CodeGenModule &CGM) const override {
     const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
-    if (!FD) return;
+    if (!FD)
+      return;
     llvm::Function *Fn = cast<llvm::Function>(GV);

     if (FD->hasAttr<MipsLongCallAttr>())
@@ -67,8 +70,7 @@

     if (FD->hasAttr<Mips16Attr>()) {
       Fn->addFnAttr("mips16");
-    }
-    else if (FD->hasAttr<NoMips16Attr>()) {
+    } else if (FD->hasAttr<NoMips16Attr>()) {
       Fn->addFnAttr("nomips16");
     }

@@ -83,19 +85,36 @@
     const char *Kind;
     switch (Attr->getInterrupt()) {
-    case MipsInterruptAttr::eic: Kind = "eic"; break;
-    case MipsInterruptAttr::sw0: Kind = "sw0"; break;
-    case MipsInterruptAttr::sw1: Kind = "sw1"; break;
-    case MipsInterruptAttr::hw0: Kind = "hw0"; break;
-    case MipsInterruptAttr::hw1: Kind = "hw1"; break;
-    case MipsInterruptAttr::hw2: Kind = "hw2"; break;
-    case MipsInterruptAttr::hw3: Kind = "hw3"; break;
-    case MipsInterruptAttr::hw4: Kind = "hw4"; break;
-    case MipsInterruptAttr::hw5: Kind = "hw5"; break;
+    case MipsInterruptAttr::eic:
+      Kind = "eic";
+      break;
+    case MipsInterruptAttr::sw0:
+      Kind = "sw0";
+      break;
+    case MipsInterruptAttr::sw1:
+      Kind = "sw1";
+      break;
+    case MipsInterruptAttr::hw0:
+      Kind = "hw0";
+      break;
+    case MipsInterruptAttr::hw1:
+      Kind = "hw1";
+      break;
+    case MipsInterruptAttr::hw2:
+      Kind = "hw2";
+      break;
+    case MipsInterruptAttr::hw3:
+      Kind = "hw3";
+      break;
+    case MipsInterruptAttr::hw4:
+      Kind = "hw4";
+      break;
+    case MipsInterruptAttr::hw5:
+      Kind = "hw5";
+      break;
     }

     Fn->addFnAttr("interrupt", Kind);
-
   }

   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
@@ -105,12 +124,12 @@
     return SizeOfUnwindException;
   }
 };
-}
+} // namespace

 void MipsABIInfo::CoerceToIntArgs(
     uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
   llvm::IntegerType *IntTy =
-    llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
+      llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);

   // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
   for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
@@ -125,8 +144,8 @@
 // In N32/64, an aligned double precision floating point field is passed in
 // a register.
-llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
-  SmallVector<llvm::Type*, 8> ArgList, IntArgList;
+llvm::Type *MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
+  SmallVector<llvm::Type *, 8> ArgList, IntArgList;

   if (IsO32) {
     CoerceToIntArgs(TySize, ArgList);
@@ -189,8 +208,8 @@
   return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
 }

-ABIArgInfo
-MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
+ABIArgInfo MipsABIInfo::classifyArgumentType(QualType Ty,
+                                             uint64_t &Offset) const {
   Ty = useFirstFieldIfTransparentUnion(Ty);

   uint64_t OrigOffset = Offset;
@@ -241,10 +260,10 @@
                                nullptr, 0,
                                IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
 }

-llvm::Type*
-MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
+llvm::Type *MipsABIInfo::returnAggregateInRegs(QualType RetTy,
+                                               uint64_t Size) const {
   const RecordType *RT = RetTy->getAs<RecordType>();
-  SmallVector<llvm::Type*, 8> RTList;
+  SmallVector<llvm::Type *, 8> RTList;

   if (RT && RT->isStructureOrClassType()) {
     const RecordDecl *RD = RT->getDecl();
@@ -328,7 +347,8 @@
     return ABIArgInfo::getExtend(RetTy);

   if ((RetTy->isUnsignedIntegerOrEnumerationType() ||
-       RetTy->isSignedIntegerOrEnumerationType()) && Size == 32 && !IsO32)
+       RetTy->isSignedIntegerOrEnumerationType()) &&
+      Size == 32 && !IsO32)
     return ABIArgInfo::getSignExtend(RetTy);

   return ABIArgInfo::getDirect();
@@ -355,8 +375,7 @@
   unsigned SlotSizeInBits = IsO32 ? 32 : 64;
   unsigned PtrWidth = getTarget().getPointerWidth(LangAS::Default);
   bool DidPromote = false;
-  if ((Ty->isIntegerType() &&
-       getContext().getIntWidth(Ty) < SlotSizeInBits) ||
+  if ((Ty->isIntegerType() && getContext().getIntWidth(Ty) < SlotSizeInBits) ||
      (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
     DidPromote = true;
     Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
@@ -368,14 +387,14 @@
   // The alignment of things in the argument area is never larger than
   // StackAlignInBytes.
   TyInfo.Align =
-    std::min(TyInfo.Align, CharUnits::fromQuantity(StackAlignInBytes));
+      std::min(TyInfo.Align, CharUnits::fromQuantity(StackAlignInBytes));

   // MinABIStackAlignInBytes is the size of argument slots on the stack.
   CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);

-  Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
-                                  TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
-
+  Address Addr =
+      emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, TyInfo,
+                       ArgSlotSize, /*AllowHigherAlign*/ true);

   // If there was a promotion, "unpromote" into a temporary.
   // TODO: can we just use a pointer into a subset of the original slot?
@@ -384,8 +403,8 @@
     llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);

     // Truncate down to the right width.
-    llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
-                                                 : CGF.IntPtrTy);
+    llvm::Type *IntTy =
+        (OrigTy->isIntegerType() ? Temp.getElementType() : CGF.IntPtrTy);
     llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
     if (OrigTy->isPointerType())
       V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());
@@ -407,9 +426,8 @@
   return ABIArgInfo::getExtend(Ty);
 }

-bool
-MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
-                                               llvm::Value *Address) const {
+bool MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(
+    CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
   // This information comes from gcc's implementation, which seems to
   // as canonical as it gets.
diff --git a/clang/lib/CodeGen/Targets/NVPTX.cpp b/clang/lib/CodeGen/Targets/NVPTX.cpp
--- a/clang/lib/CodeGen/Targets/NVPTX.cpp
+++ b/clang/lib/CodeGen/Targets/NVPTX.cpp
@@ -229,7 +229,8 @@
   }

   const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
-  if (!FD) return;
+  if (!FD)
+    return;

   llvm::Function *F = cast<llvm::Function>(GV);

@@ -300,7 +301,7 @@
 bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
   return false;
 }
-}
+} // namespace

 std::unique_ptr<TargetCodeGenInfo>
 CodeGen::createNVPTXTargetCodeGenInfo(CodeGenModule &CGM) {
diff --git a/clang/lib/CodeGen/Targets/PNaCl.cpp b/clang/lib/CodeGen/Targets/PNaCl.cpp
--- a/clang/lib/CodeGen/Targets/PNaCl.cpp
+++ b/clang/lib/CodeGen/Targets/PNaCl.cpp
@@ -20,21 +20,21 @@
 //===----------------------------------------------------------------------===//

 class PNaClABIInfo : public ABIInfo {
- public:
+public:
   PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

   ABIArgInfo classifyReturnType(QualType RetTy) const;
   ABIArgInfo classifyArgumentType(QualType RetTy) const;

   void computeInfo(CGFunctionInfo &FI) const override;
-  Address EmitVAArg(CodeGenFunction &CGF,
-                    Address VAListAddr, QualType Ty) const override;
+  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+                    QualType Ty) const override;
 };

 class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
- public:
-  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
-      : TargetCodeGenInfo(std::make_unique<PNaClABIInfo>(CGT)) {}
+public:
+  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+      : TargetCodeGenInfo(std::make_unique<PNaClABIInfo>(CGT)) {}
 };

 void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
diff --git a/clang/lib/CodeGen/Targets/PPC.cpp b/clang/lib/CodeGen/Targets/PPC.cpp
--- a/clang/lib/CodeGen/Targets/PPC.cpp
+++ b/clang/lib/CodeGen/Targets/PPC.cpp
@@ -281,7 +281,7 @@
   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                llvm::Value *Address) const override;
 };
-}
+} // namespace

 CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
   // Complex types are passed just like their elements.
@@ -387,7 +387,7 @@
   // "Align" the register count when TY is i64.
   if (isI64 || (isF64 && IsSoftFloatABI)) {
     NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
-    NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
+    NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t)~1U));
   }

   llvm::Value *CC =
@@ -400,7 +400,8 @@
   Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);

   llvm::Type *DirectTy = CGF.ConvertType(Ty), *ElementTy = DirectTy;
-  if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
+  if (isIndirect)
+    DirectTy = DirectTy->getPointerTo(0);

   // Case 1: consume registers.
   Address RegAddr = Address::invalid();
@@ -420,7 +421,8 @@

     // Get the address of the saved value by scaling the number of
     // registers we've used by the number of
-    CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
+    CharUnits RegSize =
+        CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
     llvm::Value *RegOffset =
         Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
     RegAddr = Address(
@@ -429,9 +431,8 @@
     RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);

     // Increase the used-register count.
-    NumRegs =
-      Builder.CreateAdd(NumRegs,
-                        Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
+    NumRegs = Builder.CreateAdd(
+        NumRegs, Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
     Builder.CreateStore(NumRegs, NumRegsAddr);

     CGF.EmitBranch(Cont);
@@ -509,9 +510,8 @@
   return false;
 }

-bool
-PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
-                                                llvm::Value *Address) const {
+bool PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(
+    CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
   return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ false,
                                      /*IsAIX*/ false);
 }
@@ -605,12 +605,11 @@
   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                llvm::Value *Address) const override;
 };
-}
+} // namespace

 // Return true if the ABI requires Ty to be passed sign- or zero-
 // extended to 64 bits.
-bool
-PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
+bool PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
   // Treat an enum type as its underlying type.
   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
     Ty = EnumTy->getDecl()->getIntegerType();
@@ -644,7 +643,7 @@
   if (const ComplexType *CTy = Ty->getAs<ComplexType>())
     Ty = CTy->getElementType();

-  auto FloatUsesVector = [this](QualType Ty){
+  auto FloatUsesVector = [this](QualType Ty) {
     return Ty->isRealFloatingType() && &getContext().getFloatTypeSemantics(
                                            Ty) == &llvm::APFloat::IEEEquad();
   };
@@ -652,7 +651,8 @@
   // Only vector types of size 16 bytes need alignment (larger types are
   // passed via reference, smaller types are not aligned).
   if (Ty->isVectorType()) {
-    return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
+    return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16
+                                                                       : 8);
   } else if (FloatUsesVector(Ty)) {
     // According to ABI document section 'Optional Save Areas': If extended
     // precision floating-point values in IEEE BINARY 128 QUADRUPLE PRECISION
@@ -720,18 +720,17 @@
     const Type *Base, uint64_t Members) const {
   // Vector and fp128 types require one register, other floating point types
   // require one or two registers depending on their size.
-  uint32_t NumRegs =
-      ((getContext().getTargetInfo().hasFloat128Type() &&
-        Base->isFloat128Type()) ||
-       Base->isVectorType()) ? 1
-                             : (getContext().getTypeSize(Base) + 63) / 64;
+  uint32_t NumRegs = ((getContext().getTargetInfo().hasFloat128Type() &&
+                       Base->isFloat128Type()) ||
+                      Base->isVectorType())
+                         ? 1
+                         : (getContext().getTypeSize(Base) + 63) / 64;

   // Homogeneous Aggregates may occupy at most 8 registers.
   return Members * NumRegs <= 8;
 }

-ABIArgInfo
-PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
+ABIArgInfo PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
   Ty = useFirstFieldIfTransparentUnion(Ty);

   if (Ty->isAnyComplexType())
@@ -805,8 +804,7 @@
                 : ABIArgInfo::getDirect());
 }

-ABIArgInfo
-PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
+ABIArgInfo PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
   if (RetTy->isVoidType())
     return ABIArgInfo::getIgnore();

@@ -902,17 +900,14 @@
                                   /*ForceRightAdjust*/ true);
 }

-bool
-PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
-  CodeGen::CodeGenFunction &CGF,
-  llvm::Value *Address) const {
+bool PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
+    CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
   return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
                                      /*IsAIX*/ false);
 }

-bool
-PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
-                                                llvm::Value *Address) const {
+bool PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(
+    CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
   return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
                                      /*IsAIX*/ false);
 }
diff --git a/clang/lib/CodeGen/Targets/RISCV.cpp b/clang/lib/CodeGen/Targets/RISCV.cpp
--- a/clang/lib/CodeGen/Targets/RISCV.cpp
+++ b/clang/lib/CodeGen/Targets/RISCV.cpp
@@ -277,7 +277,8 @@
   CharUnits Field2Align =
       CharUnits::fromQuantity(getDataLayout().getABITypeAlign(Field2Ty));
-  CharUnits Field1End = Field1Off +
+  CharUnits Field1End =
+      Field1Off +
       CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
   CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align);

@@ -318,8 +319,8 @@
     const auto *BT = VT->getElementType()->castAs<BuiltinType>();
     unsigned EltSize = getContext().getTypeSize(BT);
     llvm::ScalableVectorType *ResType =
-        llvm::ScalableVectorType::get(CGT.ConvertType(VT->getElementType()),
-                                     llvm::RISCV::RVVBitsPerBlock / EltSize);
+        llvm::ScalableVectorType::get(CGT.ConvertType(VT->getElementType()),
+                                      llvm::RISCV::RVVBitsPerBlock / EltSize);
     return ABIArgInfo::getDirect(ResType);
   }

@@ -345,8 +346,8 @@
   uint64_t Size = getContext().getTypeSize(Ty);

   // Pass floating point values via FPRs if possible.
-  if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
-      FLen >= Size && ArgFPRsLeft) {
+  if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() && FLen >= Size &&
+      ArgFPRsLeft) {
     ArgFPRsLeft--;
     return ABIArgInfo::getDirect();
   }
@@ -473,8 +474,8 @@
   // Arguments bigger than 2*Xlen bytes are passed indirectly.
   bool IsIndirect = TInfo.Width > 2 * SlotSize;

-  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TInfo,
-                          SlotSize, /*AllowHigherAlign=*/true);
+  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TInfo, SlotSize,
+                          /*AllowHigherAlign=*/true);
 }

 ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
@@ -495,7 +496,8 @@
   void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                            CodeGen::CodeGenModule &CGM) const override {
     const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
-    if (!FD) return;
+    if (!FD)
+      return;

     const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
     if (!Attr)
@@ -503,8 +505,12 @@

     const char *Kind;
     switch (Attr->getInterrupt()) {
-    case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break;
-    case RISCVInterruptAttr::machine: Kind = "machine"; break;
+    case RISCVInterruptAttr::supervisor:
+      Kind = "supervisor";
+      break;
+    case RISCVInterruptAttr::machine:
+      Kind = "machine";
+      break;
     }

     auto *Fn = cast<llvm::Function>(GV);
diff --git a/clang/lib/CodeGen/Targets/SPIR.cpp b/clang/lib/CodeGen/Targets/SPIR.cpp
--- a/clang/lib/CodeGen/Targets/SPIR.cpp
+++ b/clang/lib/CodeGen/Targets/SPIR.cpp
@@ -115,8 +115,8 @@
   else
     CommonSPIRABIInfo(CGM.getTypes()).computeInfo(FI);
 }
-}
-}
+} // namespace CodeGen
+} // namespace clang

 unsigned CommonSPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
   return llvm::CallingConv::SPIR_KERNEL;
@@ -182,8 +182,8 @@
   enum AccessQualifier : unsigned { AQ_ro = 0, AQ_wo = 1, AQ_rw = 2 };
   switch (BuiltinTy->getKind()) {
 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix)                   \
-  case BuiltinType::Id:                                                        \
-    return getSPIRVImageType(Ctx, "spirv.Image", #ImgType, AQ_##Suffix);
+  case BuiltinType::Id:                                                         \
+    return getSPIRVImageType(Ctx, "spirv.Image", #ImgType, AQ_##Suffix);
 #include "clang/Basic/OpenCLImageTypes.def"
   case BuiltinType::OCLSampler:
     return llvm::TargetExtType::get(Ctx, "spirv.Sampler");
@@ -196,8 +196,8 @@
   case BuiltinType::OCLReserveID:
     return llvm::TargetExtType::get(Ctx, "spirv.ReserveId");
 #define INTEL_SUBGROUP_AVC_TYPE(Name, Id)                                      \
-  case BuiltinType::OCLIntelSubgroupAVC##Id:                                   \
-    return llvm::TargetExtType::get(Ctx, "spirv.Avc" #Id "INTEL");
+  case BuiltinType::OCLIntelSubgroupAVC##Id:                                    \
+    return llvm::TargetExtType::get(Ctx, "spirv.Avc" #Id "INTEL");
 #include "clang/Basic/OpenCLExtensionTypes.def"
   default:
     return nullptr;
diff --git a/clang/lib/CodeGen/Targets/Sparc.cpp b/clang/lib/CodeGen/Targets/Sparc.cpp
--- a/clang/lib/CodeGen/Targets/Sparc.cpp
+++ b/clang/lib/CodeGen/Targets/Sparc.cpp
@@ -29,13 +29,10 @@
 };
 } // end anonymous namespace

-
-ABIArgInfo
-SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
+ABIArgInfo SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
   if (Ty->isAnyComplexType()) {
     return ABIArgInfo::getDirect();
-  }
-  else {
+  } else {
     return DefaultABIInfo::classifyReturnType(Ty);
   }
 }
@@ -128,12 +125,12 @@
 struct CoerceBuilder {
   llvm::LLVMContext &Context;
   const llvm::DataLayout &DL;
-  SmallVector<llvm::Type*, 8> Elems;
+  SmallVector<llvm::Type *, 8> Elems;
   uint64_t Size;
   bool InReg;

   CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
-    : Context(c), DL(dl), Size(0), InReg(false) {}
+      : Context(c), DL(dl), Size(0), InReg(false) {}

   // Pad Elems with integers until Size is ToSize.
   void pad(uint64_t ToSize) {
@@ -222,8 +219,7 @@
 };
 } // end anonymous namespace

-ABIArgInfo
-SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
+ABIArgInfo SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
   if (Ty->isVoidType())
     return ABIArgInfo::getIgnore();

@@ -364,9 +360,8 @@
 };
 } // end anonymous namespace

-bool
-SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
-                                                  llvm::Value *Address) const {
+bool SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(
+    CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
   // This is calculated from the LLVM and GCC tables and verified
   // against gcc output. AFAIK all ABIs use the same encoding.
diff --git a/clang/lib/CodeGen/Targets/SystemZ.cpp b/clang/lib/CodeGen/Targets/SystemZ.cpp
--- a/clang/lib/CodeGen/Targets/SystemZ.cpp
+++ b/clang/lib/CodeGen/Targets/SystemZ.cpp
@@ -60,7 +60,7 @@
   SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector, bool SoftFloatABI)
       : TargetCodeGenInfo(
            std::make_unique<SystemZABIInfo>(CGT, HasVector, SoftFloatABI)),
-      Ctx(CGT.getContext()) {
+        Ctx(CGT.getContext()) {
     SwiftInfo =
         std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/false);
   }
@@ -89,12 +89,11 @@
     if (const auto *VD = dyn_cast<VarDecl>(D)) {
       if (VD->isExternallyVisible())
         handleExternallyVisibleObjABI(VD->getType().getTypePtr(), M,
-                                      /*IsParam*/false);
-    }
-    else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+                                      /*IsParam*/ false);
+    } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
       if (FD->isExternallyVisible())
         handleExternallyVisibleObjABI(FD->getType().getTypePtr(), M,
-                                      /*IsParam*/false);
+                                      /*IsParam*/ false);
     }
   }

@@ -140,7 +139,7 @@
     return nullptr;
   }
 };
-}
+} // namespace

 bool SystemZABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
   // Treat an enum type as its underlying type.
@@ -168,14 +167,12 @@
 }

 bool SystemZABIInfo::isCompoundType(QualType Ty) const {
-  return (Ty->isAnyComplexType() ||
-          Ty->isVectorType() ||
+  return (Ty->isAnyComplexType() || Ty->isVectorType() ||
           isAggregateTypeForABI(Ty));
 }

 bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
-  return (HasVector &&
-          Ty->isVectorType() &&
+  return (HasVector && Ty->isVectorType() &&
           getContext().getTypeSize(Ty) <= 128);
 }

@@ -270,7 +267,7 @@
   CharUnits UnpaddedSize;
   CharUnits DirectAlign;
   SZCGI.handleExternallyVisibleObjABI(Ty.getTypePtr(), CGT.getCGM(),
-                                      /*IsParam*/true);
+                                      /*IsParam*/ true);
   if (IsIndirect) {
     DirectTy = llvm::PointerType::getUnqual(DirectTy);
     UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
@@ -291,7 +288,7 @@
   llvm::Type *IndexTy = CGF.Int64Ty;
   llvm::Value *PaddedSizeV =
-    llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());
+      llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());

   if (IsVector) {
     // Work out the address of a vector argument on the stack.
@@ -319,14 +316,14 @@
   unsigned MaxRegs, RegCountField, RegSaveIndex;
   CharUnits RegPadding;
   if (InFPRs) {
-    MaxRegs = 4; // Maximum of 4 FPR arguments
-    RegCountField = 1; // __fpr
-    RegSaveIndex = 16; // save offset for f0
+    MaxRegs = 4;              // Maximum of 4 FPR arguments
+    RegCountField = 1;        // __fpr
+    RegSaveIndex = 16;        // save offset for f0
     RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
   } else {
-    MaxRegs = 5; // Maximum of 5 GPR arguments
-    RegCountField = 0; // __gpr
-    RegSaveIndex = 2; // save offset for r2
+    MaxRegs = 5;          // Maximum of 5 GPR arguments
+    RegCountField = 0;    // __gpr
+    RegSaveIndex = 2;     // save offset for r2
     RegPadding = Padding; // values are passed in the low bits of a GPR
   }

@@ -334,8 +331,8 @@
       CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
   llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
   llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
-  llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
-                                                  "fits_in_regs");
+  llvm::Value *InRegs =
+      CGF.Builder.CreateICmpULT(RegCount, MaxRegsV, "fits_in_regs");

   llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
   llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
@@ -347,12 +344,12 @@

   // Work out the address of an argument register.
   llvm::Value *ScaledRegCount =
-    CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
+      CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
   llvm::Value *RegBase =
-    llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
-                                    + RegPadding.getQuantity());
+      llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity() +
+                                          RegPadding.getQuantity());
   llvm::Value *RegOffset =
-    CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
+      CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
   Address RegSaveAreaPtr =
       CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
   llvm::Value *RegSaveArea =
@@ -365,8 +362,7 @@

   // Update the register count
   llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
-  llvm::Value *NewRegCount =
-    CGF.Builder.CreateAdd(RegCount, One, "reg_count");
+  llvm::Value *NewRegCount = CGF.Builder.CreateAdd(RegCount, One, "reg_count");
   CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
   CGF.EmitBranch(ContBlock);

@@ -382,13 +378,12 @@
   Address RawMemAddr =
       CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
   Address MemAddr =
-    CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr");
+      CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr");

   // Update overflow_arg_area_ptr pointer
-  llvm::Value *NewOverflowArgArea =
-    CGF.Builder.CreateGEP(OverflowArgArea.getElementType(),
-                          OverflowArgArea.getPointer(), PaddedSizeV,
-                          "overflow_arg_area");
+  llvm::Value *NewOverflowArgArea = CGF.Builder.CreateGEP(
+      OverflowArgArea.getElementType(), OverflowArgArea.getPointer(),
+      PaddedSizeV, "overflow_arg_area");
   CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
   CGF.EmitBranch(ContBlock);

@@ -479,7 +474,7 @@
       // vector ABI becomes visible as the va_list could be passed on to
       // other functions.
       SZCGI.handleExternallyVisibleObjABI(I.type.getTypePtr(), CGT.getCGM(),
-                                          /*IsParam*/true);
+                                          /*IsParam*/ true);
   }
 }

@@ -496,8 +491,9 @@
       const Type *SingleEltTy = getABIInfo<SystemZABIInfo>()
                                     .GetSingleElementType(QualType(Ty, 0))
                                     .getTypePtr();
-      bool SingleVecEltStruct = SingleEltTy != Ty && SingleEltTy->isVectorType() &&
-        Ctx.getTypeSize(SingleEltTy) == Ctx.getTypeSize(Ty);
+      bool SingleVecEltStruct =
+          SingleEltTy != Ty && SingleEltTy->isVectorType() &&
+          Ctx.getTypeSize(SingleEltTy) == Ctx.getTypeSize(Ty);
       if (Ty->isVectorType() || SingleVecEltStruct)
         return Ctx.getTypeSize(Ty) / 8 <= 16;
     }
@@ -508,26 +504,26 @@

   // Vectors >= 16 bytes expose the ABI through alignment requirements.
   if (Ty->isVectorType() && Ctx.getTypeSize(Ty) / 8 >= 16)
-      return true;
+    return true;

   if (const auto *RecordTy = Ty->getAs<RecordType>()) {
     const RecordDecl *RD = RecordTy->getDecl();
     if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
       if (CXXRD->hasDefinition())
         for (const auto &I : CXXRD->bases())
-          if (isVectorTypeBased(I.getType().getTypePtr(), /*IsParam*/false))
+          if (isVectorTypeBased(I.getType().getTypePtr(), /*IsParam*/ false))
             return true;
     for (const auto *FD : RD->fields())
-      if (isVectorTypeBased(FD->getType().getTypePtr(), /*IsParam*/false))
+      if (isVectorTypeBased(FD->getType().getTypePtr(), /*IsParam*/ false))
        return true;
   }

   if (const auto *FT = Ty->getAs<FunctionType>())
-    if (isVectorTypeBased(FT->getReturnType().getTypePtr(), /*IsParam*/true))
+    if (isVectorTypeBased(FT->getReturnType().getTypePtr(), /*IsParam*/ true))
       return true;
   if (const FunctionProtoType *Proto = Ty->getAs<FunctionProtoType>())
     for (const auto &ParamType : Proto->getParamTypes())
-      if (isVectorTypeBased(ParamType.getTypePtr(), /*IsParam*/true))
+      if (isVectorTypeBased(ParamType.getTypePtr(), /*IsParam*/ true))
         return true;

   return false;
diff --git a/clang/lib/CodeGen/Targets/TCE.cpp b/clang/lib/CodeGen/Targets/TCE.cpp
--- a/clang/lib/CodeGen/Targets/TCE.cpp
+++ b/clang/lib/CodeGen/Targets/TCE.cpp
@@ -34,7 +34,8 @@
     if (GV->isDeclaration())
      return;
     const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
-    if (!FD) return;
+    if (!FD)
+      return;

     llvm::Function *F = cast<llvm::Function>(GV);

@@ -74,7 +75,7 @@
   }
 }

-}
+} // namespace

 std::unique_ptr<TargetCodeGenInfo>
 CodeGen::createTCETargetCodeGenInfo(CodeGenModule &CGM) {
diff --git a/clang/lib/CodeGen/Targets/X86.cpp b/clang/lib/CodeGen/Targets/X86.cpp
--- a/clang/lib/CodeGen/Targets/X86.cpp
+++ b/clang/lib/CodeGen/Targets/X86.cpp
@@ -19,16 +19,16 @@
 bool IsX86_MMXType(llvm::Type *IRType) {
   // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
   return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
-    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
-    IRType->getScalarSizeInBits() != 64;
+         cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
+         IRType->getScalarSizeInBits() != 64;
 }

-static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+static llvm::Type *X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                           StringRef Constraint,
-                                          llvm::Type* Ty) {
+                                          llvm::Type *Ty) {
   bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
-                     .Cases("y", "&y", "^Ym", true)
-                     .Default(false);
+                       .Cases("y", "&y", "^Ym", true)
+                       .Default(false);
   if (IsMMXCons && Ty->isVectorTy()) {
     if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedValue() !=
         64) {
@@ -72,7 +72,7 @@
 }

 /// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
-static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
+static ABIArgInfo getDirectX86Hva(llvm::Type *T = nullptr) {
   auto AI = ABIArgInfo::getDirect(T);
   AI.setInReg(true);
   AI.setCanBeFlattened(false);
@@ -96,10 +96,7 @@

 /// X86_32ABIInfo - The X86-32 ABI information.
 class X86_32ABIInfo : public ABIInfo {
-  enum Class {
-    Integer,
-    Float
-  };
+  enum Class { Integer, Float };

   static const unsigned MinABIStackAlignInBytes = 4;

@@ -161,7 +158,6 @@
   void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;

 public:
-
   void computeInfo(CGFunctionInfo &FI) const override;
   Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                     QualType Ty) const override;
@@ -204,24 +200,25 @@
     SwiftInfo = std::make_unique<X86_32SwiftABIInfo>(CGT);
   }

-  static bool isStructReturnInRegABI(
-      const llvm::Triple &Triple, const CodeGenOptions &Opts);
+  static bool isStructReturnInRegABI(const llvm::Triple &Triple,
+                                     const CodeGenOptions &Opts);

   void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                            CodeGen::CodeGenModule &CGM) const override;

   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
     // Darwin uses different dwarf register numbers for EH.
-    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
+    if (CGM.getTarget().getTriple().isOSDarwin())
+      return 5;
     return 4;
   }

   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                llvm::Value *Address) const override;

-  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+  llvm::Type *adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                   StringRef Constraint,
-                                  llvm::Type* Ty) const override {
+                                  llvm::Type *Ty) const override {
     return X86AdjustInlineAsmType(CGF, Constraint, Ty);
   }

@@ -235,10 +232,9 @@
   llvm::Constant *
   getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
-    unsigned Sig = (0xeb << 0) |  // jmp rel8
-                   (0x06 << 8) |  // .+0x08
-                   ('v' << 16) |
-                   ('2' << 24);
+    unsigned Sig = (0xeb << 0) | // jmp rel8
+                   (0x06 << 8) | // .+0x08
+                   ('v' << 16) | ('2' << 24);
     return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
   }

@@ -248,7 +244,7 @@
   }
 };

-}
+} // namespace

 /// Rewrite input constraint references after adding some output constraints.
 /// In the case where there is one output and one input and we add one output,
@@ -342,7 +338,7 @@
   // For i386, type must be register sized.
   // For the MCU ABI, it only needs to be <= 8-byte
   if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
-   return false;
+    return false;

   if (Ty->isVectorType()) {
     // 64- and 128- bit vectors inside structures are not returned in
@@ -366,7 +362,8 @@

   // Otherwise, it must be a record type.
   const RecordType *RT = Ty->getAs<RecordType>();
-  if (!RT) return false;
+  if (!RT)
+    return false;

   // FIXME: Traverse bases here too.

@@ -466,7 +463,8 @@
   return Size == getContext().getTypeSize(Ty);
 }

-ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const {
+ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy,
+                                                  CCState &State) const {
   // If the return value is indirect, then the hidden argument is consuming one
   // integer register.
   if (State.FreeRegs) {
@@ -507,8 +505,8 @@
       // register, or if it is 64 bits and has a single element.
       if ((Size == 8 || Size == 16 || Size == 32) ||
           (Size == 64 && VT->getNumElements() == 1))
-        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
-                                                            Size));
+        return ABIArgInfo::getDirect(
+            llvm::IntegerType::get(getVMContext(), Size));

       return getIndirectReturnResult(RetTy, State);
     }
@@ -550,13 +548,14 @@
     // We apply a similar transformation for pointer types to improve the
     // quality of the generated IR.
     if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
-      if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
-          || SeltTy->hasPointerRepresentation())
+      if ((!IsWin32StructABI && SeltTy->isRealFloatingType()) ||
+          SeltTy->hasPointerRepresentation())
         return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

     // FIXME: We should be able to narrow this integer in cases with dead
     // padding.
-    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
+    return ABIArgInfo::getDirect(
+        llvm::IntegerType::get(getVMContext(), Size));
   }

   return getIndirectReturnResult(RetTy, State);
@@ -720,7 +719,8 @@
   return !IsMCUABI;
 }

-void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const {
+void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI,
+                                           CCState &State) const {
   // Vectorcall x86 works subtly different than in x64, so the format is
   // a bit different than the x64 version. First, all vector types (not HVAs)
   // are assigned, with the first 6 ending up in the [XYZ]MM0-5 registers.
@@ -805,7 +805,7 @@
     bool InReg;
     if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
       unsigned SizeInRegs = (TI.Width + 31) / 32;
-      SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
+      SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
       llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
       if (InReg)
         return ABIArgInfo::getDirectInReg(Result);
@@ -860,7 +860,6 @@
     return ABIArgInfo::getDirect();
   }

-
   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
     Ty = EnumTy->getDecl()->getIntegerType();

@@ -915,7 +914,7 @@
   // The C++ ABI is not aware of register usage, so we have to check if the
   // return value was sret and put it in a register ourselves if appropriate.
   if (State.FreeRegs) {
-    --State.FreeRegs;  // The sret parameter consumes a register.
+    --State.FreeRegs; // The sret parameter consumes a register.
     if (!IsMCUABI)
       FI.getReturnInfo().setInReg(true);
   }
@@ -947,10 +946,9 @@
     rewriteWithInAlloca(FI);
 }

-void
-X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
-                                   CharUnits &StackOffset, ABIArgInfo &Info,
-                                   QualType Type) const {
+void X86_32ABIInfo::addFieldToArgStruct(
+    SmallVector<llvm::Type *, 6> &FrameFields, CharUnits &StackOffset,
+    ABIArgInfo &Info, QualType Type) const {
   // Arguments are always 4-byte-aligned.
   CharUnits WordSize = CharUnits::fromQuantity(4);
   assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");
@@ -1044,8 +1042,8 @@
                           StackAlign);
 }

-Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
-                                 Address VAListAddr, QualType Ty) const {
+Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+                                 QualType Ty) const {

   auto TypeInfo = getContext().getTypeInfoInChars(Ty);

@@ -1054,10 +1052,10 @@
   // Just messing with TypeInfo like this works because we never pass
   // anything indirectly.
   TypeInfo.Align = CharUnits::fromQuantity(
-                getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));
+      getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));

-  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
-                          TypeInfo, CharUnits::fromQuantity(4),
+  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
+                          CharUnits::fromQuantity(4),
                           /*AllowHigherAlign*/ true);
 }

@@ -1068,9 +1066,9 @@
   switch (Opts.getStructReturnConvention()) {
   case CodeGenOptions::SRCK_Default:
     break;
-  case CodeGenOptions::SRCK_OnStack:  // -fpcc-struct-return
+  case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
     return false;
-  case CodeGenOptions::SRCK_InRegs:  // -freg-struct-return
+  case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
     return true;
   }

@@ -1100,8 +1098,8 @@
     auto PtrTy = cast<PointerType>(FD->getParamDecl(0)->getType());
     llvm::Type *ByValTy = CGM.getTypes().ConvertType(PtrTy->getPointeeType());

-    llvm::Attribute NewAttr = llvm::Attribute::getWithByValType(
-        Fn->getContext(), ByValTy);
+    llvm::Attribute NewAttr =
+        llvm::Attribute::getWithByValType(Fn->getContext(), ByValTy);
     Fn->addParamAttr(0, NewAttr);
   }

@@ -1120,8 +1118,7 @@
 }

 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
-    CodeGen::CodeGenFunction &CGF,
-    llvm::Value *Address) const {
+    CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
   CodeGen::CGBuilderTy &Builder = CGF.Builder;

   llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
@@ -1143,7 +1140,7 @@
     // reason.
     Builder.CreateAlignedStore(
         Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
-                               CharUnits::One());
+        CharUnits::One());

     // 11-16 are st(0..5). Not sure why we stop at 5.
     // These have size 12, which is sizeof(long double) on
@@ -1159,7 +1156,6 @@
 // X86-64 ABI Implementation
 //===----------------------------------------------------------------------===//

-
 namespace {

 /// \p returns the size in bits of the largest (native) vector for \p AVXLevel.
@@ -1245,11 +1241,11 @@
                                   bool isNamedArg, bool IsRegCall = false) const;

   llvm::Type *GetByteVectorType(QualType Ty) const;
-  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
-                                 unsigned IROffset, QualType SourceTy,
+  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
+                                 QualType SourceTy,
                                  unsigned SourceOffset) const;
-  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
-                                     unsigned IROffset, QualType SourceTy,
+  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
+                                     QualType SourceTy,
                                      unsigned SourceOffset) const;

   /// getIndirectResult - Give a source type \arg Ty, return a suitable result
@@ -1328,7 +1324,7 @@
     unsigned neededInt, neededSSE;
     // The freeIntRegs argument doesn't matter here.
     ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
-                                           /*isNamedArg*/true);
+                                           /*isNamedArg*/ true);
     if (info.isDirect()) {
       llvm::Type *ty = info.getCoerceToType();
       if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
@@ -1344,9 +1340,7 @@
   Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                       QualType Ty) const override;

-  bool has64BitPointers() const {
-    return Has64BitPointers;
-  }
+  bool has64BitPointers() const { return Has64BitPointers; }
 };

 /// WinX86_64ABIInfo - The Windows X86_64 ABI information.
@@ -1409,9 +1403,9 @@
     return false;
   }

-  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+  llvm::Type *adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                   StringRef Constraint,
-                                  llvm::Type* Ty) const override {
+                                  llvm::Type *Ty) const override {
     return X86AdjustInlineAsmType(CGF, Constraint, Ty);
   }

@@ -1425,8 +1419,8 @@
     // defines varargs anyway.
     if (fnType->getCallConv() == CC_C) {
       bool HasAVXType = false;
-      for (CallArgList::const_iterator
-             it = args.begin(), ie = args.end(); it != ie; ++it) {
+      for (CallArgList::const_iterator it = args.begin(), ie = args.end();
+           it != ie; ++it) {
        if (getABIInfo<X86_64ABIInfo>().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
@@ -1444,8 +1438,7 @@
   getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
     unsigned Sig = (0xeb << 0) | // jmp rel8
                    (0x06 << 8) | // .+0x08
-                   ('v' << 16) |
-                   ('2' << 24);
+                   ('v' << 16) | ('2' << 24);
     return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
   }

@@ -1583,11 +1576,11 @@
 namespace {
 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
 public:
-  WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
-      bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
-      unsigned NumRegisterParameters)
-    : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
-        Win32StructABI, NumRegisterParameters, false) {}
+  WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
+                             bool RetSmallStructInRegABI, bool Win32StructABI,
+                             unsigned NumRegisterParameters)
+      : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
+                                Win32StructABI, NumRegisterParameters, false) {}

   void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                            CodeGen::CodeGenModule &CGM) const override;
@@ -1598,8 +1591,7 @@
     Opt += qualifyWindowsLibrary(Lib);
   }

-  void getDetectMismatchOption(llvm::StringRef Name,
-                               llvm::StringRef Value,
+  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                                llvm::SmallString<32> &Opt) const override {
     Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
   }
@@ -1647,8 +1639,7 @@
     Opt += qualifyWindowsLibrary(Lib);
   }

-  void getDetectMismatchOption(llvm::StringRef Name,
-                               llvm::StringRef Value,
+  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                                llvm::SmallString<32> &Opt) const override {
     Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
   }
@@ -1739,8 +1730,8 @@
       return Field;
     if (Accum == Integer || Field == Integer)
       return Integer;
-    if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
-        Accum == X87 || Accum == X87Up)
+    if (Field == X87 || Field == X87Up || Field == ComplexX87 || Accum == X87 ||
+        Accum == X87Up)
       return Memory;
     return SSE;
  }
@@ -1971,7 +1962,8 @@
         (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
       return;

-    for (uint64_t i=0, Offset=OffsetBase; i<Size; i+=EltSize, Offset+=EltSize) {
+    for (uint64_t i = 0, Offset = OffsetBase; i < Size;
+         i += EltSize, Offset += EltSize) {
       Class FieldLo, FieldHi;
       classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
       Lo = merge(Lo, FieldLo);
@@ -2025,7 +2017,7 @@
       // initialized to class NO_CLASS.
       Class FieldLo, FieldHi;
       uint64_t Offset =
-        OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
+          OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
       classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
       Lo = merge(Lo, FieldLo);
       Hi = merge(Hi, FieldHi);
@@ -2044,7 +2036,7 @@
     bool IsUnion = RT->isUnionType() && !UseClang11Compat;
 
     for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
-           i != e; ++i, ++idx) {
+         i != e; ++i, ++idx) {
       uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
       bool BitField = i->isBitField();
@@ -2202,8 +2194,8 @@
     // If this type fits in an eightbyte, coerce it into the matching integral
     // type, which will end up on the stack (with alignment 8).
     if (Align == 8 && Size <= 64)
-      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
-                                                          Size));
+      return ABIArgInfo::getDirect(
+          llvm::IntegerType::get(getVMContext(), Size));
   }
 
   return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
@@ -2239,7 +2231,6 @@
   uint64_t Size = getContext().getTypeSize(Ty);
   assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");
 
-
   // Return a LLVM IR vector type based on the size of 'Ty'.
   return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
                                     Size / 64);
@@ -2268,12 +2259,13 @@
     // Check each element to see if the element overlaps with the queried range.
     for (unsigned i = 0; i != NumElts; ++i) {
       // If the element is after the span we care about, then we're done..
-      unsigned EltOffset = i*EltSize;
-      if (EltOffset >= EndBit) break;
+      unsigned EltOffset = i * EltSize;
+      if (EltOffset >= EndBit)
+        break;
 
-      unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
+      unsigned EltStart = EltOffset < StartBit ? StartBit - EltOffset : 0;
       if (!BitsContainNoUserData(AT->getElementType(), EltStart,
-                                 EndBit-EltOffset, Context))
+                                 EndBit - EltOffset, Context))
         return false;
     }
     // If it overlaps no elements, then it is safe to process as padding.
@@ -2294,11 +2286,12 @@
       // If the base is after the span we care about, ignore it.
       unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
-      if (BaseOffset >= EndBit) continue;
+      if (BaseOffset >= EndBit)
+        continue;
 
-      unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
-      if (!BitsContainNoUserData(I.getType(), BaseStart,
-                                 EndBit-BaseOffset, Context))
+      unsigned BaseStart = BaseOffset < StartBit ? StartBit - BaseOffset : 0;
+      if (!BitsContainNoUserData(I.getType(), BaseStart, EndBit - BaseOffset,
+                                 Context))
         return false;
     }
   }
@@ -2313,10 +2306,11 @@
       unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
 
       // If we found a field after the region we care about, then we're done.
-      if (FieldOffset >= EndBit) break;
+      if (FieldOffset >= EndBit)
+        break;
 
-      unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
-      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
+      unsigned FieldStart = FieldOffset < StartBit ? StartBit - FieldOffset : 0;
+      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit - FieldOffset,
                                  Context))
         return false;
     }
@@ -2359,9 +2353,10 @@
 
 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
 /// low 8 bytes of an XMM register, corresponding to the SSE class.
-llvm::Type *X86_64ABIInfo::
-GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
-                   QualType SourceTy, unsigned SourceOffset) const {
+llvm::Type *X86_64ABIInfo::GetSSETypeAtOffset(llvm::Type *IRType,
+                                              unsigned IROffset,
+                                              QualType SourceTy,
+                                              unsigned SourceOffset) const {
   const llvm::DataLayout &TD = getDataLayout();
   unsigned SourceSize =
       (unsigned)getContext().getTypeSize(SourceTy) / 8 - SourceOffset;
@@ -2373,10 +2368,10 @@
   llvm::Type *T1 = nullptr;
   unsigned T0Size = TD.getTypeAllocSize(T0);
   if (SourceSize > T0Size)
-      T1 = getFPTypeAtOffset(IRType, IROffset + T0Size, TD);
+    T1 = getFPTypeAtOffset(IRType, IROffset + T0Size, TD);
   if (T1 == nullptr) {
-    // Check if IRType is a half/bfloat + float. float type will be in IROffset+4 due
-    // to its alignment.
+    // Check if IRType is a half/bfloat + float. float type will be in
+    // IROffset+4 due to its alignment.
     if (T0->is16bitFPTy() && SourceSize > 4)
       T1 = getFPTypeAtOffset(IRType, IROffset + 4, TD);
     // If we can't get a second FP type, return a simple half or float.
@@ -2404,7 +2399,6 @@
   return llvm::Type::getDoubleTy(getVMContext());
 }
 
-
 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
 /// an 8-byte GPR.  This means that we either have a scalar or we are talking
 /// about the high or low part of an up-to-16-byte struct.  This routine picks
@@ -2419,9 +2413,10 @@
 /// SourceTy is the source-level type for the entire argument.  SourceOffset is
 /// an offset into this that we're processing (which is always either 0 or 8).
 ///
-llvm::Type *X86_64ABIInfo::
-GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
-                       QualType SourceTy, unsigned SourceOffset) const {
+llvm::Type *X86_64ABIInfo::GetINTEGERTypeAtOffset(llvm::Type *IRType,
+                                                  unsigned IROffset,
+                                                  QualType SourceTy,
+                                                  unsigned SourceOffset) const {
   // If we're dealing with an un-offset LLVM IR type, then it means that we're
   // returning an 8-byte unit starting with it.  See if we can safely use it.
   if (IROffset == 0) {
@@ -2439,11 +2434,12 @@
     if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
         IRType->isIntegerTy(32) ||
         (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
-      unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
-          cast<llvm::IntegerType>(IRType)->getBitWidth();
+      unsigned BitWidth = isa<llvm::PointerType>(IRType)
+                              ? 32
+                              : cast<llvm::IntegerType>(IRType)->getBitWidth();
 
-      if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
-                                SourceOffset*8+64, getContext()))
+      if (BitsContainNoUserData(SourceTy, SourceOffset * 8 + BitWidth,
+                                SourceOffset * 8 + 64, getContext()))
         return IRType;
     }
   }
@@ -2463,33 +2459,31 @@
   if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
     llvm::Type *EltTy = ATy->getElementType();
     unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
-    unsigned EltOffset = IROffset/EltSize*EltSize;
-    return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
+    unsigned EltOffset = IROffset / EltSize * EltSize;
+    return GetINTEGERTypeAtOffset(EltTy, IROffset - EltOffset, SourceTy,
                                   SourceOffset);
   }
 
   // Okay, we don't have any better idea of what to pass, so we pass this in an
   // integer register that isn't too big to fit the rest of the struct.
   unsigned TySizeInBytes =
-    (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
+      (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
 
   assert(TySizeInBytes != SourceOffset && "Empty field?");
 
   // It is always safe to classify this as an integer type up to i64 that
   // isn't larger than the structure.
   return llvm::IntegerType::get(getVMContext(),
-                                std::min(TySizeInBytes-SourceOffset, 8U)*8);
+                                std::min(TySizeInBytes - SourceOffset, 8U) * 8);
 }
 
-
 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
 /// be used as elements of a two register pair to pass or return, return a
 /// first class aggregate to represent them.  For example, if the low part of
 /// a by-value argument should be passed as i32* and the high part as float,
 /// return {i32*, float}.
-static llvm::Type *
-GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
-                           const llvm::DataLayout &TD) {
+static llvm::Type *GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
+                                              const llvm::DataLayout &TD) {
   // In order to correctly satisfy the ABI, we need to the high part to start
   // at offset 8.  If the high and low parts we inferred are both 4-byte types
   // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
@@ -2512,8 +2506,8 @@
     if (Lo->isHalfTy() || Lo->isFloatTy())
       Lo = llvm::Type::getDoubleTy(Lo->getContext());
     else {
-      assert((Lo->isIntegerTy() || Lo->isPointerTy())
-             && "Invalid/unknown lo type");
+      assert((Lo->isIntegerTy() || Lo->isPointerTy()) &&
+             "Invalid/unknown lo type");
       Lo = llvm::Type::getInt64Ty(Lo->getContext());
     }
   }
@@ -2526,8 +2520,7 @@
   return Result;
 }
 
-ABIArgInfo X86_64ABIInfo::
-classifyReturnType(QualType RetTy) const {
+ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const {
   // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
   // classification algorithm.
   X86_64ABIInfo::Class Lo, Hi;
@@ -2611,12 +2604,12 @@
   case Integer:
     HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
-    if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
+    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
       return ABIArgInfo::getDirect(HighPart, 8);
     break;
   case SSE:
     HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
-    if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
+    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
       return ABIArgInfo::getDirect(HighPart, 8);
     break;
@@ -2639,7 +2632,7 @@
     // extra bits in an SSE reg.
     if (Lo != X87) {
       HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
-      if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
+      if (Lo == NoClass) // Return HighPart at offset 8 in memory.
         return ABIArgInfo::getDirect(HighPart, 8);
     }
     break;
@@ -2741,14 +2734,15 @@
   case ComplexX87:
     llvm_unreachable("Invalid classification for hi word.");
 
-  case NoClass: break;
+  case NoClass:
+    break;
 
   case Integer:
     ++neededInt;
     // Pick an 8-byte type based on the preferred type.
     HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
 
-    if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
+    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
     break;
@@ -2758,7 +2752,7 @@
   case SSE:
     HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
 
-    if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
+    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
       return ABIArgInfo::getDirect(HighPart, 8);
 
     ++neededSSE;
@@ -2940,7 +2934,7 @@
   Address overflow_arg_area_p =
       CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
   llvm::Value *overflow_arg_area =
-    CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
+      CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
 
   // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
   // byte boundary if alignment needed by type exceeds 8 byte boundary.
@@ -2948,15 +2942,14 @@
   // alignment greater than 16 where necessary.
   CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
   if (Align > CharUnits::fromQuantity(8)) {
-    overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
-                                                      Align);
+    overflow_arg_area =
+        emitRoundPointerUpToAlignment(CGF, overflow_arg_area, Align);
   }
 
   // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
   llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
-  llvm::Value *Res =
-    CGF.Builder.CreateBitCast(overflow_arg_area,
-                              llvm::PointerType::getUnqual(LTy));
+  llvm::Value *Res = CGF.Builder.CreateBitCast(
+      overflow_arg_area, llvm::PointerType::getUnqual(LTy));
 
   // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
   // l->overflow_arg_area + sizeof(type).
@@ -2965,7 +2958,7 @@
   uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
   llvm::Value *Offset =
-    llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
+      llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
   overflow_arg_area = CGF.Builder.CreateGEP(CGF.Int8Ty, overflow_arg_area,
                                             Offset, "overflow_arg_area.next");
   CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
@@ -2987,7 +2980,7 @@
   Ty = getContext().getCanonicalType(Ty);
   ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
-                                       /*isNamedArg*/false);
+                                       /*isNamedArg*/ false);
 
   // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
   // in the registers. If not go to step 7.
@@ -3019,7 +3012,7 @@
     fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
     fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
     llvm::Value *FitsInFP =
-      llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
+        llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
     FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
     InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
   }
@@ -3112,12 +3105,11 @@
     // to assume that the slots are 16-byte aligned, since the stack is
     // naturally 16-byte aligned and the prologue is expected to store
     // all the SSE registers to the RSA.
-    Address RegAddrLo = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea,
-                                                      fp_offset),
-                                CGF.Int8Ty, CharUnits::fromQuantity(16));
-    Address RegAddrHi =
-      CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
-                                             CharUnits::fromQuantity(16));
+    Address RegAddrLo =
+        Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset),
+                CGF.Int8Ty, CharUnits::fromQuantity(16));
+    Address RegAddrHi = CGF.Builder.CreateConstInBoundsByteGEP(
+        RegAddrLo, CharUnits::fromQuantity(16));
     llvm::Type *ST = AI.canHaveCoerceToType()
                          ? AI.getCoerceToType()
                          : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy);
@@ -3157,8 +3149,8 @@
   // Return the appropriate result.
 
   CGF.EmitBlock(ContBlock);
-  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
-                                 "vaarg.addr");
+  Address ResAddr =
+      emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock, "vaarg.addr");
   return ResAddr;
 }
@@ -3211,7 +3203,6 @@
     if (RT->getDecl()->hasFlexibleArrayMember())
       return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
-
   }
 
   const Type *Base = nullptr;
diff --git a/clang/lib/CodeGen/Targets/XCore.cpp b/clang/lib/CodeGen/Targets/XCore.cpp
--- a/clang/lib/CodeGen/Targets/XCore.cpp
+++ b/clang/lib/CodeGen/Targets/XCore.cpp
@@ -77,7 +77,7 @@
 /// been exited too soon for the encoding to be correct for the member.
 ///
 class TypeStringCache {
-  enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
+  enum Status { NonRecursive, Recursive, Incomplete, IncompleteUsed };
   struct Entry {
     std::string Str;   // The encoded TypeString for the type.
     enum Status State; // Information about the encoding in 'Str'.
@@ -91,8 +91,7 @@
   TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
   void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
   bool removeIncomplete(const IdentifierInfo *ID);
-  void addIfComplete(const IdentifierInfo *ID, StringRef Str,
-                     bool IsRecursive);
+  void addIfComplete(const IdentifierInfo *ID, StringRef Str, bool IsRecursive);
   StringRef lookupStr(const IdentifierInfo *ID);
 };
 
@@ -101,11 +100,13 @@
 class FieldEncoding {
   bool HasName;
   std::string Enc;
+
 public:
   FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
   StringRef str() { return Enc; }
   bool operator<(const FieldEncoding &rhs) const {
-    if (HasName != rhs.HasName) return HasName;
+    if (HasName != rhs.HasName)
+      return HasName;
     return Enc < rhs.Enc;
   }
 };
@@ -196,7 +197,7 @@
   if (!ID)
     return;
   Entry &E = Map[ID];
-  assert( (E.Str.empty() || E.State == Recursive) &&
+  assert((E.Str.empty() || E.State == Recursive) &&
          "Incorrectly use of addIncomplete");
   assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
   E.Swapped.swap(E.Str); // swap out the Recursive
@@ -215,8 +216,7 @@
   auto I = Map.find(ID);
   assert(I != Map.end() && "Entry not present");
   Entry &E = I->second;
-  assert( (E.State == Incomplete ||
-           E.State == IncompleteUsed) &&
+  assert((E.State == Incomplete || E.State == IncompleteUsed) &&
          "Entry must be an incomplete type");
   bool IsRecursive = false;
   if (E.State == IncompleteUsed) {
@@ -244,7 +244,7 @@
     return; // No key or it is an incomplete sub-type so don't add.
   Entry &E = Map[ID];
   if (IsRecursive && !E.Str.empty()) {
-    assert(E.State==Recursive && E.Str.size() == Str.size() &&
+    assert(E.State == Recursive && E.Str.size() == Str.size() &&
            "This is not the same Recursive entry");
     // The parent container was not recursive after all, so we could have used
     // this Recursive sub-member entry after all, but we assumed the worse when
@@ -253,7 +253,7 @@
   }
   assert(E.Str.empty() && "Entry already present");
   E.Str = Str.str();
-  E.State = IsRecursive? Recursive : NonRecursive;
+  E.State = IsRecursive ? Recursive : NonRecursive;
 }
 
 /// Return a cached TypeString encoding for the ID. If there isn't one, or we
@@ -261,13 +261,13 @@
 /// encoding is Recursive, return an empty StringRef.
 StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
   if (!ID)
-    return StringRef();   // We have no key.
+    return StringRef(); // We have no key.
   auto I = Map.find(ID);
   if (I == Map.end())
-    return StringRef();   // We have no encoding.
+    return StringRef(); // We have no encoding.
   Entry &E = I->second;
   if (E.State == Recursive && IncompleteCount)
-    return StringRef();   // We don't use Recursive encodings for member types.
+    return StringRef(); // We don't use Recursive encodings for member types.
 
   if (E.State == Incomplete) {
     // The incomplete type is being used to break out of recursion.
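
The cache protocol in the hunks above (addIncomplete / lookupStr / removeIncomplete / addIfComplete) is easier to see in miniature. A self-contained sketch, assuming only the standard library; the names and the plain string key are simplifications local to this example, not the real IdentifierInfo-keyed API being reformatted:

// Standalone illustration (not part of the patch): while a record is being
// encoded, a stub entry marks it Incomplete; a lookup of that stub from one
// of its own members flips it to IncompleteUsed, i.e. the type is recursive.
#include <cassert>
#include <map>
#include <string>
#include <utility>

struct Cache {
  enum Status { NonRecursive, Recursive, Incomplete, IncompleteUsed };
  struct Entry {
    std::string Str;
    Status State;
  };
  std::map<std::string, Entry> Map;

  void addIncomplete(const std::string &ID, std::string Stub) {
    Map[ID] = Entry{std::move(Stub), Incomplete};
  }
  // Returns true if the stub was consumed while incomplete, i.e. the type
  // turned out to be recursive.
  bool removeIncomplete(const std::string &ID) {
    bool WasUsed = Map[ID].State == IncompleteUsed;
    Map.erase(ID);
    return WasUsed;
  }
  std::string lookup(const std::string &ID) {
    auto I = Map.find(ID);
    if (I == Map.end())
      return "";
    if (I->second.State == Incomplete)
      I->second.State = IncompleteUsed; // recursion detected
    return I->second.Str;
  }
};

int main() {
  Cache TSC;
  TSC.addIncomplete("node", "s(node){}");    // stub while encoding members
  assert(TSC.lookup("node") == "s(node){}"); // a member refers back to it
  assert(TSC.removeIncomplete("node"));      // so the record is recursive
}

The Incomplete-to-IncompleteUsed transition in lookup is the whole trick: it lets removeIncomplete() report whether the just-finished encoding should be cached as Recursive or NonRecursive.
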
@@ -303,7 +303,7 @@
     llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
                                 llvm::MDString::get(Ctx, Enc.str())};
     llvm::NamedMDNode *MD =
-      CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
+        CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
     MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
   }
 }
@@ -325,8 +325,7 @@
 }
 
 static bool appendType(SmallStringEnc &Enc, QualType QType,
-                       const CodeGen::CodeGenModule &CGM,
-                       TypeStringCache &TSC);
+                       const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);
 
 /// Helper function for appendRecordType().
 /// Builds a SmallVector containing the encoded field types in declaration
@@ -371,7 +370,7 @@
 
   // Start to emit an incomplete TypeString.
   size_t Start = Enc.size();
-  Enc += (RT->isUnionType()? 'u' : 's');
+  Enc += (RT->isUnionType() ? 'u' : 's');
   Enc += '(';
   if (ID)
     Enc += ID->getName();
@@ -386,10 +385,10 @@
     // complete TypeString for this RecordType.
     SmallVector<FieldEncoding, 16> FE;
     std::string StubEnc(Enc.substr(Start).str());
-    StubEnc += '}';  // StubEnc now holds a valid incomplete TypeString.
+    StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
     TSC.addIncomplete(ID, std::move(StubEnc));
     if (!extractFieldType(FE, RD, CGM, TSC)) {
-      (void) TSC.removeIncomplete(ID);
+      (void)TSC.removeIncomplete(ID);
       return false;
    }
     IsRecursive = TSC.removeIncomplete(ID);
@@ -412,8 +411,7 @@
 
 /// Appends enum types to Enc and adds the encoding to the cache.
 static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
-                           TypeStringCache &TSC,
-                           const IdentifierInfo *ID) {
+                           TypeStringCache &TSC, const IdentifierInfo *ID) {
   // Append the cached TypeString if we have one.
   StringRef TypeString = TSC.lookupStr(ID);
   if (!TypeString.empty()) {
@@ -457,14 +455,15 @@
 /// This is done prior to appending the type's encoding.
 static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
   // Qualifiers are emitted in alphabetical order.
-  static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"};
+  static const char *const Table[] = {
+      "", "c:", "r:", "cr:", "v:", "cv:", "rv:", "crv:"};
   int Lookup = 0;
   if (QT.isConstQualified())
-    Lookup += 1<<0;
+    Lookup += 1 << 0;
   if (QT.isRestrictQualified())
-    Lookup += 1<<1;
+    Lookup += 1 << 1;
   if (QT.isVolatileQualified())
-    Lookup += 1<<2;
+    Lookup += 1 << 2;
   Enc += Table[Lookup];
 }
 
@@ -472,56 +471,56 @@
 static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
   const char *EncType;
   switch (BT->getKind()) {
-    case BuiltinType::Void:
-      EncType = "0";
-      break;
-    case BuiltinType::Bool:
-      EncType = "b";
-      break;
-    case BuiltinType::Char_U:
-      EncType = "uc";
-      break;
-    case BuiltinType::UChar:
-      EncType = "uc";
-      break;
-    case BuiltinType::SChar:
-      EncType = "sc";
-      break;
-    case BuiltinType::UShort:
-      EncType = "us";
-      break;
-    case BuiltinType::Short:
-      EncType = "ss";
-      break;
-    case BuiltinType::UInt:
-      EncType = "ui";
-      break;
-    case BuiltinType::Int:
-      EncType = "si";
-      break;
-    case BuiltinType::ULong:
-      EncType = "ul";
-      break;
-    case BuiltinType::Long:
-      EncType = "sl";
-      break;
-    case BuiltinType::ULongLong:
-      EncType = "ull";
-      break;
-    case BuiltinType::LongLong:
-      EncType = "sll";
-      break;
-    case BuiltinType::Float:
-      EncType = "ft";
-      break;
-    case BuiltinType::Double:
-      EncType = "d";
-      break;
-    case BuiltinType::LongDouble:
-      EncType = "ld";
-      break;
-    default:
-      return false;
+  case BuiltinType::Void:
+    EncType = "0";
+    break;
+  case BuiltinType::Bool:
+    EncType = "b";
+    break;
+  case BuiltinType::Char_U:
+    EncType = "uc";
+    break;
+  case BuiltinType::UChar:
+    EncType = "uc";
+    break;
+  case BuiltinType::SChar:
+    EncType = "sc";
+    break;
+  case BuiltinType::UShort:
+    EncType = "us";
+    break;
+  case BuiltinType::Short:
+    EncType = "ss";
+    break;
+  case BuiltinType::UInt:
+    EncType = "ui";
+    break;
+  case BuiltinType::Int:
+    EncType = "si";
+    break;
+  case BuiltinType::ULong:
+    EncType = "ul";
+    break;
+  case BuiltinType::Long:
+    EncType = "sl";
+    break;
+  case BuiltinType::ULongLong:
+    EncType = "ull";
+    break;
+  case BuiltinType::LongLong:
+    EncType = "sll";
+    break;
+  case BuiltinType::Float:
+    EncType = "ft";
+    break;
+  case BuiltinType::Double:
+    EncType = "d";
+    break;
+  case BuiltinType::LongDouble:
+    EncType = "ld";
+    break;
+  default:
+    return false;
   }
   Enc += EncType;
   return true;
@@ -562,8 +561,8 @@
 /// Appends a function encoding to Enc, calling appendType for the return type
 /// and the arguments.
 static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
-                              const CodeGen::CodeGenModule &CGM,
-                              TypeStringCache &TSC) {
+                               const CodeGen::CodeGenModule &CGM,
+                               TypeStringCache &TSC) {
   Enc += "f{";
   if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
     return false;