Index: llvm/include/llvm/Target/TargetCallingConv.td
===================================================================
--- llvm/include/llvm/Target/TargetCallingConv.td
+++ llvm/include/llvm/Target/TargetCallingConv.td
@@ -160,6 +160,11 @@
 /// that the target supports.
 class CallingConv<list<CCAction> actions> {
   list<CCAction> Actions = actions;
+
+  /// If true, this calling convention will be emitted as externally visible in
+  /// the llvm namespace instead of as a static function.
+  bit Entry = 0;
+
   bit Custom = 0;
 }
Index: llvm/lib/Target/X86/X86CallLowering.cpp
===================================================================
--- llvm/lib/Target/X86/X86CallLowering.cpp
+++ llvm/lib/Target/X86/X86CallLowering.cpp
@@ -48,8 +48,6 @@
 
 using namespace llvm;
 
-#include "X86GenCallingConv.inc"
-
 X86CallLowering::X86CallLowering(const X86TargetLowering &TLI)
     : CallLowering(&TLI) {}
Index: llvm/lib/Target/X86/X86CallingConv.h
===================================================================
--- llvm/lib/Target/X86/X86CallingConv.h
+++ llvm/lib/Target/X86/X86CallingConv.h
@@ -21,45 +21,12 @@
 
 namespace llvm {
 
-/// When regcall calling convention compiled to 32 bit arch, special treatment
-/// is required for 64 bit masks.
-/// The value should be assigned to two GPRs.
-/// \return true if registers were allocated and false otherwise.
-bool CC_X86_32_RegCall_Assign2Regs(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
-                                   CCValAssign::LocInfo &LocInfo,
-                                   ISD::ArgFlagsTy &ArgFlags, CCState &State);
+bool RetCC_X86(unsigned ValNo, MVT ValVT, MVT LocVT,
+               CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
+               CCState &State);
 
-/// Vectorcall calling convention has special handling for vector types or
-/// HVA for 64 bit arch.
-/// For HVAs shadow registers might be allocated on the first pass
-/// and actual XMM registers are allocated on the second pass.
-/// For vector types, actual XMM registers are allocated on the first pass.
-/// \return true if registers were allocated and false otherwise.
-bool CC_X86_64_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
-                          CCValAssign::LocInfo &LocInfo,
-                          ISD::ArgFlagsTy &ArgFlags, CCState &State);
-
-/// Vectorcall calling convention has special handling for vector types or
-/// HVA for 32 bit arch.
-/// For HVAs actual XMM registers are allocated on the second pass.
-/// For vector types, actual XMM registers are allocated on the first pass.
-/// \return true if registers were allocated and false otherwise.
-bool CC_X86_32_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
-                          CCValAssign::LocInfo &LocInfo,
-                          ISD::ArgFlagsTy &ArgFlags, CCState &State);
-
-inline bool CC_X86_AnyReg_Error(unsigned &, MVT &, MVT &,
-                                CCValAssign::LocInfo &, ISD::ArgFlagsTy &,
-                                CCState &) {
-  llvm_unreachable("The AnyReg calling convention is only supported by the " \
-                   "stackmap and patchpoint intrinsics.");
-  // gracefully fallback to X86 C calling convention on Release builds.
-  return false;
-}
-
-bool CC_X86_32_MCUInReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
-                        CCValAssign::LocInfo &LocInfo,
-                        ISD::ArgFlagsTy &ArgFlags, CCState &State);
+bool CC_X86(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
+            ISD::ArgFlagsTy ArgFlags, CCState &State);
 
 } // End llvm namespace
Index: llvm/lib/Target/X86/X86CallingConv.cpp
===================================================================
--- llvm/lib/Target/X86/X86CallingConv.cpp
+++ llvm/lib/Target/X86/X86CallingConv.cpp
@@ -12,16 +12,23 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "MCTargetDesc/X86MCTargetDesc.h"
+#include "X86CallingConv.h"
 #include "X86Subtarget.h"
+#include "llvm/ADT/SmallVector.h"
 #include "llvm/CodeGen/CallingConvLower.h"
 #include "llvm/IR/CallingConv.h"
 
-namespace llvm {
-
-bool CC_X86_32_RegCall_Assign2Regs(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
-                                   CCValAssign::LocInfo &LocInfo,
-                                   ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+using namespace llvm;
+
+/// When the regcall calling convention is compiled for a 32 bit arch, special
+/// treatment is required for 64 bit masks.
+/// The value should be assigned to two GPRs.
+/// \return true if registers were allocated and false otherwise.
+static bool CC_X86_32_RegCall_Assign2Regs(unsigned &ValNo, MVT &ValVT,
+                                          MVT &LocVT,
+                                          CCValAssign::LocInfo &LocInfo,
+                                          ISD::ArgFlagsTy &ArgFlags,
+                                          CCState &State) {
   // List of GPR registers that are available to store values in regcall
   // calling convention.
   static const MCPhysReg RegList[] = {X86::EAX, X86::ECX, X86::EDX, X86::EDI,
@@ -113,9 +120,15 @@
   return false;
 }
 
-bool CC_X86_64_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
-                          CCValAssign::LocInfo &LocInfo,
-                          ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+/// Vectorcall calling convention has special handling for vector types or
+/// HVA for 64 bit arch.
+/// For HVAs shadow registers might be allocated on the first pass
+/// and actual XMM registers are allocated on the second pass.
+/// For vector types, actual XMM registers are allocated on the first pass.
+/// \return true if registers were allocated and false otherwise.
+static bool CC_X86_64_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+                                 CCValAssign::LocInfo &LocInfo,
+                                 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
   // On the second pass, go through the HVAs only.
   if (ArgFlags.isSecArgPass()) {
     if (ArgFlags.isHva())
@@ -165,9 +178,14 @@
   return ArgFlags.isHva();
 }
 
-bool CC_X86_32_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
-                          CCValAssign::LocInfo &LocInfo,
-                          ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+/// Vectorcall calling convention has special handling for vector types or
+/// HVA for 32 bit arch.
+/// For HVAs actual XMM registers are allocated on the second pass.
+/// For vector types, actual XMM registers are allocated on the first pass.
+/// \return true if registers were allocated and false otherwise.
+static bool CC_X86_32_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+                                 CCValAssign::LocInfo &LocInfo,
+                                 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
   // On the second pass, go through the HVAs only.
   if (ArgFlags.isSecArgPass()) {
     if (ArgFlags.isHva())
@@ -205,9 +223,18 @@
   return false; // No register was assigned - Continue the search.
 }
 
-bool CC_X86_32_MCUInReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
-                        CCValAssign::LocInfo &LocInfo,
-                        ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+static bool CC_X86_AnyReg_Error(unsigned &, MVT &, MVT &,
+                                CCValAssign::LocInfo &, ISD::ArgFlagsTy &,
+                                CCState &) {
+  llvm_unreachable("The AnyReg calling convention is only supported by the "
+                   "stackmap and patchpoint intrinsics.");
+  // Gracefully fall back to the X86 C calling convention on Release builds.
+  return false;
+}
+
+static bool CC_X86_32_MCUInReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+                               CCValAssign::LocInfo &LocInfo,
+                               ISD::ArgFlagsTy &ArgFlags, CCState &State) {
   // This is similar to CCAssignToReg<[EAX, EDX, ECX]>, but makes sure
   // not to split i64 and double between a register and stack
   static const MCPhysReg RegList[] = {X86::EAX, X86::EDX, X86::ECX};
@@ -261,4 +288,5 @@
   return true;
 }
 
-} // End llvm namespace
+// Provides entry points of CC_X86 and RetCC_X86.
+#include "X86GenCallingConv.inc"
Index: llvm/lib/Target/X86/X86CallingConv.td
===================================================================
--- llvm/lib/Target/X86/X86CallingConv.td
+++ llvm/lib/Target/X86/X86CallingConv.td
@@ -477,6 +477,7 @@
 ]>;
 
 // This is the return-value convention used for the entire X86 backend.
+let Entry = 1 in
 def RetCC_X86 : CallingConv<[
 
   // Check if this is the Intel OpenCL built-ins calling convention
@@ -1039,6 +1040,7 @@
 ]>;
 
 // This is the argument convention used for the entire X86 backend.
+let Entry = 1 in
 def CC_X86 : CallingConv<[
   CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo<CC_Intel_OCL_BI>>,
   CCIfSubtarget<"is64Bit()", CCDelegateTo<CC_X86_64>>,
Index: llvm/lib/Target/X86/X86FastISel.cpp
===================================================================
--- llvm/lib/Target/X86/X86FastISel.cpp
+++ llvm/lib/Target/X86/X86FastISel.cpp
@@ -312,8 +312,6 @@
   return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);
 }
 
-#include "X86GenCallingConv.inc"
-
 /// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
 /// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
 /// Return true and the result register by reference if it is possible.
Index: llvm/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2347,8 +2347,6 @@
 //               Return Value Calling Convention Implementation
 //===----------------------------------------------------------------------===//
 
-#include "X86GenCallingConv.inc"
-
 bool X86TargetLowering::CanLowerReturn(
     CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
Index: llvm/utils/TableGen/CallingConvEmitter.cpp
===================================================================
--- llvm/utils/TableGen/CallingConvEmitter.cpp
+++ llvm/utils/TableGen/CallingConvEmitter.cpp
@@ -41,7 +41,11 @@
   // each other.
   for (Record *CC : CCs) {
     if (!CC->getValueAsBit("Custom")) {
-      O << "static bool " << CC->getName()
+      if (CC->getValueAsBit("Entry"))
+        O << "bool llvm::";
+      else
+        O << "static bool ";
+      O << CC->getName()
         << "(unsigned ValNo, MVT ValVT,\n"
         << std::string(CC->getName().size() + 13, ' ')
         << "MVT LocVT, CCValAssign::LocInfo LocInfo,\n"
@@ -62,7 +66,12 @@
   ListInit *CCActions = CC->getValueAsListInit("Actions");
   Counter = 0;
 
-  O << "\n\nstatic bool " << CC->getName()
+  O << "\n\n";
+  if (CC->getValueAsBit("Entry"))
+    O << "bool llvm::";
+  else
+    O << "static bool ";
+  O << CC->getName()
     << "(unsigned ValNo, MVT ValVT,\n"
     << std::string(CC->getName().size()+13, ' ')
     << "MVT LocVT, CCValAssign::LocInfo LocInfo,\n"
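
For readers unfamiliar with the emitter output, the net effect on the generated
X86GenCallingConv.inc is a linkage change on the two root conventions. The
following is an illustrative reconstruction, not the literal TableGen output;
the function bodies are elided:

  // Conventions without the Entry bit keep internal linkage, as before:
  static bool RetCC_X86_32(unsigned ValNo, MVT ValVT,
                           MVT LocVT, CCValAssign::LocInfo LocInfo,
                           ISD::ArgFlagsTy ArgFlags, CCState &State);

  // Conventions defined under "let Entry = 1 in" are emitted with external
  // linkage, matching the declarations in X86CallingConv.h:
  bool llvm::RetCC_X86(unsigned ValNo, MVT ValVT,
                       MVT LocVT, CCValAssign::LocInfo LocInfo,
                       ISD::ArgFlagsTy ArgFlags, CCState &State) {
    // ... actions generated from the RetCC_X86 def ...
    return true; // CC didn't match.
  }

Because the .inc is now included in exactly one translation unit
(X86CallingConv.cpp), the static helpers and generated tables are no longer
duplicated across X86ISelLowering.cpp, X86FastISel.cpp, and X86CallLowering.cpp.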
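Callers elsewhere in the backend only need the declarations and pass the entry
points around as CCAssignFn values. A minimal sketch, assuming
X86ISelLowering.cpp pulls in X86CallingConv.h directly or transitively (the
CanLowerReturn body shown is the one already visible in the hunk context
above):

  #include "X86CallingConv.h"

  bool X86TargetLowering::CanLowerReturn(
      CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
    // RetCC_X86 now has external linkage, so it can be referenced here
    // without textually including the generated .inc in this file.
    return CCInfo.CheckReturn(Outs, RetCC_X86);
  }

This is the same CCAssignFn-based pattern every target already uses; the only
difference is that the function now comes from X86CallingConv.cpp via the
linker rather than from a per-file copy of the generated code.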