Index: docs/WritingAnLLVMBackend.rst =================================================================== --- docs/WritingAnLLVMBackend.rst +++ docs/WritingAnLLVMBackend.rst @@ -1572,9 +1572,12 @@ * ``CCIfNest `` --- If the argument is marked with the "``nest``" attribute, then apply the action. -* ``CCIfNotVarArg `` --- If the current function does not take a +* ``CCIfFuncNotVarArg `` --- If the current function does not take a variable number of arguments, apply the action. +* ``CCIfArgIsVarArg `` --- If the argument is a variable argument, apply + the action. + * ``CCAssignToRegWithShadow `` --- similar to ``CCAssignToReg``, but with a shadow list of registers. Index: include/llvm/CodeGen/TargetCallingConv.h =================================================================== --- include/llvm/CodeGen/TargetCallingConv.h +++ include/llvm/CodeGen/TargetCallingConv.h @@ -46,6 +46,7 @@ unsigned IsInConsecutiveRegs : 1; unsigned IsCopyElisionCandidate : 1; ///< Argument copy elision candidate unsigned IsPointer : 1; + unsigned IsVarArg : 1; unsigned ByValSize; ///< Byval struct size @@ -58,7 +59,7 @@ IsSwiftSelf(0), IsSwiftError(0), IsHva(0), IsHvaStart(0), IsSecArgPass(0), ByValAlign(0), OrigAlign(0), IsInConsecutiveRegsLast(0), IsInConsecutiveRegs(0), - IsCopyElisionCandidate(0), IsPointer(0), ByValSize(0), + IsCopyElisionCandidate(0), IsPointer(0), IsVarArg(0), ByValSize(0), PointerAddrSpace(0) { static_assert(sizeof(*this) == 3 * sizeof(unsigned), "flags are too big"); } @@ -120,6 +121,9 @@ bool isPointer() const { return IsPointer; } void setPointer() { IsPointer = 1; } + bool isVarArg() const { return IsVarArg; } + void setVarArg() { IsVarArg = 1; } + unsigned getByValAlign() const { return (1U << ByValAlign) / 2; } void setByValAlign(unsigned A) { ByValAlign = Log2_32(A) + 1; @@ -186,9 +190,6 @@ MVT VT; EVT ArgVT; - /// IsFixed - Is this a "fixed" value, ie not passed through a vararg "...". - bool IsFixed = false; - /// Index original Function's argument. 
unsigned OrigArgIndex; @@ -198,9 +199,9 @@ unsigned PartOffset; OutputArg() = default; - OutputArg(ArgFlagsTy flags, EVT vt, EVT argvt, bool isfixed, + OutputArg(ArgFlagsTy flags, EVT vt, EVT argvt, unsigned origIdx, unsigned partOffs) - : Flags(flags), IsFixed(isfixed), OrigArgIndex(origIdx), + : Flags(flags), OrigArgIndex(origIdx), PartOffset(partOffs) { VT = vt.getSimpleVT(); ArgVT = argvt; Index: include/llvm/Target/TargetCallingConv.td =================================================================== --- include/llvm/Target/TargetCallingConv.td +++ include/llvm/Target/TargetCallingConv.td @@ -76,11 +76,17 @@ /// the specified action. class CCIfSRet : CCIf<"ArgFlags.isSRet()", A> {} -/// CCIfVarArg - If the current function is vararg - apply the action -class CCIfVarArg : CCIf<"State.isVarArg()", A> {} +/// CCIfFuncIsVarArg - If the current function is vararg - apply the action +class CCIfFuncIsVarArg : CCIf<"State.isVarArg()", A> {} -/// CCIfNotVarArg - If the current function is not vararg - apply the action -class CCIfNotVarArg : CCIf<"!State.isVarArg()", A> {} +/// CCIfFuncNotVarArg - If the current function is not vararg - apply the action +class CCIfFuncNotVarArg : CCIf<"!State.isVarArg()", A> {} + +/// CCIfArgIsVarArg - If the current argument is vararg - apply the action +class CCIfArgIsVarArg : CCIf<"ArgFlags.isVarArg()", A> {} + +/// CCIfArgNotVarArg - If the current argument is not vararg - apply the action +class CCIfArgNotVarArg : CCIf<"!ArgFlags.isVarArg()", A> {} /// CCIfPtrAddrSpace - If the top-level parent of the current argument has /// pointer type in the specified address-space. 
Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp =================================================================== --- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -1882,7 +1882,7 @@ for (unsigned i = 0; i < NumParts; ++i) { Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(), - VT, /*isfixed=*/true, 0, 0)); + VT, 0, 0)); OutVals.push_back(Parts[i]); } } @@ -1900,7 +1900,7 @@ Flags.setSwiftError(); Outs.push_back(ISD::OutputArg(Flags, EVT(TLI.getPointerTy(DL)) /*vt*/, EVT(TLI.getPointerTy(DL)) /*argvt*/, - true /*isfixed*/, 1 /*origidx*/, + 1 /*origidx*/, 0 /*partOffs*/)); // Create SDNode for the swifterror virtual register. OutVals.push_back( @@ -9106,8 +9106,9 @@ for (unsigned j = 0; j != NumParts; ++j) { // if it isn't first piece, alignment must be 1 ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), VT, - i < CLI.NumFixedArgs, i, j*Parts[j].getValueType().getStoreSize()); + if (i >= CLI.NumFixedArgs) + MyFlags.Flags.setVarArg(); if (NumParts > 1 && j == 0) MyFlags.Flags.setSplit(); else if (j != 0) { Index: lib/CodeGen/TargetLoweringBase.cpp =================================================================== --- lib/CodeGen/TargetLoweringBase.cpp +++ lib/CodeGen/TargetLoweringBase.cpp @@ -1415,7 +1415,7 @@ Flags.setZExt(); for (unsigned i = 0; i < NumParts; ++i) - Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isFixed=*/true, 0, 0)); + Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, 0, 0)); } } Index: lib/Target/AArch64/AArch64ISelLowering.cpp =================================================================== --- lib/Target/AArch64/AArch64ISelLowering.cpp +++ lib/Target/AArch64/AArch64ISelLowering.cpp @@ -3614,7 +3614,7 @@ MVT ArgVT = Outs[i].VT; ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, - /*IsVarArg=*/ !Outs[i].IsFixed); + /*IsVarArg=*/ ArgFlags.isVarArg()); bool Res = AssignFn(i, ArgVT, ArgVT, CCValAssign::Full, 
ArgFlags, CCInfo); assert(!Res && "Call operand has unhandled type"); (void)Res; Index: lib/Target/Hexagon/HexagonCallingConv.td =================================================================== --- lib/Target/Hexagon/HexagonCallingConv.td +++ lib/Target/Hexagon/HexagonCallingConv.td @@ -6,11 +6,6 @@ // //===----------------------------------------------------------------------===// -class CCIfArgIsVarArg - : CCIf<"State.isVarArg() && " - "ValNo >= static_cast(State)" - ".getNumNamedVarArgParams()", A>; - def CC_HexagonStack: CallingConv<[ CCIfType<[i32,v2i16,v4i8], CCAssignToStack<4,4>>, Index: lib/Target/Hexagon/HexagonISelLowering.cpp =================================================================== --- lib/Target/Hexagon/HexagonISelLowering.cpp +++ lib/Target/Hexagon/HexagonISelLowering.cpp @@ -109,23 +109,6 @@ cl::desc("Rewrite unaligned loads as a pair of aligned loads")); -namespace { - - class HexagonCCState : public CCState { - unsigned NumNamedVarArgParams = 0; - - public: - HexagonCCState(CallingConv::ID CC, bool IsVarArg, MachineFunction &MF, - SmallVectorImpl &locs, LLVMContext &C, - unsigned NumNamedArgs) - : CCState(CC, IsVarArg, MF, locs, C), - NumNamedVarArgParams(NumNamedArgs) {} - unsigned getNumNamedVarArgParams() const { return NumNamedVarArgParams; } - }; - -} // end anonymous namespace - - // Implement calling convention for Hexagon. static bool CC_SkipOdd(unsigned &ValNo, MVT &ValVT, MVT &LocVT, @@ -329,16 +312,12 @@ MachineFrameInfo &MFI = MF.getFrameInfo(); auto PtrVT = getPointerTy(MF.getDataLayout()); - unsigned NumParams = CLI.CS.getInstruction() - ? CLI.CS.getFunctionType()->getNumParams() - : 0; if (GlobalAddressSDNode *GAN = dyn_cast(Callee)) Callee = DAG.getTargetGlobalAddress(GAN->getGlobal(), dl, MVT::i32); // Analyze operands of the call, assigning locations to each operand. 
SmallVector ArgLocs; - HexagonCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext(), - NumParams); + CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); if (Subtarget.useHVXOps()) CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_HVX); @@ -698,8 +677,7 @@ // Assign locations to all of the incoming arguments. SmallVector ArgLocs; - HexagonCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext(), - MF.getFunction().getFunctionType()->getNumParams()); + CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); if (Subtarget.useHVXOps()) CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_HVX); Index: lib/Target/Lanai/LanaiCallingConv.td =================================================================== --- lib/Target/Lanai/LanaiCallingConv.td +++ lib/Target/Lanai/LanaiCallingConv.td @@ -20,8 +20,8 @@ CCIfType<[i8, i16], CCPromoteToType>, // Put argument in registers if marked 'inreg' and not a vararg call. - CCIfNotVarArg>>>, + CCIfFuncNotVarArg>>>, // Otherwise they are assigned to the stack in 4-byte aligned units. CCAssignToStack<4, 4> @@ -33,7 +33,7 @@ CCIfType<[ i8, i16 ], CCPromoteToType>, // Put arguments in registers. - CCIfNotVarArg>>, + CCIfFuncNotVarArg>>, // Otherwise they are assigned to the stack in 4-byte aligned units. CCAssignToStack<4, 4> Index: lib/Target/Mips/MipsCCState.h =================================================================== --- lib/Target/Mips/MipsCCState.h +++ lib/Target/Mips/MipsCCState.h @@ -71,10 +71,6 @@ /// vector. SmallVector OriginalRetWasFloatVector; - /// Records whether the value was a fixed argument. - /// See ISD::OutputArg::IsFixed, - SmallVector CallOperandIsFixed; - // Used to handle MIPS16-specific calling convention tweaks. // FIXME: This should probably be a fully fledged calling convention. 
SpecialCallingConvType SpecialCallingConv; @@ -95,11 +91,10 @@ OriginalArgWasF128.clear(); OriginalArgWasFloat.clear(); OriginalArgWasFloatVector.clear(); - CallOperandIsFixed.clear(); } - // The AnalyzeCallOperands in the base class is not usable since we must - // provide a means of accessing ArgListEntry::IsFixed. Delete them from this + // The AnalyzeCallOperands functions in the base class are not usable since + // they don't maintain the OriginalArgWas* vectors. Delete them from this // class. This doesn't stop them being used via the base class though. void AnalyzeCallOperands(const SmallVectorImpl &Outs, CCAssignFn Fn) = delete; @@ -158,7 +153,6 @@ bool WasOriginalRetVectorFloat(unsigned ValNo) const { return OriginalRetWasFloatVector[ValNo]; } - bool IsCallOperandFixed(unsigned ValNo) { return CallOperandIsFixed[ValNo]; } SpecialCallingConvType getSpecialCallingConv() { return SpecialCallingConv; } }; } Index: lib/Target/Mips/MipsCCState.cpp =================================================================== --- lib/Target/Mips/MipsCCState.cpp +++ lib/Target/Mips/MipsCCState.cpp @@ -138,7 +138,6 @@ OriginalArgWasF128.push_back(originalTypeIsF128(FuncArg.Ty, Func)); OriginalArgWasFloat.push_back(FuncArg.Ty->isFloatingPointTy()); OriginalArgWasFloatVector.push_back(FuncArg.Ty->isVectorTy()); - CallOperandIsFixed.push_back(Outs[i].IsFixed); } } Index: lib/Target/Mips/MipsCallLowering.h =================================================================== --- lib/Target/Mips/MipsCallLowering.h +++ lib/Target/Mips/MipsCallLowering.h @@ -76,10 +76,11 @@ /// Based on registers available on target machine split or extend /// type if needed, also change pointer type to appropriate integer /// type. 
- template - void subTargetRegTypeForCallingConv(const Function &F, ArrayRef Args, - ArrayRef OrigArgIndices, - SmallVectorImpl &ISDArgs) const; + void subTargetRegTypeForCallingConv( + const Function &F, ArrayRef Args, + ArrayRef OrigArgIndices, + function_ref emplaceBack) + const; /// Split structures and arrays, save original argument indices since /// Mips calling convention needs info about original argument type. Index: lib/Target/Mips/MipsCallLowering.cpp =================================================================== --- lib/Target/Mips/MipsCallLowering.cpp +++ lib/Target/Mips/MipsCallLowering.cpp @@ -424,7 +424,11 @@ } SmallVector Outs; - subTargetRegTypeForCallingConv(F, RetInfos, OrigArgIndices, Outs); + subTargetRegTypeForCallingConv(F, RetInfos, OrigArgIndices, + [&](ISD::ArgFlagsTy Flags, MVT RegisterVT, + EVT VT, unsigned OrigIdx) { + Outs.emplace_back(Flags, RegisterVT, VT, OrigIdx, 0); + }); SmallVector ArgLocs; MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, @@ -473,7 +477,11 @@ } SmallVector Ins; - subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Ins); + subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, + [&](ISD::ArgFlagsTy Flags, MVT RegisterVT, + EVT VT, unsigned OrigIdx) { + Ins.emplace_back(Flags, RegisterVT, VT, true, OrigIdx, 0); + }); SmallVector ArgLocs; MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, @@ -550,7 +558,11 @@ } SmallVector Outs; - subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Outs); + subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, + [&](ISD::ArgFlagsTy Flags, MVT RegisterVT, + EVT VT, unsigned OrigIdx) { + Outs.emplace_back(Flags, RegisterVT, VT, OrigIdx, 0); + }); SmallVector ArgLocs; MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, @@ -582,7 +594,11 @@ splitToValueTypes(OrigRet, 0, ArgInfos, OrigRetIndices); SmallVector Ins; - subTargetRegTypeForCallingConv(F, ArgInfos, OrigRetIndices, Ins); + 
subTargetRegTypeForCallingConv(F, ArgInfos, OrigRetIndices, + [&](ISD::ArgFlagsTy Flags, MVT RegisterVT, + EVT VT, unsigned OrigIdx) { + Ins.emplace_back(Flags, RegisterVT, VT, true, OrigIdx, 0); + }); SmallVector ArgLocs; MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, @@ -601,10 +617,10 @@ return true; } -template void MipsCallLowering::subTargetRegTypeForCallingConv( const Function &F, ArrayRef Args, - ArrayRef OrigArgIndices, SmallVectorImpl &ISDArgs) const { + ArrayRef OrigArgIndices, + function_ref AddISDArg) const { const DataLayout &DL = F.getParent()->getDataLayout(); const MipsTargetLowering &TLI = *getTLI(); @@ -625,8 +641,7 @@ else Flags.setOrigAlign(1); - ISDArgs.emplace_back(Flags, RegisterVT, VT, true, OrigArgIndices[ArgNo], - 0); + AddISDArg(Flags, RegisterVT, VT, OrigArgIndices[ArgNo]); } ++ArgNo; } Index: lib/Target/Mips/MipsCallingConv.td =================================================================== --- lib/Target/Mips/MipsCallingConv.td +++ lib/Target/Mips/MipsCallingConv.td @@ -30,12 +30,6 @@ class CCIfOrigArgWasF128 : CCIf<"static_cast(&State)->WasOriginalArgF128(ValNo)", A>; -/// Match if this specific argument is a vararg. -/// This is slightly different fro CCIfIsVarArg which matches if any argument is -/// a vararg. -class CCIfArgIsVarArg - : CCIf<"!static_cast(&State)->IsCallOperandFixed(ValNo)", A>; - /// Match if the return was a floating point vector. 
class CCIfOrigArgWasNotVectorFloat : CCIf<"!static_cast(&State)" @@ -350,7 +344,7 @@ ]>; def CC_Mips : CallingConv<[ - CCIfVarArg>>, + CCIfFuncIsVarArg>>, CCDelegateTo ]>; Index: lib/Target/PowerPC/PPCISelLowering.cpp =================================================================== --- lib/Target/PowerPC/PPCISelLowering.cpp +++ lib/Target/PowerPC/PPCISelLowering.cpp @@ -5331,12 +5331,12 @@ ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; bool Result; - if (Outs[i].IsFixed) { - Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, - CCInfo); - } else { + if (ArgFlags.isVarArg()) { Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo); + } else { + Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, + CCInfo); } if (Result) { Index: lib/Target/RISCV/RISCVISelLowering.cpp =================================================================== --- lib/Target/RISCV/RISCVISelLowering.cpp +++ lib/Target/RISCV/RISCVISelLowering.cpp @@ -1363,7 +1363,7 @@ RISCVABI::ABI ABI = MF.getSubtarget().getTargetABI(); if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full, - ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) { + ArgFlags, CCInfo, !ArgFlags.isVarArg(), IsRet, OrigTy)) { LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type " << EVT(ArgVT).getEVTString() << "\n"); llvm_unreachable(nullptr); Index: lib/Target/Sparc/SparcISelLowering.cpp =================================================================== --- lib/Target/Sparc/SparcISelLowering.cpp +++ lib/Target/Sparc/SparcISelLowering.cpp @@ -1053,7 +1053,7 @@ if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128)) continue; // The fixed arguments to a varargs function still go in FP registers. - if (Outs[VA.getValNo()].IsFixed) + if (!Outs[VA.getValNo()].Flags.isVarArg()) continue; // This floating point argument should be reassigned.
Index: lib/Target/SystemZ/SystemZCallingConv.h =================================================================== --- lib/Target/SystemZ/SystemZCallingConv.h +++ lib/Target/SystemZ/SystemZCallingConv.h @@ -24,10 +24,6 @@ class SystemZCCState : public CCState { private: - /// Records whether the value was a fixed argument. - /// See ISD::OutputArg::IsFixed. - SmallVector ArgIsFixed; - /// Records whether the value was widened from a short vector type. SmallVector ArgIsShortVector; @@ -43,10 +39,6 @@ void AnalyzeFormalArguments(const SmallVectorImpl &Ins, CCAssignFn Fn) { - // Formal arguments are always fixed. - ArgIsFixed.clear(); - for (unsigned i = 0; i < Ins.size(); ++i) - ArgIsFixed.push_back(true); // Record whether the call operand was a short vector. ArgIsShortVector.clear(); for (unsigned i = 0; i < Ins.size(); ++i) @@ -57,10 +49,6 @@ void AnalyzeCallOperands(const SmallVectorImpl &Outs, CCAssignFn Fn) { - // Record whether the call operand was a fixed argument. - ArgIsFixed.clear(); - for (unsigned i = 0; i < Outs.size(); ++i) - ArgIsFixed.push_back(Outs[i].IsFixed); // Record whether the call operand was a short vector. ArgIsShortVector.clear(); for (unsigned i = 0; i < Outs.size(); ++i) @@ -69,13 +57,13 @@ CCState::AnalyzeCallOperands(Outs, Fn); } - // This version of AnalyzeCallOperands in the base class is not usable - // since we must provide a means of accessing ISD::OutputArg::IsFixed. + // The AnalyzeCallOperands functions in the base class are not usable since + // they do not provide a means of accessing ISD::OutputArg::ArgVT, which we + // need to determine whether the argument is a short vector. 
void AnalyzeCallOperands(const SmallVectorImpl &Outs, SmallVectorImpl &Flags, CCAssignFn Fn) = delete; - bool IsFixed(unsigned ValNo) { return ArgIsFixed[ValNo]; } bool IsShortVector(unsigned ValNo) { return ArgIsShortVector[ValNo]; } }; Index: lib/Target/SystemZ/SystemZCallingConv.td =================================================================== --- lib/Target/SystemZ/SystemZCallingConv.td +++ lib/Target/SystemZ/SystemZCallingConv.td @@ -16,10 +16,6 @@ "(State.getMachineFunction().getSubtarget()).", F), A>; -// Match if this specific argument is a fixed (i.e. named) argument. -class CCIfFixed - : CCIf<"static_cast(&State)->IsFixed(ValNo)", A>; - // Match if this specific argument was widened from a short vector type. class CCIfShortVector : CCIf<"static_cast(&State)->IsShortVector(ValNo)", A>; @@ -93,8 +89,8 @@ // during type legalization. CCIfSubtarget<"hasVector()", CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], - CCIfFixed>>>, + CCIfArgNotVarArg>>>, // However, sub-128 vectors which need to go on the stack occupy just a // single 8-byte-aligned 8-byte stack slot. Pass as i64. Index: lib/Target/WebAssembly/WebAssemblyISelLowering.cpp =================================================================== --- lib/Target/WebAssembly/WebAssemblyISelLowering.cpp +++ lib/Target/WebAssembly/WebAssemblyISelLowering.cpp @@ -678,7 +678,7 @@ OutVal = FINode; } // Count the number of fixed args *after* legalization. 
- NumFixedArgs += Out.IsFixed; + NumFixedArgs += !Out.Flags.isVarArg(); } bool IsVarArg = CLI.IsVarArg; @@ -816,7 +816,7 @@ for (const ISD::OutputArg &Out : Outs) { assert(!Out.Flags.isByVal() && "byval is not valid for return values"); assert(!Out.Flags.isNest() && "nest is not valid for return values"); - assert(Out.IsFixed && "non-fixed return value is not valid"); + assert(!Out.Flags.isVarArg() && "non-fixed return value is not valid"); if (Out.Flags.isInAlloca()) fail(DL, DAG, "WebAssembly hasn't implemented inalloca results"); if (Out.Flags.isInConsecutiveRegs()) Index: lib/Target/X86/X86CallingConv.td =================================================================== --- lib/Target/X86/X86CallingConv.td +++ lib/Target/X86/X86CallingConv.td @@ -541,15 +541,15 @@ // FIXME: This isn't precisely correct; the x86-64 ABI document says that // fixed arguments to vararg functions are supposed to be passed in // registers. Actually modeling that would be a lot of work, though. - CCIfNotVarArg>>>, + CCIfFuncNotVarArg>>>, // The first 8 512-bit vector arguments are passed in ZMM registers. - CCIfNotVarArg>>>, + CCIfFuncNotVarArg>>>, // Integer/FP values get stored in stack slots that are 8 bytes in size and // 8-byte aligned if there are no more registers to hold them. @@ -738,17 +738,17 @@ // vector registers def CC_X86_32_Vector_Standard : CallingConv<[ // SSE vector arguments are passed in XMM registers. - CCIfNotVarArg>>, + CCIfFuncNotVarArg>>, // AVX 256-bit vector arguments are passed in YMM registers. - CCIfNotVarArg>>>, + CCIfFuncNotVarArg>>>, // AVX 512-bit vector arguments are passed in ZMM registers. - CCIfNotVarArg>>, + CCIfFuncNotVarArg>>, CCDelegateTo ]>; @@ -757,17 +757,17 @@ // vector registers. def CC_X86_32_Vector_Darwin : CallingConv<[ // SSE vector arguments are passed in XMM registers. - CCIfNotVarArg>>, + CCIfFuncNotVarArg>>, // AVX 256-bit vector arguments are passed in YMM registers. 
- CCIfNotVarArg>>>, + CCIfFuncNotVarArg>>>, // AVX 512-bit vector arguments are passed in ZMM registers. - CCIfNotVarArg>>, + CCIfFuncNotVarArg>>, CCDelegateTo ]>; @@ -780,14 +780,14 @@ // The first 3 float or double arguments, if marked 'inreg' and if the call // is not a vararg call and if SSE2 is available, are passed in SSE registers. - CCIfNotVarArg>>>>, + CCIfFuncNotVarArg>>>>, // The first 3 __m64 vector arguments are passed in mmx registers if the // call is not a vararg call. - CCIfNotVarArg>>, + CCIfFuncNotVarArg>>, // Integer/Float values get stored in stack slots that are 4 bytes in // size and 4-byte aligned. @@ -829,7 +829,7 @@ // The first 3 integer arguments, if marked 'inreg' and if the call is not // a vararg call, are passed in integer registers. - CCIfNotVarArg>>>, + CCIfFuncNotVarArg>>>, // Otherwise, same as everything else. CCDelegateTo @@ -846,7 +846,7 @@ // If the call is not a vararg call, some arguments may be passed // in integer registers. - CCIfNotVarArg>>, + CCIfFuncNotVarArg>>, // Otherwise, same as everything else. CCDelegateTo @@ -923,9 +923,9 @@ // The first 3 float or double arguments, if the call is not a vararg // call and if SSE2 is available, are passed in SSE registers. - CCIfNotVarArg>>>, + CCIfFuncNotVarArg>>>, // Doubles get 8-byte slots that are 8-byte aligned. CCIfType<[f64], CCAssignToStack<8, 8>>,