diff --git a/llvm/lib/Target/VE/VECallingConv.td b/llvm/lib/Target/VE/VECallingConv.td
--- a/llvm/lib/Target/VE/VECallingConv.td
+++ b/llvm/lib/Target/VE/VECallingConv.td
@@ -21,7 +21,11 @@
   CCAssignToStack<0, 8>
 ]>;
 
-def CC_VE : CallingConv<[
+///// C Calling Convention (VE ABI v2.1) /////
+//
+// Reference: https://www.nec.com/en/global/prod/hpc/aurora/document/VE-ABI_v2.1.pdf
+//
+def CC_VE_C : CallingConv<[
   // All arguments get passed in generic registers if there is space.
 
   // Promote i1/i8/i16/i32 arguments to i64.
@@ -51,6 +55,7 @@
   CCDelegateTo<CC_VE_C_Stack>
 ]>;
 
+///// Standard vararg C Calling Convention (VE ABI v2.1) /////
 // All arguments get passed in stack for varargs function or non-prototyped
 // function.
 def CC_VE2 : CallingConv<[
@@ -70,7 +75,7 @@
   CCAssignToStack<0, 8>
 ]>;
 
-def RetCC_VE : CallingConv<[
+def RetCC_VE_C : CallingConv<[
   // Promote i1/i8/i16/i32 return values to i64.
   CCIfType<[i1, i8, i16, i32], CCPromoteToType<i64>>,
 
diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp
--- a/llvm/lib/Target/VE/VEISelLowering.cpp
+++ b/llvm/lib/Target/VE/VEISelLowering.cpp
@@ -40,10 +40,26 @@
 #include "VEGenCallingConv.inc"
 
+static CCAssignFn *getReturnCC(CallingConv::ID CallConv) {
+  switch (CallConv) {
+  default:
+    return RetCC_VE_C;
+  }
+}
+
+static CCAssignFn *getParamCC(CallingConv::ID CallConv, bool IsVarArg) {
+  if (IsVarArg)
+    return CC_VE2;
+  switch (CallConv) {
+  default:
+    return CC_VE_C;
+  }
+}
+
 bool VETargetLowering::CanLowerReturn(
     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
-  CCAssignFn *RetCC = RetCC_VE;
+  CCAssignFn *RetCC = getReturnCC(CallConv);
   SmallVector<CCValAssign, 16> RVLocs;
   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
   return CCInfo.CheckReturn(Outs, RetCC);
 }
@@ -276,7 +292,7 @@
                  *DAG.getContext());
 
   // Analyze return values.
-  CCInfo.AnalyzeReturn(Outs, RetCC_VE);
+  CCInfo.AnalyzeReturn(Outs, getReturnCC(CallConv));
 
   SDValue Flag;
   SmallVector<SDValue, 4> RetOps(1, Chain);
@@ -357,7 +373,7 @@
   CCInfo.AllocateStack(ArgsPreserved, Align(8));
   // We already allocated the preserved area, so the stack offset computed
   // by CC_VE would be correct now.
-  CCInfo.AnalyzeFormalArguments(Ins, CC_VE);
+  CCInfo.AnalyzeFormalArguments(Ins, getParamCC(CallConv, false));
 
   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
     CCValAssign &VA = ArgLocs[i];
@@ -505,7 +521,7 @@
   CCInfo.AllocateStack(ArgsPreserved, Align(8));
   // We already allocated the preserved area, so the stack offset computed
   // by CC_VE would be correct now.
-  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_VE);
+  CCInfo.AnalyzeCallOperands(CLI.Outs, getParamCC(CLI.CallConv, false));
 
   // VE requires to use both register and stack for varargs or no-prototyped
   // functions.
@@ -516,7 +532,8 @@
   CCState CCInfo2(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(),
                   ArgLocs2, *DAG.getContext());
   if (UseBoth)
-    CCInfo2.AnalyzeCallOperands(CLI.Outs, CC_VE2);
+    CCInfo2.AnalyzeCallOperands(CLI.Outs,
+                                getParamCC(CLI.CallConv, CLI.IsVarArg));
 
   // Get the size of the outgoing arguments stack space requirement.
   unsigned ArgsSize = CCInfo.getNextStackOffset();
@@ -701,7 +718,7 @@
   if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
     CLI.Ins[0].Flags.setInReg();
 
-  RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_VE);
+  RVInfo.AnalyzeCallResult(CLI.Ins, getReturnCC(CLI.CallConv));
 
   // Copy all of the result registers out of their specified physreg.
   for (unsigned i = 0; i != RVLocs.size(); ++i) {