Index: include/llvm/CodeGen/TargetLowering.h
===================================================================
--- include/llvm/CodeGen/TargetLowering.h
+++ include/llvm/CodeGen/TargetLowering.h
@@ -705,7 +705,9 @@
   /// always broken down into scalars in some contexts. This occurs even if the
   /// vector type is legal.
   virtual unsigned getVectorTypeBreakdownForCallingConv(
-      LLVMContext &Context, EVT VT, EVT &IntermediateVT,
+      LLVMContext &Context,
+      CallingConv::ID CC,
+      EVT VT, EVT &IntermediateVT,
       unsigned &NumIntermediates, MVT &RegisterVT) const {
     return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
                                   RegisterVT);
@@ -1161,6 +1163,7 @@
   /// are legal for some operations and not for other operations.
   /// For MIPS all vector types must be passed through the integer register set.
   virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
+                                            CallingConv::ID CC,
                                             EVT VT) const {
     return getRegisterType(Context, VT);
   }
@@ -1169,6 +1172,7 @@
   /// this occurs when a vector type is used, as vector are passed through the
   /// integer register set.
   virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
+                                                 CallingConv::ID CC,
                                                  EVT VT) const {
     return getNumRegisters(Context, VT);
   }
@@ -3672,7 +3676,7 @@
 /// Given an LLVM IR type and return type attributes, compute the return value
 /// EVTs and flags, and optionally also the offsets, if the return value is
 /// being lowered to memory.
-void GetReturnInfo(Type *ReturnType, AttributeList attr,
+void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr,
                    SmallVectorImpl<ISD::OutputArg> &Outs,
                    const TargetLowering &TLI, const DataLayout &DL);
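For an out-of-tree target that overrides these hooks, the update is mechanical: the convention arrives between the context and the type. A minimal sketch, assuming a hypothetical FooTargetLowering (the name and the policy are illustrative, not part of this patch):

    // Hypothetical override, shown only to illustrate the new signature.
    MVT FooTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                         CallingConv::ID CC,
                                                         EVT VT) const {
      // Illustrative policy: convention-specific handling, in the spirit of
      // the Mips and X86 overrides later in this patch.
      if (CC == CallingConv::Fast && VT.isVector())
        return MVT::i64;
      // Defer to the default, which now also takes the convention.
      return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
    }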
Index: lib/CodeGen/SelectionDAG/FastISel.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/FastISel.cpp
+++ lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -1130,7 +1130,7 @@
   ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);

   SmallVector<ISD::OutputArg, 4> Outs;
-  GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);
+  GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);

   bool CanLowerReturn = TLI.CanLowerReturn(
       CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());
Index: lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -89,9 +89,11 @@

   // Check whether the function can return without sret-demotion.
   SmallVector<ISD::OutputArg, 4> Outs;
-  GetReturnInfo(Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI,
+  CallingConv::ID CC = Fn->getCallingConv();
+
+  GetReturnInfo(CC, Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI,
                 mf.getDataLayout());
-  CanLowerReturn = TLI->CanLowerReturn(Fn->getCallingConv(), *MF,
+  CanLowerReturn = TLI->CanLowerReturn(CC, *MF,
                                        Fn->isVarArg(), Outs, Fn->getContext());

   // If this personality uses funclets, we need to do a bit more work.
Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -1015,14 +1015,18 @@
   /// Records if this value needs to be treated in an ABI dependant manner,
   /// different to normal type legalization.
-  bool IsABIMangled = false;
+  Optional<CallingConv::ID> CallConv;

   RegsForValue() = default;
   RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt, EVT valuevt,
-               bool IsABIMangledValue = false);
+               Optional<CallingConv::ID> CC = None);
   RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                const DataLayout &DL, unsigned Reg, Type *Ty,
-               bool IsABIMangledValue = false);
+               Optional<CallingConv::ID> CC);
+
+  bool isABIMangled() const {
+    return CallConv.hasValue();
+  }

   /// Add the specified values to this one.
   void append(const RegsForValue &RHS) {
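The Optional subsumes the old boolean: an engaged value means "ABI-mangled under this convention" and None means plain type legalization, so a caller can no longer request mangling without naming the convention. A standalone sketch of the pattern, using llvm::Optional's hasValue()/getValue() as the patch does:

    #include "llvm/ADT/Optional.h"
    #include "llvm/IR/CallingConv.h"
    using namespace llvm;

    // Presence of a value doubles as the old IsABIMangled flag.
    static bool isMangled(Optional<CallingConv::ID> CC) { return CC.hasValue(); }

    void example() {
      Optional<CallingConv::ID> PHICopy = None;              // plain legalization
      Optional<CallingConv::ID> RetCopy = CallingConv::Fast; // ABI-mangled copy
      (void)isMangled(PHICopy); // false
      (void)isMangled(RetCopy); // true; RetCopy.getValue() names the convention
    }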
Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -157,31 +157,35 @@
 //       store [4096 x i8] %data, [4096 x i8]* %buffer
 static const unsigned MaxParallelChains = 64;

-// True if the Value passed requires ABI mangling as it is a parameter to a
-// function or a return value from a function which is not an intrinsic.
-static bool isABIRegCopy(const Value *V) {
-  const bool IsRetInst = V && isa<ReturnInst>(V);
-  const bool IsCallInst = V && isa<CallInst>(V);
-  const bool IsInLineAsm =
-      IsCallInst && static_cast<const CallInst *>(V)->isInlineAsm();
-  const bool IsIndirectFunctionCall =
-      IsCallInst && !IsInLineAsm &&
-      !static_cast<const CallInst *>(V)->getCalledFunction();
-  // It is possible that the call instruction is an inline asm statement or an
-  // indirect function call in which case the return value of
-  // getCalledFunction() would be nullptr.
-  const bool IsInstrinsicCall =
-      IsCallInst && !IsInLineAsm && !IsIndirectFunctionCall &&
-      static_cast<const CallInst *>(V)->getCalledFunction()->getIntrinsicID() !=
-          Intrinsic::not_intrinsic;
-
-  return IsRetInst || (IsCallInst && (!IsInLineAsm && !IsInstrinsicCall));
+// Return the calling convention if the Value passed requires ABI mangling as it
+// is a parameter to a function or a return value from a function which is not
+// an intrinsic.
+static Optional<CallingConv::ID> getABIRegCopyCC(const Value *V) {
+  if (auto *R = dyn_cast<ReturnInst>(V))
+    return R->getParent()->getParent()->getCallingConv();
+
+  if (auto *CI = dyn_cast<CallInst>(V)) {
+    const bool IsInlineAsm = CI->isInlineAsm();
+    const bool IsIndirectFunctionCall = !IsInlineAsm && !CI->getCalledFunction();
+
+    // It is possible that the call instruction is an inline asm statement or an
+    // indirect function call in which case the return value of
+    // getCalledFunction() would be nullptr.
+    const bool IsInstrinsicCall =
+        !IsInlineAsm && !IsIndirectFunctionCall &&
+        CI->getCalledFunction()->getIntrinsicID() != Intrinsic::not_intrinsic;
+
+    if (!IsInlineAsm && !IsInstrinsicCall)
+      return CI->getCallingConv();
+  }
+
+  return None;
 }

 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                       const SDValue *Parts, unsigned NumParts,
                                       MVT PartVT, EVT ValueVT, const Value *V,
-                                      bool IsABIRegCopy);
+                                      Optional<CallingConv::ID> CC);

 /// getCopyFromParts - Create a value that contains the specified legal parts
 /// combined into the value they represent.  If the parts combine to a type
@@ -191,11 +195,11 @@
 static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
                                 const SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, EVT ValueVT, const Value *V,
-                                Optional<ISD::NodeType> AssertOp = None,
-                                bool IsABIRegCopy = false) {
+                                Optional<CallingConv::ID> CC = None,
+                                Optional<ISD::NodeType> AssertOp = None) {
   if (ValueVT.isVector())
     return getCopyFromPartsVector(DAG, DL, Parts, NumParts,
-                                  PartVT, ValueVT, V, IsABIRegCopy);
+                                  PartVT, ValueVT, V, CC);

   assert(NumParts > 0 && "No parts to assemble!");
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -237,7 +241,7 @@
       unsigned OddParts = NumParts - RoundParts;
       EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
       Hi = getCopyFromParts(DAG, DL,
-                            Parts + RoundParts, OddParts, PartVT, OddVT, V);
+                            Parts + RoundParts, OddParts, PartVT, OddVT, V, CC);

       // Combine the round and odd parts.
       Lo = Val;
@@ -267,7 +271,7 @@
       assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
              !PartVT.isVector() && "Unexpected split");
       EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
-      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V);
+      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V, CC);
     }
   }

@@ -340,9 +344,11 @@
 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                       const SDValue *Parts, unsigned NumParts,
                                       MVT PartVT, EVT ValueVT, const Value *V,
-                                      bool IsABIRegCopy) {
+                                      Optional<CallingConv::ID> CallConv) {
   assert(ValueVT.isVector() && "Not a vector value");
   assert(NumParts > 0 && "No parts to assemble!");
+  const bool IsABIRegCopy = CallConv.hasValue();
+
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   SDValue Val = Parts[0];

@@ -355,7 +361,9 @@
     if (IsABIRegCopy) {
       NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
-          *DAG.getContext(), ValueVT, IntermediateVT, NumIntermediates,
+          *DAG.getContext(),
+          CallConv.getValue(),
+          ValueVT, IntermediateVT, NumIntermediates,
           RegisterVT);
     } else {
       NumRegs =
@@ -470,7 +478,8 @@
 static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
                                  SDValue Val, SDValue *Parts, unsigned NumParts,
-                                 MVT PartVT, const Value *V, bool IsABIRegCopy);
+                                 MVT PartVT, const Value *V,
+                                 Optional<CallingConv::ID> CallConv);

 /// getCopyToParts - Create a series of nodes that contain the specified value
 /// split into legal parts.  If the parts contain more bits than Val, then, for
@@ -478,14 +487,14 @@
 static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
                            SDValue *Parts, unsigned NumParts, MVT PartVT,
                            const Value *V,
-                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND,
-                           bool IsABIRegCopy = false) {
+                           Optional<CallingConv::ID> CallConv = None,
+                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
   EVT ValueVT = Val.getValueType();

   // Handle the vector case separately.
   if (ValueVT.isVector())
     return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
-                                IsABIRegCopy);
+                                CallConv);

   unsigned PartBits = PartVT.getSizeInBits();
   unsigned OrigNumParts = NumParts;
@@ -564,7 +573,8 @@
     unsigned OddParts = NumParts - RoundParts;
     SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
                                  DAG.getIntPtrConstant(RoundBits, DL));
-    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V);
+    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
+                   CallConv);

     if (DAG.getDataLayout().isBigEndian())
       // The odd parts were reversed by getCopyToParts - unreverse them.
@@ -611,10 +621,11 @@
 static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                  SDValue Val, SDValue *Parts, unsigned NumParts,
                                  MVT PartVT, const Value *V,
-                                 bool IsABIRegCopy) {
+                                 Optional<CallingConv::ID> CallConv) {
   EVT ValueVT = Val.getValueType();
   assert(ValueVT.isVector() && "Not a vector");
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  const bool IsABIRegCopy = CallConv.hasValue();

   if (NumParts == 1) {
     EVT PartEVT = PartVT;
@@ -679,7 +690,9 @@
   unsigned NumRegs;
   if (IsABIRegCopy) {
     NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
-        *DAG.getContext(), ValueVT, IntermediateVT, NumIntermediates,
+        *DAG.getContext(),
+        CallConv.getValue(),
+        ValueVT, IntermediateVT, NumIntermediates,
         RegisterVT);
   } else {
     NumRegs =
@@ -720,7 +733,7 @@
     // If the register was not expanded, promote or copy the value,
     // as appropriate.
     for (unsigned i = 0; i != NumParts; ++i)
-      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V);
+      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
   } else if (NumParts > 0) {
     // If the intermediate type was expanded, split each the value into
     // legal parts.
@@ -729,29 +742,30 @@
            "Must expand into a divisible number of parts!");
     unsigned Factor = NumParts / NumIntermediates;
     for (unsigned i = 0; i != NumIntermediates; ++i)
-      getCopyToParts(DAG, DL, Ops[i], &Parts[i*Factor], Factor, PartVT, V);
+      getCopyToParts(DAG, DL, Ops[i], &Parts[i*Factor], Factor, PartVT, V,
+                     CallConv);
   }
 }

 RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
-                           EVT valuevt, bool IsABIMangledValue)
+                           EVT valuevt, Optional<CallingConv::ID> CC)
     : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
-      RegCount(1, regs.size()), IsABIMangled(IsABIMangledValue) {}
+      RegCount(1, regs.size()), CallConv(CC) {}

 RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                            const DataLayout &DL, unsigned Reg, Type *Ty,
-                           bool IsABIMangledValue) {
+                           Optional<CallingConv::ID> CC) {
   ComputeValueVTs(TLI, DL, Ty, ValueVTs);

-  IsABIMangled = IsABIMangledValue;
+  CallConv = CC;

   for (EVT ValueVT : ValueVTs) {
-    unsigned NumRegs = IsABIMangledValue
-                           ? TLI.getNumRegistersForCallingConv(Context, ValueVT)
-                           : TLI.getNumRegisters(Context, ValueVT);
-    MVT RegisterVT = IsABIMangledValue
-                         ? TLI.getRegisterTypeForCallingConv(Context, ValueVT)
-                         : TLI.getRegisterType(Context, ValueVT);
+    unsigned NumRegs = isABIMangled()
+        ? TLI.getNumRegistersForCallingConv(Context, CC.getValue(), ValueVT)
+        : TLI.getNumRegisters(Context, ValueVT);
+    MVT RegisterVT = isABIMangled()
+        ? TLI.getRegisterTypeForCallingConv(Context, CC.getValue(), ValueVT)
+        : TLI.getRegisterType(Context, ValueVT);
     for (unsigned i = 0; i != NumRegs; ++i)
       Regs.push_back(Reg + i);
     RegVTs.push_back(RegisterVT);
@@ -777,8 +791,9 @@
     // Copy the legal parts from the registers.
     EVT ValueVT = ValueVTs[Value];
     unsigned NumRegs = RegCount[Value];
-    MVT RegisterVT = IsABIMangled
-      ? TLI.getRegisterTypeForCallingConv(*DAG.getContext(), RegVTs[Value])
+    MVT RegisterVT = isABIMangled()
+      ? TLI.getRegisterTypeForCallingConv(*DAG.getContext(), CallConv.getValue(),
+                                          RegVTs[Value])
       : RegVTs[Value];

     Parts.resize(NumRegs);
@@ -855,7 +870,7 @@
     }

     Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
-                                     NumRegs, RegisterVT, ValueVT, V);
+                                     NumRegs, RegisterVT, ValueVT, V, CallConv);
     Part += NumRegs;
     Parts.clear();
   }
@@ -876,15 +891,16 @@
   for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
     unsigned NumParts = RegCount[Value];

-    MVT RegisterVT = IsABIMangled
-      ? TLI.getRegisterTypeForCallingConv(*DAG.getContext(), RegVTs[Value])
+    MVT RegisterVT = isABIMangled()
+      ? TLI.getRegisterTypeForCallingConv(*DAG.getContext(), CallConv.getValue(),
+                                          RegVTs[Value])
       : RegVTs[Value];

     if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
       ExtendKind = ISD::ZERO_EXTEND;

     getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
-                   &Parts[Part], NumParts, RegisterVT, V, ExtendKind);
+                   &Parts[Part], NumParts, RegisterVT, V, CallConv, ExtendKind);
     Part += NumParts;
   }
@@ -1176,7 +1192,8 @@
     unsigned InReg = It->second;

     RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
-                     DAG.getDataLayout(), InReg, Ty, isABIRegCopy(V));
+                     DAG.getDataLayout(), InReg, Ty,
+                     getABIRegCopyCC(V));
     SDValue Chain = DAG.getEntryNode();
     Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
                                  V);
@@ -1367,7 +1384,8 @@
       unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
       RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
-                       Inst->getType(), isABIRegCopy(V));
+                       Inst->getType(),
+                       getABIRegCopyCC(V));
       SDValue Chain = DAG.getEntryNode();
       return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
     }
@@ -1601,12 +1619,14 @@
         if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
           VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);

-        unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, VT);
-        MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, VT);
+        CallingConv::ID CC = F->getCallingConv();
+
+        unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
+        MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
         SmallVector<SDValue, 4> Parts(NumParts);
         getCopyToParts(DAG, getCurSDLoc(),
                        SDValue(RetOp.getNode(), RetOp.getResNo() + j),
-                       &Parts[0], NumParts, PartVT, &I, ExtendKind, true);
+                       &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);

         // 'inreg' on function refers to return value
         ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
@@ -4939,7 +4959,7 @@
   if (VMI != FuncInfo.ValueMap.end()) {
     const auto &TLI = DAG.getTargetLoweringInfo();
     RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
-                     V->getType(), isABIRegCopy(V));
+                     V->getType(), getABIRegCopyCC(V));
     if (RFV.occupiesMultipleRegs()) {
       unsigned Offset = 0;
       for (auto RegAndSize : RFV.getRegsAndSizes()) {
@@ -5290,7 +5310,7 @@
     // The PHI node may be split up into several MI PHI nodes (in
     // FunctionLoweringInfo::set).
     RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
-                     V->getType(), false);
+                     V->getType(), None);
     if (RFV.occupiesMultipleRegs()) {
       unsigned Offset = 0;
       unsigned BitsToDescribe = 0;
@@ -8230,7 +8250,7 @@
   }

   SmallVector<ISD::OutputArg, 4> Outs;
-  GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
+  GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);

   bool CanLowerReturn =
       this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
@@ -8274,9 +8294,9 @@
     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
       EVT VT = RetTys[I];
       MVT RegisterVT =
-          getRegisterTypeForCallingConv(CLI.RetTy->getContext(), VT);
+          getRegisterTypeForCallingConv(CLI.RetTy->getContext(), CLI.CallConv, VT);
       unsigned NumRegs =
-          getNumRegistersForCallingConv(CLI.RetTy->getContext(), VT);
+          getNumRegistersForCallingConv(CLI.RetTy->getContext(), CLI.CallConv, VT);
       for (unsigned i = 0; i != NumRegs; ++i) {
         ISD::InputArg MyFlags;
         MyFlags.VT = RegisterVT;
@@ -8385,9 +8405,9 @@
       Flags.setInConsecutiveRegs();
     Flags.setOrigAlign(OriginalAlignment);

-    MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(), VT);
+    MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(), CLI.CallConv, VT);
     unsigned NumParts =
-        getNumRegistersForCallingConv(CLI.RetTy->getContext(), VT);
+        getNumRegistersForCallingConv(CLI.RetTy->getContext(), CLI.CallConv, VT);
     SmallVector<SDValue, 4> Parts(NumParts);
     ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
@@ -8419,7 +8439,7 @@
     }

     getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT,
-                   CLI.CS.getInstruction(), ExtendKind, true);
+                   CLI.CS.getInstruction(), CLI.CallConv, ExtendKind);

     for (unsigned j = 0; j != NumParts; ++j) {
       // if it isn't first piece, alignment must be 1
@@ -8520,13 +8540,13 @@
     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
       EVT VT = RetTys[I];
       MVT RegisterVT =
-          getRegisterTypeForCallingConv(CLI.RetTy->getContext(), VT);
+          getRegisterTypeForCallingConv(CLI.RetTy->getContext(), CLI.CallConv, VT);
       unsigned NumRegs =
-          getNumRegistersForCallingConv(CLI.RetTy->getContext(), VT);
+          getNumRegistersForCallingConv(CLI.RetTy->getContext(), CLI.CallConv, VT);

       ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
                                               NumRegs, RegisterVT, VT, nullptr,
-                                              AssertOp, true));
+                                              CLI.CallConv, AssertOp));
       CurReg += NumRegs;
     }
@@ -8566,7 +8586,7 @@
   // notional registers required by the type.

   RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
-                   V->getType(), isABIRegCopy(V));
+                   V->getType(), getABIRegCopyCC(V));
   SDValue Chain = DAG.getEntryNode();

   ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
@@ -8880,9 +8900,11 @@
       Flags.setCopyElisionCandidate();

     MVT RegisterVT =
-        TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(), VT);
+        TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
+                                           F.getCallingConv(), VT);
     unsigned NumRegs =
-        TLI->getNumRegistersForCallingConv(*CurDAG->getContext(), VT);
+        TLI->getNumRegistersForCallingConv(*CurDAG->getContext(),
+                                           F.getCallingConv(), VT);
     for (unsigned i = 0; i != NumRegs; ++i) {
       ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
                             ArgNo, PartBase+i*RegisterVT.getStoreSize());
@@ -8938,7 +8960,8 @@
     MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
     Optional<ISD::NodeType> AssertOp = None;
     SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1,
-                                        RegVT, VT, nullptr, AssertOp);
+                                        RegVT, VT, nullptr, F.getCallingConv(),
+                                        AssertOp);

     MachineFunction& MF = SDB->DAG.getMachineFunction();
     MachineRegisterInfo& RegInfo = MF.getRegInfo();
@@ -8989,9 +9012,11 @@
     for (unsigned Val = 0; Val != NumValues; ++Val) {
       EVT VT = ValueVTs[Val];
       MVT PartVT =
-          TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(), VT);
+          TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
+                                             F.getCallingConv(), VT);
       unsigned NumParts =
-          TLI->getNumRegistersForCallingConv(*CurDAG->getContext(), VT);
+          TLI->getNumRegistersForCallingConv(*CurDAG->getContext(),
+                                             F.getCallingConv(), VT);

       // Even an apparant 'unused' swifterror argument needs to be returned. So
       // we do generate a copy for it that can be used on return from the
@@ -9004,8 +9029,9 @@
           AssertOp = ISD::AssertZext;

         ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
-                                             PartVT, VT, nullptr, AssertOp,
-                                             true));
+                                             PartVT, VT, nullptr,
+                                             F.getCallingConv(),
+                                             AssertOp));
       }

       i += NumParts;
Index: lib/CodeGen/SelectionDAG/StatepointLowering.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/StatepointLowering.cpp
+++ lib/CodeGen/SelectionDAG/StatepointLowering.cpp
@@ -861,7 +861,8 @@
     // completely and make statepoint call to return a tuple.
     unsigned Reg = FuncInfo.CreateRegs(RetTy);
     RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
-                     DAG.getDataLayout(), Reg, RetTy, true);
+                     DAG.getDataLayout(), Reg, RetTy,
+                     ISP.getCallSite().getCallingConv());
     SDValue Chain = DAG.getEntryNode();

     RFV.getCopyToRegs(ReturnValue, DAG, getCurSDLoc(), Chain, nullptr);
Index: lib/CodeGen/TargetLoweringBase.cpp
===================================================================
--- lib/CodeGen/TargetLoweringBase.cpp
+++ lib/CodeGen/TargetLoweringBase.cpp
@@ -1337,7 +1337,8 @@
 /// type of the given function.  This does not require a DAG or a return value,
 /// and is suitable for use before any DAGs for the function are constructed.
 /// TODO: Move this out of TargetLowering.cpp.
-void llvm::GetReturnInfo(Type *ReturnType, AttributeList attr,
+void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
+                         AttributeList attr,
                          SmallVectorImpl<ISD::OutputArg> &Outs,
                          const TargetLowering &TLI, const DataLayout &DL) {
   SmallVector<EVT, 4> ValueVTs;
@@ -1365,9 +1366,9 @@
     }

     unsigned NumParts =
-        TLI.getNumRegistersForCallingConv(ReturnType->getContext(), VT);
+        TLI.getNumRegistersForCallingConv(ReturnType->getContext(), CC, VT);
     MVT PartVT =
-        TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), VT);
+        TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), CC, VT);

     // 'inreg' on function refers to return value
     ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
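One subtlety in the SelectionDAGBuilder.cpp hunks above: the new Optional<CallingConv::ID> parameter is inserted ahead of the trailing defaulted parameters, so getCopyFromParts now takes CC before AssertOp and getCopyToParts takes CallConv before ExtendKind. Any caller that passed those positionally must be updated. Schematically (arguments abbreviated; pass None to keep the old non-ABI behaviour):

    // Before this patch:
    getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, ValueVT, V, AssertOp);
    getCopyToParts(DAG, DL, Val, Parts, NumParts, PartVT, V, ExtendKind);
    // After this patch:
    getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, ValueVT, V, CC, AssertOp);
    getCopyToParts(DAG, DL, Val, Parts, NumParts, PartVT, V, CC, ExtendKind);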
Index: lib/Target/AArch64/AArch64FastISel.cpp
===================================================================
--- lib/Target/AArch64/AArch64FastISel.cpp
+++ lib/Target/AArch64/AArch64FastISel.cpp
@@ -3774,7 +3774,7 @@
   if (Ret->getNumOperands() > 0) {
     CallingConv::ID CC = F.getCallingConv();
     SmallVector<ISD::OutputArg, 4> Outs;
-    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
+    GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);

     // Analyze operands of the call, assigning locations to each operand.
     SmallVector<CCValAssign, 16> ValLocs;
Index: lib/Target/AMDGPU/AMDGPUISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -907,6 +907,7 @@
   LLVMContext &Ctx = Fn.getParent()->getContext();
   const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
   const unsigned ExplicitOffset = ST.getExplicitKernelArgOffset(Fn);
+  CallingConv::ID CC = Fn.getCallingConv();

   unsigned MaxAlign = 1;
   uint64_t ExplicitArgOffset = 0;
@@ -941,9 +942,9 @@
       EVT ArgVT = ValueVTs[Value];
       EVT MemVT = ArgVT;
       MVT RegisterVT =
-          getRegisterTypeForCallingConv(Ctx, ArgVT);
+          getRegisterTypeForCallingConv(Ctx, CC, ArgVT);
       unsigned NumRegs =
-          getNumRegistersForCallingConv(Ctx, ArgVT);
+          getNumRegistersForCallingConv(Ctx, CC, ArgVT);

       if (!Subtarget->isAmdHsaOS() &&
           (ArgVT == MVT::i16 || ArgVT == MVT::i8 || ArgVT == MVT::f16)) {
Index: lib/Target/ARM/ARMFastISel.cpp
===================================================================
--- lib/Target/ARM/ARMFastISel.cpp
+++ lib/Target/ARM/ARMFastISel.cpp
@@ -2116,7 +2116,7 @@
   CallingConv::ID CC = F.getCallingConv();
   if (Ret->getNumOperands() > 0) {
     SmallVector<ISD::OutputArg, 4> Outs;
-    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
+    GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);

     // Analyze operands of the call, assigning locations to each operand.
     SmallVector<CCValAssign, 16> ValLocs;
Index: lib/Target/Mips/MipsCallLowering.cpp
===================================================================
--- lib/Target/Mips/MipsCallLowering.cpp
+++ lib/Target/Mips/MipsCallLowering.cpp
@@ -416,7 +416,8 @@
   for (auto &Arg : Args) {
     EVT VT = TLI.getValueType(DL, Arg.Ty);
-    MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(), VT);
+    MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(),
+                                                       F.getCallingConv(), VT);

     ISD::ArgFlagsTy Flags = Arg.Flags;
     Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL));
Index: lib/Target/Mips/MipsFastISel.cpp
===================================================================
--- lib/Target/Mips/MipsFastISel.cpp
+++ lib/Target/Mips/MipsFastISel.cpp
@@ -1662,7 +1662,7 @@
       return false;

     SmallVector<ISD::OutputArg, 4> Outs;
-    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
+    GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);

     // Analyze operands of the call, assigning locations to each operand.
     SmallVector<CCValAssign, 16> ValLocs;
Index: lib/Target/Mips/MipsISelLowering.h
===================================================================
--- lib/Target/Mips/MipsISelLowering.h
+++ lib/Target/Mips/MipsISelLowering.h
@@ -283,16 +283,18 @@
     /// Return the register type for a given MVT, ensuring vectors are treated
     /// as a series of gpr sized integers.
     MVT getRegisterTypeForCallingConv(LLVMContext &Context,
+                                      CallingConv::ID CC,
                                       EVT VT) const override;

     /// Return the number of registers for a given MVT, ensuring vectors are
     /// treated as a series of gpr sized integers.
     unsigned getNumRegistersForCallingConv(LLVMContext &Context,
+                                           CallingConv::ID CC,
                                            EVT VT) const override;

     /// Break down vectors to the correct number of gpr sized integers.
     unsigned getVectorTypeBreakdownForCallingConv(
-        LLVMContext &Context, EVT VT, EVT &IntermediateVT,
+        LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
         unsigned &NumIntermediates, MVT &RegisterVT) const override;

     /// Return the correct alignment for the current calling convention.
Index: lib/Target/Mips/MipsISelLowering.cpp
===================================================================
--- lib/Target/Mips/MipsISelLowering.cpp
+++ lib/Target/Mips/MipsISelLowering.cpp
@@ -111,6 +111,7 @@
 // The MIPS MSA ABI passes vector arguments in the integer register set.
 // The number of integer registers used is dependant on the ABI used.
 MVT MipsTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
+                                                      CallingConv::ID CC,
                                                       EVT VT) const {
   if (VT.isVector()) {
     if (Subtarget.isABI_O32()) {
@@ -123,6 +124,7 @@
 }

 unsigned MipsTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
+                                                           CallingConv::ID CC,
                                                            EVT VT) const {
   if (VT.isVector())
     return std::max((VT.getSizeInBits() / (Subtarget.isABI_O32() ? 32 : 64)),
@@ -131,10 +133,12 @@
 }

 unsigned MipsTargetLowering::getVectorTypeBreakdownForCallingConv(
-    LLVMContext &Context, EVT VT, EVT &IntermediateVT,
+    LLVMContext &Context,
+    CallingConv::ID CC,
+    EVT VT, EVT &IntermediateVT,
     unsigned &NumIntermediates, MVT &RegisterVT) const {
   // Break down vector types to either 2 i64s or 4 i32s.
-  RegisterVT = getRegisterTypeForCallingConv(Context, VT) ;
+  RegisterVT = getRegisterTypeForCallingConv(Context, CC, VT);
   IntermediateVT = RegisterVT;
   NumIntermediates = VT.getSizeInBits() < RegisterVT.getSizeInBits()
                          ? VT.getVectorNumElements()
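To make the Mips numbers concrete: the expression above splits a 128-bit MSA vector such as v4i32 into four i32 GPR copies under O32 and two i64 GPR copies under N32/N64. A self-contained check of the same arithmetic (illustrative only; msaNumRegs is a stand-in for the member function):

    #include <algorithm>
    #include <cassert>

    // Mirrors: std::max(VT.getSizeInBits() / (Subtarget.isABI_O32() ? 32 : 64), 1)
    static unsigned msaNumRegs(unsigned VectorBits, bool IsO32) {
      return std::max(VectorBits / (IsO32 ? 32u : 64u), 1u);
    }

    int main() {
      assert(msaNumRegs(128, /*IsO32=*/true) == 4);  // v4i32 -> four i32 copies
      assert(msaNumRegs(128, /*IsO32=*/false) == 2); // v4i32 -> two i64 copies
      assert(msaNumRegs(32, /*IsO32=*/false) == 1);  // never fewer than one reg
    }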
Index: lib/Target/PowerPC/PPCFastISel.cpp
===================================================================
--- lib/Target/PowerPC/PPCFastISel.cpp
+++ lib/Target/PowerPC/PPCFastISel.cpp
@@ -1621,7 +1621,7 @@
   if (Ret->getNumOperands() > 0) {
     SmallVector<ISD::OutputArg, 4> Outs;
-    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
+    GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);

     // Analyze operands of the call, assigning locations to each operand.
     SmallVector<CCValAssign, 16> ValLocs;
Index: lib/Target/X86/X86FastISel.cpp
===================================================================
--- lib/Target/X86/X86FastISel.cpp
+++ lib/Target/X86/X86FastISel.cpp
@@ -1193,7 +1193,7 @@
   if (Ret->getNumOperands() > 0) {
     SmallVector<ISD::OutputArg, 4> Outs;
-    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
+    GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);

     // Analyze operands of the call, assigning locations to each operand.
     SmallVector<CCValAssign, 16> ValLocs;
Index: lib/Target/X86/X86ISelLowering.h
===================================================================
--- lib/Target/X86/X86ISelLowering.h
+++ lib/Target/X86/X86ISelLowering.h
@@ -1095,9 +1095,11 @@
     LegalizeTypeAction getPreferredVectorAction(EVT VT) const override;

     MVT getRegisterTypeForCallingConv(LLVMContext &Context,
+                                      CallingConv::ID CC,
                                       EVT VT) const override;

     unsigned getNumRegistersForCallingConv(LLVMContext &Context,
+                                           CallingConv::ID CC,
                                            EVT VT) const override;

     bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -1800,17 +1800,19 @@
 }

 MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
+                                                     CallingConv::ID CC,
                                                      EVT VT) const {
   if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
     return MVT::v32i8;
-  return TargetLowering::getRegisterTypeForCallingConv(Context, VT);
+  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
 }

 unsigned X86TargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
+                                                          CallingConv::ID CC,
                                                           EVT VT) const {
   if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
     return 1;
-  return TargetLowering::getNumRegistersForCallingConv(Context, VT);
+  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
 }

 EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
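The X86 override keeps its pre-existing special case: with AVX-512 but without BWI, a v32i1 mask is passed as a single v32i8 register for every convention; the new CC parameter is accepted but only forwarded to the default for all other types. Schematically (illustrative values, not a test from this patch):

    // On an AVX512, no-BWI subtarget, for any CallingConv::ID CC:
    //   getRegisterTypeForCallingConv(Ctx, CC, MVT::v32i1) == MVT::v32i8
    //   getNumRegistersForCallingConv(Ctx, CC, MVT::v32i1) == 1
    // Every other type defers to the now CC-aware TargetLowering defaults.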