Index: llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
===================================================================
--- llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
+++ llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -57,6 +57,29 @@
       assert((Ty->isVoidTy() == (Regs.empty() || Regs[0] == 0)) &&
              "only void types should have no register");
     }
+
+    ArgInfo() : Ty(nullptr), IsFixed(false) {}
+  };
+
+  struct CallLoweringInfo {
+    /// Calling convention to be used for the call.
+    CallingConv::ID CallConv = CallingConv::C;
+
+    /// Destination of the call. It should be either a register, globaladdress,
+    /// or externalsymbol.
+    MachineOperand Callee = MachineOperand::CreateImm(0);
+
+    /// Descriptor for the return type of the function.
+    ArgInfo OrigRet;
+
+    /// List of descriptors of the arguments passed to the function.
+    SmallVector<ArgInfo, 8> OrigArgs;
+
+    /// Valid if the call has a swifterror inout parameter, and contains the
+    /// vreg that the swifterror should be copied into after the call.
+    Register SwiftErrorVReg = 0;
+
+    MDNode *KnownCallees = nullptr;
   };
 
   /// Argument handling is mostly uniform between the four places that
@@ -223,39 +246,10 @@
   /// This hook must be implemented to lower the given call instruction,
   /// including argument and return value marshalling.
   ///
-  /// \p CallConv is the calling convention to be used for the call.
-  ///
-  /// \p Callee is the destination of the call. It should be either a register,
-  /// globaladdress, or externalsymbol.
-  ///
-  /// \p OrigRet is a descriptor for the return type of the function.
-  ///
-  /// \p OrigArgs is a list of descriptors of the arguments passed to the
-  /// function.
-  ///
-  /// \p SwiftErrorVReg is non-zero if the call has a swifterror inout
-  /// parameter, and contains the vreg that the swifterror should be copied into
-  /// after the call.
   ///
   /// \return true if the lowering succeeded, false otherwise.
-  virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
-                         const MachineOperand &Callee, const ArgInfo &OrigRet,
-                         ArrayRef<ArgInfo> OrigArgs, Register SwiftErrorVReg,
-                         const MDNode *KnownCallees = nullptr) const {
-    if (!supportSwiftError()) {
-      assert(SwiftErrorVReg == 0 && "trying to use unsupported swifterror");
-      return lowerCall(MIRBuilder, CallConv, Callee, OrigRet, OrigArgs,
-                       KnownCallees);
-    }
-    return false;
-  }
-
-  /// This hook behaves as the extended lowerCall function, but for targets that
-  /// do not support swifterror value promotion.
-  virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
-                         const MachineOperand &Callee, const ArgInfo &OrigRet,
-                         ArrayRef<ArgInfo> OrigArgs,
-                         const MDNode *KnownCallees = nullptr) const {
+  virtual bool lowerCall(MachineIRBuilder &MIRBuilder,
+                         CallLoweringInfo &Info) const {
     return false;
   }
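
For reference, everything the old parameter list carried now travels in the single CallLoweringInfo struct above. Below is a minimal sketch of how a target override might consume it; the MyTargetCallLowering class and the MyTarget::CALLr / MyTarget::CALLi opcodes are placeholders for illustration only and are not part of this patch:

// Sketch only: MyTargetCallLowering and its opcodes are hypothetical.
bool MyTargetCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                     CallLoweringInfo &Info) const {
  // The calling convention and callee are read from the struct instead of
  // being passed as separate parameters.
  if (Info.CallConv != CallingConv::C)
    return false;

  // Direct vs. indirect call is still decided from the MachineOperand.
  auto MIB = MIRBuilder.buildInstrNoInsert(
      Info.Callee.isReg() ? MyTarget::CALLr : MyTarget::CALLi);
  MIB.add(Info.Callee);

  // Argument and return descriptors come from Info.OrigArgs / Info.OrigRet.
  for (const ArgInfo &Arg : Info.OrigArgs)
    if (!Arg.Ty->isIntegerTy())
      return false; // this sketch only handles plain integer arguments

  // SwiftErrorVReg is only non-zero when the call has a swifterror operand;
  // a target that does not support it can simply reject such calls.
  if (Info.SwiftErrorVReg && !supportSwiftError())
    return false;

  MIRBuilder.insertInstr(MIB);
  return true;
}
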
Index: llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
===================================================================
--- llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -33,37 +33,37 @@
                              ArrayRef<ArrayRef<Register>> ArgRegs,
                              Register SwiftErrorVReg,
                              std::function<unsigned()> GetCalleeReg) const {
+  CallLoweringInfo Info;
   auto &DL = CS.getParent()->getParent()->getParent()->getDataLayout();
 
   // First step is to marshall all the function's parameters into the correct
   // physregs and memory locations. Gather the sequence of argument types that
   // we'll pass to the assigner function.
-  SmallVector<ArgInfo, 8> OrigArgs;
   unsigned i = 0;
   unsigned NumFixedArgs = CS.getFunctionType()->getNumParams();
   for (auto &Arg : CS.args()) {
     ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{},
                     i < NumFixedArgs};
     setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CS);
-    OrigArgs.push_back(OrigArg);
+    Info.OrigArgs.push_back(OrigArg);
     ++i;
   }
 
-  MachineOperand Callee = MachineOperand::CreateImm(0);
   if (const Function *F = CS.getCalledFunction())
-    Callee = MachineOperand::CreateGA(F, 0);
+    Info.Callee = MachineOperand::CreateGA(F, 0);
   else
-    Callee = MachineOperand::CreateReg(GetCalleeReg(), false);
+    Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);
 
-  ArgInfo OrigRet{ResRegs, CS.getType(), ISD::ArgFlagsTy{}};
-  if (!OrigRet.Ty->isVoidTy())
-    setArgFlags(OrigRet, AttributeList::ReturnIndex, DL, CS);
+  Info.OrigRet = ArgInfo{ResRegs, CS.getType(), ISD::ArgFlagsTy{}};
+  if (!Info.OrigRet.Ty->isVoidTy())
+    setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CS);
 
-  const MDNode *KnownCallees =
+  Info.KnownCallees =
       CS.getInstruction()->getMetadata(LLVMContext::MD_callees);
+  Info.CallConv = CS.getCallingConv();
+  Info.SwiftErrorVReg = SwiftErrorVReg;
 
-  return lowerCall(MIRBuilder, CS.getCallingConv(), Callee, OrigRet, OrigArgs,
-                   SwiftErrorVReg, KnownCallees);
+  return lowerCall(MIRBuilder, Info);
 }
 
 template <typename FuncInfoTy>
Index: llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
===================================================================
--- llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -297,8 +297,13 @@
   const char *Name = TLI.getLibcallName(Libcall);
 
   MIRBuilder.getMF().getFrameInfo().setHasCalls(true);
-  if (!CLI.lowerCall(MIRBuilder, TLI.getLibcallCallingConv(Libcall),
-                     MachineOperand::CreateES(Name), Result, Args))
+
+  CallLowering::CallLoweringInfo Info;
+  Info.CallConv = TLI.getLibcallCallingConv(Libcall);
+  Info.Callee = MachineOperand::CreateES(Name);
+  Info.OrigRet = Result;
+  std::copy(Args.begin(), Args.end(), std::back_inserter(Info.OrigArgs));
+  if (!CLI.lowerCall(MIRBuilder, Info))
     return LegalizerHelper::UnableToLegalize;
 
   return LegalizerHelper::Legalized;
@@ -358,9 +363,13 @@
   MIRBuilder.setInstr(MI);
 
   MIRBuilder.getMF().getFrameInfo().setHasCalls(true);
-  if (!CLI.lowerCall(MIRBuilder, TLI.getLibcallCallingConv(RTLibcall),
-                     MachineOperand::CreateES(Name),
-                     CallLowering::ArgInfo({0}, Type::getVoidTy(Ctx)), Args))
+
+  CallLowering::CallLoweringInfo Info;
+  Info.CallConv = TLI.getLibcallCallingConv(RTLibcall);
+  Info.Callee = MachineOperand::CreateES(Name);
+  Info.OrigRet = CallLowering::ArgInfo({0}, Type::getVoidTy(Ctx));
+  std::copy(Args.begin(), Args.end(), std::back_inserter(Info.OrigArgs));
+  if (!CLI.lowerCall(MIRBuilder, Info))
     return LegalizerHelper::UnableToLegalize;
 
   return LegalizerHelper::Legalized;
Index: llvm/lib/Target/AArch64/AArch64CallLowering.h
===================================================================
--- llvm/lib/Target/AArch64/AArch64CallLowering.h
+++ llvm/lib/Target/AArch64/AArch64CallLowering.h
@@ -40,18 +40,8 @@
   bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
                             ArrayRef<ArrayRef<Register>> VRegs) const override;
 
-  bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
-                 const MachineOperand &Callee, const ArgInfo &OrigRet,
-                 ArrayRef<ArgInfo> OrigArgs, Register SwiftErrorVReg,
-                 const MDNode *KnownCallees = nullptr) const override;
-
-  bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
-                 const MachineOperand &Callee, const ArgInfo &OrigRet,
-                 ArrayRef<ArgInfo> OrigArgs,
-                 const MDNode *KnownCallees = nullptr) const override {
-    return lowerCall(MIRBuilder, CallConv, Callee, OrigRet, OrigArgs, 0,
-                     KnownCallees);
-  }
+  bool lowerCall(MachineIRBuilder &MIRBuilder,
+                 CallLoweringInfo &Info) const override;
 
   bool supportSwiftError() const override { return true; }
 
Index: llvm/lib/Target/AArch64/AArch64CallLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64CallLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64CallLowering.cpp
@@ -403,20 +403,15 @@
 }
 
 bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
-                                    CallingConv::ID CallConv,
-                                    const MachineOperand &Callee,
-                                    const ArgInfo &OrigRet,
-                                    ArrayRef<ArgInfo> OrigArgs,
-                                    Register SwiftErrorVReg,
-                                    const MDNode *KnownCallees) const {
+                                    CallLoweringInfo &Info) const {
   MachineFunction &MF = MIRBuilder.getMF();
   const Function &F = MF.getFunction();
   MachineRegisterInfo &MRI = MF.getRegInfo();
   auto &DL = F.getParent()->getDataLayout();
 
   SmallVector<ArgInfo, 8> SplitArgs;
-  for (auto &OrigArg : OrigArgs) {
-    splitToValueTypes(OrigArg, SplitArgs, DL, MRI, CallConv);
+  for (auto &OrigArg : Info.OrigArgs) {
+    splitToValueTypes(OrigArg, SplitArgs, DL, MRI, Info.CallConv);
     // AAPCS requires that we zero-extend i1 to 8 bits by the caller.
     if (OrigArg.Ty->isIntegerTy(1))
       SplitArgs.back().Flags.setZExt();
@@ -425,17 +420,17 @@
   // Find out which ABI gets to decide where things go.
   const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
   CCAssignFn *AssignFnFixed =
-      TLI.CCAssignFnForCall(CallConv, /*IsVarArg=*/false);
+      TLI.CCAssignFnForCall(Info.CallConv, /*IsVarArg=*/false);
   CCAssignFn *AssignFnVarArg =
-      TLI.CCAssignFnForCall(CallConv, /*IsVarArg=*/true);
+      TLI.CCAssignFnForCall(Info.CallConv, /*IsVarArg=*/true);
 
   auto CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);
 
   // Create a temporarily-floating call instruction so we can add the implicit
   // uses of arg registers.
-  auto MIB = MIRBuilder.buildInstrNoInsert(Callee.isReg() ? AArch64::BLR
-                                                          : AArch64::BL);
-  MIB.add(Callee);
+  auto MIB = MIRBuilder.buildInstrNoInsert(Info.Callee.isReg() ? AArch64::BLR
+                                                               : AArch64::BL);
+  MIB.add(Info.Callee);
 
   // Tell the call which registers are clobbered.
   auto TRI = MF.getSubtarget().getRegisterInfo();
@@ -460,28 +455,29 @@
   // If Callee is a reg, since it is used by a target specific
   // instruction, it must have a register class matching the
   // constraint of that instruction.
-  if (Callee.isReg())
+  if (Info.Callee.isReg())
     MIB->getOperand(0).setReg(constrainOperandRegClass(
         MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
-        *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Callee, 0));
+        *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Info.Callee,
+        0));
 
   // Finally we can copy the returned value back into its virtual-register. In
   // symmetry with the arguments, the physical register must be an
   // implicit-define of the call instruction.
   CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
-  if (!OrigRet.Ty->isVoidTy()) {
+  if (!Info.OrigRet.Ty->isVoidTy()) {
     SplitArgs.clear();
 
-    splitToValueTypes(OrigRet, SplitArgs, DL, MRI, F.getCallingConv());
+    splitToValueTypes(Info.OrigRet, SplitArgs, DL, MRI, F.getCallingConv());
 
     CallReturnHandler Handler(MIRBuilder, MRI, MIB, RetAssignFn);
     if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
       return false;
   }
 
-  if (SwiftErrorVReg) {
+  if (Info.SwiftErrorVReg) {
     MIB.addDef(AArch64::X21, RegState::Implicit);
-    MIRBuilder.buildCopy(SwiftErrorVReg, Register(AArch64::X21));
+    MIRBuilder.buildCopy(Info.SwiftErrorVReg, Register(AArch64::X21));
   }
 
   CallSeqStart.addImm(Handler.StackSize).addImm(0);
Index: llvm/lib/Target/ARM/ARMCallLowering.h
===================================================================
--- llvm/lib/Target/ARM/ARMCallLowering.h
+++ llvm/lib/Target/ARM/ARMCallLowering.h
@@ -38,10 +38,8 @@
   bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
                             ArrayRef<ArrayRef<Register>> VRegs) const override;
 
-  bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
-                 const MachineOperand &Callee, const ArgInfo &OrigRet,
-                 ArrayRef<ArgInfo> OrigArgs,
-                 const MDNode *KnownCallees = nullptr) const override;
+  bool lowerCall(MachineIRBuilder &MIRBuilder,
+                 CallLoweringInfo &Info) const override;
 
 private:
   bool lowerReturnVal(MachineIRBuilder &MIRBuilder, const Value *Val,
Index: llvm/lib/Target/ARM/ARMCallLowering.cpp
===================================================================
--- llvm/lib/Target/ARM/ARMCallLowering.cpp
+++ llvm/lib/Target/ARM/ARMCallLowering.cpp
@@ -499,12 +499,7 @@
 
 } // end anonymous namespace
 
-bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
-                                CallingConv::ID CallConv,
-                                const MachineOperand &Callee,
-                                const ArgInfo &OrigRet,
-                                ArrayRef<ArgInfo> OrigArgs,
-                                const MDNode *KnownCallees) const {
+bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
+                                CallLoweringInfo &Info) const {
   MachineFunction &MF = MIRBuilder.getMF();
   const auto &TLI = *getTLI<ARMTargetLowering>();
   const auto &DL = MF.getDataLayout();
@@ -522,7 +517,7 @@
 
   // Create the call instruction so we can add the implicit uses of arg
   // registers, but don't insert it yet.
-  bool IsDirect = !Callee.isReg();
+  bool IsDirect = !Info.Callee.isReg();
   auto CallOpcode = getCallOpcode(STI, IsDirect);
   auto MIB = MIRBuilder.buildInstrNoInsert(CallOpcode);
 
@@ -530,22 +525,22 @@
   if (IsThumb)
     MIB.add(predOps(ARMCC::AL));
 
-  MIB.add(Callee);
+  MIB.add(Info.Callee);
   if (!IsDirect) {
-    auto CalleeReg = Callee.getReg();
+    auto CalleeReg = Info.Callee.getReg();
     if (CalleeReg && !Register::isPhysicalRegister(CalleeReg)) {
       unsigned CalleeIdx = IsThumb ? 2 : 0;
      MIB->getOperand(CalleeIdx).setReg(constrainOperandRegClass(
          MF, *TRI, MRI, *STI.getInstrInfo(), *STI.getRegBankInfo(),
-          *MIB.getInstr(), MIB->getDesc(), Callee, CalleeIdx));
+          *MIB.getInstr(), MIB->getDesc(), Info.Callee, CalleeIdx));
     }
   }
 
-  MIB.addRegMask(TRI->getCallPreservedMask(MF, CallConv));
+  MIB.addRegMask(TRI->getCallPreservedMask(MF, Info.CallConv));
 
   bool IsVarArg = false;
   SmallVector<ArgInfo, 8> ArgInfos;
-  for (auto Arg : OrigArgs) {
+  for (auto Arg : Info.OrigArgs) {
     if (!isSupportedType(DL, TLI, Arg.Ty))
       return false;
 
@@ -558,7 +553,7 @@
     splitToValueTypes(Arg, ArgInfos, MF);
   }
 
-  auto ArgAssignFn = TLI.CCAssignFnForCall(CallConv, IsVarArg);
+  auto ArgAssignFn = TLI.CCAssignFnForCall(Info.CallConv, IsVarArg);
   OutgoingValueHandler ArgHandler(MIRBuilder, MRI, MIB, ArgAssignFn);
   if (!handleAssignments(MIRBuilder, ArgInfos, ArgHandler))
     return false;
@@ -566,13 +561,13 @@
   // Now we can add the actual call instruction to the correct basic block.
   MIRBuilder.insertInstr(MIB);
 
-  if (!OrigRet.Ty->isVoidTy()) {
-    if (!isSupportedType(DL, TLI, OrigRet.Ty))
+  if (!Info.OrigRet.Ty->isVoidTy()) {
+    if (!isSupportedType(DL, TLI, Info.OrigRet.Ty))
       return false;
 
     ArgInfos.clear();
-    splitToValueTypes(OrigRet, ArgInfos, MF);
-    auto RetAssignFn = TLI.CCAssignFnForReturn(CallConv, IsVarArg);
+    splitToValueTypes(Info.OrigRet, ArgInfos, MF);
+    auto RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv, IsVarArg);
     CallReturnHandler RetHandler(MIRBuilder, MRI, MIB, RetAssignFn);
     if (!handleAssignments(MIRBuilder, ArgInfos, RetHandler))
       return false;
Index: llvm/lib/Target/Mips/MipsCallLowering.h
===================================================================
--- llvm/lib/Target/Mips/MipsCallLowering.h
+++ llvm/lib/Target/Mips/MipsCallLowering.h
@@ -68,10 +68,8 @@
   bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
                             ArrayRef<ArrayRef<Register>> VRegs) const override;
 
-  bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
-                 const MachineOperand &Callee, const ArgInfo &OrigRet,
-                 ArrayRef<ArgInfo> OrigArgs,
-                 const MDNode *KnownCallees = nullptr) const override;
+  bool lowerCall(MachineIRBuilder &MIRBuilder,
+                 CallLoweringInfo &Info) const override;
 
 private:
   /// Based on registers available on target machine split or extend
Index: llvm/lib/Target/Mips/MipsCallLowering.cpp
===================================================================
--- llvm/lib/Target/Mips/MipsCallLowering.cpp
+++ llvm/lib/Target/Mips/MipsCallLowering.cpp
@@ -500,23 +500,19 @@
 }
 
 bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
-                                 CallingConv::ID CallConv,
-                                 const MachineOperand &Callee,
-                                 const ArgInfo &OrigRet,
-                                 ArrayRef<ArgInfo> OrigArgs,
-                                 const MDNode *KnownCallees) const {
+                                 CallLoweringInfo &Info) const {
 
-  if (CallConv != CallingConv::C)
+  if (Info.CallConv != CallingConv::C)
     return false;
 
-  for (auto &Arg : OrigArgs) {
+  for (auto &Arg : Info.OrigArgs) {
     if (!isSupportedType(Arg.Ty))
       return false;
     if (Arg.Flags.isByVal() || Arg.Flags.isSRet())
       return false;
   }
 
-  if (!OrigRet.Ty->isVoidTy() && !isSupportedType(OrigRet.Ty))
+  if (!Info.OrigRet.Ty->isVoidTy() && !isSupportedType(Info.OrigRet.Ty))
     return false;
 
   MachineFunction &MF = MIRBuilder.getMF();
@@ -530,31 +526,31 @@
   MIRBuilder.buildInstr(Mips::ADJCALLSTACKDOWN);
 
   const bool IsCalleeGlobalPIC =
-      Callee.isGlobal() && TM.isPositionIndependent();
+      Info.Callee.isGlobal() && TM.isPositionIndependent();
 
   MachineInstrBuilder MIB = MIRBuilder.buildInstrNoInsert(
-      Callee.isReg() || IsCalleeGlobalPIC ? Mips::JALRPseudo : Mips::JAL);
+      Info.Callee.isReg() || IsCalleeGlobalPIC ? Mips::JALRPseudo : Mips::JAL);
   MIB.addDef(Mips::SP, RegState::Implicit);
 
   if (IsCalleeGlobalPIC) {
     Register CalleeReg =
         MF.getRegInfo().createGenericVirtualRegister(LLT::pointer(0, 32));
     MachineInstr *CalleeGlobalValue =
-        MIRBuilder.buildGlobalValue(CalleeReg, Callee.getGlobal());
-    if (!Callee.getGlobal()->hasLocalLinkage())
+        MIRBuilder.buildGlobalValue(CalleeReg, Info.Callee.getGlobal());
+    if (!Info.Callee.getGlobal()->hasLocalLinkage())
       CalleeGlobalValue->getOperand(1).setTargetFlags(MipsII::MO_GOT_CALL);
     MIB.addUse(CalleeReg);
   } else
-    MIB.add(Callee);
+    MIB.add(Info.Callee);
 
   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
   MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv()));
 
   TargetLowering::ArgListTy FuncOrigArgs;
-  FuncOrigArgs.reserve(OrigArgs.size());
+  FuncOrigArgs.reserve(Info.OrigArgs.size());
 
   SmallVector<ArgInfo, 8> ArgInfos;
   SmallVector<unsigned, 8> OrigArgIndices;
   unsigned i = 0;
-  for (auto &Arg : OrigArgs) {
+  for (auto &Arg : Info.OrigArgs) {
 
     TargetLowering::ArgListEntry Entry;
     Entry.Ty = Arg.Ty;
@@ -571,8 +567,9 @@
 
   MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                      F.getContext());
 
-  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1);
-  const char *Call = Callee.isSymbol() ? Callee.getSymbolName() : nullptr;
+  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(Info.CallConv), 1);
+  const char *Call =
+      Info.Callee.isSymbol() ? Info.Callee.getSymbolName() : nullptr;
   CCInfo.AnalyzeCallOperands(Outs, TLI.CCAssignFnForCall(), FuncOrigArgs, Call);
   setLocInfo(ArgLocs, Outs);
@@ -601,11 +598,11 @@
                            *STI.getRegBankInfo());
   }
 
-  if (!OrigRet.Ty->isVoidTy()) {
+  if (!Info.OrigRet.Ty->isVoidTy()) {
     ArgInfos.clear();
     SmallVector<unsigned, 8> OrigRetIndices;
 
-    splitToValueTypes(OrigRet, 0, ArgInfos, OrigRetIndices);
+    splitToValueTypes(Info.OrigRet, 0, ArgInfos, OrigRetIndices);
 
     SmallVector<ISD::InputArg, 8> Ins;
     subTargetRegTypeForCallingConv(F, ArgInfos, OrigRetIndices, Ins);
@@ -614,7 +611,7 @@
     MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                        F.getContext());
 
-    CCInfo.AnalyzeCallResult(Ins, TLI.CCAssignFnForReturn(), OrigRet.Ty, Call);
+    CCInfo.AnalyzeCallResult(Ins, TLI.CCAssignFnForReturn(), Info.OrigRet.Ty, Call);
     setLocInfo(ArgLocs, Ins);
 
     CallReturnHandler Handler(MIRBuilder, MF.getRegInfo(), MIB);
Index: llvm/lib/Target/X86/X86CallLowering.h
===================================================================
--- llvm/lib/Target/X86/X86CallLowering.h
+++ llvm/lib/Target/X86/X86CallLowering.h
@@ -34,10 +34,8 @@
   bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
                             ArrayRef<ArrayRef<Register>> VRegs) const override;
 
-  bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
-                 const MachineOperand &Callee, const ArgInfo &OrigRet,
-                 ArrayRef<ArgInfo> OrigArgs,
-                 const MDNode *KnownCallees = nullptr) const override;
+  bool lowerCall(MachineIRBuilder &MIRBuilder,
+                 CallLoweringInfo &Info) const override;
 
 private:
   /// A function of this type is used to perform value split action.
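
As the header changes above show, every target now overrides the single struct-based hook; the X86 implementation below follows the same pattern. For callers outside the files touched here, the migration is mechanical. A hedged sketch of the caller side follows; the emitHelperCall name and its parameters are made up for illustration and are not part of this patch:

// Hypothetical helper, not part of this patch: lowers a call to an external
// symbol by filling in CallLoweringInfo instead of passing loose parameters.
static bool emitHelperCall(MachineIRBuilder &MIRBuilder,
                           const CallLowering &CLI, const char *SymName,
                           CallLowering::ArgInfo Result,
                           ArrayRef<CallLowering::ArgInfo> Args) {
  CallLowering::CallLoweringInfo Info;
  Info.CallConv = CallingConv::C;                  // was the CallConv parameter
  Info.Callee = MachineOperand::CreateES(SymName); // was the Callee parameter
  Info.OrigRet = Result;                           // was the OrigRet parameter
  Info.OrigArgs.append(Args.begin(), Args.end());  // was the OrigArgs ArrayRef
  // SwiftErrorVReg and KnownCallees keep their defaults (0 / nullptr).
  return CLI.lowerCall(MIRBuilder, Info);
}
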
Index: llvm/lib/Target/X86/X86CallLowering.cpp
===================================================================
--- llvm/lib/Target/X86/X86CallLowering.cpp
+++ llvm/lib/Target/X86/X86CallLowering.cpp
@@ -373,11 +373,7 @@
 }
 
 bool X86CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
-                                CallingConv::ID CallConv,
-                                const MachineOperand &Callee,
-                                const ArgInfo &OrigRet,
-                                ArrayRef<ArgInfo> OrigArgs,
-                                const MDNode *KnownCallees) const {
+                                CallLoweringInfo &Info) const {
   MachineFunction &MF = MIRBuilder.getMF();
   const Function &F = MF.getFunction();
   MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -387,8 +383,8 @@
   auto TRI = STI.getRegisterInfo();
 
   // Handle only Linux C, X86_64_SysV calling conventions for now.
-  if (!STI.isTargetLinux() ||
-      !(CallConv == CallingConv::C || CallConv == CallingConv::X86_64_SysV))
+  if (!STI.isTargetLinux() || !(Info.CallConv == CallingConv::C ||
+                                Info.CallConv == CallingConv::X86_64_SysV))
     return false;
 
   unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
@@ -397,15 +393,16 @@
   // Create a temporarily-floating call instruction so we can add the implicit
   // uses of arg registers.
   bool Is64Bit = STI.is64Bit();
-  unsigned CallOpc = Callee.isReg()
+  unsigned CallOpc = Info.Callee.isReg()
                          ? (Is64Bit ? X86::CALL64r : X86::CALL32r)
                          : (Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32);
 
-  auto MIB = MIRBuilder.buildInstrNoInsert(CallOpc).add(Callee).addRegMask(
-      TRI->getCallPreservedMask(MF, CallConv));
+  auto MIB = MIRBuilder.buildInstrNoInsert(CallOpc)
+                 .add(Info.Callee)
+                 .addRegMask(TRI->getCallPreservedMask(MF, Info.CallConv));
 
   SmallVector<ArgInfo, 8> SplitArgs;
-  for (const auto &OrigArg : OrigArgs) {
+  for (const auto &OrigArg : Info.OrigArgs) {
 
     // TODO: handle not simple cases.
     if (OrigArg.Flags.isByVal())
@@ -425,8 +422,8 @@
   if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
     return false;
 
-  bool IsFixed = OrigArgs.empty() ? true : OrigArgs.back().IsFixed;
-  if (STI.is64Bit() && !IsFixed && !STI.isCallingConvWin64(CallConv)) {
+  bool IsFixed = Info.OrigArgs.empty() ? true : Info.OrigArgs.back().IsFixed;
+  if (STI.is64Bit() && !IsFixed && !STI.isCallingConvWin64(Info.CallConv)) {
     // From AMD64 ABI document:
     // For calls that may call functions that use varargs or stdargs
     // (prototype-less calls or calls to functions containing ellipsis (...) in
@@ -447,23 +444,24 @@
   // If Callee is a reg, since it is used by a target specific
   // instruction, it must have a register class matching the
   // constraint of that instruction.
-  if (Callee.isReg())
+  if (Info.Callee.isReg())
     MIB->getOperand(0).setReg(constrainOperandRegClass(
         MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
-        *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Callee, 0));
+        *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Info.Callee,
+        0));
 
   // Finally we can copy the returned value back into its virtual-register. In
   // symmetry with the arguments, the physical register must be an
   // implicit-define of the call instruction.
-  if (!OrigRet.Ty->isVoidTy()) {
-    if (OrigRet.Regs.size() > 1)
+  if (!Info.OrigRet.Ty->isVoidTy()) {
+    if (Info.OrigRet.Regs.size() > 1)
       return false;
 
     SplitArgs.clear();
     SmallVector<Register, 8> NewRegs;
 
-    if (!splitToValueTypes(OrigRet, SplitArgs, DL, MRI,
+    if (!splitToValueTypes(Info.OrigRet, SplitArgs, DL, MRI,
                            [&](ArrayRef<Register> Regs) {
                              NewRegs.assign(Regs.begin(), Regs.end());
                            }))
@@ -474,7 +472,7 @@
       return false;
 
     if (!NewRegs.empty())
-      MIRBuilder.buildMerge(OrigRet.Regs[0], NewRegs);
+      MIRBuilder.buildMerge(Info.OrigRet.Regs[0], NewRegs);
   }
 
   CallSeqStart.addImm(Handler.getStackSize())