Index: llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
===================================================================
--- llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
+++ llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -244,6 +244,13 @@
   void setArgFlags(ArgInfo &Arg, unsigned OpIdx, const DataLayout &DL,
                    const FuncInfoTy &FuncInfo) const;
 
+  /// Break \p OrigArgInfo into one or more pieces the calling convention can
+  /// process, returned in \p SplitArgs. For example, this should break structs
+  /// down into individual fields.
+  void splitToValueTypes(const ArgInfo &OrigArgInfo,
+                         SmallVectorImpl<ArgInfo> &SplitArgs,
+                         const DataLayout &DL, CallingConv::ID CallConv) const;
+
   /// Generate instructions for packing \p SrcRegs into one big register
   /// corresponding to the aggregate type \p PackedTy.
   ///
Index: llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
===================================================================
--- llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -187,6 +187,43 @@
                                     const DataLayout &DL,
                                     const CallBase &FuncInfo) const;
 
+void CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
+                                     SmallVectorImpl<ArgInfo> &SplitArgs,
+                                     const DataLayout &DL,
+                                     CallingConv::ID CallConv) const {
+  LLVMContext &Ctx = OrigArg.Ty->getContext();
+
+  SmallVector<EVT, 4> SplitVTs;
+  SmallVector<uint64_t, 4> Offsets;
+  ComputeValueVTs(*TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);
+
+  if (SplitVTs.size() == 0)
+    return;
+
+  if (SplitVTs.size() == 1) {
+    // No splitting to do, but we want to replace the original type (e.g. [1 x
+    // double] -> double).
+    SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
+                           OrigArg.Flags[0], OrigArg.IsFixed);
+    return;
+  }
+
+  // Create one ArgInfo for each virtual register in the original ArgInfo.
+  assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");
+
+  bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
+      OrigArg.Ty, CallConv, false);
+  for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
+    Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
+    SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.Flags[0],
+                           OrigArg.IsFixed);
+    if (NeedsRegBlock)
+      SplitArgs.back().Flags[0].setInConsecutiveRegs();
+  }
+
+  SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
+}
+
 Register CallLowering::packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy,
                                 MachineIRBuilder &MIRBuilder) const {
   assert(SrcRegs.size() > 1 && "Nothing to pack");
Index: llvm/lib/Target/AArch64/GISel/AArch64CallLowering.h
===================================================================
--- llvm/lib/Target/AArch64/GISel/AArch64CallLowering.h
+++ llvm/lib/Target/AArch64/GISel/AArch64CallLowering.h
@@ -64,11 +64,6 @@
   using MemHandler =
       std::function<void(MachineIRBuilder &, int, CCValAssign &)>;
 
-  void splitToValueTypes(const ArgInfo &OrigArgInfo,
-                         SmallVectorImpl<ArgInfo> &SplitArgs,
-                         const DataLayout &DL, MachineRegisterInfo &MRI,
-                         CallingConv::ID CallConv) const;
-
   bool lowerTailCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
                      SmallVectorImpl<ArgInfo> &OutArgs) const;
 
Index: llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
+++ llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
@@ -285,43 +285,6 @@
   return CallConv == CallingConv::Fast && TailCallOpt;
 }
 
-void AArch64CallLowering::splitToValueTypes(
-    const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
-    const DataLayout &DL, MachineRegisterInfo &MRI, CallingConv::ID CallConv) const {
-  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
-  LLVMContext &Ctx = OrigArg.Ty->getContext();
-
-  SmallVector<EVT, 4> SplitVTs;
-  SmallVector<uint64_t, 4> Offsets;
-  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);
-
-  if (SplitVTs.size() == 0)
-    return;
-
-  if 
(SplitVTs.size() == 1) {
-    // No splitting to do, but we want to replace the original type (e.g. [1 x
-    // double] -> double).
-    SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
-                           OrigArg.Flags[0], OrigArg.IsFixed);
-    return;
-  }
-
-  // Create one ArgInfo for each virtual register in the original ArgInfo.
-  assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");
-
-  bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
-      OrigArg.Ty, CallConv, false);
-  for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
-    Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
-    SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.Flags[0],
-                           OrigArg.IsFixed);
-    if (NeedsRegBlock)
-      SplitArgs.back().Flags[0].setInConsecutiveRegs();
-  }
-
-  SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
-}
-
 bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                       const Value *Val,
                                       ArrayRef<Register> VRegs,
@@ -423,7 +386,7 @@
       // Reset the arg flags after modifying CurVReg.
       setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
     }
-    splitToValueTypes(CurArgInfo, SplitArgs, DL, MRI, CC);
+    splitToValueTypes(CurArgInfo, SplitArgs, DL, CC);
   }
 
   OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFn, AssignFn);
@@ -508,7 +471,7 @@
     ArgInfo OrigArg{VRegs[i], Arg.getType()};
     setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F);
 
-    splitToValueTypes(OrigArg, SplitArgs, DL, MRI, F.getCallingConv());
+    splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv());
     ++i;
   }
 
@@ -986,7 +949,7 @@
   SmallVector<ArgInfo, 8> OutArgs;
   for (auto &OrigArg : Info.OrigArgs) {
-    splitToValueTypes(OrigArg, OutArgs, DL, MRI, Info.CallConv);
+    splitToValueTypes(OrigArg, OutArgs, DL, Info.CallConv);
 
     // AAPCS requires that we zero-extend i1 to 8 bits by the caller.
if (OrigArg.Ty->isIntegerTy(1)) OutArgs.back().Flags[0].setZExt(); @@ -994,7 +957,7 @@ SmallVector InArgs; if (!Info.OrigRet.Ty->isVoidTy()) - splitToValueTypes(Info.OrigRet, InArgs, DL, MRI, Info.CallConv); + splitToValueTypes(Info.OrigRet, InArgs, DL, Info.CallConv); // If we can lower as a tail call, do that instead. bool CanTailCallOpt = Index: llvm/lib/Target/AMDGPU/AMDGPUCallLowering.h =================================================================== --- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.h +++ llvm/lib/Target/AMDGPU/AMDGPUCallLowering.h @@ -27,11 +27,6 @@ void lowerParameter(MachineIRBuilder &B, Type *ParamTy, uint64_t Offset, Align Alignment, Register DstReg) const; - - void splitToValueTypes(MachineIRBuilder &B, const ArgInfo &OrigArgInfo, - SmallVectorImpl &SplitArgs, - const DataLayout &DL, CallingConv::ID CallConv) const; - bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv, SmallVectorImpl &Outs, bool IsVarArg) const override; Index: llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp =================================================================== --- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp +++ llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp @@ -278,47 +278,6 @@ } } -// FIXME: This should move to generic code. -void AMDGPUCallLowering::splitToValueTypes(MachineIRBuilder &B, - const ArgInfo &OrigArg, - SmallVectorImpl &SplitArgs, - const DataLayout &DL, - CallingConv::ID CallConv) const { - const SITargetLowering &TLI = *getTLI(); - LLVMContext &Ctx = OrigArg.Ty->getContext(); - - SmallVector SplitVTs; - ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs); - - assert(OrigArg.Regs.size() == SplitVTs.size()); - - if (SplitVTs.size() == 0) - return; - - if (SplitVTs.size() == 1) { - // No splitting to do, but we want to replace the original type (e.g. [1 x - // double] -> double). 
- SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx), - OrigArg.Flags[0], OrigArg.IsFixed); - return; - } - - // Create one ArgInfo for each virtual register in the original ArgInfo. - assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch"); - - bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters( - OrigArg.Ty, CallConv, false); - for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) { - Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx); - SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.Flags[0], - OrigArg.IsFixed); - if (NeedsRegBlock) - SplitArgs.back().Flags[0].setInConsecutiveRegs(); - } - - SplitArgs.back().Flags[0].setInConsecutiveRegsLast(); -} - bool AMDGPUCallLowering::canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv, SmallVectorImpl &Outs, @@ -390,7 +349,7 @@ setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F); } - splitToValueTypes(B, RetInfo, SplitRetInfos, DL, CC); + splitToValueTypes(RetInfo, SplitRetInfos, DL, CC); } CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(CC, F.isVarArg()); @@ -705,7 +664,7 @@ const unsigned OrigArgIdx = Idx + AttributeList::FirstArgIndex; setArgFlags(OrigArg, OrigArgIdx, DL, F); - splitToValueTypes(B, OrigArg, SplitArgs, DL, CC); + splitToValueTypes(OrigArg, SplitArgs, DL, CC); ++Idx; } @@ -996,7 +955,7 @@ SmallVector OutArgs; for (auto &OrigArg : Info.OrigArgs) - splitToValueTypes(MIRBuilder, OrigArg, OutArgs, DL, Info.CallConv); + splitToValueTypes(OrigArg, OutArgs, DL, Info.CallConv); // If we can lower as a tail call, do that instead. 
bool CanTailCallOpt = false; @@ -1099,7 +1058,7 @@ insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs, Info.DemoteRegister, Info.DemoteStackIndex); } else if (!Info.OrigRet.Ty->isVoidTy()) { - splitToValueTypes(MIRBuilder, Info.OrigRet, InArgs, DL, Info.CallConv); + splitToValueTypes(Info.OrigRet, InArgs, DL, Info.CallConv); } // Make sure the raw argument copies are inserted before the marshalling to Index: llvm/lib/Target/ARM/ARMCallLowering.h =================================================================== --- llvm/lib/Target/ARM/ARMCallLowering.h +++ llvm/lib/Target/ARM/ARMCallLowering.h @@ -47,12 +47,6 @@ bool lowerReturnVal(MachineIRBuilder &MIRBuilder, const Value *Val, ArrayRef VRegs, MachineInstrBuilder &Ret) const; - - /// Split an argument into one or more arguments that the CC lowering can cope - /// with. - void splitToValueTypes(const ArgInfo &OrigArg, - SmallVectorImpl &SplitArgs, - MachineFunction &MF) const; }; } // end namespace llvm Index: llvm/lib/Target/ARM/ARMCallLowering.cpp =================================================================== --- llvm/lib/Target/ARM/ARMCallLowering.cpp +++ llvm/lib/Target/ARM/ARMCallLowering.cpp @@ -186,51 +186,6 @@ } // end anonymous namespace -void ARMCallLowering::splitToValueTypes(const ArgInfo &OrigArg, - SmallVectorImpl &SplitArgs, - MachineFunction &MF) const { - const ARMTargetLowering &TLI = *getTLI(); - LLVMContext &Ctx = OrigArg.Ty->getContext(); - const DataLayout &DL = MF.getDataLayout(); - const Function &F = MF.getFunction(); - - SmallVector SplitVTs; - ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, nullptr, nullptr, 0); - assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch"); - - if (SplitVTs.size() == 1) { - // Even if there is no splitting to do, we still want to replace the - // original type (e.g. pointer type -> integer). 
- auto Flags = OrigArg.Flags[0]; - Flags.setOrigAlign(DL.getABITypeAlign(OrigArg.Ty)); - SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx), - Flags, OrigArg.IsFixed); - return; - } - - // Create one ArgInfo for each virtual register. - for (unsigned i = 0, e = SplitVTs.size(); i != e; ++i) { - EVT SplitVT = SplitVTs[i]; - Type *SplitTy = SplitVT.getTypeForEVT(Ctx); - auto Flags = OrigArg.Flags[0]; - - Flags.setOrigAlign(DL.getABITypeAlign(SplitTy)); - - bool NeedsConsecutiveRegisters = - TLI.functionArgumentNeedsConsecutiveRegisters( - SplitTy, F.getCallingConv(), F.isVarArg()); - if (NeedsConsecutiveRegisters) { - Flags.setInConsecutiveRegs(); - if (i == e - 1) - Flags.setInConsecutiveRegsLast(); - } - - // FIXME: We also want to split SplitTy further. - Register PartReg = OrigArg.Regs[i]; - SplitArgs.emplace_back(PartReg, SplitTy, Flags, OrigArg.IsFixed); - } -} - /// Lower the return value for the already existing \p Ret. This assumes that /// \p MIRBuilder's insertion point is correct. 
bool ARMCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder, @@ -243,7 +198,7 @@ auto &MF = MIRBuilder.getMF(); const auto &F = MF.getFunction(); - auto DL = MF.getDataLayout(); + const auto &DL = MF.getDataLayout(); auto &TLI = *getTLI(); if (!isSupportedType(DL, TLI, Val->getType())) return false; @@ -252,7 +207,7 @@ setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F); SmallVector SplitRetInfos; - splitToValueTypes(OrigRetInfo, SplitRetInfos, MF); + splitToValueTypes(OrigRetInfo, SplitRetInfos, DL, F.getCallingConv()); CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv(), F.isVarArg()); @@ -430,7 +385,7 @@ auto &MF = MIRBuilder.getMF(); auto &MBB = MIRBuilder.getMBB(); - auto DL = MF.getDataLayout(); + const auto &DL = MF.getDataLayout(); for (auto &Arg : F.args()) { if (!isSupportedType(DL, TLI, Arg.getType())) @@ -451,7 +406,7 @@ ArgInfo OrigArgInfo(VRegs[Idx], Arg.getType()); setArgFlags(OrigArgInfo, Idx + AttributeList::FirstArgIndex, DL, F); - splitToValueTypes(OrigArgInfo, SplitArgInfos, MF); + splitToValueTypes(OrigArgInfo, SplitArgInfos, DL, F.getCallingConv()); Idx++; } @@ -548,7 +503,7 @@ if (Arg.Flags[0].isByVal()) return false; - splitToValueTypes(Arg, ArgInfos, MF); + splitToValueTypes(Arg, ArgInfos, DL, Info.CallConv); } auto ArgAssignFn = TLI.CCAssignFnForCall(Info.CallConv, Info.IsVarArg); @@ -565,7 +520,7 @@ return false; ArgInfos.clear(); - splitToValueTypes(Info.OrigRet, ArgInfos, MF); + splitToValueTypes(Info.OrigRet, ArgInfos, DL, Info.CallConv); auto RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv, Info.IsVarArg); CallReturnHandler RetHandler(MIRBuilder, MRI, MIB, RetAssignFn); if (!handleAssignments(MIRBuilder, ArgInfos, RetHandler, Info.CallConv, Index: llvm/lib/Target/Mips/MipsCallLowering.cpp =================================================================== --- llvm/lib/Target/Mips/MipsCallLowering.cpp +++ llvm/lib/Target/Mips/MipsCallLowering.cpp @@ -664,6 +664,7 @@ } } +// FIXME: This should be removed 
and the generic version used
 void MipsCallLowering::splitToValueTypes(
     const DataLayout &DL, const ArgInfo &OrigArg, unsigned OriginalIndex,
     SmallVectorImpl<ArgInfo> &SplitArgs,
Index: llvm/lib/Target/X86/X86CallLowering.cpp
===================================================================
--- llvm/lib/Target/X86/X86CallLowering.cpp
+++ llvm/lib/Target/X86/X86CallLowering.cpp
@@ -50,6 +50,7 @@
 X86CallLowering::X86CallLowering(const X86TargetLowering &TLI)
     : CallLowering(&TLI) {}
 
+// FIXME: This should be removed and the generic version used
 bool X86CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
                                         SmallVectorImpl<ArgInfo> &SplitArgs,
                                         const DataLayout &DL,