diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h
@@ -16,11 +16,12 @@
 
 #include "llvm/CodeGen/CallingConvLower.h"
 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
-#include "llvm/CodeGen/ValueTypes.h"
 
 namespace llvm {
 
 class RISCVTargetLowering;
+class MachineInstrBuilder;
+class MachineIRBuilder;
 
 class RISCVCallLowering : public CallLowering {
 
@@ -37,6 +38,10 @@
 
   bool lowerCall(MachineIRBuilder &MIRBuilder,
                  CallLoweringInfo &Info) const override;
+
+private:
+  bool lowerReturnVal(MachineIRBuilder &MIRBuilder, const Value *Val,
+                      ArrayRef<Register> VRegs, MachineInstrBuilder &Ret) const;
 };
 
 } // end namespace llvm
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -14,20 +14,114 @@
 
 #include "RISCVCallLowering.h"
 #include "RISCVISelLowering.h"
+#include "RISCVSubtarget.h"
+#include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
 
 using namespace llvm;
 
+namespace {
+
+struct RISCVOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
+private:
+  // The function used internally to assign args - we ignore the AssignFn stored
+  // by OutgoingValueAssigner since RISC-V implements its CC using a custom
+  // function with a different signature.
+  RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn;
+
+  // Whether this is assigning args for a return.
+  bool IsRet;
+
+public:
+  RISCVOutgoingValueAssigner(
+      RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet)
+      : CallLowering::OutgoingValueAssigner(nullptr),
+        RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet) {}
+
+  bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
+                 CCValAssign::LocInfo LocInfo,
+                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
+                 CCState &State) override {
+    MachineFunction &MF = State.getMachineFunction();
+    const DataLayout &DL = MF.getDataLayout();
+    const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
+
+    return RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
+                         LocInfo, Flags, State, /*IsFixed=*/true, IsRet,
+                         Info.Ty, *Subtarget.getTargetLowering(),
+                         /*FirstMaskArgument=*/std::nullopt);
+  }
+};
+
+struct RISCVOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
+  RISCVOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
+                            MachineInstrBuilder MIB)
+      : OutgoingValueHandler(B, MRI), MIB(MIB) {}
+
+  MachineInstrBuilder MIB;
+
+  Register getStackAddress(uint64_t MemSize, int64_t Offset,
+                           MachinePointerInfo &MPO,
+                           ISD::ArgFlagsTy Flags) override {
+    llvm_unreachable("not implemented");
+  }
+
+  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
+                            MachinePointerInfo &MPO, CCValAssign &VA) override {
+    llvm_unreachable("not implemented");
+  }
+
+  void assignValueToReg(Register ValVReg, Register PhysReg,
+                        CCValAssign VA) override {
+    Register ExtReg = extendRegister(ValVReg, VA);
+    MIRBuilder.buildCopy(PhysReg, ExtReg);
+    MIB.addUse(PhysReg, RegState::Implicit);
+  }
+};
+
+} // namespace
+
 RISCVCallLowering::RISCVCallLowering(const RISCVTargetLowering &TLI)
     : CallLowering(&TLI) {}
 
+bool RISCVCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
+                                       const Value *Val,
+                                       ArrayRef<Register> VRegs,
+                                       MachineInstrBuilder &Ret) const {
+  if (!Val)
+    return true;
+
+  // TODO: Only integer, pointer and aggregate types are supported now.
+  if (!Val->getType()->isIntOrPtrTy() && !Val->getType()->isAggregateType())
+    return false;
+
+  MachineFunction &MF = MIRBuilder.getMF();
+  const DataLayout &DL = MF.getDataLayout();
+  const Function &F = MF.getFunction();
+  CallingConv::ID CC = F.getCallingConv();
+
+  ArgInfo OrigRetInfo(VRegs, Val->getType(), 0);
+  setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F);
+
+  SmallVector<ArgInfo, 4> SplitRetInfos;
+  splitToValueTypes(OrigRetInfo, SplitRetInfos, DL, CC);
+
+  RISCVOutgoingValueAssigner Assigner(
+      CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
+      /*IsRet=*/true);
+  RISCVOutgoingValueHandler Handler(MIRBuilder, MF.getRegInfo(), Ret);
+  return determineAndHandleAssignments(Handler, Assigner, SplitRetInfos,
+                                       MIRBuilder, CC, F.isVarArg());
+}
+
 bool RISCVCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                     const Value *Val, ArrayRef<Register> VRegs,
                                     FunctionLoweringInfo &FLI) const {
-
+  assert(!Val == VRegs.empty() && "Return value without a vreg");
   MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(RISCV::PseudoRET);
 
-  if (Val != nullptr) {
+  if (!lowerReturnVal(MIRBuilder, Val, VRegs, Ret)) {
     return false;
   }
 
   MIRBuilder.insertInstr(Ret);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -674,7 +674,6 @@
   /// returns the address of that location. Otherwise, returns nullptr.
   Value *getIRStackGuard(IRBuilderBase &IRB) const override;
 
-private:
   /// RISCVCCAssignFn - This target-specific function extends the default
   /// CCValAssign with additional information used to lower RISC-V calling
   /// conventions.
@@ -686,6 +685,7 @@
                                const RISCVTargetLowering &TLI,
                                std::optional<unsigned> FirstMaskArgument);
 
+private:
   void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo,
                         const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
                         RISCVCCAssignFn Fn) const;
@@ -811,6 +811,26 @@
   /// faster than two FDIVs.
   unsigned combineRepeatedFPDivisors() const override;
 };
+
+namespace RISCV {
+
+bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
+              MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
+              ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
+              bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
+              std::optional<unsigned> FirstMaskArgument);
+
+bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
+                     MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
+                     ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
+                     bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
+                     std::optional<unsigned> FirstMaskArgument);
+
+bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
+                  CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
+                  CCState &State);
+} // end namespace RISCV
+
 namespace RISCVVIntrinsicsTable {
 
 struct RISCVVIntrinsicInfo {
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -12647,7 +12647,7 @@
 }
 
 // Implements the RISC-V calling convention. Returns true upon failure.
-static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
+bool RISCV::CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
@@ -13100,7 +13100,7 @@
 
 // FastCC has less than 1% performance improvement for some particular
 // benchmark. But theoretically, it may has benenfit for some cases.
-static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
+bool RISCV::CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
                             unsigned ValNo, MVT ValVT, MVT LocVT,
                             CCValAssign::LocInfo LocInfo,
                             ISD::ArgFlagsTy ArgFlags, CCState &State,
@@ -13202,7 +13202,7 @@
   return true; // CC didn't match.
 }
 
-static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
+bool RISCV::CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                          CCState &State) {
@@ -13296,11 +13296,11 @@
   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
 
   if (CallConv == CallingConv::GHC)
-    CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
+    CCInfo.AnalyzeFormalArguments(Ins, RISCV::CC_RISCV_GHC);
   else
     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
-                     CallConv == CallingConv::Fast ? CC_RISCV_FastCC
-                                                   : CC_RISCV);
+                     CallConv == CallingConv::Fast ? RISCV::CC_RISCV_FastCC
+                                                   : RISCV::CC_RISCV);
 
   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
     CCValAssign &VA = ArgLocs[i];
@@ -13501,11 +13501,11 @@
   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
 
   if (CallConv == CallingConv::GHC)
-    ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
+    ArgCCInfo.AnalyzeCallOperands(Outs, RISCV::CC_RISCV_GHC);
   else
     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
-                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
-                                                    : CC_RISCV);
+                      CallConv == CallingConv::Fast ? RISCV::CC_RISCV_FastCC
+                                                    : RISCV::CC_RISCV);
 
   // Check if it's really possible to do a tail call.
   if (IsTailCall)
@@ -13747,7 +13747,7 @@
   // Assign locations to each value returned by this call.
   SmallVector<CCValAssign, 16> RVLocs;
   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
-  analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
+  analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, RISCV::CC_RISCV);
 
   // Copy all of the result registers out of their specified physreg.
   for (auto &VA : RVLocs) {
@@ -13790,7 +13790,7 @@
     MVT VT = Outs[i].VT;
     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
-    if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
+    if (RISCV::CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
                  *this, FirstMaskArgument))
       return false;
@@ -13815,7 +13815,7 @@
                  *DAG.getContext());
 
   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
-                    nullptr, CC_RISCV);
+                    nullptr, RISCV::CC_RISCV);
 
   if (CallConv == CallingConv::GHC && !RVLocs.empty())
     report_fatal_error("GHC functions return void only");
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/ret.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/ret.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/ret.ll
@@ -0,0 +1,136 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
+
+define void @test_ret_void() {
+  ; RV32I-LABEL: name: test_ret_void
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT: PseudoRET
+  ; RV64I-LABEL: name: test_ret_void
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT: PseudoRET
+entry:
+  ret void
+}
+
+define i8 @test_ret_i8() {
+  ; RV32I-LABEL: name: test_ret_i8
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+  ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s8)
+  ; RV32I-NEXT: $x10 = COPY [[ANYEXT]](s32)
+  ; RV32I-NEXT: PseudoRET implicit $x10
+  ; RV64I-LABEL: name: test_ret_i8
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+  ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s8)
+  ; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
+  ; RV64I-NEXT: PseudoRET implicit $x10
+entry:
+  ret i8 1
+}
+
+define zeroext i8 @test_ret_i8_zext() {
+  ; RV32I-LABEL: name: test_ret_i8_zext
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+  ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[C]](s8)
+  ; RV32I-NEXT: $x10 = COPY [[ZEXT]](s32)
+  ; RV32I-NEXT: PseudoRET implicit $x10
+  ; RV64I-LABEL: name: test_ret_i8_zext
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+  ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s8)
+  ; RV64I-NEXT: $x10 = COPY [[ZEXT]](s64)
+  ; RV64I-NEXT: PseudoRET implicit $x10
+entry:
+  ret i8 1
+}
+
+define signext i16 @test_ret_i16_sext() {
+  ; RV32I-LABEL: name: test_ret_i16_sext
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
+  ; RV32I-NEXT: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[C]](s16)
+  ; RV32I-NEXT: $x10 = COPY [[SEXT]](s32)
+  ; RV32I-NEXT: PseudoRET implicit $x10
+  ; RV64I-LABEL: name: test_ret_i16_sext
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
+  ; RV64I-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[C]](s16)
+  ; RV64I-NEXT: $x10 = COPY [[SEXT]](s64)
+  ; RV64I-NEXT: PseudoRET implicit $x10
+entry:
+  ret i16 1
+}
+
+define i32 @test_ret_i32() {
+  ; RV32I-LABEL: name: test_ret_i32
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV32I-NEXT: $x10 = COPY [[C]](s32)
+  ; RV32I-NEXT: PseudoRET implicit $x10
+  ; RV64I-LABEL: name: test_ret_i32
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+  ; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
+  ; RV64I-NEXT: PseudoRET implicit $x10
+entry:
+  ret i32 1
+}
+
+define i64 @test_ret_i64() {
+  ; RV32I-LABEL: name: test_ret_i64
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967296
+  ; RV32I-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
+  ; RV32I-NEXT: $x10 = COPY [[UV]](s32)
+  ; RV32I-NEXT: $x11 = COPY [[UV1]](s32)
+  ; RV32I-NEXT: PseudoRET implicit $x10, implicit $x11
+  ; RV64I-LABEL: name: test_ret_i64
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967296
+  ; RV64I-NEXT: $x10 = COPY [[C]](s64)
+  ; RV64I-NEXT: PseudoRET implicit $x10
+entry:
+  ret i64 4294967296
+}
+
+define i32* @test_ret_i32_ptr() {
+  ; RV32I-LABEL: name: test_ret_i32_ptr
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
+  ; RV32I-NEXT: $x10 = COPY [[DEF]](p0)
+  ; RV32I-NEXT: PseudoRET implicit $x10
+  ; RV64I-LABEL: name: test_ret_i32_ptr
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
+  ; RV64I-NEXT: $x10 = COPY [[DEF]](p0)
+  ; RV64I-NEXT: PseudoRET implicit $x10
+entry:
+  ret i32* undef
+}
+
+define [2 x i32] @test_ret_2xi32() {
+  ; RV32I-LABEL: name: test_ret_2xi32
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV32I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; RV32I-NEXT: $x10 = COPY [[C]](s32)
+  ; RV32I-NEXT: $x11 = COPY [[C1]](s32)
+  ; RV32I-NEXT: PseudoRET implicit $x10, implicit $x11
+  ; RV64I-LABEL: name: test_ret_2xi32
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+  ; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+  ; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
+  ; RV64I-NEXT: $x11 = COPY [[ANYEXT1]](s64)
+  ; RV64I-NEXT: PseudoRET implicit $x10, implicit $x11
+entry:
+  ret [2 x i32] [i32 1, i32 2]
+}
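
Note (not part of the patch): a minimal IR sketch of a return type that the new
lowerReturnVal still rejects. A float return fails the
isIntOrPtrTy()/isAggregateType() guard behind the TODO above, so lowerReturnVal
(and hence lowerReturn) returns false and the GlobalISel pipeline falls back or
aborts, depending on -global-isel-abort. The function name is hypothetical.

define float @test_ret_f32_unsupported() {
entry:
  ret float 1.0
}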
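A similar hypothetical sketch mirroring test_ret_2xi32: literal structs also
satisfy isAggregateType(), so splitToValueTypes should split the two members and
assign them to $x10/$x11 the same way as the [2 x i32] return above. The test
name and expected behavior are assumptions, not autogenerated checks.

define { i32, i32 } @test_ret_struct_2xi32() {
entry:
  ret { i32, i32 } { i32 1, i32 2 }
}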