diff --git a/llvm/lib/CodeGen/TargetRegisterInfo.cpp b/llvm/lib/CodeGen/TargetRegisterInfo.cpp
--- a/llvm/lib/CodeGen/TargetRegisterInfo.cpp
+++ b/llvm/lib/CodeGen/TargetRegisterInfo.cpp
@@ -234,8 +234,9 @@
   // this physreg.
   const TargetRegisterClass *BestRC = nullptr;
   for (const TargetRegisterClass *RC : regclasses()) {
-    if ((!Ty.isValid() || isTypeLegalForClass(*RC, Ty)) && RC->contains(reg) &&
-        (!BestRC || BestRC->hasSubClass(RC)))
+    if ((!Ty.isValid() ||
+         (RC->isAllocatable() && isTypeLegalForClass(*RC, Ty))) &&
+        RC->contains(reg) && (!BestRC || BestRC->hasSubClass(RC)))
       BestRC = RC;
   }
 
diff --git a/llvm/lib/Target/RISCV/RISCVCallLowering.h b/llvm/lib/Target/RISCV/RISCVCallLowering.h
--- a/llvm/lib/Target/RISCV/RISCVCallLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVCallLowering.h
@@ -21,6 +21,8 @@
 namespace llvm {
 
 class RISCVTargetLowering;
+class MachineInstrBuilder;
+class MachineIRBuilder;
 
 class RISCVCallLowering : public CallLowering {
@@ -37,6 +39,27 @@
   bool lowerCall(MachineIRBuilder &MIRBuilder,
                  CallLoweringInfo &Info) const override;
+
+private:
+  bool lowerReturnVal(MachineIRBuilder &MIRBuilder, const Value *Val,
+                      ArrayRef<Register> VRegs, MachineInstrBuilder &Ret) const;
+
+  /// A function of this type is used to perform the value split action.
+  using SplitArgTy = std::function<void(ArrayRef<Register>, int)>;
+
+  template <typename T>
+  void setISDArgsForCallingConv(const Function &F, const ArgInfo &OrigArg,
+                                SmallVectorImpl<EVT> &SplitVTs,
+                                SmallVectorImpl<T> &ISDArgs, bool isRet) const;
+
+  void splitToValueTypes(const ArgInfo &OrigArg,
+                         SmallVectorImpl<ArgInfo> &SplitArgs,
+                         SmallVectorImpl<EVT> &SplitVTs, MachineFunction &MF,
+                         SplitArgTy PerformArgSplit) const;
+
+  template <typename T>
+  void updateArgLocInfo(SmallVectorImpl<CCValAssign> &ArgLocs,
+                        const SmallVectorImpl<T> &Arguments) const;
 };
 
 } // end namespace llvm
diff --git a/llvm/lib/Target/RISCV/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/RISCVCallLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVCallLowering.cpp
@@ -14,22 +14,100 @@
 #include "RISCVCallLowering.h"
 #include "RISCVISelLowering.h"
+#include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
 
 using namespace llvm;
 
+namespace {
+
+struct RISCVOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
+  RISCVOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
+                            MachineInstrBuilder MIB)
+      : OutgoingValueHandler(B, MRI), MIB(MIB) {}
+
+  MachineInstrBuilder MIB;
+
+  Register getStackAddress(uint64_t MemSize, int64_t Offset,
+                           MachinePointerInfo &MPO,
+                           ISD::ArgFlagsTy Flags) override {
+    llvm_unreachable("not implemented");
+  }
+
+  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
+                            MachinePointerInfo &MPO, CCValAssign &VA) override {
+    llvm_unreachable("not implemented");
+  }
+
+  void assignValueToReg(Register ValVReg, Register PhysReg,
+                        CCValAssign VA) override {
+    Register ExtReg = extendRegister(ValVReg, VA);
+    MIRBuilder.buildCopy(PhysReg, ExtReg);
+    MIB.addUse(PhysReg, RegState::Implicit);
+  }
+};
+
+} // namespace
+
 RISCVCallLowering::RISCVCallLowering(const RISCVTargetLowering &TLI)
     : CallLowering(&TLI) {}
 
+bool RISCVCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
+                                       const Value *Val,
+                                       ArrayRef<Register> VRegs,
+                                       MachineInstrBuilder &Ret) const {
+  if (!Val)
+    return true;
+
+  // TODO: Only integer, pointer and aggregate types are supported now.
+  if (!Val->getType()->isIntOrPtrTy() && !Val->getType()->isAggregateType())
+    return false;
+
+  MachineFunction &MF = MIRBuilder.getMF();
+  const Function &F = MF.getFunction();
+  const DataLayout &DL = MF.getDataLayout();
+  const RISCVTargetLowering &TLI = *getTLI<RISCVTargetLowering>();
+
+  SmallVector<EVT, 4> SplitEVTs;
+  ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
+  assert(VRegs.size() == SplitEVTs.size() &&
+         "For each split Type there should be exactly one VReg.");
+
+  ArgInfo OrigRetInfo(VRegs, Val->getType(), 0);
+  setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F);
+
+  SmallVector<ArgInfo, 8> SplitRetInfos;
+  splitToValueTypes(OrigRetInfo, SplitRetInfos, SplitEVTs, MF,
+                    [&](ArrayRef<Register> Regs, int SplitIdx) {
+                      MIRBuilder.buildUnmerge(Regs, VRegs[SplitIdx]);
+                    });
+
+  SmallVector<ISD::OutputArg, 8> Outs;
+  setISDArgsForCallingConv(F, OrigRetInfo, SplitEVTs, Outs, true);
+
+  SmallVector<CCValAssign, 16> ArgLocs;
+  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
+
+  TLI.analyzeOutputArgs(MF, CCInfo, Outs, /*IsRet=*/true, nullptr,
+                        F.getCallingConv() == CallingConv::Fast
+                            ? RISCV::CC_RISCV_FastCC
+                            : RISCV::CC_RISCV);
+  updateArgLocInfo(ArgLocs, Outs);
+
+  RISCVOutgoingValueHandler Handler(MIRBuilder, MF.getRegInfo(), Ret);
+  return handleAssignments(Handler, SplitRetInfos, CCInfo, ArgLocs, MIRBuilder);
+}
+
 bool RISCVCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                     const Value *Val, ArrayRef<Register> VRegs,
                                     FunctionLoweringInfo &FLI) const {
+  assert(!Val == VRegs.empty() && "Return value without a vreg");
+
   MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(RISCV::PseudoRET);
 
-  if (Val != nullptr) {
+  if (!lowerReturnVal(MIRBuilder, Val, VRegs, Ret))
     return false;
-  }
+
   MIRBuilder.insertInstr(Ret);
   return true;
 }
@@ -49,3 +127,107 @@
                                   CallLoweringInfo &Info) const {
   return false;
 }
+
+template <typename T>
+void RISCVCallLowering::setISDArgsForCallingConv(
+    const Function &F, const ArgInfo &OrigArg, SmallVectorImpl<EVT> &SplitVTs,
+    SmallVectorImpl<T> &ISDArgs, bool isRet) const {
+  const DataLayout &DL = F.getParent()->getDataLayout();
+  LLVMContext &Ctx = F.getContext();
+  CallingConv::ID CC = F.getCallingConv();
+  const RISCVTargetLowering &TLI = *getTLI<RISCVTargetLowering>();
+
+  for (unsigned i = 0, e = SplitVTs.size(); i != e; ++i) {
+    EVT VT = SplitVTs[i];
+    Type *SplitTy = VT.getTypeForEVT(Ctx);
+    MVT RegisterVT = TLI.getRegisterTypeForCallingConv(Ctx, CC, VT);
+    unsigned NumParts = TLI.getNumRegistersForCallingConv(Ctx, CC, VT);
+
+    for (unsigned j = 0; j < NumParts; ++j) {
+      auto Flags = OrigArg.Flags[0];
+
+      if (j == 0)
+        Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(SplitTy, DL));
+      else
+        Flags.setOrigAlign(Align(1));
+
+      ISDArgs.emplace_back(Flags, RegisterVT, VT, true, isRet ? 0 : i, 0);
+    }
+  }
+}
+
+void RISCVCallLowering::splitToValueTypes(const ArgInfo &OrigArg,
+                                          SmallVectorImpl<ArgInfo> &SplitArgs,
+                                          SmallVectorImpl<EVT> &SplitVTs,
+                                          MachineFunction &MF,
+                                          SplitArgTy PerformArgSplit) const {
+  const RISCVTargetLowering &TLI = *getTLI<RISCVTargetLowering>();
+  LLVMContext &Ctx = OrigArg.Ty->getContext();
+  CallingConv::ID CC = MF.getFunction().getCallingConv();
+  const DataLayout &DL = MF.getDataLayout();
+
+  // Create one ArgInfo for each virtual register in the original ArgInfo.
+  for (unsigned i = 0, e = SplitVTs.size(); i != e; ++i) {
+    EVT VT = SplitVTs[i];
+    Type *SplitTy = VT.getTypeForEVT(Ctx);
+    auto Flags = OrigArg.Flags[0];
+    Flags.setOrigAlign(Align(DL.getABITypeAlignment(SplitTy)));
+
+    unsigned NumParts = TLI.getNumRegistersForCallingConv(Ctx, CC, VT);
+    if (NumParts == 1) {
+      SplitArgs.emplace_back(OrigArg.Regs[i], VT.getTypeForEVT(Ctx), i, Flags,
+                             OrigArg.IsFixed);
+      continue;
+    }
+
+    SmallVector<Register, 8> SplitRegs;
+
+    EVT PartVT = TLI.getRegisterTypeForCallingConv(Ctx, CC, VT);
+    Type *PartTy = PartVT.getTypeForEVT(Ctx);
+
+    for (unsigned j = 0; j < NumParts; ++j) {
+      ArgInfo Info = ArgInfo{MF.getRegInfo().createGenericVirtualRegister(
+                                 getLLTForType(*PartTy, DL)),
+                             PartTy, i, Flags};
+      SplitArgs.push_back(Info);
+      SplitRegs.push_back(Info.Regs[0]);
+    }
+
+    PerformArgSplit(SplitRegs, i);
+  }
+}
+
+template <typename T>
+void RISCVCallLowering::updateArgLocInfo(
+    SmallVectorImpl<CCValAssign> &ArgLocs,
+    const SmallVectorImpl<T> &Arguments) const {
+  for (unsigned i = 0; i < ArgLocs.size(); ++i) {
+    const CCValAssign &VA = ArgLocs[i];
+    CCValAssign::LocInfo LocInfo = VA.getLocInfo();
+    // TODO: Does the LocInfo type for BCvt and Indirect need to be changed?
+    if (LocInfo == CCValAssign::BCvt || LocInfo == CCValAssign::Indirect)
+      continue;
+
+    if (Arguments[i].ArgVT.getSizeInBits() >= Arguments[i].VT.getSizeInBits()) {
+      assert(LocInfo == CCValAssign::Full && "Unexpected CCValAssign::LocInfo");
+      continue;
+    }
+
+    if (Arguments[i].Flags.isSExt())
+      LocInfo = CCValAssign::LocInfo::SExt;
+    else if (Arguments[i].Flags.isZExt())
+      LocInfo = CCValAssign::LocInfo::ZExt;
+    else
+      LocInfo = CCValAssign::LocInfo::AExt;
+
+    if (VA.isMemLoc())
+      ArgLocs[i] =
+          CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
+                              VA.getLocMemOffset(), VA.getLocVT(), LocInfo);
+    else
+      ArgLocs[i] = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
+                                       VA.getLocReg(), VA.getLocVT(), LocInfo);
+  }
+}
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -538,7 +538,6 @@
                                unsigned uid, MCContext &Ctx) const override;
 
-private:
   /// RISCVCCAssignFn - This target-specific function extends the default
   /// CCValAssign with additional information used to lower RISC-V calling
   /// conventions.
@@ -558,6 +557,7 @@
                          bool IsRet, CallLoweringInfo *CLI,
                          RISCVCCAssignFn Fn) const;
 
+private:
   SDValue getStaticTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
                            bool UseGOT) const;
   SDValue getDynamicTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG) const;
@@ -656,6 +656,22 @@
 namespace RISCV {
 // We use 64 bits as the known part in the scalable vector types.
 static constexpr unsigned RVVBitsPerBlock = 64;
+
+bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
+              MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
+              ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
+              bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
+              Optional<unsigned> FirstMaskArgument);
+
+bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
+                     MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
+                     ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
+                     bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
+                     Optional<unsigned> FirstMaskArgument);
+
+bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
+                  CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
+                  CCState &State);
 } // namespace RISCV
 
 namespace RISCVVIntrinsicsTable {
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8398,7 +8398,7 @@
 }
 
 // Implements the RISC-V calling convention. Returns true upon failure.
-static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
+bool RISCV::CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
@@ -8826,7 +8826,7 @@
 
 // FastCC has less than 1% performance improvement for some particular
 // benchmark. But theoretically, it may has benenfit for some cases.
-static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
+bool RISCV::CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
                             unsigned ValNo, MVT ValVT, MVT LocVT,
                             CCValAssign::LocInfo LocInfo,
                             ISD::ArgFlagsTy ArgFlags, CCState &State,
@@ -8928,9 +8928,9 @@
   return true; // CC didn't match.
 }
 
-static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
-                         CCValAssign::LocInfo LocInfo,
-                         ISD::ArgFlagsTy ArgFlags, CCState &State) {
+bool RISCV::CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
+                         CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
+                         CCState &State) {
 
   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
@@ -9018,11 +9018,11 @@
   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
 
   if (CallConv == CallingConv::GHC)
-    CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
+    CCInfo.AnalyzeFormalArguments(Ins, RISCV::CC_RISCV_GHC);
   else
     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
-                     CallConv == CallingConv::Fast ? CC_RISCV_FastCC
-                                                   : CC_RISCV);
+                     CallConv == CallingConv::Fast ? RISCV::CC_RISCV_FastCC
+                                                   : RISCV::CC_RISCV);
 
   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
     CCValAssign &VA = ArgLocs[i];
@@ -9230,11 +9230,11 @@
   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
 
   if (CallConv == CallingConv::GHC)
-    ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
+    ArgCCInfo.AnalyzeCallOperands(Outs, RISCV::CC_RISCV_GHC);
   else
     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
-                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
-                                                    : CC_RISCV);
+                      CallConv == CallingConv::Fast ? RISCV::CC_RISCV_FastCC
+                                                    : RISCV::CC_RISCV);
 
   // Check if it's really possible to do a tail call.
   if (IsTailCall)
@@ -9479,7 +9479,7 @@
   // Assign locations to each value returned by this call.
   SmallVector<CCValAssign, 16> RVLocs;
   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
-  analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
+  analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, RISCV::CC_RISCV);
 
   // Copy all of the result registers out of their specified physreg.
   for (auto &VA : RVLocs) {
@@ -9522,9 +9522,9 @@
     MVT VT = Outs[i].VT;
     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
-    if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
-                 ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
-                 *this, FirstMaskArgument))
+    if (RISCV::CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
+                        ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true,
+                        nullptr, *this, FirstMaskArgument))
       return false;
   }
   return true;
@@ -9547,7 +9547,7 @@
                  *DAG.getContext());
 
   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
-                    nullptr, CC_RISCV);
+                    nullptr, RISCV::CC_RISCV);
 
   if (CallConv == CallingConv::GHC && !RVLocs.empty())
     report_fatal_error("GHC functions return void only");
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator-calllowering.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator-calllowering.ll
deleted file mode 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator-calllowering.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV32I %s
-; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV64I %s
-
-define void @foo() {
-  ; RV32I-LABEL: name: foo
-  ; RV32I: bb.1.entry:
-  ; RV32I-NEXT:   PseudoRET
-
-  ; RV64I-LABEL: name: foo
-  ; RV64I: bb.1.entry:
-  ; RV64I-NEXT:   PseudoRET
-entry:
-  ret void
-}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/ret.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/ret.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/ret.ll
@@ -0,0 +1,143 @@
+; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
+
+define void @test_ret_void() {
+  ; RV32I-LABEL: name: test_ret_void
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   PseudoRET
+
+  ; RV64I-LABEL: name: test_ret_void
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define i8 @test_ret_i8() {
+  ; RV32I-LABEL: name: test_ret_i8
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   [[CST:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+  ; RV32I-NEXT:   [[AEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[CST]](s8)
+  ; RV32I-NEXT:   $x10 = COPY [[AEXT]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10
+
+  ; RV64I-LABEL: name: test_ret_i8
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   [[CST:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+  ; RV64I-NEXT:   [[AEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[CST]](s8)
+  ; RV64I-NEXT:   $x10 = COPY [[AEXT]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+entry:
+  ret i8 1
+}
+
+define zeroext i8 @test_ret_i8_zext() {
+  ; RV32I-LABEL: name: test_ret_i8_zext
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   [[CST:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+  ; RV32I-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[CST]](s8)
+  ; RV32I-NEXT:   $x10 = COPY [[ZEXT]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10
+
+  ; RV64I-LABEL: name: test_ret_i8_zext
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   [[CST:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+  ; RV64I-NEXT:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[CST]](s8)
+  ; RV64I-NEXT:   $x10 = COPY [[ZEXT]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+entry:
+  ret i8 1
+}
+
+define signext i16 @test_ret_i16_sext() {
+  ; RV32I-LABEL: name: test_ret_i16_sext
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   [[CST:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
+  ; RV32I-NEXT:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[CST]](s16)
+  ; RV32I-NEXT:   $x10 = COPY [[SEXT]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10
+
+  ; RV64I-LABEL: name: test_ret_i16_sext
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   [[CST:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
+  ; RV64I-NEXT:   [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[CST]](s16)
+  ; RV64I-NEXT:   $x10 = COPY [[SEXT]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+entry:
+  ret i16 1
+}
+
+define i32 @test_ret_i32() {
+  ; RV32I-LABEL: name: test_ret_i32
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   [[CST:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV32I-NEXT:   $x10 = COPY [[CST]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10
+
+  ; RV64I-LABEL: name: test_ret_i32
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   [[CST:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV64I-NEXT:   [[AEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[CST]](s32)
+  ; RV64I-NEXT:   $x10 = COPY [[AEXT]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+entry:
+  ret i32 1
+}
+
+define i64 @test_ret_i64() {
+  ; RV32I-LABEL: name: test_ret_i64
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967296
+  ; RV32I-NEXT:   [[CST1:%[0-9]+]]:_(s32), [[CST2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[CST]](s64)
+  ; RV32I-NEXT:   $x10 = COPY [[CST1]](s32)
+  ; RV32I-NEXT:   $x11 = COPY [[CST2]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10, implicit $x11
+
+  ; RV64I-LABEL: name: test_ret_i64
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967296
+  ; RV64I-NEXT:   $x10 = COPY [[CST]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+entry:
+  ret i64 4294967296
+}
+
+define i32* @test_ret_i32_ptr() {
+  ; RV32I-LABEL: name: test_ret_i32_ptr
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   [[UDEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
+  ; RV32I-NEXT:   $x10 = COPY [[UDEF]](p0)
+  ; RV32I-NEXT:   PseudoRET implicit $x10
+
+  ; RV64I-LABEL: name: test_ret_i32_ptr
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   [[UDEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
+  ; RV64I-NEXT:   $x10 = COPY [[UDEF]](p0)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+entry:
+  ret i32* undef
+}
+
+define [2 x i32] @test_ret_2xi32() {
+  ; RV32I-LABEL: name: test_ret_2xi32
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   [[CST1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV32I-NEXT:   [[CST2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; RV32I-NEXT:   $x10 = COPY [[CST1]](s32)
+  ; RV32I-NEXT:   $x11 = COPY [[CST2]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10, implicit $x11
+
+  ; RV64I-LABEL: name: test_ret_2xi32
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   [[CST1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV64I-NEXT:   [[CST2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; RV64I-NEXT:   [[AEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[CST1]](s32)
+  ; RV64I-NEXT:   [[AEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[CST2]](s32)
+  ; RV64I-NEXT:   $x10 = COPY [[AEXT1]](s64)
+  ; RV64I-NEXT:   $x11 = COPY [[AEXT2]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10, implicit $x11
+entry:
+  ret [2 x i32] [i32 1, i32 2]
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/calllowering-ret.ll b/llvm/test/CodeGen/RISCV/GlobalISel/ret-roundtrip.ll
rename from llvm/test/CodeGen/RISCV/GlobalISel/calllowering-ret.ll
rename to llvm/test/CodeGen/RISCV/GlobalISel/ret-roundtrip.ll