diff --git a/llvm/lib/Target/RISCV/RISCVCallLowering.h b/llvm/lib/Target/RISCV/RISCVCallLowering.h
--- a/llvm/lib/Target/RISCV/RISCVCallLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVCallLowering.h
@@ -50,7 +50,8 @@
   template <typename T>
   void setISDArgsForCallingConv(const Function &F, const ArgInfo &OrigArg,
                                 SmallVectorImpl<EVT> &SplitVTs,
-                                SmallVectorImpl<T> &ISDArgs, bool isRet) const;
+                                SmallVectorImpl<T> &ISDArgs, CallingConv::ID CC,
+                                bool isRet) const;

   void splitToValueTypes(const ArgInfo &OrigArg,
                          SmallVectorImpl<ArgInfo> &SplitArgs,
diff --git a/llvm/lib/Target/RISCV/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/RISCVCallLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVCallLowering.cpp
@@ -70,6 +70,21 @@
   }
 };

+struct RISCVCallReturnHandler : public RISCVIncomingValueHandler {
+  RISCVCallReturnHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
+                         MachineInstrBuilder &MIB)
+      : RISCVIncomingValueHandler(B, MRI), MIB(MIB) {}
+
+  MachineInstrBuilder MIB;
+
+  void assignValueToReg(Register ValVReg, Register PhysReg,
+                        CCValAssign VA) override {
+    // Copy argument received in physical register to desired VReg.
+    MIB.addDef(PhysReg, RegState::Implicit);
+    MIRBuilder.buildCopy(ValVReg, PhysReg);
+  }
+};
+
 } // namespace

 RISCVCallLowering::RISCVCallLowering(const RISCVTargetLowering &TLI)
@@ -106,7 +121,8 @@
   });

   SmallVector<ISD::OutputArg, 8> Outs;
-  setISDArgsForCallingConv(F, OrigRetInfo, SplitEVTs, Outs, true);
+  setISDArgsForCallingConv(F, OrigRetInfo, SplitEVTs, Outs, F.getCallingConv(),
+                           true);

   SmallVector<CCValAssign, 16> ArgLocs;
   CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
@@ -173,7 +189,8 @@
     assert(VRegs[Index].size() == SplitEVTs.size() &&
            "For each split Type there should be exactly one VReg.");

-    setISDArgsForCallingConv(F, AInfo, SplitEVTs, Ins, /*isRet=*/false);
+    setISDArgsForCallingConv(F, AInfo, SplitEVTs, Ins, F.getCallingConv(),
+                             /*isRet=*/false);

     // Handle any required merging from split value types - as indicated in
     // SplitEVTs - from physical registers into the desired VReg. ArgInfo
@@ -206,18 +223,128 @@
 bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                   CallLoweringInfo &Info) const {
-  return false;
+
+  MachineFunction &MF = MIRBuilder.getMF();
+  const Function &F = MF.getFunction();
+  const DataLayout &DL = MF.getDataLayout();
+  const RISCVTargetLowering &TLI = *getTLI<RISCVTargetLowering>();
+
+  // TODO: Support vararg functions.
+  if (Info.IsVarArg)
+    return false;
+
+  // TODO: Support all argument types.
+  for (auto &AInfo : Info.OrigArgs) {
+    if (AInfo.Ty->isIntegerTy())
+      continue;
+    if (AInfo.Ty->isPointerTy())
+      continue;
+    if (AInfo.Ty->isFloatingPointTy())
+      continue;
+    return false;
+  }
+
+  SmallVector<ArgInfo, 8> SplitArgInfos;
+  SmallVector<ISD::OutputArg, 8> Outs;
+  unsigned Index = 0;
+  for (auto &AInfo : Info.OrigArgs) {
+    SmallVector<EVT, 4> SplitEVTs;
+    ComputeValueVTs(TLI, DL, AInfo.Ty, SplitEVTs);
+    assert(AInfo.Regs.size() == SplitEVTs.size() &&
+           "For each split Type there should be exactly one VReg.");
+
+    setISDArgsForCallingConv(F, AInfo, SplitEVTs, Outs, Info.CallConv,
+                             /*isRet=*/false);
+
+    // Handle any required unmerging of split value types - as indicated in
+    // SplitEVTs - from a given VReg into physical registers. ArgInfo objects
+    // are constructed correspondingly and appended to SplitArgInfos.
+    splitToValueTypes(AInfo, SplitArgInfos, SplitEVTs, MF,
+                      [&](ArrayRef<Register> Regs, int SplitIdx) {
+                        MIRBuilder.buildUnmerge(Regs, AInfo.Regs[SplitIdx]);
+                      });
+
+    ++Index;
+  }
+
+  SmallVector<CCValAssign, 16> ArgLocs;
+  CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext());
+
+  TLI.analyzeOutputArgs(MF, CCInfo, Outs, /*IsRet=*/false, nullptr,
+                        Info.CallConv == CallingConv::Fast
+                            ? RISCV::CC_RISCV_FastCC
+                            : RISCV::CC_RISCV);
+  updateArgLocInfo(ArgLocs, Outs);
+
+  if (!Info.Callee.isReg())
+    Info.Callee.setTargetFlags(RISCVII::MO_CALL);
+
+  MachineInstrBuilder Call =
+      MIRBuilder
+          .buildInstrNoInsert(Info.Callee.isReg() ? RISCV::PseudoCALLIndirect
+                                                  : RISCV::PseudoCALL)
+          .add(Info.Callee);
+
+  RISCVOutgoingValueHandler ArgHandler(MIRBuilder, MF.getRegInfo(), Call);
+  if (!handleAssignments(ArgHandler, SplitArgInfos, CCInfo, ArgLocs,
+                         MIRBuilder))
+    return false;
+
+  MIRBuilder.insertInstr(Call);
+
+  if (Info.OrigRet.Ty->isVoidTy())
+    return true;
+
+  // TODO: Only integer, pointer and aggregate types are supported now.
+  if (!Info.OrigRet.Ty->isIntOrPtrTy() && !Info.OrigRet.Ty->isAggregateType())
+    return false;
+
+  SmallVector<EVT, 4> SplitRetEVTs;
+  ComputeValueVTs(TLI, DL, Info.OrigRet.Ty, SplitRetEVTs);
+  assert(Info.OrigRet.Regs.size() == SplitRetEVTs.size() &&
+         "For each split Type there should be exactly one VReg.");
+
+  SmallVector<ArgInfo, 4> SplitRetInfos;
+  splitToValueTypes(Info.OrigRet, SplitRetInfos, SplitRetEVTs, MF,
+                    [&](ArrayRef<Register> Regs, int SplitIdx) {
+                      MIRBuilder.buildMerge(Info.OrigRet.Regs[SplitIdx], Regs);
+                    });
+
+  SmallVector<ISD::InputArg, 8> Ins;
+  setISDArgsForCallingConv(F, Info.OrigRet, SplitRetEVTs, Ins, Info.CallConv,
+                           /*isRet=*/true);
+
+  SmallVector<CCValAssign, 16> RVLocs;
+  CCState RetCCInfo(Info.CallConv, Info.IsVarArg, MF, RVLocs, F.getContext());
+
+  TLI.analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true,
+                       Info.CallConv == CallingConv::Fast
+                           ? RISCV::CC_RISCV_FastCC
+                           : RISCV::CC_RISCV);
+  updateArgLocInfo(RVLocs, Ins);
+
+  // Assignments should be handled *before* the merging of values takes place.
+  // To ensure this, the insert point is temporarily adjusted to just after the
+  // call instruction.
+  MachineBasicBlock::iterator CallInsertPt = Call;
+  MIRBuilder.setInsertPt(MIRBuilder.getMBB(), std::next(CallInsertPt));
+
+  RISCVCallReturnHandler Handler(MIRBuilder, MF.getRegInfo(), Call);
+  if (!handleAssignments(Handler, SplitRetInfos, RetCCInfo, RVLocs, MIRBuilder))
+    return false;
+
+  // Readjust insert point to end of basic block.
+  MIRBuilder.setMBB(MIRBuilder.getMBB());
+
+  return true;
 }

 template <typename T>
-void RISCVCallLowering::setISDArgsForCallingConv(const Function &F,
-                                                 const ArgInfo &OrigArg,
-                                                 SmallVectorImpl<EVT> &SplitVTs,
-                                                 SmallVectorImpl<T> &ISDArgs,
-                                                 bool isRet) const {
+void RISCVCallLowering::setISDArgsForCallingConv(
+    const Function &F, const ArgInfo &OrigArg, SmallVectorImpl<EVT> &SplitVTs,
+    SmallVectorImpl<T> &ISDArgs, CallingConv::ID CC, bool isRet) const {
   const DataLayout &DL = F.getParent()->getDataLayout();
   LLVMContext &Ctx = F.getContext();
-  CallingConv::ID CC = F.getCallingConv();
   const RISCVTargetLowering &TLI = *getTLI<RISCVTargetLowering>();

   for (unsigned i = 0, e = SplitVTs.size(); i != e; ++i) {
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calls.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calls.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calls.ll
@@ -0,0 +1,265 @@
+; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
+
+declare void @void_noargs()
+
+define void @test_call_void_noargs() {
+  ; RV32I-LABEL: name: test_call_void_noargs
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   PseudoCALL target-flags(riscv-call) @void_noargs, implicit-def $x1
+  ; RV32I-NEXT:   PseudoRET
+
+  ; RV64I-LABEL: name: test_call_void_noargs
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @void_noargs, implicit-def $x1
+  ; RV64I-NEXT:   PseudoRET
+entry:
+  call void @void_noargs()
+  ret void
+}
+
+declare void @void_args_i8(i8, i8)
+
+define void @test_call_void_args_i8() {
+  ; RV32I-LABEL: name: test_call_void_args_i8
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   [[CST1:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+  ; RV32I-NEXT:   [[CST2:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+  ; RV32I-NEXT:   [[AEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[CST1]](s8)
+  ; RV32I-NEXT:   [[AEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[CST2]](s8)
+  ; RV32I-NEXT:   $x10 = COPY [[AEXT1]](s32)
+  ; RV32I-NEXT:   $x11 = COPY [[AEXT2]](s32)
+  ; RV32I-NEXT:   PseudoCALL target-flags(riscv-call) @void_args_i8, implicit-def $x1, implicit $x10, implicit $x11
+  ; RV32I-NEXT:   PseudoRET
+
+  ; RV64I-LABEL: name: test_call_void_args_i8
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   [[CST1:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+  ; RV64I-NEXT:   [[CST2:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+  ; RV64I-NEXT:   [[AEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[CST1]](s8)
+  ; RV64I-NEXT:   [[AEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[CST2]](s8)
+  ; RV64I-NEXT:   $x10 = COPY [[AEXT1]](s64)
+  ; RV64I-NEXT:   $x11 = COPY [[AEXT2]](s64)
+  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @void_args_i8, implicit-def $x1, implicit $x10, implicit $x11
+  ; RV64I-NEXT:   PseudoRET
+entry:
+  call void @void_args_i8(i8 0, i8 1)
+  ret void
+}
+
+declare void @void_args_i8_zext(i8 zeroext, i8 zeroext)
+
+define void @test_call_void_args_i8_zext() {
+  ; RV32I-LABEL: name: test_call_void_args_i8_zext
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   [[CST1:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+  ; RV32I-NEXT:   [[CST2:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+  ; RV32I-NEXT:   [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[CST1]](s8)
+  ; RV32I-NEXT:   [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[CST2]](s8)
+  ; RV32I-NEXT:   $x10 = COPY [[ZEXT1]](s32)
+  ; RV32I-NEXT:   $x11 = COPY [[ZEXT2]](s32)
+  ; RV32I-NEXT:   PseudoCALL target-flags(riscv-call) @void_args_i8_zext, implicit-def $x1, implicit $x10, implicit $x11
+  ; RV32I-NEXT:   PseudoRET
+
+  ; RV64I-LABEL: name: test_call_void_args_i8_zext
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   [[CST1:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+  ; RV64I-NEXT:   [[CST2:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+  ; RV64I-NEXT:   [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[CST1]](s8)
+  ; RV64I-NEXT:   [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[CST2]](s8)
+  ; RV64I-NEXT:   $x10 = COPY [[ZEXT1]](s64)
+  ; RV64I-NEXT:   $x11 = COPY [[ZEXT2]](s64)
+  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @void_args_i8_zext, implicit-def $x1, implicit $x10, implicit $x11
+  ; RV64I-NEXT:   PseudoRET
+entry:
+  call void @void_args_i8_zext(i8 zeroext 0, i8 zeroext 1)
+  ret void
+}
+
+declare void @void_args_i16_sext(i16 signext, i16 signext)
+
+define void @test_call_void_args_i16_sext() {
+  ; RV32I-LABEL: name: test_call_void_args_i16_sext
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   [[CST1:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+  ; RV32I-NEXT:   [[CST2:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
+  ; RV32I-NEXT:   [[SEXT1:%[0-9]+]]:_(s32) = G_SEXT [[CST1]](s16)
+  ; RV32I-NEXT:   [[SEXT2:%[0-9]+]]:_(s32) = G_SEXT [[CST2]](s16)
+  ; RV32I-NEXT:   $x10 = COPY [[SEXT1]](s32)
+  ; RV32I-NEXT:   $x11 = COPY [[SEXT2]](s32)
+  ; RV32I-NEXT:   PseudoCALL target-flags(riscv-call) @void_args_i16_sext, implicit-def $x1, implicit $x10, implicit $x11
+  ; RV32I-NEXT:   PseudoRET
+
+  ; RV64I-LABEL: name: test_call_void_args_i16_sext
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   [[CST1:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+  ; RV64I-NEXT:   [[CST2:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
+  ; RV64I-NEXT:   [[SEXT1:%[0-9]+]]:_(s64) = G_SEXT [[CST1]](s16)
+  ; RV64I-NEXT:   [[SEXT2:%[0-9]+]]:_(s64) = G_SEXT [[CST2]](s16)
+  ; RV64I-NEXT:   $x10 = COPY [[SEXT1]](s64)
+  ; RV64I-NEXT:   $x11 = COPY [[SEXT2]](s64)
+  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @void_args_i16_sext, implicit-def $x1, implicit $x10, implicit $x11
+  ; RV64I-NEXT:   PseudoRET
+entry:
+  call void @void_args_i16_sext(i16 signext 0, i16 signext 1)
+  ret void
+}
+
+declare void @void_args_i32(i32, i32)
+
+define void @test_call_void_args_i32() {
+  ; RV32I-LABEL: name: test_call_void_args_i32
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   [[CST1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; RV32I-NEXT:   [[CST2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV32I-NEXT:   $x10 = COPY [[CST1]](s32)
+  ; RV32I-NEXT:   $x11 = COPY [[CST2]](s32)
+  ; RV32I-NEXT:   PseudoCALL target-flags(riscv-call) @void_args_i32, implicit-def $x1, implicit $x10, implicit $x11
+  ; RV32I-NEXT:   PseudoRET
+
+  ; RV64I-LABEL: name: test_call_void_args_i32
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   [[CST1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; RV64I-NEXT:   [[CST2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV64I-NEXT:   [[AEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[CST1]](s32)
+  ; RV64I-NEXT:   [[AEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[CST2]](s32)
+  ; RV64I-NEXT:   $x10 = COPY [[AEXT1]](s64)
+  ; RV64I-NEXT:   $x11 = COPY [[AEXT2]](s64)
+  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @void_args_i32, implicit-def $x1, implicit $x10, implicit $x11
+  ; RV64I-NEXT:   PseudoRET
+entry:
+  call void @void_args_i32(i32 0, i32 1)
+  ret void
+}
+
+declare void @void_args_i64(i64, i64)
+
+define void @test_call_void_args_i64() {
+  ; RV32I-LABEL: name: test_call_void_args_i64
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; RV32I-NEXT:   [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+  ; RV32I-NEXT:   [[CST1_1:%[0-9]+]]:_(s32), [[CST1_2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[CST1]](s64)
+  ; RV32I-NEXT:   [[CST2_1:%[0-9]+]]:_(s32), [[CST2_2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[CST2]](s64)
+  ; RV32I-NEXT:   $x10 = COPY [[CST1_1]](s32)
+  ; RV32I-NEXT:   $x11 = COPY [[CST1_2]](s32)
+  ; RV32I-NEXT:   $x12 = COPY [[CST2_1]](s32)
+  ; RV32I-NEXT:   $x13 = COPY [[CST2_2]](s32)
+  ; RV32I-NEXT:   PseudoCALL target-flags(riscv-call) @void_args_i64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13
+  ; RV32I-NEXT:   PseudoRET
+
+  ; RV64I-LABEL: name: test_call_void_args_i64
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; RV64I-NEXT:   [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+  ; RV64I-NEXT:   $x10 = COPY [[CST1]](s64)
+  ; RV64I-NEXT:   $x11 = COPY [[CST2]](s64)
+  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @void_args_i64, implicit-def $x1, implicit $x10, implicit $x11
+  ; RV64I-NEXT:   PseudoRET
+entry:
+  call void @void_args_i64(i64 0, i64 1)
+  ret void
+}
+
+declare i8 @i8_noargs()
+
+define void @test_call_i8_noargs() {
+  ; RV32I-LABEL: name: test_call_i8_noargs
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   PseudoCALL target-flags(riscv-call) @i8_noargs, implicit-def $x1
+  ; RV32I-NEXT:   [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   [[VREG:%[0-9]+]]:_(s8) = G_TRUNC [[VREG1]](s32)
+  ; RV32I-NEXT:   PseudoRET
+
+  ; RV64I-LABEL: name: test_call_i8_noargs
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @i8_noargs, implicit-def $x1
+  ; RV64I-NEXT:   [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[VREG:%[0-9]+]]:_(s8) = G_TRUNC [[VREG1]](s64)
+  ; RV64I-NEXT:   PseudoRET
+entry:
+  %a = call i8 @i8_noargs()
+  ret void
+}
+
+declare i16 @i16_noargs()
+
+define void @test_call_i16_noargs() {
+  ; RV32I-LABEL: name: test_call_i16_noargs
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   PseudoCALL target-flags(riscv-call) @i16_noargs, implicit-def $x1
+  ; RV32I-NEXT:   [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   [[VREG:%[0-9]+]]:_(s16) = G_TRUNC [[VREG1]](s32)
+  ; RV32I-NEXT:   PseudoRET
+
+  ; RV64I-LABEL: name: test_call_i16_noargs
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @i16_noargs, implicit-def $x1
+  ; RV64I-NEXT:   [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[VREG:%[0-9]+]]:_(s16) = G_TRUNC [[VREG1]](s64)
+  ; RV64I-NEXT:   PseudoRET
+entry:
+  %a = call i16 @i16_noargs()
+  ret void
+}
+
+declare i32 @i32_noargs()
+
+define void @test_call_i32_noargs() {
+  ; RV32I-LABEL: name: test_call_i32_noargs
+  ; RV32I: PseudoCALL target-flags(riscv-call) @i32_noargs, implicit-def $x1
+  ; RV32I-NEXT:   [[VREG:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   PseudoRET
+
+  ; RV64I-LABEL: name: test_call_i32_noargs
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @i32_noargs, implicit-def $x1
+  ; RV64I-NEXT:   [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[VREG:%[0-9]+]]:_(s32) = G_TRUNC [[VREG1]](s64)
+  ; RV64I-NEXT:   PseudoRET
+entry:
+  %a = call i32 @i32_noargs()
+  ret void
+}
+
+declare i64 @i64_noargs()
+
+define void @test_call_i64_noargs() {
+  ; RV32I-LABEL: name: test_call_i64_noargs
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   PseudoCALL target-flags(riscv-call) @i64_noargs, implicit-def $x1
+  ; RV32I-NEXT:   [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32I-NEXT:   [[VREG:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[VREG1]](s32), [[VREG2]](s32)
+  ; RV32I-NEXT:   PseudoRET
+
+  ; RV64I-LABEL: name: test_call_i64_noargs
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @i64_noargs, implicit-def $x1
+  ; RV64I-NEXT:   [[VREG:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   PseudoRET
+entry:
+  %a = call i64 @i64_noargs()
+  ret void
+}
+
+declare i32* @i32_ptr_noargs()
+
+define void @test_call_i32_ptr_noargs() {
+entry:
+  ; RV32I-LABEL: name: test_call_i32_ptr_noargs
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   PseudoCALL target-flags(riscv-call) @i32_ptr_noargs, implicit-def $x1
+  ; RV32I-NEXT:   [[VREG:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32I-NEXT:   PseudoRET
+
+  ; RV64I-LABEL: name: test_call_i32_ptr_noargs
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @i32_ptr_noargs, implicit-def $x1
+  ; RV64I-NEXT:   [[VREG:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64I-NEXT:   PseudoRET
+  %a = call i32* @i32_ptr_noargs()
+  ret void
+}