diff --git a/llvm/lib/Target/X86/X86ExpandPseudo.cpp b/llvm/lib/Target/X86/X86ExpandPseudo.cpp --- a/llvm/lib/Target/X86/X86ExpandPseudo.cpp +++ b/llvm/lib/Target/X86/X86ExpandPseudo.cpp @@ -62,7 +62,8 @@ private: void ExpandICallBranchFunnel(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI); - + void expandCALL_RVMARKER(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI); bool ExpandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI); bool ExpandMBB(MachineBasicBlock &MBB); @@ -186,6 +187,78 @@ JTMBB->erase(JTInst); } +void X86ExpandPseudo::expandCALL_RVMARKER(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI) { + // Expand CALL_RVMARKER pseudo to call instruction, followed by the special + //"movq %rax, %rdi" marker. + // TODO: Mark the sequence as bundle, to avoid passes moving other code + // in between. + MachineInstr &MI = *MBBI; + + MachineInstr *OriginalCall; + MachineOperand &CallTarget = MI.getOperand(1); + assert((CallTarget.isGlobal() || CallTarget.isReg()) && + "invalid operand for regular call"); + unsigned Opc = -1; + if (MI.getOpcode() == X86::CALL64m_RVMARKER) + Opc = X86::CALL64m; + else if (MI.getOpcode() == X86::CALL64r_RVMARKER) + Opc = X86::CALL64r; + else if (MI.getOpcode() == X86::CALL64pcrel32_RVMARKER) + Opc = X86::CALL64pcrel32; + else + llvm_unreachable("unexpected opcode"); + + OriginalCall = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc)).getInstr(); + unsigned OpStart = 1; + bool RAXImplicitDead = false; + for (; OpStart < MI.getNumOperands(); ++OpStart) { + MachineOperand &Op = MI.getOperand(OpStart); + // RAX may be 'implicit dead', if there are no other users of the return + // value. We introduce a new use, so change it to 'implicit def'. + if (Op.isReg() && Op.isImplicit() && Op.isDead() && + TRI->regsOverlap(Op.getReg(), X86::RAX)) { + Op.setIsDead(false); + Op.setIsDef(true); + RAXImplicitDead = true; + } + OriginalCall->addOperand(Op); + } + + // Emit marker "movq %rax, %rdi". 
%rdi is not callee-saved, so it cannot be + live across the earlier call. The call to the ObjC runtime function returns + the first argument, so the value of %rax is unchanged after the ObjC + runtime call. + auto *Marker = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(X86::MOV64rr)) + .addReg(X86::RDI, RegState::Define) + .addReg(X86::RAX) + .getInstr(); + if (MI.shouldUpdateCallSiteInfo()) + MBB.getParent()->moveCallSiteInfo(&MI, Marker); + + // Emit call to ObjC runtime. + unsigned RuntimeCallType = MI.getOperand(0).getImm(); + assert(RuntimeCallType <= 1 && "objc runtime call type must be 0 or 1"); + Module *M = MBB.getParent()->getFunction().getParent(); + auto &Context = M->getContext(); + auto *I8PtrTy = PointerType::get(IntegerType::get(Context, 8), 0); + FunctionCallee Fn = M->getOrInsertFunction( + RuntimeCallType == 0 ? "objc_retainAutoreleasedReturnValue" + : "objc_unsafeClaimAutoreleasedReturnValue", + FunctionType::get(I8PtrTy, {I8PtrTy}, false)); + const uint32_t *RegMask = + TRI->getCallPreservedMask(*MBB.getParent(), CallingConv::C); + BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(X86::CALL64pcrel32)) + .addGlobalAddress(cast<Function>(Fn.getCallee()), 0, 0) + .addRegMask(RegMask) + .addReg(X86::RAX, + RegState::Implicit | + (RAXImplicitDead ? (RegState::Dead | RegState::Define) + : RegState::Define)) + .getInstr(); + MI.eraseFromParent(); +} + /// If \p MBBI is a pseudo instruction, this method expands /// it to the corresponding (sequence of) actual instruction(s). /// \returns true if \p MBBI has been expanded. 
@@ -521,6 +594,11 @@ MI.setDesc(TII->get(X86::TILEZERO)); return true; } + case X86::CALL64pcrel32_RVMARKER: + case X86::CALL64r_RVMARKER: + case X86::CALL64m_RVMARKER: + expandCALL_RVMARKER(MBB, MBBI); + return true; } llvm_unreachable("Previous switch has a fallthrough?"); } diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h --- a/llvm/lib/Target/X86/X86ISelLowering.h +++ b/llvm/lib/Target/X86/X86ISelLowering.h @@ -76,6 +76,10 @@ /// Same as call except it adds the NoTrack prefix. NT_CALL, + // Pseudo for an ObjC call that gets emitted together with a special + // marker instruction. + CALL_RVMARKER, + /// X86 compare and logical compare instructions. CMP, FCMP, diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -28,6 +28,7 @@ #include "llvm/ADT/StringSwitch.h" #include "llvm/Analysis/BlockFrequencyInfo.h" #include "llvm/Analysis/EHPersonalities.h" +#include "llvm/Analysis/ObjCARCUtil.h" #include "llvm/Analysis/ProfileSummaryInfo.h" #include "llvm/Analysis/VectorUtils.h" #include "llvm/CodeGen/IntrinsicLowering.h" @@ -4430,9 +4431,27 @@ if (HasNoCfCheck && IsCFProtectionSupported && IsIndirectCall) { Chain = DAG.getNode(X86ISD::NT_CALL, dl, NodeTys, Ops); + } else if (CLI.CB && objcarc::hasAttachedCallOpBundle(CLI.CB)) { + // Calls with a "clang.arc.attachedcall" bundle are special. They should be + // expanded to the call, directly followed by a special marker sequence and + // a call to an ObjC library function. Use the CALL_RVMARKER to do that. + assert(!isTailCall && + "tail calls cannot be marked with clang.arc.attachedcall"); + assert(Is64Bit && "clang.arc.attachedcall is only supported in 64bit mode"); + + // Add target constant to select ObjC runtime call just before the call + // target. 
RuntimeCallType == 0 selects objc_retainAutoreleasedReturnValue, + RuntimeCallType == 1 selects objc_unsafeClaimAutoreleasedReturnValue when + expanding the pseudo. + unsigned RuntimeCallType = + objcarc::hasAttachedCallOpBundle(CLI.CB, true) ? 0 : 1; + Ops.insert(Ops.begin() + 1, + DAG.getTargetConstant(RuntimeCallType, dl, MVT::i32)); + Chain = DAG.getNode(X86ISD::CALL_RVMARKER, dl, NodeTys, Ops); } else { Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops); } + InFlag = Chain.getValue(1); DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge); DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo)); @@ -31285,6 +31304,7 @@ NODE_NAME_CASE(FLD) NODE_NAME_CASE(FST) NODE_NAME_CASE(CALL) + NODE_NAME_CASE(CALL_RVMARKER) NODE_NAME_CASE(BT) NODE_NAME_CASE(CMP) NODE_NAME_CASE(FCMP) diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td --- a/llvm/lib/Target/X86/X86InstrCompiler.td +++ b/llvm/lib/Target/X86/X86InstrCompiler.td @@ -1197,6 +1197,12 @@ def : Pat<(X86call (i64 texternalsym:$dst)), (CALL64pcrel32 texternalsym:$dst)>; +def : Pat<(X86call_rvmarker (timm:$sel), (i64 texternalsym:$dst)), + (CALL64pcrel32_RVMARKER timm:$sel, texternalsym:$dst)>; +def : Pat<(X86call_rvmarker (timm:$sel), (i64 tglobaladdr:$dst)), + (CALL64pcrel32_RVMARKER timm:$sel, tglobaladdr:$dst)>; + + // Tailcall stuff. The TCRETURN instructions execute after the epilog, so they // can never use callee-saved registers. That is the purpose of the GR64_TC // register classes. 
diff --git a/llvm/lib/Target/X86/X86InstrControl.td b/llvm/lib/Target/X86/X86InstrControl.td --- a/llvm/lib/Target/X86/X86InstrControl.td +++ b/llvm/lib/Target/X86/X86InstrControl.td @@ -415,6 +415,22 @@ } } +let isPseudo = 1, isCall = 1, isCodeGenOnly = 1, + Uses = [RSP, SSP], + SchedRW = [WriteJump] in { + def CALL64m_RVMARKER : + PseudoI<(outs), (ins i32imm:$sel, i64mem:$dst), [(X86call_rvmarker timm:$sel, (loadi64 addr:$dst))]>, + Requires<[In64BitMode]>; + + def CALL64r_RVMARKER : + PseudoI<(outs), (ins i32imm:$sel, GR64:$dst), [(X86call_rvmarker timm:$sel, GR64:$dst)]>, + Requires<[In64BitMode]>; + + def CALL64pcrel32_RVMARKER : + PseudoI<(outs), (ins i32imm:$sel, i64i32imm_brtarget:$dst), []>, + Requires<[In64BitMode]>; +} + // Conditional tail calls are similar to the above, but they are branches // rather than barriers, and they use EFLAGS. let isCall = 1, isTerminator = 1, isReturn = 1, isBranch = 1, diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td --- a/llvm/lib/Target/X86/X86InstrInfo.td +++ b/llvm/lib/Target/X86/X86InstrInfo.td @@ -204,6 +204,11 @@ [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue, SDNPVariadic]>; +def X86call_rvmarker : SDNode<"X86ISD::CALL_RVMARKER", SDT_X86Call, + [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue, + SDNPVariadic]>; + + def X86NoTrackCall : SDNode<"X86ISD::NT_CALL", SDT_X86Call, [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue, SDNPVariadic]>; diff --git a/llvm/test/CodeGen/X86/call-rv-marker.ll b/llvm/test/CodeGen/X86/call-rv-marker.ll --- a/llvm/test/CodeGen/X86/call-rv-marker.ll +++ b/llvm/test/CodeGen/X86/call-rv-marker.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=x86_64-apple-macosx -o - %s | FileCheck --check-prefix=CHECK %s +; RUN: llc -mtriple=x86_64-apple-macosx -verify-machineinstrs -o - %s | FileCheck --check-prefix=CHECK %s ; TODO: support marker generation with GlobalISel target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" @@ -23,16 +23,33 @@ @g = global i8* null, align 8 
@fptr = global i8* ()* null, align 8 -define i8* @rv_marker_1() { -; CHECK-LABEL: rv_marker_1: +define i8* @rv_marker_1_retain() { +; CHECK-LABEL: rv_marker_1_retain: ; CHECK: pushq %rax ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: callq _foo1 +; CHECK-NEXT: movq %rax, %rdi +; CHECK-NEXT: callq _objc_retainAutoreleasedReturnValue +; CHECK-NEXT: popq %rcx +; CHECK-NEXT: retq +; +entry: + %call = call i8* @foo1() [ "clang.arc.attachedcall"(i64 0) ] + ret i8* %call +} + +define i8* @rv_marker_1_claim() { +; CHECK-LABEL: rv_marker_1_claim: +; CHECK: pushq %rax +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: callq _foo1 +; CHECK-NEXT: movq %rax, %rdi +; CHECK-NEXT: callq _objc_unsafeClaimAutoreleasedReturnValue ; CHECK-NEXT: popq %rcx ; CHECK-NEXT: retq ; entry: - %call = call "rv_marker" i8* @foo1() + %call = call i8* @foo1() [ "clang.arc.attachedcall"(i64 1) ] ret i8* %call } @@ -45,13 +62,15 @@ ; CHECK-NEXT: adcl $0, %edi ; CHECK-NEXT: callq _foo0 ; CHECK-NEXT: movq %rax, %rdi +; CHECK-NEXT: callq _objc_retainAutoreleasedReturnValue +; CHECK-NEXT: movq %rax, %rdi ; CHECK-NEXT: popq %rax ; CHECK-NEXT: jmp _foo2 ; entry: %tobool.not = icmp eq i32 %c, 0 %.sink = select i1 %tobool.not, i32 2, i32 1 - %call1 = call "rv_marker" i8* @foo0(i32 %.sink) + %call1 = call i8* @foo0(i32 %.sink) [ "clang.arc.attachedcall"(i64 0) ] tail call void @foo2(i8* %call1) ret void } @@ -67,11 +86,13 @@ ; CHECK-NEXT: .cfi_offset %rbx, -24 ; CHECK-NEXT: .cfi_offset %r14, -16 ; CHECK-NEXT: callq _foo1 +; CHECK-NEXT: movq %rax, %rdi +; CHECK-NEXT: callq _objc_retainAutoreleasedReturnValue ; CHECK-NEXT: movq %rax, %rbx ; CHECK-NEXT: Ltmp0: ; entry: - %call = call "rv_marker" i8* @foo1() + %call = call i8* @foo1() [ "clang.arc.attachedcall"(i64 0) ] invoke void @objc_object(i8* %call) #5 to label %invoke.cont unwind label %lpad @@ -98,13 +119,15 @@ ; CHECK-NEXT: .cfi_offset %r14, -16 ; CHECK-NEXT: Ltmp3: ; CHECK-NEXT: callq _foo1 +; CHECK-NEXT: movq %rax, %rdi +; CHECK-NEXT: callq 
_objc_retainAutoreleasedReturnValue ; CHECK-NEXT: Ltmp4: ; entry: %s = alloca %struct.S, align 1 %0 = getelementptr inbounds %struct.S, %struct.S* %s, i64 0, i32 0 call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %0) #2 - %call = invoke "rv_marker" i8* @foo1() + %call = invoke i8* @foo1() [ "clang.arc.attachedcall"(i64 0) ] to label %invoke.cont unwind label %lpad invoke.cont: ; preds = %entry @@ -135,12 +158,16 @@ resume { i8*, i32 } %.pn } +; TODO: This should use "callq *_fptr(%rip)". define i8* @rv_marker_5_indirect_call() { ; CHECK-LABEL: rv_marker_5_indirect_call ; CHECK: pushq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset %rbx, -16 -; CHECK-NEXT: callq *_fptr(%rip) +; CHECK-NEXT: movq _fptr(%rip), %rax +; CHECK-NEXT: callq *%rax +; CHECK-NEXT: movq %rax, %rdi +; CHECK-NEXT: callq _objc_retainAutoreleasedReturnValue ; CHECK-NEXT: movq %rax, %rbx ; CHECK-NEXT: movq %rax, %rdi ; CHECK-NEXT: callq _foo2 @@ -149,13 +176,13 @@ ; CHECK-NEXT: retq ; entry: - %0 = load i8* ()*, i8* ()** @fptr, align 8 - %call = call "rv_marker" i8* %0() + %lv = load i8* ()*, i8* ()** @fptr, align 8 + %call = call i8* %lv() [ "clang.arc.attachedcall"(i64 0) ] tail call void @foo2(i8* %call) ret i8* %call } -declare void @foo(i64, i64, i64) +declare i8* @foo(i64, i64, i64) define void @rv_marker_multiarg(i64 %a, i64 %b, i64 %c) { ; CHECK-LABEL: rv_marker_multiarg @@ -165,11 +192,28 @@ ; CHECK-NEXT: movq %rdx, %rdi ; CHECK-NEXT: movq %rax, %rdx ; CHECK-NEXT: callq _foo +; CHECK-NEXT: movq %rax, %rdi +; CHECK-NEXT: callq _objc_retainAutoreleasedReturnValue ; CHECK-NEXT: popq %rax ; CHECK-NEXT: retq ; - call "rv_marker" void @foo(i64 %c, i64 %b, i64 %a) + %r = call i8* @foo(i64 %c, i64 %b, i64 %a) [ "clang.arc.attachedcall"(i64 0) ] ret void } +define void @test_nonlazybind() { +; CHECK-LABEL: _test_nonlazybind: +; CHECK: bb.0: +; CHECK-NEXT: pushq %rax +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: callq *_foo_nonlazybind@GOTPCREL(%rip) +; CHECK-NEXT: 
movq %rax, %rdi +; CHECK-NEXT: callq _objc_retainAutoreleasedReturnValue +; + %call1 = notail call i8* @foo_nonlazybind() [ "clang.arc.attachedcall"(i64 0) ] + ret void +} + +declare i8* @foo_nonlazybind() nonlazybind + declare i32 @__gxx_personality_v0(...) diff --git a/llvm/test/CodeGen/X86/expand-call-rvmarker.mir b/llvm/test/CodeGen/X86/expand-call-rvmarker.mir new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/X86/expand-call-rvmarker.mir @@ -0,0 +1,155 @@ +# RUN: llc -o - -run-pass=x86-pseudo -verify-machineinstrs %s | FileCheck %s + +--- | + target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" + target triple = "x86_64-apple-macosx11.0.0" + + declare i8* @objc_retainAutoreleasedReturnValue(i8*) + declare i8* @objc_unsafeClaimAutoreleasedReturnValue(i8*) + declare i8* @fn() + + define void @test_objc_retainAutoreleaseReturnedValue() { + ret void + } + + define void @test_objc_unsafeClaimAutoreleasedReturnValue() { + ret void + } + + define void @test_objc_unsafeClaimAutoreleasedReturnValue_2_args() { + ret void + } + + define void @test_ret_void() { + ret void + } + +... 
+--- +# CHECK-LABEL: name: test_objc_retainAutoreleaseReturnedValue +# CHECK: bb.0 +# CHECK-NEXT: frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp +# CHECK-NEXT: CFI_INSTRUCTION def_cfa_offset 16 +# CHECK-NEXT: CALL64pcrel32 @fn, csr_64, implicit $rsp, implicit $ssp, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax +# CHECK-NEXT: $rdi = MOV64rr $rax +# CHECK-NEXT: CALL64pcrel32 @objc_retainAutoreleasedReturnValue, csr_64, implicit $rsp, implicit $ssp, implicit-def $rax +# CHECK-NEXT: $rcx = frame-destroy POP64r implicit-def $rsp, implicit $rsp +# CHECK-NEXT: RETQ +# + +name: test_objc_retainAutoreleaseReturnedValue +alignment: 16 +tracksRegLiveness: true +frameInfo: + stackSize: 8 + offsetAdjustment: -8 + maxAlignment: 1 + adjustsStack: true + hasCalls: true + maxCallFrameSize: 0 +machineFunctionInfo: {} +body: | + bb.0 (%ir-block.0): + frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp + CFI_INSTRUCTION def_cfa_offset 16 + CALL64pcrel32_RVMARKER 0, @fn, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax + $rcx = frame-destroy POP64r implicit-def $rsp, implicit $rsp + RET 0, $rax + +... 
+--- +# CHECK-LABEL: name: test_objc_unsafeClaimAutoreleasedReturnValue +# CHECK: bb.0 +# CHECK-NEXT: frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp +# CHECK-NEXT: CFI_INSTRUCTION def_cfa_offset 16 +# CHECK-NEXT: CALL64pcrel32 @fn, csr_64, implicit $rsp, implicit $ssp, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax +# CHECK-NEXT: $rdi = MOV64rr $rax +# CHECK-NEXT: CALL64pcrel32 @objc_unsafeClaimAutoreleasedReturnValue, csr_64, implicit $rsp, implicit $ssp, implicit-def $rax +# CHECK-NEXT: $rcx = frame-destroy POP64r implicit-def $rsp, implicit $rsp +# CHECK-NEXT: RETQ +# +name: test_objc_unsafeClaimAutoreleasedReturnValue +alignment: 16 +tracksRegLiveness: true +frameInfo: + stackSize: 8 + offsetAdjustment: -8 + maxAlignment: 1 + adjustsStack: true + hasCalls: true + maxCallFrameSize: 0 +machineFunctionInfo: {} +body: | + bb.0 (%ir-block.0): + frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp + CFI_INSTRUCTION def_cfa_offset 16 + CALL64pcrel32_RVMARKER 1, @fn, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax + $rcx = frame-destroy POP64r implicit-def $rsp, implicit $rsp + RET 0, $rax + +... 
+--- +# CHECK-LABEL: name: test_objc_unsafeClaimAutoreleasedReturnValue_2_args +# CHECK: bb.0 +# CHECK: frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp +# CHECK-NEXT: $rax = MOV64rr $rdi +# CHECK-NEXT: $rdi = MOV64rr killed $rdx +# CHECK-NEXT: $rdx = MOV64rr killed $rax +# CHECK-NEXT: CALL64pcrel32 @fn, csr_64, implicit $rsp, implicit $ssp, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx, implicit-def $rsp, implicit-def $ssp, implicit-def $rax +# CHECK-NEXT: $rdi = MOV64rr $rax +# CHECK-NEXT: CALL64pcrel32 @objc_retainAutoreleasedReturnValue, csr_64, implicit $rsp, implicit $ssp, implicit-def dead $rax +# CHECK-NEXT: $rax = frame-destroy POP64r implicit-def $rsp, implicit $rsp +# CHECK-NEXT: RETQ +# +name: test_objc_unsafeClaimAutoreleasedReturnValue_2_args +alignment: 16 +tracksRegLiveness: true +frameInfo: + stackSize: 8 + offsetAdjustment: -8 + maxAlignment: 1 + adjustsStack: true + hasCalls: true + maxCallFrameSize: 0 +machineFunctionInfo: {} +body: | + bb.0 (%ir-block.0): + liveins: $rdi, $rdx, $rsi + + frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp + $rax = MOV64rr $rdi + $rdi = MOV64rr killed $rdx + $rdx = MOV64rr killed $rax + CALL64pcrel32_RVMARKER 0, @fn, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax + $rax = frame-destroy POP64r implicit-def $rsp, implicit $rsp + RET 0, $rax + +... 
+--- +# CHECK-LABEL: name: test_ret_void +# CHECK: bb.0 +# CHECK-NEXT: frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp +# CHECK-NEXT: CFI_INSTRUCTION def_cfa_offset 16 +# CHECK-NEXT: CALL64pcrel32 @fn, csr_64, implicit $rsp, implicit $ssp, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax +# CHECK-NEXT: $rdi = MOV64rr $rax +# CHECK-NEXT: CALL64pcrel32 @objc_retainAutoreleasedReturnValue, csr_64, implicit $rsp, implicit $ssp, implicit-def dead $rax +# CHECK-NEXT: $rax = frame-destroy POP64r implicit-def $rsp, implicit $rsp +# CHECK-NEXT: RETQ +# +name: test_ret_void +alignment: 16 +tracksRegLiveness: true +frameInfo: + stackSize: 8 + offsetAdjustment: -8 + maxAlignment: 1 + adjustsStack: true + hasCalls: true + maxCallFrameSize: 0 +machineFunctionInfo: {} +body: | + bb.0 (%ir-block.0): + frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp + CFI_INSTRUCTION def_cfa_offset 16 + CALL64pcrel32_RVMARKER 0, @fn, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax + $rax = frame-destroy POP64r implicit-def $rsp, implicit $rsp + RET 0