Index: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -3343,9 +3343,15 @@
     assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
   }
 
+  // We can omit callseq_start/callseq_end if there is no callframe to setup.
+  // Do not omit for patchpoints as SelectionDAGBuilder::visitPatchpoint()
+  // currently expects it.
+  bool OmitCallSeq = NumBytes == 0 && !CLI.IsPatchPoint;
+  assert((!IsSibCall || OmitCallSeq) && "Should not get callseq for sibcalls");
+
   // Adjust the stack pointer for the new arguments...
   // These operations are automatically eliminated by the prolog/epilog pass
-  if (!IsSibCall)
+  if (!OmitCallSeq)
     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);
 
   SDValue StackPtr = DAG.getCopyFromReg(Chain, DL, AArch64::SP,
@@ -3511,7 +3517,7 @@
   // the frame up *after* the call, however in the ABI-changing tail-call case
   // we've carefully laid out the parameters so that when sp is reset they'll be
   // in the correct location.
-  if (IsTailCall && !IsSibCall) {
+  if (IsTailCall && !OmitCallSeq) {
     Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
                                DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
     InFlag = Chain.getValue(1);
@@ -3569,9 +3575,11 @@
 
   uint64_t CalleePopBytes =
       DoesCalleeRestoreStack(CallConv, TailCallOpt) ? alignTo(NumBytes, 16) : 0;
 
-  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
-                             DAG.getIntPtrConstant(CalleePopBytes, DL, true),
-                             InFlag, DL);
+  if (!OmitCallSeq)
+    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
+                               DAG.getIntPtrConstant(CalleePopBytes, DL, true),
+                               InFlag, DL);
+
   if (!Ins.empty())
     InFlag = Chain.getValue(1);
Index: llvm/trunk/test/CodeGen/AArch64/arm64-hello.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-hello.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-hello.ll
@@ -5,9 +5,9 @@
 ; CHECK: sub sp, sp, #32
 ; CHECK-NEXT: stp x29, x30, [sp, #16]
 ; CHECK-NEXT: add x29, sp, #16
-; CHECK-NEXT: stur wzr, [x29, #-4]
 ; CHECK: adrp x0, l_.str@PAGE
 ; CHECK: add x0, x0, l_.str@PAGEOFF
+; CHECK-NEXT: stur wzr, [x29, #-4]
 ; CHECK-NEXT: bl _puts
 ; CHECK-NEXT: ldp x29, x30, [sp, #16]
 ; CHECK-NEXT: add sp, sp, #32
@@ -15,9 +15,9 @@
 ; CHECK-LINUX-LABEL: main:
 ; CHECK-LINUX: str x30, [sp, #-16]!
-; CHECK-LINUX-NEXT: str wzr, [sp, #12]
 ; CHECK-LINUX: adrp x0, .L.str
 ; CHECK-LINUX: add x0, x0, :lo12:.L.str
+; CHECK-LINUX-NEXT: str wzr, [sp, #12]
 ; CHECK-LINUX-NEXT: bl puts
 ; CHECK-LINUX-NEXT: ldr x30, [sp], #16
 ; CHECK-LINUX-NEXT: ret
 
Index: llvm/trunk/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
@@ -22,10 +22,10 @@
 ; DISABLE: cmp w0, w1
 ; DISABLE-NEXT: b.ge [[EXIT_LABEL:LBB[0-9_]+]]
 ;
-; Store %a in the alloca.
-; CHECK: stur w0, {{\[}}[[SAVE_SP]], #-4]
 ; Set the alloca address in the second argument.
-; CHECK-NEXT: sub x1, [[SAVE_SP]], #4
+; CHECK: sub x1, [[SAVE_SP]], #4
+; Store %a in the alloca.
+; CHECK-NEXT: stur w0, {{\[}}[[SAVE_SP]], #-4]
 ; Set the first argument to zero.
 ; CHECK-NEXT: mov w0, wzr
 ; CHECK-NEXT: bl _doSomething
Index: llvm/trunk/test/CodeGen/AArch64/big-byval.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/big-byval.ll
+++ llvm/trunk/test/CodeGen/AArch64/big-byval.ll
@@ -0,0 +1,13 @@
+; RUN: llc -o - %s -verify-machineinstrs | FileCheck %s
+target triple = "aarch64--"
+
+; Make sure we don't fail machine verification because the memcpy callframe
+; setup is nested inside the extfunc callframe setup.
+; CHECK-LABEL: func:
+; CHECK: bl memcpy
+; CHECK: bl extfunc
+declare void @extfunc([4096 x i64]* byval %p)
+define void @func([4096 x i64]* %z) {
+  call void @extfunc([4096 x i64]* byval %z)
+  ret void
+}
Index: llvm/trunk/test/CodeGen/AArch64/func-calls.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/func-calls.ll
+++ llvm/trunk/test/CodeGen/AArch64/func-calls.ll
@@ -63,10 +63,10 @@
   store [2 x i64] %arr, [2 x i64]* @varsmallstruct
 ; CHECK: bl return_smallstruct
 ; CHECK: add x[[VARSMALLSTRUCT:[0-9]+]], {{x[0-9]+}}, :lo12:varsmallstruct
+; CHECK: add x8, {{x[0-9]+}}, {{#?}}:lo12:varstruct
 ; CHECK: stp x0, x1, [x[[VARSMALLSTRUCT]]]
 
   call void @return_large_struct(%myStruct* sret @varstruct)
-; CHECK: add x8, {{x[0-9]+}}, {{#?}}:lo12:varstruct
 ; CHECK: bl return_large_struct
 
   ret void
Index: llvm/trunk/test/CodeGen/AArch64/nontemporal.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/nontemporal.ll
+++ llvm/trunk/test/CodeGen/AArch64/nontemporal.ll
@@ -313,8 +313,8 @@
 
 define void @test_stnp_v4f32_offset_alloca(<4 x float> %v) #0 {
 ; CHECK-LABEL: test_stnp_v4f32_offset_alloca:
-; CHECK: stnp d0, d{{.*}}, [sp]
-; CHECK-NEXT: mov x0, sp
+; CHECK: mov x0, sp
+; CHECK-NEXT: stnp d0, d{{.*}}, [sp]
 ; CHECK-NEXT: bl _dummy
   %tmp0 = alloca <4 x float>
   store <4 x float> %v, <4 x float>* %tmp0, align 1, !nontemporal !0
@@ -324,8 +324,8 @@
 
 define void @test_stnp_v4f32_offset_alloca_2(<4 x float> %v) #0 {
 ; CHECK-LABEL: test_stnp_v4f32_offset_alloca_2:
-; CHECK: stnp d0, d{{.*}}, [sp, #16]
-; CHECK-NEXT: mov x0, sp
+; CHECK: mov x0, sp
+; CHECK-NEXT: stnp d0, d{{.*}}, [sp, #16]
 ; CHECK-NEXT: bl _dummy
   %tmp0 = alloca <4 x float>, i32 2
   %tmp1 = getelementptr <4 x float>, <4 x float>* %tmp0, i32 1
Index: llvm/trunk/test/CodeGen/AArch64/swifterror.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/swifterror.ll
+++ llvm/trunk/test/CodeGen/AArch64/swifterror.ll
@@ -223,8 +223,8 @@
 ; parameter.
 define void @foo_sret(%struct.S* sret %agg.result, i32 %val1, %swift_error** swifterror %error_ptr_ref) {
 ; CHECK-APPLE-LABEL: foo_sret:
-; CHECK-APPLE: mov [[SRET:x[0-9]+]], x8
 ; CHECK-APPLE: orr w0, wzr, #0x10
+; CHECK-APPLE: mov [[SRET:x[0-9]+]], x8
 ; CHECK-APPLE: malloc
 ; CHECK-APPLE: orr [[ID:w[0-9]+]], wzr, #0x1
 ; CHECK-APPLE: strb [[ID]], [x0, #8]
@@ -406,7 +406,7 @@
   ret float %0
 }
 
-; CHECK-APPLE-LABEL: swifterror_clobber
+; CHECK-APPLE-LABEL: swifterror_clobber:
 ; CHECK-APPLE: mov [[REG:x[0-9]+]], x21
 ; CHECK-APPLE: nop
 ; CHECK-APPLE: mov x21, [[REG]]
@@ -415,7 +415,7 @@
   ret void
 }
 
-; CHECK-APPLE-LABEL: swifterror_reg_clobber
+; CHECK-APPLE-LABEL: swifterror_reg_clobber:
 ; CHECK-APPLE: stp {{.*}}x21
 ; CHECK-APPLE: nop
 ; CHECK-APPLE: ldp {{.*}}x21
@@ -423,7 +423,7 @@
   call void asm sideeffect "nop", "~{x21}"()
   ret void
 }
-; CHECK-APPLE-LABEL: params_in_reg
+; CHECK-APPLE-LABEL: params_in_reg:
 ; Save callee saved registers and swifterror since it will be clobbered by the first call to params_in_reg2.
 ; CHECK-APPLE: stp x21, x28, [sp
 ; CHECK-APPLE: stp x27, x26, [sp
@@ -431,16 +431,15 @@
 ; CHECK-APPLE: stp x23, x22, [sp
 ; CHECK-APPLE: stp x20, x19, [sp
 ; CHECK-APPLE: stp x29, x30, [sp
-; CHECK-APPLE: str x20, [sp
+; CHECK-APPLE: str x7, [sp
 ; Store argument registers.
-; CHECK-APPLE: mov x23, x7
-; CHECK-APPLE: mov x24, x6
-; CHECK-APPLE: mov x25, x5
-; CHECK-APPLE: mov x26, x4
-; CHECK-APPLE: mov x27, x3
-; CHECK-APPLE: mov x28, x2
-; CHECK-APPLE: mov x19, x1
-; CHECK-APPLE: mov x22, x0
+; CHECK-APPLE: mov x23, x6
+; CHECK-APPLE: mov x24, x5
+; CHECK-APPLE: mov x25, x4
+; CHECK-APPLE: mov x26, x3
+; CHECK-APPLE: mov x27, x2
+; CHECK-APPLE: mov x28, x1
+; CHECK-APPLE: mov x19, x0
 ; Setup call.
 ; CHECK-APPLE: orr w0, wzr, #0x1
 ; CHECK-APPLE: orr w1, wzr, #0x2
@@ -450,20 +449,20 @@
 ; CHECK-APPLE: orr w5, wzr, #0x6
 ; CHECK-APPLE: orr w6, wzr, #0x7
 ; CHECK-APPLE: orr w7, wzr, #0x8
+; CHECK-APPLE: mov x22, x20
 ; CHECK-APPLE: mov x20, xzr
 ; CHECK-APPLE: mov x21, xzr
 ; CHECK-APPLE: bl _params_in_reg2
 ; Restore original arguments for next call.
-; CHECK-APPLE: mov x0, x22
-; CHECK-APPLE: mov x1, x19
-; CHECK-APPLE: mov x2, x28
-; CHECK-APPLE: mov x3, x27
-; CHECK-APPLE: mov x4, x26
-; CHECK-APPLE: mov x5, x25
-; CHECK-APPLE: mov x6, x24
-; CHECK-APPLE: mov x7, x23
+; CHECK-APPLE: mov x0, x19
+; CHECK-APPLE: mov x1, x28
+; CHECK-APPLE: mov x2, x27
+; CHECK-APPLE: mov x3, x26
+; CHECK-APPLE: mov x4, x25
+; CHECK-APPLE: mov x5, x24
 ; Restore original swiftself argument and swifterror %err.
-; CHECK-APPLE: ldp x20, x21, [sp
+; CHECK-APPLE: ldp x7, x21, [sp
+; CHECK-APPLE: mov x20, x22
 ; CHECK-APPLE: bl _params_in_reg2
 ; Restore calle save registers but don't clober swifterror x21.
 ; CHECK-APPLE-NOT: x21
@@ -489,9 +488,9 @@
 }
 declare swiftcc void @params_in_reg2(i64, i64, i64, i64, i64, i64, i64, i64, i8* swiftself, %swift_error** nocapture swifterror %err)
 
-; CHECK-APPLE-LABEL: params_and_return_in_reg
+; CHECK-APPLE-LABEL: params_and_return_in_reg:
 ; Store callee saved registers.
-; CHECK-APPLE: stp x20, x28, [sp, #24
+; CHECK-APPLE: stp x7, x28, [sp, #24
 ; CHECK-APPLE: stp x27, x26, [sp
 ; CHECK-APPLE: stp x25, x24, [sp
 ; CHECK-APPLE: stp x23, x22, [sp
@@ -499,14 +498,13 @@
 ; CHECK-APPLE: stp x29, x30, [sp
 ; Save original arguments.
 ; CHECK-APPLE: mov x23, x21
-; CHECK-APPLE: str x7, [sp, #16]
-; CHECK-APPLE: mov x24, x6
-; CHECK-APPLE: mov x25, x5
-; CHECK-APPLE: mov x26, x4
-; CHECK-APPLE: mov x27, x3
-; CHECK-APPLE: mov x28, x2
-; CHECK-APPLE: mov x19, x1
-; CHECK-APPLE: mov x22, x0
+; CHECK-APPLE: str x6, [sp, #16]
+; CHECK-APPLE: mov x24, x5
+; CHECK-APPLE: mov x25, x4
+; CHECK-APPLE: mov x26, x3
+; CHECK-APPLE: mov x27, x2
+; CHECK-APPLE: mov x28, x1
+; CHECK-APPLE: mov x19, x0
 ; Setup call arguments.
 ; CHECK-APPLE: orr w0, wzr, #0x1
 ; CHECK-APPLE: orr w1, wzr, #0x2
@@ -516,24 +514,26 @@
 ; CHECK-APPLE: orr w5, wzr, #0x6
 ; CHECK-APPLE: orr w6, wzr, #0x7
 ; CHECK-APPLE: orr w7, wzr, #0x8
+; CHECK-APPLE: mov x22, x20
 ; CHECK-APPLE: mov x20, xzr
 ; CHECK-APPLE: mov x21, xzr
 ; CHECK-APPLE: bl _params_in_reg2
 ; Store swifterror %error_ptr_ref.
 ; CHECK-APPLE: str x21, [sp, #8]
 ; Setup call arguments from original arguments.
-; CHECK-APPLE: mov x0, x22
-; CHECK-APPLE: mov x1, x19
-; CHECK-APPLE: mov x2, x28
-; CHECK-APPLE: mov x3, x27
-; CHECK-APPLE: mov x4, x26
-; CHECK-APPLE: mov x5, x25
-; CHECK-APPLE: mov x6, x24
-; CHECK-APPLE: ldp x7, x20, [sp, #16]
+; CHECK-APPLE: mov x0, x19
+; CHECK-APPLE: mov x1, x28
+; CHECK-APPLE: mov x2, x27
+; CHECK-APPLE: mov x3, x26
+; CHECK-APPLE: mov x4, x25
+; CHECK-APPLE: mov x5, x24
+; CHECK-APPLE: ldp x6, x7, [sp, #16]
+; CHECK-APPLE: mov x20, x22
 ; CHECK-APPLE: mov x21, x23
 ; CHECK-APPLE: bl _params_and_return_in_reg2
+; Save swifterror %err.
+; CHECK-APPLE: str x0, [sp, #24]
 ; Store return values.
-; CHECK-APPLE: mov x19, x0
 ; CHECK-APPLE: mov x22, x1
 ; CHECK-APPLE: mov x24, x2
 ; CHECK-APPLE: mov x25, x3
@@ -541,8 +541,6 @@
 ; CHECK-APPLE: mov x27, x5
 ; CHECK-APPLE: mov x28, x6
 ; CHECK-APPLE: mov x23, x7
-; Save swifterror %err.
-; CHECK-APPLE: str x21, [sp, #24]
 ; Setup call.
 ; CHECK-APPLE: orr w0, wzr, #0x1
 ; CHECK-APPLE: orr w1, wzr, #0x2
@@ -552,12 +550,12 @@
 ; CHECK-APPLE: orr w5, wzr, #0x6
 ; CHECK-APPLE: orr w6, wzr, #0x7
 ; CHECK-APPLE: orr w7, wzr, #0x8
+; CHECK-APPLE: mov x19, x21
 ; CHECK-APPLE: mov x20, xzr
 ; ... setup call with swiferror %error_ptr_ref.
 ; CHECK-APPLE: ldr x21, [sp, #8]
 ; CHECK-APPLE: bl _params_in_reg2
 ; Restore return values for return from this function.
-; CHECK-APPLE: mov x0, x19
 ; CHECK-APPLE: mov x1, x22
 ; CHECK-APPLE: mov x2, x24
 ; CHECK-APPLE: mov x3, x25
@@ -565,13 +563,14 @@
 ; CHECK-APPLE: mov x5, x27
 ; CHECK-APPLE: mov x6, x28
 ; CHECK-APPLE: mov x7, x23
+; CHECK-APPLE: mov x21, x19
 ; Restore swifterror %err and callee save registers.
-; CHECK-APPLE: ldp x21, x28, [sp, #24
 ; CHECK-APPLE: ldp x29, x30, [sp
 ; CHECK-APPLE: ldp x20, x19, [sp
 ; CHECK-APPLE: ldp x23, x22, [sp
 ; CHECK-APPLE: ldp x25, x24, [sp
 ; CHECK-APPLE: ldp x27, x26, [sp
+; CHECK-APPLE: ldp x0, x28, [sp, #24
 ; CHECK-APPLE: ret
 define swiftcc { i64, i64, i64, i64, i64, i64, i64, i64 } @params_and_return_in_reg(i64, i64, i64, i64, i64, i64, i64, i64, i8* swiftself, %swift_error** nocapture swifterror %err) {
   %error_ptr_ref = alloca swifterror %swift_error*, align 8
@@ -601,14 +600,14 @@
 declare swiftcc void @foo2(%swift_error** swifterror)
 
 ; Make sure we properly assign registers during fast-isel.
-; CHECK-O0-LABEL: testAssign
+; CHECK-O0-LABEL: testAssign:
 ; CHECK-O0: mov [[TMP:x.*]], xzr
 ; CHECK-O0: mov x21, [[TMP]]
 ; CHECK-O0: bl _foo2
 ; CHECK-O0: str x21, [s[[STK:.*]]]
 ; CHECK-O0: ldr x0, [s[[STK]]]
 
-; CHECK-APPLE-LABEL: testAssign
+; CHECK-APPLE-LABEL: testAssign:
 ; CHECK-APPLE: mov x21, xzr
 ; CHECK-APPLE: bl _foo2
 ; CHECK-APPLE: mov x0, x21
Index: llvm/trunk/test/CodeGen/AArch64/tailcall-explicit-sret.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/tailcall-explicit-sret.ll
+++ llvm/trunk/test/CodeGen/AArch64/tailcall-explicit-sret.ll
@@ -36,9 +36,9 @@
 
 ; CHECK-LABEL: _test_tailcall_explicit_sret_alloca_dummyusers:
 ; CHECK: ldr [[PTRLOAD1:q[0-9]+]], [x0]
-; CHECK: str [[PTRLOAD1]], [sp]
 ; CHECK: mov x8, sp
-; CHECK-NEXT: bl _test_explicit_sret
+; CHECK: str [[PTRLOAD1]], [sp]
+; CHECK: bl _test_explicit_sret
 ; CHECK: ret
 define void @test_tailcall_explicit_sret_alloca_dummyusers(i1024* %ptr) #0 {
   %l = alloca i1024, align 8
@@ -75,10 +75,10 @@
 }
 
 ; CHECK-LABEL: _test_indirect_tailcall_explicit_sret_nosret_arg:
-; CHECK-DAG: mov x[[CALLERX8NUM:[0-9]+]], x8
-; CHECK-DAG: mov [[FPTR:x[0-9]+]], x0
+; CHECK: mov [[FPTR:x[0-9]+]], x0
 ; CHECK: mov x0, sp
-; CHECK-NEXT: blr [[FPTR]]
+; CHECK: mov x[[CALLERX8NUM:[0-9]+]], x8
+; CHECK: blr [[FPTR]]
 ; CHECK: ldr [[CALLERSRET1:q[0-9]+]], [sp]
 ; CHECK: str [[CALLERSRET1:q[0-9]+]], [x[[CALLERX8NUM]]]
 ; CHECK: ret
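
For reference, a minimal sketch of the case the new OmitCallSeq flag covers (a hypothetical example, not part of the patch): when every argument fits in registers, the call needs no stack bytes (NumBytes == 0), so with this change no callseq_start/callseq_end nodes, and hence no ADJCALLSTACKDOWN/ADJCALLSTACKUP markers, are created for it. As the big-byval.ll comment above notes, this is also what fixes that test: the memcpy call emitted for the byval copy passes its arguments in registers, so it no longer opens a second call frame nested inside the extfunc call frame.

; Hypothetical IR; @callee and @caller are made-up names. Both arguments
; travel in x0/x1 under the AArch64 calling convention, so lowering this
; call needs no call frame and, after this patch, no callseq pair.
declare void @callee(i64, i64)

define void @caller(i64 %a, i64 %b) {
  call void @callee(i64 %a, i64 %b)
  ret void
}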