Index: include/llvm/CodeGen/MachineFunction.h
===================================================================
--- include/llvm/CodeGen/MachineFunction.h
+++ include/llvm/CodeGen/MachineFunction.h
@@ -328,7 +328,6 @@
   bool CallsUnwindInit = false;
   bool HasEHScopes = false;
   bool HasEHFunclets = false;
-  bool HasLocalEscape = false;
 
   /// List of C++ TypeInfo used.
   std::vector<const GlobalValue *> TypeInfos;
@@ -811,9 +810,6 @@
   bool hasEHFunclets() const { return HasEHFunclets; }
   void setHasEHFunclets(bool V) { HasEHFunclets = V; }
 
-  bool hasLocalEscape() const { return HasLocalEscape; }
-  void setHasLocalEscape(bool V) { HasLocalEscape = V; }
-
   /// Find or create an LandingPadInfo for the specified MachineBasicBlock.
   LandingPadInfo &getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad);
Index: include/llvm/CodeGen/TargetFrameLowering.h
===================================================================
--- include/llvm/CodeGen/TargetFrameLowering.h
+++ include/llvm/CodeGen/TargetFrameLowering.h
@@ -261,6 +261,17 @@
     return getFrameIndexReference(MF, FI, FrameReg);
   }
 
+  /// getEHFrameIndexReference - This method returns the offset used to
+  /// reference a frame index location. The offset can be relative to FP, BP,
+  /// or SP, depending on which base register llvm.localaddress returns.
+  virtual int getEHFrameIndexReference(const MachineFunction &MF,
+                                       int FI) const {
+    // By default, dispatch to getFrameIndexReference. Interested targets can
+    // override this.
+    unsigned FrameReg;
+    return getFrameIndexReference(MF, FI, FrameReg);
+  }
+
   /// This method determines which of the registers reported by
   /// TargetRegisterInfo::getCalleeSavedRegs() should actually get saved.
   /// The default implementation checks populates the \p SavedRegs bitset with
Index: lib/CodeGen/AsmPrinter/WinException.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/WinException.cpp
+++ lib/CodeGen/AsmPrinter/WinException.cpp
@@ -937,11 +937,7 @@
   int FI = FuncInfo.EHRegNodeFrameIndex;
   if (FI != INT_MAX) {
     const TargetFrameLowering *TFI = Asm->MF->getSubtarget().getFrameLowering();
-    unsigned UnusedReg;
-    // FIXME: getFrameIndexReference needs to match the behavior of
-    // AArch64RegisterInfo::hasBasePointer in which one of the scenarios where
-    // SP is used is if frame size >= 256.
-    Offset = TFI->getFrameIndexReference(*Asm->MF, FI, UnusedReg);
+    Offset = TFI->getEHFrameIndexReference(*Asm->MF, FI);
   }
 
   MCContext &Ctx = Asm->OutContext;
Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -6172,8 +6172,6 @@
         .addFrameIndex(FI);
   }
 
-  MF.setHasLocalEscape(true);
-
   return nullptr;
 }
Index: lib/Target/AArch64/AArch64FrameLowering.h
===================================================================
--- lib/Target/AArch64/AArch64FrameLowering.h
+++ lib/Target/AArch64/AArch64FrameLowering.h
@@ -78,6 +78,9 @@
   int getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI,
                                      unsigned &FrameReg,
                                      bool IgnoreSPUpdates) const override;
+  int getEHFrameIndexReference(const MachineFunction &MF,
+                               int FI) const override;
+
+  int getSEHFrameIndexOffset(const MachineFunction &MF, int FI) const;
 
 private:
   bool shouldCombineCSRLocalStackBump(MachineFunction &MF,
Index: lib/Target/AArch64/AArch64FrameLowering.cpp
===================================================================
--- lib/Target/AArch64/AArch64FrameLowering.cpp
+++ lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -227,10 +227,6 @@
       MFI.getMaxCallFrameSize() > DefaultSafeSPDisplacement)
     return true;
 
-  // Win64 SEH requires frame pointer if funclets are present.
-  if (MF.hasLocalEscape())
-    return true;
-
   return false;
 }
@@ -1469,19 +1465,44 @@
   return resolveFrameIndexReference(MF, FI, FrameReg);
 }
 
+int AArch64FrameLowering::getEHFrameIndexReference(const MachineFunction &MF,
+                                                   int FI) const {
+  return getSEHFrameIndexOffset(MF, FI);
+}
+
+static int getFPOffset(const MachineFunction &MF, int FI) {
+  const auto &MFI = MF.getFrameInfo();
+  const auto *AFI = MF.getInfo<AArch64FunctionInfo>();
+  const auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
+  bool IsWin64 =
+      Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
+  unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;
+  return MFI.getObjectOffset(FI) + FixedObject + 16;
+}
+
+static int getStackOffset(const MachineFunction &MF, int FI) {
+  const auto &MFI = MF.getFrameInfo();
+  return MFI.getObjectOffset(FI) + MFI.getStackSize();
+}
+
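+// Return the offset of frame index FI from the register selected by
+// getSEHFrameRegister: FP-relative when that register is FP, otherwise an
+// SP-relative offset (also valid off the base pointer, which copies SP).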
+int AArch64FrameLowering::getSEHFrameIndexOffset(const MachineFunction &MF,
+                                                 int FI) const {
+  const auto *RegInfo = static_cast<const AArch64RegisterInfo *>(
+      MF.getSubtarget().getRegisterInfo());
+  return RegInfo->getSEHFrameRegister(MF) == AArch64::FP ?
+      getFPOffset(MF, FI) : getStackOffset(MF, FI);
+}
+
 int AArch64FrameLowering::resolveFrameIndexReference(const MachineFunction &MF,
                                                      int FI, unsigned &FrameReg,
                                                      bool PreferFP) const {
-  const MachineFrameInfo &MFI = MF.getFrameInfo();
-  const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
+  const auto &MFI = MF.getFrameInfo();
+  const auto *RegInfo = static_cast<const AArch64RegisterInfo *>(
       MF.getSubtarget().getRegisterInfo());
-  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
-  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
-  bool IsWin64 = Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
-  unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;
-  int FPOffset = MFI.getObjectOffset(FI) + FixedObject + 16;
-  int Offset = MFI.getObjectOffset(FI) + MFI.getStackSize();
+  const auto *AFI = MF.getInfo<AArch64FunctionInfo>();
+  const auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
+  int FPOffset = getFPOffset(MF, FI);
+  int Offset = getStackOffset(MF, FI);
   bool isFixed = MFI.isFixedObjectIndex(FI);
   bool isCSR = !isFixed &&
                MFI.getObjectOffset(FI) >= -((int)AFI->getCalleeSavedStackSize());
Index: lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- lib/Target/AArch64/AArch64ISelLowering.cpp
+++ lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2744,15 +2744,9 @@
                        Op.getOperand(1), Op.getOperand(2));
 
   case Intrinsic::localaddress: {
-    // Returns one of the stack, base, or frame pointer registers, depending on
-    // which is used to reference local variables.
-    MachineFunction &MF = DAG.getMachineFunction();
-    const AArch64RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
-    unsigned Reg;
-    if (RegInfo->hasBasePointer(MF))
-      Reg = RegInfo->getBaseRegister();
-    else // This function handles the SP or FP case.
-      Reg = RegInfo->getFrameRegister(MF);
+    const auto &MF = DAG.getMachineFunction();
+    const auto *RegInfo = Subtarget->getRegisterInfo();
+    unsigned Reg = RegInfo->getSEHFrameRegister(MF);
     return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg,
                               Op.getSimpleValueType());
   }
Index: lib/Target/AArch64/AArch64RegisterInfo.h
===================================================================
--- lib/Target/AArch64/AArch64RegisterInfo.h
+++ lib/Target/AArch64/AArch64RegisterInfo.h
@@ -121,6 +121,8 @@
   bool trackLivenessAfterRegAlloc(const MachineFunction&) const override {
     return true;
   }
+
+  unsigned getSEHFrameRegister(const MachineFunction &MF) const;
 };
 
 } // end namespace llvm
Index: lib/Target/AArch64/AArch64RegisterInfo.cpp
===================================================================
--- lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -463,15 +463,16 @@
     return;
   }
 
-  // Modify MI as necessary to handle as much of 'Offset' as possible
-  Offset = TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg);
-
   if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
     MachineOperand &FI = MI.getOperand(FIOperandNum);
+    Offset = TFI->getEHFrameIndexReference(MF, FrameIndex);
     FI.ChangeToImmediate(Offset);
     return;
   }
 
+  // Modify MI as necessary to handle as much of 'Offset' as possible
+  Offset = TFI->getFrameIndexReference(MF, FrameIndex, FrameReg);
+
   if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
     return;
@@ -525,3 +526,16 @@
     return 16;
   }
 }
+
+unsigned AArch64RegisterInfo::getSEHFrameRegister(
+    const MachineFunction &MF) const {
+  const auto &MFI = MF.getFrameInfo();
+  // Functions with neither EH funclets nor variable-sized objects can address
+  // escaped locals directly off SP.
+  if (!MF.hasEHFunclets() && !MFI.hasVarSizedObjects())
+    return AArch64::SP;
+  // Realigned frames address locals via the base pointer.
+  if (needsStackRealignment(MF))
+    return getBaseRegister();
+  return getFrameRegister(MF);
+}
Index: test/CodeGen/AArch64/seh-finally.ll
===================================================================
--- test/CodeGen/AArch64/seh-finally.ll
+++ test/CodeGen/AArch64/seh-finally.ll
@@ -1,67 +1,283 @@
 ; RUN: llc -mtriple arm64-windows -o - %s | FileCheck %s
 
-; Function Attrs: noinline optnone uwtable
-define dso_local i32 @foo() {
+; struct S { int x; };
+; void foo(int n);
+; void foo(struct S o);
+; void simple_seh() {
+;   struct S o;
+;
+;   __try { foo(o.x); }
+;   __finally { foo(o.x); }
+; }
+; void stack_realign() {
+;   struct S __declspec(align(32)) o;
+;
+;   __try { foo(o.x); }
+;   __finally { foo(o.x); }
+; }
+; void vla_present(int n) {
+;   int vla[n];
+;
+;   __try { foo(n); }
+;   __finally { foo(n); }
+; }
+; void vla_and_realign(int n) {
+;   struct S __declspec(align(32)) o;
+;   int vla[n];
+;
+;   __try { foo(o.x); }
+;   __finally { foo(o.x); }
+; }
+
+%struct.S = type { i32 }
+
+; Test simple SEH (__try/__finally).
+define void @simple_seh() #0 personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) {
 entry:
-; CHECK-LABEL: foo
-; CHECK: orr w8, wzr, #0x1
-; CHECK: mov w0, wzr
-; CHECK: mov x1, x29
-; CHECK: .set .Lfoo$frame_escape_0, -4
-; CHECK: stur w8, [x29, #-4]
-; CHECK: bl "?fin$0@0@foo@@"
-; CHECK: ldur w0, [x29, #-4]
+; CHECK-LABEL: simple_seh
+; CHECK: add x29, sp, #16
+; CHECK: orr x1, xzr, #0xfffffffffffffffe
+; CHECK: stur x1, [x29, #-16]
+; CHECK: .set .Lsimple_seh$frame_escape_0, -8
+; CHECK: ldur w0, [x29, #-8]
+; CHECK: bl foo
+
+  %o = alloca %struct.S, align 4
+  call void (...) @llvm.localescape(%struct.S* %o)
+  %x = getelementptr inbounds %struct.S, %struct.S* %o, i32 0, i32 0
+  %0 = load i32, i32* %x, align 4
+  invoke void @foo(i32 %0) #5
+          to label %invoke.cont unwind label %ehcleanup
-  %count = alloca i32, align 4
-  call void (...) @llvm.localescape(i32* %count)
-  store i32 0, i32* %count, align 4
-  %0 = load i32, i32* %count, align 4
-  %add = add nsw i32 %0, 1
-  store i32 %add, i32* %count, align 4
+
+invoke.cont:                                      ; preds = %entry
   %1 = call i8* @llvm.localaddress()
-  call void @"?fin$0@0@foo@@"(i8 0, i8* %1)
-  %2 = load i32, i32* %count, align 4
-  ret i32 %2
+  call void @fin_simple_seh(i8 0, i8* %1)
+  ret void
+
+ehcleanup:                                        ; preds = %entry
+  %2 = cleanuppad within none []
+  %3 = call i8* @llvm.localaddress()
+  call void @fin_simple_seh(i8 1, i8* %3) [ "funclet"(token %2) ]
+  cleanupret from %2 unwind to caller
 }
 
-define internal void @"?fin$0@0@foo@@"(i8 %abnormal_termination, i8* %frame_pointer) {
+define void @fin_simple_seh(i8 %abnormal_termination, i8* %frame_pointer) {
 entry:
-; CHECK-LABEL: @"?fin$0@0@foo@@"
-; CHECK: sub sp, sp, #16
-; CHECK: str x1, [sp, #8]
-; CHECK: strb w0, [sp, #7]
-; CHECK: movz x8, #:abs_g1_s:.Lfoo$frame_escape_0
-; CHECK: movk x8, #:abs_g0_nc:.Lfoo$frame_escape_0
-; CHECK: add x8, x1, x8
-; CHECK: ldr w9, [x8]
-; CHECK: add w9, w9, #1
-; CHECK: str w9, [x8]
+; CHECK-LABEL: fin_simple_seh
+; CHECK: movz x8, #:abs_g1_s:.Lsimple_seh$frame_escape_0
+; CHECK: movk x8, #:abs_g0_nc:.Lsimple_seh$frame_escape_0
+; CHECK: strb w0, [sp, #15]
+; CHECK: ldr w0, [x1, x8]
+; CHECK: bl foo
 
   %frame_pointer.addr = alloca i8*, align 8
   %abnormal_termination.addr = alloca i8, align 1
-  %0 = call i8* @llvm.localrecover(i8* bitcast (i32 ()* @foo to i8*), i8* %frame_pointer, i32 0)
-  %count = bitcast i8* %0 to i32*
+  %0 = call i8* @llvm.localrecover(i8* bitcast (void ()* @simple_seh to i8*), i8* %frame_pointer, i32 0)
+  %o = bitcast i8* %0 to %struct.S*
  store i8* %frame_pointer, i8** %frame_pointer.addr, align 8
   store i8 %abnormal_termination, i8* %abnormal_termination.addr, align 1
-  %1 = zext i8 %abnormal_termination to i32
-  %cmp = icmp eq i32 %1, 0
-  br i1 %cmp, label %if.then, label %if.end
+  %x = getelementptr inbounds %struct.S, %struct.S* %o, i32 0, i32 0
+  %1 = load i32, i32* %x, align 4
+  call void @foo(i32 %1)
+  ret void
+}
 
-if.then:                                          ; preds = %entry
-  %2 = load i32, i32* %count, align 4
-  %add = add nsw i32 %2, 1
-  store i32 %add, i32* %count, align 4
-  br label %if.end
+; Test SEH when stack realignment is needed because highly aligned stack objects are present.
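+; In the realigned case the SEH frame register is the base pointer (x19), so
+; the escaped local is addressed off x19 and its .set offset below is
+; SP-relative rather than FP-relative, as the CHECK lines verify.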
+define void @stack_realign() #0 personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) {
+entry:
+; CHECK-LABEL: stack_realign
+; CHECK: add x29, sp, #16
+; CHECK: sub x9, sp, #64
+; CHECK: and sp, x9, #0xffffffffffffffe0
+; CHECK: mov x19, sp
+; CHECK: orr x1, xzr, #0xfffffffffffffffe
+; CHECK: stur x1, [x19, #16]
+; CHECK: .set .Lstack_realign$frame_escape_0, 32
+; CHECK: ldr w0, [x19, #32]
+; CHECK: bl foo
+
+  %o = alloca %struct.S, align 32
+  call void (...) @llvm.localescape(%struct.S* %o)
+  %x = getelementptr inbounds %struct.S, %struct.S* %o, i32 0, i32 0
+  %0 = load i32, i32* %x, align 32
+  invoke void @foo(i32 %0) #5
+          to label %invoke.cont unwind label %ehcleanup
-if.end:                                           ; preds = %if.then, %entry
+
+invoke.cont:                                      ; preds = %entry
+  %1 = call i8* @llvm.localaddress()
+  call void @fin_stack_realign(i8 0, i8* %1)
   ret void
+
+ehcleanup:                                        ; preds = %entry
+  %2 = cleanuppad within none []
+  %3 = call i8* @llvm.localaddress()
+  call void @fin_stack_realign(i8 1, i8* %3) [ "funclet"(token %2) ]
+  cleanupret from %2 unwind to caller
 }
 
+define void @fin_stack_realign(i8 %abnormal_termination, i8* %frame_pointer) {
+entry:
+; CHECK-LABEL: fin_stack_realign
+; CHECK: movz x8, #:abs_g1_s:.Lstack_realign$frame_escape_0
+; CHECK: movk x8, #:abs_g0_nc:.Lstack_realign$frame_escape_0
+; CHECK: strb w0, [sp, #15]
+; CHECK: ldr w0, [x1, x8]
+; CHECK: bl foo
+
+  %frame_pointer.addr = alloca i8*, align 8
+  %abnormal_termination.addr = alloca i8, align 1
+  %0 = call i8* @llvm.localrecover(i8* bitcast (void ()* @stack_realign to i8*), i8* %frame_pointer, i32 0)
+  %o = bitcast i8* %0 to %struct.S*
+  store i8* %frame_pointer, i8** %frame_pointer.addr, align 8
+  store i8 %abnormal_termination, i8* %abnormal_termination.addr, align 1
+  %x = getelementptr inbounds %struct.S, %struct.S* %o, i32 0, i32 0
+  %1 = load i32, i32* %x, align 32
+  call void @foo(i32 %1)
+  ret void
+}
+
+; Test SEH when variable-sized objects are present on the stack. Note: escaped VLAs are currently not supported by SEH.
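+; Dynamic allocas move SP, so here the SEH frame register is FP (x29) and the
+; escaped local's .set offset below is a negative, FP-relative offset.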
+define void @vla_present(i32 %n) #0 personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) {
+entry:
+; CHECK-LABEL: vla_present
+; CHECK: add x29, sp, #32
+; CHECK: orr x1, xzr, #0xfffffffffffffffe
+; CHECK: stur x1, [x29, #-32]
+; CHECK: .set .Lvla_present$frame_escape_0, -4
+; CHECK: stur w0, [x29, #-4]
+; CHECK: ldur w8, [x29, #-4]
+; CHECK: mov x9, sp
+; CHECK: stur x9, [x29, #-16]
+; CHECK: stur x8, [x29, #-24]
+; CHECK: ldur w0, [x29, #-4]
+; CHECK: bl foo
+
+  %n.addr = alloca i32, align 4
+  %saved_stack = alloca i8*, align 8
+  %__vla_expr0 = alloca i64, align 8
+  call void (...) @llvm.localescape(i32* %n.addr)
+  store i32 %n, i32* %n.addr, align 4
+  %0 = load i32, i32* %n.addr, align 4
+  %1 = zext i32 %0 to i64
+  %2 = call i8* @llvm.stacksave()
+  store i8* %2, i8** %saved_stack, align 8
+  %vla = alloca i32, i64 %1, align 4
+  store i64 %1, i64* %__vla_expr0, align 8
+  %3 = load i32, i32* %n.addr, align 4
+  invoke void @foo(i32 %3) #5
+          to label %invoke.cont unwind label %ehcleanup
+
+invoke.cont:                                      ; preds = %entry
+  %4 = call i8* @llvm.localaddress()
+  call void @fin_vla_present(i8 0, i8* %4)
+  %5 = load i8*, i8** %saved_stack, align 8
+  call void @llvm.stackrestore(i8* %5)
+  ret void
+
+ehcleanup:                                        ; preds = %entry
+  %6 = cleanuppad within none []
+  %7 = call i8* @llvm.localaddress()
+  call void @fin_vla_present(i8 1, i8* %7) [ "funclet"(token %6) ]
+  cleanupret from %6 unwind to caller
+}
+
+define void @fin_vla_present(i8 %abnormal_termination, i8* %frame_pointer) {
+entry:
+; CHECK-LABEL: fin_vla_present
+; CHECK: movz x8, #:abs_g1_s:.Lvla_present$frame_escape_0
+; CHECK: movk x8, #:abs_g0_nc:.Lvla_present$frame_escape_0
+; CHECK: strb w0, [sp, #15]
+; CHECK: ldr w0, [x1, x8]
+; CHECK: bl foo
+
+  %frame_pointer.addr = alloca i8*, align 8
+  %abnormal_termination.addr = alloca i8, align 1
+  %0 = call i8* @llvm.localrecover(i8* bitcast (void (i32)* @vla_present to i8*), i8* %frame_pointer, i32 0)
+  %n.addr = bitcast i8* %0 to i32*
+  store i8* %frame_pointer, i8** %frame_pointer.addr, align 8
+  store i8 %abnormal_termination, i8* %abnormal_termination.addr, align 1
+  %1 = load i32, i32* %n.addr, align 4
+  call void @foo(i32 %1)
+  ret void
+}
+
+; Test SEH when both VLAs and highly aligned objects are present on the stack.
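+; With both a VLA and an over-aligned local the frame is realigned, so the SEH
+; frame register is again the base pointer (x19) and the escape offset below
+; is SP-relative.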
+define void @vla_and_realign(i32 %n) #0 personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) {
+entry:
+; CHECK-LABEL: vla_and_realign
+; CHECK: add x29, sp, #16
+; CHECK: sub x9, sp, #64
+; CHECK: and sp, x9, #0xffffffffffffffe0
+; CHECK: mov x19, sp
+; CHECK: orr x1, xzr, #0xfffffffffffffffe
+; CHECK: stur x1, [x19]
+; CHECK: .set .Lvla_and_realign$frame_escape_0, 32
+; CHECK: stur w0, [x29, #-4]
+; CHECK: ldur w8, [x29, #-4]
+; CHECK: mov x9, sp
+; CHECK: str x9, [x19, #24]
+; CHECK: str x8, [x19, #16]
+; CHECK: ldr w0, [x19, #32]
+; CHECK: bl foo
+
+  %n.addr = alloca i32, align 4
+  %o = alloca %struct.S, align 32
+  %saved_stack = alloca i8*, align 8
+  %__vla_expr0 = alloca i64, align 8
+  call void (...) @llvm.localescape(%struct.S* %o)
+  store i32 %n, i32* %n.addr, align 4
+  %0 = load i32, i32* %n.addr, align 4
+  %1 = zext i32 %0 to i64
+  %2 = call i8* @llvm.stacksave()
+  store i8* %2, i8** %saved_stack, align 8
+  %vla = alloca i32, i64 %1, align 4
+  store i64 %1, i64* %__vla_expr0, align 8
+  %x = getelementptr inbounds %struct.S, %struct.S* %o, i32 0, i32 0
+  %3 = load i32, i32* %x, align 32
+  invoke void @foo(i32 %3) #5
+          to label %invoke.cont unwind label %ehcleanup
+
+invoke.cont:                                      ; preds = %entry
+  %4 = call i8* @llvm.localaddress()
+  call void @fin_vla_and_realign(i8 0, i8* %4)
+  %5 = load i8*, i8** %saved_stack, align 8
+  call void @llvm.stackrestore(i8* %5)
+  ret void
+
+ehcleanup:                                        ; preds = %entry
+  %6 = cleanuppad within none []
+  %7 = call i8* @llvm.localaddress()
+  call void @fin_vla_and_realign(i8 1, i8* %7) [ "funclet"(token %6) ]
+  cleanupret from %6 unwind to caller
+}
+
+define void @fin_vla_and_realign(i8 %abnormal_termination, i8* %frame_pointer) {
+entry:
+; CHECK-LABEL: fin_vla_and_realign
+; CHECK: movz x8, #:abs_g1_s:.Lvla_and_realign$frame_escape_0
+; CHECK: movk x8, #:abs_g0_nc:.Lvla_and_realign$frame_escape_0
+; CHECK: strb w0, [sp, #15]
+; CHECK: ldr w0, [x1, x8]
+; CHECK: bl foo
+
+  %frame_pointer.addr = alloca i8*, align 8
+  %abnormal_termination.addr = alloca i8, align 1
+  %0 = call i8* @llvm.localrecover(i8* bitcast (void (i32)* @vla_and_realign to i8*), i8* %frame_pointer, i32 0)
+  %o = bitcast i8* %0 to %struct.S*
+  store i8* %frame_pointer, i8** %frame_pointer.addr, align 8
+  store i8 %abnormal_termination, i8* %abnormal_termination.addr, align 1
+  %x = getelementptr inbounds %struct.S, %struct.S* %o, i32 0, i32 0
+  %1 = load i32, i32* %x, align 32
+  call void @foo(i32 %1)
+  ret void
+}
 
-; Function Attrs: nounwind readnone
-declare i8* @llvm.localrecover(i8*, i8*, i32)
-
-; Function Attrs: nounwind readnone
-declare i8* @llvm.localaddress()
-
-; Function Attrs: nounwind
+declare void @foo(i32)
+declare void @llvm.stackrestore(i8*)
+declare i8* @llvm.stacksave()
+declare i8* @llvm.localrecover(i8*, i8*, i32)
+declare i8* @llvm.localaddress()
 declare void @llvm.localescape(...)
+declare i32 @__C_specific_handler(...)
+
+attributes #0 = { noinline optnone }
Index: test/CodeGen/AArch64/seh-localescape.ll
===================================================================
--- test/CodeGen/AArch64/seh-localescape.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc -mtriple arm64-windows %s -o - | FileCheck %s
-
-; Function Attrs: noinline nounwind optnone uwtable
-define dso_local i32 @foo() {
-entry:
-; CHECK-LABEL: foo
-; CHECK: .set .Lfoo$frame_escape_0, -4
-
-  %count = alloca i32, align 4
-  call void (...) @llvm.localescape(i32* %count)
-  ret i32 0
-}
-
-define internal i32 @"?filt$0@0@foo@@"(i8* %exception_pointers, i8* %frame_pointer) {
-entry:
-; CHECK-LABEL: @"?filt$0@0@foo@@"
-; CHECK: movz x8, #:abs_g1_s:.Lfoo$frame_escape_0
-; CHECK: movk x8, #:abs_g0_nc:.Lfoo$frame_escape_0
-
-  %0 = call i8* @llvm.localrecover(i8* bitcast (i32 ()* @foo to i8*), i8* %frame_pointer, i32 0)
-  %count = bitcast i8* %0 to i32*
-  %1 = load i32, i32* %count, align 4
-  ret i32 %1
-}
-
-; Function Attrs: nounwind readnone
-declare i8* @llvm.localrecover(i8*, i8*, i32) #2
-
-; Function Attrs: nounwind
-declare void @llvm.localescape(...) #3