Index: llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
@@ -54,10 +54,116 @@
   Constant *GuardFnCFGlobal = nullptr;
   Constant *GuardFnGlobal = nullptr;
   Module *M = nullptr;
+
+  void mangleArm64EcThunk(CallBase *CB, bool EntryThunk, raw_ostream &Out);
+  void mangleArm64EcThunkRetType(CallBase *CB, bool EntryThunk,
+                                 raw_ostream &Out);
+  void mangleArm64EcThunkArgs(CallBase *CB, bool EntryThunk, raw_ostream &Out);
+  void mangleArm64ECThunkType(Type *T, Align Alignment, bool EntryThunk,
+                              bool Ret, uint64_t X86SignBytes,
+                              raw_ostream &Out);
 };

 } // end anonymous namespace

+// FIXME: For now, there are two kinds of functions for which this pass
+// generates a different thunk type than MSVC:
+// 1. Exit thunk return type is a struct with size >= 16
+//    MSVC generates the thunk:      $iexit_thunk$cdecl$i8$i8
+//    This pass generates the thunk: $iexit_thunk$cdecl$v$i8i8
+// 2. Exit thunk return type is a struct with alignment >= 32
+//    MSVC generates the thunk:      $iexit_thunk$cdecl$i8$i8i8
+//    This pass generates the thunk: $iexit_thunk$cdecl$v$i8i8
+// It is not clear whether we need to match MSVC here.
+void AArch64Arm64ECCallLowering::mangleArm64EcThunk(CallBase *CB,
+                                                    bool EntryThunk,
+                                                    raw_ostream &Out) {
+  Out << (EntryThunk ? "$ientry_thunk$cdecl$" : "$iexit_thunk$cdecl$");
+
+  mangleArm64EcThunkRetType(CB, EntryThunk, Out);
+  mangleArm64EcThunkArgs(CB, EntryThunk, Out);
+}
+
+void AArch64Arm64ECCallLowering::mangleArm64EcThunkArgs(CallBase *CB,
+                                                        bool EntryThunk,
+                                                        raw_ostream &Out) {
+  FunctionType *FT = CB->getFunctionType();
+  Out << "$";
+  if (FT->isVarArg()) {
+    Out << "varargs";
+    return;
+  }
+
+  for (unsigned I = 0, E = FT->getNumParams(); I != E; ++I) {
+    Align ParamAlign = CB->getParamAlign(I).valueOrOne();
+    uint64_t X86SignBytes = CB->getParamArm64ECArgX86SignBytes(I);
+    mangleArm64ECThunkType(FT->getParamType(I), ParamAlign, EntryThunk,
+                           /*Ret*/ false, X86SignBytes, Out);
+  }
+}
+
+void AArch64Arm64ECCallLowering::mangleArm64EcThunkRetType(CallBase *CB,
+                                                           bool EntryThunk,
+                                                           raw_ostream &Out) {
+  Type *T = CB->getFunctionType()->getReturnType();
+  if (T->isVoidTy()) {
+    Out << "v";
+    return;
+  }
+
+  uint64_t X86SignBytes = CB->getRetArm64ECArgX86SignBytes();
+  mangleArm64ECThunkType(T, Align(), EntryThunk, /*Ret*/ true, X86SignBytes,
+                         Out);
+}
+
+// FIXME: Are there any other rules?
+// Thunk mangling rules:
+// float  ----> f
+// double ----> d
+// all other integer and pointer types ----> i8
+// i128   ----> m16a16
+// struct types:
+//   m4 ----> m
+//   entry thunk ret struct ----> m##align(size, 4) {m, m8, m12, m16, ...}
+//   entry thunk param struct
+//     if param align <= 8 ----> m##size
+//     if param align > 8  ----> m##size##a##align
+//   exit thunk ret struct
+//     if struct size <= 16 ----> m##align(size, 4) {m, m8, m12, m16}
+//     if struct size > 16  ----> i8
+//   exit thunk param struct
+//     if struct size <= 16 ----> m##size
+//     if struct size > 16  ----> i8
+void AArch64Arm64ECCallLowering::mangleArm64ECThunkType(
+    Type *T, Align Alignment, bool EntryThunk, bool Ret, uint64_t X86SignBytes,
+    raw_ostream &Out) {
+  auto &DL = M->getDataLayout();
+  uint64_t Size = DL.getTypeSizeInBits(T);
+  Size = (Size + 7) / 8;
+  if (T->isFloatTy()) {
+    Out << "f";
+  } else if (T->isDoubleTy()) {
+    Out << "d";
+  } else if (T->isIntegerTy(128)) {
+    Out << "m16a16";
+  } else if (X86SignBytes) {
+    if (!EntryThunk && X86SignBytes > 16) {
+      Out << "i8";
+    } else {
+      Out << "m";
+      unsigned StructSize =
+          Ret ? alignToPowerOf2(X86SignBytes, 4) : X86SignBytes;
+      if (StructSize != 4)
+        Out << StructSize;
+      if (Alignment.value() >= 8 && !T->isPointerTy())
+        Out << "a" << Alignment.value();
+    }
+  } else {
+    assert(T->isIntOrPtrTy());
+    Out << "i8";
+  }
+}
+
 Function *AArch64Arm64ECCallLowering::buildExitThunk(CallBase *CB) {
   auto &DL = M->getDataLayout();
   FunctionType *FT = CB->getFunctionType();
@@ -94,9 +200,13 @@
     }
   }

+  SmallString<256> ExitThunkName;
+  llvm::raw_svector_ostream Out(ExitThunkName);
+  mangleArm64EcThunk(CB, /*EntryThunk*/ false, Out);
+
   FunctionType *Ty = FunctionType::get(RetTy, DefArgTypes, false);
   Function *F =
-      Function::Create(Ty, GlobalValue::InternalLinkage, 0, "exit_thunk", M);
+      Function::Create(Ty, GlobalValue::InternalLinkage, 0, ExitThunkName, M);
   F->setCallingConv(CallingConv::ARM64EC_Thunk_Native);
   // Copy MSVC, and always set up a frame pointer. (Maybe this isn't necessary.)
   F->addFnAttr("frame-pointer", "all");
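For reference, here is a minimal standalone sketch (not part of the patch) of how the scalar rules documented in mangleArm64ECThunkType compose into a thunk name. The ScalarKind enum, the mangleScalar helper, and the example signature are invented for illustration only; struct parameters additionally depend on the arm64ec_x86sign byte counts handled in the pass above.

// Illustrative sketch only; not part of this patch.
#include <string>

enum class ScalarKind { Float, Double, Int128, IntOrPtr };

static std::string mangleScalar(ScalarKind K) {
  switch (K) {
  case ScalarKind::Float:    return "f";      // float               ----> f
  case ScalarKind::Double:   return "d";      // double              ----> d
  case ScalarKind::Int128:   return "m16a16"; // i128                ----> m16a16
  case ScalarKind::IntOrPtr: return "i8";     // other int/ptr types ----> i8
  }
  return "";
}

// Example: an exit thunk for "double f(float, void *)" would be named
//   "$iexit_thunk$cdecl$" + mangleScalar(ScalarKind::Double) + "$" +
//       mangleScalar(ScalarKind::Float) + mangleScalar(ScalarKind::IntOrPtr)
// i.e. $iexit_thunk$cdecl$d$fi8, consistent with the $iexit_thunk$cdecl$d$d,
// $iexit_thunk$cdecl$f$f, and $iexit_thunk$cdecl$i8$i8 names checked in
// arm64ec-mangle-basic.ll below.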
Index: llvm/test/CodeGen/AArch64/arm64ec-mangle-align.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/arm64ec-mangle-align.ll
@@ -0,0 +1,1346 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --include-generated-funcs
+; RUN: llc -mtriple=aarch64-pc-windows-msvc_arm64ec < %s | FileCheck %s
+
+%struct.s3 = type { [3 x i8], [61 x i8] }
+%struct.s4 = type { [4 x i8], [28 x i8] }
+%struct.s12 = type { [12 x i8], [52 x i8] }
+%struct.s16 = type { [16 x i8], [16 x i8] }
+%struct.s17 = type { [17 x i8], [47 x i8] }
+%struct.s33 = type { [33 x i8], [3 x i8] }
+%struct.s65 = type { [65 x i8], i8 }
+%struct.s129 = type { [129 x i8], [15 x i8] }
+%struct.s257 = type { [257 x i8], [31 x i8] }
+
+@pfnstruct1 = global ptr null, align 8
+@pfnstruct2 = global ptr null, align 8
+@pfnstruct3 = global ptr null, align 8
+@pfnstruct4 = global ptr null, align 8
+@pfnstruct5 = global ptr null, align 8
+@pfnstruct6 = global ptr null, align 8
+@pfnstruct7 = global ptr null, align 8
+@pfnstruct8 = global ptr null, align 8
+@pfnstruct9 = global ptr null, align 8
+@pfnstruct10 = global ptr null, align 8
+@pfnstruct11 = global ptr null, align 8
+@pfnstruct12 = global ptr null, align 8
+@pfnstruct13 = global ptr null, align 8
+@pfnstruct14 = global ptr null, align 8
+@pfnstruct15 = global ptr null, align 8
+@pfnstruct16 = global ptr null, align 8
+@pfnstruct17 = global ptr null, align 8
+@pfnstruct33 = global ptr null, align 8
+@pfnstruct65 = global ptr null, align 8
+@pfnstruct129 = global ptr null, align 8
+@pfnstruct257 = global ptr null, align 8
+
+
+define dso_local arm64ec_x86sign(1) i8 @callstruct1(i64 arm64ec_x86sign(1) %x.coerce) {
+entry:
+  %0 = load ptr, ptr @pfnstruct1, align 8
+  %coerce.dive1.coerce.0.insert.ext = and i64 %x.coerce, 255
+  %call = tail call arm64ec_x86sign(1) i8 %0(i64 arm64ec_x86sign(1) %coerce.dive1.coerce.0.insert.ext)
+  ret i8 %call
+}
+
+
+declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1 immarg)
+
+
+define dso_local arm64ec_x86sign(8) i64 @callstruct2(i64 arm64ec_x86sign(8) %x.coerce) {
+entry:
+  %0 = load ptr, ptr @pfnstruct2, align 8
+  %call = tail call arm64ec_x86sign(8) i64 %0(i64 arm64ec_x86sign(8) %x.coerce)
+  ret i64 %call
+}
+
+
+define dso_local arm64ec_x86sign(64) void @callstruct3(ptr sret(%struct.s3) align 64 %agg.result, ptr arm64ec_x86sign(64) %x) {
+entry:
+  %agg.tmp = alloca %struct.s3, align 64
+  %0 = load ptr, ptr @pfnstruct3, align 8
+  call void @llvm.memcpy.p0.p0.i64(ptr align 64 dereferenceable(64) %agg.tmp, ptr align 64 
dereferenceable(64) %x, i64 64, i1 false) + call arm64ec_x86sign(64) void %0(ptr sret(%struct.s3) align 64 %agg.result, ptr arm64ec_x86sign(64) %agg.tmp) + ret void +} + + +define dso_local arm64ec_x86sign(32) void @callstruct4(ptr sret(%struct.s4) align 32 %agg.result, ptr arm64ec_x86sign(32) %x) { +entry: + %agg.tmp = alloca %struct.s4, align 32 + %0 = load ptr, ptr @pfnstruct4, align 8 + call void @llvm.memcpy.p0.p0.i64(ptr align 32 dereferenceable(32) %agg.tmp, ptr align 32 dereferenceable(32) %x, i64 32, i1 false) + call arm64ec_x86sign(32) void %0(ptr sret(%struct.s4) align 32 %agg.result, ptr arm64ec_x86sign(32) %agg.tmp) + ret void +} + + +define dso_local arm64ec_x86sign(6) i48 @callstruct5(i64 arm64ec_x86sign(6) %x.coerce) { +entry: + %0 = load ptr, ptr @pfnstruct5, align 8 + %agg.tmp.coerce.0.insert.ext = and i64 %x.coerce, 281474976710655 + %call = tail call arm64ec_x86sign(6) i48 %0(i64 arm64ec_x86sign(6) %agg.tmp.coerce.0.insert.ext) + ret i48 %call +} + + +define dso_local arm64ec_x86sign(8) i64 @callstruct6(i64 arm64ec_x86sign(8) %x.coerce) { +entry: + %0 = load ptr, ptr @pfnstruct6, align 8 + %call = tail call arm64ec_x86sign(8) i64 %0(i64 arm64ec_x86sign(8) %x.coerce) + ret i64 %call +} + + +define dso_local arm64ec_x86sign(8) i64 @callstruct7(i64 arm64ec_x86sign(8) %x.coerce) { +entry: + %0 = load ptr, ptr @pfnstruct7, align 8 + %call = tail call arm64ec_x86sign(8) i64 %0(i64 arm64ec_x86sign(8) %x.coerce) + ret i64 %call +} + + +define dso_local arm64ec_x86sign(16) i128 @callstruct8(i128 arm64ec_x86sign(16) %x.coerce) { +entry: + %0 = load ptr, ptr @pfnstruct8, align 8 + %call = tail call arm64ec_x86sign(16) i128 %0(i128 arm64ec_x86sign(16) %x.coerce) + ret i128 %call +} + + +define dso_local arm64ec_x86sign(12) [2 x i64] @callstruct9([2 x i64] arm64ec_x86sign(12) %x.coerce) { +entry: + %x.coerce.fca.1.extract = extractvalue [2 x i64] %x.coerce, 1 + %0 = load ptr, ptr @pfnstruct9, align 8 + %agg.tmp.coerce.sroa.2.0.insert.ext = and i64 %x.coerce.fca.1.extract, 4294967295 + %.fca.1.insert3 = insertvalue [2 x i64] %x.coerce, i64 %agg.tmp.coerce.sroa.2.0.insert.ext, 1 + %call = tail call arm64ec_x86sign(12) [2 x i64] %0([2 x i64] arm64ec_x86sign(12) %.fca.1.insert3) + %call.fca.1.extract = extractvalue [2 x i64] %call, 1 + %retval.coerce.sroa.2.0.insert.ext = and i64 %call.fca.1.extract, 4294967295 + %.fca.1.insert = insertvalue [2 x i64] %call, i64 %retval.coerce.sroa.2.0.insert.ext, 1 + ret [2 x i64] %.fca.1.insert +} + + +define dso_local arm64ec_x86sign(10) [2 x i64] @callstruct10([2 x i64] arm64ec_x86sign(10) %x.coerce) { +entry: + %x.coerce.fca.1.extract = extractvalue [2 x i64] %x.coerce, 1 + %0 = load ptr, ptr @pfnstruct10, align 8 + %coerce.dive1.coerce.sroa.2.0.insert.ext = and i64 %x.coerce.fca.1.extract, 65535 + %.fca.1.insert6 = insertvalue [2 x i64] %x.coerce, i64 %coerce.dive1.coerce.sroa.2.0.insert.ext, 1 + %call = tail call arm64ec_x86sign(10) [2 x i64] %0([2 x i64] arm64ec_x86sign(10) %.fca.1.insert6) + %call.fca.1.extract = extractvalue [2 x i64] %call, 1 + %coerce.dive4.coerce.sroa.2.0.insert.ext = and i64 %call.fca.1.extract, 65535 + %.fca.1.insert = insertvalue [2 x i64] %call, i64 %coerce.dive4.coerce.sroa.2.0.insert.ext, 1 + ret [2 x i64] %.fca.1.insert +} + + +define dso_local arm64ec_x86sign(11) [2 x i64] @callstruct11([2 x i64] arm64ec_x86sign(11) %x.coerce) { +entry: + %x.coerce.fca.1.extract = extractvalue [2 x i64] %x.coerce, 1 + %0 = load ptr, ptr @pfnstruct11, align 8 + %coerce.dive1.coerce.sroa.2.0.insert.ext = and i64 
%x.coerce.fca.1.extract, 16777215 + %.fca.1.insert6 = insertvalue [2 x i64] %x.coerce, i64 %coerce.dive1.coerce.sroa.2.0.insert.ext, 1 + %call = tail call arm64ec_x86sign(11) [2 x i64] %0([2 x i64] arm64ec_x86sign(11) %.fca.1.insert6) + %call.fca.1.extract = extractvalue [2 x i64] %call, 1 + %coerce.dive4.coerce.sroa.2.0.insert.ext = and i64 %call.fca.1.extract, 16777215 + %.fca.1.insert = insertvalue [2 x i64] %call, i64 %coerce.dive4.coerce.sroa.2.0.insert.ext, 1 + ret [2 x i64] %.fca.1.insert +} + + +define dso_local arm64ec_x86sign(64) void @callstruct12(ptr sret(%struct.s12) align 64 %agg.result, ptr arm64ec_x86sign(64) %x) { +entry: + %agg.tmp = alloca %struct.s12, align 64 + %0 = load ptr, ptr @pfnstruct12, align 8 + call void @llvm.memcpy.p0.p0.i64(ptr align 64 dereferenceable(64) %agg.tmp, ptr align 64 dereferenceable(64) %x, i64 64, i1 false) + call arm64ec_x86sign(64) void %0(ptr sret(%struct.s12) align 64 %agg.result, ptr arm64ec_x86sign(64) %agg.tmp) + ret void +} + + +define dso_local arm64ec_x86sign(16) [2 x i64] @callstruct13([2 x i64] arm64ec_x86sign(16) %x.coerce) { +entry: + %0 = load ptr, ptr @pfnstruct13, align 8 + %call = tail call arm64ec_x86sign(16) [2 x i64] %0([2 x i64] arm64ec_x86sign(16) %x.coerce) + ret [2 x i64] %call +} + + +define dso_local arm64ec_x86sign(16) [2 x i64] @callstruct14([2 x i64] arm64ec_x86sign(16) %x.coerce) { +entry: + %0 = load ptr, ptr @pfnstruct14, align 8 + %call = tail call arm64ec_x86sign(16) [2 x i64] %0([2 x i64] arm64ec_x86sign(16) %x.coerce) + ret [2 x i64] %call +} + + +define dso_local arm64ec_x86sign(16) i128 @callstruct15(i128 arm64ec_x86sign(16) %x.coerce) { +entry: + %0 = load ptr, ptr @pfnstruct15, align 8 + %call = tail call arm64ec_x86sign(16) i128 %0(i128 arm64ec_x86sign(16) %x.coerce) + ret i128 %call +} + + +define dso_local arm64ec_x86sign(32) void @callstruct16(ptr sret(%struct.s16) align 32 %agg.result, ptr arm64ec_x86sign(32) %x) { +entry: + %agg.tmp = alloca %struct.s16, align 32 + %0 = load ptr, ptr @pfnstruct16, align 8 + call void @llvm.memcpy.p0.p0.i64(ptr align 32 dereferenceable(32) %agg.tmp, ptr align 32 dereferenceable(32) %x, i64 32, i1 false) + call arm64ec_x86sign(32) void %0(ptr sret(%struct.s16) align 32 %agg.result, ptr arm64ec_x86sign(32) %agg.tmp) + ret void +} + + +define dso_local arm64ec_x86sign(64) void @callstruct17(ptr sret(%struct.s17) align 64 %agg.result, ptr arm64ec_x86sign(64) %x) { +entry: + %agg.tmp = alloca %struct.s17, align 64 + %0 = load ptr, ptr @pfnstruct17, align 8 + call void @llvm.memcpy.p0.p0.i64(ptr align 64 dereferenceable(64) %agg.tmp, ptr align 64 dereferenceable(64) %x, i64 64, i1 false) + call arm64ec_x86sign(64) void %0(ptr sret(%struct.s17) align 64 %agg.result, ptr arm64ec_x86sign(64) %agg.tmp) + ret void +} + + +define dso_local arm64ec_x86sign(36) void @callstruct33(ptr sret(%struct.s33) align 4 %agg.result, ptr arm64ec_x86sign(36) %x) { +entry: + %agg.tmp = alloca %struct.s33, align 4 + %0 = load ptr, ptr @pfnstruct33, align 8 + call void @llvm.memcpy.p0.p0.i64(ptr align 4 dereferenceable(36) %agg.tmp, ptr align 4 dereferenceable(36) %x, i64 36, i1 false) + call arm64ec_x86sign(36) void %0(ptr sret(%struct.s33) align 4 %agg.result, ptr arm64ec_x86sign(36) %agg.tmp) + ret void +} + + +define dso_local arm64ec_x86sign(66) void @callstruct65(ptr sret(%struct.s65) align 2 %agg.result, ptr arm64ec_x86sign(66) %x) { +entry: + %agg.tmp = alloca %struct.s65, align 2 + %0 = load ptr, ptr @pfnstruct65, align 8 + call void @llvm.memcpy.p0.p0.i64(ptr align 2 
dereferenceable(66) %agg.tmp, ptr align 2 dereferenceable(66) %x, i64 66, i1 false) + call arm64ec_x86sign(66) void %0(ptr sret(%struct.s65) align 2 %agg.result, ptr arm64ec_x86sign(66) %agg.tmp) + ret void +} + + +define dso_local arm64ec_x86sign(144) void @callstruct129(ptr sret(%struct.s129) align 16 %agg.result, ptr arm64ec_x86sign(144) %x) { +entry: + %agg.tmp = alloca %struct.s129, align 16 + %0 = load ptr, ptr @pfnstruct129, align 8 + call void @llvm.memcpy.p0.p0.i64(ptr align 16 dereferenceable(144) %agg.tmp, ptr align 16 dereferenceable(144) %x, i64 144, i1 false) + call arm64ec_x86sign(144) void %0(ptr sret(%struct.s129) align 16 %agg.result, ptr arm64ec_x86sign(144) %agg.tmp) + ret void +} + + +define dso_local arm64ec_x86sign(288) void @callstruct257(ptr sret(%struct.s257) align 32 %agg.result, ptr arm64ec_x86sign(288) %x) { +entry: + %agg.tmp = alloca %struct.s257, align 32 + %0 = load ptr, ptr @pfnstruct257, align 8 + call void @llvm.memcpy.p0.p0.i64(ptr align 32 dereferenceable(288) %agg.tmp, ptr align 32 dereferenceable(288) %x, i64 288, i1 false) + call arm64ec_x86sign(288) void %0(ptr sret(%struct.s257) align 32 %agg.result, ptr arm64ec_x86sign(288) %agg.tmp) + ret void +} +; CHECK-LABEL: callstruct1: +; CHECK: .seh_proc callstruct1 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct1 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m$m1) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m$m1) +; CHECK-NEXT: and x0, x0, #0xff +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct1] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct2: +; CHECK: .seh_proc callstruct2 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct2 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m8$m8) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m8$m8) +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct2] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct3: +; CHECK: .seh_proc callstruct3 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr_x 16 +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: .seh_set_fp +; CHECK-NEXT: sub x9, sp, #112 +; CHECK-NEXT: and sp, x9, #0xffffffffffffffc0 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x9, pfnstruct3 +; CHECK-NEXT: adrp x10, __os_arm64x_check_icall +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ldp q2, q3, [x0, #32] +; CHECK-NEXT: stp q0, q1, [sp] +; CHECK-NEXT: ldr x11, [x9, :lo12:pfnstruct3] +; CHECK-NEXT: stp q2, q3, [sp, #32] +; CHECK-NEXT: ldr x9, [x10, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$v$i8i8) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$v$i8i8) +; CHECK-NEXT: blr x9 +; CHECK-NEXT: mov x0, sp +; CHECK-NEXT: blr x11 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: mov sp, x29 +; CHECK-NEXT: .seh_set_fp +; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr_x 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct4: +; CHECK: .seh_proc callstruct4 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr_x 16 +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: .seh_set_fp +; CHECK-NEXT: sub x9, sp, #48 +; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x9, pfnstruct4 +; CHECK-NEXT: adrp x10, __os_arm64x_check_icall +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ldr x11, [x9, :lo12:pfnstruct4] +; CHECK-NEXT: stp q0, q1, [sp] +; CHECK-NEXT: ldr x9, [x10, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$v$i8i8.1) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$v$i8i8.1) +; CHECK-NEXT: blr x9 +; CHECK-NEXT: mov x0, sp +; CHECK-NEXT: blr x11 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: mov sp, x29 +; CHECK-NEXT: .seh_set_fp +; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr_x 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct5: +; CHECK: .seh_proc callstruct5 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct5 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m8$m6) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m8$m6) +; CHECK-NEXT: and x0, x0, #0xffffffffffff +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct5] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct6: +; CHECK: .seh_proc callstruct6 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct6 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m8$m8.2) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m8$m8.2) +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct6] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct7: +; CHECK: .seh_proc callstruct7 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct7 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m8$m8.3) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m8$m8.3) +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct7] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct8: +; CHECK: .seh_proc callstruct8 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct8 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m16a16$m16a16) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m16a16$m16a16) +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct8] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct9: +; CHECK: .seh_proc callstruct9 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct9 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m12$m12) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m12$m12) +; CHECK-NEXT: and x1, x1, #0xffffffff +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct9] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: blr x11 +; CHECK-NEXT: and x1, x1, #0xffffffff +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct10: +; CHECK: .seh_proc callstruct10 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct10 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m12$m10) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m12$m10) +; CHECK-NEXT: and x1, x1, #0xffff +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct10] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: blr x11 +; CHECK-NEXT: and x1, x1, #0xffff +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct11: +; CHECK: .seh_proc callstruct11 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct11 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m12$m11) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m12$m11) +; CHECK-NEXT: and x1, x1, #0xffffff +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct11] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: blr x11 +; CHECK-NEXT: and x1, x1, #0xffffff +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct12: +; CHECK: .seh_proc callstruct12 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr_x 16 +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: .seh_set_fp +; CHECK-NEXT: sub x9, sp, #112 +; CHECK-NEXT: and sp, x9, #0xffffffffffffffc0 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x9, pfnstruct12 +; CHECK-NEXT: adrp x10, __os_arm64x_check_icall +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ldp q2, q3, [x0, #32] +; CHECK-NEXT: stp q0, q1, [sp] +; CHECK-NEXT: ldr x11, [x9, :lo12:pfnstruct12] +; CHECK-NEXT: stp q2, q3, [sp, #32] +; CHECK-NEXT: ldr x9, [x10, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$v$i8i8.4) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$v$i8i8.4) +; CHECK-NEXT: blr x9 +; CHECK-NEXT: mov x0, sp +; CHECK-NEXT: blr x11 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: mov sp, x29 +; CHECK-NEXT: .seh_set_fp +; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr_x 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct13: +; CHECK: .seh_proc callstruct13 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct13 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m16$m16) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m16$m16) +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct13] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct14: +; CHECK: .seh_proc callstruct14 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct14 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m16$m16.5) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m16$m16.5) +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct14] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct15: +; CHECK: .seh_proc callstruct15 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct15 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m16a16$m16a16.6) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m16a16$m16a16.6) +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct15] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct16: +; CHECK: .seh_proc callstruct16 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr_x 16 +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: .seh_set_fp +; CHECK-NEXT: sub x9, sp, #48 +; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x9, pfnstruct16 +; CHECK-NEXT: adrp x10, __os_arm64x_check_icall +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ldr x11, [x9, :lo12:pfnstruct16] +; CHECK-NEXT: stp q0, q1, [sp] +; CHECK-NEXT: ldr x9, [x10, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$v$i8i8.7) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$v$i8i8.7) +; CHECK-NEXT: blr x9 +; CHECK-NEXT: mov x0, sp +; CHECK-NEXT: blr x11 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: mov sp, x29 +; CHECK-NEXT: .seh_set_fp +; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr_x 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct17: +; CHECK: .seh_proc callstruct17 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr_x 16 +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: .seh_set_fp +; CHECK-NEXT: sub x9, sp, #112 +; CHECK-NEXT: and sp, x9, #0xffffffffffffffc0 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x9, pfnstruct17 +; CHECK-NEXT: adrp x10, __os_arm64x_check_icall +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ldp q2, q3, [x0, #32] +; CHECK-NEXT: stp q0, q1, [sp] +; CHECK-NEXT: ldr x11, [x9, :lo12:pfnstruct17] +; CHECK-NEXT: stp q2, q3, [sp, #32] +; CHECK-NEXT: ldr x9, [x10, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$v$i8i8.8) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$v$i8i8.8) +; CHECK-NEXT: blr x9 +; CHECK-NEXT: mov x0, sp +; CHECK-NEXT: blr x11 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: mov sp, x29 +; CHECK-NEXT: .seh_set_fp +; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr_x 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct33: +; CHECK: .seh_proc callstruct33 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: sub sp, sp, #64 +; CHECK-NEXT: .seh_stackalloc 64 +; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg x30, 48 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x9, pfnstruct33 +; CHECK-NEXT: ldr w10, [x0, #32] +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: str w10, [sp, #32] +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$v$i8i8.9) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$v$i8i8.9) +; CHECK-NEXT: ldr x11, [x9, :lo12:pfnstruct33] +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: stp q0, q1, [sp] +; CHECK-NEXT: ldr x9, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x9 +; CHECK-NEXT: mov x0, sp +; CHECK-NEXT: blr x11 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg x30, 48 +; CHECK-NEXT: add sp, sp, #64 +; CHECK-NEXT: .seh_stackalloc 64 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct65: +; CHECK: .seh_proc callstruct65 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: sub sp, sp, #96 +; CHECK-NEXT: .seh_stackalloc 96 +; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill 
+; CHECK-NEXT: .seh_save_reg x30, 80 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x9, pfnstruct65 +; CHECK-NEXT: ldrh w10, [x0, #64] +; CHECK-NEXT: ldp q0, q1, [x0, #32] +; CHECK-NEXT: adrp x12, __os_arm64x_check_icall +; CHECK-NEXT: strh w10, [sp, #64] +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$v$i8i8.10) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$v$i8i8.10) +; CHECK-NEXT: ldp q3, q2, [x0] +; CHECK-NEXT: stp q0, q1, [sp, #32] +; CHECK-NEXT: ldr x11, [x9, :lo12:pfnstruct65] +; CHECK-NEXT: stp q3, q2, [sp] +; CHECK-NEXT: ldr x9, [x12, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x9 +; CHECK-NEXT: mov x0, sp +; CHECK-NEXT: blr x11 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp, #80] // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg x30, 80 +; CHECK-NEXT: add sp, sp, #96 +; CHECK-NEXT: .seh_stackalloc 96 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct129: +; CHECK: .seh_proc callstruct129 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: sub sp, sp, #160 +; CHECK-NEXT: .seh_stackalloc 160 +; CHECK-NEXT: str x30, [sp, #144] // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg x30, 144 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: ldp q0, q1, [x0, #96] +; CHECK-NEXT: adrp x9, pfnstruct129 +; CHECK-NEXT: adrp x10, __os_arm64x_check_icall +; CHECK-NEXT: ldr q2, [x0, #128] +; CHECK-NEXT: stp q0, q1, [sp, #96] +; CHECK-NEXT: ldp q3, q0, [x0, #16] +; CHECK-NEXT: str q2, [sp, #128] +; CHECK-NEXT: ldp q4, q2, [x0, #48] +; CHECK-NEXT: stp q0, q4, [sp, #32] +; CHECK-NEXT: ldr q1, [x0, #80] +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ldr x11, [x9, :lo12:pfnstruct129] +; CHECK-NEXT: ldr x9, [x10, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$v$i8i8.11) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$v$i8i8.11) +; CHECK-NEXT: stp q0, q3, [sp] +; CHECK-NEXT: stp q2, q1, [sp, #64] +; CHECK-NEXT: blr x9 +; CHECK-NEXT: mov x0, sp +; CHECK-NEXT: blr x11 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp, #144] // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg x30, 144 +; CHECK-NEXT: add sp, sp, #160 +; CHECK-NEXT: .seh_stackalloc 160 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct257: +; CHECK: .seh_proc callstruct257 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: stp x19, x20, [sp, #-48]! 
// 16-byte Folded Spill +; CHECK-NEXT: .seh_save_regp_x x19, 48 +; CHECK-NEXT: str x27, [sp, #16] // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg x27, 16 +; CHECK-NEXT: stp x29, x30, [sp, #24] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 24 +; CHECK-NEXT: add x29, sp, #24 +; CHECK-NEXT: .seh_add_fp 24 +; CHECK-NEXT: sub x9, sp, #304 +; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: mov x19, x8 +; CHECK-NEXT: adrp x8, pfnstruct257 +; CHECK-NEXT: mov x1, x0 +; CHECK-NEXT: mov x0, sp +; CHECK-NEXT: mov w2, #288 +; CHECK-NEXT: ldr x20, [x8, :lo12:pfnstruct257] +; CHECK-NEXT: bl "#memcpy" +; CHECK-NEXT: adrp x8, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$v$i8i8.12) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$v$i8i8.12) +; CHECK-NEXT: mov x11, x20 +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: mov x0, sp +; CHECK-NEXT: mov x8, x19 +; CHECK-NEXT: blr x11 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: sub sp, x29, #24 +; CHECK-NEXT: .seh_add_fp 24 +; CHECK-NEXT: ldp x29, x30, [sp, #24] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 24 +; CHECK-NEXT: ldr x27, [sp, #16] // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg x27, 16 +; CHECK-NEXT: ldp x19, x20, [sp], #48 // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_regp_x x19, 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$m$m1: +; CHECK: .seh_proc $iexit_thunk$cdecl$m$m1 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add x29, sp, #32 +; CHECK-NEXT: .seh_add_fp 32 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: mov w0, w8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$m8$m8: +; CHECK: .seh_proc $iexit_thunk$cdecl$m8$m8 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add x29, sp, #32 +; CHECK-NEXT: .seh_add_fp 32 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$v$i8i8: +; CHECK: .seh_proc $iexit_thunk$cdecl$v$i8i8 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add 
x29, sp, #32 +; CHECK-NEXT: .seh_add_fp 32 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: mov x1, x0 +; CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$v$i8i8.1: +; CHECK: .seh_proc $iexit_thunk$cdecl$v$i8i8.1 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add x29, sp, #32 +; CHECK-NEXT: .seh_add_fp 32 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: mov x1, x0 +; CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$m8$m6: +; CHECK: .seh_proc $iexit_thunk$cdecl$m8$m6 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add x29, sp, #32 +; CHECK-NEXT: .seh_add_fp 32 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$m8$m8.2: +; CHECK: .seh_proc $iexit_thunk$cdecl$m8$m8.2 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add x29, sp, #32 +; CHECK-NEXT: .seh_add_fp 32 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$m8$m8.3: +; CHECK: .seh_proc $iexit_thunk$cdecl$m8$m8.3 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add x29, sp, #32 +; CHECK-NEXT: .seh_add_fp 32 +; 
CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$m16a16$m16a16: +; CHECK: .seh_proc $iexit_thunk$cdecl$m16a16$m16a16 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add x29, sp, #32 +; CHECK-NEXT: .seh_add_fp 32 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$m12$m12: +; CHECK: .seh_proc $iexit_thunk$cdecl$m12$m12 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #80 +; CHECK-NEXT: .seh_stackalloc 80 +; CHECK-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 64 +; CHECK-NEXT: add x29, sp, #64 +; CHECK-NEXT: .seh_add_fp 64 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: stp x0, x1, [sp, #32] +; CHECK-NEXT: sub x0, x29, #16 +; CHECK-NEXT: add x1, sp, #32 +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: ldp x0, x1, [x29, #-16] +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 64 +; CHECK-NEXT: add sp, sp, #80 +; CHECK-NEXT: .seh_stackalloc 80 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$m12$m10: +; CHECK: .seh_proc $iexit_thunk$cdecl$m12$m10 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #80 +; CHECK-NEXT: .seh_stackalloc 80 +; CHECK-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 64 +; CHECK-NEXT: add x29, sp, #64 +; CHECK-NEXT: .seh_add_fp 64 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: stp x0, x1, [sp, #32] +; CHECK-NEXT: sub x0, x29, #16 +; CHECK-NEXT: add x1, sp, #32 +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: ldp x0, x1, [x29, #-16] +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 64 +; CHECK-NEXT: add sp, sp, #80 +; CHECK-NEXT: .seh_stackalloc 80 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$m12$m11: +; CHECK: .seh_proc $iexit_thunk$cdecl$m12$m11 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #80 +; CHECK-NEXT: .seh_stackalloc 80 +; CHECK-NEXT: stp x29, 
x30, [sp, #64] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 64 +; CHECK-NEXT: add x29, sp, #64 +; CHECK-NEXT: .seh_add_fp 64 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: stp x0, x1, [sp, #32] +; CHECK-NEXT: sub x0, x29, #16 +; CHECK-NEXT: add x1, sp, #32 +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: ldp x0, x1, [x29, #-16] +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 64 +; CHECK-NEXT: add sp, sp, #80 +; CHECK-NEXT: .seh_stackalloc 80 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$v$i8i8.4: +; CHECK: .seh_proc $iexit_thunk$cdecl$v$i8i8.4 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add x29, sp, #32 +; CHECK-NEXT: .seh_add_fp 32 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: mov x1, x0 +; CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$m16$m16: +; CHECK: .seh_proc $iexit_thunk$cdecl$m16$m16 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #80 +; CHECK-NEXT: .seh_stackalloc 80 +; CHECK-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 64 +; CHECK-NEXT: add x29, sp, #64 +; CHECK-NEXT: .seh_add_fp 64 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: stp x0, x1, [sp, #32] +; CHECK-NEXT: sub x0, x29, #16 +; CHECK-NEXT: add x1, sp, #32 +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: ldp x0, x1, [x29, #-16] +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 64 +; CHECK-NEXT: add sp, sp, #80 +; CHECK-NEXT: .seh_stackalloc 80 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$m16$m16.5: +; CHECK: .seh_proc $iexit_thunk$cdecl$m16$m16.5 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #80 +; CHECK-NEXT: .seh_stackalloc 80 +; CHECK-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 64 +; CHECK-NEXT: add x29, sp, #64 +; CHECK-NEXT: .seh_add_fp 64 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: stp x0, x1, [sp, #32] +; CHECK-NEXT: sub x0, x29, #16 +; CHECK-NEXT: add x1, sp, #32 +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: ldp x0, x1, [x29, #-16] +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 64 +; CHECK-NEXT: add sp, sp, #80 +; CHECK-NEXT: .seh_stackalloc 80 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: 
ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$m16a16$m16a16.6: +; CHECK: .seh_proc $iexit_thunk$cdecl$m16a16$m16a16.6 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add x29, sp, #32 +; CHECK-NEXT: .seh_add_fp 32 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$v$i8i8.7: +; CHECK: .seh_proc $iexit_thunk$cdecl$v$i8i8.7 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add x29, sp, #32 +; CHECK-NEXT: .seh_add_fp 32 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: mov x1, x0 +; CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$v$i8i8.8: +; CHECK: .seh_proc $iexit_thunk$cdecl$v$i8i8.8 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add x29, sp, #32 +; CHECK-NEXT: .seh_add_fp 32 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: mov x1, x0 +; CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$v$i8i8.9: +; CHECK: .seh_proc $iexit_thunk$cdecl$v$i8i8.9 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add x29, sp, #32 +; CHECK-NEXT: .seh_add_fp 32 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: mov x1, x0 +; CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: 
ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$v$i8i8.10: +; CHECK: .seh_proc $iexit_thunk$cdecl$v$i8i8.10 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add x29, sp, #32 +; CHECK-NEXT: .seh_add_fp 32 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: mov x1, x0 +; CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$v$i8i8.11: +; CHECK: .seh_proc $iexit_thunk$cdecl$v$i8i8.11 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add x29, sp, #32 +; CHECK-NEXT: .seh_add_fp 32 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: mov x1, x0 +; CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$v$i8i8.12: +; CHECK: .seh_proc $iexit_thunk$cdecl$v$i8i8.12 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add x29, sp, #32 +; CHECK-NEXT: .seh_add_fp 32 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: mov x1, x0 +; CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc Index: llvm/test/CodeGen/AArch64/arm64ec-mangle-basic.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AArch64/arm64ec-mangle-basic.ll @@ -0,0 +1,543 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --include-generated-funcs +; RUN: llc -mtriple=aarch64-pc-windows-msvc_arm64ec < %s | FileCheck %s + +@pfnbool = global ptr null, align 8 +@pfnchar = global ptr null, align 8 +@pfnshort = global ptr null, align 8 +@pfnwchar_t = global ptr null, align 8 +@pfnint = global ptr null, align 8 +@pfni64 = global ptr null, align 8 +@pfnfloat = global ptr null, align 8 +@pfndouble = global ptr null, align 8 +@pfnlongdouble = global ptr null, align 8 +@pfnVOIDP = global ptr null, align 8 + + +define dso_local i1 
@callbool(i1 %x) { +entry: + %0 = load ptr, ptr @pfnbool, align 8 + %call = tail call i1 %0(i1 %x) + ret i1 %call +} + + +define dso_local i8 @callchar(i8 %x) { +entry: + %0 = load ptr, ptr @pfnchar, align 8 + %call = tail call i8 %0(i8 %x) + ret i8 %call +} + + +define dso_local i16 @callshort(i16 %x) { +entry: + %0 = load ptr, ptr @pfnshort, align 8 + %call = tail call i16 %0(i16 %x) + ret i16 %call +} + + +define dso_local i16 @callwchar_t(i16 %x) { +entry: + %0 = load ptr, ptr @pfnwchar_t, align 8 + %call = tail call i16 %0(i16 %x) + ret i16 %call +} + + +define dso_local i32 @callint(i32 %x) { +entry: + %0 = load ptr, ptr @pfnint, align 8 + %call = tail call i32 %0(i32 %x) + ret i32 %call +} + + +define dso_local i64 @calli64(i64 %x) { +entry: + %0 = load ptr, ptr @pfni64, align 8 + %call = tail call i64 %0(i64 %x) + ret i64 %call +} + + +define dso_local float @callfloat(float %x) { +entry: + %0 = load ptr, ptr @pfnfloat, align 8 + %call = tail call float %0(float %x) + ret float %call +} + + +define dso_local double @calldouble(double %x) { +entry: + %0 = load ptr, ptr @pfndouble, align 8 + %call = tail call double %0(double %x) + ret double %call +} + + +define dso_local double @calllongdouble(double %x) { +entry: + %0 = load ptr, ptr @pfnlongdouble, align 8 + %call = tail call double %0(double %x) + ret double %call +} + + +define dso_local ptr @callVOIDP(ptr %x) { +entry: + %0 = load ptr, ptr @pfnVOIDP, align 8 + %call = tail call ptr %0(ptr %x) + ret ptr %call +} +; CHECK-LABEL: callbool: +; CHECK: .seh_proc callbool +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnbool +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$i8$i8) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$i8$i8) +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnbool] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callchar: +; CHECK: .seh_proc callchar +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnchar +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$i8$i8.1) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$i8$i8.1) +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnchar] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callshort: +; CHECK: .seh_proc callshort +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnshort +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$i8$i8.2) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$i8$i8.2) +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnshort] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callwchar_t: +; CHECK: .seh_proc callwchar_t +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnwchar_t +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$i8$i8.3) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$i8$i8.3) +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnwchar_t] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callint: +; CHECK: .seh_proc callint +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnint +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$i8$i8.4) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$i8$i8.4) +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnint] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: calli64: +; CHECK: .seh_proc calli64 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfni64 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$i8$i8.5) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$i8$i8.5) +; CHECK-NEXT: ldr x11, [x8, :lo12:pfni64] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callfloat: +; CHECK: .seh_proc callfloat +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnfloat +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$f$f) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$f$f) +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnfloat] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: calldouble: +; CHECK: .seh_proc calldouble +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfndouble +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$d$d) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$d$d) +; CHECK-NEXT: ldr x11, [x8, :lo12:pfndouble] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: calllongdouble: +; CHECK: .seh_proc calllongdouble +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnlongdouble +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$d$d.6) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$d$d.6) +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnlongdouble] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callVOIDP: +; CHECK: .seh_proc callVOIDP +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnVOIDP +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$i8$i8.7) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$i8$i8.7) +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnVOIDP] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$i8$i8: +; CHECK: .seh_proc $iexit_thunk$cdecl$i8$i8 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #64 +; CHECK-NEXT: .seh_stackalloc 64 +; CHECK-NEXT: stp x29, x30, [sp, #48] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 48 +; CHECK-NEXT: add x29, sp, #48 +; CHECK-NEXT: .seh_add_fp 48 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: strb w0, [sp, #32] +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: and w0, w8, #0x1 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #48] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 48 +; CHECK-NEXT: add sp, sp, #64 +; CHECK-NEXT: .seh_stackalloc 64 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$i8$i8.1: +; CHECK: .seh_proc $iexit_thunk$cdecl$i8$i8.1 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #64 +; CHECK-NEXT: .seh_stackalloc 64 +; CHECK-NEXT: stp x29, x30, [sp, #48] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 48 +; CHECK-NEXT: add x29, sp, #48 +; CHECK-NEXT: .seh_add_fp 48 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: strb w0, [sp, #32] +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: mov w0, w8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #48] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 48 +; CHECK-NEXT: add sp, sp, #64 +; CHECK-NEXT: .seh_stackalloc 64 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$i8$i8.2: +; CHECK: .seh_proc $iexit_thunk$cdecl$i8$i8.2 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #64 +; CHECK-NEXT: .seh_stackalloc 64 +; CHECK-NEXT: stp x29, x30, [sp, #48] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 48 +; CHECK-NEXT: add x29, sp, #48 +; CHECK-NEXT: .seh_add_fp 48 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: strh w0, [sp, #32] +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: mov w0, w8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #48] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 48 +; CHECK-NEXT: add sp, sp, #64 +; CHECK-NEXT: .seh_stackalloc 64 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$i8$i8.3: +; CHECK: .seh_proc $iexit_thunk$cdecl$i8$i8.3 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #64 +; CHECK-NEXT: .seh_stackalloc 64 +; CHECK-NEXT: 
stp x29, x30, [sp, #48] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 48 +; CHECK-NEXT: add x29, sp, #48 +; CHECK-NEXT: .seh_add_fp 48 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: strh w0, [sp, #32] +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: mov w0, w8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #48] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 48 +; CHECK-NEXT: add sp, sp, #64 +; CHECK-NEXT: .seh_stackalloc 64 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$i8$i8.4: +; CHECK: .seh_proc $iexit_thunk$cdecl$i8$i8.4 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add x29, sp, #32 +; CHECK-NEXT: .seh_add_fp 32 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: mov w0, w8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$i8$i8.5: +; CHECK: .seh_proc $iexit_thunk$cdecl$i8$i8.5 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add x29, sp, #32 +; CHECK-NEXT: .seh_add_fp 32 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$f$f: +; CHECK: .seh_proc $iexit_thunk$cdecl$f$f +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add x29, sp, #32 +; CHECK-NEXT: .seh_add_fp 32 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$d$d: +; CHECK: .seh_proc $iexit_thunk$cdecl$d$d +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add x29, sp, 
#32 +; CHECK-NEXT: .seh_add_fp 32 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$d$d.6: +; CHECK: .seh_proc $iexit_thunk$cdecl$d$d.6 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add x29, sp, #32 +; CHECK-NEXT: .seh_add_fp 32 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: $iexit_thunk$cdecl$i8$i8.7: +; CHECK: .seh_proc $iexit_thunk$cdecl$i8$i8.7 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add x29, sp, #32 +; CHECK-NEXT: .seh_add_fp 32 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect +; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: .seh_save_fplr 32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc Index: llvm/test/CodeGen/AArch64/arm64ec-mangle-struct.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AArch64/arm64ec-mangle-struct.ll @@ -0,0 +1,1768 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --include-generated-funcs +; RUN: llc -mtriple=aarch64-pc-windows-msvc_arm64ec < %s | FileCheck %s + +%struct.s17 = type { [17 x i8] } +%struct.s32 = type { [32 x i8] } +%struct.s33 = type { [33 x i8] } +%struct.s64 = type { [64 x i8] } +%struct.s65 = type { [65 x i8] } +%struct.s128 = type { [128 x i8] } +%struct.s133 = type { [133 x i8] } +%struct.s192 = type { [192 x i8] } +%struct.s223 = type { [223 x i8] } +%struct.s256 = type { [256 x i8] } +%struct.s257 = type { [257 x i8] } + +@pfnstruct1 = global ptr null, align 8 +@pfnstruct2 = global ptr null, align 8 +@pfnstruct3 = global ptr null, align 8 +@pfnstruct4 = global ptr null, align 8 +@pfnstruct5 = global ptr null, align 8 +@pfnstruct6 = global ptr null, align 8 +@pfnstruct7 = global ptr null, align 8 +@pfnstruct8 = global ptr null, align 8 +@pfnstruct9 = global ptr null, align 8 +@pfnstruct10 = global ptr null, align 8 +@pfnstruct11 = global ptr null, align 8 +@pfnstruct12 = global ptr null, align 8 +@pfnstruct13 = global ptr null, align 8 +@pfnstruct14 
= global ptr null, align 8 +@pfnstruct15 = global ptr null, align 8 +@pfnstruct16 = global ptr null, align 8 +@pfnstruct17 = global ptr null, align 8 +@pfnstruct32 = global ptr null, align 8 +@pfnstruct33 = global ptr null, align 8 +@pfnstruct64 = global ptr null, align 8 +@pfnstruct65 = global ptr null, align 8 +@pfnstruct128 = global ptr null, align 8 +@pfnstruct133 = global ptr null, align 8 +@pfnstruct192 = global ptr null, align 8 +@pfnstruct223 = global ptr null, align 8 +@pfnstruct256 = global ptr null, align 8 +@pfnstruct257 = global ptr null, align 8 + + +define dso_local arm64ec_x86sign(1) i8 @callstruct1(i64 arm64ec_x86sign(1) %x.coerce) { +entry: + %0 = load ptr, ptr @pfnstruct1, align 8 + %coerce.dive1.coerce.0.insert.ext = and i64 %x.coerce, 255 + %call = tail call arm64ec_x86sign(1) i8 %0(i64 arm64ec_x86sign(1) %coerce.dive1.coerce.0.insert.ext) #2 + ret i8 %call +} + + +declare void @llvm.memcpy.p0.p0.i64(ptr writeonly, ptr, i64, i1 immarg) + + +define dso_local arm64ec_x86sign(2) i16 @callstruct2(i64 arm64ec_x86sign(2) %x.coerce) { +entry: + %0 = load ptr, ptr @pfnstruct2, align 8 + %coerce.dive1.coerce.0.insert.ext = and i64 %x.coerce, 65535 + %call = tail call arm64ec_x86sign(2) i16 %0(i64 arm64ec_x86sign(2) %coerce.dive1.coerce.0.insert.ext) #2 + ret i16 %call +} + + +define dso_local arm64ec_x86sign(3) i24 @callstruct3(i64 arm64ec_x86sign(3) %x.coerce) { +entry: + %0 = load ptr, ptr @pfnstruct3, align 8 + %coerce.dive1.coerce.0.insert.ext = and i64 %x.coerce, 16777215 + %call = tail call arm64ec_x86sign(3) i24 %0(i64 arm64ec_x86sign(3) %coerce.dive1.coerce.0.insert.ext) #2 + ret i24 %call +} + + +define dso_local arm64ec_x86sign(4) i32 @callstruct4(i64 arm64ec_x86sign(4) %x.coerce) { +entry: + %0 = load ptr, ptr @pfnstruct4, align 8 + %coerce.dive1.coerce.0.insert.ext = and i64 %x.coerce, 4294967295 + %call = tail call arm64ec_x86sign(4) i32 %0(i64 arm64ec_x86sign(4) %coerce.dive1.coerce.0.insert.ext) #2 + ret i32 %call +} + + +define dso_local arm64ec_x86sign(5) i40 @callstruct5(i64 arm64ec_x86sign(5) %x.coerce) { +entry: + %0 = load ptr, ptr @pfnstruct5, align 8 + %coerce.dive1.coerce.0.insert.ext = and i64 %x.coerce, 1099511627775 + %call = tail call arm64ec_x86sign(5) i40 %0(i64 arm64ec_x86sign(5) %coerce.dive1.coerce.0.insert.ext) #2 + ret i40 %call +} + + +define dso_local arm64ec_x86sign(6) i48 @callstruct6(i64 arm64ec_x86sign(6) %x.coerce) { +entry: + %0 = load ptr, ptr @pfnstruct6, align 8 + %coerce.dive1.coerce.0.insert.ext = and i64 %x.coerce, 281474976710655 + %call = tail call arm64ec_x86sign(6) i48 %0(i64 arm64ec_x86sign(6) %coerce.dive1.coerce.0.insert.ext) #2 + ret i48 %call +} + + +define dso_local arm64ec_x86sign(7) i56 @callstruct7(i64 arm64ec_x86sign(7) %x.coerce) { +entry: + %0 = load ptr, ptr @pfnstruct7, align 8 + %coerce.dive1.coerce.0.insert.ext = and i64 %x.coerce, 72057594037927935 + %call = tail call arm64ec_x86sign(7) i56 %0(i64 arm64ec_x86sign(7) %coerce.dive1.coerce.0.insert.ext) #2 + ret i56 %call +} + + +define dso_local arm64ec_x86sign(8) i64 @callstruct8(i64 arm64ec_x86sign(8) %x.coerce) { +entry: + %0 = load ptr, ptr @pfnstruct8, align 8 + %call = tail call arm64ec_x86sign(8) i64 %0(i64 arm64ec_x86sign(8) %x.coerce) #2 + ret i64 %call +} + + +define dso_local arm64ec_x86sign(9) [2 x i64] @callstruct9([2 x i64] arm64ec_x86sign(9) %x.coerce) { +entry: + %x.coerce.fca.1.extract = extractvalue [2 x i64] %x.coerce, 1 + %0 = load ptr, ptr @pfnstruct9, align 8 + %coerce.dive1.coerce.sroa.2.0.insert.ext = and i64 %x.coerce.fca.1.extract, 255 
+ %.fca.1.insert6 = insertvalue [2 x i64] %x.coerce, i64 %coerce.dive1.coerce.sroa.2.0.insert.ext, 1 + %call = tail call arm64ec_x86sign(9) [2 x i64] %0([2 x i64] arm64ec_x86sign(9) %.fca.1.insert6) #2 + %call.fca.1.extract = extractvalue [2 x i64] %call, 1 + %coerce.dive4.coerce.sroa.2.0.insert.ext = and i64 %call.fca.1.extract, 255 + %.fca.1.insert = insertvalue [2 x i64] %call, i64 %coerce.dive4.coerce.sroa.2.0.insert.ext, 1 + ret [2 x i64] %.fca.1.insert +} + + +define dso_local arm64ec_x86sign(10) [2 x i64] @callstruct10([2 x i64] arm64ec_x86sign(10) %x.coerce) { +entry: + %x.coerce.fca.1.extract = extractvalue [2 x i64] %x.coerce, 1 + %0 = load ptr, ptr @pfnstruct10, align 8 + %coerce.dive1.coerce.sroa.2.0.insert.ext = and i64 %x.coerce.fca.1.extract, 65535 + %.fca.1.insert6 = insertvalue [2 x i64] %x.coerce, i64 %coerce.dive1.coerce.sroa.2.0.insert.ext, 1 + %call = tail call arm64ec_x86sign(10) [2 x i64] %0([2 x i64] arm64ec_x86sign(10) %.fca.1.insert6) #2 + %call.fca.1.extract = extractvalue [2 x i64] %call, 1 + %coerce.dive4.coerce.sroa.2.0.insert.ext = and i64 %call.fca.1.extract, 65535 + %.fca.1.insert = insertvalue [2 x i64] %call, i64 %coerce.dive4.coerce.sroa.2.0.insert.ext, 1 + ret [2 x i64] %.fca.1.insert +} + + +define dso_local arm64ec_x86sign(11) [2 x i64] @callstruct11([2 x i64] arm64ec_x86sign(11) %x.coerce) { +entry: + %x.coerce.fca.1.extract = extractvalue [2 x i64] %x.coerce, 1 + %0 = load ptr, ptr @pfnstruct11, align 8 + %coerce.dive1.coerce.sroa.2.0.insert.ext = and i64 %x.coerce.fca.1.extract, 16777215 + %.fca.1.insert6 = insertvalue [2 x i64] %x.coerce, i64 %coerce.dive1.coerce.sroa.2.0.insert.ext, 1 + %call = tail call arm64ec_x86sign(11) [2 x i64] %0([2 x i64] arm64ec_x86sign(11) %.fca.1.insert6) #2 + %call.fca.1.extract = extractvalue [2 x i64] %call, 1 + %coerce.dive4.coerce.sroa.2.0.insert.ext = and i64 %call.fca.1.extract, 16777215 + %.fca.1.insert = insertvalue [2 x i64] %call, i64 %coerce.dive4.coerce.sroa.2.0.insert.ext, 1 + ret [2 x i64] %.fca.1.insert +} + + +define dso_local arm64ec_x86sign(12) [2 x i64] @callstruct12([2 x i64] arm64ec_x86sign(12) %x.coerce) { +entry: + %x.coerce.fca.1.extract = extractvalue [2 x i64] %x.coerce, 1 + %0 = load ptr, ptr @pfnstruct12, align 8 + %coerce.dive1.coerce.sroa.2.0.insert.ext = and i64 %x.coerce.fca.1.extract, 4294967295 + %.fca.1.insert6 = insertvalue [2 x i64] %x.coerce, i64 %coerce.dive1.coerce.sroa.2.0.insert.ext, 1 + %call = tail call arm64ec_x86sign(12) [2 x i64] %0([2 x i64] arm64ec_x86sign(12) %.fca.1.insert6) #2 + %call.fca.1.extract = extractvalue [2 x i64] %call, 1 + %coerce.dive4.coerce.sroa.2.0.insert.ext = and i64 %call.fca.1.extract, 4294967295 + %.fca.1.insert = insertvalue [2 x i64] %call, i64 %coerce.dive4.coerce.sroa.2.0.insert.ext, 1 + ret [2 x i64] %.fca.1.insert +} + + +define dso_local arm64ec_x86sign(13) [2 x i64] @callstruct13([2 x i64] arm64ec_x86sign(13) %x.coerce) { +entry: + %x.coerce.fca.1.extract = extractvalue [2 x i64] %x.coerce, 1 + %0 = load ptr, ptr @pfnstruct13, align 8 + %coerce.dive1.coerce.sroa.2.0.insert.ext = and i64 %x.coerce.fca.1.extract, 1099511627775 + %.fca.1.insert6 = insertvalue [2 x i64] %x.coerce, i64 %coerce.dive1.coerce.sroa.2.0.insert.ext, 1 + %call = tail call arm64ec_x86sign(13) [2 x i64] %0([2 x i64] arm64ec_x86sign(13) %.fca.1.insert6) #2 + %call.fca.1.extract = extractvalue [2 x i64] %call, 1 + %coerce.dive4.coerce.sroa.2.0.insert.ext = and i64 %call.fca.1.extract, 1099511627775 + %.fca.1.insert = insertvalue [2 x i64] %call, i64 
%coerce.dive4.coerce.sroa.2.0.insert.ext, 1 + ret [2 x i64] %.fca.1.insert +} + + +define dso_local arm64ec_x86sign(14) [2 x i64] @callstruct14([2 x i64] arm64ec_x86sign(14) %x.coerce) { +entry: + %x.coerce.fca.1.extract = extractvalue [2 x i64] %x.coerce, 1 + %0 = load ptr, ptr @pfnstruct14, align 8 + %coerce.dive1.coerce.sroa.2.0.insert.ext = and i64 %x.coerce.fca.1.extract, 281474976710655 + %.fca.1.insert6 = insertvalue [2 x i64] %x.coerce, i64 %coerce.dive1.coerce.sroa.2.0.insert.ext, 1 + %call = tail call arm64ec_x86sign(14) [2 x i64] %0([2 x i64] arm64ec_x86sign(14) %.fca.1.insert6) #2 + %call.fca.1.extract = extractvalue [2 x i64] %call, 1 + %coerce.dive4.coerce.sroa.2.0.insert.ext = and i64 %call.fca.1.extract, 281474976710655 + %.fca.1.insert = insertvalue [2 x i64] %call, i64 %coerce.dive4.coerce.sroa.2.0.insert.ext, 1 + ret [2 x i64] %.fca.1.insert +} + + +define dso_local arm64ec_x86sign(15) [2 x i64] @callstruct15([2 x i64] arm64ec_x86sign(15) %x.coerce) { +entry: + %x.coerce.fca.1.extract = extractvalue [2 x i64] %x.coerce, 1 + %0 = load ptr, ptr @pfnstruct15, align 8 + %coerce.dive1.coerce.sroa.2.0.insert.ext = and i64 %x.coerce.fca.1.extract, 72057594037927935 + %.fca.1.insert6 = insertvalue [2 x i64] %x.coerce, i64 %coerce.dive1.coerce.sroa.2.0.insert.ext, 1 + %call = tail call arm64ec_x86sign(15) [2 x i64] %0([2 x i64] arm64ec_x86sign(15) %.fca.1.insert6) #2 + %call.fca.1.extract = extractvalue [2 x i64] %call, 1 + %coerce.dive4.coerce.sroa.2.0.insert.ext = and i64 %call.fca.1.extract, 72057594037927935 + %.fca.1.insert = insertvalue [2 x i64] %call, i64 %coerce.dive4.coerce.sroa.2.0.insert.ext, 1 + ret [2 x i64] %.fca.1.insert +} + + +define dso_local arm64ec_x86sign(16) [2 x i64] @callstruct16([2 x i64] arm64ec_x86sign(16) %x.coerce) { +entry: + %0 = load ptr, ptr @pfnstruct16, align 8 + %call = tail call arm64ec_x86sign(16) [2 x i64] %0([2 x i64] arm64ec_x86sign(16) %x.coerce) #2 + ret [2 x i64] %call +} + + +define dso_local arm64ec_x86sign(17) void @callstruct17(ptr sret(%struct.s17) align 1 %agg.result, ptr arm64ec_x86sign(17) %x) { +entry: + %agg.tmp = alloca %struct.s17, align 1 + %0 = load ptr, ptr @pfnstruct17, align 8 + call void @llvm.memcpy.p0.p0.i64(ptr align 1 dereferenceable(17) %agg.tmp, ptr align 1 dereferenceable(17) %x, i64 17, i1 false) + call arm64ec_x86sign(17) void %0(ptr sret(%struct.s17) align 1 %agg.result, ptr arm64ec_x86sign(17) %agg.tmp) #2 + ret void +} + + +define dso_local arm64ec_x86sign(32) void @callstruct32(ptr sret(%struct.s32) align 1 %agg.result, ptr arm64ec_x86sign(32) %x) { +entry: + %agg.tmp = alloca %struct.s32, align 1 + %0 = load ptr, ptr @pfnstruct32, align 8 + call void @llvm.memcpy.p0.p0.i64(ptr align 1 dereferenceable(32) %agg.tmp, ptr align 1 dereferenceable(32) %x, i64 32, i1 false) + call arm64ec_x86sign(32) void %0(ptr sret(%struct.s32) align 1 %agg.result, ptr arm64ec_x86sign(32) %agg.tmp) #2 + ret void +} + + +define dso_local arm64ec_x86sign(33) void @callstruct33(ptr sret(%struct.s33) align 1 %agg.result, ptr arm64ec_x86sign(33) %x) { +entry: + %agg.tmp = alloca %struct.s33, align 1 + %0 = load ptr, ptr @pfnstruct33, align 8 + call void @llvm.memcpy.p0.p0.i64(ptr align 1 dereferenceable(33) %agg.tmp, ptr align 1 dereferenceable(33) %x, i64 33, i1 false) + call arm64ec_x86sign(33) void %0(ptr sret(%struct.s33) align 1 %agg.result, ptr arm64ec_x86sign(33) %agg.tmp) #2 + ret void +} + + +define dso_local arm64ec_x86sign(64) void @callstruct64(ptr sret(%struct.s64) align 1 %agg.result, ptr arm64ec_x86sign(64) %x) { 
+entry: + %agg.tmp = alloca %struct.s64, align 1 + %0 = load ptr, ptr @pfnstruct64, align 8 + call void @llvm.memcpy.p0.p0.i64(ptr align 1 dereferenceable(64) %agg.tmp, ptr align 1 dereferenceable(64) %x, i64 64, i1 false) + call arm64ec_x86sign(64) void %0(ptr sret(%struct.s64) align 1 %agg.result, ptr arm64ec_x86sign(64) %agg.tmp) #2 + ret void +} + + +define dso_local arm64ec_x86sign(65) void @callstruct65(ptr sret(%struct.s65) align 1 %agg.result, ptr arm64ec_x86sign(65) %x) { +entry: + %agg.tmp = alloca %struct.s65, align 1 + %0 = load ptr, ptr @pfnstruct65, align 8 + call void @llvm.memcpy.p0.p0.i64(ptr align 1 dereferenceable(65) %agg.tmp, ptr align 1 dereferenceable(65) %x, i64 65, i1 false) + call arm64ec_x86sign(65) void %0(ptr sret(%struct.s65) align 1 %agg.result, ptr arm64ec_x86sign(65) %agg.tmp) #2 + ret void +} + + +define dso_local arm64ec_x86sign(128) void @callstruct128(ptr sret(%struct.s128) align 1 %agg.result, ptr arm64ec_x86sign(128) %x) { +entry: + %agg.tmp = alloca %struct.s128, align 1 + %0 = load ptr, ptr @pfnstruct128, align 8 + call void @llvm.memcpy.p0.p0.i64(ptr align 1 dereferenceable(128) %agg.tmp, ptr align 1 dereferenceable(128) %x, i64 128, i1 false) + call arm64ec_x86sign(128) void %0(ptr sret(%struct.s128) align 1 %agg.result, ptr arm64ec_x86sign(128) %agg.tmp) #2 + ret void +} + + +define dso_local arm64ec_x86sign(133) void @callstruct133(ptr sret(%struct.s133) align 1 %agg.result, ptr arm64ec_x86sign(133) %x) { +entry: + %agg.tmp = alloca %struct.s133, align 1 + %0 = load ptr, ptr @pfnstruct133, align 8 + call void @llvm.memcpy.p0.p0.i64(ptr align 1 dereferenceable(133) %agg.tmp, ptr align 1 dereferenceable(133) %x, i64 133, i1 false) + call arm64ec_x86sign(133) void %0(ptr sret(%struct.s133) align 1 %agg.result, ptr arm64ec_x86sign(133) %agg.tmp) #2 + ret void +} + + +define dso_local arm64ec_x86sign(192) void @callstruct192(ptr sret(%struct.s192) align 1 %agg.result, ptr arm64ec_x86sign(192) %x) { +entry: + %agg.tmp = alloca %struct.s192, align 1 + %0 = load ptr, ptr @pfnstruct192, align 8 + call void @llvm.memcpy.p0.p0.i64(ptr align 1 dereferenceable(192) %agg.tmp, ptr align 1 dereferenceable(192) %x, i64 192, i1 false) + call arm64ec_x86sign(192) void %0(ptr sret(%struct.s192) align 1 %agg.result, ptr arm64ec_x86sign(192) %agg.tmp) #2 + ret void +} + + +define dso_local arm64ec_x86sign(223) void @callstruct223(ptr sret(%struct.s223) align 1 %agg.result, ptr arm64ec_x86sign(223) %x) { +entry: + %agg.tmp = alloca %struct.s223, align 1 + %0 = load ptr, ptr @pfnstruct223, align 8 + call void @llvm.memcpy.p0.p0.i64(ptr align 1 dereferenceable(223) %agg.tmp, ptr align 1 dereferenceable(223) %x, i64 223, i1 false) + call arm64ec_x86sign(223) void %0(ptr sret(%struct.s223) align 1 %agg.result, ptr arm64ec_x86sign(223) %agg.tmp) #2 + ret void +} + + +define dso_local arm64ec_x86sign(256) void @callstruct256(ptr sret(%struct.s256) align 1 %agg.result, ptr arm64ec_x86sign(256) %x) { +entry: + %agg.tmp = alloca %struct.s256, align 1 + %0 = load ptr, ptr @pfnstruct256, align 8 + call void @llvm.memcpy.p0.p0.i64(ptr align 1 dereferenceable(256) %agg.tmp, ptr align 1 dereferenceable(256) %x, i64 256, i1 false) + call arm64ec_x86sign(256) void %0(ptr sret(%struct.s256) align 1 %agg.result, ptr arm64ec_x86sign(256) %agg.tmp) #2 + ret void +} + + +define dso_local arm64ec_x86sign(257) void @callstruct257(ptr sret(%struct.s257) align 1 %agg.result, ptr arm64ec_x86sign(257) %x) { +entry: + %agg.tmp = alloca %struct.s257, align 1 + %0 = load ptr, ptr @pfnstruct257, 
align 8 + call void @llvm.memcpy.p0.p0.i64(ptr align 1 dereferenceable(257) %agg.tmp, ptr align 1 dereferenceable(257) %x, i64 257, i1 false) + call arm64ec_x86sign(257) void %0(ptr sret(%struct.s257) align 1 %agg.result, ptr arm64ec_x86sign(257) %agg.tmp) #2 + ret void +} +; CHECK-LABEL: callstruct1: +; CHECK: .seh_proc callstruct1 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct1 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m$m1) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m$m1) +; CHECK-NEXT: and x0, x0, #0xff +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct1] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct2: +; CHECK: .seh_proc callstruct2 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct2 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m$m2) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m$m2) +; CHECK-NEXT: and x0, x0, #0xffff +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct2] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct3: +; CHECK: .seh_proc callstruct3 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct3 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m$m3) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m$m3) +; CHECK-NEXT: and x0, x0, #0xffffff +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct3] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct4: +; CHECK: .seh_proc callstruct4 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct4 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m$m) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m$m) +; CHECK-NEXT: and x0, x0, #0xffffffff +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct4] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct5: +; CHECK: .seh_proc callstruct5 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct5 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m8$m5) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m8$m5) +; CHECK-NEXT: and x0, x0, #0xffffffffff +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct5] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct6: +; CHECK: .seh_proc callstruct6 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct6 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m8$m6) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m8$m6) +; CHECK-NEXT: and x0, x0, #0xffffffffffff +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct6] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct7: +; CHECK: .seh_proc callstruct7 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct7 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m8$m7) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m8$m7) +; CHECK-NEXT: and x0, x0, #0xffffffffffffff +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct7] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct8: +; CHECK: .seh_proc callstruct8 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct8 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m8$m8) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m8$m8) +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct8] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct9: +; CHECK: .seh_proc callstruct9 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct9 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m12$m9) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m12$m9) +; CHECK-NEXT: and x1, x1, #0xff +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct9] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: blr x11 +; CHECK-NEXT: and x1, x1, #0xff +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct10: +; CHECK: .seh_proc callstruct10 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct10 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m12$m10) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m12$m10) +; CHECK-NEXT: and x1, x1, #0xffff +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct10] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: blr x11 +; CHECK-NEXT: and x1, x1, #0xffff +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct11: +; CHECK: .seh_proc callstruct11 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct11 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m12$m11) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m12$m11) +; CHECK-NEXT: and x1, x1, #0xffffff +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct11] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: blr x11 +; CHECK-NEXT: and x1, x1, #0xffffff +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct12: +; CHECK: .seh_proc callstruct12 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct12 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m12$m12) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m12$m12) +; CHECK-NEXT: and x1, x1, #0xffffffff +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct12] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: blr x11 +; CHECK-NEXT: and x1, x1, #0xffffffff +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct13: +; CHECK: .seh_proc callstruct13 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct13 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m16$m13) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m16$m13) +; CHECK-NEXT: and x1, x1, #0xffffffffff +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct13] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: blr x11 +; CHECK-NEXT: and x1, x1, #0xffffffffff +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct14: +; CHECK: .seh_proc callstruct14 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct14 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m16$m14) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m16$m14) +; CHECK-NEXT: and x1, x1, #0xffffffffffff +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct14] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: blr x11 +; CHECK-NEXT: and x1, x1, #0xffffffffffff +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct15: +; CHECK: .seh_proc callstruct15 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct15 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m16$m15) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m16$m15) +; CHECK-NEXT: and x1, x1, #0xffffffffffffff +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct15] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: blr x11 +; CHECK-NEXT: and x1, x1, #0xffffffffffffff +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct16: +; CHECK: .seh_proc callstruct16 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x8, pfnstruct16 +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$m16$m16) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$m16$m16) +; CHECK-NEXT: ldr x11, [x8, :lo12:pfnstruct16] +; CHECK-NEXT: ldr x8, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x8 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg_x x30, 16 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: br x11 +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct17: +; CHECK: .seh_proc callstruct17 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg x30, 32 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x9, pfnstruct17 +; CHECK-NEXT: adrp x10, __os_arm64x_check_icall +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ldr x11, [x9, :lo12:pfnstruct17] +; CHECK-NEXT: ldrb w9, [x0, #16] +; CHECK-NEXT: str q0, [sp] +; CHECK-NEXT: ldr x12, [x10, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$v$i8i8) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$v$i8i8) +; CHECK-NEXT: strb w9, [sp, #16] +; CHECK-NEXT: blr x12 +; CHECK-NEXT: mov x0, sp +; CHECK-NEXT: blr x11 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg x30, 32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct32: +; CHECK: .seh_proc callstruct32 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: sub sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg x30, 32 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x9, pfnstruct32 +; CHECK-NEXT: adrp x10, __os_arm64x_check_icall +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ldr x11, [x9, :lo12:pfnstruct32] +; CHECK-NEXT: stp q0, q1, [sp] +; CHECK-NEXT: ldr x9, [x10, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$v$i8i8.1) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$v$i8i8.1) +; CHECK-NEXT: blr x9 +; CHECK-NEXT: mov x0, sp +; CHECK-NEXT: blr x11 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg x30, 
32 +; CHECK-NEXT: add sp, sp, #48 +; CHECK-NEXT: .seh_stackalloc 48 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct33: +; CHECK: .seh_proc callstruct33 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: sub sp, sp, #64 +; CHECK-NEXT: .seh_stackalloc 64 +; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg x30, 48 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x9, pfnstruct33 +; CHECK-NEXT: ldrb w10, [x0, #32] +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: strb w10, [sp, #32] +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$v$i8i8.2) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$v$i8i8.2) +; CHECK-NEXT: ldr x11, [x9, :lo12:pfnstruct33] +; CHECK-NEXT: adrp x9, __os_arm64x_check_icall +; CHECK-NEXT: stp q0, q1, [sp] +; CHECK-NEXT: ldr x9, [x9, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x9 +; CHECK-NEXT: mov x0, sp +; CHECK-NEXT: blr x11 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg x30, 48 +; CHECK-NEXT: add sp, sp, #64 +; CHECK-NEXT: .seh_stackalloc 64 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct64: +; CHECK: .seh_proc callstruct64 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: sub sp, sp, #80 +; CHECK-NEXT: .seh_stackalloc 80 +; CHECK-NEXT: str x30, [sp, #64] // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg x30, 64 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x9, pfnstruct64 +; CHECK-NEXT: adrp x10, __os_arm64x_check_icall +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ldp q2, q3, [x0, #32] +; CHECK-NEXT: stp q0, q1, [sp] +; CHECK-NEXT: ldr x11, [x9, :lo12:pfnstruct64] +; CHECK-NEXT: stp q2, q3, [sp, #32] +; CHECK-NEXT: ldr x9, [x10, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$v$i8i8.3) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$v$i8i8.3) +; CHECK-NEXT: blr x9 +; CHECK-NEXT: mov x0, sp +; CHECK-NEXT: blr x11 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp, #64] // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg x30, 64 +; CHECK-NEXT: add sp, sp, #80 +; CHECK-NEXT: .seh_stackalloc 80 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: .seh_endfunclet +; CHECK-NEXT: .seh_endproc +; +; CHECK-LABEL: callstruct65: +; CHECK: .seh_proc callstruct65 +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: sub sp, sp, #96 +; CHECK-NEXT: .seh_stackalloc 96 +; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill +; CHECK-NEXT: .seh_save_reg x30, 80 +; CHECK-NEXT: .seh_endprologue +; CHECK-NEXT: adrp x9, pfnstruct65 +; CHECK-NEXT: ldrb w10, [x0, #64] +; CHECK-NEXT: ldp q0, q1, [x0, #32] +; CHECK-NEXT: adrp x12, __os_arm64x_check_icall +; CHECK-NEXT: strb w10, [sp, #64] +; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$v$i8i8.4) +; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$v$i8i8.4) +; CHECK-NEXT: ldp q3, q2, [x0] +; CHECK-NEXT: stp q0, q1, [sp, #32] +; CHECK-NEXT: ldr x11, [x9, :lo12:pfnstruct65] +; CHECK-NEXT: stp q3, q2, [sp] +; CHECK-NEXT: ldr x9, [x12, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT: blr x9 +; CHECK-NEXT: mov x0, sp +; CHECK-NEXT: blr x11 +; CHECK-NEXT: .seh_startepilogue +; CHECK-NEXT: ldr x30, [sp, #80] // 8-byte Folded Reload +; CHECK-NEXT: .seh_save_reg x30, 80 +; CHECK-NEXT: add sp, sp, #96 +; CHECK-NEXT: .seh_stackalloc 96 +; CHECK-NEXT: .seh_endepilogue +; CHECK-NEXT: ret +; CHECK-NEXT: 
.seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: callstruct128:
+; CHECK: .seh_proc callstruct128
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #144
+; CHECK-NEXT: .seh_stackalloc 144
+; CHECK-NEXT: str x30, [sp, #128] // 8-byte Folded Spill
+; CHECK-NEXT: .seh_save_reg x30, 128
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: ldp q0, q1, [x0, #64]
+; CHECK-NEXT: adrp x9, pfnstruct128
+; CHECK-NEXT: adrp x10, __os_arm64x_check_icall
+; CHECK-NEXT: ldp q2, q3, [x0, #96]
+; CHECK-NEXT: stp q0, q1, [sp, #64]
+; CHECK-NEXT: ldp q0, q4, [x0]
+; CHECK-NEXT: stp q2, q3, [sp, #96]
+; CHECK-NEXT: ldp q1, q2, [x0, #32]
+; CHECK-NEXT: stp q0, q4, [sp]
+; CHECK-NEXT: ldr x11, [x9, :lo12:pfnstruct128]
+; CHECK-NEXT: stp q1, q2, [sp, #32]
+; CHECK-NEXT: ldr x9, [x10, :lo12:__os_arm64x_check_icall]
+; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$v$i8i8.5)
+; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$v$i8i8.5)
+; CHECK-NEXT: blr x9
+; CHECK-NEXT: mov x0, sp
+; CHECK-NEXT: blr x11
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldr x30, [sp, #128] // 8-byte Folded Reload
+; CHECK-NEXT: .seh_save_reg x30, 128
+; CHECK-NEXT: add sp, sp, #144
+; CHECK-NEXT: .seh_stackalloc 144
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: callstruct133:
+; CHECK: .seh_proc callstruct133
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #160
+; CHECK-NEXT: .seh_stackalloc 160
+; CHECK-NEXT: str x30, [sp, #144] // 8-byte Folded Spill
+; CHECK-NEXT: .seh_save_reg x30, 144
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: ldp q0, q1, [x0, #96]
+; CHECK-NEXT: adrp x9, pfnstruct133
+; CHECK-NEXT: adrp x12, __os_arm64x_check_icall
+; CHECK-NEXT: ldp q2, q3, [x0, #32]
+; CHECK-NEXT: stp q0, q1, [sp, #96]
+; CHECK-NEXT: ldur x10, [x0, #125]
+; CHECK-NEXT: stp q2, q3, [sp, #32]
+; CHECK-NEXT: ldp q1, q0, [x0, #64]
+; CHECK-NEXT: stur x10, [sp, #125]
+; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$v$i8i8.6)
+; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$v$i8i8.6)
+; CHECK-NEXT: ldp q3, q2, [x0]
+; CHECK-NEXT: stp q1, q0, [sp, #64]
+; CHECK-NEXT: ldr x11, [x9, :lo12:pfnstruct133]
+; CHECK-NEXT: stp q3, q2, [sp]
+; CHECK-NEXT: ldr x9, [x12, :lo12:__os_arm64x_check_icall]
+; CHECK-NEXT: blr x9
+; CHECK-NEXT: mov x0, sp
+; CHECK-NEXT: blr x11
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldr x30, [sp, #144] // 8-byte Folded Reload
+; CHECK-NEXT: .seh_save_reg x30, 144
+; CHECK-NEXT: add sp, sp, #160
+; CHECK-NEXT: .seh_stackalloc 160
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: callstruct192:
+; CHECK: .seh_proc callstruct192
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #208
+; CHECK-NEXT: .seh_stackalloc 208
+; CHECK-NEXT: str x30, [sp, #192] // 8-byte Folded Spill
+; CHECK-NEXT: .seh_save_reg x30, 192
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: ldp q0, q1, [x0, #128]
+; CHECK-NEXT: adrp x9, pfnstruct192
+; CHECK-NEXT: adrp x10, __os_arm64x_check_icall
+; CHECK-NEXT: ldp q2, q3, [x0, #160]
+; CHECK-NEXT: stp q0, q1, [sp, #128]
+; CHECK-NEXT: ldp q0, q1, [x0, #64]
+; CHECK-NEXT: stp q2, q3, [sp, #160]
+; CHECK-NEXT: ldp q2, q3, [x0, #96]
+; CHECK-NEXT: stp q0, q1, [sp, #64]
+; CHECK-NEXT: ldp q0, q1, [x0]
+; CHECK-NEXT: stp q2, q3, [sp, #96]
+; CHECK-NEXT: ldp q4, q2, [x0, #32]
+; CHECK-NEXT: stp q0, q1, [sp]
+; CHECK-NEXT: ldr x11, [x9, :lo12:pfnstruct192]
+; CHECK-NEXT: stp q4, q2, [sp, #32]
+; CHECK-NEXT: ldr x9, [x10, :lo12:__os_arm64x_check_icall]
+; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$v$i8i8.7)
+; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$v$i8i8.7)
+; CHECK-NEXT: blr x9
+; CHECK-NEXT: mov x0, sp
+; CHECK-NEXT: blr x11
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldr x30, [sp, #192] // 8-byte Folded Reload
+; CHECK-NEXT: .seh_save_reg x30, 192
+; CHECK-NEXT: add sp, sp, #208
+; CHECK-NEXT: .seh_stackalloc 208
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: callstruct223:
+; CHECK: .seh_proc callstruct223
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #240
+; CHECK-NEXT: .seh_stackalloc 240
+; CHECK-NEXT: str x30, [sp, #224] // 8-byte Folded Spill
+; CHECK-NEXT: .seh_save_reg x30, 224
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: ldp q0, q1, [x0, #160]
+; CHECK-NEXT: adrp x9, pfnstruct223
+; CHECK-NEXT: adrp x10, __os_arm64x_check_icall
+; CHECK-NEXT: stp q0, q1, [sp, #160]
+; CHECK-NEXT: ldr q2, [x0, #192]
+; CHECK-NEXT: ldp q0, q1, [x0, #96]
+; CHECK-NEXT: str q2, [sp, #192]
+; CHECK-NEXT: ldur q3, [x0, #207]
+; CHECK-NEXT: stp q0, q1, [sp, #96]
+; CHECK-NEXT: ldp q0, q1, [x0, #32]
+; CHECK-NEXT: stur q3, [sp, #207]
+; CHECK-NEXT: ldp q2, q3, [x0, #128]
+; CHECK-NEXT: stp q0, q1, [sp, #32]
+; CHECK-NEXT: stp q2, q3, [sp, #128]
+; CHECK-NEXT: ldr x11, [x9, :lo12:pfnstruct223]
+; CHECK-NEXT: ldp q2, q3, [x0, #64]
+; CHECK-NEXT: ldp q4, q0, [x0]
+; CHECK-NEXT: stp q2, q3, [sp, #64]
+; CHECK-NEXT: ldr x9, [x10, :lo12:__os_arm64x_check_icall]
+; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$v$i8i8.8)
+; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$v$i8i8.8)
+; CHECK-NEXT: stp q4, q0, [sp]
+; CHECK-NEXT: blr x9
+; CHECK-NEXT: mov x0, sp
+; CHECK-NEXT: blr x11
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldr x30, [sp, #224] // 8-byte Folded Reload
+; CHECK-NEXT: .seh_save_reg x30, 224
+; CHECK-NEXT: add sp, sp, #240
+; CHECK-NEXT: .seh_stackalloc 240
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: callstruct256:
+; CHECK: .seh_proc callstruct256
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #272
+; CHECK-NEXT: .seh_stackalloc 272
+; CHECK-NEXT: stp x29, x30, [sp, #256] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 256
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: ldp q0, q1, [x0, #192]
+; CHECK-NEXT: adrp x9, pfnstruct256
+; CHECK-NEXT: adrp x10, __os_arm64x_check_icall
+; CHECK-NEXT: ldp q2, q3, [x0, #224]
+; CHECK-NEXT: stp q0, q1, [sp, #192]
+; CHECK-NEXT: ldp q0, q1, [x0, #128]
+; CHECK-NEXT: stp q2, q3, [sp, #224]
+; CHECK-NEXT: ldp q2, q3, [x0, #160]
+; CHECK-NEXT: stp q0, q1, [sp, #128]
+; CHECK-NEXT: ldp q0, q1, [x0, #64]
+; CHECK-NEXT: stp q2, q3, [sp, #160]
+; CHECK-NEXT: ldp q2, q3, [x0, #96]
+; CHECK-NEXT: stp q0, q1, [sp, #64]
+; CHECK-NEXT: ldp q0, q1, [x0]
+; CHECK-NEXT: stp q2, q3, [sp, #96]
+; CHECK-NEXT: ldp q4, q2, [x0, #32]
+; CHECK-NEXT: stp q0, q1, [sp]
+; CHECK-NEXT: ldr x11, [x9, :lo12:pfnstruct256]
+; CHECK-NEXT: stp q4, q2, [sp, #32]
+; CHECK-NEXT: ldr x9, [x10, :lo12:__os_arm64x_check_icall]
+; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$v$i8i8.9)
+; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$v$i8i8.9)
+; CHECK-NEXT: blr x9
+; CHECK-NEXT: mov x0, sp
+; CHECK-NEXT: blr x11
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #256] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 256
+; CHECK-NEXT: add sp, sp, #272
+; CHECK-NEXT: .seh_stackalloc 272
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: callstruct257:
+; CHECK: .seh_proc callstruct257
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #304
+; CHECK-NEXT: .seh_stackalloc 304
+; CHECK-NEXT: stp x19, x20, [sp, #272] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_regp x19, 272
+; CHECK-NEXT: stp x29, x30, [sp, #288] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 288
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: mov x19, x8
+; CHECK-NEXT: adrp x8, pfnstruct257
+; CHECK-NEXT: mov x1, x0
+; CHECK-NEXT: add x0, sp, #8
+; CHECK-NEXT: mov w2, #257
+; CHECK-NEXT: ldr x20, [x8, :lo12:pfnstruct257]
+; CHECK-NEXT: bl "#memcpy"
+; CHECK-NEXT: adrp x8, __os_arm64x_check_icall
+; CHECK-NEXT: adrp x10, ($iexit_thunk$cdecl$v$i8i8.10)
+; CHECK-NEXT: add x10, x10, :lo12:($iexit_thunk$cdecl$v$i8i8.10)
+; CHECK-NEXT: mov x11, x20
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_check_icall]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: add x0, sp, #8
+; CHECK-NEXT: mov x8, x19
+; CHECK-NEXT: blr x11
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #288] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 288
+; CHECK-NEXT: ldp x19, x20, [sp, #272] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_regp x19, 272
+; CHECK-NEXT: add sp, sp, #304
+; CHECK-NEXT: .seh_stackalloc 304
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$m$m1:
+; CHECK: .seh_proc $iexit_thunk$cdecl$m$m1
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add x29, sp, #32
+; CHECK-NEXT: .seh_add_fp 32
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: mov w0, w8
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$m$m2:
+; CHECK: .seh_proc $iexit_thunk$cdecl$m$m2
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add x29, sp, #32
+; CHECK-NEXT: .seh_add_fp 32
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: mov w0, w8
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$m$m3:
+; CHECK: .seh_proc $iexit_thunk$cdecl$m$m3
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add x29, sp, #32
+; CHECK-NEXT: .seh_add_fp 32
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: mov w0, w8
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$m$m:
+; CHECK: .seh_proc $iexit_thunk$cdecl$m$m
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add x29, sp, #32
+; CHECK-NEXT: .seh_add_fp 32
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: mov w0, w8
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$m8$m5:
+; CHECK: .seh_proc $iexit_thunk$cdecl$m8$m5
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add x29, sp, #32
+; CHECK-NEXT: .seh_add_fp 32
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: mov x0, x8
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$m8$m6:
+; CHECK: .seh_proc $iexit_thunk$cdecl$m8$m6
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add x29, sp, #32
+; CHECK-NEXT: .seh_add_fp 32
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: mov x0, x8
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$m8$m7:
+; CHECK: .seh_proc $iexit_thunk$cdecl$m8$m7
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add x29, sp, #32
+; CHECK-NEXT: .seh_add_fp 32
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: mov x0, x8
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$m8$m8:
+; CHECK: .seh_proc $iexit_thunk$cdecl$m8$m8
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add x29, sp, #32
+; CHECK-NEXT: .seh_add_fp 32
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: mov x0, x8
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$m12$m9:
+; CHECK: .seh_proc $iexit_thunk$cdecl$m12$m9
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #80
+; CHECK-NEXT: .seh_stackalloc 80
+; CHECK-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 64
+; CHECK-NEXT: add x29, sp, #64
+; CHECK-NEXT: .seh_add_fp 64
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: stp x0, x1, [sp, #32]
+; CHECK-NEXT: sub x0, x29, #16
+; CHECK-NEXT: add x1, sp, #32
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: ldp x0, x1, [x29, #-16]
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 64
+; CHECK-NEXT: add sp, sp, #80
+; CHECK-NEXT: .seh_stackalloc 80
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$m12$m10:
+; CHECK: .seh_proc $iexit_thunk$cdecl$m12$m10
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #80
+; CHECK-NEXT: .seh_stackalloc 80
+; CHECK-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 64
+; CHECK-NEXT: add x29, sp, #64
+; CHECK-NEXT: .seh_add_fp 64
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: stp x0, x1, [sp, #32]
+; CHECK-NEXT: sub x0, x29, #16
+; CHECK-NEXT: add x1, sp, #32
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: ldp x0, x1, [x29, #-16]
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 64
+; CHECK-NEXT: add sp, sp, #80
+; CHECK-NEXT: .seh_stackalloc 80
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$m12$m11:
+; CHECK: .seh_proc $iexit_thunk$cdecl$m12$m11
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #80
+; CHECK-NEXT: .seh_stackalloc 80
+; CHECK-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 64
+; CHECK-NEXT: add x29, sp, #64
+; CHECK-NEXT: .seh_add_fp 64
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: stp x0, x1, [sp, #32]
+; CHECK-NEXT: sub x0, x29, #16
+; CHECK-NEXT: add x1, sp, #32
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: ldp x0, x1, [x29, #-16]
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 64
+; CHECK-NEXT: add sp, sp, #80
+; CHECK-NEXT: .seh_stackalloc 80
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$m12$m12:
+; CHECK: .seh_proc $iexit_thunk$cdecl$m12$m12
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #80
+; CHECK-NEXT: .seh_stackalloc 80
+; CHECK-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 64
+; CHECK-NEXT: add x29, sp, #64
+; CHECK-NEXT: .seh_add_fp 64
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: stp x0, x1, [sp, #32]
+; CHECK-NEXT: sub x0, x29, #16
+; CHECK-NEXT: add x1, sp, #32
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: ldp x0, x1, [x29, #-16]
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 64
+; CHECK-NEXT: add sp, sp, #80
+; CHECK-NEXT: .seh_stackalloc 80
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$m16$m13:
+; CHECK: .seh_proc $iexit_thunk$cdecl$m16$m13
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #80
+; CHECK-NEXT: .seh_stackalloc 80
+; CHECK-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 64
+; CHECK-NEXT: add x29, sp, #64
+; CHECK-NEXT: .seh_add_fp 64
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: stp x0, x1, [sp, #32]
+; CHECK-NEXT: sub x0, x29, #16
+; CHECK-NEXT: add x1, sp, #32
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: ldp x0, x1, [x29, #-16]
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 64
+; CHECK-NEXT: add sp, sp, #80
+; CHECK-NEXT: .seh_stackalloc 80
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$m16$m14:
+; CHECK: .seh_proc $iexit_thunk$cdecl$m16$m14
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #80
+; CHECK-NEXT: .seh_stackalloc 80
+; CHECK-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 64
+; CHECK-NEXT: add x29, sp, #64
+; CHECK-NEXT: .seh_add_fp 64
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: stp x0, x1, [sp, #32]
+; CHECK-NEXT: sub x0, x29, #16
+; CHECK-NEXT: add x1, sp, #32
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: ldp x0, x1, [x29, #-16]
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 64
+; CHECK-NEXT: add sp, sp, #80
+; CHECK-NEXT: .seh_stackalloc 80
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$m16$m15:
+; CHECK: .seh_proc $iexit_thunk$cdecl$m16$m15
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #80
+; CHECK-NEXT: .seh_stackalloc 80
+; CHECK-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 64
+; CHECK-NEXT: add x29, sp, #64
+; CHECK-NEXT: .seh_add_fp 64
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: stp x0, x1, [sp, #32]
+; CHECK-NEXT: sub x0, x29, #16
+; CHECK-NEXT: add x1, sp, #32
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: ldp x0, x1, [x29, #-16]
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 64
+; CHECK-NEXT: add sp, sp, #80
+; CHECK-NEXT: .seh_stackalloc 80
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$m16$m16:
+; CHECK: .seh_proc $iexit_thunk$cdecl$m16$m16
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #80
+; CHECK-NEXT: .seh_stackalloc 80
+; CHECK-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 64
+; CHECK-NEXT: add x29, sp, #64
+; CHECK-NEXT: .seh_add_fp 64
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: stp x0, x1, [sp, #32]
+; CHECK-NEXT: sub x0, x29, #16
+; CHECK-NEXT: add x1, sp, #32
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: ldp x0, x1, [x29, #-16]
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 64
+; CHECK-NEXT: add sp, sp, #80
+; CHECK-NEXT: .seh_stackalloc 80
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$v$i8i8:
+; CHECK: .seh_proc $iexit_thunk$cdecl$v$i8i8
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add x29, sp, #32
+; CHECK-NEXT: .seh_add_fp 32
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: mov x1, x0
+; CHECK-NEXT: mov x0, x8
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$v$i8i8.1:
+; CHECK: .seh_proc $iexit_thunk$cdecl$v$i8i8.1
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add x29, sp, #32
+; CHECK-NEXT: .seh_add_fp 32
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: mov x1, x0
+; CHECK-NEXT: mov x0, x8
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$v$i8i8.2:
+; CHECK: .seh_proc $iexit_thunk$cdecl$v$i8i8.2
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add x29, sp, #32
+; CHECK-NEXT: .seh_add_fp 32
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: mov x1, x0
+; CHECK-NEXT: mov x0, x8
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$v$i8i8.3:
+; CHECK: .seh_proc $iexit_thunk$cdecl$v$i8i8.3
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add x29, sp, #32
+; CHECK-NEXT: .seh_add_fp 32
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: mov x1, x0
+; CHECK-NEXT: mov x0, x8
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$v$i8i8.4:
+; CHECK: .seh_proc $iexit_thunk$cdecl$v$i8i8.4
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add x29, sp, #32
+; CHECK-NEXT: .seh_add_fp 32
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: mov x1, x0
+; CHECK-NEXT: mov x0, x8
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$v$i8i8.5:
+; CHECK: .seh_proc $iexit_thunk$cdecl$v$i8i8.5
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add x29, sp, #32
+; CHECK-NEXT: .seh_add_fp 32
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: mov x1, x0
+; CHECK-NEXT: mov x0, x8
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$v$i8i8.6:
+; CHECK: .seh_proc $iexit_thunk$cdecl$v$i8i8.6
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add x29, sp, #32
+; CHECK-NEXT: .seh_add_fp 32
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: mov x1, x0
+; CHECK-NEXT: mov x0, x8
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$v$i8i8.7:
+; CHECK: .seh_proc $iexit_thunk$cdecl$v$i8i8.7
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add x29, sp, #32
+; CHECK-NEXT: .seh_add_fp 32
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: mov x1, x0
+; CHECK-NEXT: mov x0, x8
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$v$i8i8.8:
+; CHECK: .seh_proc $iexit_thunk$cdecl$v$i8i8.8
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add x29, sp, #32
+; CHECK-NEXT: .seh_add_fp 32
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: mov x1, x0
+; CHECK-NEXT: mov x0, x8
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$v$i8i8.9:
+; CHECK: .seh_proc $iexit_thunk$cdecl$v$i8i8.9
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add x29, sp, #32
+; CHECK-NEXT: .seh_add_fp 32
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: mov x1, x0
+; CHECK-NEXT: mov x0, x8
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc
+;
+; CHECK-LABEL: $iexit_thunk$cdecl$v$i8i8.10:
+; CHECK: .seh_proc $iexit_thunk$cdecl$v$i8i8.10
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add x29, sp, #32
+; CHECK-NEXT: .seh_add_fp 32
+; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: mov x1, x0
+; CHECK-NEXT: mov x0, x8
+; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_call_no_redirect
+; CHECK-NEXT: ldr x8, [x8, :lo12:__os_arm64x_dispatch_call_no_redirect]
+; CHECK-NEXT: blr x8
+; CHECK-NEXT: .seh_startepilogue
+; CHECK-NEXT: ldp x29, x30, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 32
+; CHECK-NEXT: add sp, sp, #48
+; CHECK-NEXT: .seh_stackalloc 48
+; CHECK-NEXT: .seh_endepilogue
+; CHECK-NEXT: ret
+; CHECK-NEXT: .seh_endfunclet
+; CHECK-NEXT: .seh_endproc