Index: llvm/include/llvm/CodeGen/TargetRegisterInfo.h
===================================================================
--- llvm/include/llvm/CodeGen/TargetRegisterInfo.h
+++ llvm/include/llvm/CodeGen/TargetRegisterInfo.h
@@ -449,6 +449,13 @@
     return nullptr;
   }
 
+  /// Return a register mask for the registers preserved by the unwinder,
+  /// or nullptr if no custom mask is needed.
+  virtual const uint32_t *
+  getCustomEHPadPreservedMask(const MachineFunction &MF) const {
+    return nullptr;
+  }
+
   /// Return a register mask that clobbers everything.
   virtual const uint32_t *getNoPreservedMask() const {
     llvm_unreachable("target does not provide no preserved mask");
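The mask returned by the new hook uses the standard register-mask encoding: one bit per physical register, with a set bit meaning "preserved". A minimal sketch of how such a mask is interpreted, using the existing MachineOperand::clobbersPhysReg helper (the wrapper name isPreservedByUnwinder is illustrative only, not part of the patch):

  #include "llvm/CodeGen/MachineOperand.h"

  // True if PhysReg survives unwinding into an EH pad. A clear bit in the
  // mask means the unwinder may clobber that register.
  static bool isPreservedByUnwinder(const uint32_t *Mask, unsigned PhysReg) {
    return !llvm::MachineOperand::clobbersPhysReg(Mask, PhysReg);
  }
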
Index: llvm/lib/CodeGen/LiveIntervals.cpp
===================================================================
--- llvm/lib/CodeGen/LiveIntervals.cpp
+++ llvm/lib/CodeGen/LiveIntervals.cpp
@@ -225,6 +225,15 @@
       RegMaskBits.push_back(Mask);
     }
 
+    // Unwinders may clobber additional registers.
+    // FIXME: This functionality can possibly be merged into
+    // MachineBasicBlock::getBeginClobberMask().
+    if (MBB.isEHPad())
+      if (auto *Mask = TRI->getCustomEHPadPreservedMask(*MBB.getParent())) {
+        RegMaskSlots.push_back(Indexes->getMBBStartIdx(&MBB));
+        RegMaskBits.push_back(Mask);
+      }
+
     for (const MachineInstr &MI : MBB) {
       for (const MachineOperand &MO : MI.operands()) {
         if (!MO.isRegMask())
Index: llvm/lib/CodeGen/MIRParser/MIRParser.cpp
===================================================================
--- llvm/lib/CodeGen/MIRParser/MIRParser.cpp
+++ llvm/lib/CodeGen/MIRParser/MIRParser.cpp
@@ -634,6 +634,12 @@
   // Compute MachineRegisterInfo::UsedPhysRegMask
   for (const MachineBasicBlock &MBB : MF) {
+    // Make sure MRI knows about registers clobbered by the unwinder.
+    const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+    if (MBB.isEHPad())
+      if (auto *RegMask = TRI->getCustomEHPadPreservedMask(MF))
+        MRI.addPhysRegsUsedFromRegMask(RegMask);
+
     for (const MachineInstr &MI : MBB) {
       for (const MachineOperand &MO : MI.operands()) {
         if (!MO.isRegMask())
Index: llvm/lib/CodeGen/RegAllocFast.cpp
===================================================================
--- llvm/lib/CodeGen/RegAllocFast.cpp
+++ llvm/lib/CodeGen/RegAllocFast.cpp
@@ -1209,6 +1209,11 @@
   // Otherwise, sequentially allocate each instruction in the MBB.
   for (MachineInstr &MI : MBB) {
+    // Make sure MRI knows about registers clobbered by the unwinder.
+    if (MBB.isEHPad())
+      if (auto *RegMask = TRI->getCustomEHPadPreservedMask(*MBB.getParent()))
+        MRI->addPhysRegsUsedFromRegMask(RegMask);
+
     LLVM_DEBUG(
         dbgs() << "\n>> " << MI << "Regs:";
         dumpState()
Index: llvm/lib/CodeGen/VirtRegMap.cpp
===================================================================
--- llvm/lib/CodeGen/VirtRegMap.cpp
+++ llvm/lib/CodeGen/VirtRegMap.cpp
@@ -499,6 +499,12 @@
   for (MachineFunction::iterator MBBI = MF->begin(), MBBE = MF->end();
        MBBI != MBBE; ++MBBI) {
     LLVM_DEBUG(MBBI->print(dbgs(), Indexes));
+
+    // Make sure MRI knows about registers clobbered by the unwinder.
+    if (MBBI->isEHPad())
+      if (auto *RegMask = TRI->getCustomEHPadPreservedMask(*MBBI->getParent()))
+        MRI->addPhysRegsUsedFromRegMask(RegMask);
+
     for (MachineBasicBlock::instr_iterator MII = MBBI->instr_begin(),
          MIE = MBBI->instr_end(); MII != MIE;) {
       MachineInstr *MI = &*MII;
Index: llvm/lib/Target/AArch64/AArch64RegisterInfo.h
===================================================================
--- llvm/lib/Target/AArch64/AArch64RegisterInfo.h
+++ llvm/lib/Target/AArch64/AArch64RegisterInfo.h
@@ -72,6 +72,10 @@
   // Funclets on ARM64 Windows don't preserve any registers.
   const uint32_t *getNoPreservedMask() const override;
 
+  // Unwinders may not preserve all Neon and SVE registers.
+  const uint32_t *
+  getCustomEHPadPreservedMask(const MachineFunction &MF) const override;
+
   /// getThisReturnPreservedMask - Returns a call preserved mask specific to the
   /// case that 'returned' is on an i64 first argument if the calling convention
   /// is one that can (partially) model this attribute with a preserved mask
Index: llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -240,6 +240,14 @@
   return SCS ? CSR_AArch64_AAPCS_SCS_RegMask : CSR_AArch64_AAPCS_RegMask;
 }
 
+const uint32_t *AArch64RegisterInfo::getCustomEHPadPreservedMask(
+    const MachineFunction &MF) const {
+  if (MF.getSubtarget<AArch64Subtarget>().isTargetLinux())
+    return CSR_AArch64_AAPCS_RegMask;
+
+  return nullptr;
+}
+
 const uint32_t *AArch64RegisterInfo::getTLSCallPreservedMask() const {
   if (TT.isOSDarwin())
     return CSR_Darwin_AArch64_TLS_RegMask;
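Before the new test, a source-level sketch of the problem being fixed (an illustration under assumptions, not part of the patch; callee_may_throw is a made-up name). When unwinding on Linux, only the AAPCS callee-saved subset of the vector registers is guaranteed restored (the low 64 bits, d8-d15), so a value held in a full z or q register across a throwing call could reach the catch handler partially clobbered. Returning CSR_AArch64_AAPCS_RegMask for EH pads makes the allocator spill such values instead:

  #include <arm_sve.h>

  svint32_t callee_may_throw(svint32_t);

  svint32_t caller(svint32_t v) {
    try {
      return callee_may_throw(v);
    } catch (...) {
      // 'v' is needed in full here, but the unwinder only guarantees the
      // low 64 bits of v8-v15; the compiler must therefore spill and
      // reload 'v' rather than keep it in a callee-saved vector register.
      return v;
    }
  }
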
Index: llvm/test/CodeGen/AArch64/unwind-preserved.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/unwind-preserved.ll
@@ -0,0 +1,357 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --extra_scrub
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -O0 -regalloc=fast < %s | FileCheck %s --check-prefix=FAST
+
+; Test that z0 is saved/restored, as the unwinder may only retain the low 64 bits (d0).
+define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) personality i8 0 {
+; CHECK-LABEL: invoke_callee_may_throw_sve:
+; CHECK: .Lfunc_begin0:
+; CHECK-NEXT: .cfi_startproc
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-18
+; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 152 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: .Ltmp0:
+; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: bl may_throw_sve
+; CHECK-NEXT: .Ltmp1:
+; CHECK-NEXT: .LBB0_1: // %.Lcontinue
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #18
+; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB0_2: // %.Lunwind
+; CHECK-NEXT: .Ltmp2:
+; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: b .LBB0_1
+;
+; FAST-LABEL: invoke_callee_may_throw_sve:
+; FAST: .Lfunc_begin0:
+; FAST-NEXT: .cfi_startproc
+; FAST-NEXT: // %bb.0:
+; FAST-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; FAST-NEXT: addvl sp, sp, #-18
+; FAST-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; FAST-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; FAST-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; FAST-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; FAST-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; FAST-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; FAST-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; FAST-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; FAST-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; FAST-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; FAST-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; FAST-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; FAST-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill
+; FAST-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill
+; FAST-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill
+; FAST-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill
+; FAST-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill
+; FAST-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill
+; FAST-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill
+; FAST-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; FAST-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill
+; FAST-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill
+; FAST-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill
+; FAST-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill
+; FAST-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill
+; FAST-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
+; FAST-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
+; FAST-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
+; FAST-NEXT: addvl sp, sp, #-2
+; FAST-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 160 * VG
+; FAST-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
+; FAST-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
+; FAST-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
+; FAST-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
+; FAST-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG
+; FAST-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
+; FAST-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
+; FAST-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
+; FAST-NEXT: .cfi_offset w30, -8
+; FAST-NEXT: .cfi_offset w29, -16
+; FAST-NEXT: .Ltmp0:
+; FAST-NEXT: str z0, [sp, #1, mul vl] // 16-byte Folded Spill
+; FAST-NEXT: bl may_throw_sve
+; FAST-NEXT: .Ltmp1:
+; FAST-NEXT: str z0, [sp] // 16-byte Folded Spill
+; FAST-NEXT: b .LBB0_1
+; FAST-NEXT: .LBB0_1: // %.Lcontinue
+; FAST-NEXT: ldr z0, [sp] // 16-byte Folded Reload
+; FAST-NEXT: addvl sp, sp, #2
+; FAST-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: addvl sp, sp, #18
+; FAST-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; FAST-NEXT: ret
+; FAST-NEXT: .LBB0_2: // %.Lunwind
+; FAST-NEXT: .Ltmp2:
+; FAST-NEXT: ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: addvl sp, sp, #2
+; FAST-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; FAST-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; FAST-NEXT: addvl sp, sp, #18
+; FAST-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; FAST-NEXT: ret
+  %result = invoke <vscale x 4 x i32> @may_throw_sve(<vscale x 4 x i32> %v) to label %.Lcontinue unwind label %.Lunwind
+.Lcontinue:
+  ret <vscale x 4 x i32> %result
+.Lunwind:
+  %lp = landingpad { i8*, i32 } cleanup
+  ret <vscale x 4 x i32> %v;
+}
+
+declare <vscale x 4 x i32> @may_throw_sve(<vscale x 4 x i32> %v);
+
+
+; Test that q0 is saved/restored, as the unwinder may only retain the low 64 bits (d0).
+define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v) personality i8 0 {
+; CHECK-LABEL: invoke_callee_may_throw_neon:
+; CHECK: .Lfunc_begin1:
+; CHECK-NEXT: .cfi_startproc
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #288 // =288
+; CHECK-NEXT: stp q23, q22, [sp, #16] // 32-byte Folded Spill
+; CHECK-NEXT: stp q21, q20, [sp, #48] // 32-byte Folded Spill
+; CHECK-NEXT: stp q19, q18, [sp, #80] // 32-byte Folded Spill
+; CHECK-NEXT: stp q17, q16, [sp, #112] // 32-byte Folded Spill
+; CHECK-NEXT: stp q15, q14, [sp, #144] // 32-byte Folded Spill
+; CHECK-NEXT: stp q13, q12, [sp, #176] // 32-byte Folded Spill
+; CHECK-NEXT: stp q11, q10, [sp, #208] // 32-byte Folded Spill
+; CHECK-NEXT: stp q9, q8, [sp, #240] // 32-byte Folded Spill
+; CHECK-NEXT: stp x29, x30, [sp, #272] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 288
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: .cfi_offset b8, -32
+; CHECK-NEXT: .cfi_offset b9, -48
+; CHECK-NEXT: .cfi_offset b10, -64
+; CHECK-NEXT: .cfi_offset b11, -80
+; CHECK-NEXT: .cfi_offset b12, -96
+; CHECK-NEXT: .cfi_offset b13, -112
+; CHECK-NEXT: .cfi_offset b14, -128
+; CHECK-NEXT: .cfi_offset b15, -144
+; CHECK-NEXT: .cfi_offset b16, -160
+; CHECK-NEXT: .cfi_offset b17, -176
+; CHECK-NEXT: .cfi_offset b18, -192
+; CHECK-NEXT: .cfi_offset b19, -208
+; CHECK-NEXT: .cfi_offset b20, -224
+; CHECK-NEXT: .cfi_offset b21, -240
+; CHECK-NEXT: .cfi_offset b22, -256
+; CHECK-NEXT: .cfi_offset b23, -272
+; CHECK-NEXT: .Ltmp3:
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: bl may_throw_neon
+; CHECK-NEXT: .Ltmp4:
+; CHECK-NEXT: .LBB1_1: // %.Lcontinue
+; CHECK-NEXT: ldp x29, x30, [sp, #272] // 16-byte Folded Reload
+; CHECK-NEXT: ldp q9, q8, [sp, #240] // 32-byte Folded Reload
+; CHECK-NEXT: ldp q11, q10, [sp, #208] // 32-byte Folded Reload
+; CHECK-NEXT: ldp q13, q12, [sp, #176] // 32-byte Folded Reload
+; CHECK-NEXT: ldp q15, q14, [sp, #144] // 32-byte Folded Reload
+; CHECK-NEXT: ldp q17, q16, [sp, #112] // 32-byte Folded Reload
+; CHECK-NEXT: ldp q19, q18, [sp, #80] // 32-byte Folded Reload
+; CHECK-NEXT: ldp q21, q20, [sp, #48] // 32-byte Folded Reload
+; CHECK-NEXT: ldp q23, q22, [sp, #16] // 32-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #288 // =288
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB1_2: // %.Lunwind
+; CHECK-NEXT: .Ltmp5:
+; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: b .LBB1_1
+;
+; FAST-LABEL: invoke_callee_may_throw_neon:
+; FAST: .Lfunc_begin1:
+; FAST-NEXT: .cfi_startproc
+; FAST-NEXT: // %bb.0:
+; FAST-NEXT: sub sp, sp, #304 // =304
+; FAST-NEXT: stp q23, q22, [sp, #32] // 32-byte Folded Spill
+; FAST-NEXT: stp q21, q20, [sp, #64] // 32-byte Folded Spill
+; FAST-NEXT: stp q19, q18, [sp, #96] // 32-byte Folded Spill
+; FAST-NEXT: stp q17, q16, [sp, #128] // 32-byte Folded Spill
+; FAST-NEXT: stp q15, q14, [sp, #160] // 32-byte Folded Spill
+; FAST-NEXT: stp q13, q12, [sp, #192] // 32-byte Folded Spill
+; FAST-NEXT: stp q11, q10, [sp, #224] // 32-byte Folded Spill
+; FAST-NEXT: stp q9, q8, [sp, #256] // 32-byte Folded Spill
+; FAST-NEXT: stp x29, x30, [sp, #288] // 16-byte Folded Spill
+; FAST-NEXT: .cfi_def_cfa_offset 304
+; FAST-NEXT: .cfi_offset w30, -8
+; FAST-NEXT: .cfi_offset w29, -16
+; FAST-NEXT: .cfi_offset b8, -32
+; FAST-NEXT: .cfi_offset b9, -48
+; FAST-NEXT: .cfi_offset b10, -64
+; FAST-NEXT: .cfi_offset b11, -80
+; FAST-NEXT: .cfi_offset b12, -96
+; FAST-NEXT: .cfi_offset b13, -112
+; FAST-NEXT: .cfi_offset b14, -128
+; FAST-NEXT: .cfi_offset b15, -144
+; FAST-NEXT: .cfi_offset b16, -160
+; FAST-NEXT: .cfi_offset b17, -176
+; FAST-NEXT: .cfi_offset b18, -192
+; FAST-NEXT: .cfi_offset b19, -208
+; FAST-NEXT: .cfi_offset b20, -224
+; FAST-NEXT: .cfi_offset b21, -240
+; FAST-NEXT: .cfi_offset b22, -256
+; FAST-NEXT: .cfi_offset b23, -272
+; FAST-NEXT: .Ltmp3:
+; FAST-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; FAST-NEXT: bl may_throw_neon
+; FAST-NEXT: .Ltmp4:
+; FAST-NEXT: str q0, [sp] // 16-byte Folded Spill
+; FAST-NEXT: b .LBB1_1
+; FAST-NEXT: .LBB1_1: // %.Lcontinue
+; FAST-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; FAST-NEXT: ldp x29, x30, [sp, #288] // 16-byte Folded Reload
+; FAST-NEXT: ldp q9, q8, [sp, #256] // 32-byte Folded Reload
+; FAST-NEXT: ldp q11, q10, [sp, #224] // 32-byte Folded Reload
+; FAST-NEXT: ldp q13, q12, [sp, #192] // 32-byte Folded Reload
+; FAST-NEXT: ldp q15, q14, [sp, #160] // 32-byte Folded Reload
+; FAST-NEXT: ldp q17, q16, [sp, #128] // 32-byte Folded Reload
+; FAST-NEXT: ldp q19, q18, [sp, #96] // 32-byte Folded Reload
+; FAST-NEXT: ldp q21, q20, [sp, #64] // 32-byte Folded Reload
+; FAST-NEXT: ldp q23, q22, [sp, #32] // 32-byte Folded Reload
+; FAST-NEXT: add sp, sp, #304 // =304
+; FAST-NEXT: ret
+; FAST-NEXT: .LBB1_2: // %.Lunwind
+; FAST-NEXT: .Ltmp5:
+; FAST-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
+; FAST-NEXT: ldp x29, x30, [sp, #288] // 16-byte Folded Reload
+; FAST-NEXT: ldp q9, q8, [sp, #256] // 32-byte Folded Reload
+; FAST-NEXT: ldp q11, q10, [sp, #224] // 32-byte Folded Reload
+; FAST-NEXT: ldp q13, q12, [sp, #192] // 32-byte Folded Reload
+; FAST-NEXT: ldp q15, q14, [sp, #160] // 32-byte Folded Reload
+; FAST-NEXT: ldp q17, q16, [sp, #128] // 32-byte Folded Reload
+; FAST-NEXT: ldp q19, q18, [sp, #96] // 32-byte Folded Reload
+; FAST-NEXT: ldp q21, q20, [sp, #64] // 32-byte Folded Reload
+; FAST-NEXT: ldp q23, q22, [sp, #32] // 32-byte Folded Reload
+; FAST-NEXT: add sp, sp, #304 // =304
+; FAST-NEXT: ret
+  %result = invoke aarch64_vector_pcs <4 x i32> @may_throw_neon(<4 x i32> %v) to label %.Lcontinue unwind label %.Lunwind
+.Lcontinue:
+  ret <4 x i32> %result
+.Lunwind:
+  %lp = landingpad { i8*, i32 } cleanup
+  ret <4 x i32> %v;
+}
+
+declare aarch64_vector_pcs <4 x i32> @may_throw_neon(<4 x i32> %v);
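
As the FIXME in the LiveIntervals.cpp hunk notes, the EH-pad special case repeated in LiveIntervals, MIRParser, RegAllocFast, and VirtRegMap could plausibly be folded into MachineBasicBlock::getBeginClobberMask(), which already reports clobbers for EH funclet entries. A rough sketch of that direction, assuming the current shape of getBeginClobberMask (a possible follow-up, not something this patch does):

  // MachineBasicBlock.cpp (hypothetical refactoring)
  const uint32_t *
  MachineBasicBlock::getBeginClobberMask(const TargetRegisterInfo *TRI) const {
    // EH funclet entries do not preserve any registers (existing behavior).
    if (isEHFuncletEntry())
      return TRI->getNoPreservedMask();
    // New: also report registers the unwinder may clobber at ordinary EH pads.
    if (isEHPad())
      return TRI->getCustomEHPadPreservedMask(*getParent());
    return nullptr;
  }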