Index: include/llvm/IR/IntrinsicsAArch64.td
===================================================================
--- include/llvm/IR/IntrinsicsAArch64.td
+++ include/llvm/IR/IntrinsicsAArch64.td
@@ -33,6 +33,14 @@
                 LLVMMatchType<0>], [IntrNoMem]>;
 
 //===----------------------------------------------------------------------===//
+// SEH intrinsics for Windows
+// Given a pointer to the end of an EH registration object, returns the true
+// parent frame address that can be used with llvm.localrecover.
+def int_aarch64_seh_recoverfp : Intrinsic<[llvm_ptr_ty],
+                                          [llvm_ptr_ty, llvm_ptr_ty],
+                                          [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
 // HINT
 def int_aarch64_hint : Intrinsic<[], [llvm_i32_ty]>;
 
Index: lib/Target/AArch64/AArch64AsmPrinter.cpp
===================================================================
--- lib/Target/AArch64/AArch64AsmPrinter.cpp
+++ lib/Target/AArch64/AArch64AsmPrinter.cpp
@@ -563,6 +563,20 @@
   switch (MI->getOpcode()) {
   default:
     break;
+  case AArch64::MOVMCSym: {
+    MCSymbol *Sym = MI->getOperand(1).getMCSymbol();
+    const MCExpr *Expr = MCSymbolRefExpr::create(Sym, OutContext);
+
+    MCInst TmpInst;
+    TmpInst.setOpcode(AArch64::MOVKXi);
+    TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
+    TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
+    TmpInst.addOperand(MCOperand::createExpr(Expr));
+    TmpInst.addOperand(MCOperand::createImm(0)); // unused shift.
+    EmitToStreamer(*OutStreamer, TmpInst);
+    return;
+  }
+
   case AArch64::MOVIv2d_ns:
     // If the target has <rdar://problem/16473581>, lower this
     // instruction to movi.16b instead.
Index: lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- lib/Target/AArch64/AArch64ISelLowering.cpp
+++ lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -37,6 +37,7 @@
 #include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/RuntimeLibcalls.h"
 #include "llvm/CodeGen/SelectionDAG.h"
@@ -2710,6 +2711,35 @@
   return DAG.getNode(AArch64ISD::UZP2, DL, VT, Mull, Mull2);
 }
 
+static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
+                                   SDValue EntryFP) {
+  MachineFunction &MF = DAG.getMachineFunction();
+  SDLoc dl;
+
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
+
+  // It's possible that the parent function no longer has a personality function
+  // if the exceptional code was optimized away, in which case we just return
+  // the incoming FP.
+  if (!Fn->hasPersonalityFn())
+    return EntryFP;
+
+  // Get an MCSymbol that will ultimately resolve to the frame offset of the EH
+  // registration, or the .set_setframe offset.
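+  // (For a parent named "foo" this is "<prefix>foo$parent_frame_offset", where
+  // <prefix> is the private-label prefix, ".L" on this target; the value is
+  // expected to be defined via a .set when the parent's EH tables are emitted.)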
+  MCSymbol *OffsetSym =
+      MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
+          GlobalValue::dropLLVMManglingEscape(Fn->getName()));
+  SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT);
+  SDValue ParentFrameOffset =
+      DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal);
+
+  return DAG.getNode(ISD::ADD, dl, PtrVT, EntryFP, ParentFrameOffset);
+}
+
 SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                        SelectionDAG &DAG) const {
   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
@@ -2735,6 +2765,17 @@
   case Intrinsic::aarch64_neon_umin:
     return DAG.getNode(ISD::UMIN, dl, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2));
+
+  case Intrinsic::aarch64_seh_recoverfp: {
+    SDValue FnOp = Op.getOperand(1);
+    SDValue IncomingFPOp = Op.getOperand(2);
+    GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
+    auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
+    if (!Fn)
+      report_fatal_error(
+          "llvm.aarch64.seh.recoverfp must take a function as the first argument");
+    return recoverFramePointer(DAG, Fn, IncomingFPOp);
+  }
   }
 }
 
Index: lib/Target/AArch64/AArch64InstrInfo.td
===================================================================
--- lib/Target/AArch64/AArch64InstrInfo.td
+++ lib/Target/AArch64/AArch64InstrInfo.td
@@ -347,6 +347,10 @@
 def AArch64smaxv : SDNode<"AArch64ISD::SMAXV", SDT_AArch64UnaryVec>;
 def AArch64umaxv : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>;
 
+def AArch64LocalRecover : SDNode<"ISD::LOCAL_RECOVER",
+                                 SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
+                                                      SDTCisInt<1>]>>;
+
 //===----------------------------------------------------------------------===//
 
 //===----------------------------------------------------------------------===//
@@ -6661,5 +6665,8 @@
 def : Pat<(AArch64tcret texternalsym:$dst, (i32 timm:$FPDiff)),
           (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
 
+def MOVMCSym : Pseudo<(outs GPR64:$dst), (ins i64imm:$sym), []>, Sched<[]>;
+def : Pat<(i64 (AArch64LocalRecover mcsym:$sym)), (MOVMCSym mcsym:$sym)>;
+
 include "AArch64InstrAtomics.td"
 include "AArch64SVEInstrInfo.td"
Index: lib/Target/AArch64/AArch64RegisterInfo.cpp
===================================================================
--- lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -455,6 +455,13 @@
 
   // Modify MI as necessary to handle as much of 'Offset' as possible
   Offset = TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg);
+
+  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
+    MachineOperand &FI = MI.getOperand(FIOperandNum);
+    FI.ChangeToImmediate(Offset);
+    return;
+  }
+
   if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
     return;
 
Index: lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
===================================================================
--- lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
+++ lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
@@ -216,9 +216,10 @@
       Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
     return Value >> 4;
   case AArch64::fixup_aarch64_movw:
-    Ctx.reportError(Fixup.getLoc(),
-                    "no resolvable MOVZ/MOVK fixups supported yet");
-    return Value;
+    // 16-bit immediate. Assumed unsigned.
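+    // (Assumption: the only producer so far is the MOVK emitted for MOVMCSym,
+    // whose .set frame offsets are small enough to fit in 16 bits.)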
+    return Value & 0xffff;
   case AArch64::fixup_aarch64_pcrel_branch14:
     // Signed 16-bit immediate
     if (SignedValue > 32767 || SignedValue < -32768)
Index: test/CodeGen/AArch64/seh-try-except.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AArch64/seh-try-except.ll
@@ -0,0 +1,46 @@
+; RUN: llc -mtriple=arm64-windows -verify-machineinstrs < %s | FileCheck %s
+
+; Generated from C program:
+; void foo() {
+;   int x = 0;
+;   __try {}
+;   __except(x = 0) {}
+; }
+
+declare i8* @llvm.aarch64.seh.recoverfp(i8*, i8*)
+declare i8* @llvm.localrecover(i8*, i8*, i32)
+declare void @llvm.localescape(...)
+
+define dso_local void @foo() {
+entry:
+  %x = alloca i32, align 4
+  %__exception_code = alloca i32, align 4
+  call void (...) @llvm.localescape(i32* %x)
+  store i32 0, i32* %x, align 4
+  ret void
+}
+
+; CHECK-LABEL: foo
+; CHECK: .set .Lfoo$frame_escape_0, 12
+
+define internal i32 @"?filt$0@0@foo@@"(i8* %exception_pointers, i8* %frame_pointer) {
+entry:
+  %frame_pointer.addr = alloca i8*, align 8
+  %exception_pointers.addr = alloca i8*, align 8
+  %0 = call i8* @llvm.aarch64.seh.recoverfp(i8* bitcast (void ()* @foo to i8*), i8* %frame_pointer)
+  %1 = call i8* @llvm.localrecover(i8* bitcast (void ()* @foo to i8*), i8* %0, i32 0)
+  %x = bitcast i8* %1 to i32*
+  %__exception_code = alloca i32, align 4
+  store i8* %frame_pointer, i8** %frame_pointer.addr, align 8
+  store i8* %exception_pointers, i8** %exception_pointers.addr, align 8
+  %2 = bitcast i8* %exception_pointers to { i32*, i8* }*
+  %3 = getelementptr inbounds { i32*, i8* }, { i32*, i8* }* %2, i32 0, i32 0
+  %4 = load i32*, i32** %3, align 8
+  %5 = load i32, i32* %4, align 4
+  store i32 %5, i32* %__exception_code, align 4
+  store i32 0, i32* %x, align 4
+  ret i32 0
+}
+
+; CHECK-LABEL: "?filt$0@0@foo@@"
+; CHECK: movk x9, #.Lfoo$frame_escape_0
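Usage note: the test above doubles as a reference for how a front end chains
the three intrinsics. A minimal sketch follows; the names @parent and @filter
are illustrative only, not part of the patch, and elided code is marked "...":

  ; Parent: escape the address of each local the filter will need. (A real
  ; parent also carries an SEH personality; without one, recoverfp simply
  ; returns the incoming FP, per recoverFramePointer above.)
  define void @parent() {
    %slot = alloca i32
    call void (...) @llvm.localescape(i32* %slot)
    ...
  }

  ; Filter funclet: the EH runtime passes the establishing frame pointer as
  ; the second argument. recoverfp turns it into @parent's true frame base,
  ; and localrecover then applies the .set offset recorded for slot 0.
  define i32 @filter(i8* %eh_ptrs, i8* %parent_fp) {
    %fp = call i8* @llvm.aarch64.seh.recoverfp(
                  i8* bitcast (void ()* @parent to i8*), i8* %parent_fp)
    %slot.p = call i8* @llvm.localrecover(
                  i8* bitcast (void ()* @parent to i8*), i8* %fp, i32 0)
    %slot = bitcast i8* %slot.p to i32*
    ...
  }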