Index: lib/Target/AArch64/AArch64CallingConvention.td =================================================================== --- lib/Target/AArch64/AArch64CallingConvention.td +++ lib/Target/AArch64/AArch64CallingConvention.td @@ -86,6 +86,8 @@ CCIfType<[v2f32], CCBitConvertToType>, CCIfType<[v2f64, v4f32], CCBitConvertToType>, + CCIfSwiftError>>, + // Big endian vectors must be passed as if they were 1-element vectors so that // their lanes are in a consistent order. CCIfBigEndian>>, + // A SwiftError is passed in X19. + CCIfSwiftError>>, + CCIfConsecutiveRegs>, // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers, @@ -273,6 +278,11 @@ // case) def CSR_AArch64_AAPCS_ThisReturn : CalleeSavedRegs<(add CSR_AArch64_AAPCS, X0)>; +def CSR_AArch64_AAPCS_SwiftError : CalleeSavedRegs<(add LR, FP, X20, X21, X22, + X23, X24, X25, X26, X27, X28, + D8, D9, D10, D11, + D12, D13, D14, D15)>; + // The function used by Darwin to obtain the address of a thread-local variable // guarantees more than a normal AAPCS function. x16 and x17 are used on the // fast path for calculation, but other registers except X0 (argument/return) Index: lib/Target/AArch64/AArch64FastISel.cpp =================================================================== --- lib/Target/AArch64/AArch64FastISel.cpp +++ lib/Target/AArch64/AArch64FastISel.cpp @@ -1900,6 +1900,17 @@ cast(I)->isAtomic()) return false; + const Value *SV = I->getOperand(0); + if (const Argument *Arg = dyn_cast(SV)) { + if (Arg->hasSwiftErrorAttr() && TLI.supportSwiftError()) + return false; + } + + if (const AllocaInst *Alloca = dyn_cast(SV)) { + if (Alloca->isSwiftError() && TLI.supportSwiftError()) + return false; + } + // See if we can handle this address. 
Address Addr; if (!computeAddress(I->getOperand(0), Addr, I->getType())) @@ -2064,6 +2075,17 @@ cast(I)->isAtomic()) return false; + const Value *PtrV = I->getOperand(1); + if (const Argument *Arg = dyn_cast(PtrV)) { + if (Arg->hasSwiftErrorAttr() && TLI.supportSwiftError()) + return false; + } + + if (const AllocaInst *Alloca = dyn_cast(PtrV)) { + if (Alloca->isSwiftError() && TLI.supportSwiftError()) + return false; + } + // Get the value to be stored into a register. Use the zero register directly // when possible to avoid an unnecessary copy and a wasted register. unsigned SrcReg = 0; @@ -2810,6 +2832,7 @@ F->getAttributes().hasAttribute(Idx, Attribute::InReg) || F->getAttributes().hasAttribute(Idx, Attribute::StructRet) || F->getAttributes().hasAttribute(Idx, Attribute::SwiftSelf) || + F->getAttributes().hasAttribute(Idx, Attribute::SwiftError) || F->getAttributes().hasAttribute(Idx, Attribute::Nest)) return false; @@ -3062,7 +3085,7 @@ for (auto Flag : CLI.OutFlags) if (Flag.isInReg() || Flag.isSRet() || Flag.isNest() || Flag.isByVal() || - Flag.isSwiftSelf()) + Flag.isSwiftSelf() || Flag.isSwiftError()) return false; // Set up the argument vectors. 
@@ -3644,6 +3667,10 @@ if (F.isVarArg()) return false; + if (F.getAttributes().hasAttrSomewhere(Attribute::SwiftError) && + TLI.supportSwiftError()) + return false; + if (TLI.supportSplitCSR(FuncInfo.MF)) return false; Index: lib/Target/AArch64/AArch64FrameLowering.cpp =================================================================== --- lib/Target/AArch64/AArch64FrameLowering.cpp +++ lib/Target/AArch64/AArch64FrameLowering.cpp @@ -706,6 +706,14 @@ return getKillRegState(LRKill); } +static bool produceCompactUnwindFrame(MachineFunction &MF) { + const AArch64Subtarget &Subtarget = MF.getSubtarget(); + AttributeSet Attrs = MF.getFunction()->getAttributes(); + return Subtarget.isTargetMachO() && + !Attrs.hasAttrSomewhere(Attribute::SwiftError); +} + + struct RegPairInfo { RegPairInfo() : Reg1(AArch64::NoRegister), Reg2(AArch64::NoRegister) {} unsigned Reg1; @@ -730,7 +738,7 @@ (void)CC; // MachO's compact unwind format relies on all registers being stored in // pairs. - assert((!MF.getSubtarget().isTargetMachO() || + assert((!produceCompactUnwindFrame(MF) || CC == CallingConv::PreserveMost || (Count & 1) == 0) && "Odd number of callee-saved regs to spill!"); @@ -764,7 +772,7 @@ // MachO's compact unwind format relies on all registers being stored in // adjacent register pairs. - assert((!MF.getSubtarget().isTargetMachO() || + assert((!produceCompactUnwindFrame(MF) || CC == CallingConv::PreserveMost || (RPI.isPaired() && ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) || @@ -954,7 +962,6 @@ const AArch64RegisterInfo *RegInfo = static_cast( MF.getSubtarget().getRegisterInfo()); AArch64FunctionInfo *AFI = MF.getInfo(); - const AArch64Subtarget &Subtarget = MF.getSubtarget(); unsigned UnspilledCSGPR = AArch64::NoRegister; unsigned UnspilledCSGPRPaired = AArch64::NoRegister; @@ -992,7 +999,7 @@ // MachO's compact unwind format relies on all registers being stored in // pairs. // FIXME: the usual format is actually better if unwinding isn't needed. 
- if (Subtarget.isTargetMachO() && !SavedRegs.test(PairedReg)) { + if (produceCompactUnwindFrame(MF) && !SavedRegs.test(PairedReg)) { SavedRegs.set(PairedReg); ExtraCSSpill = true; } @@ -1035,7 +1042,7 @@ // MachO's compact unwind format relies on all registers being stored in // pairs, so if we need to spill one extra for BigStack, then we need to // store the pair. - if (Subtarget.isTargetMachO()) + if (produceCompactUnwindFrame(MF)) SavedRegs.set(UnspilledCSGPRPaired); ExtraCSSpill = true; NumRegsSpilled = SavedRegs.count(); Index: lib/Target/AArch64/AArch64ISelLowering.h =================================================================== --- lib/Target/AArch64/AArch64ISelLowering.h +++ lib/Target/AArch64/AArch64ISelLowering.h @@ -407,6 +407,10 @@ void addDRTypeForNEON(MVT VT); void addQRTypeForNEON(MVT VT); + bool supportSwiftError() const override { + return true; + } + SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl &Ins, SDLoc DL, Index: lib/Target/AArch64/AArch64RegisterInfo.cpp =================================================================== --- lib/Target/AArch64/AArch64RegisterInfo.cpp +++ lib/Target/AArch64/AArch64RegisterInfo.cpp @@ -51,6 +51,9 @@ return MF->getInfo()->isSplitCSR() ? 
CSR_AArch64_CXX_TLS_Darwin_PE_SaveList : CSR_AArch64_CXX_TLS_Darwin_SaveList; + if (MF->getFunction()->getAttributes().hasAttrSomewhere( + Attribute::SwiftError)) + return CSR_AArch64_AAPCS_SwiftError_SaveList; if (MF->getFunction()->getCallingConv() == CallingConv::PreserveMost) return CSR_AArch64_RT_MostRegs_SaveList; else @@ -76,6 +79,8 @@ return CSR_AArch64_AllRegs_RegMask; if (CC == CallingConv::CXX_FAST_TLS) return CSR_AArch64_CXX_TLS_Darwin_RegMask; + if (MF.getFunction()->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) + return CSR_AArch64_AAPCS_SwiftError_RegMask; if (CC == CallingConv::PreserveMost) return CSR_AArch64_RT_MostRegs_RegMask; else Index: lib/Target/ARM/ARMBaseRegisterInfo.cpp =================================================================== --- lib/Target/ARM/ARMBaseRegisterInfo.cpp +++ lib/Target/ARM/ARMBaseRegisterInfo.cpp @@ -87,6 +87,10 @@ } } + if (STI.isTargetDarwin() && + F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) + return CSR_iOS_SwiftError_SaveList; + if (STI.isTargetDarwin() && F->getCallingConv() == CallingConv::CXX_FAST_TLS) return MF->getInfo()->isSplitCSR() ? CSR_iOS_CXX_TLS_PE_SaveList @@ -110,6 +114,11 @@ if (CC == CallingConv::GHC) // This is academic becase all GHC calls are (supposed to be) tail calls return CSR_NoRegs_RegMask; + + if (STI.isTargetDarwin() && + MF.getFunction()->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) + return CSR_iOS_SwiftError_RegMask; + if (STI.isTargetDarwin() && CC == CallingConv::CXX_FAST_TLS) return CSR_iOS_CXX_TLS_RegMask; return STI.isTargetDarwin() ? CSR_iOS_RegMask : CSR_AAPCS_RegMask; Index: lib/Target/ARM/ARMCallingConv.td =================================================================== --- lib/Target/ARM/ARMCallingConv.td +++ lib/Target/ARM/ARMCallingConv.td @@ -26,6 +26,9 @@ // A SwiftSelf is passed in R9. CCIfSwiftSelf>>, + // An SwiftError is passed in R6. + CCIfSwiftError>>, + // Handle all vector types as either f64 or v2f64. 
CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType>, CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType>, @@ -45,6 +48,9 @@ CCIfType<[i1, i8, i16], CCPromoteToType>, CCIfType<[f32], CCBitConvertToType>, + // An SwiftError is returned in R6. + CCIfSwiftError>>, + // Handle all vector types as either f64 or v2f64. CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType>, CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType>, @@ -157,6 +163,9 @@ // A SwiftSelf is passed in R9. CCIfSwiftSelf>>, + // An SwiftError is passed in R6. + CCIfSwiftError>>, + CCIfType<[f64, v2f64], CCCustom<"CC_ARM_AAPCS_Custom_f64">>, CCIfType<[f32], CCBitConvertToType>, CCDelegateTo @@ -167,6 +176,9 @@ CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType>, CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType>, + // An SwiftError is returned in R6. + CCIfSwiftError>>, + CCIfType<[f64, v2f64], CCCustom<"RetCC_ARM_AAPCS_Custom_f64">>, CCIfType<[f32], CCBitConvertToType>, CCDelegateTo @@ -188,6 +200,9 @@ // A SwiftSelf is passed in R9. CCIfSwiftSelf>>, + // An SwiftError is passed in R6. + CCIfSwiftError>>, + // HFAs are passed in a contiguous block of registers, or on the stack CCIfConsecutiveRegs>, @@ -203,6 +218,9 @@ CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType>, CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType>, + // An SwiftError is returned in R6. + CCIfSwiftError>>, + CCIfType<[v2f64], CCAssignToReg<[Q0, Q1, Q2, Q3]>>, CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>, CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7, S8, @@ -231,6 +249,9 @@ // Also save R7-R4 first to match the stack frame fixed spill areas. def CSR_iOS : CalleeSavedRegs<(add LR, R7, R6, R5, R4, (sub CSR_AAPCS, R9))>; +// R6 is used to pass swifterror, remove it from CSR. 
+def CSR_iOS_SwiftError : CalleeSavedRegs<(sub CSR_iOS, R6)>; + def CSR_iOS_ThisReturn : CalleeSavedRegs<(add LR, R7, R6, R5, R4, (sub CSR_AAPCS_ThisReturn, R9))>; Index: lib/Target/ARM/ARMFastISel.cpp =================================================================== --- lib/Target/ARM/ARMFastISel.cpp +++ lib/Target/ARM/ARMFastISel.cpp @@ -1062,6 +1062,17 @@ if (cast(I)->isAtomic()) return false; + const Value *SV = I->getOperand(0); + if (const Argument *Arg = dyn_cast(SV)) { + if (Arg->hasSwiftErrorAttr() && TLI.supportSwiftError()) + return false; + } + + if (const AllocaInst *Alloca = dyn_cast(SV)) { + if (Alloca->isSwiftError() && TLI.supportSwiftError()) + return false; + } + // Verify we have a legal type before going any further. MVT VT; if (!isLoadTypeLegal(I->getType(), VT)) @@ -1177,6 +1188,17 @@ if (cast(I)->isAtomic()) return false; + const Value *PtrV = I->getOperand(1); + if (const Argument *Arg = dyn_cast(PtrV)) { + if (Arg->hasSwiftErrorAttr() && TLI.supportSwiftError()) + return false; + } + + if (const AllocaInst *Alloca = dyn_cast(PtrV)) { + if (Alloca->isSwiftError() && TLI.supportSwiftError()) + return false; + } + // Verify we have a legal type before going any further. 
MVT VT; if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT)) @@ -2084,6 +2106,10 @@ if (!FuncInfo.CanLowerReturn) return false; + if (F.getAttributes().hasAttrSomewhere(Attribute::SwiftError) && + TLI.supportSwiftError()) + return false; + if (TLI.supportSplitCSR(FuncInfo.MF)) return false; @@ -2346,6 +2372,7 @@ if (CS.paramHasAttr(AttrInd, Attribute::InReg) || CS.paramHasAttr(AttrInd, Attribute::StructRet) || CS.paramHasAttr(AttrInd, Attribute::SwiftSelf) || + CS.paramHasAttr(AttrInd, Attribute::SwiftError) || CS.paramHasAttr(AttrInd, Attribute::Nest) || CS.paramHasAttr(AttrInd, Attribute::ByVal)) return false; @@ -3021,6 +3048,7 @@ if (F->getAttributes().hasAttribute(Idx, Attribute::InReg) || F->getAttributes().hasAttribute(Idx, Attribute::StructRet) || F->getAttributes().hasAttribute(Idx, Attribute::SwiftSelf) || + F->getAttributes().hasAttribute(Idx, Attribute::SwiftError) || F->getAttributes().hasAttribute(Idx, Attribute::ByVal)) return false; Index: lib/Target/ARM/ARMISelLowering.h =================================================================== --- lib/Target/ARM/ARMISelLowering.h +++ lib/Target/ARM/ARMISelLowering.h @@ -587,6 +587,10 @@ SmallVectorImpl &InVals, bool isThisReturn, SDValue ThisVal) const; + bool supportSwiftError() const override { + return true; + } + bool supportSplitCSR(MachineFunction *MF) const override { return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS && MF->getFunction()->hasFnAttribute(Attribute::NoUnwind); Index: lib/Target/X86/X86CallingConv.td =================================================================== --- lib/Target/X86/X86CallingConv.td +++ lib/Target/X86/X86CallingConv.td @@ -162,6 +162,9 @@ // MMX vector types are always returned in XMM0. CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1]>>, + + CCIfSwiftError>>, + CCDelegateTo ]>; @@ -276,6 +279,9 @@ // A SwiftSelf is passed in R10. CCIfSwiftSelf>>, + // An SwiftError is passed in R12. 
+ CCIfSwiftError>>, + // The first 6 integer arguments are passed in integer registers. CCIfType<[i32], CCAssignToReg<[EDI, ESI, EDX, ECX, R8D, R9D]>>, CCIfType<[i64], CCAssignToReg<[RDI, RSI, RDX, RCX, R8 , R9 ]>>, @@ -824,6 +830,8 @@ def CSR_32 : CalleeSavedRegs<(add ESI, EDI, EBX, EBP)>; def CSR_64 : CalleeSavedRegs<(add RBX, R12, R13, R14, R15, RBP)>; +def CSR_64_SwiftError : CalleeSavedRegs<(add RBX, R13, R14, R15, RBP)>; + def CSR_32EHRet : CalleeSavedRegs<(add EAX, EDX, CSR_32)>; def CSR_64EHRet : CalleeSavedRegs<(add RAX, RDX, CSR_64)>; Index: lib/Target/X86/X86FastISel.cpp =================================================================== --- lib/Target/X86/X86FastISel.cpp +++ lib/Target/X86/X86FastISel.cpp @@ -972,6 +972,17 @@ if (S->isAtomic()) return false; + const Value *PtrV = I->getOperand(1); + if (const Argument *Arg = dyn_cast(PtrV)) { + if (Arg->hasSwiftErrorAttr() && TLI.supportSwiftError()) + return false; + } + + if (const AllocaInst *Alloca = dyn_cast(PtrV)) { + if (Alloca->isSwiftError() && TLI.supportSwiftError()) + return false; + } + const Value *Val = S->getValueOperand(); const Value *Ptr = S->getPointerOperand(); @@ -1002,6 +1013,10 @@ if (!FuncInfo.CanLowerReturn) return false; + if (F.getAttributes().hasAttrSomewhere(Attribute::SwiftError) && + TLI.supportSwiftError()) + return false; + if (TLI.supportSplitCSR(FuncInfo.MF)) return false; @@ -1133,6 +1148,17 @@ if (LI->isAtomic()) return false; + const Value *SV = I->getOperand(0); + if (const Argument *Arg = dyn_cast(SV)) { + if (Arg->hasSwiftErrorAttr() && TLI.supportSwiftError()) + return false; + } + + if (const AllocaInst *Alloca = dyn_cast(SV)) { + if (Alloca->isSwiftError() && TLI.supportSwiftError()) + return false; + } + MVT VT; if (!isTypeLegal(LI->getType(), VT, /*AllowI1=*/true)) return false; @@ -2745,6 +2771,7 @@ F->getAttributes().hasAttribute(Idx, Attribute::InReg) || F->getAttributes().hasAttribute(Idx, Attribute::StructRet) || F->getAttributes().hasAttribute(Idx, 
Attribute::SwiftSelf) || + F->getAttributes().hasAttribute(Idx, Attribute::SwiftError) || F->getAttributes().hasAttribute(Idx, Attribute::Nest)) return false; @@ -2876,6 +2903,10 @@ if (CLI.CS && CLI.CS->hasInAllocaArgument()) return false; + for (auto Flag : CLI.OutFlags) + if (Flag.isSwiftError()) + return false; + // Fast-isel doesn't know about callee-pop yet. if (X86::isCalleePop(CC, Subtarget->is64Bit(), IsVarArg, TM.Options.GuaranteedTailCallOpt)) Index: lib/Target/X86/X86ISelLowering.h =================================================================== --- lib/Target/X86/X86ISelLowering.h +++ lib/Target/X86/X86ISelLowering.h @@ -1083,6 +1083,10 @@ SDValue LowerGC_TRANSITION_START(SDValue Op, SelectionDAG &DAG) const; SDValue LowerGC_TRANSITION_END(SDValue Op, SelectionDAG &DAG) const; + bool supportSwiftError() const override { + return true; + } + SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, Index: lib/Target/X86/X86ISelLowering.cpp =================================================================== --- lib/Target/X86/X86ISelLowering.cpp +++ lib/Target/X86/X86ISelLowering.cpp @@ -2340,7 +2340,7 @@ // false, then an sret argument may be implicitly inserted in the SelDAG. In // either case FuncInfo->setSRetReturnReg() will have been called. 
if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) { - SDValue Val = DAG.getCopyFromReg(Chain, dl, SRetReg, + SDValue Val = DAG.getCopyFromReg(RetOps[0], dl, SRetReg, getPointerTy(MF.getDataLayout())); unsigned RetValReg Index: lib/Target/X86/X86RegisterInfo.cpp =================================================================== --- lib/Target/X86/X86RegisterInfo.cpp +++ lib/Target/X86/X86RegisterInfo.cpp @@ -299,6 +299,9 @@ return CSR_Win64_SaveList; if (CallsEHReturn) return CSR_64EHRet_SaveList; + if (MF->getFunction()->getAttributes().hasAttrSomewhere( + Attribute::SwiftError)) + return CSR_64_SwiftError_SaveList; return CSR_64_SaveList; } if (CallsEHReturn) @@ -385,6 +388,9 @@ if (Is64Bit) { if (IsWin64) return CSR_Win64_RegMask; + if (MF.getFunction()->getAttributes().hasAttrSomewhere( + Attribute::SwiftError)) + return CSR_64_SwiftError_RegMask; return CSR_64_RegMask; } return CSR_32_RegMask; Index: test/CodeGen/AArch64/swifterror.ll =================================================================== --- /dev/null +++ test/CodeGen/AArch64/swifterror.ll @@ -0,0 +1,371 @@ +; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-apple-ios -disable-post-ra | FileCheck --check-prefix=CHECK-APPLE %s +; RUN: llc -verify-machineinstrs -O0 < %s -mtriple=aarch64-apple-ios -disable-post-ra | FileCheck --check-prefix=CHECK-O0 %s + +declare i8* @malloc(i64) +declare void @free(i8*) +%swift_error = type {i64, i8} + +define float @foo(%swift_error** swifterror %error_ptr_ref) { +; CHECK-APPLE-LABEL: foo: +; CHECK-APPLE: orr w0, wzr, #0x10 +; CHECK-APPLE: malloc +; CHECK-APPLE: orr [[ID:w[0-9]+]], wzr, #0x1 +; CHECK-APPLE: strb [[ID]], [x0, #8] +; CHECK-APPLE: mov x19, x0 +; CHECK-APPLE-NOT: x19 + +; CHECK-O0-LABEL: foo: +; CHECK-O0: orr w{{.*}}, wzr, #0x10 +; CHECK-O0: malloc +; CHECK-O0: mov [[ID2:x[0-9]+]], x0 +; CHECK-O0: orr [[ID:w[0-9]+]], wzr, #0x1 +; CHECK-O0: strb [[ID]], [x0, #8] +; CHECK-O0: mov x19, [[ID2]] +; CHECK-O0-NOT: x19 +entry: + %call = call i8* 
@malloc(i64 16) + %call.0 = bitcast i8* %call to %swift_error* + store %swift_error* %call.0, %swift_error** %error_ptr_ref + %0 = getelementptr inbounds i8, i8* %call, i64 8 + store i8 1, i8* %0 + ret float 1.0 +} + +define float @caller(i8* %error_ref) { +; CHECK-APPLE-LABEL: caller: +; CHECK-APPLE: mov [[ID:x[0-9]+]], x0 +; CHECK-APPLE: mov x19, xzr +; CHECK-APPLE: bl {{.*}}foo +; CHECK-APPLE: cbnz x19 +; Access part of the error object and save it to error_ref +; CHECK-APPLE: ldrb [[CODE:w[0-9]+]], [x19, #8] +; CHECK-APPLE: strb [[CODE]], [{{.*}}[[ID]]] +; CHECK-APPLE: mov x0, x19 +; CHECK_APPLE: bl {{.*}}free + +; CHECK-O0-LABEL: caller: +; CHECK-O0: mov x19 +; CHECK-O0: bl {{.*}}foo +; CHECK-O0: mov [[ID:x[0-9]+]], x19 +; CHECK-O0: cbnz [[ID]] +entry: + %error_ptr_ref = alloca swifterror %swift_error* + store %swift_error* null, %swift_error** %error_ptr_ref + %call = call float @foo(%swift_error** swifterror %error_ptr_ref) + %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref + %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null + %0 = bitcast %swift_error* %error_from_foo to i8* + br i1 %had_error_from_foo, label %handler, label %cont +cont: + %v1 = getelementptr inbounds %swift_error, %swift_error* %error_from_foo, i64 0, i32 1 + %t = load i8, i8* %v1 + store i8 %t, i8* %error_ref + br label %handler +handler: + call void @free(i8* %0) + ret float 1.0 +} + +define float @caller2(i8* %error_ref) { +; CHECK-APPLE-LABEL: caller2: +; CHECK-APPLE: mov [[ID:x[0-9]+]], x0 +; CHECK-APPLE: fmov [[CMP:s[0-9]+]], #1.0 +; CHECK-APPLE: mov x19, xzr +; CHECK-APPLE: bl {{.*}}foo +; CHECK-APPLE: cbnz x19 +; CHECK-APPLE: fcmp s0, [[CMP]] +; CHECK-APPLE: b.le +; Access part of the error object and save it to error_ref +; CHECK-APPLE: ldrb [[CODE:w[0-9]+]], [x19, #8] +; CHECK-APPLE: strb [[CODE]], [{{.*}}[[ID]]] +; CHECK-APPLE: mov x0, x19 +; CHECK_APPLE: bl {{.*}}free + +; CHECK-O0-LABEL: caller2: +; CHECK-O0: mov x19 +; CHECK-O0: bl {{.*}}foo 
+; CHECK-O0: mov [[ID:x[0-9]+]], x19 +; CHECK-O0: cbnz [[ID]] +entry: + %error_ptr_ref = alloca swifterror %swift_error* + br label %bb_loop +bb_loop: + store %swift_error* null, %swift_error** %error_ptr_ref + %call = call float @foo(%swift_error** swifterror %error_ptr_ref) + %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref + %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null + %0 = bitcast %swift_error* %error_from_foo to i8* + br i1 %had_error_from_foo, label %handler, label %cont +cont: + %cmp = fcmp ogt float %call, 1.000000e+00 + br i1 %cmp, label %bb_end, label %bb_loop +bb_end: + %v1 = getelementptr inbounds %swift_error, %swift_error* %error_from_foo, i64 0, i32 1 + %t = load i8, i8* %v1 + store i8 %t, i8* %error_ref + br label %handler +handler: + call void @free(i8* %0) + ret float 1.0 +} + +define float @foo_if(%swift_error** swifterror %error_ptr_ref, i32 %cc) { +; CHECK-APPLE-LABEL: foo_if: +; CHECK-APPLE: cbz w0 +; CHECK-APPLE: orr w0, wzr, #0x10 +; CHECK-APPLE: malloc +; CHECK-APPLE: orr [[ID:w[0-9]+]], wzr, #0x1 +; CHECK-APPLE: strb [[ID]], [x0, #8] +; CHECK-APPLE: mov x19, x0 +; CHECK-APPLE-NOT: x19 +; CHECK-APPLE: ret + +; CHECK-O0-LABEL: foo_if: +; spill x19 +; CHECK-O0: str x19 +; CHECK-O0: cbz w0 +; CHECK-O0: orr w{{.*}}, wzr, #0x10 +; CHECK-O0: malloc +; CHECK-O0: mov [[ID:x[0-9]+]], x0 +; CHECK-O0: orr [[ID2:w[0-9]+]], wzr, #0x1 +; CHECK-O0: strb [[ID2]], [x0, #8] +; CHECK-O0: mov x19, [[ID]] +; CHECK-O0: ret +; reload from stack +; CHECK-O0: ldr x19 +; CHECK-O0: ret +entry: + %cond = icmp ne i32 %cc, 0 + br i1 %cond, label %gen_error, label %normal + +gen_error: + %call = call i8* @malloc(i64 16) + %call.0 = bitcast i8* %call to %swift_error* + store %swift_error* %call.0, %swift_error** %error_ptr_ref + %0 = getelementptr inbounds i8, i8* %call, i64 8 + store i8 1, i8* %0 + ret float 1.0 + +normal: + ret float 0.0 +} + +define float @foo_loop(%swift_error** swifterror %error_ptr_ref, i32 %cc, float %cc2) 
{ +; CHECK-APPLE-LABEL: foo_loop: +; CHECK-APPLE: mov x0, x19 +; CHECK-APPLE: cbz +; CHECK-APPLE: orr w0, wzr, #0x10 +; CHECK-APPLE: malloc +; CHECK-APPLE: strb w{{.*}}, [x0, #8] +; CHECK-APPLE: fcmp +; CHECK-APPLE: b.le +; CHECK-APPLE: mov x19, x0 +; CHECK-APPLE: ret + +; CHECK-O0-LABEL: foo_loop: +; spill x19 +; CHECK-O0: str x19 +; CHECk-O0: cbz +; CHECK-O0: orr w{{.*}}, wzr, #0x10 +; CHECK-O0: malloc +; CHECK-O0: mov [[ID:x[0-9]+]], x0 +; CHECK-O0: strb w{{.*}}, [{{.*}}[[ID]], #8] +; spill x0 +; CHECK-O0: str x0 +; CHECK-O0: fcmp +; CHECK-O0: b.le +; reload from stack +; CHECK-O0: ldr x19 +; CHECK-O0: ret +entry: + br label %bb_loop + +bb_loop: + %cond = icmp ne i32 %cc, 0 + br i1 %cond, label %gen_error, label %bb_cont + +gen_error: + %call = call i8* @malloc(i64 16) + %call.0 = bitcast i8* %call to %swift_error* + store %swift_error* %call.0, %swift_error** %error_ptr_ref + %0 = getelementptr inbounds i8, i8* %call, i64 8 + store i8 1, i8* %0 + br label %bb_cont + +bb_cont: + %cmp = fcmp ogt float %cc2, 1.000000e+00 + br i1 %cmp, label %bb_end, label %bb_loop +bb_end: + ret float 0.0 +} + +%struct.S = type { i32, i32, i32, i32, i32, i32 } + +define void @foo_sret(%struct.S* sret %agg.result, i32 %val1, %swift_error** swifterror %error_ptr_ref) { +; CHECK-APPLE-LABEL: foo_sret: +; CHECK-APPLE: mov [[SRET:x[0-9]+]], x8 +; CHECK-APPLE: orr w0, wzr, #0x10 +; CHECK-APPLE: malloc +; CHECK-APPLE: orr [[ID:w[0-9]+]], wzr, #0x1 +; CHECK-APPLE: strb [[ID]], [x0, #8] +; CHECK-APPLE: str w{{.*}}, [{{.*}}[[SRET]], #4] +; CHECK-APPLE: mov x19, x0 +; CHECK-APPLE-NOT: x19 + +; CHECK-O0-LABEL: foo_sret: +; CHECK-O0: orr w{{.*}}, wzr, #0x10 +; spill x8 +; CHECK-O0-DAG: str x8 +; spill x19 +; CHECK-O0-DAG: str x19 +; CHECK-O0: malloc +; CHECK-O0: orr [[ID:w[0-9]+]], wzr, #0x1 +; CHECK-O0: strb [[ID]], [x0, #8] +; reload from stack +; CHECK-O0: ldr [[SRET:x[0-9]+]] +; CHECK-O0: str w{{.*}}, [{{.*}}[[SRET]], #4] +; CHECK-O0: mov x19 +; CHECK-O0-NOT: x19 +entry: + %call = call i8* 
@malloc(i64 16) + %call.0 = bitcast i8* %call to %swift_error* + store %swift_error* %call.0, %swift_error** %error_ptr_ref + %0 = getelementptr inbounds i8, i8* %call, i64 8 + store i8 1, i8* %0 + %v2 = getelementptr inbounds %struct.S, %struct.S* %agg.result, i32 0, i32 1 + store i32 %val1, i32* %v2 + ret void +} + +define float @caller3(i8* %error_ref) { +; CHECK-APPLE-LABEL: caller3: +; CHECK-APPLE: mov [[ID:x[0-9]+]], x0 +; CHECK-APPLE: mov x19, xzr +; CHECK-APPLE: bl {{.*}}foo_sret +; CHECK-APPLE: cbnz x19 +; Access part of the error object and save it to error_ref +; CHECK-APPLE: ldrb [[CODE:w[0-9]+]], [x19, #8] +; CHECK-APPLE: strb [[CODE]], [{{.*}}[[ID]]] +; CHECK-APPLE: mov x0, x19 +; CHECK_APPLE: bl {{.*}}free + +; CHECK-O0-LABEL: caller3: +; spill x0 +; CHECK-O0: str x0 +; CHECK-O0: mov x19 +; CHECK-O0: bl {{.*}}foo_sret +; CHECK-O0: mov [[ID2:x[0-9]+]], x19 +; CHECK-O0: cbnz [[ID2]] +; Access part of the error object and save it to error_ref +; reload from stack +; CHECK-O0: ldrb [[CODE:w[0-9]+]] +; CHECK-O0: ldr [[ID:x[0-9]+]] +; CHECK-O0: strb [[CODE]], [{{.*}}[[ID]]] +; CHECK_O0: bl {{.*}}free +entry: + %s = alloca %struct.S, align 8 + %error_ptr_ref = alloca swifterror %swift_error* + store %swift_error* null, %swift_error** %error_ptr_ref + call void @foo_sret(%struct.S* sret %s, i32 1, %swift_error** swifterror %error_ptr_ref) + %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref + %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null + %0 = bitcast %swift_error* %error_from_foo to i8* + br i1 %had_error_from_foo, label %handler, label %cont +cont: + %v1 = getelementptr inbounds %swift_error, %swift_error* %error_from_foo, i64 0, i32 1 + %t = load i8, i8* %v1 + store i8 %t, i8* %error_ref + br label %handler +handler: + call void @free(i8* %0) + ret float 1.0 +} + +declare void @llvm.va_start(i8*) nounwind +define float @foo_vararg(%swift_error** swifterror %error_ptr_ref, ...) 
{ +; CHECK-APPLE-LABEL: foo_vararg: +; CHECK-APPLE: orr w0, wzr, #0x10 +; CHECK-APPLE: malloc +; CHECK-APPLE: orr [[ID:w[0-9]+]], wzr, #0x1 +; CHECK-FIXMEAPPLE: add [[ARGS:x[0-9]+]], [[TMP:x[0-9]+]], #16 +; CHECK-APPLE: strb [[ID]], [x0, #8] + +; First vararg +; CHECK-FIXMEAPPLE-DAG: orr {{x[0-9]+}}, [[ARGS]], #0x8 +; CHECK-FIXMEAPPLE-DAG: ldr {{w[0-9]+}}, [{{.*}}[[TMP]], #16] +; CHECK-APPLE: add {{x[0-9]+}}, {{x[0-9]+}}, #8 +; Second vararg +; CHECK-APPLE: ldr {{w[0-9]+}}, [{{x[0-9]+}}] +; CHECK-APPLE: add {{x[0-9]+}}, {{x[0-9]+}}, #8 +; Third vararg +; CHECK-APPLE: ldr {{w[0-9]+}}, [{{x[0-9]+}}] + +; CHECK-APPLE: mov x19, x0 +; CHECK-APPLE-NOT: x19 +entry: + %call = call i8* @malloc(i64 16) + %call.0 = bitcast i8* %call to %swift_error* + store %swift_error* %call.0, %swift_error** %error_ptr_ref + %0 = getelementptr inbounds i8, i8* %call, i64 8 + store i8 1, i8* %0 + + %args = alloca i8*, align 8 + %a10 = alloca i32, align 4 + %a11 = alloca i32, align 4 + %a12 = alloca i32, align 4 + %v10 = bitcast i8** %args to i8* + call void @llvm.va_start(i8* %v10) + %v11 = va_arg i8** %args, i32 + store i32 %v11, i32* %a10, align 4 + %v12 = va_arg i8** %args, i32 + store i32 %v12, i32* %a11, align 4 + %v13 = va_arg i8** %args, i32 + store i32 %v13, i32* %a12, align 4 + + ret float 1.0 +} + +define float @caller4(i8* %error_ref) { +; CHECK-APPLE-LABEL: caller4: + +; CHECK-APPLE: mov [[ID:x[0-9]+]], x0 +; CHECK-APPLE: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #8] +; CHECK-APPLE: str {{x[0-9]+}}, [sp] + +; CHECK-APPLE: mov x19, xzr +; CHECK-APPLE: bl {{.*}}foo_vararg +; CHECK-APPLE: cbnz x19 +; Access part of the error object and save it to error_ref +; CHECK-APPLE: ldrb [[CODE:w[0-9]+]], [x19, #8] +; CHECK-APPLE: strb [[CODE]], [{{.*}}[[ID]]] +; CHECK-APPLE: mov x0, x19 +; CHECK_APPLE: bl {{.*}}free +entry: + %error_ptr_ref = alloca swifterror %swift_error* + store %swift_error* null, %swift_error** %error_ptr_ref + + %a10 = alloca i32, align 4 + %a11 = alloca i32, align 4 + %a12 = 
alloca i32, align 4 + store i32 10, i32* %a10, align 4 + store i32 11, i32* %a11, align 4 + store i32 12, i32* %a12, align 4 + %v10 = load i32, i32* %a10, align 4 + %v11 = load i32, i32* %a11, align 4 + %v12 = load i32, i32* %a12, align 4 + + %call = call float (%swift_error**, ...) @foo_vararg(%swift_error** swifterror %error_ptr_ref, i32 %v10, i32 %v11, i32 %v12) + %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref + %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null + %0 = bitcast %swift_error* %error_from_foo to i8* + br i1 %had_error_from_foo, label %handler, label %cont + +cont: + %v1 = getelementptr inbounds %swift_error, %swift_error* %error_from_foo, i64 0, i32 1 + %t = load i8, i8* %v1 + store i8 %t, i8* %error_ref + br label %handler +handler: + call void @free(i8* %0) + ret float 1.0 +} Index: test/CodeGen/ARM/swifterror.ll =================================================================== --- /dev/null +++ test/CodeGen/ARM/swifterror.ll @@ -0,0 +1,368 @@ +; RUN: llc -verify-machineinstrs < %s -mtriple=armv7-apple-ios | FileCheck --check-prefix=CHECK-APPLE %s +; RUN: llc -verify-machineinstrs -O0 < %s -mtriple=armv7-apple-ios | FileCheck --check-prefix=CHECK-O0 %s + +declare i8* @malloc(i64) +declare void @free(i8*) +%swift_error = type {i64, i8} + +define float @foo(%swift_error** swifterror %error_ptr_ref) { +; CHECK-APPLE-LABEL: foo: +; CHECK-APPLE: mov r0, #16 +; CHECK-APPLE: malloc +; CHECK-APPLE-DAG: mov [[ID:r[0-9]+]], #1 +; CHECK-APPLE-DAG: mov r6, r{{.*}} +; CHECK-APPLE-DAG: strb [[ID]], [r{{.*}}, #8] + +; CHECK-O0-LABEL: foo: +; CHECK-O0: mov r{{.*}}, #16 +; CHECK-O0: malloc +; CHECK-O0: mov [[ID2:r[0-9]+]], r0 +; CHECK-O0: mov [[ID:r[0-9]+]], #1 +; CHECK-O0: strb [[ID]], [r0, #8] +; CHECK-O0: mov r6, [[ID2]] +entry: + %call = call i8* @malloc(i64 16) + %call.0 = bitcast i8* %call to %swift_error* + store %swift_error* %call.0, %swift_error** %error_ptr_ref + %0 = getelementptr inbounds i8, i8* %call, i64 8 + 
store i8 1, i8* %0 + ret float 1.0 +} + +define float @caller(i8* %error_ref) { +; CHECK-APPLE-LABEL: caller: +; CHECK-APPLE-DAG: mov [[ID:r[0-9]+]], r0 +; CHECK-APPLE-DAG: mov r6, #0 +; CHECK-APPLE: bl {{.*}}foo +; CHECK-APPLE: cmp r6, #0 +; Access part of the error object and save it to error_ref +; CHECK-APPLE: ldrbeq [[CODE:r[0-9]+]], [r6, #8] +; CHECK-APPLE: strbeq [[CODE]], [{{.*}}[[ID]]] +; CHECK-APPLE: mov r0, r6 +; CHECK_APPLE: bl {{.*}}free + +; CHECK-O0-LABEL: caller: +; spill r0 +; CHECK-O0-DAG: str r0, +; CHECK-O0-DAG: mov r6, #0 +; CHECK-O0: bl {{.*}}foo +; CHECK-O0: mov r{{.*}}, r6 +; CHECK-O0: bne +; CHECK-O0: ldrb [[CODE:r[0-9]+]], [r0, #8] +; reload r0 +; CHECK-O0: ldr [[ID:r[0-9]+]], +; CHECK-O0: strb [[CODE]], [{{.*}}[[ID]]] +; CHECK-O0: mov r0, +; CHECK-O0: free +entry: + %error_ptr_ref = alloca swifterror %swift_error* + store %swift_error* null, %swift_error** %error_ptr_ref + %call = call float @foo(%swift_error** swifterror %error_ptr_ref) + %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref + %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null + %0 = bitcast %swift_error* %error_from_foo to i8* + br i1 %had_error_from_foo, label %handler, label %cont +cont: + %v1 = getelementptr inbounds %swift_error, %swift_error* %error_from_foo, i64 0, i32 1 + %t = load i8, i8* %v1 + store i8 %t, i8* %error_ref + br label %handler +handler: + call void @free(i8* %0) + ret float 1.0 +} + +define float @caller2(i8* %error_ref) { +; CHECK-APPLE-LABEL: caller2: +; CHECK-APPLE-DAG: mov [[ID:r[0-9]+]], r0 +; CHECK-APPLE-DAG: mov r6, #0 +; CHECK-APPLE: bl {{.*}}foo +; CHECK-APPLE: cmp r6, #0 +; CHECK-APPLE: bne +; Access part of the error object and save it to error_ref +; CHECK-APPLE: ldrb [[CODE:r[0-9]+]], [r6, #8] +; CHECK-APPLE: strb [[CODE]], [{{.*}}[[ID]]] +; CHECK-APPLE: mov r0, r6 +; CHECK_APPLE: bl {{.*}}free + +; CHECK-O0-LABEL: caller2: +; spill r0 +; CHECK-O0-DAG: str r0, +; CHECK-O0-DAG: mov r6, #0 +; CHECK-O0: bl 
{{.*}}foo +; CHECK-O0: mov r{{.*}}, r6 +; CHECK-O0: bne +; CHECK-O0: ble +; CHECK-O0: ldrb [[CODE:r[0-9]+]], [r0, #8] +; reload r0 +; CHECK-O0: ldr [[ID:r[0-9]+]], +; CHECK-O0: strb [[CODE]], [{{.*}}[[ID]]] +; CHECK-O0: mov r0, +; CHECK-O0: free +entry: + %error_ptr_ref = alloca swifterror %swift_error* + br label %bb_loop +bb_loop: + store %swift_error* null, %swift_error** %error_ptr_ref + %call = call float @foo(%swift_error** swifterror %error_ptr_ref) + %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref + %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null + %0 = bitcast %swift_error* %error_from_foo to i8* + br i1 %had_error_from_foo, label %handler, label %cont +cont: + %cmp = fcmp ogt float %call, 1.000000e+00 + br i1 %cmp, label %bb_end, label %bb_loop +bb_end: + %v1 = getelementptr inbounds %swift_error, %swift_error* %error_from_foo, i64 0, i32 1 + %t = load i8, i8* %v1 + store i8 %t, i8* %error_ref + br label %handler +handler: + call void @free(i8* %0) + ret float 1.0 +} + +define float @foo_if(%swift_error** swifterror %error_ptr_ref, i32 %cc) { +; CHECK-APPLE-LABEL: foo_if: +; CHECK-APPLE: cmp r0, #0 +; CHECK-APPLE: eq +; CHECK-APPLE: mov r0, #16 +; CHECK-APPLE: malloc +; CHECK-APPLE: mov [[ID:r[0-9]+]], #1 +; CHECK-APPLE-DAG: mov r6, r{{.*}} +; CHECK-APPLE-DAG: strb [[ID]], [r{{.*}}, #8] + +; CHECK-O0-LABEL: foo_if: +; CHECK-O0: cmp r0, #0 +; spill to stack +; CHECK-O0: str r6 +; CHECK-O0: beq +; CHECK-O0: mov r0, #16 +; CHECK-O0: malloc +; CHECK-O0: mov [[ID:r[0-9]+]], r0 +; CHECK-O0: mov [[ID2:[a-z0-9]+]], #1 +; CHECK-O0: strb [[ID2]], [r0, #8] +; CHECK-O0: mov r6, [[ID]] +; reload from stack +; CHECK-O0: ldr r6 +entry: + %cond = icmp ne i32 %cc, 0 + br i1 %cond, label %gen_error, label %normal + +gen_error: + %call = call i8* @malloc(i64 16) + %call.0 = bitcast i8* %call to %swift_error* + store %swift_error* %call.0, %swift_error** %error_ptr_ref + %0 = getelementptr inbounds i8, i8* %call, i64 8 + store i8 1, i8* 
%0 + ret float 1.0 + +normal: + ret float 0.0 +} + +define float @foo_loop(%swift_error** swifterror %error_ptr_ref, i32 %cc, float %cc2) { +; CHECK-APPLE-LABEL: foo_loop: +; CHECK-APPLE: mov [[CODE:r[0-9]+]], r0 +; swifterror is kept in a register +; CHECK-APPLE: mov [[ID:r[0-9]+]], r6 +; CHECK-APPLE: cmp [[CODE]], #0 +; CHECK-APPLE: beq +; CHECK-APPLE: mov r0, #16 +; CHECK-APPLE: malloc +; CHECK-APPLE: strb r{{.*}}, [{{.*}}[[ID]], #8] +; CHECK-APPLE: ble +; CHECK-APPLE: mov r6, [[ID]] + +; CHECK-O0-LABEL: foo_loop: +; CHECK-O0: mov r{{.*}}, r6 +; CHECK-O0: cmp r{{.*}}, #0 +; CHECK-O0: beq +; CHECK-O0-DAG: movw r{{.*}}, #1 +; CHECK-O0-DAG: mov r{{.*}}, #16 +; CHECK-O0: malloc +; CHECK-O0-DAG: mov [[ID:r[0-9]+]], r0 +; CHECK-O0-DAG: ldr [[ID2:r[0-9]+]], [sp{{.*}}] +; CHECK-O0: strb [[ID2]], [{{.*}}[[ID]], #8] +; spill r0 +; CHECK-O0: str r0, [sp{{.*}}] +; CHECK-O0: vcmpe +; CHECK-O0: ble +; reload from stack +; CHECK-O0: ldr r6 +entry: + br label %bb_loop + +bb_loop: + %cond = icmp ne i32 %cc, 0 + br i1 %cond, label %gen_error, label %bb_cont + +gen_error: + %call = call i8* @malloc(i64 16) + %call.0 = bitcast i8* %call to %swift_error* + store %swift_error* %call.0, %swift_error** %error_ptr_ref + %0 = getelementptr inbounds i8, i8* %call, i64 8 + store i8 1, i8* %0 + br label %bb_cont + +bb_cont: + %cmp = fcmp ogt float %cc2, 1.000000e+00 + br i1 %cmp, label %bb_end, label %bb_loop +bb_end: + ret float 0.0 +} + +%struct.S = type { i32, i32, i32, i32, i32, i32 } + +define void @foo_sret(%struct.S* sret %agg.result, i32 %val1, %swift_error** swifterror %error_ptr_ref) { +; CHECK-APPLE-LABEL: foo_sret: +; CHECK-APPLE: mov [[SRET:r[0-9]+]], r0 +; CHECK-APPLE: mov r0, #16 +; CHECK-APPLE: malloc +; CHECK-APPLE: mov [[REG:r[0-9]+]], #1 +; CHECK-APPLE-DAG: mov r6, r0 +; CHECK-APPLE-DAG: strb [[REG]], [r0, #8] +; CHECK-APPLE-DAG: str r{{.*}}, [{{.*}}[[SRET]], #4] + +; CHECK-O0-LABEL: foo_sret: +; CHECK-O0: mov r{{.*}}, #16 +; spill to stack: sret and val1 +; CHECK-O0-DAG: 
str r0
+; CHECK-O0-DAG: str r1
+; CHECK-O0: malloc
+; CHECK-O0: mov [[ID:r[0-9]+]], #1
+; CHECK-O0: strb [[ID]], [r0, #8]
+; reload from stack: sret and val1
+; CHECK-O0: ldr
+; CHECK-O0: ldr
+; CHECK-O0: str r{{.*}}, [{{.*}}, #4]
+; CHECK-O0: mov r6
+entry:
+ %call = call i8* @malloc(i64 16)
+ %call.0 = bitcast i8* %call to %swift_error*
+ store %swift_error* %call.0, %swift_error** %error_ptr_ref
+ %0 = getelementptr inbounds i8, i8* %call, i64 8
+ store i8 1, i8* %0
+ %v2 = getelementptr inbounds %struct.S, %struct.S* %agg.result, i32 0, i32 1
+ store i32 %val1, i32* %v2
+ ret void
+}
+
+define float @caller3(i8* %error_ref) {
+; CHECK-APPLE-LABEL: caller3:
+; CHECK-APPLE: mov [[ID:r[0-9]+]], r0
+; CHECK-APPLE: mov r6, #0
+; CHECK-APPLE: bl {{.*}}foo_sret
+; CHECK-APPLE: cmp r6, #0
+; Access part of the error object and save it to error_ref
+; CHECK-APPLE: ldrbeq [[CODE:r[0-9]+]], [r6, #8]
+; CHECK-APPLE: strbeq [[CODE]], [{{.*}}[[ID]]]
+; CHECK-APPLE: mov r0, r6
+; CHECK-APPLE: bl {{.*}}free
+
+; CHECK-O0-LABEL: caller3:
+; CHECK-O0-DAG: mov r6, #0
+; CHECK-O0-DAG: mov r0
+; CHECK-O0-DAG: mov r1
+; CHECK-O0: bl {{.*}}foo_sret
+; CHECK-O0: mov [[ID2:r[0-9]+]], r6
+; CHECK-O0: cmp [[ID2]]
+; CHECK-O0: bne
+; Access part of the error object and save it to error_ref
+; CHECK-O0: ldrb [[CODE:r[0-9]+]]
+; CHECK-O0: ldr [[ID:r[0-9]+]]
+; CHECK-O0: strb [[CODE]], [{{.*}}[[ID]]]
+; CHECK-O0: mov r0,
+; CHECK-O0: bl {{.*}}free
+entry:
+ %s = alloca %struct.S, align 8
+ %error_ptr_ref = alloca swifterror %swift_error*
+ store %swift_error* null, %swift_error** %error_ptr_ref
+ call void @foo_sret(%struct.S* sret %s, i32 1, %swift_error** swifterror %error_ptr_ref)
+ %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref
+ %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null
+ %0 = bitcast %swift_error* %error_from_foo to i8*
+ br i1 %had_error_from_foo, label %handler, label %cont
+cont:
+ %v1 = getelementptr inbounds %swift_error, %swift_error* 
%error_from_foo, i64 0, i32 1
+ %t = load i8, i8* %v1
+ store i8 %t, i8* %error_ref
+ br label %handler
+handler:
+ call void @free(i8* %0)
+ ret float 1.0
+}
+
+declare void @llvm.va_start(i8*) nounwind
+define float @foo_vararg(%swift_error** swifterror %error_ptr_ref, ...) {
+; CHECK-APPLE-LABEL: foo_vararg:
+; CHECK-APPLE: mov r0, #16
+; CHECK-APPLE: malloc
+; CHECK-APPLE: mov [[REG:r[0-9]+]], r0
+; CHECK-APPLE: mov [[ID:r[0-9]+]], #1
+; CHECK-APPLE-DAG: strb [[ID]], [{{.*}}[[REG]], #8]
+; CHECK-APPLE-DAG: mov r6, [[REG]]
+
+entry:
+ %call = call i8* @malloc(i64 16)
+ %call.0 = bitcast i8* %call to %swift_error*
+ store %swift_error* %call.0, %swift_error** %error_ptr_ref
+ %0 = getelementptr inbounds i8, i8* %call, i64 8
+ store i8 1, i8* %0
+
+ %args = alloca i8*, align 8
+ %a10 = alloca i32, align 4
+ %a11 = alloca i32, align 4
+ %a12 = alloca i32, align 4
+ %v10 = bitcast i8** %args to i8*
+ call void @llvm.va_start(i8* %v10)
+ %v11 = va_arg i8** %args, i32
+ store i32 %v11, i32* %a10, align 4
+ %v12 = va_arg i8** %args, i32
+ store i32 %v12, i32* %a11, align 4
+ %v13 = va_arg i8** %args, i32
+ store i32 %v13, i32* %a12, align 4
+
+ ret float 1.0
+}
+
+define float @caller4(i8* %error_ref) {
+; CHECK-APPLE-LABEL: caller4:
+; CHECK-APPLE: mov [[ID:r[0-9]+]], r0
+; CHECK-APPLE: mov r6, #0
+; CHECK-APPLE: bl {{.*}}foo_vararg
+; CHECK-APPLE: cmp r6, #0
+; Access part of the error object and save it to error_ref
+; CHECK-APPLE: ldrbeq [[CODE:r[0-9]+]], [r6, #8]
+; CHECK-APPLE: strbeq [[CODE]], [{{.*}}[[ID]]]
+; CHECK-APPLE: mov r0, r6
+; CHECK-APPLE: bl {{.*}}free
+entry:
+ %error_ptr_ref = alloca swifterror %swift_error*
+ store %swift_error* null, %swift_error** %error_ptr_ref
+
+ %a10 = alloca i32, align 4
+ %a11 = alloca i32, align 4
+ %a12 = alloca i32, align 4
+ store i32 10, i32* %a10, align 4
+ store i32 11, i32* %a11, align 4
+ store i32 12, i32* %a12, align 4
+ %v10 = load i32, i32* %a10, align 4
+ %v11 = load i32, i32* %a11, align 4
+ %v12 = load i32, 
i32* %a12, align 4 + + %call = call float (%swift_error**, ...) @foo_vararg(%swift_error** swifterror %error_ptr_ref, i32 %v10, i32 %v11, i32 %v12) + %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref + %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null + %0 = bitcast %swift_error* %error_from_foo to i8* + br i1 %had_error_from_foo, label %handler, label %cont + +cont: + %v1 = getelementptr inbounds %swift_error, %swift_error* %error_from_foo, i64 0, i32 1 + %t = load i8, i8* %v1 + store i8 %t, i8* %error_ref + br label %handler +handler: + call void @free(i8* %0) + ret float 1.0 +} Index: test/CodeGen/X86/swifterror.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/swifterror.ll @@ -0,0 +1,276 @@ +; RUN: llc -verify-machineinstrs < %s -mtriple=x86_64-apple-darwin | FileCheck --check-prefix=CHECK-APPLE %s +; RUN: llc -verify-machineinstrs -O0 < %s -mtriple=x86_64-apple-darwin | FileCheck --check-prefix=CHECK-O0 %s + +declare i8* @malloc(i64) +declare void @free(i8*) +%swift_error = type {i64, i8} + +define float @foo(%swift_error** swifterror %error_ptr_ref) { +; CHECK-APPLE-LABEL: foo: +; CHECK-APPLE: movl $16, %edi +; CHECK-APPLE: malloc +; CHECK-APPLE: movb $1, 8(%rax) +; CHECK-APPLE: movq %rax, %r12 + +; CHECK-O0-LABEL: foo: +; CHECK-O0: movl $16 +; CHECK-O0: malloc +; CHECK-O0: movb $1, 8(%rax) +; CHECK-O0: movq %{{.*}}, %r12 +entry: + %call = call i8* @malloc(i64 16) + %call.0 = bitcast i8* %call to %swift_error* + store %swift_error* %call.0, %swift_error** %error_ptr_ref + %0 = getelementptr inbounds i8, i8* %call, i64 8 + store i8 1, i8* %0 + ret float 1.0 +} + +define float @caller(i8* %error_ref) { +; CHECK-APPLE-LABEL: caller: +; CHECK-APPLE: xorl %r12d, %r12d +; CHECK-APPLE: callq {{.*}}foo +; CHECK-APPLE: testq %r12, %r12 +; CHECK-APPLE: jne +; Access part of the error object and save it to error_ref +; CHECK-APPLE: movb 8(%r12) +; CHECK-APPLE: movq %r12, %rdi +; 
CHECK-APPLE: callq {{.*}}free
+
+; CHECK-O0-LABEL: caller:
+; CHECK-O0: xorl
+; CHECK-O0: movl %{{.*}}, %r12d
+; CHECK-O0: callq {{.*}}foo
+; CHECK-O0: jne
+entry:
+ %error_ptr_ref = alloca swifterror %swift_error*
+ store %swift_error* null, %swift_error** %error_ptr_ref
+ %call = call float @foo(%swift_error** swifterror %error_ptr_ref)
+ %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref
+ %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null
+ %0 = bitcast %swift_error* %error_from_foo to i8*
+ br i1 %had_error_from_foo, label %handler, label %cont
+cont:
+ %v1 = getelementptr inbounds %swift_error, %swift_error* %error_from_foo, i64 0, i32 1
+ %t = load i8, i8* %v1
+ store i8 %t, i8* %error_ref
+ br label %handler
+handler:
+ call void @free(i8* %0)
+ ret float 1.0
+}
+
+define float @caller2(i8* %error_ref) {
+; CHECK-APPLE-LABEL: caller2:
+; CHECK-APPLE: xorl %r12d, %r12d
+; CHECK-APPLE: callq {{.*}}foo
+; CHECK-APPLE: testq %r12, %r12
+; CHECK-APPLE: jne
+; CHECK-APPLE: ucomiss
+; CHECK-APPLE: jbe
+; Access part of the error object and save it to error_ref
+; CHECK-APPLE: movb 8(%r12)
+; CHECK-APPLE: movq %r12, %rdi
+; CHECK-APPLE: callq {{.*}}free
+
+; CHECK-O0-LABEL: caller2:
+; CHECK-O0: xorl
+; CHECK-O0: movl %{{.*}}, %r12d
+; CHECK-O0: callq {{.*}}foo
+; CHECK-O0: movq %r12, [[ID:%[a-z]+]]
+; CHECK-O0: cmpq $0, [[ID]]
+; CHECK-O0: jne
+entry:
+ %error_ptr_ref = alloca swifterror %swift_error*
+ br label %bb_loop
+bb_loop:
+ store %swift_error* null, %swift_error** %error_ptr_ref
+ %call = call float @foo(%swift_error** swifterror %error_ptr_ref)
+ %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref
+ %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null
+ %0 = bitcast %swift_error* %error_from_foo to i8*
+ br i1 %had_error_from_foo, label %handler, label %cont
+cont:
+ %cmp = fcmp ogt float %call, 1.000000e+00
+ br i1 %cmp, label %bb_end, label %bb_loop
+bb_end:
+ %v1 = getelementptr inbounds 
%swift_error, %swift_error* %error_from_foo, i64 0, i32 1 + %t = load i8, i8* %v1 + store i8 %t, i8* %error_ref + br label %handler +handler: + call void @free(i8* %0) + ret float 1.0 +} + +define float @foo_if(%swift_error** swifterror %error_ptr_ref, i32 %cc) { +; CHECK-APPLE-LABEL: foo_if: +; CHECK-APPLE: testl %edi, %edi +; CHECK-APPLE: je +; CHECK-APPLE: movl $16, %edi +; CHECK-APPLE: malloc +; CHECK-APPLE: movb $1, 8(%rax) +; CHECK-APPLE: movq %rax, %r12 +; CHECK-APPLE-NOT: %r12 +; CHECK-APPLE: ret + +; CHECK-O0-LABEL: foo_if: +; CHECK-O0: cmpl $0 +; spill to stack +; CHECK-O0: movq %r12, {{.*}}(%rsp) +; CHECK-O0: je +; CHECK-O0: movl $16, +; CHECK-O0: malloc +; CHECK-O0: movq %rax, [[ID:%[a-z]+]] +; CHECK-O0-DAG: movb $1, 8(%rax) +; CHECK-O0-DAG: movq [[ID]], %r12 +; CHECK-O0: ret +; reload from stack +; CHECK-O0: movq {{.*}}(%rsp), %r12 +; CHECK-O0: ret +entry: + %cond = icmp ne i32 %cc, 0 + br i1 %cond, label %gen_error, label %normal + +gen_error: + %call = call i8* @malloc(i64 16) + %call.0 = bitcast i8* %call to %swift_error* + store %swift_error* %call.0, %swift_error** %error_ptr_ref + %0 = getelementptr inbounds i8, i8* %call, i64 8 + store i8 1, i8* %0 + ret float 1.0 + +normal: + ret float 0.0 +} + +define float @foo_loop(%swift_error** swifterror %error_ptr_ref, i32 %cc, float %cc2) { +; CHECK-APPLE-LABEL: foo_loop: +; CHECK-APPLE: movq %r12, %rax +; CHECK-APPLE: testl +; CHECK-APPLE: je +; CHECK-APPLE: movl $16, %edi +; CHECK-APPLE: malloc +; CHECK-APPLE: movb $1, 8(%rax) +; CHECK-APPLE: ucomiss +; CHECK-APPLE: jbe +; CHECK-APPLE: movq %rax, %r12 +; CHECK-APPLE: ret + +; CHECK-O0-LABEL: foo_loop: +; spill to stack +; CHECK-O0: movq %r12, {{.*}}(%rsp) +; CHECK-O0: cmpl $0 +; CHECK-O0: je +; CHECK-O0: movl $16, +; CHECK-O0: malloc +; CHECK-O0: movq %rax, [[ID:%[a-z]+]] +; CHECK-O0: movb $1, 8([[ID]]) +; CHECK-O0: jbe +; reload from stack +; CHECK-O0: movq {{.*}}(%rsp), %r12 +; CHECK-O0: ret +entry: + br label %bb_loop + +bb_loop: + %cond = icmp ne 
i32 %cc, 0
+ br i1 %cond, label %gen_error, label %bb_cont
+
+gen_error:
+ %call = call i8* @malloc(i64 16)
+ %call.0 = bitcast i8* %call to %swift_error*
+ store %swift_error* %call.0, %swift_error** %error_ptr_ref
+ %0 = getelementptr inbounds i8, i8* %call, i64 8
+ store i8 1, i8* %0
+ br label %bb_cont
+
+bb_cont:
+ %cmp = fcmp ogt float %cc2, 1.000000e+00
+ br i1 %cmp, label %bb_end, label %bb_loop
+bb_end:
+ ret float 0.0
+}
+
+%struct.S = type { i32, i32, i32, i32, i32, i32 }
+
+define void @foo_sret(%struct.S* sret %agg.result, i32 %val1, %swift_error** swifterror %error_ptr_ref) {
+; CHECK-APPLE-LABEL: foo_sret:
+; CHECK-APPLE: movq %rdi, %{{.*}}
+; CHECK-APPLE: movl $16, %edi
+; CHECK-APPLE: malloc
+; CHECK-APPLE: movb $1, 8(%rax)
+; CHECK-APPLE: movl %{{.*}}, 4(%{{.*}})
+; CHECK-APPLE: movq %rax, %r12
+; CHECK-APPLE: movq %{{.*}}, %rax
+; CHECK-APPLE-NOT: x19
+
+; CHECK-O0-LABEL: foo_sret:
+; CHECK-O0: movl $16,
+; spill sret to stack
+; CHECK-O0: movq %rdi,
+; CHECK-O0: movq {{.*}}, %rdi
+; CHECK-O0: malloc
+; CHECK-O0: movb $1, 8(%rax)
+; CHECK-O0: movl %{{.*}}, 4(%{{.*}})
+; CHECK-O0: movq %{{.*}}, %r12
+; reload sret from stack
+; CHECK-O0: movq {{.*}}(%rsp), %rax
+; CHECK-O0: ret
+entry:
+ %call = call i8* @malloc(i64 16)
+ %call.0 = bitcast i8* %call to %swift_error*
+ store %swift_error* %call.0, %swift_error** %error_ptr_ref
+ %0 = getelementptr inbounds i8, i8* %call, i64 8
+ store i8 1, i8* %0
+ %v2 = getelementptr inbounds %struct.S, %struct.S* %agg.result, i32 0, i32 1
+ store i32 %val1, i32* %v2
+ ret void
+}
+
+define float @caller3(i8* %error_ref) {
+; CHECK-APPLE-LABEL: caller3:
+; CHECK-APPLE: movl $1, %esi
+; CHECK-APPLE: xorl %r12d, %r12d
+; CHECK-APPLE: callq {{.*}}foo_sret
+; CHECK-APPLE: testq %r12, %r12
+; CHECK-APPLE: jne
+; Access part of the error object and save it to error_ref
+; CHECK-APPLE: movb 8(%r12),
+; CHECK-APPLE: movb %{{.*}},
+; CHECK-APPLE: movq %r12, %rdi
+; CHECK-APPLE: callq {{.*}}free
+
+; CHECK-O0-LABEL: 
caller3: +; CHECK-O0: xorl +; CHECK-O0: movl {{.*}}, %r12d +; CHECK-O0: movl $1, %esi +; CHECK-O0: movq {{.*}}, %rdi +; CHECK-O0: callq {{.*}}foo_sret +; CHECK-O0: movq %r12, +; CHECK-O0: cmpq $0 +; CHECK-O0: jne +; Access part of the error object and save it to error_ref +; CHECK-O0: movb 8(%{{.*}}), +; CHECK-O0: movb %{{.*}}, +; reload from stack +; CHECK-O0: movq {{.*}}(%rsp), %rdi +; CHECK-O0: callq {{.*}}free +entry: + %s = alloca %struct.S, align 8 + %error_ptr_ref = alloca swifterror %swift_error* + store %swift_error* null, %swift_error** %error_ptr_ref + call void @foo_sret(%struct.S* sret %s, i32 1, %swift_error** swifterror %error_ptr_ref) + %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref + %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null + %0 = bitcast %swift_error* %error_from_foo to i8* + br i1 %had_error_from_foo, label %handler, label %cont +cont: + %v1 = getelementptr inbounds %swift_error, %swift_error* %error_from_foo, i64 0, i32 1 + %t = load i8, i8* %v1 + store i8 %t, i8* %error_ref + br label %handler +handler: + call void @free(i8* %0) + ret float 1.0 +}