Index: llvm/lib/Target/X86/X86FastISel.cpp
===================================================================
--- llvm/lib/Target/X86/X86FastISel.cpp
+++ llvm/lib/Target/X86/X86FastISel.cpp
@@ -153,6 +153,8 @@
   bool isTypeLegal(Type *Ty, MVT &VT, bool AllowI1 = false);
 
+  bool isPointerSwiftError(const Value *PtrV);
+
   bool IsMemcpySmall(uint64_t Len);
 
   bool TryEmitSmallMemcpy(X86AddressMode DestAM,
                           X86AddressMode SrcAM, uint64_t Len);
@@ -1115,6 +1117,22 @@
   return false;
 }
 
+// Swifterror values can come from either a function parameter with
+// swifterror attribute or an alloca with swifterror attribute.
+bool X86FastISel::isPointerSwiftError(const Value *PtrV) {
+  if (!TLI.supportSwiftError())
+    return false;
+
+  if (const Argument *Arg = dyn_cast<Argument>(PtrV))
+    if (Arg->hasSwiftErrorAttr())
+      return true;
+
+  if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV))
+    if (Alloca->isSwiftError())
+      return true;
+
+  return false;
+}
 /// X86SelectStore - Select and emit code to implement store instructions.
 bool X86FastISel::X86SelectStore(const Instruction *I) {
   // Atomic stores need special handling.
@@ -1124,20 +1142,8 @@
   if (S->isAtomic())
     return false;
 
-  const Value *PtrV = I->getOperand(1);
-  if (TLI.supportSwiftError()) {
-    // Swifterror values can come from either a function parameter with
-    // swifterror attribute or an alloca with swifterror attribute.
-    if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
-      if (Arg->hasSwiftErrorAttr())
-        return false;
-    }
-
-    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
-      if (Alloca->isSwiftError())
-        return false;
-    }
-  }
+  if (isPointerSwiftError(I->getOperand(1)))
+    return false;
 
   const Value *Val = S->getValueOperand();
   const Value *Ptr = S->getPointerOperand();
@@ -1316,20 +1322,8 @@
   if (LI->isAtomic())
     return false;
 
-  const Value *SV = I->getOperand(0);
-  if (TLI.supportSwiftError()) {
-    // Swifterror values can come from either a function parameter with
-    // swifterror attribute or an alloca with swifterror attribute.
-    if (const Argument *Arg = dyn_cast<Argument>(SV)) {
-      if (Arg->hasSwiftErrorAttr())
-        return false;
-    }
-
-    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
-      if (Alloca->isSwiftError())
-        return false;
-    }
-  }
+  if (isPointerSwiftError(I->getOperand(0)))
+    return false;
 
   MVT VT;
   if (!isTypeLegal(LI->getType(), VT, /*AllowI1=*/true))
@@ -3930,6 +3924,10 @@
 bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                                       const LoadInst *LI) {
   const Value *Ptr = LI->getPointerOperand();
+
+  if (isPointerSwiftError(Ptr))
+    return false;
+
   X86AddressMode AM;
   if (!X86SelectAddress(Ptr, AM))
     return false;
Index: llvm/test/CodeGen/X86/swifterror.ll
===================================================================
--- llvm/test/CodeGen/X86/swifterror.ll
+++ llvm/test/CodeGen/X86/swifterror.ll
@@ -2099,3 +2099,81 @@
   %error = load ptr, ptr %err
   ret ptr %error
 }
+
+define i32 @nofold_swifterror() {
+; CHECK-LABEL: nofold_swifterror:
+; CHECK: cmpq $0, %r12
+; CHECK-APPLE-LABEL: nofold_swifterror:
+; CHECK-APPLE: ## %bb.0:
+; CHECK-APPLE-NEXT: pushq %r12
+; CHECK-APPLE-NEXT: .cfi_def_cfa_offset 16
+; CHECK-APPLE-NEXT: subq $16, %rsp
+; CHECK-APPLE-NEXT: .cfi_def_cfa_offset 32
+; CHECK-APPLE-NEXT: .cfi_offset %r12, -16
+; CHECK-APPLE-NEXT: callq _foo
+; CHECK-APPLE-NEXT: testq %r12, %r12
+; CHECK-APPLE-NEXT: leaq 16(%rsp), %rsp
+; CHECK-APPLE-NEXT: popq %r12
+; CHECK-APPLE-NEXT: jne LBB26_2
+; CHECK-APPLE-NEXT: ## %bb.1: ## %good
+; CHECK-APPLE-NEXT: xorl %eax, %eax
+; CHECK-APPLE-NEXT: retq
+; CHECK-APPLE-NEXT: LBB26_2: ## %bad
+; CHECK-APPLE-NEXT: movl $42, %eax
+; CHECK-APPLE-NEXT: retq
+;
+; CHECK-O0-LABEL: nofold_swifterror:
+; CHECK-O0: ## %bb.0:
+; CHECK-O0-NEXT: pushq %r12
+; CHECK-O0-NEXT: .cfi_def_cfa_offset 16
+; CHECK-O0-NEXT: subq $16, %rsp
+; CHECK-O0-NEXT: .cfi_def_cfa_offset 32
+; CHECK-O0-NEXT: .cfi_offset %r12, -16
+; CHECK-O0-NEXT: ## implicit-def: $rax
+; CHECK-O0-NEXT: ## %bb.1: ## %next
+; CHECK-O0-NEXT: movq (%rsp), %r12 ## 8-byte Reload
+; CHECK-O0-NEXT: callq _foo
+; CHECK-O0-NEXT: cmpq $0, %r12
+; CHECK-O0-NEXT: jne LBB26_3
+; CHECK-O0-NEXT: ## %bb.2: ## %good
+; CHECK-O0-NEXT: xorl %eax, %eax
+; CHECK-O0-NEXT: addq $16, %rsp
+; CHECK-O0-NEXT: popq %r12
+; CHECK-O0-NEXT: retq
+; CHECK-O0-NEXT: LBB26_3: ## %bad
+; CHECK-O0-NEXT: movl $42, %eax
+; CHECK-O0-NEXT: addq $16, %rsp
+; CHECK-O0-NEXT: popq %r12
+; CHECK-O0-NEXT: retq
+;
+; CHECK-i386-LABEL: nofold_swifterror:
+; CHECK-i386: ## %bb.0:
+; CHECK-i386-NEXT: subl $12, %esp
+; CHECK-i386-NEXT: .cfi_def_cfa_offset 16
+; CHECK-i386-NEXT: leal 8(%esp), %eax
+; CHECK-i386-NEXT: movl %eax, (%esp)
+; CHECK-i386-NEXT: calll _foo
+; CHECK-i386-NEXT: cmpl $0, 8(%esp)
+; CHECK-i386-NEXT: leal 12(%esp), %esp
+; CHECK-i386-NEXT: jne LBB26_2
+; CHECK-i386-NEXT: ## %bb.1: ## %good
+; CHECK-i386-NEXT: xorl %eax, %eax
+; CHECK-i386-NEXT: retl
+; CHECK-i386-NEXT: LBB26_2: ## %bad
+; CHECK-i386-NEXT: movl $42, %eax
+; CHECK-i386-NEXT: retl
+  %se = alloca swifterror ptr, align 8
+  br label %next
+
+next:
+  call void @foo(ptr swifterror %se)
+  %err = load ptr, ptr %se, align 8
+  %tst = icmp eq ptr %err, null
+  br i1 %tst, label %good, label %bad
+
+good:
+  ret i32 0
+
+bad:
+  ret i32 42
+}