diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -100,7 +100,7 @@
           MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall() ||
           MF.callsUnwindInit() || MF.hasEHFunclets() || MF.callsEHReturn() ||
           MFI.hasStackMap() || MFI.hasPatchPoint() ||
-          MFI.hasCopyImplyingStackAdjustment());
+          (isWin64Prologue(MF) && MFI.hasCopyImplyingStackAdjustment()));
 }
 
 static unsigned getSUBriOpcode(bool IsLP64, int64_t Imm) {
@@ -1385,6 +1385,9 @@
   return Is64Bit && !IsWin64CC && !Fn.hasFnAttribute(Attribute::NoRedZone);
 }
 
+/// Return true if we need to use the restricted Windows x64 prologue and
+/// epilogue code patterns that can be described with WinCFI (.seh_*
+/// directives).
 bool X86FrameLowering::isWin64Prologue(const MachineFunction &MF) const {
   return MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
 }
diff --git a/llvm/test/CodeGen/X86/x86-flags-intrinsics.ll b/llvm/test/CodeGen/X86/x86-flags-intrinsics.ll
--- a/llvm/test/CodeGen/X86/x86-flags-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/x86-flags-intrinsics.ll
@@ -11,11 +11,9 @@
 }
 
 ; CHECK-LABEL: _read_flags:
-; CHECK: pushl %ebp
-; CHECK-NEXT: movl %esp, %ebp
-; CHECK-NEXT: pushfl
+; CHECK: pushfl
 ; CHECK-NEXT: popl %eax
-; CHECK-NEXT: popl %ebp
+; CHECK-NEXT: retl
 
 define x86_fastcallcc void @write_flags(i32 inreg %arg) {
 entry:
@@ -24,8 +22,6 @@
 }
 
 ; CHECK-LABEL: @write_flags@4:
-; CHECK: pushl %ebp
-; CHECK-NEXT: movl %esp, %ebp
-; CHECK-NEXT: pushl %ecx
+; CHECK: pushl %ecx
 ; CHECK-NEXT: popfl
-; CHECK-NEXT: popl %ebp
+; CHECK-NEXT: retl