diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -647,6 +647,8 @@
     uint64_t AlignOffset) const {
   assert(Offset && "null offset");
 
+  const bool NeedsDwarfCFI = needsDwarfCFI(MF);
+  const bool HasFP = hasFP(MF);
   const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
   const X86TargetLowering &TLI = *STI.getTargetLowering();
   const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
@@ -686,17 +688,36 @@
   Register FinalStackProbed = Uses64BitFramePtr ? X86::R11
                               : Is64Bit         ? X86::R11D
                                                 : X86::EAX;
+
   BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::COPY), FinalStackProbed)
       .addReg(StackPtr)
       .setMIFlag(MachineInstr::FrameSetup);
 
   // save loop bound
   {
-    const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, Offset);
+    const unsigned BoundOffset = Offset / StackProbeSize * StackProbeSize;
+    const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, BoundOffset);
     BuildMI(MBB, MBBI, DL, TII.get(SUBOpc), FinalStackProbed)
         .addReg(FinalStackProbed)
-        .addImm(Offset / StackProbeSize * StackProbeSize)
+        .addImm(BoundOffset)
         .setMIFlag(MachineInstr::FrameSetup);
+
+    // while in the loop, use loop-invariant reg for CFI,
+    // instead of the stack pointer, which changes during the loop
+    if (!HasFP && NeedsDwarfCFI) {
+      // x32 uses the same DWARF register numbers as x86-64,
+      // so there isn't a register number for r11d, we must use r11 instead
+      const Register DwarfFinalStackProbed =
+          STI.isTarget64BitILP32()
+              ? Register(getX86SubSuperRegister(FinalStackProbed, 64))
+              : FinalStackProbed;
+
+      BuildCFI(MBB, MBBI, DL,
+               MCCFIInstruction::createDefCfaRegister(
+                   nullptr, TRI->getDwarfRegNum(DwarfFinalStackProbed, true)));
+      BuildCFI(MBB, MBBI, DL,
+               MCCFIInstruction::createAdjustCfaOffset(nullptr, BoundOffset));
+    }
   }
 
   // allocate a page
@@ -735,15 +756,30 @@
   MBB.addSuccessor(testMBB);
 
   // handle tail
-  unsigned TailOffset = Offset % StackProbeSize;
+  const unsigned TailOffset = Offset % StackProbeSize;
+  MachineBasicBlock::iterator TailMBBIter = tailMBB->begin();
   if (TailOffset) {
     const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, TailOffset);
-    BuildMI(*tailMBB, tailMBB->begin(), DL, TII.get(Opc), StackPtr)
+    BuildMI(*tailMBB, TailMBBIter, DL, TII.get(Opc), StackPtr)
         .addReg(StackPtr)
         .addImm(TailOffset)
         .setMIFlag(MachineInstr::FrameSetup);
   }
 
+  // after the loop, switch back to stack pointer for CFI
+  if (!HasFP && NeedsDwarfCFI) {
+    // x32 uses the same DWARF register numbers as x86-64,
+    // so there isn't a register number for esp, we must use rsp instead
+    const Register DwarfStackPtr =
+        STI.isTarget64BitILP32()
+            ? Register(getX86SubSuperRegister(StackPtr, 64))
+            : Register(StackPtr);
+
+    BuildCFI(*tailMBB, TailMBBIter, DL,
+             MCCFIInstruction::createDefCfaRegister(
+                 nullptr, TRI->getDwarfRegNum(DwarfStackPtr, true)));
+  }
+
   // Update Live In information
   recomputeLiveIns(*testMBB);
   recomputeLiveIns(*tailMBB);
diff --git a/llvm/test/CodeGen/X86/stack-clash-large.ll b/llvm/test/CodeGen/X86/stack-clash-large.ll
--- a/llvm/test/CodeGen/X86/stack-clash-large.ll
+++ b/llvm/test/CodeGen/X86/stack-clash-large.ll
@@ -7,6 +7,8 @@
 ; CHECK-X64:       # %bb.0:
 ; CHECK-X64-NEXT:    movq %rsp, %r11
 ; CHECK-X64-NEXT:    subq $69632, %r11 # imm = 0x11000
+; CHECK-X64-NEXT:    .cfi_def_cfa_register %r11
+; CHECK-X64-NEXT:    .cfi_adjust_cfa_offset 69632
 ; CHECK-X64-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
 ; CHECK-X64-NEXT:    subq $4096, %rsp # imm = 0x1000
 ; CHECK-X64-NEXT:    movq $0, (%rsp)
@@ -14,6 +16,7 @@
 ; CHECK-X64-NEXT:    jne .LBB0_1
 ; CHECK-X64-NEXT:  # %bb.2:
 ; CHECK-X64-NEXT:    subq $2248, %rsp # imm = 0x8C8
+; CHECK-X64-NEXT:    .cfi_def_cfa_register %rsp
 ; CHECK-X64-NEXT:    .cfi_def_cfa_offset 71888
 ; CHECK-X64-NEXT:    movl $1, 264(%rsp)
 ; CHECK-X64-NEXT:    movl $1, 28664(%rsp)
@@ -26,6 +29,8 @@
 ; CHECK-X86:       # %bb.0:
 ; CHECK-X86-NEXT:    movl %esp, %eax
 ; CHECK-X86-NEXT:    subl $69632, %eax # imm = 0x11000
+; CHECK-X86-NEXT:    .cfi_def_cfa_register %eax
+; CHECK-X86-NEXT:    .cfi_adjust_cfa_offset 69632
 ; CHECK-X86-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
 ; CHECK-X86-NEXT:    subl $4096, %esp # imm = 0x1000
 ; CHECK-X86-NEXT:    movl $0, (%esp)
@@ -33,6 +38,7 @@
 ; CHECK-X86-NEXT:    jne .LBB0_1
 ; CHECK-X86-NEXT:  # %bb.2:
 ; CHECK-X86-NEXT:    subl $2380, %esp # imm = 0x94C
+; CHECK-X86-NEXT:    .cfi_def_cfa_register %esp
 ; CHECK-X86-NEXT:    .cfi_def_cfa_offset 72016
 ; CHECK-X86-NEXT:    movl $1, 392(%esp)
 ; CHECK-X86-NEXT:    movl $1, 28792(%esp)
@@ -45,6 +51,8 @@
 ; CHECK-X32:       # %bb.0:
 ; CHECK-X32-NEXT:    movl %esp, %r11d
 ; CHECK-X32-NEXT:    subl $69632, %r11d # imm = 0x11000
+; CHECK-X32-NEXT:    .cfi_def_cfa_register %r11
+; CHECK-X32-NEXT:    .cfi_adjust_cfa_offset 69632
 ; CHECK-X32-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
 ; CHECK-X32-NEXT:    subl $4096, %esp # imm = 0x1000
 ; CHECK-X32-NEXT:    movq $0, (%esp)
@@ -52,6 +60,7 @@
 ; CHECK-X32-NEXT:    jne .LBB0_1
 ; CHECK-X32-NEXT:  # %bb.2:
 ; CHECK-X32-NEXT:    subl $2248, %esp # imm = 0x8C8
+; CHECK-X32-NEXT:    .cfi_def_cfa_register %rsp
 ; CHECK-X32-NEXT:    .cfi_def_cfa_offset 71888
 ; CHECK-X32-NEXT:    movl $1, 264(%esp)
 ; CHECK-X32-NEXT:    movl $1, 28664(%esp)
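Not part of the patch: for reference, a minimal sketch of the kind of source that reaches this inline probing-loop path, assuming stack-clash protection is enabled with the standard Clang/GCC flag -fstack-clash-protection; the function and buffer names below are illustrative and are not taken from the test file.

// large_frame.cpp -- hypothetical example, not from the LLVM tree.
// A local buffer much larger than the probe interval (typically 4096 bytes)
// makes the backend emit the probing loop whose CFI the patch above fixes.
// Build with, e.g.:  clang++ -O2 -fstack-clash-protection -S large_frame.cpp
void consume(char *p); // external sink so the buffer is not optimized away

void large_frame() {
  char buf[72000]; // on the order of the 69632-byte loop bound plus tail in the test
  consume(buf);
}

Without a frame pointer the CFA is expressed relative to the stack pointer, which the loop keeps moving; the patch therefore retargets the CFA to the loop-invariant bound register (%r11 or %eax) for the duration of the loop and switches back to stack-pointer-relative CFI in the tail block, which is why both new BuildCFI sequences are guarded by !HasFP && NeedsDwarfCFI.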