Index: llvm/lib/Target/X86/X86FrameLowering.h =================================================================== --- llvm/lib/Target/X86/X86FrameLowering.h +++ llvm/lib/Target/X86/X86FrameLowering.h @@ -213,14 +213,14 @@ void emitStackProbeInlineGenericBlock(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, - const DebugLoc &DL, - uint64_t Offset) const; + const DebugLoc &DL, uint64_t Offset, + uint64_t Align) const; void emitStackProbeInlineGenericLoop(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, - const DebugLoc &DL, - uint64_t Offset) const; + const DebugLoc &DL, uint64_t Offset, + uint64_t Align) const; /// Emit a stub to later inline the target stack probe. MachineInstr *emitStackProbeInlineStub(MachineFunction &MF, Index: llvm/lib/Target/X86/X86FrameLowering.cpp =================================================================== --- llvm/lib/Target/X86/X86FrameLowering.cpp +++ llvm/lib/Target/X86/X86FrameLowering.cpp @@ -586,27 +586,48 @@ const uint64_t StackProbeSize = TLI.getStackProbeSize(MF); uint64_t ProbeChunk = StackProbeSize * 8; + uint64_t MaxAlign = + TRI->needsStackRealignment(MF) ? calculateMaxStackAlign(MF) : 0; + // Synthesize a loop or unroll it, depending on the number of iterations. 
- if (Offset > ProbeChunk) { - emitStackProbeInlineGenericLoop(MF, MBB, MBBI, DL, Offset); + if (Offset > ProbeChunk || MaxAlign > StackProbeSize) { + emitStackProbeInlineGenericLoop(MF, MBB, MBBI, DL, Offset, MaxAlign); } else { - emitStackProbeInlineGenericBlock(MF, MBB, MBBI, DL, Offset); + emitStackProbeInlineGenericBlock(MF, MBB, MBBI, DL, Offset, MaxAlign); } } void X86FrameLowering::emitStackProbeInlineGenericBlock( MachineFunction &MF, MachineBasicBlock &MBB, - MachineBasicBlock::iterator MBBI, const DebugLoc &DL, - uint64_t Offset) const { + MachineBasicBlock::iterator MBBI, const DebugLoc &DL, uint64_t Offset, + uint64_t Align) const { const X86Subtarget &STI = MF.getSubtarget(); const X86TargetLowering &TLI = *STI.getTargetLowering(); const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, Offset); const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi; const uint64_t StackProbeSize = TLI.getStackProbeSize(MF); + uint64_t CurrentOffset = 0; - // 0 Thanks to return address being saved on the stack - uint64_t CurrentProbeOffset = 0; + + if (CurrentOffset + StackProbeSize < Offset) { + assert(Align < StackProbeSize && + "Should be an emitStackProbeInlineGenericLoop"); + + MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr) + .addReg(StackPtr) + .addImm(StackProbeSize - Align) + .setMIFlag(MachineInstr::FrameSetup); + MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead. + + addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc)) + .setMIFlag(MachineInstr::FrameSetup), + StackPtr, false, 0) + .addImm(0) + .setMIFlag(MachineInstr::FrameSetup); + NumFrameExtraProbe++; + CurrentOffset = StackProbeSize - Align; + } // For the first N - 1 pages, just probe. 
I tried to take advantage of // natural probes but it implies much more logic and there was very few @@ -626,7 +647,6 @@ .setMIFlag(MachineInstr::FrameSetup); NumFrameExtraProbe++; CurrentOffset += StackProbeSize; - CurrentProbeOffset += StackProbeSize; } uint64_t ChunkSize = Offset - CurrentOffset; @@ -639,8 +659,8 @@ void X86FrameLowering::emitStackProbeInlineGenericLoop( MachineFunction &MF, MachineBasicBlock &MBB, - MachineBasicBlock::iterator MBBI, const DebugLoc &DL, - uint64_t Offset) const { + MachineBasicBlock::iterator MBBI, const DebugLoc &DL, uint64_t Offset, + uint64_t Align) const { assert(Offset && "null offset"); const X86Subtarget &STI = MF.getSubtarget(); @@ -648,26 +668,158 @@ const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi; const uint64_t StackProbeSize = TLI.getStackProbeSize(MF); + MachineBasicBlock *CurrMBB = &MBB; + + if (Align) { + if (Align < StackProbeSize) { + // Perform a first smaller allocation followed by a probe. + const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, Align); + MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(SUBOpc), StackPtr) + .addReg(StackPtr) + .addImm(Align) + .setMIFlag(MachineInstr::FrameSetup); + MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead. 
+ + addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc)) + .setMIFlag(MachineInstr::FrameSetup), + StackPtr, false, 0) + .addImm(0) + .setMIFlag(MachineInstr::FrameSetup); + NumFrameExtraProbe++; + Offset -= Align; + } else { + NumFrameLoopProbe++; + const BasicBlock *LLVM_BB = MBB.getBasicBlock(); + + MachineBasicBlock *headMBB = MF.CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *bodyMBB = MF.CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *footMBB = MF.CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *tailMBB = MF.CreateMachineBasicBlock(LLVM_BB); + + MachineFunction::iterator MBBIter = ++MBB.getIterator(); + MF.insert(MBBIter, headMBB); + MF.insert(MBBIter, bodyMBB); + MF.insert(MBBIter, footMBB); + MF.insert(MBBIter, tailMBB); + + Register StackIterator = Uses64BitFramePtr ? X86::R11 : X86::R11D; + + // Setup loop header + { + BuildMI(headMBB, DL, TII.get(TargetOpcode::COPY), StackIterator) + .addReg(Uses64BitFramePtr ? X86::RBP : X86::EBP) + .setMIFlag(MachineInstr::FrameSetup); + + const unsigned SUBOpc = + getSUBriOpcode(Uses64BitFramePtr, StackProbeSize); + BuildMI(headMBB, DL, TII.get(SUBOpc), StackIterator) + .addReg(StackIterator) + .addImm(StackProbeSize) + .setMIFlag(MachineInstr::FrameSetup); + + BuildMI(headMBB, DL, + TII.get(Uses64BitFramePtr ? 
X86::CMP64rr : X86::CMP32rr)) + .addReg(StackPtr) + .addReg(StackIterator) + .setMIFlag(MachineInstr::FrameSetup); + + // jump + BuildMI(headMBB, DL, TII.get(X86::JCC_1)) + .addMBB(footMBB) + .addImm(X86::COND_B) + .setMIFlag(MachineInstr::FrameSetup); + + headMBB->addSuccessor(bodyMBB); + headMBB->addSuccessor(footMBB); + } + + // setup loop body + { + addRegOffset(BuildMI(bodyMBB, DL, TII.get(MovMIOpc)) + .setMIFlag(MachineInstr::FrameSetup), + StackIterator, false, 0) + .addImm(0) + .setMIFlag(MachineInstr::FrameSetup); + + const unsigned SUBOpc = + getSUBriOpcode(Uses64BitFramePtr, StackProbeSize); + BuildMI(bodyMBB, DL, TII.get(SUBOpc), StackIterator) + .addReg(StackIterator) + .addImm(StackProbeSize) + .setMIFlag(MachineInstr::FrameSetup); + + // cmp with stack pointer bound + BuildMI(bodyMBB, DL, + TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr)) + .addReg(StackPtr) + .addReg(StackIterator) + .setMIFlag(MachineInstr::FrameSetup); + + // jump + BuildMI(bodyMBB, DL, TII.get(X86::JCC_1)) + .addMBB(bodyMBB) + .addImm(X86::COND_B) + .setMIFlag(MachineInstr::FrameSetup); + bodyMBB->addSuccessor(bodyMBB); + bodyMBB->addSuccessor(footMBB); + } + + // setup loop footer + { + addRegOffset(BuildMI(footMBB, DL, TII.get(MovMIOpc)) + .setMIFlag(MachineInstr::FrameSetup), + StackPtr, false, 0) + .addImm(0) + .setMIFlag(MachineInstr::FrameSetup); + footMBB->addSuccessor(tailMBB); + } + + // BB management + tailMBB->splice(tailMBB->end(), &MBB, MBBI, MBB.end()); + tailMBB->transferSuccessorsAndUpdatePHIs(&MBB); + + BuildMI(&MBB, DL, + TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr)) + .addReg(Uses64BitFramePtr ? 
X86::RBP : X86::EBP) + .addReg(StackPtr) + .setMIFlag(MachineInstr::FrameSetup); + BuildMI(&MBB, DL, TII.get(X86::JCC_1)) + .addMBB(tailMBB) + .addImm(X86::COND_E) + .setMIFlag(MachineInstr::FrameSetup); + MBB.addSuccessor(tailMBB); + MBB.addSuccessor(headMBB); + + CurrMBB = tailMBB; + recomputeLiveIns(*headMBB); + recomputeLiveIns(*bodyMBB); + recomputeLiveIns(*footMBB); + recomputeLiveIns(*tailMBB); + } + } + + + // Synthesize a loop NumFrameLoopProbe++; - const BasicBlock *LLVM_BB = MBB.getBasicBlock(); + const BasicBlock *LLVM_BB = CurrMBB->getBasicBlock(); MachineBasicBlock *testMBB = MF.CreateMachineBasicBlock(LLVM_BB); MachineBasicBlock *tailMBB = MF.CreateMachineBasicBlock(LLVM_BB); - MachineFunction::iterator MBBIter = ++MBB.getIterator(); + MachineFunction::iterator MBBIter = ++CurrMBB->getIterator(); MF.insert(MBBIter, testMBB); MF.insert(MBBIter, tailMBB); Register FinalStackProbed = Uses64BitFramePtr ? X86::R11 : X86::R11D; - BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::COPY), FinalStackProbed) + BuildMI(*CurrMBB, MBBI, DL, TII.get(TargetOpcode::COPY), FinalStackProbed) .addReg(StackPtr) .setMIFlag(MachineInstr::FrameSetup); // save loop bound { - const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, Offset); - BuildMI(MBB, MBBI, DL, TII.get(Opc), FinalStackProbed) + const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, Offset); + BuildMI(*CurrMBB, MBBI, DL, TII.get(SUBOpc), FinalStackProbed) .addReg(FinalStackProbed) .addImm(Offset / StackProbeSize * StackProbeSize) .setMIFlag(MachineInstr::FrameSetup); @@ -675,8 +827,8 @@ // allocate a page { - const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, StackProbeSize); - BuildMI(testMBB, DL, TII.get(Opc), StackPtr) + const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, StackProbeSize); + BuildMI(testMBB, DL, TII.get(SUBOpc), StackPtr) .addReg(StackPtr) .addImm(StackProbeSize) .setMIFlag(MachineInstr::FrameSetup); @@ -704,9 +856,9 @@ testMBB->addSuccessor(tailMBB); // BB management - 
tailMBB->splice(tailMBB->end(), &MBB, MBBI, MBB.end()); - tailMBB->transferSuccessorsAndUpdatePHIs(&MBB); - MBB.addSuccessor(testMBB); + tailMBB->splice(tailMBB->end(), CurrMBB, MBBI, CurrMBB->end()); + tailMBB->transferSuccessorsAndUpdatePHIs(CurrMBB); + CurrMBB->addSuccessor(testMBB); // handle tail unsigned TailOffset = Offset % StackProbeSize; @@ -721,6 +873,7 @@ // Update Live In information recomputeLiveIns(*testMBB); recomputeLiveIns(*tailMBB); + } void X86FrameLowering::emitStackProbeInlineWindowsCoreCLR64( Index: llvm/test/CodeGen/X86/stack-clash-no-free-probe.ll =================================================================== --- llvm/test/CodeGen/X86/stack-clash-no-free-probe.ll +++ /dev/null @@ -1,27 +0,0 @@ -; RUN: llc < %s | FileCheck %s - -target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" -target triple = "x86_64-unknown-linux-gnu" - -define i32 @foo(i64 %i) local_unnamed_addr #0 { -; CHECK-LABEL: foo: -; CHECK: # %bb.0: -; CHECK-NEXT: subq $4096, %rsp # imm = 0x1000 -; CHECK-NEXT: movq $0, (%rsp) -; CHECK-NEXT: subq $3784, %rsp # imm = 0xEC8 -; CHECK-NEXT: .cfi_def_cfa_offset 7888 -; CHECK-NEXT: movl $1, -128(%rsp,%rdi,4) -; CHECK-NEXT: movl -128(%rsp), %eax -; CHECK-NEXT: addq $7880, %rsp # imm = 0x1EC8 -; CHECK-NEXT: .cfi_def_cfa_offset 8 -; CHECK-NEXT: retq - - %a = alloca i32, i32 2000, align 16 - %b = getelementptr inbounds i32, i32* %a, i64 %i - store volatile i32 1, i32* %b - %c = load volatile i32, i32* %a - ret i32 %c -} - -attributes #0 = {"probe-stack"="inline-asm"} - Index: llvm/test/CodeGen/X86/stack-clash-small-alloc-medium-align.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/X86/stack-clash-small-alloc-medium-align.ll @@ -0,0 +1,34 @@ +; RUN: llc < %s | FileCheck %s + +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +define i32 @foo(i64 %i) local_unnamed_addr #0 { +; CHECK-LABEL: foo: +; CHECK: # 
%bb.0: +; CHECK-NEXT: pushq %rbp +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset %rbp, -16 +; CHECK-NEXT: movq %rsp, %rbp +; CHECK-NEXT: .cfi_def_cfa_register %rbp +; CHECK-NEXT: andq $-64, %rsp +; CHECK-NEXT: subq $4032, %rsp # imm = 0xFC0 +; CHECK-NEXT: movq $0, (%rsp) +; CHECK-NEXT: subq $4032, %rsp # imm = 0xFC0 +; CHECK-NEXT: movl $1, (%rsp,%rdi,4) +; CHECK-NEXT: movl (%rsp), %eax +; CHECK-NEXT: movq %rbp, %rsp +; CHECK-NEXT: popq %rbp +; CHECK-NEXT: .cfi_def_cfa %rsp, 8 +; CHECK-NEXT: retq + + + %a = alloca i32, i32 2000, align 64 + %b = getelementptr inbounds i32, i32* %a, i64 %i + store volatile i32 1, i32* %b + %c = load volatile i32, i32* %a + ret i32 %c +} + +attributes #0 = {"probe-stack"="inline-asm"} + Index: llvm/test/CodeGen/X86/stack-clash-small-large-align.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/X86/stack-clash-small-large-align.ll @@ -0,0 +1,81 @@ +; RUN: llc < %s | FileCheck %s + + +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +define i32 @foo_noprotect() local_unnamed_addr { +; CHECK-LABEL: foo_noprotect: +; CHECK: # %bb.0: +; CHECK-NEXT: pushq %rbp +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset %rbp, -16 +; CHECK-NEXT: movq %rsp, %rbp +; CHECK-NEXT: .cfi_def_cfa_register %rbp +; CHECK-NEXT: andq $-65536, %rsp +; CHECK-NEXT: subq $65536, %rsp +; CHECK-NEXT: movl $1, 392(%rsp) +; CHECK-NEXT: movl (%rsp), %eax +; CHECK-NEXT: movq %rbp, %rsp +; CHECK-NEXT: popq %rbp +; CHECK-NEXT: .cfi_def_cfa %rsp, 8 +; CHECK-NEXT: retq + + + + %a = alloca i32, i64 100, align 65536 + %b = getelementptr inbounds i32, i32* %a, i64 98 + store volatile i32 1, i32* %b + %c = load volatile i32, i32* %a + ret i32 %c +} + +define i32 @foo_protect() local_unnamed_addr #0 { +; CHECK-LABEL: foo_protect: +; CHECK: # %bb.0: +; CHECK-NEXT: pushq %rbp +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset 
%rbp, -16 +; CHECK-NEXT: movq %rsp, %rbp +; CHECK-NEXT: .cfi_def_cfa_register %rbp +; CHECK-NEXT: andq $-65536, %rsp # imm = 0xFFFF0000 +; CHECK-NEXT: cmpq %rsp, %rbp +; CHECK-NEXT: je .LBB1_4 +; CHECK-NEXT:# %bb.1: +; CHECK-NEXT: movq %rbp, %r11 +; CHECK-NEXT: subq $4096, %r11 # imm = 0x1000 +; CHECK-NEXT: cmpq %r11, %rsp +; CHECK-NEXT: jb .LBB1_3 +; CHECK-NEXT:.LBB1_2: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: movq $0, (%r11) +; CHECK-NEXT: subq $4096, %r11 # imm = 0x1000 +; CHECK-NEXT: cmpq %r11, %rsp +; CHECK-NEXT: jb .LBB1_2 +; CHECK-NEXT:.LBB1_3: +; CHECK-NEXT: movq $0, (%rsp) +; CHECK-NEXT:.LBB1_4: +; CHECK-NEXT: movq %rsp, %r11 +; CHECK-NEXT: subq $65536, %r11 # imm = 0x10000 +; CHECK-NEXT:.LBB1_5: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: subq $4096, %rsp # imm = 0x1000 +; CHECK-NEXT: movq $0, (%rsp) +; CHECK-NEXT: cmpq %r11, %rsp +; CHECK-NEXT: jne .LBB1_5 +; CHECK-NEXT:# %bb.6: +; CHECK-NEXT: movl $1, 392(%rsp) +; CHECK-NEXT: movl (%rsp), %eax +; CHECK-NEXT: movq %rbp, %rsp +; CHECK-NEXT: popq %rbp +; CHECK-NEXT: .cfi_def_cfa %rsp, 8 +; CHECK-NEXT: retq + + + + %a = alloca i32, i64 100, align 65536 +  %b = getelementptr inbounds i32, i32* %a, i64 98 + store volatile i32 1, i32* %b + %c = load volatile i32, i32* %a + ret i32 %c +} + +attributes #0 = {"probe-stack"="inline-asm"}