Index: lib/Target/X86/X86CallFrameOptimization.cpp
===================================================================
--- lib/Target/X86/X86CallFrameOptimization.cpp
+++ lib/Target/X86/X86CallFrameOptimization.cpp
@@ -217,6 +217,9 @@
 }
 
 bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) {
+  if (skipFunction(*MF.getFunction()))
+    return false;
+
   STI = &MF.getSubtarget<X86Subtarget>();
   TII = STI->getInstrInfo();
   TFL = STI->getFrameLowering();
Index: lib/Target/X86/X86FixupBWInsts.cpp
===================================================================
--- lib/Target/X86/X86FixupBWInsts.cpp
+++ lib/Target/X86/X86FixupBWInsts.cpp
@@ -135,7 +135,7 @@
 FunctionPass *llvm::createX86FixupBWInsts() { return new FixupBWInstPass(); }
 
 bool FixupBWInstPass::runOnMachineFunction(MachineFunction &MF) {
-  if (!FixupBWInsts)
+  if (skipFunction(*MF.getFunction()) || !FixupBWInsts)
     return false;
 
   this->MF = &MF;
Index: lib/Target/X86/X86FixupLEAs.cpp
===================================================================
--- lib/Target/X86/X86FixupLEAs.cpp
+++ lib/Target/X86/X86FixupLEAs.cpp
@@ -162,6 +162,9 @@
 FunctionPass *llvm::createX86FixupLEAs() { return new FixupLEAPass(); }
 
 bool FixupLEAPass::runOnMachineFunction(MachineFunction &Func) {
+  if (skipFunction(*Func.getFunction()))
+    return false;
+
   MF = &Func;
   const X86Subtarget &ST = Func.getSubtarget<X86Subtarget>();
   OptIncDec = !ST.slowIncDec() || Func.getFunction()->optForMinSize();
Index: lib/Target/X86/X86InstrInfo.cpp
===================================================================
--- lib/Target/X86/X86InstrInfo.cpp
+++ lib/Target/X86/X86InstrInfo.cpp
@@ -7377,7 +7377,10 @@
   LDTLSCleanup() : MachineFunctionPass(ID) {}
 
   bool runOnMachineFunction(MachineFunction &MF) override {
-    X86MachineFunctionInfo* MFI = MF.getInfo<X86MachineFunctionInfo>();
+    if (skipFunction(*MF.getFunction()))
+      return false;
+
+    X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
     if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
       // No point folding accesses if there isn't at least two.
       return false;
Index: lib/Target/X86/X86OptimizeLEAs.cpp
===================================================================
--- lib/Target/X86/X86OptimizeLEAs.cpp
+++ lib/Target/X86/X86OptimizeLEAs.cpp
@@ -612,7 +612,8 @@
   bool Changed = false;
 
   // Perform this optimization only if we care about code size.
-  if (DisableX86LEAOpt || !MF.getFunction()->optForSize())
+  if (skipFunction(*MF.getFunction()) || DisableX86LEAOpt ||
+      !MF.getFunction()->optForSize())
     return false;
 
   MRI = &MF.getRegInfo();
Index: lib/Target/X86/X86PadShortFunction.cpp
===================================================================
--- lib/Target/X86/X86PadShortFunction.cpp
+++ lib/Target/X86/X86PadShortFunction.cpp
@@ -98,6 +98,9 @@
 /// runOnMachineFunction - Loop over all of the basic blocks, inserting
 /// NOOP instructions before early exits.
 bool PadShortFunc::runOnMachineFunction(MachineFunction &MF) {
+  if (skipFunction(*MF.getFunction()))
+    return false;
+
   if (MF.getFunction()->optForSize()) {
     return false;
   }
Index: lib/Target/X86/X86VZeroUpper.cpp
===================================================================
--- lib/Target/X86/X86VZeroUpper.cpp
+++ lib/Target/X86/X86VZeroUpper.cpp
@@ -255,6 +255,9 @@
 /// runOnMachineFunction - Loop over all of the basic blocks, inserting
 /// vzeroupper instructions before function calls.
 bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
+  if (skipFunction(*MF.getFunction()))
+    return false;
+
   const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
   if (!ST.hasAVX() || ST.hasAVX512() || ST.hasFastPartialYMMWrite())
     return false;