Index: lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
+++ lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
@@ -88,6 +88,9 @@
 }
 
 bool AMDGPUAnnotateUniformValues::runOnFunction(Function &F) {
+  if (skipFunction(F))
+    return false;
+
   DA = &getAnalysis<DivergenceAnalysis>();
   visit(F);
 
Index: lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -97,7 +97,7 @@
 }
 
 bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
-  if (!TM || F.hasFnAttribute(Attribute::OptimizeNone))
+  if (!TM || skipFunction(F))
     return false;
 
   FunctionType *FTy = F.getFunctionType();
Index: lib/Target/AMDGPU/R600ClauseMergePass.cpp
===================================================================
--- lib/Target/AMDGPU/R600ClauseMergePass.cpp
+++ lib/Target/AMDGPU/R600ClauseMergePass.cpp
@@ -168,6 +168,9 @@
 }
 
 bool R600ClauseMergePass::runOnMachineFunction(MachineFunction &MF) {
+  if (skipFunction(*MF.getFunction()))
+    return false;
+
   TII = static_cast<const R600InstrInfo *>(MF.getSubtarget().getInstrInfo());
   for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
                                                   BB != BB_E; ++BB) {
Index: lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
===================================================================
--- lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
+++ lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
@@ -314,6 +314,9 @@
 }
 
 bool R600VectorRegMerger::runOnMachineFunction(MachineFunction &Fn) {
+  if (skipFunction(*Fn.getFunction()))
+    return false;
+
   TII = static_cast<const R600InstrInfo *>(Fn.getSubtarget().getInstrInfo());
   MRI = &(Fn.getRegInfo());
   for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
Index: lib/Target/AMDGPU/SIFoldOperands.cpp
===================================================================
--- lib/Target/AMDGPU/SIFoldOperands.cpp
+++ lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -295,6 +295,9 @@
 }
 
 bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
+  if (skipFunction(*MF.getFunction()))
+    return false;
+
   MachineRegisterInfo &MRI = MF.getRegInfo();
   const SIInstrInfo *TII =
       static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
Index: lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
===================================================================
--- lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -423,6 +423,9 @@
 }
 
 bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
+  if (skipFunction(*MF.getFunction()))
+    return false;
+
   const TargetSubtargetInfo &STM = MF.getSubtarget();
   TRI = static_cast<const SIRegisterInfo *>(STM.getRegisterInfo());
   TII = static_cast<const SIInstrInfo *>(STM.getInstrInfo());
Index: lib/Target/AMDGPU/SIShrinkInstructions.cpp
===================================================================
--- lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -203,6 +203,9 @@
 }
 
 bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
+  if (skipFunction(*MF.getFunction()))
+    return false;
+
   MachineRegisterInfo &MRI = MF.getRegInfo();
   const SIInstrInfo *TII =
       static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
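
Every hunk above applies the same guard at the top of the pass entry point: skipFunction() returns true when the function is marked optnone or when the opt-bisect limit excludes the pass, so the pass exits without changing anything. Below is a minimal sketch of that pattern in a hypothetical machine pass; the pass name and surrounding boilerplate are illustrative and not part of this patch. skipFunction() is inherited from FunctionPass, and MF.getFunction() returns a pointer here, hence the dereference.

// Illustrative sketch only, not part of the patch: the skipFunction()
// guard used by the hunks above, applied to a hypothetical pass.
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"

using namespace llvm;

namespace {
class ExampleSkippablePass : public MachineFunctionPass {
public:
  static char ID;
  ExampleSkippablePass() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Skip functions marked optnone and functions excluded by opt-bisect;
    // skipFunction() is provided by the FunctionPass base class.
    if (skipFunction(*MF.getFunction()))
      return false;

    // The real transformation would go here; this sketch changes nothing.
    return false;
  }
};
} // end anonymous namespace

char ExampleSkippablePass::ID = 0;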