Index: llvm/trunk/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp =================================================================== --- llvm/trunk/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp +++ llvm/trunk/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp @@ -88,6 +88,9 @@ } bool AMDGPUAnnotateUniformValues::runOnFunction(Function &F) { + if (skipFunction(F)) + return false; + DA = &getAnalysis<DivergenceAnalysis>(); visit(F); Index: llvm/trunk/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp =================================================================== --- llvm/trunk/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp +++ llvm/trunk/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp @@ -97,7 +97,7 @@ } bool AMDGPUPromoteAlloca::runOnFunction(Function &F) { - if (!TM || F.hasFnAttribute(Attribute::OptimizeNone)) + if (!TM || skipFunction(F)) return false; FunctionType *FTy = F.getFunctionType(); Index: llvm/trunk/lib/Target/AMDGPU/R600ClauseMergePass.cpp =================================================================== --- llvm/trunk/lib/Target/AMDGPU/R600ClauseMergePass.cpp +++ llvm/trunk/lib/Target/AMDGPU/R600ClauseMergePass.cpp @@ -168,6 +168,9 @@ } bool R600ClauseMergePass::runOnMachineFunction(MachineFunction &MF) { + if (skipFunction(*MF.getFunction())) + return false; + TII = static_cast<const R600InstrInfo *>(MF.getSubtarget().getInstrInfo()); for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end(); BB != BB_E; ++BB) { Index: llvm/trunk/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp =================================================================== --- llvm/trunk/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp +++ llvm/trunk/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp @@ -314,6 +314,9 @@ } bool R600VectorRegMerger::runOnMachineFunction(MachineFunction &Fn) { + if (skipFunction(*Fn.getFunction())) + return false; + TII = static_cast<const R600InstrInfo *>(Fn.getSubtarget().getInstrInfo()); MRI = &(Fn.getRegInfo()); for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end(); Index: 
llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp =================================================================== --- llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp +++ llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp @@ -295,6 +295,9 @@ } bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) { + if (skipFunction(*MF.getFunction())) + return false; + MachineRegisterInfo &MRI = MF.getRegInfo(); const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo()); Index: llvm/trunk/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp =================================================================== --- llvm/trunk/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp +++ llvm/trunk/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp @@ -423,6 +423,9 @@ } bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) { + if (skipFunction(*MF.getFunction())) + return false; + const TargetSubtargetInfo &STM = MF.getSubtarget(); TRI = static_cast<const SIRegisterInfo *>(STM.getRegisterInfo()); TII = static_cast<const SIInstrInfo *>(STM.getInstrInfo()); Index: llvm/trunk/lib/Target/AMDGPU/SIShrinkInstructions.cpp =================================================================== --- llvm/trunk/lib/Target/AMDGPU/SIShrinkInstructions.cpp +++ llvm/trunk/lib/Target/AMDGPU/SIShrinkInstructions.cpp @@ -203,6 +203,9 @@ } bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) { + if (skipFunction(*MF.getFunction())) + return false; + MachineRegisterInfo &MRI = MF.getRegInfo(); const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());