Index: llvm/trunk/include/llvm/IR/Function.h
===================================================================
--- llvm/trunk/include/llvm/IR/Function.h
+++ llvm/trunk/include/llvm/IR/Function.h
@@ -395,6 +395,16 @@
     addAttribute(n, Attribute::ReadOnly);
   }
 
+  /// Optimize this function for minimum size (-Oz).
+  bool optForMinSize() const {
+    return hasFnAttribute(Attribute::MinSize);
+  }
+
+  /// Optimize this function for size (-Os) or minimum size (-Oz).
+  bool optForSize() const {
+    return hasFnAttribute(Attribute::OptimizeForSize) || optForMinSize();
+  }
+
   /// copyAttributesFrom - copy all additional attributes (those not needed to
   /// create a Function) from the Function Src to this one.
   void copyAttributesFrom(const GlobalValue *Src) override;
Index: llvm/trunk/lib/CodeGen/BranchFolding.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/BranchFolding.cpp
+++ llvm/trunk/lib/CodeGen/BranchFolding.cpp
@@ -606,6 +606,7 @@
   // instructions that would be deleted in the merge.
   MachineFunction *MF = MBB1->getParent();
   if (EffectiveTailLen >= 2 &&
+      // FIXME: Use Function::optForSize().
       MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize) &&
       (I1 == MBB1->begin() || I2 == MBB2->begin()))
     return true;
Index: llvm/trunk/lib/CodeGen/CodeGenPrepare.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/CodeGenPrepare.cpp
+++ llvm/trunk/lib/CodeGen/CodeGenPrepare.cpp
@@ -214,6 +214,7 @@
   TLI = TM->getSubtargetImpl(F)->getTargetLowering();
   TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
   TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
+  // FIXME: Use Function::optForSize().
   OptSize = F.hasFnAttribute(Attribute::OptimizeForSize);
 
   /// This optimization identifies DIV instructions that can be
Index: llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp
+++ llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp
@@ -1064,6 +1064,7 @@
   // exclusively on the loop info here so that we can align backedges in
   // unnatural CFGs and backedges that were introduced purely because of the
   // loop rotations done during this layout pass.
+  // FIXME: Use Function::optForSize().
   if (F.getFunction()->hasFnAttribute(Attribute::OptimizeForSize))
     return;
   if (FunctionChain.begin() == FunctionChain.end())
Index: llvm/trunk/lib/CodeGen/MachineCombiner.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/MachineCombiner.cpp
+++ llvm/trunk/lib/CodeGen/MachineCombiner.cpp
@@ -427,6 +427,7 @@
   Traces = &getAnalysis<MachineTraceMetrics>();
   MinInstr = 0;
+  // FIXME: Use Function::optForSize().
   OptSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
 
   DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n');
Index: llvm/trunk/lib/CodeGen/MachineFunction.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/MachineFunction.cpp
+++ llvm/trunk/lib/CodeGen/MachineFunction.cpp
@@ -79,6 +79,7 @@
     Alignment = STI->getTargetLowering()->getMinFunctionAlignment();
 
   // FIXME: Shouldn't use pref alignment if explicit alignment is set on Fn.
+  // FIXME: Use Function::optForSize().
   if (!Fn->hasFnAttribute(Attribute::OptimizeForSize))
     Alignment = std::max(Alignment,
                          STI->getTargetLowering()->getPrefFunctionAlignment());
Index: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -428,9 +428,7 @@
     DAGCombiner(SelectionDAG &D, AliasAnalysis &A, CodeGenOpt::Level OL)
         : DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes),
           OptLevel(OL), LegalOperations(false), LegalTypes(false), AA(A) {
-      auto *F = DAG.getMachineFunction().getFunction();
-      ForCodeSize = F->hasFnAttribute(Attribute::OptimizeForSize) ||
-                    F->hasFnAttribute(Attribute::MinSize);
+      ForCodeSize = DAG.getMachineFunction().getFunction()->optForSize();
     }
 
     /// Runs the dag combiner on all nodes in the work list
Index: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -4152,15 +4152,11 @@
 }
 
 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
-  const Function *F = MF.getFunction();
-  bool HasMinSize = F->hasFnAttribute(Attribute::MinSize);
-  bool HasOptSize = F->hasFnAttribute(Attribute::OptimizeForSize);
-
   // On Darwin, -Os means optimize for size without hurting performance, so
   // only really optimize for size when -Oz (MinSize) is used.
   if (MF.getTarget().getTargetTriple().isOSDarwin())
-    return HasMinSize;
-  return HasOptSize || HasMinSize;
+    return MF.getFunction()->optForMinSize();
+  return MF.getFunction()->optForSize();
 }
 
 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
Index: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3968,6 +3968,7 @@
     return DAG.getConstantFP(1.0, DL, LHS.getValueType());
 
   const Function *F = DAG.getMachineFunction().getFunction();
+  // FIXME: Use Function::optForSize().
   if (!F->hasFnAttribute(Attribute::OptimizeForSize) ||
       // If optimizing for size, don't insert too many multiplies.  This
       // inserts up to 5 multiplies.
Index: llvm/trunk/lib/CodeGen/TailDuplication.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/TailDuplication.cpp
+++ llvm/trunk/lib/CodeGen/TailDuplication.cpp
@@ -563,6 +563,7 @@
   // compensate for the duplication.
   unsigned MaxDuplicateCount;
   if (TailDuplicateSize.getNumOccurrences() == 0 &&
+      // FIXME: Use Function::optForSize().
       MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize))
     MaxDuplicateCount = 1;
   else
Index: llvm/trunk/lib/Target/AArch64/AArch64ConditionalCompares.cpp
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ConditionalCompares.cpp
+++ llvm/trunk/lib/Target/AArch64/AArch64ConditionalCompares.cpp
@@ -899,7 +899,7 @@
   Loops = getAnalysisIfAvailable<MachineLoopInfo>();
   Traces = &getAnalysis<MachineTraceMetrics>();
   MinInstr = nullptr;
-  MinSize = MF.getFunction()->hasFnAttribute(Attribute::MinSize);
+  MinSize = MF.getFunction()->optForMinSize();
 
   bool Changed = false;
   CmpConv.runOnMachineFunction(MF);
Index: llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -53,9 +53,7 @@
   }
 
   bool runOnMachineFunction(MachineFunction &MF) override {
-    ForCodeSize =
-        MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
-        MF.getFunction()->hasFnAttribute(Attribute::MinSize);
+    ForCodeSize = MF.getFunction()->optForSize();
     Subtarget = &MF.getSubtarget<AArch64Subtarget>();
     return SelectionDAGISel::runOnMachineFunction(MF);
   }
Index: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -8422,10 +8422,8 @@
   if (!Subtarget->isCyclone())
     return SDValue();
 
-  // Don't split at Oz.
-  MachineFunction &MF = DAG.getMachineFunction();
-  bool IsMinSize = MF.getFunction()->hasFnAttribute(Attribute::MinSize);
-  if (IsMinSize)
+  // Don't split at -Oz.
+  if (DAG.getMachineFunction().getFunction()->optForMinSize())
     return SDValue();
 
   SDValue StVal = S->getValue();
Index: llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp
===================================================================
--- llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -1652,9 +1652,7 @@
   // If we are optimizing for size, see if the branch in the predecessor can be
   // lowered to cbn?z by the constant island lowering pass, and return false if
   // so. This results in a shorter instruction sequence.
-  const Function *F = MBB.getParent()->getFunction();
-  if (F->hasFnAttribute(Attribute::OptimizeForSize) ||
-      F->hasFnAttribute(Attribute::MinSize)) {
+  if (MBB.getParent()->getFunction()->optForSize()) {
     MachineBasicBlock *Pred = *MBB.pred_begin();
     if (!Pred->empty()) {
       MachineInstr *LastMI = &*Pred->rbegin();
@@ -1989,7 +1987,7 @@
                                   unsigned NumBytes) {
   // This optimisation potentially adds lots of load and store
   // micro-operations, it's only really a great benefit to code-size.
-  if (!MF.getFunction()->hasFnAttribute(Attribute::MinSize))
+  if (!MF.getFunction()->optForMinSize())
     return false;
 
   // If only one register is pushed/popped, LLVM can use an LDR/STR
@@ -3652,6 +3650,7 @@
   // instructions).
   if (Latency > 0 && Subtarget.isThumb2()) {
     const MachineFunction *MF = DefMI->getParent()->getParent();
+    // FIXME: Use Function::optForSize().
     if (MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize))
       --Latency;
   }
Index: llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
+++ llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
@@ -1826,7 +1826,6 @@
   // FIXME: handle tail calls differently.
   unsigned CallOpc;
-  bool HasMinSizeAttr = MF.getFunction()->hasFnAttribute(Attribute::MinSize);
   if (Subtarget->isThumb()) {
     if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
       CallOpc = ARMISD::CALL_NOLINK;
@@ -1836,8 +1835,8 @@
     if (!isDirect && !Subtarget->hasV5TOps())
       CallOpc = ARMISD::CALL_NOLINK;
     else if (doesNotRet && isDirect && Subtarget->hasRAS() &&
-               // Emit regular call when code size is the priority
-               !HasMinSizeAttr)
+             // Emit regular call when code size is the priority
+             !MF.getFunction()->optForMinSize())
       // "mov lr, pc; b _foo" to avoid confusing the RSP
       CallOpc = ARMISD::CALL_NOLINK;
     else
Index: llvm/trunk/lib/Target/ARM/ARMSubtarget.cpp
===================================================================
--- llvm/trunk/lib/Target/ARM/ARMSubtarget.cpp
+++ llvm/trunk/lib/Target/ARM/ARMSubtarget.cpp
@@ -294,8 +294,7 @@
   // immediates as it is inherently position independent, and may be out of
   // range otherwise.
   return !NoMovt && hasV6T2Ops() &&
-         (isTargetWindows() ||
-          !MF.getFunction()->hasFnAttribute(Attribute::MinSize));
+         (isTargetWindows() || !MF.getFunction()->optForMinSize());
 }
 
 bool ARMSubtarget::useFastISel() const {
Index: llvm/trunk/lib/Target/ARM/Thumb2SizeReduction.cpp
===================================================================
--- llvm/trunk/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ llvm/trunk/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -633,10 +633,9 @@
   if (ReduceLimit2Addr != -1 && ((int)Num2Addrs >= ReduceLimit2Addr))
     return false;
 
-  if (!MinimizeSize && !OptimizeSize && Entry.AvoidMovs &&
-      STI->avoidMOVsShifterOperand())
+  if (!OptimizeSize && Entry.AvoidMovs && STI->avoidMOVsShifterOperand())
     // Don't issue movs with shifter operand for some CPUs unless we
-    // are optimizing / minimizing for size.
+    // are optimizing for size.
     return false;
 
   unsigned Reg0 = MI->getOperand(0).getReg();
@@ -750,10 +749,9 @@
   if (ReduceLimit != -1 && ((int)NumNarrows >= ReduceLimit))
     return false;
 
-  if (!MinimizeSize && !OptimizeSize && Entry.AvoidMovs &&
-      STI->avoidMOVsShifterOperand())
+  if (!OptimizeSize && Entry.AvoidMovs && STI->avoidMOVsShifterOperand())
     // Don't issue movs with shifter operand for some CPUs unless we
-    // are optimizing / minimizing for size.
+    // are optimizing for size.
     return false;
 
   unsigned Limit = ~0U;
@@ -1012,9 +1010,9 @@
   TII = static_cast<const Thumb2InstrInfo *>(STI->getInstrInfo());
 
-  // Optimizing / minimizing size?
-  OptimizeSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
-  MinimizeSize = MF.getFunction()->hasFnAttribute(Attribute::MinSize);
+  // Optimizing / minimizing size? Minimizing size implies optimizing for size.
+  OptimizeSize = MF.getFunction()->optForSize();
+  MinimizeSize = MF.getFunction()->optForMinSize();
 
   BlockInfo.clear();
   BlockInfo.resize(MF.getNumBlockIDs());
Index: llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -1219,6 +1219,7 @@
 }
 
+// FIXME: Use Function::optForSize().
 inline static bool isOptSize(const MachineFunction &MF) {
   AttributeSet AF = MF.getFunction()->getAttributes();
   return AF.hasAttribute(AttributeSet::FunctionIndex,
@@ -1226,8 +1227,7 @@
 }
 
 inline static bool isMinSize(const MachineFunction &MF) {
-  AttributeSet AF = MF.getFunction()->getAttributes();
-  return AF.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
+  return MF.getFunction()->optForMinSize();
 }
Index: llvm/trunk/lib/Target/X86/X86CallFrameOptimization.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86CallFrameOptimization.cpp
+++ llvm/trunk/lib/Target/X86/X86CallFrameOptimization.cpp
@@ -170,11 +170,7 @@
     return true;
 
   // Don't do this when not optimizing for size.
-  bool OptForSize =
-      MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
-      MF.getFunction()->hasFnAttribute(Attribute::MinSize);
-
-  if (!OptForSize)
+  if (!MF.getFunction()->optForSize())
     return false;
 
   unsigned StackAlign = TFL->getStackAlignment();
Index: llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -462,6 +462,7 @@
 void X86DAGToDAGISel::PreprocessISelDAG() {
   // OptForSize is used in pattern predicates that isel is matching.
+  // FIXME: Use Function::optForSize().
   OptForSize = MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
 
   for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
Index: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
@@ -5189,6 +5189,7 @@
   // it may be detrimental to overall size. There needs to be a way to detect
   // that condition to know if this is truly a size win.
   const Function *F = DAG.getMachineFunction().getFunction();
+  // FIXME: Use Function::optForSize().
   bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize);
 
   // Handle broadcasting a single constant scalar from the constant pool
@@ -11118,8 +11119,7 @@
     // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
     // combine either bitwise AND or insert of float 0.0 to set these bits.
 
-    const Function *F = DAG.getMachineFunction().getFunction();
-    bool MinSize = F->hasFnAttribute(Attribute::MinSize);
+    bool MinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
     if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
       // If this is an insertion of 32-bits into the low 32-bits of
       // a vector, we prefer to generate a blend with immediate rather
@@ -13195,8 +13195,7 @@
   // if we're optimizing for size, however, as that'll allow better folding
   // of memory operations.
   if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
-      !DAG.getMachineFunction().getFunction()->hasFnAttribute(
-          Attribute::MinSize) &&
+      !DAG.getMachineFunction().getFunction()->optForMinSize() &&
       !Subtarget->isAtom()) {
     unsigned ExtendOp = isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND
                                                : ISD::SIGN_EXTEND;
@@ -23962,6 +23961,7 @@
   // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
   MachineFunction &MF = DAG.getMachineFunction();
+  // FIXME: Use Function::optForSize().
   bool OptForSize =
       MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
Index: llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
@@ -4875,8 +4875,7 @@
   // For CPUs that favor the register form of a call or push,
   // do not fold loads into calls or pushes, unless optimizing for size
   // aggressively.
-  if (isCallRegIndirect &&
-      !MF.getFunction()->hasFnAttribute(Attribute::MinSize) &&
+  if (isCallRegIndirect && !MF.getFunction()->optForMinSize() &&
       (MI->getOpcode() == X86::CALL32r || MI->getOpcode() == X86::CALL64r ||
        MI->getOpcode() == X86::PUSH16r || MI->getOpcode() == X86::PUSH32r ||
        MI->getOpcode() == X86::PUSH64r))
@@ -5242,6 +5241,7 @@
   // Unless optimizing for size, don't fold to avoid partial
   // register update stalls
+  // FIXME: Use Function::optForSize().
   if (!MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) &&
       hasPartialRegUpdate(MI->getOpcode()))
     return nullptr;
@@ -5351,6 +5351,7 @@
   // Unless optimizing for size, don't fold to avoid partial
   // register update stalls
+  // FIXME: Use Function::optForSize().
   if (!MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) &&
       hasPartialRegUpdate(MI->getOpcode()))
     return nullptr;
Index: llvm/trunk/lib/Target/X86/X86PadShortFunction.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86PadShortFunction.cpp
+++ llvm/trunk/lib/Target/X86/X86PadShortFunction.cpp
@@ -93,8 +93,7 @@
 /// runOnMachineFunction - Loop over all of the basic blocks, inserting
 /// NOOP instructions before early exits.
 bool PadShortFunc::runOnMachineFunction(MachineFunction &MF) {
-  if (MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
-      MF.getFunction()->hasFnAttribute(Attribute::MinSize)) {
+  if (MF.getFunction()->optForSize()) {
     return false;
   }
Index: llvm/trunk/lib/Transforms/IPO/Inliner.cpp
===================================================================
--- llvm/trunk/lib/Transforms/IPO/Inliner.cpp
+++ llvm/trunk/lib/Transforms/IPO/Inliner.cpp
@@ -265,6 +265,7 @@
   // would decrease the threshold.
   Function *Caller = CS.getCaller();
   bool OptSize = Caller && !Caller->isDeclaration() &&
+                 // FIXME: Use Function::optForSize().
                  Caller->hasFnAttribute(Attribute::OptimizeForSize);
   if (!(InlineLimit.getNumOccurrences() > 0) && OptSize &&
       OptSizeThreshold < thres)
Index: llvm/trunk/lib/Transforms/Scalar/LoopUnrollPass.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ llvm/trunk/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -208,6 +208,7 @@
                                  : UP.DynamicCostSavingsDiscount;
 
   if (!UserThreshold &&
+      // FIXME: Use Function::optForSize().
       L->getHeader()->getParent()->hasFnAttribute(
           Attribute::OptimizeForSize)) {
     Threshold = UP.OptSizeThreshold;
Index: llvm/trunk/lib/Transforms/Scalar/LoopUnswitch.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ llvm/trunk/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -600,6 +600,7 @@
   }
 
   // Do not do non-trivial unswitch while optimizing for size.
+  // FIXME: Use Function::optForSize().
   if (OptimizeForSize || F->hasFnAttribute(Attribute::OptimizeForSize))
     return false;
Index: llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -1616,6 +1616,7 @@
   // Check the function attributes to find out if this function should be
   // optimized for size.
   bool OptForSize = Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
+                    // FIXME: Use Function::optForSize().
                     F->hasFnAttribute(Attribute::OptimizeForSize);
 
   // Compute the weighted frequency of this loop being executed and see if it
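
For illustration, a minimal sketch of how client code could consult the new helpers once this lands. Only Function::optForSize() and Function::optForMinSize() come from this patch; the function name and threshold values below are hypothetical, not LLVM code:

// Sketch only, assuming the Function.h change above. pickSizeThreshold and
// its constants are made up for illustration; real passes pick their own.
#include "llvm/IR/Function.h"

static unsigned pickSizeThreshold(const llvm::Function &F) {
  // optForMinSize() implies optForSize(), so test the stricter flag first.
  if (F.optForMinSize())
    return 0;   // -Oz: avoid any code growth.
  if (F.optForSize())
    return 25;  // -Os: tolerate only small growth.
  return 225;   // Speed-oriented default.
}

Because optForSize() already folds in the MinSize check, callers no longer need to query both attributes, which is exactly the duplication the hunks above (and the remaining FIXMEs) are cleaning up.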