Index: clang/lib/CodeGen/CGCall.cpp =================================================================== --- clang/lib/CodeGen/CGCall.cpp +++ clang/lib/CodeGen/CGCall.cpp @@ -1809,7 +1809,7 @@ void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) { llvm::AttrBuilder FuncAttrs; - ConstructDefaultFnAttrList(F.getName(), F.optForNone(), + ConstructDefaultFnAttrList(F.getName(), F.hasOptNone(), /* AttrOnCallsite = */ false, FuncAttrs); F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs); } Index: llvm/include/llvm/CodeGen/TargetLowering.h =================================================================== --- llvm/include/llvm/CodeGen/TargetLowering.h +++ llvm/include/llvm/CodeGen/TargetLowering.h @@ -953,7 +953,7 @@ /// getEstimatedNumberOfCaseClusters() in BasicTTIImpl. virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases, uint64_t Range) const { - const bool OptForSize = SI->getParent()->getParent()->optForSize(); + const bool OptForSize = SI->getParent()->getParent()->hasOptSize(); const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize); const unsigned MaxJumpTableSize = OptForSize ? UINT_MAX : getMaximumJumpTableSize(); Index: llvm/include/llvm/IR/Function.h =================================================================== --- llvm/include/llvm/IR/Function.h +++ llvm/include/llvm/IR/Function.h @@ -591,14 +591,14 @@ } /// Do not optimize this function (-O0). - bool optForNone() const { return hasFnAttribute(Attribute::OptimizeNone); } + bool hasOptNone() const { return hasFnAttribute(Attribute::OptimizeNone); } /// Optimize this function for minimum size (-Oz). - bool optForMinSize() const { return hasFnAttribute(Attribute::MinSize); } + bool hasMinSize() const { return hasFnAttribute(Attribute::MinSize); } /// Optimize this function for size (-Os) or minimum size (-Oz). - bool optForSize() const { - return hasFnAttribute(Attribute::OptimizeForSize) || optForMinSize(); + bool hasOptSize() const { + return hasFnAttribute(Attribute::OptimizeForSize) || hasMinSize(); } /// copyAttributesFrom - copy all additional attributes (those not needed to Index: llvm/lib/Analysis/GlobalsModRef.cpp =================================================================== --- llvm/lib/Analysis/GlobalsModRef.cpp +++ llvm/lib/Analysis/GlobalsModRef.cpp @@ -513,7 +513,7 @@ break; } - if (F->isDeclaration() || F->optForNone()) { + if (F->isDeclaration() || F->hasOptNone()) { // Try to get mod/ref behaviour from function attributes. if (F->doesNotAccessMemory()) { // Can't do better than that! @@ -566,7 +566,7 @@ // Don't prove any properties based on the implementation of an optnone // function. Function attributes were already used as a best approximation // above. - if (Node->getFunction()->optForNone()) + if (Node->getFunction()->hasOptNone()) continue; for (Instruction &I : instructions(Node->getFunction())) { Index: llvm/lib/Analysis/InlineCost.cpp =================================================================== --- llvm/lib/Analysis/InlineCost.cpp +++ llvm/lib/Analysis/InlineCost.cpp @@ -897,7 +897,7 @@ // Use the OptMinSizeThreshold or OptSizeThreshold knob if they are available // and reduce the threshold if the caller has the necessary attribute. - if (Caller->optForMinSize()) { + if (Caller->hasMinSize()) { Threshold = MinIfValid(Threshold, Params.OptMinSizeThreshold); // For minsize, we want to disable the single BB bonus and the vector // bonuses, but not the last-call-to-static bonus. 
Inlining the last call to @@ -905,12 +905,12 @@ // call/return instructions. SingleBBBonusPercent = 0; VectorBonusPercent = 0; - } else if (Caller->optForSize()) + } else if (Caller->hasOptSize()) Threshold = MinIfValid(Threshold, Params.OptSizeThreshold); // Adjust the threshold based on inlinehint attribute and profile based // hotness information if the caller does not have MinSize attribute. - if (!Caller->optForMinSize()) { + if (!Caller->hasMinSize()) { if (Callee.hasFnAttribute(Attribute::InlineHint)) Threshold = MaxIfValid(Threshold, Params.HintThreshold); @@ -923,7 +923,7 @@ // BlockFrequencyInfo is available. BlockFrequencyInfo *CallerBFI = GetBFI ? &((*GetBFI)(*Caller)) : nullptr; auto HotCallSiteThreshold = getHotCallSiteThreshold(CS, CallerBFI); - if (!Caller->optForSize() && HotCallSiteThreshold) { + if (!Caller->hasOptSize() && HotCallSiteThreshold) { LLVM_DEBUG(dbgs() << "Hot callsite.\n"); // FIXME: This should update the threshold only if it exceeds the // current threshold, but AutoFDO + ThinLTO currently relies on this @@ -1899,7 +1899,7 @@ // size, we penalise any call sites that perform loops. We do this after all // other costs here, so will likely only be dealing with relatively small // functions (and hence DT and LI will hopefully be cheap). - if (Caller->optForMinSize()) { + if (Caller->hasMinSize()) { DominatorTree DT(F); LoopInfo LI(DT); int NumLoops = 0; @@ -2036,7 +2036,7 @@ return llvm::InlineCost::getNever("conflicting attributes"); // Don't inline this call if the caller has the optnone attribute. - if (Caller->optForNone()) + if (Caller->hasOptNone()) return llvm::InlineCost::getNever("optnone attribute"); // Don't inline a function that treats null pointer as valid into a caller Index: llvm/lib/Analysis/LoopPass.cpp =================================================================== --- llvm/lib/Analysis/LoopPass.cpp +++ llvm/lib/Analysis/LoopPass.cpp @@ -396,7 +396,7 @@ if (Gate.isEnabled() && !Gate.shouldRunPass(this, getDescription(*L))) return true; // Check for the OptimizeNone attribute. - if (F->optForNone()) { + if (F->hasOptNone()) { // FIXME: Report this to dbgs() only once per function. LLVM_DEBUG(dbgs() << "Skipping pass '" << getPassName() << "' in function " << F->getName() << "\n"); Index: llvm/lib/Analysis/RegionPass.cpp =================================================================== --- llvm/lib/Analysis/RegionPass.cpp +++ llvm/lib/Analysis/RegionPass.cpp @@ -288,7 +288,7 @@ if (Gate.isEnabled() && !Gate.shouldRunPass(this, getDescription(R))) return true; - if (F.optForNone()) { + if (F.hasOptNone()) { // Report this only once per function. 
if (R.getEntry() == &F.getEntryBlock()) LLVM_DEBUG(dbgs() << "Skipping pass '" << getPassName() Index: llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp =================================================================== --- llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -2865,7 +2865,7 @@ MCCodePaddingContext &Context) const { assert(MF != nullptr && "Machine function must be valid"); Context.IsPaddingActive = !MF->hasInlineAsm() && - !MF->getFunction().optForSize() && + !MF->getFunction().hasOptSize() && TM.getOptLevel() != CodeGenOpt::None; Context.IsBasicBlockReachableViaFallthrough = std::find(MBB.pred_begin(), MBB.pred_end(), MBB.getPrevNode()) != Index: llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp =================================================================== --- llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp +++ llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp @@ -1342,7 +1342,7 @@ FPO |= FrameProcedureOptions(uint32_t(CurFn->EncodedLocalFramePtrReg) << 14U); FPO |= FrameProcedureOptions(uint32_t(CurFn->EncodedParamFramePtrReg) << 16U); if (Asm->TM.getOptLevel() != CodeGenOpt::None && - !GV.optForSize() && !GV.optForNone()) + !GV.hasOptSize() && !GV.hasOptNone()) FPO |= FrameProcedureOptions::OptimizedForSpeed; // FIXME: Set GuardCfg when it is implemented. CurFn->FrameProcOpts = FPO; Index: llvm/lib/CodeGen/AtomicExpandPass.cpp =================================================================== --- llvm/lib/CodeGen/AtomicExpandPass.cpp +++ llvm/lib/CodeGen/AtomicExpandPass.cpp @@ -1111,11 +1111,11 @@ bool HasReleasedLoadBB = !CI->isWeak() && ShouldInsertFencesForAtomic && SuccessOrder != AtomicOrdering::Monotonic && SuccessOrder != AtomicOrdering::Acquire && - !F->optForMinSize(); + !F->hasMinSize(); // There's no overhead for sinking the release barrier in a weak cmpxchg, so // do it even on minsize. - bool UseUnconditionalReleaseBarrier = F->optForMinSize() && !CI->isWeak(); + bool UseUnconditionalReleaseBarrier = F->hasMinSize() && !CI->isWeak(); // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord // Index: llvm/lib/CodeGen/BranchFolding.cpp =================================================================== --- llvm/lib/CodeGen/BranchFolding.cpp +++ llvm/lib/CodeGen/BranchFolding.cpp @@ -721,7 +721,7 @@ // branch instruction, which is likely to be smaller than the 2 // instructions that would be deleted in the merge. MachineFunction *MF = MBB1->getParent(); - return EffectiveTailLen >= 2 && MF->getFunction().optForSize() && + return EffectiveTailLen >= 2 && MF->getFunction().hasOptSize() && (I1 == MBB1->begin() || I2 == MBB2->begin()); } @@ -1574,7 +1574,7 @@ } if (!IsEmptyBlock(MBB) && MBB->pred_size() == 1 && - MF.getFunction().optForSize()) { + MF.getFunction().hasOptSize()) { // Changing "Jcc foo; foo: jmp bar;" into "Jcc bar;" might change the branch // direction, thereby defeating careful block placement and regressing // performance. Therefore, only consider this for optsize functions. 
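For reference, the rename is purely mechanical: hasOptNone() still tests the optnone attribute, hasMinSize() still tests minsize, and hasOptSize() still returns true for either optsize or minsize, so minsize continues to imply optsize (see the Function.h hunk above and the assert in X86ISelDAGToDAG.cpp below). A minimal standalone sketch of how a caller might query the new accessors follows; the helper function is illustrative only and is not part of this patch.

    // Illustrative only; not part of this patch. Uses the renamed accessors
    // declared in llvm/include/llvm/IR/Function.h above.
    #include "llvm/IR/Function.h"

    // Pick a hypothetical unroll budget from the function's attributes.
    static unsigned pickUnrollBudget(const llvm::Function &F) {
      if (F.hasOptNone())  // optnone (-O0): do not optimize at all
        return 0;
      if (F.hasMinSize())  // minsize (-Oz); implies hasOptSize() as well
        return 1;
      if (F.hasOptSize())  // optsize (-Os) or minsize
        return 2;
      return 8;            // default speed-oriented budget
    }
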
Index: llvm/lib/CodeGen/CodeGenPrepare.cpp =================================================================== --- llvm/lib/CodeGen/CodeGenPrepare.cpp +++ llvm/lib/CodeGen/CodeGenPrepare.cpp @@ -426,7 +426,7 @@ LI = &getAnalysis().getLoopInfo(); BPI.reset(new BranchProbabilityInfo(F, *LI)); BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI)); - OptSize = F.optForSize(); + OptSize = F.hasOptSize(); ProfileSummaryInfo *PSI = &getAnalysis().getPSI(); @@ -4454,7 +4454,7 @@ if (!MightBeFoldableInst(I)) return true; - const bool OptSize = I->getFunction()->optForSize(); + const bool OptSize = I->getFunction()->hasOptSize(); // Loop over all the uses, recursively processing them. for (Use &U : I->uses()) { Index: llvm/lib/CodeGen/ExpandMemCmp.cpp =================================================================== --- llvm/lib/CodeGen/ExpandMemCmp.cpp +++ llvm/lib/CodeGen/ExpandMemCmp.cpp @@ -721,7 +721,7 @@ NumMemCmpCalls++; // Early exit from expansion if -Oz. - if (CI->getFunction()->optForMinSize()) + if (CI->getFunction()->hasMinSize()) return false; // Early exit from expansion if size is not a constant. @@ -742,7 +742,7 @@ if (!Options) return false; const unsigned MaxNumLoads = - TLI->getMaxExpandSizeMemcmp(CI->getFunction()->optForSize()); + TLI->getMaxExpandSizeMemcmp(CI->getFunction()->hasOptSize()); unsigned NumLoadsPerBlock = MemCmpEqZeroNumLoadsPerBlock.getNumOccurrences() ? MemCmpEqZeroNumLoadsPerBlock Index: llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp =================================================================== --- llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp +++ llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp @@ -657,7 +657,7 @@ LLVM_DEBUG(dbgs() << "Assign register banks for: " << MF.getName() << '\n'); const Function &F = MF.getFunction(); Mode SaveOptMode = OptMode; - if (F.optForNone()) + if (F.hasOptNone()) OptMode = Mode::Fast; init(MF); Index: llvm/lib/CodeGen/GlobalMerge.cpp =================================================================== --- llvm/lib/CodeGen/GlobalMerge.cpp +++ llvm/lib/CodeGen/GlobalMerge.cpp @@ -330,7 +330,7 @@ Function *ParentFn = I->getParent()->getParent(); // If we're only optimizing for size, ignore non-minsize functions. - if (OnlyOptimizeForSize && !ParentFn->optForMinSize()) + if (OnlyOptimizeForSize && !ParentFn->hasMinSize()) continue; size_t UGSIdx = GlobalUsesByFunction[ParentFn]; Index: llvm/lib/CodeGen/MachineBlockPlacement.cpp =================================================================== --- llvm/lib/CodeGen/MachineBlockPlacement.cpp +++ llvm/lib/CodeGen/MachineBlockPlacement.cpp @@ -1813,7 +1813,7 @@ // i.e. when the layout predecessor does not fallthrough to the loop header. // In practice this never happens though: there always seems to be a preheader // that can fallthrough and that is also placed before the header. - if (F->getFunction().optForSize()) + if (F->getFunction().hasOptSize()) return L.getHeader(); // Check that the header hasn't been fused with a preheader block due to @@ -2561,8 +2561,8 @@ // exclusively on the loop info here so that we can align backedges in // unnatural CFGs and backedges that were introduced purely because of the // loop rotations done during this layout pass. 
- if (F->getFunction().optForMinSize() || - (F->getFunction().optForSize() && !TLI->alignLoopsWithOptSize())) + if (F->getFunction().hasMinSize() || + (F->getFunction().hasOptSize() && !TLI->alignLoopsWithOptSize())) return; BlockChain &FunctionChain = *BlockToChain[&F->front()]; if (FunctionChain.begin() == FunctionChain.end()) @@ -2837,7 +2837,7 @@ if (allowTailDupPlacement()) { MPDT = &getAnalysis(); - if (MF.getFunction().optForSize()) + if (MF.getFunction().hasOptSize()) TailDupSize = 1; bool PreRegAlloc = false; TailDup.initMF(MF, PreRegAlloc, MBPI, /* LayoutMode */ true, TailDupSize); Index: llvm/lib/CodeGen/MachineCombiner.cpp =================================================================== --- llvm/lib/CodeGen/MachineCombiner.cpp +++ llvm/lib/CodeGen/MachineCombiner.cpp @@ -637,7 +637,7 @@ MLI = &getAnalysis(); Traces = &getAnalysis(); MinInstr = nullptr; - OptSize = MF.getFunction().optForSize(); + OptSize = MF.getFunction().hasOptSize(); LLVM_DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n'); if (!TII->useMachineCombiner()) { Index: llvm/lib/CodeGen/MachineFunction.cpp =================================================================== --- llvm/lib/CodeGen/MachineFunction.cpp +++ llvm/lib/CodeGen/MachineFunction.cpp @@ -174,7 +174,7 @@ Alignment = STI->getTargetLowering()->getMinFunctionAlignment(); // FIXME: Shouldn't use pref alignment if explicit alignment is set on F. - // FIXME: Use Function::optForSize(). + // FIXME: Use Function::hasOptSize(). if (!F.hasFnAttribute(Attribute::OptimizeForSize)) Alignment = std::max(Alignment, STI->getTargetLowering()->getPrefFunctionAlignment()); Index: llvm/lib/CodeGen/SafeStack.cpp =================================================================== --- llvm/lib/CodeGen/SafeStack.cpp +++ llvm/lib/CodeGen/SafeStack.cpp @@ -728,7 +728,7 @@ if (!isa(UnsafeStackPtr)) return; - if(F.optForNone()) + if(F.hasOptNone()) return; CallSite CS(UnsafeStackPtr); Index: llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp =================================================================== --- llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -196,7 +196,7 @@ DAGCombiner(SelectionDAG &D, AliasAnalysis *AA, CodeGenOpt::Level OL) : DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes), OptLevel(OL), AA(AA) { - ForCodeSize = DAG.getMachineFunction().getFunction().optForSize(); + ForCodeSize = DAG.getMachineFunction().getFunction().hasOptSize(); MaximumLegalStoreInBits = 0; for (MVT VT : MVT::all_valuetypes()) @@ -12188,7 +12188,7 @@ // Assume that libcalls are the smallest code. // TODO: This restriction should probably be lifted for vectors. - if (DAG.getMachineFunction().getFunction().optForSize()) + if (DAG.getMachineFunction().getFunction().hasOptSize()) return SDValue(); // pow(X, 0.25) --> sqrt(sqrt(X)) @@ -19213,7 +19213,7 @@ SDValue DAGCombiner::BuildSDIV(SDNode *N) { // when optimising for minimum size, we don't want to expand a div to a mul // and a shift. - if (DAG.getMachineFunction().getFunction().optForMinSize()) + if (DAG.getMachineFunction().getFunction().hasMinSize()) return SDValue(); SmallVector Built; @@ -19254,7 +19254,7 @@ SDValue DAGCombiner::BuildUDIV(SDNode *N) { // when optimising for minimum size, we don't want to expand a div to a mul // and a shift. 
- if (DAG.getMachineFunction().getFunction().optForMinSize()) + if (DAG.getMachineFunction().getFunction().hasMinSize()) return SDValue(); SmallVector Built; Index: llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp =================================================================== --- llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -3092,7 +3092,7 @@ // Check to see if this FP immediate is already legal. // If this is a legal constant, turn it into a TargetConstantFP node. if (!TLI.isFPImmLegal(CFP->getValueAPF(), Node->getValueType(0), - DAG.getMachineFunction().getFunction().optForSize())) + DAG.getMachineFunction().getFunction().hasOptSize())) Results.push_back(ExpandConstantFP(CFP, true)); break; } Index: llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp =================================================================== --- llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -1418,7 +1418,7 @@ assert((TargetFlags == 0 || isTarget) && "Cannot set target flags on target-independent globals"); if (Alignment == 0) - Alignment = MF->getFunction().optForSize() + Alignment = MF->getFunction().hasOptSize() ? getDataLayout().getABITypeAlignment(C->getType()) : getDataLayout().getPrefTypeAlignment(C->getType()); unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool; @@ -5657,8 +5657,8 @@ // On Darwin, -Os means optimize for size without hurting performance, so // only really optimize for size when -Oz (MinSize) is used. if (MF.getTarget().getTargetTriple().isOSDarwin()) - return MF.getFunction().optForMinSize(); - return MF.getFunction().optForSize(); + return MF.getFunction().hasMinSize(); + return MF.getFunction().hasOptSize(); } static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl, Index: llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp =================================================================== --- llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -5220,7 +5220,7 @@ return DAG.getConstantFP(1.0, DL, LHS.getValueType()); const Function &F = DAG.getMachineFunction().getFunction(); - if (!F.optForSize() || + if (!F.hasOptSize() || // If optimizing for size, don't insert too many multiplies. // This inserts up to 5 multiplies. countPopulation(Val) + Log2_32(Val) < 7) { @@ -10617,7 +10617,7 @@ // Don't perform if there is only one cluster or optimizing for size. if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 || TM.getOptLevel() == CodeGenOpt::None || - SwitchMBB->getParent()->getFunction().optForMinSize()) + SwitchMBB->getParent()->getFunction().hasMinSize()) return SwitchMBB; BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100); @@ -10740,7 +10740,7 @@ unsigned NumClusters = W.LastCluster - W.FirstCluster + 1; if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None && - !DefaultMBB->getParent()->getFunction().optForMinSize()) { + !DefaultMBB->getParent()->getFunction().hasMinSize()) { // For optimized builds, lower large range as a balanced binary tree. 
splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB); continue; Index: llvm/lib/CodeGen/TailDuplicator.cpp =================================================================== --- llvm/lib/CodeGen/TailDuplicator.cpp +++ llvm/lib/CodeGen/TailDuplicator.cpp @@ -557,7 +557,7 @@ unsigned MaxDuplicateCount; if (TailDupSize == 0 && TailDuplicateSize.getNumOccurrences() == 0 && - MF->getFunction().optForSize()) + MF->getFunction().hasOptSize()) MaxDuplicateCount = 1; else if (TailDupSize == 0) MaxDuplicateCount = TailDuplicateSize; Index: llvm/lib/IR/Pass.cpp =================================================================== --- llvm/lib/IR/Pass.cpp +++ llvm/lib/IR/Pass.cpp @@ -168,7 +168,7 @@ if (Gate.isEnabled() && !Gate.shouldRunPass(this, getDescription(F))) return true; - if (F.optForNone()) { + if (F.hasOptNone()) { LLVM_DEBUG(dbgs() << "Skipping pass '" << getPassName() << "' on function " << F.getName() << "\n"); return true; @@ -207,7 +207,7 @@ OptPassGate &Gate = F->getContext().getOptPassGate(); if (Gate.isEnabled() && !Gate.shouldRunPass(this, getDescription(BB))) return true; - if (F->optForNone()) { + if (F->hasOptNone()) { // Report this only once per function. if (&BB == &F->getEntryBlock()) LLVM_DEBUG(dbgs() << "Skipping pass '" << getPassName() Index: llvm/lib/Target/AArch64/AArch64CompressJumpTables.cpp =================================================================== --- llvm/lib/Target/AArch64/AArch64CompressJumpTables.cpp +++ llvm/lib/Target/AArch64/AArch64CompressJumpTables.cpp @@ -140,7 +140,7 @@ const auto &ST = MF->getSubtarget(); TII = ST.getInstrInfo(); - if (ST.force32BitJumpTables() && !MF->getFunction().optForMinSize()) + if (ST.force32BitJumpTables() && !MF->getFunction().hasMinSize()) return false; scanFunction(); Index: llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp =================================================================== --- llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp +++ llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp @@ -940,7 +940,7 @@ MBPI = &getAnalysis(); Traces = &getAnalysis(); MinInstr = nullptr; - MinSize = MF.getFunction().optForMinSize(); + MinSize = MF.getFunction().hasMinSize(); bool Changed = false; CmpConv.runOnMachineFunction(MF, MBPI); Index: llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp =================================================================== --- llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -52,7 +52,7 @@ } bool runOnMachineFunction(MachineFunction &MF) override { - ForCodeSize = MF.getFunction().optForSize(); + ForCodeSize = MF.getFunction().hasOptSize(); Subtarget = &MF.getSubtarget(); return SelectionDAGISel::runOnMachineFunction(MF); } Index: llvm/lib/Target/AArch64/AArch64ISelLowering.h =================================================================== --- llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -474,7 +474,7 @@ } bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override { - if (DAG.getMachineFunction().getFunction().optForMinSize()) + if (DAG.getMachineFunction().getFunction().hasMinSize()) return false; return true; } Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp =================================================================== --- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -10382,7 +10382,7 @@ return SDValue(); // Don't split at -Oz. 
- if (DAG.getMachineFunction().getFunction().optForMinSize()) + if (DAG.getMachineFunction().getFunction().hasMinSize()) return SDValue(); // Don't split v2i64 vectors. Memcpy lowering produces those and splitting Index: llvm/lib/Target/AArch64/AArch64InstrInfo.cpp =================================================================== --- llvm/lib/Target/AArch64/AArch64InstrInfo.cpp +++ llvm/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -5486,7 +5486,7 @@ bool AArch64InstrInfo::shouldOutlineFromFunctionByDefault( MachineFunction &MF) const { - return MF.getFunction().optForMinSize(); + return MF.getFunction().hasMinSize(); } #define GET_INSTRINFO_HELPERS Index: llvm/lib/Target/AArch64/AArch64InstrInfo.td =================================================================== --- llvm/lib/Target/AArch64/AArch64InstrInfo.td +++ llvm/lib/Target/AArch64/AArch64InstrInfo.td @@ -407,10 +407,10 @@ // the Function object through the Subtarget and objections were raised // to that (see post-commit review comments for r301750). let RecomputePerFunction = 1 in { - def ForCodeSize : Predicate<"MF->getFunction().optForSize()">; - def NotForCodeSize : Predicate<"!MF->getFunction().optForSize()">; + def ForCodeSize : Predicate<"MF->getFunction().hasOptSize()">; + def NotForCodeSize : Predicate<"!MF->getFunction().hasOptSize()">; // Avoid generating STRQro if it is slow, unless we're optimizing for code size. - def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || MF->getFunction().optForSize()">; + def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || MF->getFunction().hasOptSize()">; def UseBTI : Predicate<[{ MF->getFunction().hasFnAttribute("branch-target-enforcement") }]>; def NotUseBTI : Predicate<[{ !MF->getFunction().hasFnAttribute("branch-target-enforcement") }]>; Index: llvm/lib/Target/ARM/ARMAsmPrinter.cpp =================================================================== --- llvm/lib/Target/ARM/ARMAsmPrinter.cpp +++ llvm/lib/Target/ARM/ARMAsmPrinter.cpp @@ -119,13 +119,13 @@ // Calculate this function's optimization goal. unsigned OptimizationGoal; - if (F.optForNone()) + if (F.hasOptNone()) // For best debugging illusion, speed and small size sacrificed OptimizationGoal = 6; - else if (F.optForMinSize()) + else if (F.hasMinSize()) // Aggressively for small size, speed and debug illusion sacrificed OptimizationGoal = 4; - else if (F.optForSize()) + else if (F.hasOptSize()) // For small size, but speed and debugging illusion preserved OptimizationGoal = 3; else if (TM.getOptLevel() == CodeGenOpt::Aggressive) Index: llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp =================================================================== --- llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -1899,7 +1899,7 @@ // If we are optimizing for size, see if the branch in the predecessor can be // lowered to cbn?z by the constant island lowering pass, and return false if // so. This results in a shorter instruction sequence. - if (MBB.getParent()->getFunction().optForSize()) { + if (MBB.getParent()->getFunction().hasOptSize()) { MachineBasicBlock *Pred = *MBB.pred_begin(); if (!Pred->empty()) { MachineInstr *LastMI = &*Pred->rbegin(); @@ -2267,7 +2267,7 @@ unsigned NumBytes) { // This optimisation potentially adds lots of load and store // micro-operations, it's only really a great benefit to code-size. - if (!Subtarget.optForMinSize()) + if (!Subtarget.hasMinSize()) return false; // If only one register is pushed/popped, LLVM can use an LDR/STR @@ -4163,7 +4163,7 @@ // instructions). 
if (Latency > 0 && Subtarget.isThumb2()) { const MachineFunction *MF = DefMI.getParent()->getParent(); - // FIXME: Use Function::optForSize(). + // FIXME: Use Function::hasOptSize(). if (MF->getFunction().hasFnAttribute(Attribute::OptimizeForSize)) --Latency; } Index: llvm/lib/Target/ARM/ARMISelLowering.cpp =================================================================== --- llvm/lib/Target/ARM/ARMISelLowering.cpp +++ llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -2074,7 +2074,7 @@ auto *GV = cast(Callee)->getGlobal(); auto *BB = CLI.CS.getParent(); bool PreferIndirect = - Subtarget->isThumb() && Subtarget->optForMinSize() && + Subtarget->isThumb() && Subtarget->hasMinSize() && count_if(GV->users(), [&BB](const User *U) { return isa(U) && cast(U)->getParent() == BB; }) > 2; @@ -2146,7 +2146,7 @@ CallOpc = ARMISD::CALL_NOLINK; else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() && // Emit regular call when code size is the priority - !Subtarget->optForMinSize()) + !Subtarget->hasMinSize()) // "mov lr, pc; b _foo" to avoid confusing the RSP CallOpc = ARMISD::CALL_NOLINK; else @@ -7818,7 +7818,7 @@ return SDValue(); const auto &ST = static_cast(DAG.getSubtarget()); - const bool MinSize = ST.optForMinSize(); + const bool MinSize = ST.hasMinSize(); const bool HasDivide = ST.isThumb() ? ST.hasDivideInThumbMode() : ST.hasDivideInARMMode(); @@ -14826,7 +14826,7 @@ } bool ARMTargetLowering::shouldExpandShift(SelectionDAG &DAG, SDNode *N) const { - return !Subtarget->optForMinSize(); + return !Subtarget->hasMinSize(); } Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr, Index: llvm/lib/Target/ARM/ARMInstrInfo.td =================================================================== --- llvm/lib/Target/ARM/ARMInstrInfo.td +++ llvm/lib/Target/ARM/ARMInstrInfo.td @@ -361,7 +361,7 @@ def UseFPVMLx: Predicate<"((Subtarget->useFPVMLx() &&" " TM.Options.AllowFPOpFusion != FPOpFusion::Fast) ||" - "Subtarget->optForMinSize())">; + "Subtarget->hasMinSize())">; } def UseMulOps : Predicate<"Subtarget->useMulOps()">; Index: llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp =================================================================== --- llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp +++ llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp @@ -1294,7 +1294,7 @@ // can still change to a writeback form as that will save us 2 bytes // of code size. It can create WAW hazards though, so only do it if // we're minimizing code size. - if (!STI->optForMinSize() || !BaseKill) + if (!STI->hasMinSize() || !BaseKill) return false; bool HighRegsUsed = false; Index: llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp =================================================================== --- llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp +++ llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp @@ -170,7 +170,7 @@ // Code size optimisation: do not inline memcpy if expansion results in // more instructions than the libary call. 
- if (NumMEMCPYs > 1 && Subtarget.optForMinSize()) { + if (NumMEMCPYs > 1 && Subtarget.hasMinSize()) { return SDValue(); } Index: llvm/lib/Target/ARM/ARMSubtarget.h =================================================================== --- llvm/lib/Target/ARM/ARMSubtarget.h +++ llvm/lib/Target/ARM/ARMSubtarget.h @@ -715,7 +715,7 @@ bool disablePostRAScheduler() const { return DisablePostRAScheduler; } bool useSoftFloat() const { return UseSoftFloat; } bool isThumb() const { return InThumbMode; } - bool optForMinSize() const { return OptMinSize; } + bool hasMinSize() const { return OptMinSize; } bool isThumb1Only() const { return InThumbMode && !HasThumb2; } bool isThumb2() const { return InThumbMode && HasThumb2; } bool hasThumb2() const { return HasThumb2; } Index: llvm/lib/Target/ARM/ARMTargetMachine.cpp =================================================================== --- llvm/lib/Target/ARM/ARMTargetMachine.cpp +++ llvm/lib/Target/ARM/ARMTargetMachine.cpp @@ -270,7 +270,7 @@ // Use the optminsize to identify the subtarget, but don't use it in the // feature string. std::string Key = CPU + FS; - if (F.optForMinSize()) + if (F.hasMinSize()) Key += "+minsize"; auto &I = SubtargetMap[Key]; @@ -280,7 +280,7 @@ // function that reside in TargetOptions. resetTargetOptions(F); I = llvm::make_unique(TargetTriple, CPU, FS, *this, isLittle, - F.optForMinSize()); + F.hasMinSize()); if (!I->isThumb() && !I->hasARMOps()) F.getContext().emitError("Function '" + F.getName() + "' uses ARM " Index: llvm/lib/Target/ARM/ARMTargetTransformInfo.h =================================================================== --- llvm/lib/Target/ARM/ARMTargetTransformInfo.h +++ llvm/lib/Target/ARM/ARMTargetTransformInfo.h @@ -94,7 +94,7 @@ bool enableInterleavedAccessVectorization() { return true; } bool shouldFavorBackedgeIndex(const Loop *L) const { - if (L->getHeader()->getParent()->optForSize()) + if (L->getHeader()->getParent()->hasOptSize()) return false; return ST->isMClass() && ST->isThumb2() && L->getNumBlocks() == 1; } Index: llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp =================================================================== --- llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp +++ llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp @@ -602,7 +602,7 @@ // Disable loop unrolling for Oz and Os. UP.OptSizeThreshold = 0; UP.PartialOptSizeThreshold = 0; - if (L->getHeader()->getParent()->optForSize()) + if (L->getHeader()->getParent()->hasOptSize()) return; // Only enable on Thumb-2 targets. Index: llvm/lib/Target/ARM/Thumb2SizeReduction.cpp =================================================================== --- llvm/lib/Target/ARM/Thumb2SizeReduction.cpp +++ llvm/lib/Target/ARM/Thumb2SizeReduction.cpp @@ -1127,8 +1127,8 @@ TII = static_cast(STI->getInstrInfo()); // Optimizing / minimizing size? Minimizing size implies optimizing for size. 
- OptimizeSize = MF.getFunction().optForSize(); - MinimizeSize = STI->optForMinSize(); + OptimizeSize = MF.getFunction().hasOptSize(); + MinimizeSize = STI->hasMinSize(); BlockInfo.clear(); BlockInfo.resize(MF.getNumBlockIDs()); Index: llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp =================================================================== --- llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp +++ llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp @@ -374,17 +374,17 @@ } static inline bool isOptNone(const MachineFunction &MF) { - return MF.getFunction().optForNone() || + return MF.getFunction().hasOptNone() || MF.getTarget().getOptLevel() == CodeGenOpt::None; } static inline bool isOptSize(const MachineFunction &MF) { const Function &F = MF.getFunction(); - return F.optForSize() && !F.optForMinSize(); + return F.hasOptSize() && !F.hasMinSize(); } static inline bool isMinSize(const MachineFunction &MF) { - return MF.getFunction().optForMinSize(); + return MF.getFunction().hasMinSize(); } /// Implements shrink-wrapping of the stack frame. By default, stack frame Index: llvm/lib/Target/PowerPC/PPCISelLowering.cpp =================================================================== --- llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -14707,7 +14707,7 @@ return SDValue(); // An imul is usually smaller than the alternative sequence for legal type. - if (DAG.getMachineFunction().getFunction().optForMinSize() && + if (DAG.getMachineFunction().getFunction().hasMinSize() && isOperationLegal(ISD::MUL, N->getValueType(0))) return SDValue(); Index: llvm/lib/Target/X86/X86FixupBWInsts.cpp =================================================================== --- llvm/lib/Target/X86/X86FixupBWInsts.cpp +++ llvm/lib/Target/X86/X86FixupBWInsts.cpp @@ -150,7 +150,7 @@ this->MF = &MF; TII = MF.getSubtarget().getInstrInfo(); - OptForSize = MF.getFunction().optForSize(); + OptForSize = MF.getFunction().hasOptSize(); MLI = &getAnalysis(); LiveRegs.init(TII->getRegisterInfo()); Index: llvm/lib/Target/X86/X86FixupLEAs.cpp =================================================================== --- llvm/lib/Target/X86/X86FixupLEAs.cpp +++ llvm/lib/Target/X86/X86FixupLEAs.cpp @@ -200,7 +200,7 @@ bool IsSlowLEA = ST.slowLEA(); bool IsSlow3OpsLEA = ST.slow3OpsLEA(); - OptIncDec = !ST.slowIncDec() || Func.getFunction().optForSize(); + OptIncDec = !ST.slowIncDec() || Func.getFunction().hasOptSize(); OptLEA = ST.LEAusesAG() || IsSlowLEA || IsSlow3OpsLEA; if (!OptLEA && !OptIncDec) Index: llvm/lib/Target/X86/X86FrameLowering.cpp =================================================================== --- llvm/lib/Target/X86/X86FrameLowering.cpp +++ llvm/lib/Target/X86/X86FrameLowering.cpp @@ -2810,7 +2810,7 @@ StackAdjustment += mergeSPUpdates(MBB, InsertPos, false); if (StackAdjustment) { - if (!(F.optForMinSize() && + if (!(F.hasMinSize() && adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment))) BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment, /*InEpilogue=*/false); Index: llvm/lib/Target/X86/X86ISelDAGToDAG.cpp =================================================================== --- llvm/lib/Target/X86/X86ISelDAGToDAG.cpp +++ llvm/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -183,8 +183,8 @@ "indirect-tls-seg-refs"); // OptFor[Min]Size are used in pattern predicates that isel is matching. 
- OptForSize = MF.getFunction().optForSize(); - OptForMinSize = MF.getFunction().optForMinSize(); + OptForSize = MF.getFunction().hasOptSize(); + OptForMinSize = MF.getFunction().hasMinSize(); assert((!OptForMinSize || OptForSize) && "OptForMinSize implies OptForSize"); Index: llvm/lib/Target/X86/X86ISelLowering.h =================================================================== --- llvm/lib/Target/X86/X86ISelLowering.h +++ llvm/lib/Target/X86/X86ISelLowering.h @@ -823,7 +823,7 @@ } bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override { - if (DAG.getMachineFunction().getFunction().optForMinSize()) + if (DAG.getMachineFunction().getFunction().hasMinSize()) return false; return true; } Index: llvm/lib/Target/X86/X86ISelLowering.cpp =================================================================== --- llvm/lib/Target/X86/X86ISelLowering.cpp +++ llvm/lib/Target/X86/X86ISelLowering.cpp @@ -7762,7 +7762,7 @@ // TODO: If multiple splats are generated to load the same constant, // it may be detrimental to overall size. There needs to be a way to detect // that condition to know if this is truly a size win. - bool OptForSize = DAG.getMachineFunction().getFunction().optForSize(); + bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize(); // Handle broadcasting a single constant scalar from the constant pool // into a vector. @@ -10669,7 +10669,7 @@ case MVT::v32i16: case MVT::v64i8: { // Attempt to lower to a bitmask if we can. Only if not optimizing for size. - bool OptForSize = DAG.getMachineFunction().getFunction().optForSize(); + bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize(); if (!OptForSize) { if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG)) @@ -16985,7 +16985,7 @@ // Bits [3:0] of the constant are the zero mask. The DAG Combiner may // combine either bitwise AND or insert of float 0.0 to set these bits. - bool MinSize = DAG.getMachineFunction().getFunction().optForMinSize(); + bool MinSize = DAG.getMachineFunction().getFunction().hasMinSize(); if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) { // If this is an insertion of 32-bits into the low 32-bits of // a vector, we prefer to generate a blend with immediate rather @@ -17639,7 +17639,7 @@ "Unexpected funnel shift type!"); // Expand slow SHLD/SHRD cases if we are not optimizing for size. - bool OptForSize = DAG.getMachineFunction().getFunction().optForSize(); + bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize(); if (!OptForSize && Subtarget.isSHLDSlow()) return SDValue(); @@ -18898,7 +18898,7 @@ /// implementation, and likely shuffle complexity of the alternate sequence. static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG, const X86Subtarget &Subtarget) { - bool IsOptimizingSize = DAG.getMachineFunction().getFunction().optForSize(); + bool IsOptimizingSize = DAG.getMachineFunction().getFunction().hasOptSize(); bool HasFastHOps = Subtarget.hasFastHorizontalOps(); return !IsSingleSource || IsOptimizingSize || HasFastHOps; } @@ -19379,7 +19379,7 @@ !cast(Op0)->getAPIntValue().isSignedIntN(8)) || (isa(Op1) && !cast(Op1)->getAPIntValue().isSignedIntN(8))) && - !DAG.getMachineFunction().getFunction().optForMinSize() && + !DAG.getMachineFunction().getFunction().hasMinSize() && !Subtarget.isAtom()) { unsigned ExtendOp = isX86CCUnsigned(X86CC) ? 
ISD::ZERO_EXTEND : ISD::SIGN_EXTEND; @@ -19553,7 +19553,7 @@ } else { // Use BT if the immediate can't be encoded in a TEST instruction or we // are optimizing for size and the immedaite won't fit in a byte. - bool OptForSize = DAG.getMachineFunction().getFunction().optForSize(); + bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize(); if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) && isPowerOf2_64(AndRHSVal)) { Src = AndLHS; @@ -35993,7 +35993,7 @@ // pmulld is supported since SSE41. It is better to use pmulld // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than // the expansion. - bool OptForMinSize = DAG.getMachineFunction().getFunction().optForMinSize(); + bool OptForMinSize = DAG.getMachineFunction().getFunction().hasMinSize(); if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow())) return SDValue(); @@ -36301,7 +36301,7 @@ if (!MulConstantOptimization) return SDValue(); // An imul is usually smaller than the alternative sequence. - if (DAG.getMachineFunction().getFunction().optForMinSize()) + if (DAG.getMachineFunction().getFunction().hasMinSize()) return SDValue(); if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) @@ -37720,7 +37720,7 @@ return SDValue(); // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c) - bool OptForSize = DAG.getMachineFunction().getFunction().optForSize(); + bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize(); unsigned Bits = VT.getScalarSizeInBits(); // SHLD/SHRD instructions have lower register pressure, but on some @@ -39999,7 +39999,7 @@ // If we have to respect NaN inputs, this takes at least 3 instructions. // Favor a library call when operating on a scalar and minimizing code size. - if (!VT.isVector() && DAG.getMachineFunction().getFunction().optForMinSize()) + if (!VT.isVector() && DAG.getMachineFunction().getFunction().hasMinSize()) return SDValue(); EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), Index: llvm/lib/Target/X86/X86InstrInfo.cpp =================================================================== --- llvm/lib/Target/X86/X86InstrInfo.cpp +++ llvm/lib/Target/X86/X86InstrInfo.cpp @@ -1453,7 +1453,7 @@ case X86::VBLENDPDrri: case X86::VBLENDPSrri: // If we're optimizing for size, try to use MOVSD/MOVSS. - if (MI.getParent()->getParent()->getFunction().optForSize()) { + if (MI.getParent()->getParent()->getFunction().hasOptSize()) { unsigned Mask, Opc; switch (MI.getOpcode()) { default: llvm_unreachable("Unreachable!"); @@ -4820,14 +4820,14 @@ // For CPUs that favor the register form of a call or push, // do not fold loads into calls or pushes, unless optimizing for size // aggressively. - if (isSlowTwoMemOps && !MF.getFunction().optForMinSize() && + if (isSlowTwoMemOps && !MF.getFunction().hasMinSize() && (MI.getOpcode() == X86::CALL32r || MI.getOpcode() == X86::CALL64r || MI.getOpcode() == X86::PUSH16r || MI.getOpcode() == X86::PUSH32r || MI.getOpcode() == X86::PUSH64r)) return nullptr; // Avoid partial and undef register update stalls unless optimizing for size. - if (!MF.getFunction().optForSize() && + if (!MF.getFunction().hasOptSize() && (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) || shouldPreventUndefRegUpdateMemFold(MF, MI))) return nullptr; @@ -4995,7 +4995,7 @@ return nullptr; // Avoid partial and undef register update stalls unless optimizing for size. 
- if (!MF.getFunction().optForSize() && + if (!MF.getFunction().hasOptSize() && (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) || shouldPreventUndefRegUpdateMemFold(MF, MI))) return nullptr; @@ -5195,7 +5195,7 @@ if (NoFusing) return nullptr; // Avoid partial and undef register update stalls unless optimizing for size. - if (!MF.getFunction().optForSize() && + if (!MF.getFunction().hasOptSize() && (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) || shouldPreventUndefRegUpdateMemFold(MF, MI))) return nullptr; Index: llvm/lib/Target/X86/X86InstrInfo.td =================================================================== --- llvm/lib/Target/X86/X86InstrInfo.td +++ llvm/lib/Target/X86/X86InstrInfo.td @@ -916,12 +916,12 @@ // the Function object through the Subtarget and objections were raised // to that (see post-commit review comments for r301750). let RecomputePerFunction = 1 in { - def OptForSize : Predicate<"MF->getFunction().optForSize()">; - def OptForMinSize : Predicate<"MF->getFunction().optForMinSize()">; - def OptForSpeed : Predicate<"!MF->getFunction().optForSize()">; + def OptForSize : Predicate<"MF->getFunction().hasOptSize()">; + def OptForMinSize : Predicate<"MF->getFunction().hasMinSize()">; + def OptForSpeed : Predicate<"!MF->getFunction().hasOptSize()">; def UseIncDec : Predicate<"!Subtarget->slowIncDec() || " - "MF->getFunction().optForSize()">; - def NoSSE41_Or_OptForSize : Predicate<"MF->getFunction().optForSize() || " + "MF->getFunction().hasOptSize()">; + def NoSSE41_Or_OptForSize : Predicate<"MF->getFunction().hasOptSize() || " "!Subtarget->hasSSE41()">; } Index: llvm/lib/Target/X86/X86OptimizeLEAs.cpp =================================================================== --- llvm/lib/Target/X86/X86OptimizeLEAs.cpp +++ llvm/lib/Target/X86/X86OptimizeLEAs.cpp @@ -700,7 +700,7 @@ // Remove redundant address calculations. Do it only for -Os/-Oz since only // a code size gain is expected from this part of the pass. - if (MF.getFunction().optForSize()) + if (MF.getFunction().hasOptSize()) Changed |= removeRedundantAddrCalc(LEAs); } Index: llvm/lib/Target/X86/X86PadShortFunction.cpp =================================================================== --- llvm/lib/Target/X86/X86PadShortFunction.cpp +++ llvm/lib/Target/X86/X86PadShortFunction.cpp @@ -97,7 +97,7 @@ if (skipFunction(MF.getFunction())) return false; - if (MF.getFunction().optForSize()) + if (MF.getFunction().hasOptSize()) return false; if (!MF.getSubtarget().padShortFunctions()) Index: llvm/lib/Target/X86/X86SelectionDAGInfo.cpp =================================================================== --- llvm/lib/Target/X86/X86SelectionDAGInfo.cpp +++ llvm/lib/Target/X86/X86SelectionDAGInfo.cpp @@ -248,7 +248,7 @@ Repeats.AVT = Subtarget.is64Bit() ? MVT::i64 : MVT::i32; if (Repeats.BytesLeft() > 0 && - DAG.getMachineFunction().getFunction().optForMinSize()) { + DAG.getMachineFunction().getFunction().hasMinSize()) { // When aggressively optimizing for size, avoid generating the code to // handle BytesLeft. 
Repeats.AVT = MVT::i8; Index: llvm/lib/Transforms/IPO/FunctionAttrs.cpp =================================================================== --- llvm/lib/Transforms/IPO/FunctionAttrs.cpp +++ llvm/lib/Transforms/IPO/FunctionAttrs.cpp @@ -1366,7 +1366,7 @@ bool HasUnknownCall = false; for (LazyCallGraph::Node &N : C) { Function &F = N.getFunction(); - if (F.optForNone() || F.hasFnAttribute(Attribute::Naked)) { + if (F.hasOptNone() || F.hasFnAttribute(Attribute::Naked)) { // Treat any function we're trying not to optimize as if it were an // indirect call and omit it from the node set used below. HasUnknownCall = true; @@ -1439,7 +1439,7 @@ bool ExternalNode = false; for (CallGraphNode *I : SCC) { Function *F = I->getFunction(); - if (!F || F->optForNone() || F->hasFnAttribute(Attribute::Naked)) { + if (!F || F->hasOptNone() || F->hasFnAttribute(Attribute::Naked)) { // External node or function we're trying not to optimize - we both avoid // transform them and avoid leveraging information they provide. ExternalNode = true; Index: llvm/lib/Transforms/IPO/HotColdSplitting.cpp =================================================================== --- llvm/lib/Transforms/IPO/HotColdSplitting.cpp +++ llvm/lib/Transforms/IPO/HotColdSplitting.cpp @@ -149,7 +149,7 @@ /// module has profile data), set entry count to 0 to ensure treated as cold. /// Return true if the function is changed. static bool markFunctionCold(Function &F, bool UpdateEntryCount = false) { - assert(!F.optForNone() && "Can't mark this cold"); + assert(!F.hasOptNone() && "Can't mark this cold"); bool Changed = false; if (!F.hasFnAttribute(Attribute::Cold)) { F.addFnAttr(Attribute::Cold); @@ -673,7 +673,7 @@ continue; // Do not modify `optnone` functions. - if (F.optForNone()) + if (F.hasOptNone()) continue; // Detect inherently cold functions and mark them as such. Index: llvm/lib/Transforms/IPO/InferFunctionAttrs.cpp =================================================================== --- llvm/lib/Transforms/IPO/InferFunctionAttrs.cpp +++ llvm/lib/Transforms/IPO/InferFunctionAttrs.cpp @@ -25,7 +25,7 @@ for (Function &F : M.functions()) // We only infer things using the prototype and the name; we don't need // definitions. 
- if (F.isDeclaration() && !F.optForNone()) + if (F.isDeclaration() && !F.hasOptNone()) Changed |= inferLibFuncAttributes(F, TLI); return Changed; Index: llvm/lib/Transforms/IPO/Inliner.cpp =================================================================== --- llvm/lib/Transforms/IPO/Inliner.cpp +++ llvm/lib/Transforms/IPO/Inliner.cpp @@ -973,7 +973,7 @@ LazyCallGraph::Node &N = *CG.lookup(F); if (CG.lookupSCC(N) != C) continue; - if (F.optForNone()) { + if (F.hasOptNone()) { setInlineRemark(Calls[i].first, "optnone attribute"); continue; } Index: llvm/lib/Transforms/InstCombine/InstructionCombining.cpp =================================================================== --- llvm/lib/Transforms/InstCombine/InstructionCombining.cpp +++ llvm/lib/Transforms/InstCombine/InstructionCombining.cpp @@ -3508,7 +3508,7 @@ MadeIRChange |= prepareICWorklistFromFunction(F, DL, &TLI, Worklist); - InstCombiner IC(Worklist, Builder, F.optForMinSize(), ExpensiveCombines, AA, + InstCombiner IC(Worklist, Builder, F.hasMinSize(), ExpensiveCombines, AA, AC, TLI, DT, ORE, DL, LI); IC.MaxArraySizeForCombine = MaxArraySize; Index: llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp =================================================================== --- llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp +++ llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp @@ -393,7 +393,7 @@ } bool Changed = false; for (auto &F : M) { - if (F.isDeclaration() || F.optForNone()) + if (F.isDeclaration() || F.hasOptNone()) continue; std::unique_ptr OwnedORE; Index: llvm/lib/Transforms/Scalar/ConstantHoisting.cpp =================================================================== --- llvm/lib/Transforms/Scalar/ConstantHoisting.cpp +++ llvm/lib/Transforms/Scalar/ConstantHoisting.cpp @@ -548,7 +548,7 @@ ConstCandVecType::iterator &MaxCostItr) { unsigned NumUses = 0; - if(!Entry->getParent()->optForSize() || std::distance(S,E) > 100) { + if(!Entry->getParent()->hasOptSize() || std::distance(S,E) > 100) { for (auto ConstCand = S; ConstCand != E; ++ConstCand) { NumUses += ConstCand->Uses.size(); if (ConstCand->CumulativeCost > MaxCostItr->CumulativeCost) Index: llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp =================================================================== --- llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp +++ llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp @@ -284,7 +284,7 @@ // Determine if code size heuristics need to be applied. 
ApplyCodeSizeHeuristics = - L->getHeader()->getParent()->optForSize() && UseLIRCodeSizeHeurs; + L->getHeader()->getParent()->hasOptSize() && UseLIRCodeSizeHeurs; HasMemset = TLI->has(LibFunc_memset); HasMemsetPattern = TLI->has(LibFunc_memset_pattern16); Index: llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp =================================================================== --- llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp +++ llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp @@ -529,7 +529,7 @@ } if (!Checks.empty() || !LAI.getPSE().getUnionPredicate().isAlwaysTrue()) { - if (L->getHeader()->getParent()->optForSize()) { + if (L->getHeader()->getParent()->hasOptSize()) { LLVM_DEBUG( dbgs() << "Versioning is needed but not allowed when optimizing " "for size.\n"); Index: llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp =================================================================== --- llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp +++ llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp @@ -198,7 +198,7 @@ TTI.getUnrollingPreferences(L, SE, UP); // Apply size attributes - if (L->getHeader()->getParent()->optForSize()) { + if (L->getHeader()->getParent()->hasOptSize()) { UP.Threshold = UP.OptSizeThreshold; UP.PartialThreshold = UP.PartialOptSizeThreshold; } Index: llvm/lib/Transforms/Scalar/LoopUnswitch.cpp =================================================================== --- llvm/lib/Transforms/Scalar/LoopUnswitch.cpp +++ llvm/lib/Transforms/Scalar/LoopUnswitch.cpp @@ -657,7 +657,7 @@ } // Do not do non-trivial unswitch while optimizing for size. - // FIXME: Use Function::optForSize(). + // FIXME: Use Function::hasOptSize(). if (OptimizeForSize || loopHeader->getParent()->hasFnAttribute(Attribute::OptimizeForSize)) return false; Index: llvm/lib/Transforms/Scalar/WarnMissedTransforms.cpp =================================================================== --- llvm/lib/Transforms/Scalar/WarnMissedTransforms.cpp +++ llvm/lib/Transforms/Scalar/WarnMissedTransforms.cpp @@ -92,7 +92,7 @@ WarnMissedTransformationsPass::run(Function &F, FunctionAnalysisManager &AM) { // Do not warn about not applied transformations if optimizations are // disabled. - if (F.optForNone()) + if (F.hasOptNone()) return PreservedAnalyses::all(); auto &ORE = AM.getResult(F); Index: llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp =================================================================== --- llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp +++ llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp @@ -2375,7 +2375,7 @@ // Don't rewrite fputs to fwrite when optimising for size because fwrite // requires more arguments and thus extra MOVs are required. - if (CI->getFunction()->optForSize()) + if (CI->getFunction()->hasOptSize()) return nullptr; // Check if has any use Index: llvm/lib/Transforms/Vectorize/LoopVectorize.cpp =================================================================== --- llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -7162,7 +7162,7 @@ // Check the function attributes to find out if this function should be // optimized for size. bool OptForSize = - Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize(); + Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->hasOptSize(); // Plan how to best vectorize, return the best VF and its cost. const VectorizationFactor VF = LVP.planInVPlanNativePath(OptForSize, UserVF); @@ -7245,7 +7245,7 @@ // Check the function attributes to find out if this function should be // optimized for size. 
bool OptForSize = - Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize(); + Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->hasOptSize(); // Entrance to the VPlan-native vectorization path. Outer loops are processed // here. They may require CFG and instruction level transformations before