diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp
--- a/llvm/lib/Analysis/InlineCost.cpp
+++ b/llvm/lib/Analysis/InlineCost.cpp
@@ -93,11 +93,12 @@
              "exceeds the threshold."));
 
 namespace {
-
+class DefaultCallAnalyzer;
 class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
   typedef InstVisitor<CallAnalyzer, bool> Base;
   friend class InstVisitor<CallAnalyzer, bool>;
 
+protected:
   /// The TargetTransformInfo available for this compilation.
   const TargetTransformInfo &TTI;
 
@@ -124,20 +125,27 @@
   /// easily cacheable. Instead, use the cover function paramHasAttr.
   CallBase &CandidateCall;
 
-  /// Tunable parameters that control the analysis.
-  const InlineParams &Params;
-
-  /// Upper bound for the inlining cost. Bonuses are being applied to account
-  /// for speculative "expected profit" of the inlining decision.
-  int Threshold;
-
-  /// Inlining cost measured in abstract units, accounts for all the
-  /// instructions expected to be executed for a given function invocation.
-  /// Instructions that are statically proven to be dead based on call-site
-  /// arguments are not counted here.
-  int Cost = 0;
+  bool SingleBB = true;
 
-  bool ComputeFullInlineCost;
+  /// Extension points for handling callsite features.
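+  /// analyze() drives these hooks: OnAnalysisStart() fires once up front,
+  /// OnInstructionAnalyzed() after each visited instruction, ShouldStop()
+  /// before each block is taken off the worklist, OnBlockAnalyzed() after
+  /// each completed block, and FinalizeAnalysis() once the walk is done.
+  /// The remaining hooks fire as the corresponding callee features are
+  /// encountered.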
+  virtual void OnBlockAnalyzed(const BasicBlock *BB) = 0;
+  virtual InlineResult FinalizeAnalysis() = 0;
+  virtual bool ShouldStop() = 0;
+  virtual InlineResult OnAnalysisStart() = 0;
+  virtual InlineResult OnInstructionAnalyzed() = 0;
+  virtual void OnDisableSROA(int value) = 0;
+  virtual void OnDisableLoadElimination() = 0;
+  virtual void OnCallPenalty() = 0;
+  virtual void OnLoadEliminationOpportunity() = 0;
+  virtual void OnCallArgumentSetup(const CallBase &Call) = 0;
+  virtual void OnLoadRelativeIntrinsic() = 0;
+  virtual void OnLoweredCall(const CallBase &) = 0;
+  virtual void OnJumpTable(unsigned) = 0;
+  virtual void OnSwitch(unsigned) = 0;
+  virtual bool OnCaseCluster(unsigned) = 0;
+  virtual void OnCommonInstruction() = 0;
+  virtual void OnLoops(unsigned) = 0;
+  virtual bool HandleIndirectCall(Function &, CallBase &) = 0;
 
   bool IsCallerRecursive = false;
   bool IsRecursiveCall = false;
@@ -149,20 +157,11 @@
   bool HasUninlineableIntrinsic = false;
   bool InitsVargArgs = false;
 
-  /// Attempt to evaluate indirect calls to boost its inline cost.
-  bool BoostIndirectCalls;
-
   /// Number of bytes allocated statically by the callee.
   uint64_t AllocatedSize = 0;
   unsigned NumInstructions = 0;
   unsigned NumVectorInstructions = 0;
 
-  /// Bonus to be applied when percentage of vector instructions in callee is
-  /// high (see more details in updateThreshold).
-  int VectorBonus = 0;
-  /// Bonus to be applied when the callee has only one reachable basic block.
-  int SingleBBBonus = 0;
-
   /// While we walk the potentially-inlined instructions, we build up and
   /// maintain a mapping of simplified values specific to this callsite. The
   /// idea is to propagate any special information we have about arguments to
@@ -196,7 +195,6 @@
   /// loads.
   bool EnableLoadElimination;
   SmallPtrSet<Value *, 16> LoadAddrSet;
-  int LoadEliminationCost = 0;
 
   // Custom simplification helper routines.
   bool isAllocaDerivedArg(Value *V);
@@ -227,32 +225,13 @@
   /// inlined through this particular callsite.
   bool isKnownNonNullInCallee(Value *V);
 
-  /// Update Threshold based on callsite properties such as callee
-  /// attributes and callee hotness for PGO builds. The Callee is explicitly
-  /// passed to support analyzing indirect calls whose target is inferred by
-  /// analysis.
-  void updateThreshold(CallBase &Call, Function &Callee);
-
   /// Return true if size growth is allowed when inlining the callee at \p Call.
   bool allowSizeGrowth(CallBase &Call);
 
-  /// Return true if \p Call is a cold callsite.
-  bool isColdCallSite(CallBase &Call, BlockFrequencyInfo *CallerBFI);
-
-  /// Return a higher threshold if \p Call is a hot callsite.
-  Optional<int> getHotCallSiteThreshold(CallBase &Call,
-                                        BlockFrequencyInfo *CallerBFI);
-
   // Custom analysis routines.
   InlineResult analyzeBlock(BasicBlock *BB,
                             SmallPtrSetImpl<const Value *> &EphValues);
 
-  /// Handle a capped 'int' increment for Cost.
-  void addCost(int64_t Inc, int64_t UpperBound = INT_MAX) {
-    assert(UpperBound > 0 && UpperBound <= INT_MAX && "invalid upper bound");
-    Cost = (int)std::min(UpperBound, Cost + Inc);
-  }
-
   // Disable several entry points to the visitor so we don't accidentally use
   // them by declaring but not defining them here.
   void visit(Module *);
@@ -298,20 +277,13 @@
                std::function<AssumptionCache &(Function &)> &GetAssumptionCache,
                Optional<function_ref<BlockFrequencyInfo &(Function &)>> &GetBFI,
                ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE,
-               Function &Callee, CallBase &Call, const InlineParams &Params,
-               bool BoostIndirect = true)
+               Function &Callee, CallBase &Call)
       : TTI(TTI), GetAssumptionCache(GetAssumptionCache), GetBFI(GetBFI),
         PSI(PSI), F(Callee), DL(F.getParent()->getDataLayout()), ORE(ORE),
-        CandidateCall(Call), Params(Params), Threshold(Params.DefaultThreshold),
-        ComputeFullInlineCost(OptComputeFullInlineCost ||
-                              Params.ComputeFullInlineCost || ORE),
-        BoostIndirectCalls(BoostIndirect), EnableLoadElimination(true) {}
+        CandidateCall(Call), EnableLoadElimination(true) {}
 
   InlineResult analyze();
 
-  int getThreshold() { return Threshold; }
-  int getCost() { return Cost; }
-
   // Keep a bunch of stats about the cost savings found so we can print them
   // out when debugging.
   unsigned NumConstantArgs = 0;
@@ -326,6 +298,211 @@
   void dump();
 };
 
+class DefaultCallAnalyzer : public CallAnalyzer {
+  const int CostUpperBound = INT_MAX - InlineConstants::InstrCost - 1;
+  const bool ComputeFullInlineCost;
+  int LoadEliminationCost = 0;
+
+  /// Bonus to be applied when percentage of vector instructions in callee is
+  /// high (see more details in updateThreshold).
+  int VectorBonus = 0;
+  /// Bonus to be applied when the callee has only one reachable basic block.
+  int SingleBBBonus = 0;
+
+  /// Tunable parameters that control the analysis.
+  const InlineParams &Params;
+
+  /// Upper bound for the inlining cost. Bonuses are being applied to account
+  /// for speculative "expected profit" of the inlining decision.
+  int Threshold = 0;
+
+  /// Attempt to evaluate indirect calls to boost their inline cost.
+  const bool BoostIndirectCalls;
+
+  /// Inlining cost measured in abstract units; it accounts for all the
+  /// instructions expected to be executed for a given function invocation.
+  /// Instructions that are statically proven to be dead based on call-site
+  /// arguments are not counted here.
+  int Cost = 0;
+
+  /// Return true if \p Call is a cold callsite.
+  bool isColdCallSite(CallBase &Call, BlockFrequencyInfo *CallerBFI);
+
+  /// Update Threshold based on callsite properties such as callee
+  /// attributes and callee hotness for PGO builds. The Callee is explicitly
+  /// passed to support analyzing indirect calls whose target is inferred by
+  /// analysis.
+  void updateThreshold(CallBase &Call, Function &Callee);
+  /// Return a higher threshold if \p Call is a hot callsite.
+  Optional<int> getHotCallSiteThreshold(CallBase &Call,
+                                        BlockFrequencyInfo *CallerBFI);
+
+  /// Handle a capped 'int' increment for Cost.
+  void addCost(int64_t Inc, int64_t UpperBound = INT_MAX) {
+    assert(UpperBound > 0 && UpperBound <= INT_MAX && "invalid upper bound");
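+    // Saturating add: e.g. with Cost == 95, addCost(10, 100) leaves Cost at
+    // 100 rather than 105.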
+    Cost = (int)std::min(UpperBound, Cost + Inc);
+  }
+
+  void OnDisableSROA(int C) override {
+    addCost(C);
+    SROACostSavings -= C;
+    SROACostSavingsLost += C;
+  }
+  void OnDisableLoadElimination() override {
+    addCost(LoadEliminationCost);
+    LoadEliminationCost = 0;
+  }
+  void OnCallPenalty() override { addCost(InlineConstants::CallPenalty); }
+  void OnCallArgumentSetup(const CallBase &Call) override {
+    addCost(Call.arg_size() * InlineConstants::InstrCost);
+  }
+  void OnLoadRelativeIntrinsic() override {
+    addCost(3 * InlineConstants::InstrCost);
+  }
+  void OnLoweredCall(const CallBase &Call) override {
+    addCost(Call.arg_size() * InlineConstants::InstrCost);
+  }
+  void OnJumpTable(unsigned JumpTableSize) override {
+    // Maximum valid cost increased in this function.
+
+    int64_t JTCost = (int64_t)JumpTableSize * InlineConstants::InstrCost +
+                     4 * InlineConstants::InstrCost;
+
+    addCost(JTCost, (int64_t)CostUpperBound);
+  }
+  bool OnCaseCluster(unsigned NumCaseCluster) override {
+    // Let OnSwitch model the cost of larger switches.
+    if (NumCaseCluster > 3)
+      return true;
+    // Suppose a comparison includes one compare and one conditional branch.
+    addCost(NumCaseCluster * 2 * InlineConstants::InstrCost);
+    return false;
+  }
+
+  void OnSwitch(unsigned NumCaseCluster) override {
+    int64_t ExpectedNumberOfCompare = 3 * (int64_t)NumCaseCluster / 2 - 1;
+    int64_t SwitchCost =
+        ExpectedNumberOfCompare * 2 * InlineConstants::InstrCost;
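+    // For example, 8 case clusters give 3 * 8 / 2 - 1 = 11 expected
+    // compares, each modeled as a compare plus a conditional branch.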
+
+    addCost(SwitchCost, (int64_t)CostUpperBound);
+  }
+  void OnCommonInstruction() override { addCost(InlineConstants::InstrCost); }
+  void OnLoops(unsigned NumLoops) override {
+    addCost(NumLoops * InlineConstants::CallPenalty);
+  }
+
+  bool HandleIndirectCall(Function &F, CallBase &Call) override {
+    if (!BoostIndirectCalls)
+      return false;
+
+    auto IndirectCallParams = Params;
+    IndirectCallParams.DefaultThreshold =
+        InlineConstants::IndirectCallThreshold;
+    DefaultCallAnalyzer CA(TTI, GetAssumptionCache, GetBFI, PSI, ORE, F, Call,
+                           IndirectCallParams, false);
+    if (CA.analyze()) {
+      // We were able to inline the indirect call! Subtract the cost from the
+      // threshold to get the bonus we want to apply, but don't go below zero.
+      Cost -= std::max(0, CA.getThreshold() - CA.getCost());
+    }
+    return true;
+  }
+
+  void OnBlockAnalyzed(const BasicBlock *BB) override {
+    auto *TI = BB->getTerminator();
+    // If we had any successors at this point, then post-inlining is likely to
+    // have them as well. Note that we assume any basic blocks which existed
+    // due to branches or switches which folded above will also fold after
+    // inlining.
+    if (SingleBB && TI->getNumSuccessors() > 1) {
+      // Take off the bonus we applied to the threshold.
+      Threshold -= SingleBBBonus;
+      SingleBB = false;
+    }
+  }
+  InlineResult FinalizeAnalysis() override {
+    // We applied the maximum possible vector bonus at the beginning. Now,
+    // subtract the excess bonus, if any, from the Threshold before
+    // comparing against Cost.
+    if (NumVectorInstructions <= NumInstructions / 10)
+      Threshold -= VectorBonus;
+    else if (NumVectorInstructions <= NumInstructions / 2)
+      Threshold -= VectorBonus / 2;
+
+    return Cost < std::max(1, Threshold);
+  }
+  bool ShouldStop() override {
+    return Cost >= Threshold && !ComputeFullInlineCost;
+  }
+
+  InlineResult OnInstructionAnalyzed() override {
+    // Check if we've passed the maximum possible threshold so we don't spin in
+    // huge basic blocks that will never inline.
+    if (Cost >= Threshold && !ComputeFullInlineCost)
+      return false;
+    return true;
+  }
+  void OnLoadEliminationOpportunity() override {
+    LoadEliminationCost += InlineConstants::InstrCost;
+  }
+
+  InlineResult OnAnalysisStart() override {
+    // Perform some tweaks to the cost and threshold based on the direct
+    // callsite information.
+
+    // We want to more aggressively inline vector-dense kernels, so up the
+    // threshold, and we'll lower it if the % of vector instructions gets too
+    // low. Note that these bonuses are somewhat arbitrary and evolved over
+    // time by accident as much as because they are principled bonuses.
+    //
+    // FIXME: It would be nice to remove all such bonuses. At least it would be
+    // nice to base the bonus values on something more scientific.
+    assert(NumInstructions == 0);
+    assert(NumVectorInstructions == 0);
+
+    // Update the threshold based on callsite properties
+    updateThreshold(CandidateCall, F);
+
+    // While Threshold depends on commandline options that can take negative
+    // values, we want to enforce the invariant that the computed threshold and
+    // bonuses are non-negative.
+    assert(Threshold >= 0);
+    assert(SingleBBBonus >= 0);
+    assert(VectorBonus >= 0);
+
+    // Speculatively apply all possible bonuses to Threshold. If cost exceeds
+    // this Threshold at any time, and cost cannot decrease, we can stop
+    // processing the rest of the function body.
+    Threshold += (SingleBBBonus + VectorBonus);
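+    // Both bonuses are provisional: OnBlockAnalyzed() retracts SingleBBBonus
+    // once a second reachable block is seen, and FinalizeAnalysis() retracts
+    // some or all of VectorBonus if vector density turns out to be low.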
+
+    // Give out bonuses for the callsite, as the instructions setting them up
+    // will be gone after inlining.
+    addCost(-getCallsiteCost(this->CandidateCall, DL));
+
+    // If this function uses the coldcc calling convention, prefer not to inline
+    // it.
+    if (F.getCallingConv() == CallingConv::Cold)
+      Cost += InlineConstants::ColdccPenalty;
+
+    // Check if we're done. This can happen due to bonuses and penalties.
+    if (Cost >= Threshold && !ComputeFullInlineCost)
+      return "high cost";
+
+    return true;
+  }
+
+public:
+  DefaultCallAnalyzer(
+      const TargetTransformInfo &TTI,
+      std::function<AssumptionCache &(Function &)> &GetAssumptionCache,
+      Optional<function_ref<BlockFrequencyInfo &(Function &)>> &GetBFI,
+      ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE, Function &Callee,
+      CallBase &Call, const InlineParams &Params, bool BoostIndirect = true)
+      : CallAnalyzer(TTI, GetAssumptionCache, GetBFI, PSI, ORE, Callee, Call),
+        ComputeFullInlineCost(OptComputeFullInlineCost ||
+                              Params.ComputeFullInlineCost || ORE),
+        Params(Params), Threshold(Params.DefaultThreshold),
+        BoostIndirectCalls(BoostIndirect) {}
+  void dump();
+
+  int getThreshold() { return Threshold; }
+  int getCost() { return Cost; }
+};
 } // namespace
 
 /// Test whether the given value is an Alloca-derived function argument.
@@ -356,9 +533,7 @@
 void CallAnalyzer::disableSROA(DenseMap<Value *, int>::iterator CostIt) {
   // If we're no longer able to perform SROA we need to undo its cost savings
   // and prevent subsequent analysis.
-  addCost(CostIt->second);
-  SROACostSavings -= CostIt->second;
-  SROACostSavingsLost += CostIt->second;
+  OnDisableSROA(CostIt->second);
   SROAArgCosts.erase(CostIt);
   disableLoadElimination();
 }
@@ -380,8 +555,7 @@
 void CallAnalyzer::disableLoadElimination() {
   if (EnableLoadElimination) {
-    addCost(LoadEliminationCost);
-    LoadEliminationCost = 0;
+    OnDisableLoadElimination();
     EnableLoadElimination = false;
   }
 }
@@ -737,7 +911,7 @@
   case Instruction::FPToUI:
   case Instruction::FPToSI:
     if (TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive)
-      addCost(InlineConstants::CallPenalty);
+      OnCallPenalty();
    break;
  default:
    break;
@@ -810,8 +984,8 @@
   return true;
 }
 
-bool CallAnalyzer::isColdCallSite(CallBase &Call,
-                                  BlockFrequencyInfo *CallerBFI) {
+bool DefaultCallAnalyzer::isColdCallSite(CallBase &Call,
+                                         BlockFrequencyInfo *CallerBFI) {
   // If global profile summary is available, then callsite's coldness is
   // determined based on that.
   if (PSI && PSI->hasProfileSummary())
@@ -834,8 +1008,8 @@
 }
 
 Optional<int>
-CallAnalyzer::getHotCallSiteThreshold(CallBase &Call,
-                                      BlockFrequencyInfo *CallerBFI) {
+DefaultCallAnalyzer::getHotCallSiteThreshold(CallBase &Call,
+                                             BlockFrequencyInfo *CallerBFI) {
 
   // If global profile summary is available, then callsite's hotness is
   // determined based on that.
@@ -862,7 +1036,7 @@
   return None;
 }
 
-void CallAnalyzer::updateThreshold(CallBase &Call, Function &Callee) {
+void DefaultCallAnalyzer::updateThreshold(CallBase &Call, Function &Callee) {
   // If no size growth is allowed for this inlining, set Threshold to 0.
   if (!allowSizeGrowth(Call)) {
     Threshold = 0;
@@ -1100,7 +1274,7 @@
   if (I.getType()->isFloatingPointTy() &&
       TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive &&
       !match(&I, m_FNeg(m_Value())))
-    addCost(InlineConstants::CallPenalty);
+    OnCallPenalty();
 
   return false;
 }
@@ -1143,7 +1317,7 @@
   // eliminated.
   if (EnableLoadElimination &&
       !LoadAddrSet.insert(I.getPointerOperand()).second && I.isUnordered()) {
-    LoadEliminationCost += InlineConstants::InstrCost;
+    OnLoadEliminationOpportunity();
     return true;
   }
 
@@ -1252,7 +1426,7 @@
   if (!F) {
     // Pay the price of the argument setup. We account for the average 1
    // instruction per call argument setup here.
-    addCost(Call.arg_size() * InlineConstants::InstrCost);
+    OnCallArgumentSetup(Call);
 
     if (!Call.onlyReadsMemory())
       disableLoadElimination();
@@ -1277,7 +1451,7 @@
 
     case Intrinsic::load_relative:
       // This is normally lowered to 4 LLVM instructions.
-      addCost(3 * InlineConstants::InstrCost);
+      OnLoadRelativeIntrinsic();
       return false;
 
     case Intrinsic::memset:
@@ -1305,27 +1479,15 @@
 
   if (TTI.isLoweredToCall(F)) {
     // We account for the average 1 instruction per call argument setup here.
-    addCost(Call.arg_size() * InlineConstants::InstrCost);
+    OnLoweredCall(Call);
 
     // If we have a constant that we are calling as a function, we can peer
     // through it and see the function target. This happens not infrequently
     // during devirtualization and so we want to give it a hefty bonus for
     // inlining, but cap that bonus in the event that inlining wouldn't pan out.
     // Pretend to inline the function, with a custom threshold.
-    if (IsIndirectCall && BoostIndirectCalls) {
-      auto IndirectCallParams = Params;
-      IndirectCallParams.DefaultThreshold =
-          InlineConstants::IndirectCallThreshold;
-      CallAnalyzer CA(TTI, GetAssumptionCache, GetBFI, PSI, ORE, *F, Call,
-                      IndirectCallParams, false);
-      if (CA.analyze()) {
-        // We were able to inline the indirect call! Subtract the cost from the
-        // threshold to get the bonus we want to apply, but don't go below zero.
-        Cost -= std::max(0, CA.getThreshold() - CA.getCost());
-      }
-    } else
-      // Otherwise simply add the cost for merely making the call.
-      addCost(InlineConstants::CallPenalty);
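+    // HandleIndirectCall returns false when indirect-call boosting is
+    // disabled; direct calls and unboosted indirect calls both fall through
+    // to the plain call penalty.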
+    if (!IsIndirectCall || !HandleIndirectCall(*F, Call))
+      OnCallPenalty();
   }
 
   if (!(Call.onlyReadsMemory() || (IsIndirectCall && F->onlyReadsMemory())))
     disableLoadElimination();
@@ -1452,9 +1614,6 @@
   // inlining those. It will prevent inlining in cases where the optimization
   // does not (yet) fire.
 
-  // Maximum valid cost increased in this function.
-  int CostUpperBound = INT_MAX - InlineConstants::InstrCost - 1;
-
   unsigned JumpTableSize = 0;
   BlockFrequencyInfo *BFI = GetBFI ? &((*GetBFI)(F)) : nullptr;
   unsigned NumCaseCluster =
@@ -1463,10 +1622,7 @@
   // If suitable for a jump table, consider the cost for the table size and
   // branch to destination.
   if (JumpTableSize) {
-    int64_t JTCost = (int64_t)JumpTableSize * InlineConstants::InstrCost +
-                     4 * InlineConstants::InstrCost;
-
-    addCost(JTCost, (int64_t)CostUpperBound);
+    OnJumpTable(JumpTableSize);
     return false;
   }
 
@@ -1485,16 +1641,10 @@
   // Considering comparisons from leaf and non-leaf nodes, we can estimate the
   // number of comparisons in a simple closed form :
   // n + n / 2 - 1 = n * 3 / 2 - 1
-  if (NumCaseCluster <= 3) {
-    // Suppose a comparison includes one compare and one conditional branch.
-    addCost(NumCaseCluster * 2 * InlineConstants::InstrCost);
+  if (!OnCaseCluster(NumCaseCluster))
     return false;
-  }
-
-  int64_t ExpectedNumberOfCompare = 3 * (int64_t)NumCaseCluster / 2 - 1;
-  int64_t SwitchCost = ExpectedNumberOfCompare * 2 * InlineConstants::InstrCost;
 
-  addCost(SwitchCost, (int64_t)CostUpperBound);
+  OnSwitch(NumCaseCluster);
   return false;
 }
 
@@ -1587,7 +1737,7 @@
     if (Base::visit(&*I))
       ++NumInstructionsSimplified;
     else
-      addCost(InlineConstants::InstrCost);
+      OnCommonInstruction();
 
     using namespace ore;
     // If the visit this instruction detected an uninlinable pattern, abort.
@@ -1632,9 +1782,7 @@
       return IR;
     }
 
-    // Check if we've passed the maximum possible threshold so we don't spin in
-    // huge basic blocks that will never inline.
-    if (Cost >= Threshold && !ComputeFullInlineCost)
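+    // Hook: the cost model decides whether analysis of an oversized block
+    // should be abandoned early (see OnInstructionAnalyzed above).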
+    if (!OnInstructionAnalyzed())
       return false;
   }
 
@@ -1728,46 +1876,9 @@
 InlineResult CallAnalyzer::analyze() {
   ++NumCallsAnalyzed;
 
-  // Perform some tweaks to the cost and threshold based on the direct
-  // callsite information.
-
-  // We want to more aggressively inline vector-dense kernels, so up the
-  // threshold, and we'll lower it if the % of vector instructions gets too
-  // low. Note that these bonuses are some what arbitrary and evolved over time
-  // by accident as much as because they are principled bonuses.
-  //
-  // FIXME: It would be nice to remove all such bonuses. At least it would be
-  // nice to base the bonus values on something more scientific.
-  assert(NumInstructions == 0);
-  assert(NumVectorInstructions == 0);
-
-  // Update the threshold based on callsite properties
-  updateThreshold(CandidateCall, F);
-
-  // While Threshold depends on commandline options that can take negative
-  // values, we want to enforce the invariant that the computed threshold and
-  // bonuses are non-negative.
-  assert(Threshold >= 0);
-  assert(SingleBBBonus >= 0);
-  assert(VectorBonus >= 0);
-
-  // Speculatively apply all possible bonuses to Threshold. If cost exceeds
-  // this Threshold any time, and cost cannot decrease, we can stop processing
-  // the rest of the function body.
-  Threshold += (SingleBBBonus + VectorBonus);
-
-  // Give out bonuses for the callsite, as the instructions setting them up
-  // will be gone after inlining.
-  addCost(-getCallsiteCost(CandidateCall, DL));
-
-  // If this function uses the coldcc calling convention, prefer not to inline
-  // it.
-  if (F.getCallingConv() == CallingConv::Cold)
-    Cost += InlineConstants::ColdccPenalty;
-
-  // Check if we're done. This can happen due to bonuses and penalties.
-  if (Cost >= Threshold && !ComputeFullInlineCost)
-    return "high cost";
+  auto Result = OnAnalysisStart();
+  if (!Result)
+    return Result;
 
   if (F.empty())
     return true;
@@ -1824,12 +1935,12 @@
       BBSetVector;
   BBSetVector BBWorklist;
   BBWorklist.insert(&F.getEntryBlock());
-  bool SingleBB = true;
+  SingleBB = true;
   // Note that we *must not* cache the size, this loop grows the worklist.
   for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
     // Bail out the moment we cross the threshold. This means we'll under-count
     // the cost, but only when undercounting doesn't matter.
-    if (Cost >= Threshold && !ComputeFullInlineCost)
+    if (ShouldStop())
       break;
 
     BasicBlock *BB = BBWorklist[Idx];
@@ -1889,15 +2000,7 @@
          ++TIdx)
       BBWorklist.insert(TI->getSuccessor(TIdx));
 
-    // If we had any successors at this point, than post-inlining is likely to
-    // have them as well. Note that we assume any basic blocks which existed
-    // due to branches or switches which folded above will also fold after
-    // inlining.
-    if (SingleBB && TI->getNumSuccessors() > 1) {
-      // Take off the bonus we applied to the threshold.
-      Threshold -= SingleBBBonus;
-      SingleBB = false;
-    }
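+    // Hook: DefaultCallAnalyzer uses this to retire SingleBBBonus once a
+    // second reachable basic block is seen.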
+    OnBlockAnalyzed(BB);
   }
 
   bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() &&
@@ -1923,23 +2026,15 @@
         continue;
       NumLoops++;
     }
-    addCost(NumLoops * InlineConstants::CallPenalty);
+    OnLoops(NumLoops);
   }
 
-  // We applied the maximum possible vector bonus at the beginning. Now,
-  // subtract the excess bonus, if any, from the Threshold before
-  // comparing against Cost.
-  if (NumVectorInstructions <= NumInstructions / 10)
-    Threshold -= VectorBonus;
-  else if (NumVectorInstructions <= NumInstructions / 2)
-    Threshold -= VectorBonus / 2;
-
-  return Cost < std::max(1, Threshold);
+  return FinalizeAnalysis();
 }
 
 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
 /// Dump stats about this call's analysis.
-LLVM_DUMP_METHOD void CallAnalyzer::dump() {
+LLVM_DUMP_METHOD void DefaultCallAnalyzer::dump() {
 #define DEBUG_PRINT_STAT(x) dbgs() << "  " #x ": " << x << "\n"
   DEBUG_PRINT_STAT(NumConstantArgs);
   DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
@@ -2073,8 +2168,8 @@
   LLVM_DEBUG(llvm::dbgs() << "  Analyzing call of " << Callee->getName()
                           << "... (caller:" << Caller->getName() << ")\n");
 
-  CallAnalyzer CA(CalleeTTI, GetAssumptionCache, GetBFI, PSI, ORE, *Callee,
-                  Call, Params);
+  DefaultCallAnalyzer CA(CalleeTTI, GetAssumptionCache, GetBFI, PSI, ORE,
+                         *Callee, Call, Params);
   InlineResult ShouldInline = CA.analyze();
 
   LLVM_DEBUG(CA.dump());
@@ -2121,15 +2216,16 @@
     switch (Call->getCalledFunction()->getIntrinsicID()) {
     default:
       break;
-      // Disallow inlining of @llvm.icall.branch.funnel because current
-      // backend can't separate call targets from call arguments.
+    // Disallow inlining of @llvm.icall.branch.funnel because current
+    // backend can't separate call targets from call arguments.
    case llvm::Intrinsic::icall_branch_funnel:
      return "disallowed inlining of @llvm.icall.branch.funnel";
-      // Disallow inlining functions that call @llvm.localescape. Doing this
-      // correctly would require major changes to the inliner.
+    // Disallow inlining functions that call @llvm.localescape. Doing this
+    // correctly would require major changes to the inliner.
    case llvm::Intrinsic::localescape:
      return "disallowed inlining of @llvm.localescape";
-      // Disallow inlining of functions that initialize VarArgs with va_start.
+    // Disallow inlining of functions that initialize VarArgs with
+    // va_start.
    case llvm::Intrinsic::vastart:
      return "contains VarArgs initialized with va_start";
    }
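The split above means a second cost model only has to override the hooks; the
traversal, SROA bookkeeping, and simplification machinery stay in the base
class. As a minimal sketch (not part of this patch; the subclass name and its
accessor are invented for illustration, and it assumes only the interface
declared above, including InlineResult's conversion from bool), a size-only
analyzer could look like this:

class SizeOnlyCallAnalyzer : public CallAnalyzer {
  unsigned NumBlocks = 0;

  // Traversal hooks: record shape, never cut the walk short, always succeed.
  void OnBlockAnalyzed(const BasicBlock *) override { ++NumBlocks; }
  InlineResult FinalizeAnalysis() override { return true; }
  bool ShouldStop() override { return false; }
  InlineResult OnAnalysisStart() override { return true; }
  InlineResult OnInstructionAnalyzed() override { return true; }

  // Cost hooks: a size-only analyzer has no cost or threshold to update.
  void OnDisableSROA(int) override {}
  void OnDisableLoadElimination() override {}
  void OnCallPenalty() override {}
  void OnLoadEliminationOpportunity() override {}
  void OnCallArgumentSetup(const CallBase &) override {}
  void OnLoadRelativeIntrinsic() override {}
  void OnLoweredCall(const CallBase &) override {}
  void OnJumpTable(unsigned) override {}
  void OnSwitch(unsigned) override {}
  bool OnCaseCluster(unsigned) override { return true; } // defer to OnSwitch
  void OnCommonInstruction() override {}
  void OnLoops(unsigned) override {}
  bool HandleIndirectCall(Function &, CallBase &) override { return false; }

public:
  using CallAnalyzer::CallAnalyzer;
  unsigned getNumBlocks() const { return NumBlocks; }
  // NumInstructions and NumVectorInstructions are kept by the base class.
};

Whether such an analyzer belongs in-tree is a separate question; the sketch
only shows that the virtual seam introduced here is sufficient to host a cost
model that never touches Threshold or Cost.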