diff --git a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
--- a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
@@ -232,7 +232,9 @@
   size_t getTotalSizeEstimate();
 
   virtual ~DevelopmentModeMLInlineAdvisor();
-  void updateNativeSizeEstimate(int64_t Change) { CurrentNativeSize += Change; }
+  void updateNativeSizeEstimate(int64_t Change) {
+    *CurrentNativeSize += Change;
+  }
   void resetNativeSize(Function *F) {
     FAM.invalidate<InlineSizeEstimatorAnalysis>(*F);
   }
@@ -242,7 +244,7 @@
   std::unique_ptr<MLInlineAdvice>
   getAdviceFromModel(CallBase &CB, OptimizationRemarkEmitter &ORE) override;
 
-  size_t getNativeSizeEstimate(const Function &F) const;
+  Optional<size_t> getNativeSizeEstimate(const Function &F) const;
 
 private:
   bool isLogging() const { return !!Logger; }
@@ -251,8 +253,8 @@
   const bool IsDoingInference;
   std::unique_ptr<TrainingLogger> Logger;
 
-  const int32_t InitialNativeSize;
-  int32_t CurrentNativeSize = 0;
+  const Optional<size_t> InitialNativeSize;
+  Optional<size_t> CurrentNativeSize;
 };
 
 /// A variant of MLInlineAdvice that tracks all non-trivial inlining
@@ -261,9 +263,10 @@
 public:
   LoggingMLInlineAdvice(DevelopmentModeMLInlineAdvisor *Advisor, CallBase &CB,
                         OptimizationRemarkEmitter &ORE, bool Recommendation,
-                        TrainingLogger &Logger, size_t CallerSizeEstimateBefore,
-                        size_t CalleeSizeEstimateBefore, bool DefaultDecision,
-                        bool Mandatory = false)
+                        TrainingLogger &Logger,
+                        Optional<size_t> CallerSizeEstimateBefore,
+                        Optional<size_t> CalleeSizeEstimateBefore,
+                        bool DefaultDecision, bool Mandatory = false)
       : MLInlineAdvice(Advisor, CB, ORE, Recommendation), Logger(Logger),
         CallerSizeEstimateBefore(CallerSizeEstimateBefore),
         CalleeSizeEstimateBefore(CalleeSizeEstimateBefore),
@@ -279,11 +282,12 @@
     MLInlineAdvice::recordInliningImpl();
     getAdvisor()->resetNativeSize(Caller);
     int Reward = std::numeric_limits<int>::max();
-    if (!getAdvisor()->isForcedToStop()) {
-      int NativeSizeAfter = getAdvisor()->getNativeSizeEstimate(*Caller) +
-                            CalleeSizeEstimateBefore;
+    if (InlineSizeEstimatorAnalysis::isEvaluatorRequested() &&
+        !getAdvisor()->isForcedToStop()) {
+      int NativeSizeAfter = *getAdvisor()->getNativeSizeEstimate(*Caller) +
+                            *CalleeSizeEstimateBefore;
       Reward = NativeSizeAfter -
-               (CallerSizeEstimateBefore + CalleeSizeEstimateBefore);
+               (*CallerSizeEstimateBefore + *CalleeSizeEstimateBefore);
       getAdvisor()->updateNativeSizeEstimate(Reward);
     }
     log(Reward, /*Success=*/true);
@@ -292,10 +296,11 @@
   void recordInliningWithCalleeDeletedImpl() override {
     MLInlineAdvice::recordInliningWithCalleeDeletedImpl();
     getAdvisor()->resetNativeSize(Caller);
-    if (!getAdvisor()->isForcedToStop()) {
-      int NativeSizeAfter = getAdvisor()->getNativeSizeEstimate(*Caller);
+    if (InlineSizeEstimatorAnalysis::isEvaluatorRequested() &&
+        !getAdvisor()->isForcedToStop()) {
+      int NativeSizeAfter = *getAdvisor()->getNativeSizeEstimate(*Caller);
       int Reward = NativeSizeAfter -
-                   (CallerSizeEstimateBefore + CalleeSizeEstimateBefore);
+                   (*CallerSizeEstimateBefore + *CalleeSizeEstimateBefore);
       getAdvisor()->updateNativeSizeEstimate(Reward);
       log(Reward, /*Success=*/true);
     }
@@ -324,8 +329,8 @@
 
   static const int64_t NoReward = 0;
   TrainingLogger &Logger;
-  const size_t CallerSizeEstimateBefore;
-  const size_t CalleeSizeEstimateBefore;
+  const Optional<size_t> CallerSizeEstimateBefore;
+  const Optional<size_t> CalleeSizeEstimateBefore;
   const bool DefaultDecision;
   const bool Mandatory;
 };
@@ -448,9 +453,11 @@
   writeRawTensorsAsFeatureLists(
       OutFile, TensorSpec::createSpec<int64_t>(DecisionName, {1}),
      Outputs[0].data(), NumberOfRecords);
-  writeTensorsAsFeatureLists(OutFile,
-                             TensorSpec::createSpec<int64_t>(RewardName, {1}),
-                             Rewards.data(), NumberOfRecords);
+
+  if (InlineSizeEstimatorAnalysis::isEvaluatorRequested())
+    writeTensorsAsFeatureLists(OutFile,
+                               TensorSpec::createSpec<int64_t>(RewardName, {1}),
+                               Rewards.data(), NumberOfRecords);
 
   for (size_t I = 1; I < Outputs.size(); ++I)
     writeRawTensorsAsFeatureLists(OutFile, MUTR->outputSpecs()[I],
@@ -479,8 +486,10 @@
     Logger->print();
 }
 
-size_t
+Optional<size_t>
 DevelopmentModeMLInlineAdvisor::getNativeSizeEstimate(const Function &F) const {
+  if (!InlineSizeEstimatorAnalysis::isEvaluatorRequested())
+    return None;
   auto &R =
       FAM.getResult<InlineSizeEstimatorAnalysis>(const_cast<Function &>(F));
   if (!R) {
@@ -496,6 +505,7 @@
     CallBase &CB, OptimizationRemarkEmitter &ORE) {
   if (!isLogging())
     return MLInlineAdvisor::getMandatoryAdvice(CB, ORE);
+
   return std::make_unique<LoggingMLInlineAdvice>(
       /*Advisor=*/this, /*CB=*/CB, /*ORE=*/ORE, /*Recommendation=*/true,
       /*Logger=*/*Logger,
@@ -524,13 +534,15 @@
 }
 
 size_t DevelopmentModeMLInlineAdvisor::getTotalSizeEstimate() {
+  if (!InlineSizeEstimatorAnalysis::isEvaluatorRequested())
+    return 0;
   size_t Ret = 0;
   for (auto &F : M) {
     if (F.isDeclaration())
       continue;
     if (isFunctionDeleted(&F))
       continue;
-    Ret += getNativeSizeEstimate(F);
+    Ret += *getNativeSizeEstimate(F);
   }
   return Ret;
 }
@@ -642,14 +654,6 @@
     Module &M, ModuleAnalysisManager &MAM,
     std::function<bool(CallBase &)> GetDefaultAdvice) {
   auto &Ctx = M.getContext();
-  if (TrainingLog.empty() !=
-      !InlineSizeEstimatorAnalysis::isEvaluatorRequested()) {
-    Ctx.emitError("For development mode, if training logs are requested, then "
-                  "a size estimator must be available; either that, or neither "
-                  "are specified.");
-    return nullptr;
-  }
-
   std::unique_ptr<MLModelRunner> Runner;
   ModelUnderTrainingRunner *MUTRPtr = nullptr;
   bool IsDoingInference = false;
diff --git a/llvm/test/Transforms/Inline/ML/development-training-log.ll b/llvm/test/Transforms/Inline/ML/development-training-log.ll
--- a/llvm/test/Transforms/Inline/ML/development-training-log.ll
+++ b/llvm/test/Transforms/Inline/ML/development-training-log.ll
@@ -1,8 +1,10 @@
 ; Test that we can produce a log if we have or do not have a model, in development mode.
 ; REQUIRES: have_tf_api
 ; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -ml-inliner-model-under-training=%S/../../../../lib/Analysis/models/inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -S < %s | FileCheck %s
-; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -ml-inliner-model-under-training=%S/../../../../lib/Analysis/models/inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-output-spec-override=%S/Inputs/test_output_spec.json -S < %s | FileCheck %s --check-prefix=EXTRA-OUTPUTS
+; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -ml-inliner-model-under-training=%S/../../../../lib/Analysis/models/inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-output-spec-override=%S/Inputs/test_output_spec.json -S < %s | FileCheck %s --check-prefixes=EXTRA-OUTPUTS,CHECK
 ; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -S < %s | FileCheck %s
+; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -ml-inliner-model-under-training=%S/../../../../lib/Analysis/models/inliner -S < %s | FileCheck %s --check-prefix=NOREWARD
+; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -S < %s | FileCheck %s --check-prefix=NOREWARD
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-pc-linux-gnu"
@@ -50,8 +52,9 @@
 ; CHECK-NEXT: feature: { int64_list: { value: [0] } }
 ; CHECK-NEXT: }
 ; CHECK-NEXT: }
+; NOREWARD-NOT: key: "delta_size" value: {
 ; CHECK-NOT: fake_extra_output
 ; EXTRA-OUTPUTS: key: "fake_extra_output" value: {
 ; EXTRA-OUTPUTS-NEXT: feature: { int64_list: { value: [1] } }
 ; EXTRA-OUTPUTS-NEXT: }
 ; EXTRA-OUTPUTS-NEXT: }
\ No newline at end of file
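
Reviewer note, not part of the patch: below is a minimal standalone sketch of the pattern the change adopts. Size estimates become Optional, and the size-delta reward is computed and logged only when a size estimator (the ir2native model) is available, so a training log of decisions can still be produced without one. The helper name computeInliningReward is a hypothetical stand-in, and std::optional is used where the patch uses llvm::Optional; the real guard is InlineSizeEstimatorAnalysis::isEvaluatorRequested().

// Standalone illustration only (C++17); not LLVM code.
#include <cstdint>
#include <optional>

// Mirrors the patched recordInliningImpl(): when no size estimator is
// available, the inlining decision is still logged, but no size-based
// reward is produced. Hypothetical helper, not an LLVM API.
std::optional<int64_t>
computeInliningReward(bool HaveSizeEstimator,
                      std::optional<int64_t> CallerSizeBefore,
                      std::optional<int64_t> CalleeSizeBefore,
                      int64_t CallerSizeAfter) {
  if (!HaveSizeEstimator)
    return std::nullopt; // analogous to skipping the reward when
                         // isEvaluatorRequested() is false
  // Reward = estimated native size after inlining minus the caller and
  // callee estimates taken before it, as in recordInliningImpl().
  return CallerSizeAfter - (*CallerSizeBefore + *CalleeSizeBefore);
}

A logger following this shape would emit the reward feature list only when the helper returns a value, matching the isEvaluatorRequested() guard added around writeTensorsAsFeatureLists in TrainingLogger::print() and the NOREWARD checks in the updated test.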