diff --git a/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h b/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
new file mode 100644
--- /dev/null
+++ b/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
@@ -0,0 +1,59 @@
+//===- ModelUnderTrainingRunner.h -- 'development' mode runner --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+
+#ifndef LLVM_ANALYSIS_MODELUNDERTRAININGRUNNER_H
+#define LLVM_ANALYSIS_MODELUNDERTRAININGRUNNER_H
+
+#include "llvm/Config/llvm-config.h"
+
+#ifdef LLVM_HAVE_TF_API
+#include "llvm/Analysis/MLModelRunner.h"
+#include "llvm/Analysis/Utils/TFUtils.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// ModelUnderTrainingRunner - training mode implementation. It uses TF C APIs
+/// to dynamically load and evaluate a TF SavedModel
+/// (https://www.tensorflow.org/guide/saved_model). Runtime performance is
+/// sacrificed for ease of use while training.
+class ModelUnderTrainingRunner final : public MLModelRunner {
+public:
+  ModelUnderTrainingRunner(LLVMContext &Ctx, const std::string &ModelPath,
+                           const std::vector<TensorSpec> &InputSpecs,
+                           const std::vector<LoggedFeatureSpec> &OutputSpecs);
+
+  // Disallows copy and assign.
+  ModelUnderTrainingRunner(const ModelUnderTrainingRunner &) = delete;
+  ModelUnderTrainingRunner &
+  operator=(const ModelUnderTrainingRunner &) = delete;
+
+  bool isValid() const { return !!Evaluator; }
+
+  const std::vector<LoggedFeatureSpec> &outputLoggedFeatureSpecs() const {
+    return OutputSpecs;
+  }
+
+  const Optional<TFModelEvaluator::EvaluationResult> &
+  lastEvaluationResult() const {
+    return LastEvaluationResult;
+  }
+
+private:
+  std::unique_ptr<TFModelEvaluator> Evaluator;
+  const std::vector<LoggedFeatureSpec> OutputSpecs;
+  Optional<TFModelEvaluator::EvaluationResult> LastEvaluationResult;
+  void *evaluateUntyped() override;
+  void *getTensorUntyped(size_t Index) override;
+};
+
+} // namespace llvm
+#endif // define(LLVM_HAVE_TF_API)
+#endif // LLVM_ANALYSIS_MODELUNDERTRAININGRUNNER_H
diff --git a/llvm/lib/Analysis/CMakeLists.txt b/llvm/lib/Analysis/CMakeLists.txt
--- a/llvm/lib/Analysis/CMakeLists.txt
+++ b/llvm/lib/Analysis/CMakeLists.txt
@@ -103,6 +103,7 @@
   MemoryLocation.cpp
   MemorySSA.cpp
   MemorySSAUpdater.cpp
+  ModelUnderTrainingRunner.cpp
   ModuleDebugInfoPrinter.cpp
   ModuleSummaryAnalysis.cpp
   MustExecute.cpp
diff --git a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
--- a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
@@ -16,6 +16,7 @@
 #include "llvm/Analysis/CallGraph.h"
 #include "llvm/Analysis/InlineSizeEstimatorAnalysis.h"
 #include "llvm/Analysis/MLInlineAdvisor.h"
+#include "llvm/Analysis/ModelUnderTrainingRunner.h"
 #include "llvm/Analysis/NoInferenceModelRunner.h"
 #include "llvm/Analysis/Utils/TFUtils.h"
 #include "llvm/IR/LLVMContext.h"
@@ -95,7 +96,6 @@
 /// Because this is a protobuf, we cannot just stream the events as they come.
 /// Internally, TrainingLogger stores data in column-major format, because that
 /// lines up with how TF SequenceExample represents it.
-class ModelUnderTrainingRunner;
 class TrainingLogger final {
 public:
   TrainingLogger(StringRef LogFileName, const ModelUnderTrainingRunner *MUTR);
@@ -262,55 +262,21 @@
   const int64_t Mandatory;
 };
 
-/// ModelUnderTrainingRunner - training mode implementation. It uses TF C APIs
-/// to dynamically load and evaluate a TF SavedModel
-/// (https://www.tensorflow.org/guide/saved_model). Runtime performance is
-/// sacrificed for ease of use while training.
-class ModelUnderTrainingRunner final : public MLModelRunner {
-public:
-  ModelUnderTrainingRunner(LLVMContext &Ctx, const std::string &ModelPath);
-
-  // Disallows copy and assign.
-  ModelUnderTrainingRunner(const ModelUnderTrainingRunner &) = delete;
-  ModelUnderTrainingRunner &
-  operator=(const ModelUnderTrainingRunner &) = delete;
-
-  bool isValid() const { return !!Evaluator; }
-
-  const std::vector<LoggedFeatureSpec> &outputLoggedFeatureSpecs() const {
-    return OutputSpecs;
-  }
-
-  const Optional<TFModelEvaluator::EvaluationResult> &
-  lastEvaluationResult() const {
-    return LastEvaluationResult;
-  }
-
-  static const std::vector<TensorSpec> getInputFeatures() {
-    std::vector<TensorSpec> InputSpecs;
-    for (size_t I = 0; I < NumberOfFeatures; ++I)
-      InputSpecs.push_back(TensorSpec::createSpec<int64_t>(
-          TFFeedPrefix + FeatureNameMap[I], {1}));
-    append_range(InputSpecs, TrainingOnlyFeatures);
-    return InputSpecs;
-  }
-
-private:
-  std::unique_ptr<TFModelEvaluator> Evaluator;
-  std::vector<LoggedFeatureSpec> OutputSpecs;
-  Optional<TFModelEvaluator::EvaluationResult> LastEvaluationResult;
-  void *evaluateUntyped() override;
-  void *getTensorUntyped(size_t Index) override;
-
-  // The training framework needs some additional features.
-  const static std::vector<TensorSpec> TrainingOnlyFeatures;
-};
-
-const std::vector<TensorSpec> ModelUnderTrainingRunner::TrainingOnlyFeatures{
+static const std::vector<TensorSpec> TrainingOnlyFeatures{
     TensorSpec::createSpec<int64_t>(TFFeedPrefix + "inlining_default", {1}),
    TensorSpec::createSpec<float>(TFFeedPrefix + "discount", {1}),
    TensorSpec::createSpec<float>(TFFeedPrefix + "reward", {1}),
    TensorSpec::createSpec<int32_t>(TFFeedPrefix + "step_type", {1})};
+
+static const std::vector<TensorSpec> getInputFeatures() {
+  std::vector<TensorSpec> InputSpecs;
+  for (size_t I = 0; I < NumberOfFeatures; ++I)
+    InputSpecs.push_back(
+        TensorSpec::createSpec<int64_t>(TFFeedPrefix + FeatureNameMap[I], {1}));
+  append_range(InputSpecs, TrainingOnlyFeatures);
+  return InputSpecs;
+}
+
 } // namespace
 
 TrainingLogger::TrainingLogger(StringRef LogFileName,
@@ -451,40 +417,6 @@
   return Ret;
 }
 
-ModelUnderTrainingRunner::ModelUnderTrainingRunner(LLVMContext &Ctx,
-                                                   const std::string &ModelPath)
-    : MLModelRunner(Ctx) {
-  std::vector<TensorSpec> InputSpecs =
-      ModelUnderTrainingRunner::getInputFeatures();
-  if (auto MaybeOutSpecs =
-          loadOutputSpecs(Ctx, DecisionName, ModelPath, TFOutputSpecOverride))
-    OutputSpecs = std::move(*MaybeOutSpecs);
-  else
-    return;
-
-  Evaluator = std::make_unique<TFModelEvaluator>(
-      ModelPath, InputSpecs, [&](size_t I) { return OutputSpecs[I].Spec; },
-      OutputSpecs.size());
-  if (!Evaluator || !Evaluator->isValid()) {
-    Ctx.emitError("Failed to create inliner saved model evaluator");
-    Evaluator.reset();
-    return;
-  }
-}
-
-void *ModelUnderTrainingRunner::evaluateUntyped() {
-  LastEvaluationResult = Evaluator->evaluate();
-  if (!LastEvaluationResult.hasValue()) {
-    Ctx.emitError("Error evaluating model.");
-    return nullptr;
-  }
-  return LastEvaluationResult->getTensorValue<int64_t>(0);
-}
-
-void *ModelUnderTrainingRunner::getTensorUntyped(size_t Index) {
-  return Evaluator->getUntypedInput(Index);
-}
-
 std::unique_ptr<InlineAdvisor> llvm::getDevelopmentModeAdvisor(
     Module &M, ModuleAnalysisManager &MAM,
     std::function<bool(CallBase &)> GetDefaultAdvice) {
@@ -493,11 +425,13 @@
   ModelUnderTrainingRunner *MUTRPtr = nullptr;
   bool IsDoingInference = false;
   if (TFModelUnderTrainingPath.empty())
-    Runner.reset(new NoInferenceModelRunner(
-        Ctx, ModelUnderTrainingRunner::getInputFeatures()));
+    Runner.reset(new NoInferenceModelRunner(Ctx, getInputFeatures()));
   else {
-    auto MUTR = std::make_unique<ModelUnderTrainingRunner>(
-        Ctx, TFModelUnderTrainingPath);
+    std::unique_ptr<ModelUnderTrainingRunner> MUTR;
+    if (auto MaybeOutputSpecs = loadOutputSpecs(
+            Ctx, DecisionName, TFModelUnderTrainingPath, TFOutputSpecOverride))
+      MUTR = std::make_unique<ModelUnderTrainingRunner>(
+          Ctx, TFModelUnderTrainingPath, getInputFeatures(), *MaybeOutputSpecs);
     if (!MUTR || !MUTR->isValid()) {
       Ctx.emitError("Could not load the policy model from the provided path");
       return nullptr;
diff --git a/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp b/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
new file mode 100644
--- /dev/null
+++ b/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
@@ -0,0 +1,49 @@
+//===- ModelUnderTrainingRunner.cpp - 'development' mode runner -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of a MLModelRunner for 'development' mode, i.e. evaluation
+// happens off a model that's provided from the command line and is interpreted.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Config/config.h"
+#if defined(LLVM_HAVE_TF_API)
+
+#include "llvm/Analysis/ModelUnderTrainingRunner.h"
+
+using namespace llvm;
+
+ModelUnderTrainingRunner::ModelUnderTrainingRunner(
+    LLVMContext &Ctx, const std::string &ModelPath,
+    const std::vector<TensorSpec> &InputSpecs,
+    const std::vector<LoggedFeatureSpec> &OutputSpecs)
+    : MLModelRunner(Ctx), OutputSpecs(OutputSpecs) {
+  Evaluator = std::make_unique<TFModelEvaluator>(
+      ModelPath, InputSpecs, [&](size_t I) { return OutputSpecs[I].Spec; },
+      OutputSpecs.size());
+  if (!Evaluator || !Evaluator->isValid()) {
+    Ctx.emitError("Failed to create inliner saved model evaluator");
+    Evaluator.reset();
+    return;
+  }
+}
+
+void *ModelUnderTrainingRunner::evaluateUntyped() {
+  LastEvaluationResult = Evaluator->evaluate();
+  if (!LastEvaluationResult.hasValue()) {
+    Ctx.emitError("Error evaluating model.");
+    return nullptr;
+  }
+  return LastEvaluationResult->getUntypedTensorValue(0);
+}
+
+void *ModelUnderTrainingRunner::getTensorUntyped(size_t Index) {
+  return Evaluator->getUntypedInput(Index);
+}
+
+#endif // defined(LLVM_HAVE_TF_API)