diff --git a/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h b/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
--- a/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
+++ b/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
@@ -10,6 +10,8 @@
 #ifndef LLVM_ANALYSIS_MODELUNDERTRAININGRUNNER_H
 #define LLVM_ANALYSIS_MODELUNDERTRAININGRUNNER_H
 
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/iterator_range.h"
 #include "llvm/Analysis/TensorSpec.h"
 #include "llvm/Config/llvm-config.h"
 
@@ -32,8 +34,12 @@
   ModelUnderTrainingRunner &
   operator=(const ModelUnderTrainingRunner &) = delete;
 
-  const std::vector<LoggedFeatureSpec> &outputLoggedFeatureSpecs() const {
-    return OutputSpecs;
+  const std::vector<TensorSpec> &extraOutputsForLoggingSpecs() const {
+    return ExtraOutputsForLogging;
+  }
+
+  const void *getUntypedExtraOutputValue(size_t ExtraOutputIndex) const {
+    return lastEvaluationResult()->getUntypedTensorValue(ExtraOutputIndex + 1);
   }
 
   const Optional<TFModelEvaluator::EvaluationResult> &
@@ -49,22 +55,21 @@
                        StringRef DecisionName,
                        const std::vector<TensorSpec> &InputSpecs,
                        StringRef OutputSpecsPathOverride = "");
-  static std::unique_ptr<ModelUnderTrainingRunner>
-  createAndEnsureValid(LLVMContext &Ctx, const std::string &ModelPath,
-                       StringRef DecisionName,
-                       const std::vector<TensorSpec> &InputSpecs,
-                       const std::vector<LoggedFeatureSpec> &OutputSpecs);
 
-private:
-  ModelUnderTrainingRunner(LLVMContext &Ctx, const std::string &ModelPath,
-                           const std::vector<TensorSpec> &InputSpecs,
-                           const std::vector<LoggedFeatureSpec> &OutputSpecs);
+  ModelUnderTrainingRunner(
+      LLVMContext &Ctx, const std::string &ModelPath,
+      const std::vector<TensorSpec> &InputSpecs,
+      const std::vector<TensorSpec> &OutputSpecs,
+      const std::vector<TensorSpec> &ExtraOutputsForLogging = {});
+
+  bool isValid() const { return !!Evaluator; }
 
+private:
   std::unique_ptr<TFModelEvaluator> Evaluator;
-  const std::vector<LoggedFeatureSpec> OutputSpecs;
+  const std::vector<TensorSpec> OutputSpecs;
+  const std::vector<TensorSpec> ExtraOutputsForLogging;
   Optional<TFModelEvaluator::EvaluationResult> LastEvaluationResult;
   void *evaluateUntyped() override;
-  bool isValid() const { return !!Evaluator; }
 };
 
 } // namespace llvm
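With the new accessors, extra (non-decision) outputs are addressed by their own zero-based index, and getUntypedExtraOutputValue maps index I to raw output tensor I + 1, since tensor 0 is the decision. A minimal sketch of the intended logging pattern, mirroring the call sites updated below (Runner, Log, and Pos are placeholder names, not part of this patch):

    // Sketch: log every extra output after an evaluation.
    for (size_t I = 0; I < Runner.extraOutputsForLoggingSpecs().size(); ++I) {
      const char *RawData =
          reinterpret_cast<const char *>(Runner.getUntypedExtraOutputValue(I));
      Log->logSpecifiedTensorValue(Pos++, RawData);
    }
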
diff --git a/llvm/include/llvm/Analysis/TensorSpec.h b/llvm/include/llvm/Analysis/TensorSpec.h
--- a/llvm/include/llvm/Analysis/TensorSpec.h
+++ b/llvm/include/llvm/Analysis/TensorSpec.h
@@ -82,6 +82,10 @@
     return getDataType<T>() == Type;
   }
 
+  TensorSpec(const std::string &NewName, const TensorSpec &Other)
+      : TensorSpec(NewName, Other.Port, Other.Type, Other.ElementSize,
+                   Other.Shape) {}
+
 private:
   TensorSpec(const std::string &Name, int Port, TensorType Type,
              size_t ElementSize, const std::vector<int64_t> &Shape);
@@ -106,23 +110,6 @@
 Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
                                            const json::Value &Value);
 
-struct LoggedFeatureSpec {
-  TensorSpec Spec;
-  std::optional<std::string> LoggingName;
-  const std::string &getLoggingName() const {
-    return LoggingName ? *LoggingName : Spec.name();
-  }
-};
-
-/// Load the output specs. If SpecFileOverride is not empty, that path is used.
-/// Otherwise, the file is assumed to be called 'output_spec.json' and be found
-/// under ModelPath (the model directory).
-/// The first output tensor name must match ExpectedDecisionName.
-/// In case of error, the return is None and the error is logged.
-Optional<std::vector<LoggedFeatureSpec>>
-loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName,
-                StringRef ModelPath, StringRef SpecFileOverride = StringRef());
-
 #define TFUTILS_GETDATATYPE_DEF(T, Name)                                      \
   template <> TensorType TensorSpec::getDataType<T>();
 SUPPORTED_TENSOR_TYPES(TFUTILS_GETDATATYPE_DEF)
diff --git a/llvm/include/llvm/Analysis/Utils/TFUtils.h b/llvm/include/llvm/Analysis/Utils/TFUtils.h
--- a/llvm/include/llvm/Analysis/Utils/TFUtils.h
+++ b/llvm/include/llvm/Analysis/Utils/TFUtils.h
@@ -77,10 +77,6 @@
                    const std::vector<TensorSpec> &InputSpecs,
                    const std::vector<TensorSpec> &OutputSpecs,
                    const char *Tags = "serve");
-  TFModelEvaluator(StringRef SavedModelPath,
-                   const std::vector<TensorSpec> &InputSpecs,
-                   function_ref<TensorSpec(size_t)> GetOutputSpecs,
-                   size_t OutputSpecsSize, const char *Tags = "serve");
 
   ~TFModelEvaluator();
   TFModelEvaluator(const TFModelEvaluator &) = delete;
diff --git a/llvm/include/llvm/Analysis/Utils/TrainingLogger.h b/llvm/include/llvm/Analysis/Utils/TrainingLogger.h
--- a/llvm/include/llvm/Analysis/Utils/TrainingLogger.h
+++ b/llvm/include/llvm/Analysis/Utils/TrainingLogger.h
@@ -54,7 +54,7 @@
   /// NOTE: the FeatureSpecs are expected to be in the same order (i.e. have
   /// corresponding indices) with any MLModelRunner implementations
   /// corresponding to the model being trained/logged.
-  Logger(const std::vector<LoggedFeatureSpec> &FeatureSpecs,
+  Logger(const std::vector<TensorSpec> &FeatureSpecs,
          const TensorSpec &RewardSpec, bool IncludeReward);
 
   ~Logger();
@@ -91,7 +91,7 @@
       const StringMap<std::unique_ptr<Logger>> &Loggers);
 
 private:
-  std::vector<LoggedFeatureSpec> FeatureSpecs;
+  std::vector<TensorSpec> FeatureSpecs;
   const TensorSpec RewardSpec;
   const bool IncludeReward;
   std::unique_ptr<LoggerDataImpl> LoggerData;
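The new delegating TensorSpec constructor clones a spec under a different name, which is how the old getLoggingName() behavior is preserved once LoggedFeatureSpec is gone. An illustrative use, with made-up names matching the tests below:

    TensorSpec TheInt = TensorSpec::createSpec<int64_t>("the_int", {2});
    // Same port, element type, and shape; only the name differs.
    TensorSpec Renamed("alternate_name", TheInt);
    assert(Renamed.name() == "alternate_name");
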
diff --git a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
--- a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
@@ -10,6 +10,7 @@
 // loading of a model from a command line option.
 //
 //===----------------------------------------------------------------------===//
+#include "llvm/Analysis/TensorSpec.h"
 #include "llvm/Config/config.h"
 #if defined(LLVM_HAVE_TF_API)
 
@@ -114,9 +115,6 @@
   const ModelUnderTrainingRunner *const MUTR;
   std::unique_ptr<Logger> L;
   BitVector Effects;
-  /// There's at least one output. We'll set this to a different value if MUTR
-  /// is avaliable.
-  size_t OutputCount = 1;
   /// Set these 2 clearly OOB, to make sure we set them later.
   size_t DefaultDecisionPos = std::numeric_limits<size_t>::max();
   size_t DecisionPos = std::numeric_limits<size_t>::max();
@@ -285,21 +283,16 @@
       const ModelUnderTrainingRunner *MUTR)
       : LogFileName(LogFileName), MUTR(MUTR) {
     // The first output is the inlining decision.
-    if (MUTR)
-      OutputCount = MUTR->outputLoggedFeatureSpecs().size();
-    std::vector<LoggedFeatureSpec> FT;
+    std::vector<TensorSpec> FT(FeatureMap.begin(), FeatureMap.end());
 
-    for (size_t I = 0; I < NumberOfFeatures; ++I)
-      FT.push_back({FeatureMap.at(I), None});
-    if (MUTR && MUTR->outputLoggedFeatureSpecs().size() > 1)
-      append_range(FT, drop_begin(MUTR->outputLoggedFeatureSpecs()));
+    if (MUTR)
+      append_range(FT, MUTR->extraOutputsForLoggingSpecs());
 
     DefaultDecisionPos = FT.size();
-    FT.push_back(
-        {TensorSpec::createSpec<int64_t>(DefaultDecisionName, {1}), None});
+    FT.push_back(TensorSpec::createSpec<int64_t>(DefaultDecisionName, {1}));
 
     DecisionPos = FT.size();
-    FT.push_back({TensorSpec::createSpec<int64_t>(DecisionName, {1}), None});
+    FT.push_back(TensorSpec::createSpec<int64_t>(DecisionName, {1}));
 
     L = std::make_unique<Logger>(
         FT, TensorSpec::createSpec<int64_t>(RewardName, {1}),
@@ -315,13 +308,13 @@
       L->logInt64Value(CurrentFeature, &F);
     }
 
-    for (size_t I = 1; I < OutputCount; ++I) {
-      const auto &Result = *MUTR->lastEvaluationResult();
-      const char *RawData =
-          reinterpret_cast<const char *>(Result.getUntypedTensorValue(I));
-      L->logSpecifiedTensorValue(CurrentFeature, RawData);
-      ++CurrentFeature;
-    }
+    if (MUTR)
+      for (size_t I = 0; I < MUTR->extraOutputsForLoggingSpecs().size(); ++I) {
+        const char *RawData = reinterpret_cast<const char *>(
+            MUTR->getUntypedExtraOutputValue(I));
+        L->logSpecifiedTensorValue(CurrentFeature, RawData);
+        ++CurrentFeature;
+      }
 
     assert(CurrentFeature == DefaultDecisionPos);
     L->logInt64Value(DefaultDecisionPos, &Event.DefaultDecision);
supported. " + "Found unsupported type for tensor named " + + TensorSpec->name()); + return None; + } + Ret.push_back({*TensorSpec, LoggingName->str()}); + } + + if (ValuesArray->size() != Ret.size()) { + Ctx.emitError( + "Unable to parse output spec. It should be a json file containing an " + "array of dictionaries. Each dictionary must have a 'tensor_spec' key, " + "with a json object describing a TensorSpec; and a 'logging_name' key, " + "which is a string to use as name when logging this tensor in the " + "training log."); + return None; + } + if (Ret.empty() || *Ret[0].LoggingName != ExpectedDecisionName) { + Ctx.emitError("The first output spec must describe the decision tensor, " + "and must have the logging_name " + + StringRef(ExpectedDecisionName)); + return None; + } + return Ret; +} +} // namespace ModelUnderTrainingRunner::ModelUnderTrainingRunner( LLVMContext &Ctx, const std::string &ModelPath, const std::vector &InputSpecs, - const std::vector &OutputSpecs) + const std::vector &OutputSpecs, + const std::vector &ExtraOutputsForLogging) : MLModelRunner(Ctx, MLModelRunner::Kind::Development, InputSpecs.size()), - OutputSpecs(OutputSpecs) { - Evaluator = std::make_unique( - ModelPath, InputSpecs, [&](size_t I) { return OutputSpecs[I].Spec; }, - OutputSpecs.size()); + OutputSpecs(OutputSpecs), ExtraOutputsForLogging(ExtraOutputsForLogging) { + Evaluator = + std::make_unique(ModelPath, InputSpecs, OutputSpecs); if (!Evaluator || !Evaluator->isValid()) { Ctx.emitError("Failed to create saved model evaluator"); Evaluator.reset(); @@ -53,25 +124,32 @@ const std::vector &InputSpecs, StringRef OutputSpecsPathOverride) { if (auto MaybeOutputSpecs = loadOutputSpecs(Ctx, DecisionName, ModelPath, - OutputSpecsPathOverride)) - return createAndEnsureValid(Ctx, ModelPath, DecisionName, InputSpecs, - *MaybeOutputSpecs); - Ctx.emitError("Could not load the policy model from the provided path"); - return nullptr; -} + OutputSpecsPathOverride)) { + std::unique_ptr MUTR; + std::vector OutputSpecs; + std::vector ExtraOutputsForLogging; + append_range(OutputSpecs, + map_range(*MaybeOutputSpecs, [](const LoggedFeatureSpec &LFS) { + return LFS.Spec; + })); + append_range(ExtraOutputsForLogging, + map_range(drop_begin(*MaybeOutputSpecs), + [](const LoggedFeatureSpec &LFS) { + return TensorSpec(LFS.LoggingName + ? 
diff --git a/llvm/lib/Analysis/TFLiteUtils.cpp b/llvm/lib/Analysis/TFLiteUtils.cpp
--- a/llvm/lib/Analysis/TFLiteUtils.cpp
+++ b/llvm/lib/Analysis/TFLiteUtils.cpp
@@ -53,8 +53,8 @@
 public:
   TFModelEvaluatorImpl(StringRef SavedModelPath,
                        const std::vector<TensorSpec> &InputSpecs,
-                       function_ref<TensorSpec(size_t)> GetOutputSpecs,
-                       size_t OutputSpecsSize, const char *Tags);
+                       const std::vector<TensorSpec> &OutputSpecs,
+                       const char *Tags);
 
   bool isValid() const { return IsValid; }
   size_t outputSize() const { return Output.size(); }
@@ -98,9 +98,8 @@
 
 TFModelEvaluatorImpl::TFModelEvaluatorImpl(
     StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
-    function_ref<TensorSpec(size_t)> GetOutputSpecs, size_t OutputSpecsSize,
-    const char *Tags = "serve")
-    : Input(InputSpecs.size()), Output(OutputSpecsSize) {
+    const std::vector<TensorSpec> &OutputSpecs, const char *Tags = "serve")
+    : Input(InputSpecs.size()), Output(OutputSpecs.size()) {
   // INFO and DEBUG messages could be numerous and not particularly interesting
   tflite::LoggerOptions::SetMinimumLogSeverity(tflite::TFLITE_LOG_WARNING);
   // FIXME: make ErrorReporter a member (may also need subclassing
@@ -171,8 +170,8 @@
     return;
   }
 
-  for (size_t I = 0; I < OutputSpecsSize; ++I) {
-    auto OutputSpec = GetOutputSpecs(I);
+  for (size_t I = 0; I < OutputSpecs.size(); ++I) {
+    const auto &OutputSpec = OutputSpecs[I];
     Output[I] = Interpreter->output_tensor(
         OutputsMap[OutputSpec.name() + ":" +
                    std::to_string(OutputSpec.port())]);
@@ -181,23 +180,15 @@
   }
 }
 
-TFModelEvaluator::TFModelEvaluator(
-    StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
-    function_ref<TensorSpec(size_t)> GetOutputSpecs, size_t OutputSpecsSize,
-    const char *Tags)
-    : Impl(new TFModelEvaluatorImpl(SavedModelPath, InputSpecs, GetOutputSpecs,
-                                    OutputSpecsSize, Tags)) {
-  if (!Impl->isValid())
-    Impl.reset();
-}
-
 TFModelEvaluator::TFModelEvaluator(StringRef SavedModelPath,
                                    const std::vector<TensorSpec> &InputSpecs,
                                    const std::vector<TensorSpec> &OutputSpecs,
                                    const char *Tags)
-    : TFModelEvaluator(
-          SavedModelPath, InputSpecs, [&](size_t I) { return OutputSpecs[I]; },
-          OutputSpecs.size(), Tags) {}
+    : Impl(new TFModelEvaluatorImpl(SavedModelPath, InputSpecs, OutputSpecs,
+                                    Tags)) {
+  if (!Impl->isValid())
+    Impl.reset();
+}
 
 TFModelEvaluatorImpl::~TFModelEvaluatorImpl() {}
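With the function_ref overload gone, construction is just the vector-based form. A minimal sketch, with a placeholder model path and made-up spec names:

    std::vector<TensorSpec> Inputs{
        TensorSpec::createSpec<int64_t>("some_input", {1})};
    std::vector<TensorSpec> Outputs{
        TensorSpec::createSpec<int64_t>("StatefulPartitionedCall", {1})};
    TFModelEvaluator Evaluator("/path/to/saved_model", Inputs, Outputs);
    // Evaluator.isValid() is false if the model failed to load.
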
diff --git a/llvm/lib/Analysis/TensorSpec.cpp b/llvm/lib/Analysis/TensorSpec.cpp
--- a/llvm/lib/Analysis/TensorSpec.cpp
+++ b/llvm/lib/Analysis/TensorSpec.cpp
@@ -18,8 +18,6 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/JSON.h"
 #include "llvm/Support/ManagedStatic.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/Path.h"
 #include "llvm/Support/raw_ostream.h"
 #include <cassert>
 #include <numeric>
@@ -79,66 +77,4 @@
   return None;
 }
 
-Optional<std::vector<LoggedFeatureSpec>>
-loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName,
-                StringRef ModelPath, StringRef SpecFileOverride) {
-  SmallVector<char, 128> OutputSpecsPath;
-  StringRef FileName = SpecFileOverride;
-  if (FileName.empty()) {
-    llvm::sys::path::append(OutputSpecsPath, ModelPath, "output_spec.json");
-    FileName = {OutputSpecsPath.data(), OutputSpecsPath.size()};
-  }
-
-  auto BufferOrError = MemoryBuffer::getFileOrSTDIN(FileName);
-  if (!BufferOrError) {
-    Ctx.emitError("Error opening output specs file: " + FileName + " : " +
-                  BufferOrError.getError().message());
-    return None;
-  }
-  auto ParsedJSONValues = json::parse(BufferOrError.get()->getBuffer());
-  if (!ParsedJSONValues) {
-    Ctx.emitError("Could not parse specs file: " + FileName);
-    return None;
-  }
-  auto ValuesArray = ParsedJSONValues->getAsArray();
-  if (!ValuesArray) {
-    Ctx.emitError("Expected an array of {tensor_spec:<TensorSpec JSON>, "
-                  "logging_name:<name>} dictionaries");
-    return None;
-  }
-  std::vector<LoggedFeatureSpec> Ret;
-  for (const auto &Value : *ValuesArray)
-    if (const auto *Obj = Value.getAsObject())
-      if (const auto *SpecPart = Obj->get("tensor_spec"))
-        if (auto TensorSpec = getTensorSpecFromJSON(Ctx, *SpecPart))
-          if (auto LoggingName = Obj->getString("logging_name")) {
-            if (!TensorSpec->isElementType<int64_t>() &&
-                !TensorSpec->isElementType<int32_t>() &&
-                !TensorSpec->isElementType<float>()) {
-              Ctx.emitError(
-                  "Only int64, int32, and float tensors are supported. "
-                  "Found unsupported type for tensor named " +
-                  TensorSpec->name());
-              return None;
-            }
-            Ret.push_back({*TensorSpec, LoggingName->str()});
-          }
-
-  if (ValuesArray->size() != Ret.size()) {
-    Ctx.emitError(
-        "Unable to parse output spec. It should be a json file containing an "
-        "array of dictionaries. Each dictionary must have a 'tensor_spec' key, "
-        "with a json object describing a TensorSpec; and a 'logging_name' key, "
-        "which is a string to use as name when logging this tensor in the "
-        "training log.");
-    return None;
-  }
-  if (Ret.empty() || *Ret[0].LoggingName != ExpectedDecisionName) {
-    Ctx.emitError("The first output spec must describe the decision tensor, "
-                  "and must have the logging_name " +
-                  StringRef(ExpectedDecisionName));
-    return None;
-  }
-  return Ret;
-}
-
 } // namespace llvm
Expected " @@ -89,12 +89,12 @@ assert(FeatureLists.size() == LoggedFeatureSpecs.size()); for (size_t I = 0; I < FeatureLists.size(); ++I) { const auto &LFS = LoggedFeatureSpecs[I]; - (*FL)[LFS.getLoggingName()] = std::move(FeatureLists[I]); + (*FL)[LFS.name()] = std::move(FeatureLists[I]); } } public: - LoggerDataImpl(const std::vector &LoggedSpecs, + LoggerDataImpl(const std::vector &LoggedSpecs, const TensorSpec &RewardSpec, bool IncludeReward) : LoggedFeatureSpecs(LoggedSpecs), RewardSpec(RewardSpec), IncludeReward(IncludeReward), FeatureLists(LoggedFeatureSpecs.size()) {} @@ -110,7 +110,7 @@ } char *addNewTensor(size_t FeatureID) { - const auto &Spec = LoggedFeatureSpecs[FeatureID].Spec; + const auto &Spec = LoggedFeatureSpecs[FeatureID]; if (Spec.isElementType()) { auto *RF = FeatureLists[FeatureID] .add_feature() @@ -146,7 +146,7 @@ }; } // namespace llvm -Logger::Logger(const std::vector &FeatureSpecs, +Logger::Logger(const std::vector &FeatureSpecs, const TensorSpec &RewardSpec, bool IncludeReward) : FeatureSpecs(FeatureSpecs), RewardSpec(RewardSpec), IncludeReward(IncludeReward), @@ -180,22 +180,22 @@ #undef LOG_FINAL_REWARD void Logger::logFloatValue(size_t FeatureID, const float *Value) { - assert(FeatureSpecs[FeatureID].Spec.isElementType()); + assert(FeatureSpecs[FeatureID].isElementType()); logSpecifiedTensorValue(FeatureID, reinterpret_cast(Value)); } void Logger::logInt64Value(size_t FeatureID, const int64_t *Value) { - assert(FeatureSpecs[FeatureID].Spec.isElementType()); + assert(FeatureSpecs[FeatureID].isElementType()); logSpecifiedTensorValue(FeatureID, reinterpret_cast(Value)); } void Logger::logInt32Value(size_t FeatureID, const int32_t *Value) { - assert(FeatureSpecs[FeatureID].Spec.isElementType()); + assert(FeatureSpecs[FeatureID].isElementType()); logSpecifiedTensorValue(FeatureID, reinterpret_cast(Value)); } void Logger::logSpecifiedTensorValue(size_t FeatureID, const char *RawData) { - const auto &Spec = FeatureSpecs[FeatureID].Spec; + const auto &Spec = FeatureSpecs[FeatureID]; char *Buff = addEntryAndGetFloatOrInt64Buffer(FeatureID); if (Spec.isElementType()) for (size_t I = 0; I < Spec.getElementCount(); ++I) diff --git a/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp b/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp --- a/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp +++ b/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp @@ -517,16 +517,13 @@ Logger *Log = nullptr; if (!TrainingLog.empty()) { - std::vector LFS; - for (const auto &FS : InputFeatures) - LFS.push_back({FS, None}); + std::vector LFS = InputFeatures; if (auto *MUTR = dyn_cast(Runner.get())) - if (MUTR->outputLoggedFeatureSpecs().size() > 1) - append_range(LFS, drop_begin(MUTR->outputLoggedFeatureSpecs())); + append_range(LFS, MUTR->extraOutputsForLoggingSpecs()); // We always log the output; in particular, if we're not evaluating, we // don't have an output spec json file. That's why we handle the // 'normal' output separately. 
diff --git a/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp b/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp
--- a/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp
+++ b/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp
@@ -517,16 +517,13 @@
 
   Logger *Log = nullptr;
   if (!TrainingLog.empty()) {
-    std::vector<LoggedFeatureSpec> LFS;
-    for (const auto &FS : InputFeatures)
-      LFS.push_back({FS, None});
+    std::vector<TensorSpec> LFS = InputFeatures;
     if (auto *MUTR = dyn_cast<ModelUnderTrainingRunner>(Runner.get()))
-      if (MUTR->outputLoggedFeatureSpecs().size() > 1)
-        append_range(LFS, drop_begin(MUTR->outputLoggedFeatureSpecs()));
+      append_range(LFS, MUTR->extraOutputsForLoggingSpecs());
     // We always log the output; in particular, if we're not evaluating, we
     // don't have an output spec json file. That's why we handle the
     // 'normal' output separately.
-    LFS.push_back({Output, None});
+    LFS.push_back(Output);
     auto I = LogMap.insert(std::make_pair(
         MF.getFunction().getName(),
         std::make_unique<Logger>(LFS, Reward, /*IncludeReward*/ true)));
@@ -1105,12 +1102,11 @@
                                getRunner().getTensorUntyped(CurrentFeature)));
   }
   if (auto *MUTR = dyn_cast<ModelUnderTrainingRunner>(&getRunner()))
-    for (size_t I = 1; I < MUTR->outputLoggedFeatureSpecs().size();
+    for (size_t I = 0; I < MUTR->extraOutputsForLoggingSpecs().size();
          ++I, ++CurrentFeature)
       Log->logSpecifiedTensorValue(
           CurrentFeature,
-          reinterpret_cast<const char *>(
-              MUTR->lastEvaluationResult()->getUntypedTensorValue(I)));
+          reinterpret_cast<const char *>(MUTR->getUntypedExtraOutputValue(I)));
 
   // The output is right after the features and the extra outputs
   Log->logInt64Value(CurrentFeature, &Ret);
   return Ret;
diff --git a/llvm/lib/CodeGen/MLRegallocPriorityAdvisor.cpp b/llvm/lib/CodeGen/MLRegallocPriorityAdvisor.cpp
--- a/llvm/lib/CodeGen/MLRegallocPriorityAdvisor.cpp
+++ b/llvm/lib/CodeGen/MLRegallocPriorityAdvisor.cpp
@@ -236,16 +236,13 @@
 
   Logger *Log = nullptr;
   if (!TrainingLog.empty()) {
-    std::vector<LoggedFeatureSpec> LFS;
-    for (const auto &FS : InputFeatures)
-      LFS.push_back({FS, None});
+    std::vector<TensorSpec> LFS = InputFeatures;
     if (auto *MUTR = dyn_cast<ModelUnderTrainingRunner>(Runner.get()))
-      if (MUTR->outputLoggedFeatureSpecs().size() > 1)
-        append_range(LFS, drop_begin(MUTR->outputLoggedFeatureSpecs()));
+      append_range(LFS, MUTR->extraOutputsForLoggingSpecs());
     // We always log the output; in particular, if we're not evaluating, we
     // don't have an output spec json file. That's why we handle the
     // 'normal' output separately.
-    LFS.push_back({Output, None});
+    LFS.push_back(Output);
    auto I = LogMap.insert(std::make_pair(
        MF.getFunction().getName(),
        std::make_unique<Logger>(LFS, Reward, /*IncludeReward*/ true)));
@@ -318,12 +315,11 @@
   }
 
   if (auto *MUTR = dyn_cast<ModelUnderTrainingRunner>(&getRunner())) {
-    for (size_t I = 1; I < MUTR->outputLoggedFeatureSpecs().size();
+    for (size_t I = 0; I < MUTR->extraOutputsForLoggingSpecs().size();
          ++I, ++CurrentFeature)
       Log->logSpecifiedTensorValue(
           CurrentFeature,
-          reinterpret_cast<const char *>(
-              MUTR->lastEvaluationResult()->getUntypedTensorValue(I)));
+          reinterpret_cast<const char *>(MUTR->getUntypedExtraOutputValue(I)));
   }
 
   float Ret = static_cast<float>(Prio);
diff --git a/llvm/unittests/Analysis/TFUtilsTest.cpp b/llvm/unittests/Analysis/TFUtilsTest.cpp
--- a/llvm/unittests/Analysis/TFUtilsTest.cpp
+++ b/llvm/unittests/Analysis/TFUtilsTest.cpp
@@ -100,20 +100,19 @@
       TensorSpec::createSpec<float>("this_feature_does_not_exist", {2, 5})};
 
   LLVMContext Ctx;
-  auto Evaluator = ModelUnderTrainingRunner::createAndEnsureValid(
-      Ctx, getModelPath(), "StatefulPartitionedCall", InputSpecs,
-      {LoggedFeatureSpec{
-          TensorSpec::createSpec<float>("StatefulPartitionedCall", {1}),
-          None}});
-  int32_t *V = Evaluator->getTensor<int32_t>(0);
+  ModelUnderTrainingRunner Evaluator(
+      Ctx, getModelPath(), InputSpecs,
+      {TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})});
+  EXPECT_TRUE(Evaluator.isValid());
+  int32_t *V = Evaluator.getTensor<int32_t>(0);
   // Fill it up with 1s, we know the output.
   for (auto I = 0; I < KnownSize; ++I)
     V[I] = 1;
 
-  float *F = Evaluator->getTensor<float>(1);
+  float *F = Evaluator.getTensor<float>(1);
   for (auto I = 0; I < 2 * 5; ++I)
     F[I] = 3.14 + I;
-  float Ret = Evaluator->evaluate<float>();
+  float Ret = Evaluator.evaluate<float>();
   EXPECT_EQ(static_cast<int64_t>(Ret), 80);
   // The input vector should be unchanged
   for (auto I = 0; I < KnownSize; ++I)
diff --git a/llvm/unittests/Analysis/TrainingLoggerTest.cpp b/llvm/unittests/Analysis/TrainingLoggerTest.cpp
--- a/llvm/unittests/Analysis/TrainingLoggerTest.cpp
+++ b/llvm/unittests/Analysis/TrainingLoggerTest.cpp
@@ -42,11 +42,9 @@
   } while (false)
 
 TEST(TrainingLoggerTest, Logger) {
-  std::vector<LoggedFeatureSpec> Features;
-  Features.push_back(
-      {TensorSpec::createSpec<float>("the_float", {2, 3}), None});
-  Features.push_back({TensorSpec::createSpec<int64_t>("the_int", {2}),
-                      std::string("alternate_name")});
+  std::vector<TensorSpec> Features{
+      TensorSpec::createSpec<float>("the_float", {2, 3}),
+      TensorSpec::createSpec<int64_t>("alternate_name", {2})};
 
   auto Rewards = TensorSpec::createSpec<float>("reward", {1});
   Logger L(Features, Rewards, true);
@@ -78,11 +76,9 @@
 }
 
 TEST(TrainingLoggerTest, LoggerInt32FeaturesAndReward) {
-  std::vector<LoggedFeatureSpec> Features;
-  Features.push_back(
-      {TensorSpec::createSpec<float>("the_float", {2, 3}), None});
-  Features.push_back({TensorSpec::createSpec<int32_t>("the_int", {2}),
-                      std::string("alternate_name")});
+  std::vector<TensorSpec> Features{
+      TensorSpec::createSpec<float>("the_float", {2, 3}),
+      TensorSpec::createSpec<int32_t>("alternate_name", {2})};
 
   auto Rewards = TensorSpec::createSpec<int32_t>("reward", {1});
   Logger L(Features, Rewards, true);
@@ -114,11 +110,9 @@
 }
 
 TEST(TrainingLoggerTest, LoggerNoReward) {
-  std::vector<LoggedFeatureSpec> Features;
-  Features.push_back(
-      {TensorSpec::createSpec<float>("the_float", {2, 3}), None});
-  Features.push_back({TensorSpec::createSpec<int64_t>("the_int", {2}),
-                      std::string("alternate_name")});
+  std::vector<TensorSpec> Features{
+      TensorSpec::createSpec<float>("the_float", {2, 3}),
+      TensorSpec::createSpec<int64_t>("alternate_name", {2})};
 
   auto Rewards = TensorSpec::createSpec<float>("reward", {1});
   Logger L(Features, Rewards, false);
@@ -144,9 +138,9 @@
 }
 
 TEST(TrainingLoggerTest, LoggerFinalReward) {
-  std::vector<LoggedFeatureSpec> Features;
-  Features.push_back({TensorSpec::createSpec<float>("the_float", {1}), None});
-  Features.push_back({TensorSpec::createSpec<int64_t>("the_int", {1}), None});
+  std::vector<TensorSpec> Features{
+      TensorSpec::createSpec<float>("the_float", {1}),
+      TensorSpec::createSpec<int64_t>("the_int", {1})};
 
   auto Rewards = TensorSpec::createSpec<float>("reward", {1});
   Logger L(Features, Rewards, true);
@@ -169,9 +163,9 @@
 }
 
 TEST(TrainingLoggerTest, LoggerGroup) {
-  std::vector<LoggedFeatureSpec> Features;
-  Features.push_back({TensorSpec::createSpec<float>("the_float", {1}), None});
-  Features.push_back({TensorSpec::createSpec<int64_t>("the_int", {1}), None});
+  std::vector<TensorSpec> Features{
+      TensorSpec::createSpec<float>("the_float", {1}),
+      TensorSpec::createSpec<int64_t>("the_int", {1})};
 
   auto Rewards = TensorSpec::createSpec<float>("reward", {1});
   StringMap<std::unique_ptr<Logger>> Loggers;