diff --git a/llvm/include/llvm/Analysis/Utils/TFUtils.h b/llvm/include/llvm/Analysis/Utils/TFUtils.h --- a/llvm/include/llvm/Analysis/Utils/TFUtils.h +++ b/llvm/include/llvm/Analysis/Utils/TFUtils.h @@ -105,9 +105,14 @@ Optional<std::string> LoggingName; }; -bool loadOutputSpecs(LLVMContext &Ctx, StringRef FileName, - StringRef ExpectedDecisionName, - std::vector<LoggedFeatureSpec> &Ret); +/// Load the output specs. If SpecFileOverride is not empty, that path is used. +/// Otherwise, the file is assumed to be called 'output_spec.json' and be found +/// under ModelPath (the model directory). +/// The first output tensor name must match ExpectedDecisionName. +/// In case of error, the return is None and the error is logged. +Optional<std::vector<LoggedFeatureSpec>> +loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName, + StringRef ModelPath, StringRef SpecFileOverride = StringRef()); /// Logging utility - given an ordered specification of features, and assuming /// a scalar reward, allow logging feature values and rewards, and then print diff --git a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp --- a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp +++ b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp @@ -21,7 +21,6 @@ #include "llvm/IR/LLVMContext.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/ManagedStatic.h" -#include "llvm/Support/Path.h" #include <vector> @@ -472,14 +471,10 @@ TensorSpec::createSpec<int64_t>(TFFeedPrefix + FeatureNameMap[I], {1})); InputSpecs.insert(InputSpecs.end(), TrainingOnlyFeatures.begin(), TrainingOnlyFeatures.end()); - SmallVector<char, 128> OutputSpecsPath; - StringRef OutputSpecPath = TFOutputSpecOverride; - if (OutputSpecPath.empty()) { - llvm::sys::path::append(OutputSpecsPath, ModelPath, "output_spec.json"); - OutputSpecPath = {OutputSpecsPath.data(), OutputSpecsPath.size()}; - } - - if (!loadOutputSpecs(Ctx, OutputSpecPath, DecisionName, OutputSpecs)) + if (auto MaybeOutSpecs = + loadOutputSpecs(Ctx, DecisionName, ModelPath, 
TFOutputSpecOverride)) + OutputSpecs = std::move(*MaybeOutSpecs); + else return; Evaluator = std::make_unique<TFModelEvaluator>( diff --git a/llvm/lib/Analysis/TFUtils.cpp b/llvm/lib/Analysis/TFUtils.cpp --- a/llvm/lib/Analysis/TFUtils.cpp +++ b/llvm/lib/Analysis/TFUtils.cpp @@ -19,6 +19,7 @@ #include "llvm/Support/JSON.h" #include "llvm/Support/ManagedStatic.h" #include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/Path.h" #include "llvm/Support/raw_ostream.h" #include "tensorflow/c/c_api.h" @@ -216,27 +217,34 @@ return None; } -bool loadOutputSpecs(LLVMContext &Ctx, StringRef FileName, - StringRef ExpectedDecisionName, - std::vector<LoggedFeatureSpec> &Ret) { +Optional<std::vector<LoggedFeatureSpec>> +loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName, + StringRef ModelPath, StringRef SpecFileOverride) { + SmallVector<char, 128> OutputSpecsPath; + StringRef FileName = SpecFileOverride; + if (FileName.empty()) { + llvm::sys::path::append(OutputSpecsPath, ModelPath, "output_spec.json"); + FileName = {OutputSpecsPath.data(), OutputSpecsPath.size()}; + } + auto BufferOrError = MemoryBuffer::getFileOrSTDIN(FileName); if (!BufferOrError) { Ctx.emitError("Error opening output specs file: " + FileName + " : " + BufferOrError.getError().message()); - return false; + return None; } auto ParsedJSONValues = json::parse(BufferOrError.get()->getBuffer()); if (!ParsedJSONValues) { Ctx.emitError("Could not parse specs file: " + FileName); - return false; + return None; } auto ValuesArray = ParsedJSONValues->getAsArray(); if (!ValuesArray) { Ctx.emitError("Expected an array of {tensor_spec:<TensorSpec>, " "logging_name:<name>} dictionaries"); - return false; + return None; } - + std::vector<LoggedFeatureSpec> Ret; for (const auto &Value : *ValuesArray) if (const auto *Obj = Value.getAsObject()) if (const auto *SpecPart = Obj->get("tensor_spec")) @@ -249,7 +257,7 @@ "Only int64, int32, and float tensors are supported. 
" "Found unsupported type for tensor named " + TensorSpec->name()); - return false; + return None; } Ret.push_back({*TensorSpec, LoggingName->str()}); } @@ -261,15 +269,15 @@ "with a json object describing a TensorSpec; and a 'logging_name' key, " "which is a string to use as name when logging this tensor in the " "training log."); - return false; + return None; } if (Ret.empty() || *Ret[0].LoggingName != ExpectedDecisionName) { Ctx.emitError("The first output spec must describe the decision tensor, " "and must have the logging_name " + StringRef(ExpectedDecisionName)); - return false; + return None; } - return true; + return Ret; } class TFModelEvaluatorImpl {