diff --git a/llvm/include/llvm/Analysis/Utils/TFUtils.h b/llvm/include/llvm/Analysis/Utils/TFUtils.h
--- a/llvm/include/llvm/Analysis/Utils/TFUtils.h
+++ b/llvm/include/llvm/Analysis/Utils/TFUtils.h
@@ -36,6 +36,35 @@
 class TFModelEvaluatorImpl;
 class EvaluationResultImpl;
 
+class TensorSpec final {
+public:
+  template <typename T>
+  static TensorSpec createSpec(const std::string &Name,
+                               const std::vector<int64_t> &Shape,
+                               int Port = 0) {
+    return TensorSpec(Name, Port, getDataType<T>(), Shape);
+  }
+
+  const std::string &name() const { return Name; }
+  int port() const { return Port; }
+  int typeIndex() const { return TypeIndex; }
+  const std::vector<int64_t> &shape() const { return Shape; }
+
+private:
+  TensorSpec(const std::string &Name, int Port, int TypeIndex,
+             const std::vector<int64_t> &Shape)
+      : Name(Name), Port(Port), TypeIndex(TypeIndex), Shape(Shape) {}
+
+  template <typename T> static int getDataType() {
+    llvm_unreachable("Undefined tensor type");
+  }
+
+  std::string Name;
+  int Port = 0;
+  int TypeIndex = 0;
+  std::vector<int64_t> Shape;
+};
+
 class TFModelEvaluator final {
 public:
   /// The result of a model evaluation. Handles the lifetime of the output
@@ -60,8 +89,8 @@
   };
 
   TFModelEvaluator(StringRef SavedModelPath,
-                   const std::vector<std::string> &InputNames,
-                   const std::vector<std::string> &OutputNames,
+                   const std::vector<TensorSpec> &InputSpecs,
+                   const std::vector<TensorSpec> &OutputSpecs,
                    const char *Tags = "serve");
   ~TFModelEvaluator();
   TFModelEvaluator(const TFModelEvaluator &) = delete;
@@ -82,32 +111,23 @@
   /// otherwise.
   bool isValid() const { return !!Impl; }
 
-  /// Initialize the input at Index as a tensor of the given type and
-  /// dimensions.
-  template <typename T>
-  void initInput(size_t Index, const std::vector<int64_t> &Dimensions) {
-    return initInput(Index, getModelTypeIndex<T>(), Dimensions);
-  }
-
 private:
-  void *getUntypedInput(size_t Index);
-  template <typename T> int getModelTypeIndex();
-  void initInput(size_t Index, int TypeIndex,
-                 const std::vector<int64_t> &Dimensions);
+  friend class TensorSpec;
+  void *getUntypedInput(size_t Index);
   std::unique_ptr<TFModelEvaluatorImpl> Impl;
 };
 
-template <> int TFModelEvaluator::getModelTypeIndex<float>();
-template <> int TFModelEvaluator::getModelTypeIndex<double>();
-template <> int TFModelEvaluator::getModelTypeIndex<int8_t>();
-template <> int TFModelEvaluator::getModelTypeIndex<uint8_t>();
-template <> int TFModelEvaluator::getModelTypeIndex<int16_t>();
-template <> int TFModelEvaluator::getModelTypeIndex<uint16_t>();
-template <> int TFModelEvaluator::getModelTypeIndex<int32_t>();
-template <> int TFModelEvaluator::getModelTypeIndex<uint32_t>();
-template <> int TFModelEvaluator::getModelTypeIndex<int64_t>();
-template <> int TFModelEvaluator::getModelTypeIndex<uint64_t>();
+template <> int TensorSpec::getDataType<float>();
+template <> int TensorSpec::getDataType<double>();
+template <> int TensorSpec::getDataType<int8_t>();
+template <> int TensorSpec::getDataType<uint8_t>();
+template <> int TensorSpec::getDataType<int16_t>();
+template <> int TensorSpec::getDataType<uint16_t>();
+template <> int TensorSpec::getDataType<int32_t>();
+template <> int TensorSpec::getDataType<uint32_t>();
+template <> int TensorSpec::getDataType<int64_t>();
+template <> int TensorSpec::getDataType<uint64_t>();
 
 } // namespace llvm
diff --git a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
--- a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
@@ -298,35 +298,12 @@
 private:
   std::unique_ptr<TFModelEvaluator> Evaluator;
 
-  // The training framework needs some additional features, that just need to
-  // be set to 0.
-  struct TensorSpec {
-    std::string Name;
-    std::function<void(TFModelEvaluator *, size_t Index,
-                       const std::vector<int64_t> &Dim)>
-        Initializer;
-  };
-
+  // The training framework needs some additional features.
   const std::vector<TensorSpec> TrainingOnlyFeatures{
-      {"inlining_default",
-       [](TFModelEvaluator *Evaluator, size_t Index,
-          const std::vector<int64_t> &Dim) {
-         Evaluator->initInput<int64_t>(Index, Dim);
-       }},
-      {"discount",
-       [](TFModelEvaluator *Evaluator, size_t Index,
-          const std::vector<int64_t> &Dim) {
-         Evaluator->initInput<float>(Index, Dim);
-       }},
-      {"reward",
-       [](TFModelEvaluator *Evaluator, size_t Index,
-          const std::vector<int64_t> &Dim) {
-         Evaluator->initInput<float>(Index, Dim);
-       }},
-      {"step_type", [](TFModelEvaluator *Evaluator, size_t Index,
-                       const std::vector<int64_t> &Dim) {
-        Evaluator->initInput<int32_t>(Index, Dim);
-      }}};
+      TensorSpec::createSpec<int64_t>(TFFeedPrefix + "inlining_default", {1}),
+      TensorSpec::createSpec<float>(TFFeedPrefix + "discount", {1}),
+      TensorSpec::createSpec<float>(TFFeedPrefix + "reward", {1}),
+      TensorSpec::createSpec<int32_t>(TFFeedPrefix + "step_type", {1})};
 };
 } // namespace
 
@@ -409,33 +386,22 @@
 ModelUnderTrainingRunner::ModelUnderTrainingRunner(LLVMContext &Ctx,
                                                    const std::string &ModelPath)
     : MLModelRunner(Ctx) {
-  std::vector<std::string> InputNames;
-  std::vector<std::string> OutputNames;
+  std::vector<TensorSpec> InputSpecs;
+  std::vector<TensorSpec> OutputSpecs;
   for (size_t I = 0; I < NumberOfFeatures; ++I)
-    InputNames.push_back(TFFeedPrefix + FeatureNameMap[I]);
-  for (size_t I = 0; I < TrainingOnlyFeatures.size(); ++I)
-    InputNames.push_back(TFFeedPrefix + TrainingOnlyFeatures[I].Name);
-  OutputNames.push_back(TFDecisionName);
+    InputSpecs.push_back(
+        TensorSpec::createSpec<int64_t>(TFFeedPrefix + FeatureNameMap[I], {1}));
+  InputSpecs.insert(InputSpecs.end(), TrainingOnlyFeatures.begin(),
+                    TrainingOnlyFeatures.end());
+  OutputSpecs.push_back(TensorSpec::createSpec<int64_t>(TFDecisionName, {1}));
   Evaluator =
-      std::make_unique<TFModelEvaluator>(ModelPath, InputNames, OutputNames);
+      std::make_unique<TFModelEvaluator>(ModelPath, InputSpecs, OutputSpecs);
   if (!Evaluator || !Evaluator->isValid()) {
     Ctx.emitError("Failed to create inliner saved model evaluator");
     Evaluator.reset();
     return;
   }
-
-  static const std::vector<int64_t> Dim{1};
-
-  size_t InputIndex = 0;
-  for (; InputIndex < NumberOfFeatures; ++InputIndex) {
-    Evaluator->initInput<int64_t>(InputIndex, Dim);
-  }
-
-  for (; InputIndex < InputNames.size(); ++InputIndex) {
-    TrainingOnlyFeatures[InputIndex - NumberOfFeatures].Initializer(
-        Evaluator.get(), InputIndex, Dim);
-  }
 }
 
 bool ModelUnderTrainingRunner::run() {
diff --git a/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp b/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp
--- a/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp
+++ b/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp
@@ -244,19 +244,18 @@
   if (!isEvaluatorRequested()) {
     return;
   }
-  std::vector<std::string> InputNames{"serving_default_input_1"};
-  std::vector<std::string> OutputName{"StatefulPartitionedCall"};
+  std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
+      "serving_default_input_1",
+      {1, static_cast<int64_t>(
+              IRToNativeSizeLearning::FunctionFeatures::FeatureCount)})};
+  std::vector<TensorSpec> OutputSpecs{
+      TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};
   Evaluator = std::make_unique<TFModelEvaluator>(
-      TFIR2NativeModelPath.getValue().c_str(), InputNames, OutputName);
+      TFIR2NativeModelPath.getValue().c_str(), InputSpecs, OutputSpecs);
   if (!Evaluator || !Evaluator->isValid()) {
     Evaluator.reset();
     return;
   }
-  static const std::vector<int64_t> Dim{
-      1, static_cast<int64_t>(
-             IRToNativeSizeLearning::FunctionFeatures::FeatureCount)};
-
-  Evaluator->initInput<int32_t>(0, Dim);
 }
 
 InlineSizeEstimatorAnalysis::Result
diff --git a/llvm/lib/Analysis/TFUtils.cpp b/llvm/lib/Analysis/TFUtils.cpp
--- a/llvm/lib/Analysis/TFUtils.cpp
+++ b/llvm/lib/Analysis/TFUtils.cpp
@@ -63,6 +63,7 @@
 } // namespace
 
 namespace llvm {
+
 class EvaluationResultImpl {
 public:
   EvaluationResultImpl(size_t OutputSize)
@@ -86,8 +87,8 @@
 class TFModelEvaluatorImpl {
 public:
   TFModelEvaluatorImpl(StringRef SavedModelPath,
-                       const std::vector<std::string> &InputNames,
-                       const std::vector<std::string> &OutputNames,
+                       const std::vector<TensorSpec> &InputSpecs,
+                       const std::vector<TensorSpec> &OutputSpecs,
                        const char *Tags);
 
   bool isValid() const { return IsValid; }
@@ -132,16 +133,17 @@
   /// Reusable utility for ensuring we can bind the requested Name to a node in
   /// the SavedModel Graph.
-  bool checkReportAndInvalidate(const TF_Output &Output, StringRef Name);
+  bool checkReportAndInvalidate(const TF_Output &Output,
+                                const TensorSpec &Spec);
 };
 } // namespace llvm
 
 TFModelEvaluatorImpl::TFModelEvaluatorImpl(
-    StringRef SavedModelPath, const std::vector<std::string> &InputNames,
-    const std::vector<std::string> &OutputNames, const char *Tags)
+    StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
+    const std::vector<TensorSpec> &OutputSpecs, const char *Tags)
     : Graph(createTFGraph()), Options(createTFSessionOptions()),
-      InputFeed(InputNames.size()), Input(InputNames.size()),
-      OutputFeed(OutputNames.size()) {
+      InputFeed(InputSpecs.size()), Input(InputSpecs.size()),
+      OutputFeed(OutputSpecs.size()) {
   if (!ensureInitTF()) {
     errs() << "Tensorflow should have been initialized";
     return;
   }
@@ -155,25 +157,29 @@
     errs() << TF_Message(Status.get());
     invalidate();
   }
-  for (size_t I = 0; I < InputNames.size(); ++I) {
-    InputFeed[I] = {
-        TF_GraphOperationByName(Graph.get(), (InputNames[I]).c_str()), 0};
-    if (!checkReportAndInvalidate(InputFeed[I], InputNames[I]))
+  for (size_t I = 0; I < InputSpecs.size(); ++I) {
+    auto &Spec = InputSpecs[I];
+    InputFeed[I] = {TF_GraphOperationByName(Graph.get(), (Spec.name()).c_str()),
+                    Spec.port()};
+    if (!checkReportAndInvalidate(InputFeed[I], Spec))
       return;
+    initInput(I, static_cast<TF_DataType>(Spec.typeIndex()), Spec.shape());
   }
-  for (size_t I = 0; I < OutputNames.size(); ++I) {
+  for (size_t I = 0; I < OutputSpecs.size(); ++I) {
+    auto &Spec = OutputSpecs[I];
     OutputFeed[I] = {
-        TF_GraphOperationByName(Graph.get(), (OutputNames[I]).c_str()), 0};
-    if (!checkReportAndInvalidate(OutputFeed[I], OutputNames[I]))
+        TF_GraphOperationByName(Graph.get(), (Spec.name()).c_str()),
+        Spec.port()};
+    if (!checkReportAndInvalidate(OutputFeed[I], Spec))
       return;
   }
 }
 
 TFModelEvaluator::TFModelEvaluator(StringRef SavedModelPath,
-                                   const std::vector<std::string> &InputNames,
-                                   const std::vector<std::string> &OutputNames,
+                                   const std::vector<TensorSpec> &InputSpecs,
+                                   const std::vector<TensorSpec> &OutputSpecs,
                                    const char *Tags)
-    : Impl(new TFModelEvaluatorImpl(SavedModelPath, InputNames, OutputNames,
+    : Impl(new TFModelEvaluatorImpl(SavedModelPath, InputSpecs, OutputSpecs,
                                     Tags)) {
   if (!Impl->isValid())
     Impl.reset();
@@ -193,10 +199,10 @@
 }
 
 bool TFModelEvaluatorImpl::checkReportAndInvalidate(const TF_Output &Output,
-                                                    StringRef Name) {
+                                                    const TensorSpec &Spec) {
   if (Output.oper)
     return true;
-  errs() << "Could not find TF_Output named: " + Name;
+  errs() << "Could not find TF_Output named: " + Spec.name();
   IsValid = false;
   return IsValid;
 }
@@ -242,50 +248,25 @@
   return TF_TensorData(Impl->getOutput()[Index]);
 }
 
-void TFModelEvaluator::initInput(size_t Index, int TypeIndex,
-                                 const std::vector<int64_t> &Dimensions) {
-  Impl->initInput(Index, static_cast<TF_DataType>(TypeIndex), Dimensions);
-}
+template <> int TensorSpec::getDataType<float>() { return TF_FLOAT; }
 
-template <> int TFModelEvaluator::getModelTypeIndex<float>() {
-  return TF_FLOAT;
-}
+template <> int TensorSpec::getDataType<double>() { return TF_DOUBLE; }
 
-template <> int TFModelEvaluator::getModelTypeIndex<double>() {
-  return TF_DOUBLE;
-}
+template <> int TensorSpec::getDataType<int8_t>() { return TF_INT8; }
 
-template <> int TFModelEvaluator::getModelTypeIndex<int8_t>() {
-  return TF_INT8;
-}
+template <> int TensorSpec::getDataType<uint8_t>() { return TF_UINT8; }
 
-template <> int TFModelEvaluator::getModelTypeIndex<uint8_t>() {
-  return TF_UINT8;
-}
+template <> int TensorSpec::getDataType<int16_t>() { return TF_INT16; }
 
-template <> int TFModelEvaluator::getModelTypeIndex<int16_t>() {
-  return TF_INT16;
-}
+template <> int TensorSpec::getDataType<uint16_t>() { return TF_UINT16; }
 
-template <> int TFModelEvaluator::getModelTypeIndex<uint16_t>() {
-  return TF_UINT16;
-}
-
-template <> int TFModelEvaluator::getModelTypeIndex<int32_t>() {
-  return TF_INT32;
-}
+template <> int TensorSpec::getDataType<int32_t>() { return TF_INT32; }
 
-template <> int TFModelEvaluator::getModelTypeIndex<uint32_t>() {
-  return TF_UINT32;
-}
+template <> int TensorSpec::getDataType<uint32_t>() { return TF_UINT32; }
 
-template <> int TFModelEvaluator::getModelTypeIndex<int64_t>() {
-  return TF_INT64;
-}
+template <> int TensorSpec::getDataType<int64_t>() { return TF_INT64; }
 
-template <> int TFModelEvaluator::getModelTypeIndex<uint64_t>() {
-  return TF_UINT64;
-}
+template <> int TensorSpec::getDataType<uint64_t>() { return TF_UINT64; }
 
 TFModelEvaluator::EvaluationResult::~EvaluationResult() {}
 TFModelEvaluator::~TFModelEvaluator() {}
diff --git a/llvm/unittests/Analysis/TFUtilsTest.cpp b/llvm/unittests/Analysis/TFUtilsTest.cpp
--- a/llvm/unittests/Analysis/TFUtilsTest.cpp
+++ b/llvm/unittests/Analysis/TFUtilsTest.cpp
@@ -37,15 +37,14 @@
 TEST(TFUtilsTest, LoadAndExecuteTest) {
   // We use the ir2native model for test. We know it has one feature of
   // dimension (1, 214)
-  std::vector<std::string> InputNames{"serving_default_input_1"};
-  std::vector<std::string> OutputName{"StatefulPartitionedCall"};
   const static int64_t KnownSize = 214;
+  std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
+      "serving_default_input_1", {1, KnownSize})};
+  std::vector<TensorSpec> OutputSpecs{
+      TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};
 
-  TFModelEvaluator Evaluator(getModelPath(), InputNames, OutputName);
-  static const std::vector<int64_t> Dim{1, KnownSize};
-
+  TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
   EXPECT_TRUE(Evaluator.isValid());
-  Evaluator.initInput<int32_t>(0, Dim);
 
   int32_t *V = Evaluator.getInput<int32_t>(0);
   // Fill it up with 1's, we know the output.
@@ -77,15 +76,14 @@
 TEST(TFUtilsTest, EvalError) {
   // We use the ir2native model for test. We know it has one feature of
   // dimension (1, 214)
-  std::vector<std::string> InputNames{"serving_default_input_1"};
-  std::vector<std::string> OutputName{"StatefulPartitionedCall"};
   const static int64_t KnownSize = 213;
+  std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
+      "serving_default_input_1", {1, KnownSize})};
+  std::vector<TensorSpec> OutputSpecs{
+      TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};
 
-  TFModelEvaluator Evaluator(getModelPath(), InputNames, OutputName);
-  static const std::vector<int64_t> Dim{1, KnownSize};
-
+  TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
  EXPECT_TRUE(Evaluator.isValid());
-  Evaluator.initInput<int32_t>(0, Dim);
 
   int32_t *V = Evaluator.getInput<int32_t>(0);
   // Fill it up with 1's, we know the output.
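
Usage note (not part of the patch): with TensorSpec, a tensor's element type, shape, and output port travel with the node name, so TFModelEvaluator can bind and allocate every input while it is being constructed, and the separate per-input initInput calls disappear. Below is a minimal sketch of the resulting call pattern, reusing the ir2native test model's node names from the unittest above. The wrapper function evaluateOnce, its SavedModelDir parameter, and the "positive output means success" criterion are illustrative assumptions; evaluate(), getInput(), and getTensorValue() are pre-existing TFUtils accessors that this patch leaves unchanged.

#include "llvm/Analysis/Utils/TFUtils.h"
#include <algorithm>
#include <cstdint>
#include <vector>

using namespace llvm;

// Hypothetical caller, for illustration only: run one evaluation of a model
// with a single int32 input of shape (1, 214) and a single float output.
static bool evaluateOnce(StringRef SavedModelDir) {
  const int64_t KnownSize = 214;
  // The spec now carries name, element type, and shape together.
  std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
      "serving_default_input_1", {1, KnownSize})};
  std::vector<TensorSpec> OutputSpecs{
      TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};
  // Inputs are typed and dimensioned at construction; no initInput step.
  TFModelEvaluator Evaluator(SavedModelDir, InputSpecs, OutputSpecs);
  if (!Evaluator.isValid())
    return false;
  int32_t *Input = Evaluator.getInput<int32_t>(0);
  std::fill(Input, Input + KnownSize, 1); // populate the feature buffer
  auto Result = Evaluator.evaluate();
  // Treat a positive first output value as "success" (illustrative).
  return Result.hasValue() && *Result->getTensorValue<float>(0) > 0.0f;
}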