diff --git a/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h b/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h
--- a/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h
+++ b/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h
@@ -10,6 +10,8 @@
 #ifndef LLVM_ANALYSIS_INLINEMODELFEATUREMAPS_H
 #define LLVM_ANALYSIS_INLINEMODELFEATUREMAPS_H
 
+#include "llvm/Analysis/TensorSpec.h"
+
 #include <array>
 #include <string>
 #include <vector>
@@ -127,7 +129,7 @@
 constexpr size_t NumberOfFeatures =
     static_cast<size_t>(FeatureIndex::NumberOfFeatures);
 
-extern const std::array<std::string, NumberOfFeatures> FeatureNameMap;
+extern const std::array<TensorSpec, NumberOfFeatures> FeatureMap;
 
 extern const char *const DecisionName;
 extern const char *const DefaultDecisionName;
diff --git a/llvm/include/llvm/Analysis/MLModelRunner.h b/llvm/include/llvm/Analysis/MLModelRunner.h
--- a/llvm/include/llvm/Analysis/MLModelRunner.h
+++ b/llvm/include/llvm/Analysis/MLModelRunner.h
@@ -10,6 +10,7 @@
 #ifndef LLVM_ANALYSIS_MLMODELRUNNER_H
 #define LLVM_ANALYSIS_MLMODELRUNNER_H
 
+#include "llvm/Analysis/TensorSpec.h"
 #include "llvm/IR/PassManager.h"
 
 namespace llvm {
@@ -41,7 +42,7 @@
         getTensorUntyped(static_cast<size_t>(FeatureID)));
   }
 
-  virtual void *getTensorUntyped(size_t Index) = 0;
+  void *getTensorUntyped(size_t Index) { return InputBuffers[Index]; }
   const void *getTensorUntyped(size_t Index) const {
     return (const_cast<MLModelRunner *>(this))->getTensorUntyped(Index);
   }
@@ -50,13 +51,27 @@
   Kind getKind() const { return Type; }
 
 protected:
-  MLModelRunner(LLVMContext &Ctx, Kind Type) : Ctx(Ctx), Type(Type) {
+  MLModelRunner(LLVMContext &Ctx, Kind Type, size_t NrInputs)
+      : Ctx(Ctx), Type(Type), InputBuffers(NrInputs) {
     assert(Type != Kind::Unknown);
   }
   virtual void *evaluateUntyped() = 0;
 
+  void setUpBufferForTensor(size_t Index, const TensorSpec &Spec,
+                            void *Buffer) {
+    if (!Buffer) {
+      OwnedBuffers.emplace_back(Spec.getTotalTensorBufferSize());
+      Buffer = OwnedBuffers.back().data();
+    }
+    InputBuffers[Index] = Buffer;
+  }
+
   LLVMContext &Ctx;
   const Kind Type;
+
+private:
+  std::vector<void *> InputBuffers;
+  std::vector<std::vector<char>> OwnedBuffers;
 };
 } // namespace llvm
diff --git a/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h b/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
--- a/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
+++ b/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
@@ -10,6 +10,7 @@
 #ifndef LLVM_ANALYSIS_MODELUNDERTRAININGRUNNER_H
 #define LLVM_ANALYSIS_MODELUNDERTRAININGRUNNER_H
 
+#include "llvm/Analysis/TensorSpec.h"
 #include "llvm/Config/llvm-config.h"
 
 #ifdef LLVM_HAVE_TF_API
@@ -48,6 +49,11 @@
                        StringRef DecisionName,
                        const std::vector<TensorSpec> &InputSpecs,
                        StringRef OutputSpecsPathOverride = "");
+  static std::unique_ptr<ModelUnderTrainingRunner>
+  createAndEnsureValid(LLVMContext &Ctx, const std::string &ModelPath,
+                       StringRef DecisionName,
+                       const std::vector<TensorSpec> &InputSpecs,
+                       const std::vector<LoggedFeatureSpec> &OutputSpecs);
 
 private:
   ModelUnderTrainingRunner(LLVMContext &Ctx, const std::string &ModelPath,
@@ -58,7 +64,6 @@
   const std::vector<LoggedFeatureSpec> OutputSpecs;
   Optional<TFModelEvaluator::EvaluationResult> LastEvaluationResult;
   void *evaluateUntyped() override;
-  void *getTensorUntyped(size_t Index) override;
 
   bool isValid() const { return !!Evaluator; }
 };
diff --git a/llvm/include/llvm/Analysis/NoInferenceModelRunner.h b/llvm/include/llvm/Analysis/NoInferenceModelRunner.h
--- a/llvm/include/llvm/Analysis/NoInferenceModelRunner.h
+++ b/llvm/include/llvm/Analysis/NoInferenceModelRunner.h
@@ -10,13 +10,9 @@
 #ifndef LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
 #define LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
 
-#include "llvm/Config/llvm-config.h"
-
-/// While not strictly necessary to conditionally compile this, it really
-/// has no usecase outside the 'development' mode.
-#ifdef LLVM_HAVE_TF_API
 #include "llvm/Analysis/MLModelRunner.h"
-#include "llvm/Analysis/Utils/TFUtils.h"
+#include "llvm/Analysis/TensorSpec.h"
+#include "llvm/Config/llvm-config.h"
 namespace llvm {
 /// A pseudo model runner. We use it to store feature values when collecting
 /// logs for the default policy, in 'development' mode, but never ask it to
 /// 'run'.
@@ -34,10 +30,6 @@
   void *evaluateUntyped() override {
     llvm_unreachable("We shouldn't call run on this model runner.");
   }
-  void *getTensorUntyped(size_t Index) override;
-
-  std::vector<std::unique_ptr<char[]>> ValuesBuffer;
 };
 } // namespace llvm
-#endif // defined(LLVM_HAVE_TF_API)
 #endif // LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
diff --git a/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h b/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
--- a/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
+++ b/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
@@ -15,6 +15,7 @@
 #define LLVM_ANALYSIS_RELEASEMODEMODELRUNNER_H
 
 #include "llvm/Analysis/MLModelRunner.h"
+#include "llvm/Analysis/TensorSpec.h"
 #include "llvm/Support/ErrorHandling.h"
 
 #include <memory>
@@ -30,21 +31,20 @@
   /// FeatureNames' type should be an indexed collection of std::string, like
   /// std::array or std::vector, that has a size() method.
   template <class FType>
-  ReleaseModeModelRunner(LLVMContext &Ctx, const FType &FeatureNames,
+  ReleaseModeModelRunner(LLVMContext &Ctx, const FType &InputSpec,
                          StringRef DecisionName, StringRef FeedPrefix = "feed_",
                          StringRef FetchPrefix = "fetch_")
-      : MLModelRunner(Ctx, MLModelRunner::Kind::Release),
+      : MLModelRunner(Ctx, MLModelRunner::Kind::Release, InputSpec.size()),
         CompiledModel(std::make_unique<TGen>()) {
     assert(CompiledModel && "The CompiledModel should be valid");
 
-    const size_t FeatureCount = FeatureNames.size();
-    FeatureIndices.resize(FeatureCount);
-
-    for (size_t I = 0; I < FeatureCount; ++I) {
+    for (size_t I = 0; I < InputSpec.size(); ++I) {
       const int Index =
-          CompiledModel->LookupArgIndex(FeedPrefix.str() + FeatureNames[I]);
-      assert(Index >= 0 && "Cannot find Feature in inlining model");
-      FeatureIndices[I] = Index;
+          CompiledModel->LookupArgIndex(FeedPrefix.str() + InputSpec[I].name());
+      void *Buffer = nullptr;
+      if (Index >= 0)
+        Buffer = CompiledModel->arg_data(Index);
+      setUpBufferForTensor(I, InputSpec[I], Buffer);
     }
 
     ResultIndex = CompiledModel->LookupResultIndex(FetchPrefix.str() +
@@ -64,12 +64,6 @@
     return CompiledModel->result_data(ResultIndex);
   }
 
-  void *getTensorUntyped(size_t Index) override {
-    return reinterpret_cast<char *>(
-        CompiledModel->arg_data(FeatureIndices[Index]));
-  }
-
-  std::vector<int32_t> FeatureIndices;
   int32_t ResultIndex = -1;
   std::unique_ptr<TGen> CompiledModel;
 };
diff --git a/llvm/include/llvm/Analysis/TensorSpec.h b/llvm/include/llvm/Analysis/TensorSpec.h
--- a/llvm/include/llvm/Analysis/TensorSpec.h
+++ b/llvm/include/llvm/Analysis/TensorSpec.h
@@ -74,6 +74,8 @@
   size_t getElementCount() const { return ElementCount; }
   /// Get the size, in bytes, of one element.
   size_t getElementByteSize() const { return ElementSize; }
+  /// Get the total size of a memory buffer needed to store the whole tensor.
+  size_t getTotalTensorBufferSize() const { return ElementCount * ElementSize; }
 
   template <typename T> bool isElementType() const {
     return getDataType<T>() == Type;
diff --git a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
--- a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
@@ -272,8 +272,8 @@
   static const std::vector<TensorSpec> getInputFeatures() {
     std::vector<TensorSpec> InputSpecs;
     for (size_t I = 0; I < NumberOfFeatures; ++I)
-      InputSpecs.push_back(
-          TensorSpec::createSpec<int64_t>(TFFeedPrefix + FeatureNameMap[I], {1}));
+      InputSpecs.push_back(TensorSpec::createSpec<int64_t>(
+          TFFeedPrefix + FeatureMap[I].name(), FeatureMap[I].shape()));
     append_range(InputSpecs, TrainingOnlyFeatures);
     return InputSpecs;
   }
@@ -289,8 +289,7 @@
   std::vector<LoggedFeatureSpec> FT;
 
   for (size_t I = 0; I < NumberOfFeatures; ++I)
-    FT.push_back(
-        {TensorSpec::createSpec<int64_t>(FeatureNameMap.at(I), {1}), None});
+    FT.push_back({FeatureMap.at(I), None});
   if (MUTR && MUTR->outputLoggedFeatureSpecs().size() > 1)
     append_range(FT, drop_begin(MUTR->outputLoggedFeatureSpecs()));
diff --git a/llvm/lib/Analysis/MLInlineAdvisor.cpp b/llvm/lib/Analysis/MLInlineAdvisor.cpp
--- a/llvm/lib/Analysis/MLInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/MLInlineAdvisor.cpp
@@ -37,7 +37,7 @@
 llvm::getReleaseModeAdvisor(Module &M, ModuleAnalysisManager &MAM) {
   auto AOTRunner =
       std::make_unique<ReleaseModeModelRunner<llvm::InlinerSizeModel>>(
-          M.getContext(), FeatureNameMap, DecisionName);
+          M.getContext(), FeatureMap, DecisionName);
   return std::make_unique<MLInlineAdvisor>(M, MAM, std::move(AOTRunner));
 }
 #endif
@@ -51,14 +51,14 @@
     cl::init(2.0));
 
 // clang-format off
-const std::array<std::string, NumberOfFeatures> llvm::FeatureNameMap{
+const std::array<TensorSpec, NumberOfFeatures> llvm::FeatureMap{
+#define POPULATE_NAMES(_, NAME) TensorSpec::createSpec<int64_t>(NAME, {1} ),
 // InlineCost features - these must come first
-#define POPULATE_NAMES(INDEX_NAME, NAME) NAME,
   INLINE_COST_FEATURE_ITERATOR(POPULATE_NAMES)
 #undef POPULATE_NAMES
 
 // Non-cost features
-#define POPULATE_NAMES(INDEX_NAME, NAME, COMMENT) NAME,
+#define POPULATE_NAMES(_, NAME, __) TensorSpec::createSpec<int64_t>(NAME, {1} ),
   INLINE_FEATURE_ITERATOR(POPULATE_NAMES)
 #undef POPULATE_NAMES
 };
@@ -364,7 +364,7 @@
   using namespace ore;
   OR << NV("Callee", Callee->getName());
   for (size_t I = 0; I < NumberOfFeatures; ++I)
-    OR << NV(FeatureNameMap[I],
+    OR << NV(FeatureMap[I].name(),
              *getAdvisor()->getModelRunner().getTensor<int64_t>(I));
   OR << NV("ShouldInline", isInliningRecommended());
 }
diff --git a/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp b/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
--- a/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
+++ b/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
@@ -11,6 +11,7 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "llvm/Analysis/TensorSpec.h"
 #include "llvm/Config/config.h"
 #if defined(LLVM_HAVE_TF_API)
 
@@ -22,7 +23,7 @@
     LLVMContext &Ctx, const std::string &ModelPath,
     const std::vector<TensorSpec> &InputSpecs,
     const std::vector<LoggedFeatureSpec> &OutputSpecs)
-    : MLModelRunner(Ctx, MLModelRunner::Kind::Development),
+    : MLModelRunner(Ctx, MLModelRunner::Kind::Development, InputSpecs.size()),
       OutputSpecs(OutputSpecs) {
   Evaluator = std::make_unique<TFModelEvaluator>(
       ModelPath, InputSpecs, [&](size_t I) { return OutputSpecs[I].Spec; },
@@ -32,6 +33,10 @@
     Evaluator.reset();
     return;
   }
+
+  for (size_t I = 0, E = InputSpecs.size(); I < E; ++I) {
+    setUpBufferForTensor(I, InputSpecs[I], Evaluator->getUntypedInput(I));
+  }
 }
 
 void *ModelUnderTrainingRunner::evaluateUntyped() {
@@ -43,24 +48,31 @@
   return LastEvaluationResult->getUntypedTensorValue(0);
 }
 
-void *ModelUnderTrainingRunner::getTensorUntyped(size_t Index) {
-  return Evaluator->getUntypedInput(Index);
-}
-
 std::unique_ptr<ModelUnderTrainingRunner>
 ModelUnderTrainingRunner::createAndEnsureValid(
     LLVMContext &Ctx, const std::string &ModelPath, StringRef DecisionName,
     const std::vector<TensorSpec> &InputSpecs,
    StringRef OutputSpecsPathOverride) {
-  std::unique_ptr<ModelUnderTrainingRunner> MUTR;
   if (auto MaybeOutputSpecs = loadOutputSpecs(Ctx, DecisionName, ModelPath,
                                               OutputSpecsPathOverride))
-    MUTR.reset(new ModelUnderTrainingRunner(Ctx, ModelPath, InputSpecs,
-                                            *MaybeOutputSpecs));
+    return createAndEnsureValid(Ctx, ModelPath, DecisionName, InputSpecs,
+                                *MaybeOutputSpecs);
+  Ctx.emitError("Could not load the policy model from the provided path");
+  return nullptr;
+}
+
+std::unique_ptr<ModelUnderTrainingRunner>
+ModelUnderTrainingRunner::createAndEnsureValid(
+    LLVMContext &Ctx, const std::string &ModelPath, StringRef DecisionName,
+    const std::vector<TensorSpec> &InputSpecs,
+    const std::vector<LoggedFeatureSpec> &OutputSpecs) {
+  std::unique_ptr<ModelUnderTrainingRunner> MUTR;
+  MUTR.reset(
+      new ModelUnderTrainingRunner(Ctx, ModelPath, InputSpecs, OutputSpecs));
   if (MUTR && MUTR->isValid())
     return MUTR;
 
-  Ctx.emitError("Could not load the policy model from the provided path");
+  Ctx.emitError("Could not load or create model evaluator.");
   return nullptr;
 }
diff --git a/llvm/lib/Analysis/NoInferenceModelRunner.cpp b/llvm/lib/Analysis/NoInferenceModelRunner.cpp
--- a/llvm/lib/Analysis/NoInferenceModelRunner.cpp
+++ b/llvm/lib/Analysis/NoInferenceModelRunner.cpp
@@ -10,24 +10,14 @@
 // logs for the default policy, in 'development' mode, but never ask it to
 // 'run'.
 //===----------------------------------------------------------------------===//
-#include "llvm/Config/config.h"
-#if defined(LLVM_HAVE_TF_API)
-
 #include "llvm/Analysis/NoInferenceModelRunner.h"
-#include "llvm/Analysis/Utils/TFUtils.h"
 
 using namespace llvm;
 
 NoInferenceModelRunner::NoInferenceModelRunner(
     LLVMContext &Ctx, const std::vector<TensorSpec> &Inputs)
-    : MLModelRunner(Ctx, MLModelRunner::Kind::NoOp) {
-  ValuesBuffer.reserve(Inputs.size());
+    : MLModelRunner(Ctx, MLModelRunner::Kind::NoOp, Inputs.size()) {
+  size_t Index = 0;
   for (const auto &TS : Inputs)
-    ValuesBuffer.push_back(std::make_unique<char[]>(TS.getElementCount() *
-                                                    TS.getElementByteSize()));
-}
-
-void *NoInferenceModelRunner::getTensorUntyped(size_t Index) {
-  return ValuesBuffer[Index].get();
+    setUpBufferForTensor(Index++, TS, nullptr);
 }
-#endif // defined(LLVM_HAVE_TF_API)
diff --git a/llvm/lib/Analysis/TFUtils.cpp b/llvm/lib/Analysis/TFUtils.cpp
--- a/llvm/lib/Analysis/TFUtils.cpp
+++ b/llvm/lib/Analysis/TFUtils.cpp
@@ -300,16 +300,29 @@
     errs() << TF_Message(Status.get());
     invalidate();
   }
+  size_t NrSupported = 0;
   for (size_t I = 0; I < InputSpecs.size(); ++I) {
     auto &InputSpec = InputSpecs[I];
     InputFeed[I] = {
         TF_GraphOperationByName(Graph.get(), (InputSpec.name()).c_str()),
         InputSpec.port()};
+    if (!InputFeed[I].oper) {
+      continue;
+    }
+    if (NrSupported++ != I) {
+      errs()
+          << "Unsupported features must be placed at the end of the InputSpecs";
+      invalidate();
+      return;
+    }
     if (!checkReportAndInvalidate(InputFeed[I], InputSpec))
       return;
     initInput(I, static_cast<TF_DataType>(getTFTypeIndex(InputSpec.type())),
               InputSpec.shape());
   }
+  InputFeed.resize(NrSupported);
+  Input.resize(NrSupported);
+
   for (size_t I = 0; I < OutputSpecsSize; ++I) {
     auto OutputSpec = GetOutputSpecs(I);
     OutputFeed[I] = {
@@ -387,7 +400,9 @@
 }
 
 void *TFModelEvaluator::getUntypedInput(size_t Index) {
-  return TF_TensorData(Impl->getInput()[Index]);
+  if (Index < Impl->getInput().size())
+    return TF_TensorData(Impl->getInput()[Index]);
+  return nullptr;
 }
 
 TFModelEvaluator::EvaluationResult::EvaluationResult(
diff --git a/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp b/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp
--- a/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp
+++ b/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp
@@ -15,6 +15,7 @@
 #include "RegAllocGreedy.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/MLModelRunner.h"
+#include "llvm/Analysis/TensorSpec.h"
 #if defined(LLVM_HAVE_TF_AOT_REGALLOCEVICTMODEL) || defined(LLVM_HAVE_TF_API)
 #include "llvm/Analysis/ModelUnderTrainingRunner.h"
 #include "llvm/Analysis/NoInferenceModelRunner.h"
@@ -320,14 +321,16 @@
   mutable DenseMap<RegID, LIFeatureComponents> CachedFeatures;
 };
 
+#define _DECL_FEATURES(type, name, shape, _)                                   \
+  TensorSpec::createSpec<type>(#name, shape),
+
+static const std::vector<TensorSpec> InputFeatures{
+    {RA_EVICT_FEATURES_LIST(_DECL_FEATURES)},
+};
+#undef _DECL_FEATURES
 // ===================================
 // Release (AOT) - specifics
 // ===================================
-const std::array<std::string, FeatureIDs::FeatureCount> FeatureNames{
-#define _GETNAME(_, NAME, __, ___) #NAME,
-    RA_EVICT_FEATURES_LIST(_GETNAME)
-#undef _GETNAME
-};
 class ReleaseModeEvictionAdvisorAnalysis final
     : public RegAllocEvictionAdvisorAnalysis {
 public:
@@ -349,7 +352,7 @@
   getAdvisor(const MachineFunction &MF, const RAGreedy &RA) override {
     if (!Runner)
       Runner = std::make_unique<ReleaseModeModelRunner<CompiledModelType>>(
-          MF.getFunction().getContext(), FeatureNames, DecisionName);
+          MF.getFunction().getContext(), InputFeatures, DecisionName);
     return std::make_unique<MLEvictAdvisor>(
         MF, RA, Runner.get(), getAnalysis<MachineBlockFrequencyInfo>(),
        getAnalysis<MachineLoopInfo>());
@@ -363,13 +366,6 @@
 //
 // Features we log
 #ifdef LLVM_HAVE_TF_API
-#define _DECL_FEATURES(type, name, shape, _)                                   \
-  TensorSpec::createSpec<type>(#name, shape),
-
-static const std::vector<TensorSpec> InputFeatures{
-    {RA_EVICT_FEATURES_LIST(_DECL_FEATURES)},
-};
-#undef _DECL_FEATURES
 static const TensorSpec Output =
     TensorSpec::createSpec<int64_t>(DecisionName, {1});
 static const TensorSpec Reward = TensorSpec::createSpec<float>("reward", {1});
diff --git a/llvm/unittests/Analysis/CMakeLists.txt b/llvm/unittests/Analysis/CMakeLists.txt
--- a/llvm/unittests/Analysis/CMakeLists.txt
+++ b/llvm/unittests/Analysis/CMakeLists.txt
@@ -6,7 +6,7 @@
   TransformUtils
   )
 
-set(MLGO_TESTS TFUtilsTest.cpp MLModelRunnerTest.cpp)
+set(MLGO_TESTS TFUtilsTest.cpp)
 if (DEFINED LLVM_HAVE_TF_API)
   LIST(APPEND EXTRA_TESTS ${MLGO_TESTS})
 else()
@@ -39,6 +39,7 @@
   LoopNestTest.cpp
   MemoryBuiltinsTest.cpp
   MemorySSATest.cpp
+  MLModelRunnerTest.cpp
   PhiValuesTest.cpp
   ProfileSummaryInfoTest.cpp
   ScalarEvolutionTest.cpp
diff --git a/llvm/unittests/Analysis/MLModelRunnerTest.cpp b/llvm/unittests/Analysis/MLModelRunnerTest.cpp
--- a/llvm/unittests/Analysis/MLModelRunnerTest.cpp
+++ b/llvm/unittests/Analysis/MLModelRunnerTest.cpp
@@ -8,10 +8,49 @@
 
 #include "llvm/Analysis/MLModelRunner.h"
 #include "llvm/Analysis/NoInferenceModelRunner.h"
+#include "llvm/Analysis/ReleaseModeModelRunner.h"
 #include "gtest/gtest.h"
 
 using namespace llvm;
 
+namespace llvm {
+// This is a mock of the kind of AOT-generated model evaluator. It has 2
+// tensors of shape {1}, and 'evaluation' adds them.
+// The interface is the one expected by ReleaseModeModelRunner.
+class MockAOTModel final {
+  int64_t A = 0;
+  int64_t B = 0;
+  int64_t R = 0;
+
+public:
+  MockAOTModel() = default;
+  int LookupArgIndex(const std::string &Name) {
+    if (Name == "prefix_a")
+      return 0;
+    if (Name == "prefix_b")
+      return 1;
+    return -1;
+  }
+  int LookupResultIndex(const std::string &) { return 0; }
+  void Run() { R = A + B; }
+  void *result_data(int RIndex) {
+    if (RIndex == 0)
+      return &R;
+    return nullptr;
+  }
+  void *arg_data(int Index) {
+    switch (Index) {
+    case 0:
+      return &A;
+    case 1:
+      return &B;
+    default:
+      return nullptr;
+    }
+  }
+};
+} // namespace llvm
+
 TEST(NoInferenceModelRunner, AccessTensors) {
   const std::vector<TensorSpec> Inputs{
       TensorSpec::createSpec<int64_t>("F1", {1}),
@@ -30,4 +69,51 @@
   ASSERT_EQ(NIMR.getTensor<int64_t>(0)[0], 1);
   ASSERT_EQ(NIMR.getTensor<int64_t>(1)[8], 9);
   ASSERT_EQ(NIMR.getTensor<float>(2)[1], 0.2f);
+}
+
+TEST(ReleaseModeRunner, NormalUse) {
+  LLVMContext Ctx;
+  std::vector<TensorSpec> Inputs{TensorSpec::createSpec<int64_t>("a", {1}),
+                                 TensorSpec::createSpec<int64_t>("b", {1})};
+  auto Evaluator = std::make_unique<ReleaseModeModelRunner<MockAOTModel>>(
+      Ctx, Inputs, "", "prefix_");
+  *Evaluator->getTensor<int64_t>(0) = 1;
+  *Evaluator->getTensor<int64_t>(1) = 2;
+  EXPECT_EQ(Evaluator->evaluate<int64_t>(), 3);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(0), 1);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(1), 2);
+}
+
+TEST(ReleaseModeRunner, ExtraFeatures) {
+  LLVMContext Ctx;
+  std::vector<TensorSpec> Inputs{TensorSpec::createSpec<int64_t>("a", {1}),
+                                 TensorSpec::createSpec<int64_t>("b", {1}),
+                                 TensorSpec::createSpec<int64_t>("c", {1})};
+  auto Evaluator = std::make_unique<ReleaseModeModelRunner<MockAOTModel>>(
+      Ctx, Inputs, "", "prefix_");
+  *Evaluator->getTensor<int64_t>(0) = 1;
+  *Evaluator->getTensor<int64_t>(1) = 2;
+  *Evaluator->getTensor<int64_t>(2) = -3;
+  EXPECT_EQ(Evaluator->evaluate<int64_t>(), 3);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(0), 1);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(1), 2);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(2), -3);
+}
+
+TEST(ReleaseModeRunner, ExtraFeaturesOutOfOrder) {
+  LLVMContext Ctx;
+  std::vector<TensorSpec> Inputs{
+      TensorSpec::createSpec<int64_t>("a", {1}),
+      TensorSpec::createSpec<int64_t>("c", {1}),
+      TensorSpec::createSpec<int64_t>("b", {1}),
+  };
+  auto Evaluator = std::make_unique<ReleaseModeModelRunner<MockAOTModel>>(
+      Ctx, Inputs, "", "prefix_");
+  *Evaluator->getTensor<int64_t>(0) = 1;         // a
+  *Evaluator->getTensor<int64_t>(1) = 2;         // c
+  *Evaluator->getTensor<int64_t>(2) = -3;        // b
+  EXPECT_EQ(Evaluator->evaluate<int64_t>(), -2); // a + b
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(0), 1);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(1), 2);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(2), -3);
 }
\ No newline at end of file
diff --git a/llvm/unittests/Analysis/TFUtilsTest.cpp b/llvm/unittests/Analysis/TFUtilsTest.cpp
--- a/llvm/unittests/Analysis/TFUtilsTest.cpp
+++ b/llvm/unittests/Analysis/TFUtilsTest.cpp
@@ -10,6 +10,8 @@
 #include "google/protobuf/struct.pb.h"
 #include "tensorflow/core/example/example.pb.h"
 #include "tensorflow/core/example/feature.pb.h"
+#include "llvm/Analysis/ModelUnderTrainingRunner.h"
+#include "llvm/Analysis/TensorSpec.h"
 #include "llvm/AsmParser/Parser.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/Instructions.h"
@@ -102,6 +104,36 @@
   EXPECT_FALSE(Evaluator.isValid());
 }
 
+TEST(TFUtilsTest, UnsupportedFeature) {
+  const static int64_t KnownSize = 214;
+  std::vector<TensorSpec> InputSpecs{
+      TensorSpec::createSpec<int32_t>("serving_default_input_1",
+                                      {1, KnownSize}),
+      TensorSpec::createSpec<float>("this_feature_does_not_exist", {2, 5})};
+
+  LLVMContext Ctx;
+  auto Evaluator = ModelUnderTrainingRunner::createAndEnsureValid(
+      Ctx, getModelPath(), "StatefulPartitionedCall", InputSpecs,
+      {LoggedFeatureSpec{
+          TensorSpec::createSpec<float>("StatefulPartitionedCall", {1}),
+          None}});
+  int32_t *V = Evaluator->getTensor<int32_t>(0);
+  // Fill it up with 1s, we know the output.
+  for (auto I = 0; I < KnownSize; ++I)
+    V[I] = 1;
+
+  float *F = Evaluator->getTensor<float>(1);
+  for (auto I = 0; I < 2 * 5; ++I)
+    F[I] = 3.14 + I;
+  float Ret = Evaluator->evaluate<float>();
+  EXPECT_EQ(static_cast<int64_t>(Ret), 80);
+  // The input vector should be unchanged
+  for (auto I = 0; I < KnownSize; ++I)
+    EXPECT_EQ(V[I], 1);
+  for (auto I = 0; I < 2 * 5; ++I)
+    EXPECT_FLOAT_EQ(F[I], 3.14 + I);
+}
+
 #define PROTO_CHECKER(FNAME, TYPE, INDEX, EXP)                                 \
   do {                                                                         \
     const auto &V = Expected.feature_lists()                                   \
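
The heart of the patch is the buffer-ownership scheme MLModelRunner gains via setUpBufferForTensor: inputs the backing model supports alias buffers the model owns, while inputs it does not know about fall back to storage the runner owns, so clients can populate every declared feature unconditionally. The following standalone C++ sketch distills that pattern outside the LLVM tree; BufferOwningRunner and ToyRunner are invented names for illustration, not LLVM API.

// A minimal sketch (assumed names, no LLVM dependency) of the
// buffer-ownership pattern introduced by the patch above.
#include <cstddef>
#include <iostream>
#include <vector>

class BufferOwningRunner {
public:
  explicit BufferOwningRunner(size_t NrInputs) : InputBuffers(NrInputs) {}

  // Every declared input has a buffer, whether or not the model supports it.
  void *getTensorUntyped(size_t Index) { return InputBuffers[Index]; }

protected:
  // If the backing model exposes a buffer for this input, alias it;
  // otherwise allocate scratch storage that lives as long as the runner.
  void setUpBuffer(size_t Index, size_t TotalByteSize, void *ModelBuffer) {
    if (!ModelBuffer) {
      OwnedBuffers.emplace_back(TotalByteSize);
      ModelBuffer = OwnedBuffers.back().data();
    }
    InputBuffers[Index] = ModelBuffer;
  }

private:
  std::vector<void *> InputBuffers;
  std::vector<std::vector<char>> OwnedBuffers;
};

// A toy runner over a 'compiled model' that supports only its first input,
// loosely mirroring how ReleaseModeModelRunner uses an AOT model's arg_data.
class ToyRunner : public BufferOwningRunner {
  long SupportedArg = 0; // the one buffer the 'model' actually owns
public:
  ToyRunner() : BufferOwningRunner(2) {
    setUpBuffer(0, sizeof(long), &SupportedArg); // supported: aliased
    setUpBuffer(1, sizeof(long), nullptr);       // unsupported: scratch
  }
  long evaluate() const { return SupportedArg; } // ignores input 1
};

int main() {
  ToyRunner R;
  *static_cast<long *>(R.getTensorUntyped(0)) = 42;
  *static_cast<long *>(R.getTensorUntyped(1)) = 7; // written, never read
  std::cout << R.evaluate() << '\n';               // prints 42
  return 0;
}

This is the behavior the ExtraFeatures tests above exercise: writes to an unsupported feature land in runner-owned scratch memory and leave the model's result unaffected.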