diff --git a/llvm/include/llvm/Analysis/EmitCModelRegistry.h b/llvm/include/llvm/Analysis/EmitCModelRegistry.h --- a/llvm/include/llvm/Analysis/EmitCModelRegistry.h +++ b/llvm/include/llvm/Analysis/EmitCModelRegistry.h @@ -6,6 +6,16 @@ // //===----------------------------------------------------------------------===// // +// This file implements a registry for EmitC-generated models. The idea is that +// generated models register themselves here, and then optimization passes can +// look up each model by its generated name string. This separates concerns +// between people who want to integrate new models for existing ML optimization +// passes (ML inlining at -Oz, for example) and people who want to expose new +// passes to ML. +// +// The normal case is that an EmitC model is selected via a command +// line flag, whose string value is passed to the registry as a lookup. +// +//===----------------------------------------------------------------------===// #ifndef LLVM_ANALYSIS_EMITCMODELREGISTRY_H #define LLVM_ANALYSIS_EMITCMODELREGISTRY_H @@ -16,6 +26,9 @@ namespace llvm { +// Meyers singleton representing the registry. There will be one instance of the +// registry for each ModelT type, which represents the interface for a +// particular model (inlining, regalloc, etc.). template <typename ModelT> class EmitCModelRegistry { public: static EmitCModelRegistry &get() { @@ -27,6 +40,7 @@ Models[Model->name()] = std::move(Model); } + // It is up to the consumer to handle the case where nullptr is returned. ModelT *getModel(const std::string &Name) { auto itr = Models.find(Name); if (itr == std::end(Models)) { @@ -41,6 +55,8 @@ std::unordered_map<std::string, std::unique_ptr<ModelT>> Models; }; +// Helper class whose constructor performs a model registration. Constructing +// an object of this type is all you need to do to register the model.
template <typename ModelT> class EmitCModelRegistrationHandle { public: EmitCModelRegistrationHandle(std::unique_ptr<ModelT> Model) { @@ -49,6 +65,7 @@ }; } // namespace llvm +// Macro which simplifies registering models with the registry. #define REGISTER_EMITC_MODEL(BaseModelType, LocalModelType) \ namespace { \ llvm::EmitCModelRegistrationHandle<BaseModelType> _handle_##LocalModelType( \ diff --git a/llvm/include/llvm/Analysis/MLInlineEmitCModel.h b/llvm/include/llvm/Analysis/MLInlineEmitCModel.h --- a/llvm/include/llvm/Analysis/MLInlineEmitCModel.h +++ b/llvm/include/llvm/Analysis/MLInlineEmitCModel.h @@ -17,9 +17,11 @@ namespace llvm { -// This is the base class +// This is the base class for all EmitC-generated models for the inlining -Oz +// problem. class MLInlineOzEmitCModel { public: + // Define a setter method for each input field #define DEFINE_SETTER(cpp_name, py_name, _) \ virtual void set_##py_name(emitc::Tensor) = 0; @@ -27,13 +29,18 @@ INLINE_COST_FEATURE_ITERATOR(DEFINE_SETTER); #undef FEATURE_SETTER + // These setters represent fields in every EmitC-generated model. We include + // them here for completeness, but they are not pure-virtual because they are + // not strictly necessary.
virtual void set_inlining_default(emitc::Tensor x) {} virtual void set_step_type(emitc::Tensor x) {} virtual void set_discount(emitc::Tensor x) {} virtual void set_reward(emitc::Tensor x) {} + // Name of the model: this is used when inserting models into the registry virtual std::string name() = 0; + // Run the model virtual emitc::Tensor run() = 0; }; diff --git a/llvm/lib/Analysis/CMakeLists.txt b/llvm/lib/Analysis/CMakeLists.txt --- a/llvm/lib/Analysis/CMakeLists.txt +++ b/llvm/lib/Analysis/CMakeLists.txt @@ -148,8 +148,9 @@ VFABIDemangling.cpp ${GeneratedMLSources} - models/emitc/mymodel.emitc.cpp + # Start EmitC-generated files models/emitc/InlineOzTestModel.emitc.cpp + # End EmitC-generated files ADDITIONAL_HEADER_DIRS ${LLVM_MAIN_INCLUDE_DIR}/llvm/Analysis diff --git a/llvm/lib/Analysis/MLInlineAdvisor.cpp b/llvm/lib/Analysis/MLInlineAdvisor.cpp --- a/llvm/lib/Analysis/MLInlineAdvisor.cpp +++ b/llvm/lib/Analysis/MLInlineAdvisor.cpp @@ -41,7 +41,7 @@ using namespace llvm; static cl::opt<std::string> MLInlineEmitCModelName( - "inliner-emitc-model-name", cl::Hidden, cl::desc("")); + "inliner-emitc-model-name", cl::Hidden, cl::desc("Name of the model to use for the ML inlining advisor.")); static cl::opt<std::string> InteractiveChannelBaseName( "inliner-interactive-channel-base", cl::Hidden, @@ -50,7 +50,6 @@ "have the name <name>.in, while the " "outgoing name should be <name>.out")); - #if defined(LLVM_HAVE_TF_AOT_INLINERSIZEMODEL) // codegen-ed file #include "InlinerSizeModel.h" // NOLINT diff --git a/llvm/lib/Analysis/MLInlinerEmitCRunner.h b/llvm/lib/Analysis/MLInlinerEmitCRunner.h --- a/llvm/lib/Analysis/MLInlinerEmitCRunner.h +++ b/llvm/lib/Analysis/MLInlinerEmitCRunner.h @@ -19,6 +19,9 @@ namespace llvm { +// Temporary convenience method to convert between raw memory buffers and +// emitc::Tensor types. This process can be optimized, but for now it is an +// easy solution.
template emitc::Tensor convertBufferToEmitCTensor(void *Buffer, TensorSpec Spec) { @@ -39,6 +42,7 @@ setUpBufferForTensor(I, Inputs[I], nullptr); } + // Look up required model from the registry Model = EmitCModelRegistry::get().getModel(ModelName); if (!Model) { Ctx.emitError("The requested model [" + ModelName + diff --git a/llvm/lib/Analysis/models/emitc/PluginInfo.cpp b/llvm/lib/Analysis/models/emitc/PluginInfo.cpp deleted file mode 100644 --- a/llvm/lib/Analysis/models/emitc/PluginInfo.cpp +++ /dev/null @@ -1,20 +0,0 @@ -#include "MLInlinerSizeModel.h" -#include "llvm/Passes/PassPlugin.h" - -namespace { -uint32_t APIVersion = 0; -const char *PluginName = "EmitCModels"; -const char *PluginVersion = "0.0"; -} // namespace - -void RegisterPassBuilderCallback(llvm::PassBuilder &PB) { - // Do nothing... -} - -llvm::PassPluginLibraryInfo getEmitCModelsPluginInfo() { - return llvm::PassPluginLibraryInfo{.APIVersion = 0, - .PluginName = "EmitCModels", - .PluginVersion = "0.0", - .RegisterPassBuilderCallbacks = - &RegisterPassBuilderCallback}; -} diff --git a/llvm/lib/Analysis/models/gen-inline-oz-test-model.py b/llvm/lib/Analysis/models/gen-inline-oz-test-model.py --- a/llvm/lib/Analysis/models/gen-inline-oz-test-model.py +++ b/llvm/lib/Analysis/models/gen-inline-oz-test-model.py @@ -21,7 +21,7 @@ "port": 0, "type": "int64_t", "shape": [ - 1, + 1 ] } } @@ -34,8 +34,7 @@ """Returns the list of features for LLVM inlining.""" # int64 features inputs = [ - tf.TensorSpec(dtype=tf.int64, shape=(1,), name=key) - for key in [ + tf.TensorSpec(dtype=tf.int64, shape=(), name=key) for key in [ 'caller_basic_block_count', 'caller_conditionally_executed_blocks', 'caller_users', @@ -76,20 +75,16 @@ ] # float32 features - inputs.extend( - [ - tf.TensorSpec(dtype=tf.float32, shape=(1,), name=key) - for key in ['discount', 'reward'] - ] - ) + inputs.extend([ + tf.TensorSpec(dtype=tf.float32, shape=(), name=key) + for key in ['discount', 'reward'] + ]) # int32 features - inputs.extend( - 
[ - tf.TensorSpec(dtype=tf.int32, shape=(1,), name=key) - for key in ['step_type'] - ] - ) + inputs.extend([ + tf.TensorSpec(dtype=tf.int32, shape=(), name=key) + for key in ['step_type'] + ]) return inputs @@ -100,7 +95,6 @@ def get_output_spec(): return POLICY_OUTPUT_SPEC - def get_output_spec_path(path): return os.path.join(path, 'output_spec.json') @@ -108,11 +102,8 @@ def build_mock_model(path, signature): """Build and save the mock model with the given signature""" module = tf.Module() - def action(*inputs): - return { - signature['output']: tf.constant(value=1, shape=(1,), dtype=tf.int64) - } + return {signature['output']: tf.constant(value=1, dtype=tf.int64)} module.action = tf.function()(action) action = {'action': module.action.get_concrete_function(signature['inputs'])} @@ -128,7 +119,7 @@ return { 'inputs': get_input_signature(), 'output': get_output_signature(), - 'output_spec': get_output_spec(), + 'output_spec': get_output_spec() } diff --git a/llvm/test/Transforms/Inline/ML/ml-test-emitc-mode.ll b/llvm/test/Transforms/Inline/ML/ml-test-emitc-mode.ll --- a/llvm/test/Transforms/Inline/ML/ml-test-emitc-mode.ll +++ b/llvm/test/Transforms/Inline/ML/ml-test-emitc-mode.ll @@ -1,9 +1,6 @@ -; The default inliner doesn't elide @adder, it believes it's too costly to inline -; adder into switcher. The ML inliner carries out that inlining, resulting in -; a smaller result (part of it is that adder gets elided). +; This test uses Inputs/test-module.ll, as it shares it with a similar test +; for the 'development' and 'release' mode. The InlineOzTestModel inlines +; everything. ; -; This test uses Inputs/test-module.ll, as it will share it with a similar test -; for the 'development' and 'release' mode. 
-; -; RUN: opt -passes=scc-oz-module-inliner -enable-ml-inliner=emitc -inliner-emitc-model-name=mymodel -S < %S/Inputs/test-module.ll 2>&1 | FileCheck %S/Inputs/test-module.ll --check-prefix=CHECK +; RUN: opt -passes=scc-oz-module-inliner -enable-ml-inliner=emitc -inliner-emitc-model-name=InlineOzTestModel -S < %S/Inputs/test-module.ll 2>&1 | FileCheck %S/Inputs/test-module.ll --check-prefix=CHECK ; RUN: opt -passes=scc-oz-module-inliner -enable-ml-inliner=default -S < %S/Inputs/test-module.ll 2>&1 | FileCheck %S/Inputs/test-module.ll --check-prefix=DEFAULT