diff --git a/mlir/include/mlir-c/ExecutionEngine.h b/mlir/include/mlir-c/ExecutionEngine.h
--- a/mlir/include/mlir-c/ExecutionEngine.h
+++ b/mlir/include/mlir-c/ExecutionEngine.h
@@ -42,9 +42,9 @@
 /// that will be loaded are specified via `numPaths` and `sharedLibPaths`
 /// respectively.
 /// TODO: figure out other options.
-MLIR_CAPI_EXPORTED MlirExecutionEngine
-mlirExecutionEngineCreate(MlirModule op, int optLevel, int numPaths,
-                          const MlirStringRef *sharedLibPaths);
+MLIR_CAPI_EXPORTED MlirExecutionEngine mlirExecutionEngineCreate(
+    MlirModule op, int optLevel, int numPaths,
+    const MlirStringRef *sharedLibPaths, bool enableObjectDump);
 
 /// Destroy an ExecutionEngine instance.
 MLIR_CAPI_EXPORTED void mlirExecutionEngineDestroy(MlirExecutionEngine jit);
diff --git a/mlir/include/mlir/ExecutionEngine/ExecutionEngine.h b/mlir/include/mlir/ExecutionEngine/ExecutionEngine.h
--- a/mlir/include/mlir/ExecutionEngine/ExecutionEngine.h
+++ b/mlir/include/mlir/ExecutionEngine/ExecutionEngine.h
@@ -46,6 +46,9 @@
   /// Dump cached object to output file `filename`.
   void dumpToObjectFile(StringRef filename);
 
+  /// Returns `true` if cache hasn't been populated yet.
+  bool isEmpty();
+
 private:
   llvm::StringMap<std::unique_ptr<llvm::MemoryBuffer>> cachedObjects;
 };
@@ -77,8 +80,8 @@
 
   /// If `enableObjectCache` is set, the JIT compiler will create one to store
   /// the object generated for the given module. The contents of the cache can
-  /// be dumped to a file via the `dumpToObjectfile` method.
-  bool enableObjectCache = false;
+  /// be dumped to a file via the `dumpToObjectFile` method.
+  bool enableObjectDump = false;
 
   /// If enable `enableGDBNotificationListener` is set, the JIT compiler will
   /// notify the llvm's global GDB notification listener.
@@ -101,7 +104,7 @@
 /// be used to invoke the JIT-compiled function.
 class ExecutionEngine {
 public:
-  ExecutionEngine(bool enableObjectCache, bool enableGDBNotificationListener,
+  ExecutionEngine(bool enableObjectDump, bool enableGDBNotificationListener,
                   bool enablePerfNotificationListener);
 
   /// Creates an execution engine for the given MLIR IR.
@@ -199,6 +202,9 @@
   /// Underlying cache.
   std::unique_ptr<SimpleObjectCache> cache;
 
+  /// Names of functions that may be looked up.
+  std::vector<std::string> functionNames;
+
   /// GDB notification listener.
   llvm::JITEventListener *gdbListener;
 
diff --git a/mlir/lib/Bindings/Python/ExecutionEngineModule.cpp b/mlir/lib/Bindings/Python/ExecutionEngineModule.cpp
--- a/mlir/lib/Bindings/Python/ExecutionEngineModule.cpp
+++ b/mlir/lib/Bindings/Python/ExecutionEngineModule.cpp
@@ -72,12 +72,14 @@
   //----------------------------------------------------------------------------
   py::class_<PyExecutionEngine>(m, "ExecutionEngine", py::module_local())
       .def(py::init<>([](MlirModule module, int optLevel,
-                         const std::vector<std::string> &sharedLibPaths) {
+                         const std::vector<std::string> &sharedLibPaths,
+                         bool enableObjectDump) {
             llvm::SmallVector<MlirStringRef, 4> libPaths;
             for (const std::string &path : sharedLibPaths)
               libPaths.push_back({path.c_str(), path.length()});
-            MlirExecutionEngine executionEngine = mlirExecutionEngineCreate(
-                module, optLevel, libPaths.size(), libPaths.data());
+            MlirExecutionEngine executionEngine =
+                mlirExecutionEngineCreate(module, optLevel, libPaths.size(),
+                                          libPaths.data(), enableObjectDump);
             if (mlirExecutionEngineIsNull(executionEngine))
               throw std::runtime_error(
                   "Failure while creating the ExecutionEngine.");
@@ -85,6 +87,7 @@
           }),
           py::arg("module"), py::arg("opt_level") = 2,
           py::arg("shared_libs") = py::list(),
+          py::arg("enable_object_dump") = true,
           "Create a new ExecutionEngine instance for the given Module. The "
           "module must contain only dialects that can be translated to LLVM. 
" "Perform transformations and code generation at the optimization " diff --git a/mlir/lib/CAPI/ExecutionEngine/ExecutionEngine.cpp b/mlir/lib/CAPI/ExecutionEngine/ExecutionEngine.cpp --- a/mlir/lib/CAPI/ExecutionEngine/ExecutionEngine.cpp +++ b/mlir/lib/CAPI/ExecutionEngine/ExecutionEngine.cpp @@ -19,7 +19,8 @@ extern "C" MlirExecutionEngine mlirExecutionEngineCreate(MlirModule op, int optLevel, int numPaths, - const MlirStringRef *sharedLibPaths) { + const MlirStringRef *sharedLibPaths, + bool enableObjectDump) { static bool initOnce = [] { llvm::InitializeNativeTarget(); llvm::InitializeNativeTargetAsmParser(); // needed for inline_asm @@ -54,6 +55,7 @@ jitOptions.transformer = transformer; jitOptions.jitCodeGenOptLevel = llvmOptLevel; jitOptions.sharedLibPaths = libPaths; + jitOptions.enableObjectDump = enableObjectDump; auto jitOrError = ExecutionEngine::create(unwrap(op), jitOptions); if (!jitOrError) { consumeError(jitOrError.takeError()); diff --git a/mlir/lib/ExecutionEngine/ExecutionEngine.cpp b/mlir/lib/ExecutionEngine/ExecutionEngine.cpp --- a/mlir/lib/ExecutionEngine/ExecutionEngine.cpp +++ b/mlir/lib/ExecutionEngine/ExecutionEngine.cpp @@ -96,12 +96,27 @@ file->keep(); } +bool SimpleObjectCache::isEmpty() { return cachedObjects.size() == 0; } + void ExecutionEngine::dumpToObjectFile(StringRef filename) { if (cache == nullptr) { llvm::errs() << "cannot dump ExecutionEngine object code to file: " "object cache is disabled\n"; return; } + // Compilation is lazy and it doesn't populate object cache unless requested. + // In case object dump is requested before cache is populated, we need to + // force compilation manually. 
+  if (cache->isEmpty()) {
+    for (std::string &functionName : functionNames) {
+      auto result = lookupPacked(functionName);
+      if (!result) {
+        llvm::errs() << "Could not compile " << functionName << ":\n "
+                     << result.takeError() << "\n";
+        return;
+      }
+    }
+  }
   cache->dumpToObjectFile(filename);
 }
 
@@ -214,10 +229,11 @@
   }
 }
 
-ExecutionEngine::ExecutionEngine(bool enableObjectCache,
+ExecutionEngine::ExecutionEngine(bool enableObjectDump,
                                  bool enableGDBNotificationListener,
                                  bool enablePerfNotificationListener)
-    : cache(enableObjectCache ? new SimpleObjectCache() : nullptr),
+    : cache(enableObjectDump ? new SimpleObjectCache() : nullptr),
+      functionNames(),
       gdbListener(enableGDBNotificationListener
                       ? llvm::JITEventListener::createGDBRegistrationListener()
                       : nullptr),
@@ -234,9 +250,17 @@
 Expected<std::unique_ptr<ExecutionEngine>>
 ExecutionEngine::create(Operation *m, const ExecutionEngineOptions &options) {
   auto engine = std::make_unique<ExecutionEngine>(
-      options.enableObjectCache, options.enableGDBNotificationListener,
+      options.enableObjectDump, options.enableGDBNotificationListener,
       options.enablePerfNotificationListener);
 
+  // Remember all entry-points if object dumping is enabled.
+  if (options.enableObjectDump) {
+    for (auto funcOp : m->getRegion(0).getOps<LLVM::LLVMFuncOp>()) {
+      StringRef funcName = funcOp.getSymName();
+      engine->functionNames.push_back(funcName.str());
+    }
+  }
+
   std::unique_ptr<llvm::LLVMContext> ctx(new llvm::LLVMContext);
   auto llvmModule = options.llvmModuleBuilder ?
options.llvmModuleBuilder(m, *ctx)
diff --git a/mlir/lib/ExecutionEngine/JitRunner.cpp b/mlir/lib/ExecutionEngine/JitRunner.cpp
--- a/mlir/lib/ExecutionEngine/JitRunner.cpp
+++ b/mlir/lib/ExecutionEngine/JitRunner.cpp
@@ -229,7 +229,7 @@
   engineOptions.transformer = config.transformer;
   engineOptions.jitCodeGenOptLevel = jitCodeGenOptLevel;
   engineOptions.sharedLibPaths = executionEngineLibs;
-  engineOptions.enableObjectCache = true;
+  engineOptions.enableObjectDump = true;
   auto expectedEngine = mlir::ExecutionEngine::create(module, engineOptions);
   if (!expectedEngine)
     return expectedEngine.takeError();
diff --git a/mlir/test/CAPI/execution_engine.c b/mlir/test/CAPI/execution_engine.c
--- a/mlir/test/CAPI/execution_engine.c
+++ b/mlir/test/CAPI/execution_engine.c
@@ -63,7 +63,8 @@
   lowerModuleToLLVM(ctx, module);
   mlirRegisterAllLLVMTranslations(ctx);
   MlirExecutionEngine jit = mlirExecutionEngineCreate(
-      module, /*optLevel=*/2, /*numPaths=*/0, /*sharedLibPaths=*/NULL);
+      module, /*optLevel=*/2, /*numPaths=*/0, /*sharedLibPaths=*/NULL,
+      /*enableObjectDump=*/false);
   if (mlirExecutionEngineIsNull(jit)) {
     fprintf(stderr, "Execution engine creation failed");
     exit(2);
diff --git a/mlir/test/python/execution_engine.py b/mlir/test/python/execution_engine.py
--- a/mlir/test/python/execution_engine.py
+++ b/mlir/test/python/execution_engine.py
@@ -1,6 +1,6 @@
 # RUN: %PYTHON %s 2>&1 | FileCheck %s
 # REQUIRES: host-supports-jit
-import gc, sys
+import gc, sys, os, tempfile
 from mlir.ir import *
 from mlir.passmanager import *
 from mlir.execution_engine import *
@@ -552,3 +552,40 @@
 run(testNanoTime)
+
+
+# Test dumping the JIT-compiled module to an object file.
+# CHECK-LABEL: TEST: testDumpToObjectFile
+def testDumpToObjectFile():
+    _, object_path = tempfile.mkstemp(suffix=".o")
+
+    try:
+        with Context():
+            module = Module.parse("""
+            module {
+            func.func @main() attributes { llvm.emit_c_interface } {
+                return
+            }
+            }""")
+
+            execution_engine = ExecutionEngine(
+                lowerToLLVM(module),
+                opt_level=3)
+
+            # CHECK: Object file exists: True
+            print(f"Object file exists: {os.path.exists(object_path)}")
+            # CHECK: Object file is empty: True
+            print(f"Object file is empty: {os.path.getsize(object_path) == 0}")
+
+            execution_engine.dump_to_object_file(object_path)
+
+            # CHECK: Object file exists: True
+            print(f"Object file exists: {os.path.exists(object_path)}")
+            # CHECK: Object file is empty: False
+            print(f"Object file is empty: {os.path.getsize(object_path) == 0}")
+
+    finally:
+        os.remove(object_path)
+
+
+run(testDumpToObjectFile)