diff --git a/mlir/include/mlir-c/ExecutionEngine.h b/mlir/include/mlir-c/ExecutionEngine.h
--- a/mlir/include/mlir-c/ExecutionEngine.h
+++ b/mlir/include/mlir-c/ExecutionEngine.h
@@ -38,10 +38,13 @@
 /// ownership stays with the client and can be destroyed as soon as the call
 /// returns. `optLevel` is the optimization level to be used for transformation
 /// and code generation. LLVM passes at `optLevel` are run before code
-/// generation.
+/// generation. The number and array of paths corresponding to shared libraries
+/// that will be loaded are specified via `numPaths` and `sharedLibPaths`
+/// respectively.
 /// TODO: figure out other options.
-MLIR_CAPI_EXPORTED MlirExecutionEngine mlirExecutionEngineCreate(MlirModule op,
-                                                                 int optLevel);
+MLIR_CAPI_EXPORTED MlirExecutionEngine
+mlirExecutionEngineCreate(MlirModule op, int optLevel, int numPaths,
+                          const MlirStringRef *sharedLibPaths);
 
 /// Destroy an ExecutionEngine instance.
 MLIR_CAPI_EXPORTED void mlirExecutionEngineDestroy(MlirExecutionEngine jit);
diff --git a/mlir/lib/Bindings/Python/ExecutionEngine.cpp b/mlir/lib/Bindings/Python/ExecutionEngine.cpp
--- a/mlir/lib/Bindings/Python/ExecutionEngine.cpp
+++ b/mlir/lib/Bindings/Python/ExecutionEngine.cpp
@@ -59,20 +59,26 @@
   // Mapping of the top-level PassManager
   //----------------------------------------------------------------------------
   py::class_<PyExecutionEngine>(m, "ExecutionEngine")
-      .def(py::init<>([](PyModule &module, int optLevel) {
-             MlirExecutionEngine executionEngine =
-                 mlirExecutionEngineCreate(module.get(), optLevel);
+      .def(py::init<>([](PyModule &module, int optLevel,
+                         const std::vector<std::string> &sharedLibPaths) {
+             llvm::SmallVector<MlirStringRef, 4> libPaths;
+             for (const std::string &path : sharedLibPaths)
+               libPaths.push_back({path.c_str(), path.length()});
+             MlirExecutionEngine executionEngine = mlirExecutionEngineCreate(
+                 module.get(), optLevel, libPaths.size(), libPaths.data());
             if (mlirExecutionEngineIsNull(executionEngine))
               throw std::runtime_error(
                   "Failure while creating the ExecutionEngine.");
             return new PyExecutionEngine(executionEngine);
           }),
           py::arg("module"), py::arg("opt_level") = 2,
+          py::arg("shared_libs") = py::list(),
           "Create a new ExecutionEngine instance for the given Module. The "
           "module must contain only dialects that can be translated to LLVM. "
           "Perform transformations and code generation at the optimization "
           "level `opt_level` if specified, or otherwise at the default "
-          "level of two (-O2).")
+          "level of two (-O2). Load a list of libraries specified in "
+          "`shared_libs`.")
       .def_property_readonly(MLIR_PYTHON_CAPI_PTR_ATTR,
                              &PyExecutionEngine::getCapsule)
       .def("_testing_release", &PyExecutionEngine::release,
diff --git a/mlir/lib/CAPI/ExecutionEngine/ExecutionEngine.cpp b/mlir/lib/CAPI/ExecutionEngine/ExecutionEngine.cpp
--- a/mlir/lib/CAPI/ExecutionEngine/ExecutionEngine.cpp
+++ b/mlir/lib/CAPI/ExecutionEngine/ExecutionEngine.cpp
@@ -17,8 +17,9 @@
 
 using namespace mlir;
 
-extern "C" MlirExecutionEngine mlirExecutionEngineCreate(MlirModule op,
-                                                         int optLevel) {
+extern "C" MlirExecutionEngine
+mlirExecutionEngineCreate(MlirModule op, int optLevel, int numPaths,
+                          const MlirStringRef *sharedLibPaths) {
   static bool initOnce = [] {
     llvm::InitializeNativeTarget();
     llvm::InitializeNativeTargetAsmPrinter();
@@ -39,13 +40,18 @@
     return MlirExecutionEngine{nullptr};
   }
 
+  SmallVector<StringRef, 4> libPaths;
+  for (unsigned i = 0; i < static_cast<unsigned>(numPaths); ++i)
+    libPaths.push_back(sharedLibPaths[i].data);
+
   // Create a transformer to run all LLVM optimization passes at the
   // specified optimization level.
   auto llvmOptLevel = static_cast<llvm::CodeGenOpt::Level>(optLevel);
   auto transformer = mlir::makeLLVMPassesTransformer(
       /*passes=*/{}, llvmOptLevel, /*targetMachine=*/tmOrError->get());
-  auto jitOrError = ExecutionEngine::create(
-      unwrap(op), /*llvmModuleBuilder=*/{}, transformer, llvmOptLevel);
+  auto jitOrError =
+      ExecutionEngine::create(unwrap(op), /*llvmModuleBuilder=*/{}, transformer,
+                              llvmOptLevel, libPaths);
   if (!jitOrError) {
     consumeError(jitOrError.takeError());
     return MlirExecutionEngine{nullptr};
diff --git a/mlir/test/CAPI/execution_engine.c b/mlir/test/CAPI/execution_engine.c
--- a/mlir/test/CAPI/execution_engine.c
+++ b/mlir/test/CAPI/execution_engine.c
@@ -48,7 +48,8 @@
   // clang-format on
   lowerModuleToLLVM(ctx, module);
   mlirRegisterAllLLVMTranslations(ctx);
-  MlirExecutionEngine jit = mlirExecutionEngineCreate(module, /*optLevel=*/2);
+  MlirExecutionEngine jit = mlirExecutionEngineCreate(
+      module, /*optLevel=*/2, /*numPaths=*/0, /*sharedLibPaths=*/NULL);
   if (mlirExecutionEngineIsNull(jit)) {
     fprintf(stderr, "Execution engine creation failed");
     exit(2);
diff --git a/mlir/test/python/execution_engine.py b/mlir/test/python/execution_engine.py
--- a/mlir/test/python/execution_engine.py
+++ b/mlir/test/python/execution_engine.py
@@ -219,7 +219,7 @@
 run(testRankedMemRefCallback)
 
 
-# Test addition of two memref
+# Test addition of two memrefs.
 # CHECK-LABEL: TEST: testMemrefAdd
 def testMemrefAdd():
   with Context():
@@ -308,3 +308,34 @@
     log(np.allclose(arg1+arg2, res))
 
 run(testDynamicMemrefAdd2D)
+
+# Test loading of shared libraries.
+# CHECK-LABEL: TEST: testSharedLibLoad +def testSharedLibLoad(): + with Context(): + module = Module.parse( + """ + module { + func @main(%arg0: memref<1xf32>) attributes { llvm.emit_c_interface } { + %c0 = constant 0 : index + %cst42 = constant 42.0 : f32 + memref.store %cst42, %arg0[%c0] : memref<1xf32> + %u_memref = memref.cast %arg0 : memref<1xf32> to memref<*xf32> + call @print_memref_f32(%u_memref) : (memref<*xf32>) -> () + return + } + func private @print_memref_f32(memref<*xf32>) attributes { llvm.emit_c_interface } + } """ + ) + arg0 = np.array([0.0]).astype(np.float32) + + arg0_memref_ptr = ctypes.pointer(ctypes.pointer(get_ranked_memref_descriptor(arg0))) + + execution_engine = ExecutionEngine(lowerToLLVM(module), opt_level=3, + shared_libs=["../../../../lib/libmlir_runner_utils.so", + "../../../../lib/libmlir_c_runner_utils.so"]) + execution_engine.invoke("main", arg0_memref_ptr) + # CHECK: Unranked Memref + # CHECK-NEXT: [42] + +run(testSharedLibLoad)
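
Note (not part of the patch): a minimal caller-side sketch of the extended C API entry point with actual library paths, to complement the C test above, which passes numPaths=0. It assumes a `module` that has already been lowered to the LLVM dialect, as in mlir/test/CAPI/execution_engine.c; the library paths are illustrative placeholders that depend on the local build layout.

  // Hypothetical sketch: ask the JIT to load two runner-utils libraries at
  // creation time. Paths are placeholders, not shipped by this change.
  MlirStringRef libs[2] = {
      mlirStringRefCreateFromCString("lib/libmlir_runner_utils.so"),
      mlirStringRefCreateFromCString("lib/libmlir_c_runner_utils.so")};
  MlirExecutionEngine jit = mlirExecutionEngineCreate(
      module, /*optLevel=*/2, /*numPaths=*/2, /*sharedLibPaths=*/libs);
  if (mlirExecutionEngineIsNull(jit)) {
    fprintf(stderr, "Execution engine creation failed");
    exit(2);
  }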