diff --git a/mlir/include/mlir/ExecutionEngine/ExecutionEngine.h b/mlir/include/mlir/ExecutionEngine/ExecutionEngine.h
--- a/mlir/include/mlir/ExecutionEngine/ExecutionEngine.h
+++ b/mlir/include/mlir/ExecutionEngine/ExecutionEngine.h
@@ -34,7 +34,7 @@
 namespace mlir {
 
-class ModuleOp;
+class Operation;
 
 /// A simple object cache following Lang's LLJITWithObjectCache example.
 class SimpleObjectCache : public llvm::ObjectCache {
@@ -54,7 +54,7 @@
   /// If `llvmModuleBuilder` is provided, it will be used to create LLVM module
   /// from the given MLIR module. Otherwise, a default `translateModuleToLLVMIR`
   /// function will be used to translate MLIR module to LLVM IR.
-  llvm::function_ref<std::unique_ptr<llvm::Module>(ModuleOp,
+  llvm::function_ref<std::unique_ptr<llvm::Module>(Operation *,
                                                    llvm::LLVMContext &)>
       llvmModuleBuilder = nullptr;
@@ -106,7 +106,7 @@
 
   /// Creates an execution engine for the given module.
   static llvm::Expected<std::unique_ptr<ExecutionEngine>>
-  create(ModuleOp m, const ExecutionEngineOptions &options = {});
+  create(Operation *m, const ExecutionEngineOptions &options = {});
 
   /// Looks up a packed-argument function wrapping the function with the given
   /// name and returns a pointer to it. Propagates errors in case of failure.
diff --git a/mlir/include/mlir/ExecutionEngine/JitRunner.h b/mlir/include/mlir/ExecutionEngine/JitRunner.h
--- a/mlir/include/mlir/ExecutionEngine/JitRunner.h
+++ b/mlir/include/mlir/ExecutionEngine/JitRunner.h
@@ -33,17 +33,18 @@
 namespace mlir {
 
 class DialectRegistry;
-class ModuleOp;
+class Operation;
 struct LogicalResult;
 
 struct JitRunnerConfig {
   /// MLIR transformer applied after parsing the input into MLIR IR and before
   /// passing the MLIR module to the ExecutionEngine.
-  llvm::function_ref<LogicalResult(mlir::ModuleOp)> mlirTransformer = nullptr;
+  llvm::function_ref<LogicalResult(mlir::Operation *)> mlirTransformer =
+      nullptr;
 
   /// A custom function that is passed to ExecutionEngine. It processes MLIR
   /// module and creates LLVM IR module.
-  llvm::function_ref<std::unique_ptr<llvm::Module>(ModuleOp,
+  llvm::function_ref<std::unique_ptr<llvm::Module>(Operation *,
                                                    llvm::LLVMContext &)>
       llvmModuleBuilder = nullptr;
diff --git a/mlir/lib/ExecutionEngine/ExecutionEngine.cpp b/mlir/lib/ExecutionEngine/ExecutionEngine.cpp
--- a/mlir/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/mlir/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -232,7 +232,7 @@
 }
 
 Expected<std::unique_ptr<ExecutionEngine>>
-ExecutionEngine::create(ModuleOp m, const ExecutionEngineOptions &options) {
+ExecutionEngine::create(Operation *m, const ExecutionEngineOptions &options) {
   auto engine = std::make_unique<ExecutionEngine>(
       options.enableObjectCache, options.enableGDBNotificationListener,
       options.enablePerfNotificationListener);
diff --git a/mlir/lib/ExecutionEngine/JitRunner.cpp b/mlir/lib/ExecutionEngine/JitRunner.cpp
--- a/mlir/lib/ExecutionEngine/JitRunner.cpp
+++ b/mlir/lib/ExecutionEngine/JitRunner.cpp
@@ -99,7 +99,7 @@
 
   /// A custom function that is passed to ExecutionEngine. It processes MLIR
   /// module and creates LLVM IR module.
-  llvm::function_ref<std::unique_ptr<llvm::Module>(ModuleOp,
+  llvm::function_ref<std::unique_ptr<llvm::Module>(Operation *,
                                                    llvm::LLVMContext &)>
       llvmModuleBuilder;
@@ -111,8 +111,8 @@
 } // namespace
 
-static OwningOpRef<ModuleOp> parseMLIRInput(StringRef inputFilename,
-                                            MLIRContext *context) {
+static OwningOpRef<Operation *> parseMLIRInput(StringRef inputFilename,
+                                               MLIRContext *context) {
   // Set up the input file.
   std::string errorMessage;
   auto file = openInputFile(inputFilename, &errorMessage);
@@ -123,7 +123,14 @@
 
   llvm::SourceMgr sourceMgr;
   sourceMgr.AddNewSourceBuffer(std::move(file), SMLoc());
-  return parseSourceFile<ModuleOp>(sourceMgr, context);
+  OwningOpRef<Operation *> module = parseSourceFileForTool(sourceMgr, context);
+  if (!module)
+    return nullptr;
+  if (!module.get()->hasTrait<OpTrait::SymbolTable>()) {
+    llvm::errs() << "Error: top-level op must be a symbol table.\n";
+    return nullptr;
+  }
+  return module;
 }
 
 static inline Error makeStringError(const Twine &message) {
@@ -148,7 +155,7 @@
 }
 
 // JIT-compile the given module and run "entryPoint" with "args" as arguments.
-static Error compileAndExecute(Options &options, ModuleOp module,
+static Error compileAndExecute(Options &options, Operation *module,
                                StringRef entryPoint,
                                CompileAndExecuteConfig config, void **args) {
   Optional<llvm::CodeGenOpt::Level> jitCodeGenOptLevel;
@@ -240,10 +247,11 @@
   return Error::success();
 }
 
-static Error compileAndExecuteVoidFunction(Options &options, ModuleOp module,
+static Error compileAndExecuteVoidFunction(Options &options, Operation *module,
                                            StringRef entryPoint,
                                            CompileAndExecuteConfig config) {
-  auto mainFunction = module.lookupSymbol<LLVM::LLVMFuncOp>(entryPoint);
+  auto mainFunction = dyn_cast_or_null<LLVM::LLVMFuncOp>(
+      SymbolTable::lookupSymbolIn(module, entryPoint));
   if (!mainFunction || mainFunction.empty())
     return makeStringError("entry point not found");
   void *empty = nullptr;
@@ -283,10 +291,11 @@
   return Error::success();
 }
 
 template <typename Type>
-Error compileAndExecuteSingleReturnFunction(Options &options, ModuleOp module,
+Error compileAndExecuteSingleReturnFunction(Options &options, Operation *module,
                                             StringRef entryPoint,
                                             CompileAndExecuteConfig config) {
-  auto mainFunction = module.lookupSymbol<LLVM::LLVMFuncOp>(entryPoint);
+  auto mainFunction = dyn_cast_or_null<LLVM::LLVMFuncOp>(
+      SymbolTable::lookupSymbolIn(module, entryPoint));
   if (!mainFunction || mainFunction.isExternal())
     return makeStringError("entry point not found");
@@ -320,6 +329,7 @@
   // Create the options struct containing the command line options for the
   // runner. This must come before the command line options are parsed.
   Options options;
+  registerToolParserCLOptions();
   llvm::cl::ParseCommandLineOptions(argc, argv, "MLIR CPU execution driver\n");
 
   if (options.hostSupportsJit) {
@@ -370,7 +380,7 @@
 
   // Get the function used to compile and execute the module.
   using CompileAndExecuteFnT =
-      Error (*)(Options &, ModuleOp, StringRef, CompileAndExecuteConfig);
+      Error (*)(Options &, Operation *, StringRef, CompileAndExecuteConfig);
   auto compileAndExecuteFn =
       StringSwitch<CompileAndExecuteFnT>(options.mainFuncType.getValue())
           .Case("i32", compileAndExecuteSingleReturnFunction<int32_t>)
diff --git a/mlir/test/mlir-cpu-runner/invalid.mlir b/mlir/test/mlir-cpu-runner/invalid.mlir
new file
--- /dev/null
+++ b/mlir/test/mlir-cpu-runner/invalid.mlir
@@ -0,0 +1,4 @@
+// RUN: not mlir-cpu-runner --no-implicit-module %s |& FileCheck %s
+
+// CHECK: Error: top-level op must be a symbol table.
+llvm.func @main()
diff --git a/mlir/tools/mlir-spirv-cpu-runner/mlir-spirv-cpu-runner.cpp b/mlir/tools/mlir-spirv-cpu-runner/mlir-spirv-cpu-runner.cpp
--- a/mlir/tools/mlir-spirv-cpu-runner/mlir-spirv-cpu-runner.cpp
+++ b/mlir/tools/mlir-spirv-cpu-runner/mlir-spirv-cpu-runner.cpp
@@ -51,7 +51,10 @@
 /// Each of these two modules is translated to LLVM IR module, then they are
 /// linked together and returned.
 static std::unique_ptr<llvm::Module>
-convertMLIRModule(ModuleOp module, llvm::LLVMContext &context) {
+convertMLIRModule(Operation *op, llvm::LLVMContext &context) {
+  auto module = dyn_cast<ModuleOp>(op);
+  if (!module)
+    return op->emitError("op must be a 'builtin.module'"), nullptr;
   // Verify that there is only one nested module.
   auto modules = module.getOps<ModuleOp>();
   if (!llvm::hasSingleElement(modules)) {
@@ -71,8 +74,9 @@
   return mainModule;
 }
 
-static LogicalResult runMLIRPasses(ModuleOp module) {
-  PassManager passManager(module.getContext());
+static LogicalResult runMLIRPasses(Operation *module) {
+  PassManager passManager(module->getContext(),
+                          module->getName().getStringRef());
   applyPassManagerCLOptions(passManager);
   passManager.addPass(createGpuKernelOutliningPass());
   passManager.addPass(createConvertGPUToSPIRVPass(/*mapMemorySpace=*/true));
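
Illustrative usage (not part of the patch): a minimal sketch of how a client could drive the generalized ExecutionEngine::create(Operation *, ...) after this change. It assumes the operation passed in is a symbol-table op already lowered to the LLVM dialect, that the LLVM dialect translation interfaces are registered on its context, and that the entry point is named "main"; the helper name runJitOnOperation and those parameters are placeholders, not part of this patch.

// Sketch only: exercises the new Operation*-based ExecutionEngine API.
#include "mlir/ExecutionEngine/ExecutionEngine.h"
#include "mlir/ExecutionEngine/OptUtils.h"
#include "mlir/IR/Operation.h"
#include "llvm/Support/TargetSelect.h"

static llvm::Error runJitOnOperation(mlir::Operation *op) {
  // The JIT needs the native target and asm printer registered once per
  // process.
  llvm::InitializeNativeTarget();
  llvm::InitializeNativeTargetAsmPrinter();

  // Keep the optimizing transformer alive while the engine uses it:
  // ExecutionEngineOptions::transformer is a non-owning function_ref.
  auto transformer = mlir::makeOptimizingTransformer(
      /*optLevel=*/2, /*sizeLevel=*/0, /*targetMachine=*/nullptr);
  mlir::ExecutionEngineOptions options;
  options.transformer = transformer;

  // Any symbol-table operation is accepted now, not just a builtin.module.
  auto expectedEngine = mlir::ExecutionEngine::create(op, options);
  if (!expectedEngine)
    return expectedEngine.takeError();

  // Run the packed-argument wrapper of the entry point.
  return (*expectedEngine)->invokePacked("main");
}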