diff --git a/flang/include/flang/Lower/CallInterface.h b/flang/include/flang/Lower/CallInterface.h --- a/flang/include/flang/Lower/CallInterface.h +++ b/flang/include/flang/Lower/CallInterface.h @@ -29,6 +29,7 @@ #include "flang/Common/reference.h" #include "flang/Evaluate/characteristics.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/BuiltinOps.h" #include #include diff --git a/flang/include/flang/Optimizer/Builder/FIRBuilder.h b/flang/include/flang/Optimizer/Builder/FIRBuilder.h --- a/flang/include/flang/Optimizer/Builder/FIRBuilder.h +++ b/flang/include/flang/Optimizer/Builder/FIRBuilder.h @@ -19,6 +19,7 @@ #include "flang/Optimizer/Dialect/FIROps.h" #include "flang/Optimizer/Dialect/FIRType.h" #include "flang/Optimizer/Support/KindMapping.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinOps.h" diff --git a/flang/include/flang/Optimizer/Builder/LowLevelIntrinsics.h b/flang/include/flang/Optimizer/Builder/LowLevelIntrinsics.h --- a/flang/include/flang/Optimizer/Builder/LowLevelIntrinsics.h +++ b/flang/include/flang/Optimizer/Builder/LowLevelIntrinsics.h @@ -14,8 +14,10 @@ #define FLANG_OPTIMIZER_BUILDER_LOWLEVELINTRINSICS_H namespace mlir { +namespace func { class FuncOp; -} +} // namespace func +} // namespace mlir namespace fir { class FirOpBuilder; } @@ -23,10 +25,10 @@ namespace fir::factory { /// Get the `llvm.stacksave` intrinsic. -mlir::FuncOp getLlvmStackSave(FirOpBuilder &builder); +mlir::func::FuncOp getLlvmStackSave(FirOpBuilder &builder); /// Get the `llvm.stackrestore` intrinsic. -mlir::FuncOp getLlvmStackRestore(FirOpBuilder &builder); +mlir::func::FuncOp getLlvmStackRestore(FirOpBuilder &builder); } // namespace fir::factory diff --git a/flang/test/Fir/Todo/boxproc_host.fir b/flang/test/Fir/Todo/boxproc_host.fir --- a/flang/test/Fir/Todo/boxproc_host.fir +++ b/flang/test/Fir/Todo/boxproc_host.fir @@ -3,7 +3,7 @@ // Test that `fir.boxproc_host` fails conversion to llvm. 
// At the moment this test fails since `fir.boxproc` type does not have a conversion. -// CHECK: failed to legalize operation 'builtin.func' +// CHECK: failed to legalize operation 'func.func' func @test(%bproc: !fir.boxproc<(i32) -> ()>) { %tuple = fir.boxproc_host %bproc : (!fir.boxproc<(i32) -> ()>) -> (!fir.ref>) return diff --git a/flang/test/Fir/Todo/unboxproc.fir b/flang/test/Fir/Todo/unboxproc.fir --- a/flang/test/Fir/Todo/unboxproc.fir +++ b/flang/test/Fir/Todo/unboxproc.fir @@ -4,7 +4,7 @@ // Not implemented yet. // Currently fails since coversion for boxproc type is not implemented. -// CHECK: failed to legalize operation 'builtin.func' +// CHECK: failed to legalize operation 'func.func' func @boxing_match(%bproc: !fir.boxproc<(i32) -> ()>) { %ubproc:2 = fir.unboxproc %bproc : (!fir.boxproc<(i32) -> ()>) -> ((i32) -> (), !fir.ref>) return diff --git a/flang/unittests/Optimizer/Builder/CharacterTest.cpp b/flang/unittests/Optimizer/Builder/CharacterTest.cpp --- a/flang/unittests/Optimizer/Builder/CharacterTest.cpp +++ b/flang/unittests/Optimizer/Builder/CharacterTest.cpp @@ -16,6 +16,8 @@ struct CharacterTest : public testing::Test { public: void SetUp() override { + fir::support::loadDialects(context); + kindMap = std::make_unique(&context, "i10:80,l3:24,a1:8,r54:Double,c20:X86_FP80,r11:PPC_FP128," "r12:FP128,r13:X86_FP80,r14:Double,r15:Float,r16:Half,r23:BFloat"); @@ -31,7 +33,6 @@ mod.push_back(mod); builder.setInsertionPointToStart(entryBlock); - fir::support::loadDialects(context); firBuilder = std::make_unique(mod, *kindMap); } diff --git a/flang/unittests/Optimizer/Builder/ComplexTest.cpp b/flang/unittests/Optimizer/Builder/ComplexTest.cpp --- a/flang/unittests/Optimizer/Builder/ComplexTest.cpp +++ b/flang/unittests/Optimizer/Builder/ComplexTest.cpp @@ -15,6 +15,8 @@ struct ComplexTest : public testing::Test { public: void SetUp() override { + fir::support::loadDialects(context); + mlir::OpBuilder builder(&context); auto loc = 
builder.getUnknownLoc(); @@ -27,7 +29,6 @@ mod.push_back(mod); builder.setInsertionPointToStart(entryBlock); - fir::support::loadDialects(context); kindMap = std::make_unique(&context); firBuilder = std::make_unique(mod, *kindMap); helper = std::make_unique(*firBuilder, loc); diff --git a/flang/unittests/Optimizer/Builder/FIRBuilderTest.cpp b/flang/unittests/Optimizer/Builder/FIRBuilderTest.cpp --- a/flang/unittests/Optimizer/Builder/FIRBuilderTest.cpp +++ b/flang/unittests/Optimizer/Builder/FIRBuilderTest.cpp @@ -17,6 +17,8 @@ struct FIRBuilderTest : public testing::Test { public: void SetUp() override { + fir::support::loadDialects(context); + llvm::ArrayRef defs; fir::KindMapping kindMap(&context, defs); mlir::OpBuilder builder(&context); @@ -31,7 +33,6 @@ mod.push_back(mod); builder.setInsertionPointToStart(entryBlock); - fir::support::loadDialects(context); firBuilder = std::make_unique(mod, kindMap); } diff --git a/flang/unittests/Optimizer/Builder/Runtime/RuntimeCallTestBase.h b/flang/unittests/Optimizer/Builder/Runtime/RuntimeCallTestBase.h --- a/flang/unittests/Optimizer/Builder/Runtime/RuntimeCallTestBase.h +++ b/flang/unittests/Optimizer/Builder/Runtime/RuntimeCallTestBase.h @@ -17,6 +17,8 @@ struct RuntimeCallTest : public testing::Test { public: void SetUp() override { + fir::support::loadDialects(context); + mlir::OpBuilder builder(&context); auto loc = builder.getUnknownLoc(); @@ -29,7 +31,6 @@ mod.push_back(mod); builder.setInsertionPointToStart(entryBlock); - fir::support::loadDialects(context); kindMap = std::make_unique(&context); firBuilder = std::make_unique(mod, *kindMap); diff --git a/mlir/benchmark/python/benchmark_sparse.py b/mlir/benchmark/python/benchmark_sparse.py --- a/mlir/benchmark/python/benchmark_sparse.py +++ b/mlir/benchmark/python/benchmark_sparse.py @@ -43,7 +43,7 @@ param2_type = ir.RankedTensorType.get([1500, 2000], f64) result_type = ir.RankedTensorType.get([1000, 2000], f64) with ir.InsertionPoint(module.body): - 
@builtin.FuncOp.from_py_func(param1_type, param2_type, result_type) + @func.FuncOp.from_py_func(param1_type, param2_type, result_type) def sparse_kernel(x, y, z): return matmul_dsl(x, y, outs=[z]) diff --git a/mlir/benchmark/python/common.py b/mlir/benchmark/python/common.py --- a/mlir/benchmark/python/common.py +++ b/mlir/benchmark/python/common.py @@ -41,7 +41,7 @@ return tensor -def get_kernel_func_from_module(module: ir.Module) -> builtin.FuncOp: +def get_kernel_func_from_module(module: ir.Module) -> func.FuncOp: """Takes an mlir module object and extracts the function object out of it. This function only works for a module with one region, one block, and one operation. @@ -55,12 +55,12 @@ return module.operation.regions[0].blocks[0].operations[0] -def emit_timer_func() -> builtin.FuncOp: +def emit_timer_func() -> func.FuncOp: """Returns the declaration of nano_time function. If nano_time function is used, the `MLIR_RUNNER_UTILS` and `MLIR_C_RUNNER_UTILS` must be included. """ i64_type = ir.IntegerType.get_signless(64) - nano_time = builtin.FuncOp( + nano_time = func.FuncOp( "nano_time", ([], [i64_type]), visibility="private") nano_time.attributes["llvm.emit_c_interface"] = ir.UnitAttr.get() return nano_time @@ -76,7 +76,7 @@ """ i64_type = ir.IntegerType.get_signless(64) memref_of_i64_type = ir.MemRefType.get([-1], i64_type) - wrapped_func = builtin.FuncOp( + wrapped_func = func.FuncOp( # Same signature and an extra buffer of indices to save timings. "main", (func.arguments.types + [memref_of_i64_type], func.type.results), diff --git a/mlir/docs/Bindings/Python.md b/mlir/docs/Bindings/Python.md --- a/mlir/docs/Bindings/Python.md +++ b/mlir/docs/Bindings/Python.md @@ -639,7 +639,7 @@ with Context(): module = Module.create() with InsertionPoint(module.body), Location.unknown(): - func = builtin.FuncOp("main", ([], [])) + func = func.FuncOp("main", ([], [])) ``` Also see below for constructors generated from ODS. 
@@ -660,12 +660,12 @@ with InsertionPoint(module.body), Location.unknown(): # Operations can be created in a generic way. func = Operation.create( - "builtin.func", results=[], operands=[], + "func.func", results=[], operands=[], attributes={"type":TypeAttr.get(FunctionType.get([], []))}, successors=None, regions=1) # The result will be downcasted to the concrete `OpView` subclass if # available. - assert isinstance(func, builtin.FuncOp) + assert isinstance(func, func.FuncOp) ``` Regions are created for an operation when constructing it on the C++ side. They diff --git a/mlir/docs/Dialects/ShapeDialect.md b/mlir/docs/Dialects/ShapeDialect.md --- a/mlir/docs/Dialects/ShapeDialect.md +++ b/mlir/docs/Dialects/ShapeDialect.md @@ -34,7 +34,7 @@ ```mlir shape.function_library @shplib { -builtin.func @matmul(%lhs: !shape.value_shape, %rhs: !shape.value_shape) -> !shape.shape { +func.func @matmul(%lhs: !shape.value_shape, %rhs: !shape.value_shape) -> !shape.shape { %c1 = shape.const_size 1 %c2 = shape.const_size 2 // We could also allow rank etc operations directly on value_shape too, that diff --git a/mlir/docs/PassManagement.md b/mlir/docs/PassManagement.md --- a/mlir/docs/PassManagement.md +++ b/mlir/docs/PassManagement.md @@ -532,12 +532,12 @@ default view: ```shell -$ mlir-opt -pass-pipeline='builtin.func(my-pass,my-pass)' foo.mlir -pass-statistics +$ mlir-opt -pass-pipeline='func.func(my-pass,my-pass)' foo.mlir -pass-statistics ===-------------------------------------------------------------------------=== ... Pass statistics report ... 
===-------------------------------------------------------------------------=== -'builtin.func' Pipeline +'func.func' Pipeline MyPass (S) 15 exampleStat - An example statistic VerifierPass @@ -551,7 +551,7 @@ together: ```shell -$ mlir-opt -pass-pipeline='builtin.func(my-pass, my-pass)' foo.mlir -pass-statistics -pass-statistics-display=list +$ mlir-opt -pass-pipeline='func.func(my-pass, my-pass)' foo.mlir -pass-statistics -pass-statistics-display=list ===-------------------------------------------------------------------------=== ... Pass statistics report ... @@ -657,7 +657,7 @@ * `op-name` * This corresponds to the mnemonic name of an operation to run passes on, - e.g. `builtin.func` or `builtin.module`. + e.g. `func.func` or `builtin.module`. * `pass-name` | `pass-pipeline-name` * This corresponds to the argument of a registered pass or pass pipeline, e.g. `cse` or `canonicalize`. @@ -676,7 +676,7 @@ Can also be specified as (via the `-pass-pipeline` flag): ```shell -$ mlir-opt foo.mlir -pass-pipeline='builtin.func(cse,canonicalize),convert-func-to-llvm{use-bare-ptr-memref-call-conv=1}' +$ mlir-opt foo.mlir -pass-pipeline='func.func(cse,canonicalize),convert-func-to-llvm{use-bare-ptr-memref-call-conv=1}' ``` In order to support round-tripping a pass to the textual representation using @@ -997,7 +997,7 @@ `-mlir-timing-display=list`. ```shell -$ mlir-opt foo.mlir -mlir-disable-threading -pass-pipeline='builtin.func(cse,canonicalize)' -convert-func-to-llvm -mlir-timing -mlir-timing-display=list +$ mlir-opt foo.mlir -mlir-disable-threading -pass-pipeline='func.func(cse,canonicalize)' -convert-func-to-llvm -mlir-timing -mlir-timing-display=list ===-------------------------------------------------------------------------=== ... Pass execution timing report ... @@ -1022,7 +1022,7 @@ invalidated and recomputed. This is the default display mode. 
```shell -$ mlir-opt foo.mlir -mlir-disable-threading -pass-pipeline='builtin.func(cse,canonicalize)' -convert-func-to-llvm -mlir-timing +$ mlir-opt foo.mlir -mlir-disable-threading -pass-pipeline='func.func(cse,canonicalize)' -convert-func-to-llvm -mlir-timing ===-------------------------------------------------------------------------=== ... Pass execution timing report ... @@ -1030,7 +1030,7 @@ Total Execution Time: 0.0249 seconds ---Wall Time--- --- Name --- - 0.0058 ( 70.8%) 'builtin.func' Pipeline + 0.0058 ( 70.8%) 'func.func' Pipeline 0.0004 ( 4.3%) CSE 0.0002 ( 2.6%) (A) DominanceInfo 0.0004 ( 4.8%) VerifierPass @@ -1053,7 +1053,7 @@ cpu time. ```shell -$ mlir-opt foo.mlir -pass-pipeline='builtin.func(cse,canonicalize)' -convert-func-to-llvm -mlir-timing +$ mlir-opt foo.mlir -pass-pipeline='func.func(cse,canonicalize)' -convert-func-to-llvm -mlir-timing ===-------------------------------------------------------------------------=== ... Pass execution timing report ... @@ -1061,7 +1061,7 @@ Total Execution Time: 0.0078 seconds ---User Time--- ---Wall Time--- --- Name --- - 0.0177 ( 88.5%) 0.0057 ( 71.3%) 'builtin.func' Pipeline + 0.0177 ( 88.5%) 0.0057 ( 71.3%) 'func.func' Pipeline 0.0044 ( 22.0%) 0.0015 ( 18.9%) CSE 0.0029 ( 14.5%) 0.0012 ( 15.2%) (A) DominanceInfo 0.0038 ( 18.9%) 0.0015 ( 18.7%) VerifierPass @@ -1089,7 +1089,7 @@ * Print the IR before every pass in the pipeline. ```shell -$ mlir-opt foo.mlir -pass-pipeline='builtin.func(cse)' -print-ir-before=cse +$ mlir-opt foo.mlir -pass-pipeline='func.func(cse)' -print-ir-before=cse *** IR Dump Before CSE *** func @simple_constant() -> (i32, i32) { @@ -1105,7 +1105,7 @@ * Print the IR after every pass in the pipeline. ```shell -$ mlir-opt foo.mlir -pass-pipeline='builtin.func(cse)' -print-ir-after=cse +$ mlir-opt foo.mlir -pass-pipeline='func.func(cse)' -print-ir-after=cse *** IR Dump After CSE *** func @simple_constant() -> (i32, i32) { @@ -1126,7 +1126,7 @@ printing. 
```shell -$ mlir-opt foo.mlir -pass-pipeline='builtin.func(cse,cse)' -print-ir-after=cse -print-ir-after-change +$ mlir-opt foo.mlir -pass-pipeline='func.func(cse,cse)' -print-ir-after=cse -print-ir-after-change *** IR Dump After CSE *** func @simple_constant() -> (i32, i32) { @@ -1141,7 +1141,7 @@ above. ```shell -$ mlir-opt foo.mlir -pass-pipeline='builtin.func(cse,bad-pass)' -print-ir-failure +$ mlir-opt foo.mlir -pass-pipeline='func.func(cse,bad-pass)' -print-ir-failure *** IR Dump After BadPass Failed *** func @simple_constant() -> (i32, i32) { @@ -1157,9 +1157,9 @@ is disabled(`-mlir-disable-threading`) ```shell -$ mlir-opt foo.mlir -mlir-disable-threading -pass-pipeline='builtin.func(cse)' -print-ir-after=cse -print-ir-module-scope +$ mlir-opt foo.mlir -mlir-disable-threading -pass-pipeline='func.func(cse)' -print-ir-after=cse -print-ir-module-scope -*** IR Dump After CSE *** ('builtin.func' operation: @bar) +*** IR Dump After CSE *** ('func.func' operation: @bar) func @bar(%arg0: f32, %arg1: f32) -> f32 { ... } @@ -1170,7 +1170,7 @@ return %c1_i32, %c1_i32_0 : i32, i32 } -*** IR Dump After CSE *** ('builtin.func' operation: @simple_constant) +*** IR Dump After CSE *** ('func.func' operation: @simple_constant) func @bar(%arg0: f32, %arg1: f32) -> f32 { ... 
} @@ -1194,7 +1194,7 @@ reproducible may have the form: ```mlir -// configuration: -pass-pipeline='builtin.func(cse,canonicalize),inline' -verify-each +// configuration: -pass-pipeline='func.func(cse,canonicalize),inline' -verify-each module { func @foo() { @@ -1229,7 +1229,7 @@ the following reproducer will be generated: ```mlir -// configuration: -pass-pipeline='builtin.func(canonicalize)' -verify-each -mlir-disable-threading +// configuration: -pass-pipeline='func.func(canonicalize)' -verify-each -mlir-disable-threading module { func @foo() { diff --git a/mlir/docs/SymbolsAndSymbolTables.md b/mlir/docs/SymbolsAndSymbolTables.md --- a/mlir/docs/SymbolsAndSymbolTables.md +++ b/mlir/docs/SymbolsAndSymbolTables.md @@ -31,7 +31,7 @@ within the parent `SymbolTable`. This name is semantically similarly to an SSA result value, and may be referred to by other operations to provide a symbolic link, or use, to the symbol. An example of a `Symbol` operation is -[`builtin.func`](Dialects/Builtin.md/#func-mlirfuncop). `builtin.func` defines a +[`func.func`](Dialects/Builtin.md/#func-mlirfuncop). `func.func` defines a symbol name, which is [referred to](#referencing-a-symbol) by operations like [`func.call`](Dialects/Func.md/#funccall-callop). @@ -77,7 +77,7 @@ Below is an example of how an operation can reference a symbol operation: ```mlir -// This `builtin.func` operation defines a symbol named `symbol`. +// This `func.func` operation defines a symbol named `symbol`. func @symbol() // Our `foo.user` operation contains a SymbolRefAttr with the name of the @@ -106,7 +106,7 @@ // Here we define another nested symbol table, except this time it also defines // a symbol. module @module_symbol { - // This `builtin.func` operation defines a symbol named `nested_symbol`. + // This `func.func` operation defines a symbol named `nested_symbol`. 
func @nested_symbol() } diff --git a/mlir/docs/TargetLLVMIR.md b/mlir/docs/TargetLLVMIR.md --- a/mlir/docs/TargetLLVMIR.md +++ b/mlir/docs/TargetLLVMIR.md @@ -348,7 +348,7 @@ Examples: -This convention is implemented in the conversion of `builtin.func` and `func.call` to +This convention is implemented in the conversion of `func.func` and `func.call` to the LLVM dialect, with the former unpacking the descriptor into a set of individual values and the latter packing those values back into a descriptor so as to make it transparently usable by other operations. Conversions from other diff --git a/mlir/include/mlir/Dialect/Affine/LoopUtils.h b/mlir/include/mlir/Dialect/Affine/LoopUtils.h --- a/mlir/include/mlir/Dialect/Affine/LoopUtils.h +++ b/mlir/include/mlir/Dialect/Affine/LoopUtils.h @@ -262,7 +262,7 @@ void mapLoopToProcessorIds(scf::ForOp forOp, ArrayRef processorId, ArrayRef numProcessors); -/// Gathers all AffineForOps in 'builtin.func' grouped by loop depth. +/// Gathers all AffineForOps in 'func.func' grouped by loop depth. void gatherLoops(func::FuncOp func, std::vector> &depthToLoops); diff --git a/mlir/include/mlir/Dialect/Func/Transforms/Passes.td b/mlir/include/mlir/Dialect/Func/Transforms/Passes.td --- a/mlir/include/mlir/Dialect/Func/Transforms/Passes.td +++ b/mlir/include/mlir/Dialect/Func/Transforms/Passes.td @@ -14,9 +14,9 @@ def FuncBufferize : Pass<"func-bufferize", "ModuleOp"> { let summary = "Bufferize func/call/return ops"; let description = [{ - A bufferize pass that bufferizes builtin.func and func.call ops. + A bufferize pass that bufferizes func.func and func.call ops. - Because this pass updates builtin.func ops, it must be a module pass. It is + Because this pass updates func.func ops, it must be a module pass. It is useful to keep this pass separate from other bufferizations so that the other ones can be run at function-level in parallel. 
diff --git a/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp b/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp --- a/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp +++ b/mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp @@ -2612,7 +2612,7 @@ } } -/// Gathers all AffineForOps in 'builtin.func' grouped by loop depth. +/// Gathers all AffineForOps in 'func.func' grouped by loop depth. void mlir::gatherLoops(FuncOp func, std::vector> &depthToLoops) { for (auto &block : func) diff --git a/mlir/lib/Dialect/Bufferization/IR/CMakeLists.txt b/mlir/lib/Dialect/Bufferization/IR/CMakeLists.txt --- a/mlir/lib/Dialect/Bufferization/IR/CMakeLists.txt +++ b/mlir/lib/Dialect/Bufferization/IR/CMakeLists.txt @@ -13,6 +13,7 @@ LINK_LIBS PUBLIC MLIRDialect + MLIRFunc MLIRIR MLIRTensor MLIRMemRef diff --git a/mlir/lib/Dialect/Func/Transforms/FuncBufferize.cpp b/mlir/lib/Dialect/Func/Transforms/FuncBufferize.cpp --- a/mlir/lib/Dialect/Func/Transforms/FuncBufferize.cpp +++ b/mlir/lib/Dialect/Func/Transforms/FuncBufferize.cpp @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// // -// This file implements bufferization of builtin.func's and func.call's. +// This file implements bufferization of func.func's and func.call's. // //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/Linalg/Transforms/LinalgStrategyPasses.cpp b/mlir/lib/Dialect/Linalg/Transforms/LinalgStrategyPasses.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/LinalgStrategyPasses.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/LinalgStrategyPasses.cpp @@ -362,7 +362,7 @@ hoistRedundantVectorTransfersOnTensor(funcOp); // Run CSE to cleanup after canonicalization. 
- OpPassManager dynamicPM("builtin.func"); + OpPassManager dynamicPM("func.func"); dynamicPM.addPass(createCSEPass()); if (failed(runPipeline(dynamicPM, funcOp))) return signalPassFailure(); diff --git a/mlir/lib/Dialect/Quant/Transforms/CMakeLists.txt b/mlir/lib/Dialect/Quant/Transforms/CMakeLists.txt --- a/mlir/lib/Dialect/Quant/Transforms/CMakeLists.txt +++ b/mlir/lib/Dialect/Quant/Transforms/CMakeLists.txt @@ -10,6 +10,7 @@ LINK_LIBS PUBLIC MLIRArithmetic + MLIRFunc MLIRIR MLIRQuant MLIRQuantUtils diff --git a/mlir/lib/Dialect/SPIRV/Transforms/CMakeLists.txt b/mlir/lib/Dialect/SPIRV/Transforms/CMakeLists.txt --- a/mlir/lib/Dialect/SPIRV/Transforms/CMakeLists.txt +++ b/mlir/lib/Dialect/SPIRV/Transforms/CMakeLists.txt @@ -15,6 +15,7 @@ ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/SPIRV LINK_LIBS PUBLIC + MLIRFunc MLIRSPIRV MLIRTransformUtils ) diff --git a/mlir/lib/Dialect/Shape/IR/CMakeLists.txt b/mlir/lib/Dialect/Shape/IR/CMakeLists.txt --- a/mlir/lib/Dialect/Shape/IR/CMakeLists.txt +++ b/mlir/lib/Dialect/Shape/IR/CMakeLists.txt @@ -16,6 +16,7 @@ MLIRCastInterfaces MLIRControlFlowInterfaces MLIRDialect + MLIRFunc MLIRInferTypeOpInterface MLIRIR MLIRSideEffectInterfaces diff --git a/mlir/lib/Pass/PassRegistry.cpp b/mlir/lib/Pass/PassRegistry.cpp --- a/mlir/lib/Pass/PassRegistry.cpp +++ b/mlir/lib/Pass/PassRegistry.cpp @@ -322,7 +322,7 @@ /// /// A pipeline is defined as a series of names, each of which may in itself /// recursively contain a nested pipeline. A name is either the name of a pass - /// (e.g. "cse") or the name of an operation type (e.g. "builtin.func"). If + /// (e.g. "cse") or the name of an operation type (e.g. "builtin.module"). If /// the name is the name of a pass, the InnerPipeline is empty, since passes /// cannot contain inner pipelines.
struct PipelineElement { diff --git a/mlir/python/mlir/dialects/_builtin_ops_ext.py b/mlir/python/mlir/dialects/_builtin_ops_ext.py --- a/mlir/python/mlir/dialects/_builtin_ops_ext.py +++ b/mlir/python/mlir/dialects/_builtin_ops_ext.py @@ -3,17 +3,10 @@ # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception try: - from typing import Optional, Sequence, Union - - import inspect - from ..ir import * except ImportError as e: raise RuntimeError("Error loading imports from extension module") from e -ARGUMENT_ATTRIBUTE_NAME = "arg_attrs" -RESULT_ATTRIBUTE_NAME = "res_attrs" - class ModuleOp: """Specialization for the module op class.""" diff --git a/mlir/python/mlir/dialects/_func_ops_ext.py b/mlir/python/mlir/dialects/_func_ops_ext.py --- a/mlir/python/mlir/dialects/_func_ops_ext.py +++ b/mlir/python/mlir/dialects/_func_ops_ext.py @@ -4,13 +4,16 @@ try: from ..ir import * - from .builtin import FuncOp from ._ods_common import get_default_loc_context as _get_default_loc_context - from typing import Any, List, Optional, Union + import inspect + + from typing import Any, List, Optional, Sequence, Union except ImportError as e: raise RuntimeError("Error loading imports from extension module") from e +ARGUMENT_ATTRIBUTE_NAME = "arg_attrs" +RESULT_ATTRIBUTE_NAME = "res_attrs" class ConstantOp: """Specialization for the constant op class.""" @@ -23,75 +26,6 @@ return self.results[0].type -class CallOp: - """Specialization for the call op class.""" - - def __init__(self, - calleeOrResults: Union[FuncOp, List[Type]], - argumentsOrCallee: Union[List, FlatSymbolRefAttr, str], - arguments: Optional[List] = None, - *, - loc=None, - ip=None): - """Creates an call operation. - - The constructor accepts three different forms: - - 1. A function op to be called followed by a list of arguments. - 2. A list of result types, followed by the name of the function to be - called as string, following by a list of arguments. - 3. 
A list of result types, followed by the name of the function to be - called as symbol reference attribute, followed by a list of arguments. - - For example - - f = func.FuncOp("foo", ...) - func.CallOp(f, [args]) - func.CallOp([result_types], "foo", [args]) - - In all cases, the location and insertion point may be specified as keyword - arguments if not provided by the surrounding context managers. - """ - - # TODO: consider supporting constructor "overloads", e.g., through a custom - # or pybind-provided metaclass. - if isinstance(calleeOrResults, FuncOp): - if not isinstance(argumentsOrCallee, list): - raise ValueError( - "when constructing a call to a function, expected " + - "the second argument to be a list of call arguments, " + - f"got {type(argumentsOrCallee)}") - if arguments is not None: - raise ValueError("unexpected third argument when constructing a call" + - "to a function") - - super().__init__( - calleeOrResults.type.results, - FlatSymbolRefAttr.get( - calleeOrResults.name.value, - context=_get_default_loc_context(loc)), - argumentsOrCallee, - loc=loc, - ip=ip) - return - - if isinstance(argumentsOrCallee, list): - raise ValueError("when constructing a call to a function by name, " + - "expected the second argument to be a string or a " + - f"FlatSymbolRefAttr, got {type(argumentsOrCallee)}") - - if isinstance(argumentsOrCallee, FlatSymbolRefAttr): - super().__init__( - calleeOrResults, argumentsOrCallee, arguments, loc=loc, ip=ip) - elif isinstance(argumentsOrCallee, str): - super().__init__( - calleeOrResults, - FlatSymbolRefAttr.get( - argumentsOrCallee, context=_get_default_loc_context(loc)), - arguments, - loc=loc, - ip=ip) - class FuncOp: """Specialization for the func op class.""" @@ -295,3 +229,72 @@ return wrapped return decorator + +class CallOp: + """Specialization for the call op class.""" + + def __init__(self, + calleeOrResults: Union[FuncOp, List[Type]], + argumentsOrCallee: Union[List, FlatSymbolRefAttr, str], + arguments: 
Optional[List] = None, + *, + loc=None, + ip=None): + """Creates an call operation. + + The constructor accepts three different forms: + + 1. A function op to be called followed by a list of arguments. + 2. A list of result types, followed by the name of the function to be + called as string, following by a list of arguments. + 3. A list of result types, followed by the name of the function to be + called as symbol reference attribute, followed by a list of arguments. + + For example + + f = func.FuncOp("foo", ...) + func.CallOp(f, [args]) + func.CallOp([result_types], "foo", [args]) + + In all cases, the location and insertion point may be specified as keyword + arguments if not provided by the surrounding context managers. + """ + + # TODO: consider supporting constructor "overloads", e.g., through a custom + # or pybind-provided metaclass. + if isinstance(calleeOrResults, FuncOp): + if not isinstance(argumentsOrCallee, list): + raise ValueError( + "when constructing a call to a function, expected " + + "the second argument to be a list of call arguments, " + + f"got {type(argumentsOrCallee)}") + if arguments is not None: + raise ValueError("unexpected third argument when constructing a call" + + "to a function") + + super().__init__( + calleeOrResults.type.results, + FlatSymbolRefAttr.get( + calleeOrResults.name.value, + context=_get_default_loc_context(loc)), + argumentsOrCallee, + loc=loc, + ip=ip) + return + + if isinstance(argumentsOrCallee, list): + raise ValueError("when constructing a call to a function by name, " + + "expected the second argument to be a string or a " + + f"FlatSymbolRefAttr, got {type(argumentsOrCallee)}") + + if isinstance(argumentsOrCallee, FlatSymbolRefAttr): + super().__init__( + calleeOrResults, argumentsOrCallee, arguments, loc=loc, ip=ip) + elif isinstance(argumentsOrCallee, str): + super().__init__( + calleeOrResults, + FlatSymbolRefAttr.get( + argumentsOrCallee, context=_get_default_loc_context(loc)), + arguments, + loc=loc, + 
ip=ip) diff --git a/mlir/test/Analysis/test-alias-analysis-modref.mlir b/mlir/test/Analysis/test-alias-analysis-modref.mlir --- a/mlir/test/Analysis/test-alias-analysis-modref.mlir +++ b/mlir/test/Analysis/test-alias-analysis-modref.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline='builtin.func(test-alias-analysis-modref)' -split-input-file -allow-unregistered-dialect 2>&1 | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline='func.func(test-alias-analysis-modref)' -split-input-file -allow-unregistered-dialect 2>&1 | FileCheck %s // CHECK-LABEL: Testing : "no_side_effects" // CHECK: alloc -> func.region0#0: NoModRef diff --git a/mlir/test/Analysis/test-alias-analysis.mlir b/mlir/test/Analysis/test-alias-analysis.mlir --- a/mlir/test/Analysis/test-alias-analysis.mlir +++ b/mlir/test/Analysis/test-alias-analysis.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline='builtin.func(test-alias-analysis)' -split-input-file -allow-unregistered-dialect 2>&1 | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline='func.func(test-alias-analysis)' -split-input-file -allow-unregistered-dialect 2>&1 | FileCheck %s // CHECK-LABEL: Testing : "simple" // CHECK-DAG: func.region0#0 <-> func.region0#1: MayAlias diff --git a/mlir/test/Analysis/test-dominance.mlir b/mlir/test/Analysis/test-dominance.mlir --- a/mlir/test/Analysis/test-dominance.mlir +++ b/mlir/test/Analysis/test-dominance.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(test-print-dominance)" -split-input-file 2>&1 | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="func.func(test-print-dominance)" -split-input-file 2>&1 | FileCheck %s // CHECK-LABEL: Testing : func_condBranch func @func_condBranch(%cond : i1) { diff --git a/mlir/test/Analysis/test-liveness.mlir b/mlir/test/Analysis/test-liveness.mlir --- a/mlir/test/Analysis/test-liveness.mlir +++ b/mlir/test/Analysis/test-liveness.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(test-print-liveness)" -split-input-file 2>&1 
| FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="func.func(test-print-liveness)" -split-input-file 2>&1 | FileCheck %s // CHECK-LABEL: Testing : func_empty func @func_empty() { diff --git a/mlir/test/Analysis/test-match-reduction.mlir b/mlir/test/Analysis/test-match-reduction.mlir --- a/mlir/test/Analysis/test-match-reduction.mlir +++ b/mlir/test/Analysis/test-match-reduction.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(test-match-reduction)" -verify-diagnostics -split-input-file +// RUN: mlir-opt %s -pass-pipeline="func.func(test-match-reduction)" -verify-diagnostics -split-input-file // Verify that the generic reduction detection utility works on different // dialects. diff --git a/mlir/test/Analysis/test-shape-fn-report.mlir b/mlir/test/Analysis/test-shape-fn-report.mlir --- a/mlir/test/Analysis/test-shape-fn-report.mlir +++ b/mlir/test/Analysis/test-shape-fn-report.mlir @@ -15,7 +15,7 @@ // The shape function library with some local functions. shape.function_library @shape_lib { // Test shape function that returns the shape of input arg as result shape. 
- builtin.func @same_result_shape(%arg: !shape.value_shape) -> !shape.shape { + func.func @same_result_shape(%arg: !shape.value_shape) -> !shape.shape { %0 = shape.shape_of %arg : !shape.value_shape -> !shape.shape return %0 : !shape.shape } diff --git a/mlir/test/Analysis/test-topoligical-sort.mlir b/mlir/test/Analysis/test-topoligical-sort.mlir --- a/mlir/test/Analysis/test-topoligical-sort.mlir +++ b/mlir/test/Analysis/test-topoligical-sort.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(test-print-topological-sort)" 2>&1 | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="func.func(test-print-topological-sort)" 2>&1 | FileCheck %s // CHECK-LABEL: Testing : region // CHECK: arith.addi {{.*}} : index diff --git a/mlir/test/CAPI/execution_engine.c b/mlir/test/CAPI/execution_engine.c --- a/mlir/test/CAPI/execution_engine.c +++ b/mlir/test/CAPI/execution_engine.c @@ -26,7 +26,7 @@ void lowerModuleToLLVM(MlirContext ctx, MlirModule module) { MlirPassManager pm = mlirPassManagerCreate(ctx); MlirOpPassManager opm = mlirPassManagerGetNestedUnder( - pm, mlirStringRefCreateFromCString("builtin.func")); + pm, mlirStringRefCreateFromCString("func.func")); mlirPassManagerAddOwnedPass(pm, mlirCreateConversionConvertFuncToLLVM()); mlirOpPassManagerAddOwnedPass(opm, mlirCreateConversionConvertArithmeticToLLVM()); diff --git a/mlir/test/CAPI/ir.c b/mlir/test/CAPI/ir.c --- a/mlir/test/CAPI/ir.c +++ b/mlir/test/CAPI/ir.c @@ -101,7 +101,7 @@ mlirIdentifierGet(ctx, mlirStringRefCreateFromCString("sym_name")), funcNameAttr)}; MlirOperationState funcState = mlirOperationStateGet( - mlirStringRefCreateFromCString("builtin.func"), location); + mlirStringRefCreateFromCString("func.func"), location); mlirOperationStateAddAttributes(&funcState, 2, funcAttrs); mlirOperationStateAddOwnedRegions(&funcState, 1, &funcBodyRegion); MlirOperation func = mlirOperationCreate(&funcState); diff --git a/mlir/test/CAPI/pass.c b/mlir/test/CAPI/pass.c --- a/mlir/test/CAPI/pass.c +++ 
b/mlir/test/CAPI/pass.c @@ -42,7 +42,7 @@ // Run the print-op-stats pass on the top-level module: // CHECK-LABEL: Operations encountered: // CHECK: arith.addi , 1 - // CHECK: builtin.func , 1 + // CHECK: func.func , 1 // CHECK: func.return , 1 { MlirPassManager pm = mlirPassManagerCreate(ctx); @@ -84,12 +84,12 @@ // Run the print-op-stats pass on functions under the top-level module: // CHECK-LABEL: Operations encountered: // CHECK: arith.addi , 1 - // CHECK: builtin.func , 1 + // CHECK: func.func , 1 // CHECK: func.return , 1 { MlirPassManager pm = mlirPassManagerCreate(ctx); MlirOpPassManager nestedFuncPm = mlirPassManagerGetNestedUnder( - pm, mlirStringRefCreateFromCString("builtin.func")); + pm, mlirStringRefCreateFromCString("func.func")); MlirPass printOpStatPass = mlirCreateTransformsPrintOpStats(); mlirOpPassManagerAddOwnedPass(nestedFuncPm, printOpStatPass); MlirLogicalResult success = mlirPassManagerRun(pm, module); @@ -100,14 +100,14 @@ // Run the print-op-stats pass on functions under the nested module: // CHECK-LABEL: Operations encountered: // CHECK: arith.addf , 1 - // CHECK: builtin.func , 1 + // CHECK: func.func , 1 // CHECK: func.return , 1 { MlirPassManager pm = mlirPassManagerCreate(ctx); MlirOpPassManager nestedModulePm = mlirPassManagerGetNestedUnder( pm, mlirStringRefCreateFromCString("builtin.module")); MlirOpPassManager nestedFuncPm = mlirOpPassManagerGetNestedUnder( - nestedModulePm, mlirStringRefCreateFromCString("builtin.func")); + nestedModulePm, mlirStringRefCreateFromCString("func.func")); MlirPass printOpStatPass = mlirCreateTransformsPrintOpStats(); mlirOpPassManagerAddOwnedPass(nestedFuncPm, printOpStatPass); MlirLogicalResult success = mlirPassManagerRun(pm, module); @@ -132,19 +132,19 @@ MlirOpPassManager nestedModulePm = mlirPassManagerGetNestedUnder( pm, mlirStringRefCreateFromCString("builtin.module")); MlirOpPassManager nestedFuncPm = mlirOpPassManagerGetNestedUnder( - nestedModulePm, 
mlirStringRefCreateFromCString("builtin.func")); + nestedModulePm, mlirStringRefCreateFromCString("func.func")); MlirPass printOpStatPass = mlirCreateTransformsPrintOpStats(); mlirOpPassManagerAddOwnedPass(nestedFuncPm, printOpStatPass); // Print the top level pass manager - // CHECK: Top-level: builtin.module(builtin.func(print-op-stats)) + // CHECK: Top-level: builtin.module(func.func(print-op-stats)) fprintf(stderr, "Top-level: "); mlirPrintPassPipeline(mlirPassManagerGetAsOpPassManager(pm), printToStderr, NULL); fprintf(stderr, "\n"); // Print the pipeline nested one level down - // CHECK: Nested Module: builtin.func(print-op-stats) + // CHECK: Nested Module: func.func(print-op-stats) fprintf(stderr, "Nested Module: "); mlirPrintPassPipeline(nestedModulePm, printToStderr, NULL); fprintf(stderr, "\n"); @@ -165,8 +165,8 @@ // Try parse a pipeline. MlirLogicalResult status = mlirParsePassPipeline( mlirPassManagerGetAsOpPassManager(pm), - mlirStringRefCreateFromCString( - "builtin.module(builtin.func(print-op-stats), builtin.func(print-op-stats))")); + mlirStringRefCreateFromCString("builtin.module(func.func(print-op-stats)," + " func.func(print-op-stats))")); // Expect a failure, we haven't registered the print-op-stats pass yet. if (mlirLogicalResultIsSuccess(status)) { fprintf(stderr, "Unexpected success parsing pipeline without registering the pass\n"); @@ -176,15 +176,16 @@ mlirRegisterTransformsPrintOpStats(); status = mlirParsePassPipeline( mlirPassManagerGetAsOpPassManager(pm), - mlirStringRefCreateFromCString( - "builtin.module(builtin.func(print-op-stats), builtin.func(print-op-stats))")); + mlirStringRefCreateFromCString("builtin.module(func.func(print-op-stats)," + " func.func(print-op-stats))")); // Expect a failure, we haven't registered the print-op-stats pass yet. 
if (mlirLogicalResultIsFailure(status)) { fprintf(stderr, "Unexpected failure parsing pipeline after registering the pass\n"); exit(EXIT_FAILURE); } - // CHECK: Round-trip: builtin.module(builtin.func(print-op-stats), builtin.func(print-op-stats)) + // CHECK: Round-trip: builtin.module(func.func(print-op-stats), + // func.func(print-op-stats)) fprintf(stderr, "Round-trip: "); mlirPrintPassPipeline(mlirPassManagerGetAsOpPassManager(pm), printToStderr, NULL); diff --git a/mlir/test/Conversion/ArithmeticToLLVM/arith-to-llvm.mlir b/mlir/test/Conversion/ArithmeticToLLVM/arith-to-llvm.mlir --- a/mlir/test/Conversion/ArithmeticToLLVM/arith-to-llvm.mlir +++ b/mlir/test/Conversion/ArithmeticToLLVM/arith-to-llvm.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -pass-pipeline="builtin.func(convert-arith-to-llvm)" %s -split-input-file | FileCheck %s +// RUN: mlir-opt -pass-pipeline="func.func(convert-arith-to-llvm)" %s -split-input-file | FileCheck %s // CHECK-LABEL: @vector_ops func @vector_ops(%arg0: vector<4xf32>, %arg1: vector<4xi1>, %arg2: vector<4xi64>, %arg3: vector<4xi64>) -> vector<4xf32> { diff --git a/mlir/test/Conversion/ArithmeticToLLVM/convert-nd-vector-to-llvmir.mlir b/mlir/test/Conversion/ArithmeticToLLVM/convert-nd-vector-to-llvmir.mlir --- a/mlir/test/Conversion/ArithmeticToLLVM/convert-nd-vector-to-llvmir.mlir +++ b/mlir/test/Conversion/ArithmeticToLLVM/convert-nd-vector-to-llvmir.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -pass-pipeline="builtin.func(convert-arith-to-llvm)" %s -split-input-file | FileCheck %s +// RUN: mlir-opt -pass-pipeline="func.func(convert-arith-to-llvm)" %s -split-input-file | FileCheck %s // CHECK-LABEL: @vec_bin func @vec_bin(%arg0: vector<2x2x2xf32>) -> vector<2x2x2xf32> { diff --git a/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir b/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir --- a/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir +++ 
b/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-complex-to-standard)" | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-complex-to-standard)" | FileCheck %s // CHECK-LABEL: func @complex_abs // CHECK-SAME: %[[ARG:.*]]: complex diff --git a/mlir/test/Conversion/ComplexToStandard/full-conversion.mlir b/mlir/test/Conversion/ComplexToStandard/full-conversion.mlir --- a/mlir/test/Conversion/ComplexToStandard/full-conversion.mlir +++ b/mlir/test/Conversion/ComplexToStandard/full-conversion.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-complex-to-standard),convert-complex-to-llvm,builtin.func(convert-math-to-llvm,convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-complex-to-standard),convert-complex-to-llvm,func.func(convert-math-to-llvm,convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" | FileCheck %s // CHECK-LABEL: llvm.func @complex_abs // CHECK-SAME: %[[ARG:.*]]: ![[C_TY:.*]]) diff --git a/mlir/test/Conversion/FuncToLLVM/func-memref.mlir b/mlir/test/Conversion/FuncToLLVM/func-memref.mlir --- a/mlir/test/Conversion/FuncToLLVM/func-memref.mlir +++ b/mlir/test/Conversion/FuncToLLVM/func-memref.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt -pass-pipeline="builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" -split-input-file %s | FileCheck %s -// RUN: mlir-opt -pass-pipeline="builtin.func(convert-arith-to-llvm),convert-func-to-llvm{use-bare-ptr-memref-call-conv=1},reconcile-unrealized-casts" -split-input-file %s | FileCheck %s --check-prefix=BAREPTR +// RUN: mlir-opt -pass-pipeline="func.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" -split-input-file %s | FileCheck %s +// RUN: mlir-opt 
-pass-pipeline="func.func(convert-arith-to-llvm),convert-func-to-llvm{use-bare-ptr-memref-call-conv=1},reconcile-unrealized-casts" -split-input-file %s | FileCheck %s --check-prefix=BAREPTR // BAREPTR-LABEL: func @check_noalias // BAREPTR-SAME: %{{.*}}: !llvm.ptr {llvm.noalias}, %{{.*}}: !llvm.ptr {llvm.noalias} diff --git a/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir b/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir --- a/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir +++ b/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt -pass-pipeline="builtin.func(convert-math-to-llvm,convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" %s -split-input-file | FileCheck %s -// RUN: mlir-opt -pass-pipeline="builtin.func(convert-math-to-llvm,convert-arith-to-llvm{index-bitwidth=32}),convert-func-to-llvm{index-bitwidth=32},reconcile-unrealized-casts" %s -split-input-file | FileCheck --check-prefix=CHECK32 %s +// RUN: mlir-opt -pass-pipeline="func.func(convert-math-to-llvm,convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" %s -split-input-file | FileCheck %s +// RUN: mlir-opt -pass-pipeline="func.func(convert-math-to-llvm,convert-arith-to-llvm{index-bitwidth=32}),convert-func-to-llvm{index-bitwidth=32},reconcile-unrealized-casts" %s -split-input-file | FileCheck --check-prefix=CHECK32 %s // CHECK-LABEL: func @empty() { // CHECK-NEXT: llvm.return diff --git a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir --- a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir +++ b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir @@ -4,7 +4,7 @@ gpu.module @test_module { // CHECK-LABEL: func @gpu_index_ops() // CHECK32-LABEL: func @gpu_index_ops() - builtin.func @gpu_index_ops() + func.func @gpu_index_ops() -> (index, index, index, index, index, index, index, index, index, index, index, index) { // CHECK32-NOT: = llvm.sext %{{.*}} : i32 to i64 @@ -61,7 +61,7 @@ 
gpu.module @test_module { // CHECK-LABEL: func @gpu_index_comp // CHECK32-LABEL: func @gpu_index_comp - builtin.func @gpu_index_comp(%idx : index) -> index { + func.func @gpu_index_comp(%idx : index) -> index { // CHECK: = llvm.add %{{.*}}, %{{.*}} : i64 // CHECK32: = llvm.add %{{.*}}, %{{.*}} : i32 %0 = arith.addi %idx, %idx : index @@ -109,7 +109,7 @@ gpu.module @test_module { // CHECK-LABEL: func @gpu_shuffle() - builtin.func @gpu_shuffle() -> (f32, f32, f32, f32) { + func.func @gpu_shuffle() -> (f32, f32, f32, f32) { // CHECK: %[[#VALUE:]] = llvm.mlir.constant(1.000000e+00 : f32) : f32 %arg0 = arith.constant 1.0 : f32 // CHECK: %[[#OFFSET:]] = llvm.mlir.constant(4 : i32) : i32 @@ -148,7 +148,7 @@ gpu.module @test_module { // CHECK-LABEL: func @gpu_sync() - builtin.func @gpu_sync() { + func.func @gpu_sync() { // CHECK: nvvm.barrier0 gpu.barrier func.return @@ -161,7 +161,7 @@ // CHECK: llvm.func @__nv_fabsf(f32) -> f32 // CHECK: llvm.func @__nv_fabs(f64) -> f64 // CHECK-LABEL: func @gpu_fabs - builtin.func @gpu_fabs(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_fabs(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = math.abs %arg_f32 : f32 // CHECK: llvm.call @__nv_fabsf(%{{.*}}) : (f32) -> f32 %result64 = math.abs %arg_f64 : f64 @@ -176,7 +176,7 @@ // CHECK: llvm.func @__nv_ceilf(f32) -> f32 // CHECK: llvm.func @__nv_ceil(f64) -> f64 // CHECK-LABEL: func @gpu_ceil - builtin.func @gpu_ceil(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_ceil(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = math.ceil %arg_f32 : f32 // CHECK: llvm.call @__nv_ceilf(%{{.*}}) : (f32) -> f32 %result64 = math.ceil %arg_f64 : f64 @@ -191,7 +191,7 @@ // CHECK: llvm.func @__nv_floorf(f32) -> f32 // CHECK: llvm.func @__nv_floor(f64) -> f64 // CHECK-LABEL: func @gpu_floor - builtin.func @gpu_floor(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_floor(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = 
math.floor %arg_f32 : f32 // CHECK: llvm.call @__nv_floorf(%{{.*}}) : (f32) -> f32 %result64 = math.floor %arg_f64 : f64 @@ -206,7 +206,7 @@ // CHECK: llvm.func @__nv_cosf(f32) -> f32 // CHECK: llvm.func @__nv_cos(f64) -> f64 // CHECK-LABEL: func @gpu_cos - builtin.func @gpu_cos(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_cos(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = math.cos %arg_f32 : f32 // CHECK: llvm.call @__nv_cosf(%{{.*}}) : (f32) -> f32 %result64 = math.cos %arg_f64 : f64 @@ -220,7 +220,7 @@ // CHECK: llvm.func @__nv_expf(f32) -> f32 // CHECK: llvm.func @__nv_exp(f64) -> f64 // CHECK-LABEL: func @gpu_exp - builtin.func @gpu_exp(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_exp(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = math.exp %arg_f32 : f32 // CHECK: llvm.call @__nv_expf(%{{.*}}) : (f32) -> f32 %result64 = math.exp %arg_f64 : f64 @@ -234,7 +234,7 @@ // CHECK: llvm.func @__nv_exp2f(f32) -> f32 // CHECK: llvm.func @__nv_exp2(f64) -> f64 // CHECK-LABEL: func @gpu_exp2 - builtin.func @gpu_exp2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_exp2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = math.exp2 %arg_f32 : f32 // CHECK: llvm.call @__nv_exp2f(%{{.*}}) : (f32) -> f32 %result64 = math.exp2 %arg_f64 : f64 @@ -249,7 +249,7 @@ // CHECK: llvm.func @__nv_logf(f32) -> f32 // CHECK: llvm.func @__nv_log(f64) -> f64 // CHECK-LABEL: func @gpu_log - builtin.func @gpu_log(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_log(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = math.log %arg_f32 : f32 // CHECK: llvm.call @__nv_logf(%{{.*}}) : (f32) -> f32 %result64 = math.log %arg_f64 : f64 @@ -264,7 +264,7 @@ // CHECK: llvm.func @__nv_log10f(f32) -> f32 // CHECK: llvm.func @__nv_log10(f64) -> f64 // CHECK-LABEL: func @gpu_log10 - builtin.func @gpu_log10(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_log10(%arg_f32 : f32, 
%arg_f64 : f64) -> (f32, f64) { %result32 = math.log10 %arg_f32 : f32 // CHECK: llvm.call @__nv_log10f(%{{.*}}) : (f32) -> f32 %result64 = math.log10 %arg_f64 : f64 @@ -279,7 +279,7 @@ // CHECK: llvm.func @__nv_log1pf(f32) -> f32 // CHECK: llvm.func @__nv_log1p(f64) -> f64 // CHECK-LABEL: func @gpu_log1p - builtin.func @gpu_log1p(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_log1p(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = math.log1p %arg_f32 : f32 // CHECK: llvm.call @__nv_log1pf(%{{.*}}) : (f32) -> f32 %result64 = math.log1p %arg_f64 : f64 @@ -294,7 +294,7 @@ // CHECK: llvm.func @__nv_log2f(f32) -> f32 // CHECK: llvm.func @__nv_log2(f64) -> f64 // CHECK-LABEL: func @gpu_log2 - builtin.func @gpu_log2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_log2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = math.log2 %arg_f32 : f32 // CHECK: llvm.call @__nv_log2f(%{{.*}}) : (f32) -> f32 %result64 = math.log2 %arg_f64 : f64 @@ -309,7 +309,7 @@ // CHECK: llvm.func @__nv_sinf(f32) -> f32 // CHECK: llvm.func @__nv_sin(f64) -> f64 // CHECK-LABEL: func @gpu_sin - builtin.func @gpu_sin(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_sin(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = math.sin %arg_f32 : f32 // CHECK: llvm.call @__nv_sinf(%{{.*}}) : (f32) -> f32 %result64 = math.sin %arg_f64 : f64 @@ -324,7 +324,7 @@ // CHECK: llvm.func @__nv_tanhf(f32) -> f32 // CHECK: llvm.func @__nv_tanh(f64) -> f64 // CHECK-LABEL: func @gpu_tanh - builtin.func @gpu_tanh(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) { + func.func @gpu_tanh(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) { %result16 = math.tanh %arg_f16 : f16 // CHECK: llvm.fpext %{{.*}} : f16 to f32 // CHECK-NEXT: llvm.call @__nv_tanhf(%{{.*}}) : (f32) -> f32 @@ -343,7 +343,7 @@ // CHECK: llvm.func @__nv_rsqrtf(f32) -> f32 // CHECK: llvm.func @__nv_rsqrt(f64) -> f64 // CHECK-LABEL: func 
@gpu_rsqrt - builtin.func @gpu_rsqrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) + func.func @gpu_rsqrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) { %result16 = math.rsqrt %arg_f16 : f16 // CHECK: llvm.fpext %{{.*}} : f16 to f32 @@ -363,7 +363,7 @@ // CHECK: llvm.func @__nv_sqrtf(f32) -> f32 // CHECK: llvm.func @__nv_sqrt(f64) -> f64 // CHECK-LABEL: func @gpu_sqrt - builtin.func @gpu_sqrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) + func.func @gpu_sqrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) { %result16 = math.sqrt %arg_f16 : f16 // CHECK: llvm.fpext %{{.*}} : f16 to f32 @@ -383,7 +383,7 @@ // CHECK: llvm.func @__nv_atanf(f32) -> f32 // CHECK: llvm.func @__nv_atan(f64) -> f64 // CHECK-LABEL: func @gpu_atan - builtin.func @gpu_atan(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) + func.func @gpu_atan(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) { %result16 = math.atan %arg_f16 : f16 // CHECK: llvm.fpext %{{.*}} : f16 to f32 @@ -403,7 +403,7 @@ // CHECK: llvm.func @__nv_atan2f(f32, f32) -> f32 // CHECK: llvm.func @__nv_atan2(f64, f64) -> f64 // CHECK-LABEL: func @gpu_atan2 - builtin.func @gpu_atan2(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) + func.func @gpu_atan2(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) { %result16 = math.atan2 %arg_f16, %arg_f16 : f16 // CHECK: llvm.fpext %{{.*}} : f16 to f32 @@ -427,7 +427,7 @@ // CHECK: llvm.func @__nv_expf(f32) -> f32 // CHECK: llvm.func @__nv_exp(f64) -> f64 // CHECK-LABEL: func @gpu_exp - builtin.func @gpu_exp(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_exp(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = math.exp %arg_f32 : f32 // CHECK: llvm.call @__nv_expf(%{{.*}}) : (f32) -> f32 %result64 = math.exp %arg_f64 : f64 @@ -444,7 +444,7 @@ // CHECK: llvm.func @__nv_expm1f(f32) -> f32 // CHECK: llvm.func @__nv_expm1(f64) -> f64 // CHECK-LABEL: func @gpu_expm1 - builtin.func 
@gpu_expm1(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_expm1(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = math.expm1 %arg_f32 : f32 // CHECK: llvm.call @__nv_expm1f(%{{.*}}) : (f32) -> f32 %result64 = math.expm1 %arg_f64 : f64 @@ -459,7 +459,7 @@ // CHECK: llvm.func @__nv_powf(f32, f32) -> f32 // CHECK: llvm.func @__nv_pow(f64, f64) -> f64 // CHECK-LABEL: func @gpu_pow - builtin.func @gpu_pow(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_pow(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = math.powf %arg_f32, %arg_f32 : f32 // CHECK: llvm.call @__nv_powf(%{{.*}}, %{{.*}}) : (f32, f32) -> f32 %result64 = math.powf %arg_f64, %arg_f64 : f64 diff --git a/mlir/test/Conversion/GPUToNVVM/wmma-ops-to-nvvm.mlir b/mlir/test/Conversion/GPUToNVVM/wmma-ops-to-nvvm.mlir --- a/mlir/test/Conversion/GPUToNVVM/wmma-ops-to-nvvm.mlir +++ b/mlir/test/Conversion/GPUToNVVM/wmma-ops-to-nvvm.mlir @@ -6,7 +6,7 @@ // CHECK-LABEL: func @gpu_wmma_load_op() -> // CHECK-SAME: !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)> { // CHECK32-LABEL: func @gpu_wmma_load_op() -> - builtin.func @gpu_wmma_load_op() -> (!gpu.mma_matrix<16x16xf16, "AOp">) { + func.func @gpu_wmma_load_op() -> (!gpu.mma_matrix<16x16xf16, "AOp">) { %wg = memref.alloca() {alignment = 32} : memref<32x32xf16, 3> %i = arith.constant 16 : index %j = arith.constant 16 : index @@ -46,7 +46,7 @@ // CHECK-SAME: (%[[D:.*]]: !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)>) { // CHECK32-LABEL: func @gpu_wmma_store_op // CHECK32-SAME: (%[[D:.*]]: !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)>) { - builtin.func @gpu_wmma_store_op(%arg0 : !gpu.mma_matrix<16x16xf16, "COp">) -> () { + func.func @gpu_wmma_store_op(%arg0 : !gpu.mma_matrix<16x16xf16, "COp">) -> () { %sg = memref.alloca(){alignment = 32} : memref<32x32xf16, 3> %i = 
arith.constant 16 : index %j = arith.constant 16 : index @@ -92,7 +92,7 @@ // CHECK-LABEL: func @gpu_wmma_mma_op // CHECK-SAME: (%[[A:.*]]: !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)>, %[[B:.*]]: !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)>, %[[C:.*]]: !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)>) - builtin.func @gpu_wmma_mma_op(%A : !gpu.mma_matrix<16x16xf16, "AOp">, %B : !gpu.mma_matrix<16x16xf16, "BOp">, %C : !gpu.mma_matrix<16x16xf16, "COp">) -> (!gpu.mma_matrix<16x16xf16, "COp">) { + func.func @gpu_wmma_mma_op(%A : !gpu.mma_matrix<16x16xf16, "AOp">, %B : !gpu.mma_matrix<16x16xf16, "BOp">, %C : !gpu.mma_matrix<16x16xf16, "COp">) -> (!gpu.mma_matrix<16x16xf16, "COp">) { %D = gpu.subgroup_mma_compute %A, %B, %C : !gpu.mma_matrix<16x16xf16, "AOp">, !gpu.mma_matrix<16x16xf16, "BOp"> -> !gpu.mma_matrix<16x16xf16, "COp"> // CHECK: %[[A1:.*]] = llvm.extractvalue %[[A]][0 : i32] : !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)> // CHECK: %[[A2:.*]] = llvm.extractvalue %[[A]][1 : i32] : !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)> @@ -163,7 +163,7 @@ // CHECK: %[[E3:.+]] = llvm.extractvalue %[[ACC]][3 : i32] : !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)> // CHECK: nvvm.wmma.store %{{.*}}, %{{.*}}, %[[E0]], %[[E1]], %[[E2]], %[[E3]] {eltype = #nvvm.mma_type, k = 16 : i32, layout = #nvvm.mma_layout, m = 16 : i32, n = 16 : i32} : !llvm.ptr, vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16> - builtin.func @gpu_wmma_mma_loop_op(%arg0: memref<128x128xf16>, %arg1: memref<128x128xf16>, %arg2: memref<128x128xf16>) { + func.func @gpu_wmma_mma_loop_op(%arg0: 
memref<128x128xf16>, %arg1: memref<128x128xf16>, %arg2: memref<128x128xf16>) { %c0 = arith.constant 0 : index %c128 = arith.constant 128 : index %c32 = arith.constant 32 : index @@ -202,7 +202,7 @@ // CHECK: %[[M3:.+]] = llvm.insertvalue %[[V2]], %[[M2]][2 : i32] : !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)> // CHECK: %[[M4:.+]] = llvm.insertvalue %[[V2]], %[[M3]][3 : i32] : !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)> // CHECK: llvm.return %[[M4]] : !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)> - builtin.func @gpu_wmma_constant_op() ->(!gpu.mma_matrix<16x16xf16, "COp">) { + func.func @gpu_wmma_constant_op() ->(!gpu.mma_matrix<16x16xf16, "COp">) { %cst = arith.constant 1.0 : f16 %C = gpu.subgroup_mma_constant_matrix %cst : !gpu.mma_matrix<16x16xf16, "COp"> return %C : !gpu.mma_matrix<16x16xf16, "COp"> @@ -232,7 +232,7 @@ // CHECK: %[[C3:.*]] = llvm.fadd %[[A3]], %[[B3]] : vector<2xf16> // CHECK: %[[M4:.*]] = llvm.insertvalue %[[C3]], %[[M3]][3 : i32] : !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)> // CHECK: llvm.return %[[M4]] : !llvm.struct<(vector<2xf16>, vector<2xf16>, vector<2xf16>, vector<2xf16>)> - builtin.func @gpu_wmma_elementwise(%A : !gpu.mma_matrix<16x16xf16, "COp">, %B : !gpu.mma_matrix<16x16xf16, "COp">) ->(!gpu.mma_matrix<16x16xf16, "COp">) { + func.func @gpu_wmma_elementwise(%A : !gpu.mma_matrix<16x16xf16, "COp">, %B : !gpu.mma_matrix<16x16xf16, "COp">) ->(!gpu.mma_matrix<16x16xf16, "COp">) { %C = gpu.subgroup_mma_elementwise addf %A, %B : (!gpu.mma_matrix<16x16xf16, "COp">, !gpu.mma_matrix<16x16xf16, "COp">) -> !gpu.mma_matrix<16x16xf16, "COp"> return %C : !gpu.mma_matrix<16x16xf16, "COp"> } diff --git a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir --- a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir +++ b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir @@ -4,7 +4,7 @@ 
gpu.module @test_module { // CHECK-LABEL: func @gpu_index_ops() // CHECK32-LABEL: func @gpu_index_ops() - builtin.func @gpu_index_ops() + func.func @gpu_index_ops() -> (index, index, index, index, index, index, index, index, index, index, index, index) { // CHECK32-NOT: = llvm.sext %{{.*}} : i32 to i64 @@ -61,7 +61,7 @@ gpu.module @test_module { // CHECK-LABEL: func @gpu_index_comp // CHECK32-LABEL: func @gpu_index_comp - builtin.func @gpu_index_comp(%idx : index) -> index { + func.func @gpu_index_comp(%idx : index) -> index { // CHECK: = llvm.add %{{.*}}, %{{.*}} : i64 // CHECK32: = llvm.add %{{.*}}, %{{.*}} : i32 %0 = arith.addi %idx, %idx : index @@ -75,7 +75,7 @@ gpu.module @test_module { // CHECK-LABEL: func @gpu_sync() - builtin.func @gpu_sync() { + func.func @gpu_sync() { // CHECK: rocdl.barrier gpu.barrier func.return @@ -88,7 +88,7 @@ // CHECK: llvm.func @__ocml_fabs_f32(f32) -> f32 // CHECK: llvm.func @__ocml_fabs_f64(f64) -> f64 // CHECK-LABEL: func @gpu_fabs - builtin.func @gpu_fabs(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_fabs(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = math.abs %arg_f32 : f32 // CHECK: llvm.call @__ocml_fabs_f32(%{{.*}}) : (f32) -> f32 %result64 = math.abs %arg_f64 : f64 @@ -103,7 +103,7 @@ // CHECK: llvm.func @__ocml_ceil_f32(f32) -> f32 // CHECK: llvm.func @__ocml_ceil_f64(f64) -> f64 // CHECK-LABEL: func @gpu_ceil - builtin.func @gpu_ceil(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_ceil(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = math.ceil %arg_f32 : f32 // CHECK: llvm.call @__ocml_ceil_f32(%{{.*}}) : (f32) -> f32 %result64 = math.ceil %arg_f64 : f64 @@ -118,7 +118,7 @@ // CHECK: llvm.func @__ocml_floor_f32(f32) -> f32 // CHECK: llvm.func @__ocml_floor_f64(f64) -> f64 // CHECK-LABEL: func @gpu_floor - builtin.func @gpu_floor(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_floor(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = 
math.floor %arg_f32 : f32 // CHECK: llvm.call @__ocml_floor_f32(%{{.*}}) : (f32) -> f32 %result64 = math.floor %arg_f64 : f64 @@ -133,7 +133,7 @@ // CHECK: llvm.func @__ocml_cos_f32(f32) -> f32 // CHECK: llvm.func @__ocml_cos_f64(f64) -> f64 // CHECK-LABEL: func @gpu_cos - builtin.func @gpu_cos(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_cos(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = math.cos %arg_f32 : f32 // CHECK: llvm.call @__ocml_cos_f32(%{{.*}}) : (f32) -> f32 %result64 = math.cos %arg_f64 : f64 @@ -148,7 +148,7 @@ // CHECK: llvm.func @__ocml_exp_f32(f32) -> f32 // CHECK: llvm.func @__ocml_exp_f64(f64) -> f64 // CHECK-LABEL: func @gpu_exp - builtin.func @gpu_exp(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_exp(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %exp_f32 = math.exp %arg_f32 : f32 // CHECK: llvm.call @__ocml_exp_f32(%{{.*}}) : (f32) -> f32 %result32 = math.exp %exp_f32 : f32 @@ -165,7 +165,7 @@ // CHECK: llvm.func @__ocml_exp2_f32(f32) -> f32 // CHECK: llvm.func @__ocml_exp2_f64(f64) -> f64 // CHECK-LABEL: func @gpu_exp2 - builtin.func @gpu_exp2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_exp2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %exp2_f32 = math.exp2 %arg_f32 : f32 // CHECK: llvm.call @__ocml_exp2_f32(%{{.*}}) : (f32) -> f32 %result32 = math.exp2 %exp2_f32 : f32 @@ -185,7 +185,7 @@ // CHECK: llvm.func @__ocml_exp_f32(f32) -> f32 // CHECK: llvm.func @__ocml_exp_f64(f64) -> f64 // CHECK-LABEL: func @gpu_exp - builtin.func @gpu_exp(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_exp(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %exp_f32 = math.exp %arg_f32 : f32 // CHECK: llvm.call @__ocml_exp_f32(%{{.*}}) : (f32) -> f32 %result32 = math.exp %exp_f32 : f32 @@ -204,7 +204,7 @@ // CHECK: llvm.func @__ocml_expm1_f32(f32) -> f32 // CHECK: llvm.func @__ocml_expm1_f64(f64) -> f64 // CHECK-LABEL: func @gpu_expm1 - builtin.func @gpu_expm1(%arg_f32 : 
f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_expm1(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %expm1_f32 = math.expm1 %arg_f32 : f32 // CHECK: llvm.call @__ocml_expm1_f32(%{{.*}}) : (f32) -> f32 %result32 = math.expm1 %expm1_f32 : f32 @@ -221,7 +221,7 @@ // CHECK: llvm.func @__ocml_log_f32(f32) -> f32 // CHECK: llvm.func @__ocml_log_f64(f64) -> f64 // CHECK-LABEL: func @gpu_log - builtin.func @gpu_log(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_log(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = math.log %arg_f32 : f32 // CHECK: llvm.call @__ocml_log_f32(%{{.*}}) : (f32) -> f32 %result64 = math.log %arg_f64 : f64 @@ -236,7 +236,7 @@ // CHECK: llvm.func @__ocml_log1p_f32(f32) -> f32 // CHECK: llvm.func @__ocml_log1p_f64(f64) -> f64 // CHECK-LABEL: func @gpu_log1p - builtin.func @gpu_log1p(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_log1p(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = math.log1p %arg_f32 : f32 // CHECK: llvm.call @__ocml_log1p_f32(%{{.*}}) : (f32) -> f32 %result64 = math.log1p %arg_f64 : f64 @@ -251,7 +251,7 @@ // CHECK: llvm.func @__ocml_log10_f32(f32) -> f32 // CHECK: llvm.func @__ocml_log10_f64(f64) -> f64 // CHECK-LABEL: func @gpu_log10 - builtin.func @gpu_log10(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_log10(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = math.log10 %arg_f32 : f32 // CHECK: llvm.call @__ocml_log10_f32(%{{.*}}) : (f32) -> f32 %result64 = math.log10 %arg_f64 : f64 @@ -266,7 +266,7 @@ // CHECK: llvm.func @__ocml_log2_f32(f32) -> f32 // CHECK: llvm.func @__ocml_log2_f64(f64) -> f64 // CHECK-LABEL: func @gpu_log2 - builtin.func @gpu_log2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_log2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = math.log2 %arg_f32 : f32 // CHECK: llvm.call @__ocml_log2_f32(%{{.*}}) : (f32) -> f32 %result64 = math.log2 %arg_f64 : f64 @@ -281,7 +281,7 @@ // CHECK: 
llvm.func @__ocml_rsqrt_f32(f32) -> f32 // CHECK: llvm.func @__ocml_rsqrt_f64(f64) -> f64 // CHECK-LABEL: func @gpu_rsqrt - builtin.func @gpu_rsqrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) + func.func @gpu_rsqrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) { %result16 = math.rsqrt %arg_f16 : f16 // CHECK: llvm.fpext %{{.*}} : f16 to f32 @@ -301,7 +301,7 @@ // CHECK: llvm.func @__ocml_sqrt_f32(f32) -> f32 // CHECK: llvm.func @__ocml_sqrt_f64(f64) -> f64 // CHECK-LABEL: func @gpu_sqrt - builtin.func @gpu_sqrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) + func.func @gpu_sqrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) { %result16 = math.sqrt %arg_f16 : f16 // CHECK: llvm.fpext %{{.*}} : f16 to f32 @@ -321,7 +321,7 @@ // CHECK: llvm.func @__ocml_tanh_f32(f32) -> f32 // CHECK: llvm.func @__ocml_tanh_f64(f64) -> f64 // CHECK-LABEL: func @gpu_tanh - builtin.func @gpu_tanh(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_tanh(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = math.tanh %arg_f32 : f32 // CHECK: llvm.call @__ocml_tanh_f32(%{{.*}}) : (f32) -> f32 %result64 = math.tanh %arg_f64 : f64 @@ -336,7 +336,7 @@ // CHECK: llvm.func @__ocml_atan_f32(f32) -> f32 // CHECK: llvm.func @__ocml_atan_f64(f64) -> f64 // CHECK-LABEL: func @gpu_atan - builtin.func @gpu_atan(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_atan(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = math.atan %arg_f32 : f32 // CHECK: llvm.call @__ocml_atan_f32(%{{.*}}) : (f32) -> f32 %result64 = math.atan %arg_f64 : f64 @@ -351,7 +351,7 @@ // CHECK: llvm.func @__ocml_atan2_f32(f32, f32) -> f32 // CHECK: llvm.func @__ocml_atan2_f64(f64, f64) -> f64 // CHECK-LABEL: func @gpu_atan2 - builtin.func @gpu_atan2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_atan2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = math.atan2 %arg_f32, %arg_f32 : f32 // CHECK: llvm.call 
@__ocml_atan2_f32(%{{.*}}) : (f32, f32) -> f32 %result64 = math.atan2 %arg_f64, %arg_f64 : f64 @@ -366,7 +366,7 @@ // CHECK: llvm.func @__ocml_pow_f32(f32, f32) -> f32 // CHECK: llvm.func @__ocml_pow_f64(f64, f64) -> f64 // CHECK-LABEL: func @gpu_pow - builtin.func @gpu_pow(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { + func.func @gpu_pow(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { %result32 = math.powf %arg_f32, %arg_f32 : f32 // CHECK: llvm.call @__ocml_pow_f32(%{{.*}}, %{{.*}}) : (f32, f32) -> f32 %result64 = math.powf %arg_f64, %arg_f64 : f64 diff --git a/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir b/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir --- a/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir +++ b/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -split-input-file -pass-pipeline="builtin.func(convert-math-to-llvm)" | FileCheck %s +// RUN: mlir-opt %s -split-input-file -pass-pipeline="func.func(convert-math-to-llvm)" | FileCheck %s // CHECK-LABEL: @ops func @ops(%arg0: f32, %arg1: f32, %arg2: i32, %arg3: i32, %arg4: f64) { diff --git a/mlir/test/Conversion/SCFToGPU/no_blocks_no_threads.mlir b/mlir/test/Conversion/SCFToGPU/no_blocks_no_threads.mlir --- a/mlir/test/Conversion/SCFToGPU/no_blocks_no_threads.mlir +++ b/mlir/test/Conversion/SCFToGPU/no_blocks_no_threads.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt -pass-pipeline="builtin.func(convert-affine-for-to-gpu{gpu-block-dims=0 gpu-thread-dims=1})" %s | FileCheck --check-prefix=CHECK-THREADS %s -// RUN: mlir-opt -pass-pipeline="builtin.func(convert-affine-for-to-gpu{gpu-block-dims=1 gpu-thread-dims=0})" %s | FileCheck --check-prefix=CHECK-BLOCKS %s +// RUN: mlir-opt -pass-pipeline="func.func(convert-affine-for-to-gpu{gpu-block-dims=0 gpu-thread-dims=1})" %s | FileCheck --check-prefix=CHECK-THREADS %s +// RUN: mlir-opt -pass-pipeline="func.func(convert-affine-for-to-gpu{gpu-block-dims=1 gpu-thread-dims=0})" %s | FileCheck --check-prefix=CHECK-BLOCKS %s // 
CHECK-THREADS-LABEL: @one_d_loop // CHECK-BLOCKS-LABEL: @one_d_loop diff --git a/mlir/test/Conversion/SCFToGPU/step_one.mlir b/mlir/test/Conversion/SCFToGPU/step_one.mlir --- a/mlir/test/Conversion/SCFToGPU/step_one.mlir +++ b/mlir/test/Conversion/SCFToGPU/step_one.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt -pass-pipeline="builtin.func(convert-affine-for-to-gpu{gpu-block-dims=1 gpu-thread-dims=1})" %s | FileCheck --check-prefix=CHECK-11 %s -// RUN: mlir-opt -pass-pipeline="builtin.func(convert-affine-for-to-gpu{gpu-block-dims=2 gpu-thread-dims=2})" %s | FileCheck --check-prefix=CHECK-22 %s +// RUN: mlir-opt -pass-pipeline="func.func(convert-affine-for-to-gpu{gpu-block-dims=1 gpu-thread-dims=1})" %s | FileCheck --check-prefix=CHECK-11 %s +// RUN: mlir-opt -pass-pipeline="func.func(convert-affine-for-to-gpu{gpu-block-dims=2 gpu-thread-dims=2})" %s | FileCheck --check-prefix=CHECK-22 %s // CHECK-11-LABEL: @step_1 // CHECK-22-LABEL: @step_1 diff --git a/mlir/test/Conversion/SCFToGPU/step_positive.mlir b/mlir/test/Conversion/SCFToGPU/step_positive.mlir --- a/mlir/test/Conversion/SCFToGPU/step_positive.mlir +++ b/mlir/test/Conversion/SCFToGPU/step_positive.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -pass-pipeline="builtin.func(convert-affine-for-to-gpu{gpu-block-dims=1 gpu-thread-dims=1})" %s | FileCheck %s +// RUN: mlir-opt -pass-pipeline="func.func(convert-affine-for-to-gpu{gpu-block-dims=1 gpu-thread-dims=1})" %s | FileCheck %s // CHECK-LABEL: @step_var func @step_var(%A : memref, %B : memref) { diff --git a/mlir/test/Conversion/ShapeToStandard/convert-shape-constraints.mlir b/mlir/test/Conversion/ShapeToStandard/convert-shape-constraints.mlir --- a/mlir/test/Conversion/ShapeToStandard/convert-shape-constraints.mlir +++ b/mlir/test/Conversion/ShapeToStandard/convert-shape-constraints.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -pass-pipeline="builtin.func(convert-shape-constraints)" <%s | FileCheck %s +// RUN: mlir-opt -pass-pipeline="func.func(convert-shape-constraints)" <%s | 
FileCheck %s // There's not very much useful to check here other than pasting the output. // CHECK-LABEL: func @cstr_broadcastable( diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir --- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir +++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt --split-input-file -pass-pipeline="builtin.func(tosa-to-linalg-named)" %s -verify-diagnostics -o -| FileCheck %s +// RUN: mlir-opt --split-input-file -pass-pipeline="func.func(tosa-to-linalg-named)" %s -verify-diagnostics -o -| FileCheck %s // CHECK-LABEL: @matmul func @matmul(%arg0: tensor<1x5x3xf32>, %arg1: tensor<1x3x6xf32>) -> (tensor<1x5x6xf32>) { diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir --- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir +++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt --split-input-file -pass-pipeline="builtin.func(tosa-to-linalg)" %s -verify-diagnostics -o -| FileCheck %s +// RUN: mlir-opt --split-input-file -pass-pipeline="func.func(tosa-to-linalg)" %s -verify-diagnostics -o -| FileCheck %s // CHECK: #[[$MAP0:.*]] = affine_map<() -> ()> diff --git a/mlir/test/Conversion/VectorToGPU/vector-to-mma-ops.mlir b/mlir/test/Conversion/VectorToGPU/vector-to-mma-ops.mlir --- a/mlir/test/Conversion/VectorToGPU/vector-to-mma-ops.mlir +++ b/mlir/test/Conversion/VectorToGPU/vector-to-mma-ops.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-gpu)" -canonicalize | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-gpu)" -canonicalize | FileCheck %s #map0 = affine_map<(d0, d1) -> (d1, d0)> #map1 = affine_map<(d0, d1, d2) -> (d0, d2)> diff --git a/mlir/test/Conversion/VectorToROCDL/vector-to-rocdl.mlir 
b/mlir/test/Conversion/VectorToROCDL/vector-to-rocdl.mlir --- a/mlir/test/Conversion/VectorToROCDL/vector-to-rocdl.mlir +++ b/mlir/test/Conversion/VectorToROCDL/vector-to-rocdl.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s -convert-vector-to-rocdl | FileCheck %s gpu.module @test_read{ -builtin.func @transfer_readx2(%A : memref, %base: index) -> vector<2xf32> { +func.func @transfer_readx2(%A : memref, %base: index) -> vector<2xf32> { %f0 = arith.constant 0.0: f32 %f = vector.transfer_read %A[%base], %f0 {permutation_map = affine_map<(d0) -> (d0)>} : @@ -11,7 +11,7 @@ // CHECK-LABEL: @transfer_readx2 // CHECK: rocdl.buffer.load {{.*}} vector<2xf32> -builtin.func @transfer_readx4(%A : memref, %base: index) -> vector<4xf32> { +func.func @transfer_readx4(%A : memref, %base: index) -> vector<4xf32> { %f0 = arith.constant 0.0: f32 %f = vector.transfer_read %A[%base], %f0 {permutation_map = affine_map<(d0) -> (d0)>} : @@ -21,7 +21,7 @@ // CHECK-LABEL: @transfer_readx4 // CHECK: rocdl.buffer.load {{.*}} vector<4xf32> -builtin.func @transfer_read_dwordConfig(%A : memref, %base: index) -> vector<4xf32> { +func.func @transfer_read_dwordConfig(%A : memref, %base: index) -> vector<4xf32> { %f0 = arith.constant 0.0: f32 %f = vector.transfer_read %A[%base], %f0 {permutation_map = affine_map<(d0) -> (d0)>} : @@ -36,7 +36,7 @@ } gpu.module @test_write{ -builtin.func @transfer_writex2(%A : memref, %B : vector<2xf32>, %base: index) { +func.func @transfer_writex2(%A : memref, %B : vector<2xf32>, %base: index) { vector.transfer_write %B, %A[%base] {permutation_map = affine_map<(d0) -> (d0)>} : vector<2xf32>, memref @@ -45,7 +45,7 @@ // CHECK-LABEL: @transfer_writex2 // CHECK: rocdl.buffer.store {{.*}} vector<2xf32> -builtin.func @transfer_writex4(%A : memref, %B : vector<4xf32>, %base: index) { +func.func @transfer_writex4(%A : memref, %B : vector<4xf32>, %base: index) { vector.transfer_write %B, %A[%base] {permutation_map = affine_map<(d0) -> (d0)>} : vector<4xf32>, memref @@ -54,7 +54,7 @@ 
// CHECK-LABEL: @transfer_writex4 // CHECK: rocdl.buffer.store {{.*}} vector<4xf32> -builtin.func @transfer_write_dwordConfig(%A : memref, %B : vector<2xf32>, %base: index) { +func.func @transfer_write_dwordConfig(%A : memref, %B : vector<2xf32>, %base: index) { vector.transfer_write %B, %A[%base] {permutation_map = affine_map<(d0) -> (d0)>} : vector<2xf32>, memref diff --git a/mlir/test/Conversion/VectorToSCF/tensor-transfer-ops.mlir b/mlir/test/Conversion/VectorToSCF/tensor-transfer-ops.mlir --- a/mlir/test/Conversion/VectorToSCF/tensor-transfer-ops.mlir +++ b/mlir/test/Conversion/VectorToSCF/tensor-transfer-ops.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{lower-tensors=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{lower-tensors=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s // CHECK-LABEL: func @transfer_read_2d( // CHECK: %[[ALLOC:.*]] = memref.alloca() : memref> diff --git a/mlir/test/Conversion/VectorToSCF/unrolled-tensor-transfer-ops.mlir b/mlir/test/Conversion/VectorToSCF/unrolled-tensor-transfer-ops.mlir --- a/mlir/test/Conversion/VectorToSCF/unrolled-tensor-transfer-ops.mlir +++ b/mlir/test/Conversion/VectorToSCF/unrolled-tensor-transfer-ops.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true lower-tensors=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true lower-tensors=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s // CHECK-LABEL: func @transfer_read_2d( // CHECK: %[[V_INIT:.*]] = arith.constant dense<-4.200000e+01> : vector<4x9xf32> diff --git a/mlir/test/Conversion/VectorToSCF/unrolled-vector-to-loops.mlir b/mlir/test/Conversion/VectorToSCF/unrolled-vector-to-loops.mlir --- 
a/mlir/test/Conversion/VectorToSCF/unrolled-vector-to-loops.mlir +++ b/mlir/test/Conversion/VectorToSCF/unrolled-vector-to-loops.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s // CHECK-LABEL: func @transfer_read_inbounds func @transfer_read_inbounds(%A : memref) -> (vector<2x3x4xf32>) { diff --git a/mlir/test/Conversion/VectorToSCF/vector-to-scf-mask-and-permutation-map.mlir b/mlir/test/Conversion/VectorToSCF/vector-to-scf-mask-and-permutation-map.mlir --- a/mlir/test/Conversion/VectorToSCF/vector-to-scf-mask-and-permutation-map.mlir +++ b/mlir/test/Conversion/VectorToSCF/vector-to-scf-mask-and-permutation-map.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{lower-permutation-maps=true})" -split-input-file | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{lower-permutation-maps=true})" -split-input-file | FileCheck %s // Ensure that the permutation map is lowered (by inserting a transpose op) // before lowering the vector.transfer_read. 
diff --git a/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir b/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir --- a/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir +++ b/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf)" -split-input-file -allow-unregistered-dialect | FileCheck %s -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s --check-prefix=FULL-UNROLL +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf)" -split-input-file -allow-unregistered-dialect | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s --check-prefix=FULL-UNROLL // CHECK-LABEL: func @vector_transfer_ops_0d( func @vector_transfer_ops_0d(%M: memref) { diff --git a/mlir/test/Dialect/Affine/canonicalize.mlir b/mlir/test/Dialect/Affine/canonicalize.mlir --- a/mlir/test/Dialect/Affine/canonicalize.mlir +++ b/mlir/test/Dialect/Affine/canonicalize.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -pass-pipeline='builtin.func(canonicalize)' | FileCheck %s +// RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -pass-pipeline='func.func(canonicalize)' | FileCheck %s // ----- diff --git a/mlir/test/Dialect/Affine/loop-unswitch.mlir b/mlir/test/Dialect/Affine/loop-unswitch.mlir --- a/mlir/test/Dialect/Affine/loop-unswitch.mlir +++ b/mlir/test/Dialect/Affine/loop-unswitch.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -split-input-file -pass-pipeline="builtin.func(test-affine-loop-unswitch)" | FileCheck %s +// RUN: mlir-opt %s -split-input-file -pass-pipeline="func.func(test-affine-loop-unswitch)" | FileCheck %s // CHECK-DAG: #[[$SET:.*]] = affine_set<(d0) : (d0 - 2 >= 0)> diff --git 
a/mlir/test/Dialect/Affine/memref-stride-calculation.mlir b/mlir/test/Dialect/Affine/memref-stride-calculation.mlir --- a/mlir/test/Dialect/Affine/memref-stride-calculation.mlir +++ b/mlir/test/Dialect/Affine/memref-stride-calculation.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(test-memref-stride-calculation)" -o /dev/null | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="func.func(test-memref-stride-calculation)" -o /dev/null | FileCheck %s func @f(%0: index) { // CHECK-LABEL: Testing: f diff --git a/mlir/test/Dialect/ControlFlow/canonicalize.mlir b/mlir/test/Dialect/ControlFlow/canonicalize.mlir --- a/mlir/test/Dialect/ControlFlow/canonicalize.mlir +++ b/mlir/test/Dialect/ControlFlow/canonicalize.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -allow-unregistered-dialect -pass-pipeline='builtin.func(canonicalize)' -split-input-file | FileCheck --dump-input-context 20 %s +// RUN: mlir-opt %s -allow-unregistered-dialect -pass-pipeline='func.func(canonicalize)' -split-input-file | FileCheck --dump-input-context 20 %s /// Test the folding of BranchOp. 
diff --git a/mlir/test/Dialect/LLVMIR/terminator.mlir b/mlir/test/Dialect/LLVMIR/terminator.mlir --- a/mlir/test/Dialect/LLVMIR/terminator.mlir +++ b/mlir/test/Dialect/LLVMIR/terminator.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -pass-pipeline='builtin.func(canonicalize)' %s | FileCheck %s +// RUN: mlir-opt -pass-pipeline='func.func(canonicalize)' %s | FileCheck %s // verify that terminators survive the canonicalizer // CHECK-LABEL: @return diff --git a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis.mlir b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis.mlir --- a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis.mlir +++ b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis.mlir @@ -784,7 +784,7 @@ // ----- -builtin.func @matmul_on_tensors( +func.func @matmul_on_tensors( %arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true}) @@ -822,7 +822,7 @@ // ----- -builtin.func @matmul_on_tensors( +func.func @matmul_on_tensors( %arg0: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg1: tensor<518x518xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = false}, %arg2: tensor<256x256xf32> {linalg.buffer_layout = affine_map<(d0, d1) -> (d0, d1)>, linalg.inplaceable = true}) diff --git a/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir b/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir --- a/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir +++ b/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -pass-pipeline="builtin.func(convert-elementwise-to-linalg)" -split-input-file 
%s | FileCheck %s +// RUN: mlir-opt -pass-pipeline="func.func(convert-elementwise-to-linalg)" -split-input-file %s | FileCheck %s // In-depth checking of the linalg.generic op for a very trivial case. // CHECK: #[[$MAP:.*]] = affine_map<() -> ()> diff --git a/mlir/test/Dialect/Linalg/detensorize_0d.mlir b/mlir/test/Dialect/Linalg/detensorize_0d.mlir --- a/mlir/test/Dialect/Linalg/detensorize_0d.mlir +++ b/mlir/test/Dialect/Linalg/detensorize_0d.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -allow-unregistered-dialect -pass-pipeline="builtin.func(linalg-detensorize{aggressive-mode})" | FileCheck %s +// RUN: mlir-opt %s -allow-unregistered-dialect -pass-pipeline="func.func(linalg-detensorize{aggressive-mode})" | FileCheck %s #map = affine_map<() -> ()> diff --git a/mlir/test/Dialect/Linalg/detensorize_br_operands.mlir b/mlir/test/Dialect/Linalg/detensorize_br_operands.mlir --- a/mlir/test/Dialect/Linalg/detensorize_br_operands.mlir +++ b/mlir/test/Dialect/Linalg/detensorize_br_operands.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -split-input-file -allow-unregistered-dialect -pass-pipeline="builtin.func(linalg-detensorize)" | FileCheck %s +// RUN: mlir-opt %s -split-input-file -allow-unregistered-dialect -pass-pipeline="func.func(linalg-detensorize)" | FileCheck %s // TODO: Detensoring breaks if %arg0 or %arg1 are passed directly as tensors. Fix that. 
func @if_true_test(%arg0: i1, %arg1: i32) -> tensor attributes {} { diff --git a/mlir/test/Dialect/Linalg/detensorize_if.mlir b/mlir/test/Dialect/Linalg/detensorize_if.mlir --- a/mlir/test/Dialect/Linalg/detensorize_if.mlir +++ b/mlir/test/Dialect/Linalg/detensorize_if.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -split-input-file -allow-unregistered-dialect -pass-pipeline="builtin.func(linalg-detensorize)" | FileCheck %s +// RUN: mlir-opt %s -split-input-file -allow-unregistered-dialect -pass-pipeline="func.func(linalg-detensorize)" | FileCheck %s #map0 = affine_map<() -> ()> diff --git a/mlir/test/Dialect/Linalg/detensorize_trivial.mlir b/mlir/test/Dialect/Linalg/detensorize_trivial.mlir --- a/mlir/test/Dialect/Linalg/detensorize_trivial.mlir +++ b/mlir/test/Dialect/Linalg/detensorize_trivial.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(linalg-detensorize{aggressive-mode})" | FileCheck %s -check-prefix=DET-ALL -// RUN: mlir-opt %s -pass-pipeline="builtin.func(linalg-detensorize)" | FileCheck %s -check-prefix=DET-CF +// RUN: mlir-opt %s -pass-pipeline="func.func(linalg-detensorize{aggressive-mode})" | FileCheck %s -check-prefix=DET-ALL +// RUN: mlir-opt %s -pass-pipeline="func.func(linalg-detensorize)" | FileCheck %s -check-prefix=DET-CF #map0 = affine_map<() -> ()> diff --git a/mlir/test/Dialect/Linalg/detensorize_while.mlir b/mlir/test/Dialect/Linalg/detensorize_while.mlir --- a/mlir/test/Dialect/Linalg/detensorize_while.mlir +++ b/mlir/test/Dialect/Linalg/detensorize_while.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(linalg-detensorize{aggressive-mode})" | FileCheck %s -check-prefix=DET-ALL -// RUN: mlir-opt %s -pass-pipeline="builtin.func(linalg-detensorize)" | FileCheck %s -check-prefix=DET-CF +// RUN: mlir-opt %s -pass-pipeline="func.func(linalg-detensorize{aggressive-mode})" | FileCheck %s -check-prefix=DET-ALL +// RUN: mlir-opt %s -pass-pipeline="func.func(linalg-detensorize)" | FileCheck %s 
-check-prefix=DET-CF #map0 = affine_map<() -> ()> diff --git a/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir b/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir --- a/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir +++ b/mlir/test/Dialect/Linalg/detensorize_while_impure_cf.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(linalg-detensorize{aggressive-mode})" | FileCheck %s -check-prefix=DET-ALL -// RUN: mlir-opt %s -pass-pipeline="builtin.func(linalg-detensorize)" | FileCheck %s -check-prefix=DET-CF +// RUN: mlir-opt %s -pass-pipeline="func.func(linalg-detensorize{aggressive-mode})" | FileCheck %s -check-prefix=DET-ALL +// RUN: mlir-opt %s -pass-pipeline="func.func(linalg-detensorize)" | FileCheck %s -check-prefix=DET-CF #map0 = affine_map<() -> ()> #map1 = affine_map<(i) -> ()> diff --git a/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir b/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir --- a/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir +++ b/mlir/test/Dialect/Linalg/detensorize_while_pure_cf.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -allow-unregistered-dialect -pass-pipeline="builtin.func(linalg-detensorize)" | FileCheck %s +// RUN: mlir-opt %s -allow-unregistered-dialect -pass-pipeline="func.func(linalg-detensorize)" | FileCheck %s #map0 = affine_map<() -> ()> diff --git a/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir b/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir --- a/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir +++ b/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -split-input-file -pass-pipeline="builtin.func(linalg-fold-unit-extent-dims)" | FileCheck %s +// RUN: mlir-opt %s -split-input-file -pass-pipeline="func.func(linalg-fold-unit-extent-dims)" | FileCheck %s #accesses = [ affine_map<(i, j, k, l, m) -> (i, k, m)>, diff --git a/mlir/test/Dialect/Linalg/fold-unit-trip-loops.mlir 
b/mlir/test/Dialect/Linalg/fold-unit-trip-loops.mlir --- a/mlir/test/Dialect/Linalg/fold-unit-trip-loops.mlir +++ b/mlir/test/Dialect/Linalg/fold-unit-trip-loops.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -split-input-file -pass-pipeline="builtin.func(linalg-fold-unit-extent-dims{fold-one-trip-loops-only})" | FileCheck %s +// RUN: mlir-opt %s -split-input-file -pass-pipeline="func.func(linalg-fold-unit-extent-dims{fold-one-trip-loops-only})" | FileCheck %s #accesses = [ affine_map<(i, j, k, l, m) -> (i, k, m)>, diff --git a/mlir/test/Dialect/Linalg/fusion-sequence.mlir b/mlir/test/Dialect/Linalg/fusion-sequence.mlir --- a/mlir/test/Dialect/Linalg/fusion-sequence.mlir +++ b/mlir/test/Dialect/Linalg/fusion-sequence.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -pass-pipeline="builtin.func(test-linalg-tile-and-fuse{tile-sizes=16,32,64}),resolve-shaped-type-result-dims,canonicalize,cse" -split-input-file %s | FileCheck %s +// RUN: mlir-opt -pass-pipeline="func.func(test-linalg-tile-and-fuse{tile-sizes=16,32,64}),resolve-shaped-type-result-dims,canonicalize,cse" -split-input-file %s | FileCheck %s module { func @three_op_fusion(%arg0: memref, %arg1: memref, diff --git a/mlir/test/Dialect/Linalg/tile-and-fuse-no-fuse.mlir b/mlir/test/Dialect/Linalg/tile-and-fuse-no-fuse.mlir --- a/mlir/test/Dialect/Linalg/tile-and-fuse-no-fuse.mlir +++ b/mlir/test/Dialect/Linalg/tile-and-fuse-no-fuse.mlir @@ -1,6 +1,6 @@ // RUN: mlir-opt %s -test-linalg-codegen-strategy="anchor-op=linalg.matmul fuse tile-sizes=0,0,0 run-enable-pass=false" -split-input-file | FileCheck %s -builtin.func @no_fuse_gemm(%arg0 : tensor, %arg1 : tensor) -> tensor { +func.func @no_fuse_gemm(%arg0 : tensor, %arg1 : tensor) -> tensor { %c0 = arith.constant 0 : index %c1 = arith.constant 1 : index %cst = arith.constant 0.0 : f32 diff --git a/mlir/test/Dialect/Linalg/tile-and-fuse-on-tensors.mlir b/mlir/test/Dialect/Linalg/tile-and-fuse-on-tensors.mlir --- a/mlir/test/Dialect/Linalg/tile-and-fuse-on-tensors.mlir +++ 
b/mlir/test/Dialect/Linalg/tile-and-fuse-on-tensors.mlir @@ -8,7 +8,7 @@ // MATMUL: fuse_input // MATMUL-SAME: %[[ARG0:[0-9a-zA-Z]*]]: tensor<24x12xf32> -builtin.func @fuse_input(%arg0: tensor<24x12xf32>, +func.func @fuse_input(%arg0: tensor<24x12xf32>, %arg1: tensor<12x25xf32>, %arg2: tensor<24x25xf32>) -> tensor<24x25xf32> { %c0 = arith.constant 0 : index @@ -44,7 +44,7 @@ // MATMUL: fuse_output // MATMUL-SAME: %[[ARG2:[0-9a-zA-Z]*]]: tensor<24x25xf32> -builtin.func @fuse_output(%arg0: tensor<24x12xf32>, +func.func @fuse_output(%arg0: tensor<24x12xf32>, %arg1: tensor<12x25xf32>, %arg2: tensor<24x25xf32>) -> tensor<24x25xf32> { // MATMUL-DAG: %[[C0:.*]] = arith.constant 0 : index @@ -96,7 +96,7 @@ // MATMUL: fuse_reduction // MATMUL-SAME: %[[ARG1:[0-9a-zA-Z]*]]: tensor<12x25xf32> // MATMUL-SAME: %[[ARG3:[0-9a-zA-Z]*]]: tensor<12x7x25xf32> -builtin.func @fuse_reduction(%arg0: tensor<24x12xf32>, +func.func @fuse_reduction(%arg0: tensor<24x12xf32>, %arg1: tensor<12x25xf32>, %arg2: tensor<24x25xf32>, %arg3: tensor<12x7x25xf32>) -> tensor<24x25xf32> { @@ -140,7 +140,7 @@ // MATMUL: fuse_transposed // MATMUL-SAME: %[[ARG0:[0-9a-zA-Z]*]]: tensor<24x12xf32> // MATMUL-SAME: %[[ARG3:[0-9a-zA-Z]*]]: tensor<12x24xf32> -builtin.func @fuse_transposed(%arg0: tensor<24x12xf32>, +func.func @fuse_transposed(%arg0: tensor<24x12xf32>, %arg1: tensor<12x25xf32>, %arg2: tensor<24x25xf32>, %arg3: tensor<12x24xf32>) -> tensor<24x25xf32> { @@ -175,7 +175,7 @@ // MATMUL: fuse_input_and_output // MATMUL-SAME: %[[ARG0:[0-9a-zA-Z]*]]: tensor<24x12xf32> // MATMUL-SAME: %[[ARG2:[0-9a-zA-Z]*]]: tensor<24x25xf32> -builtin.func @fuse_input_and_output(%arg0: tensor<24x12xf32>, +func.func @fuse_input_and_output(%arg0: tensor<24x12xf32>, %arg1: tensor<12x25xf32>, %arg2: tensor<24x25xf32>) -> tensor<24x25xf32> { %c0 = arith.constant 0 : index @@ -210,7 +210,7 @@ // MATMUL: fuse_indexed // MATMUL-SAME: %[[ARG1:[0-9a-zA-Z]*]]: tensor<12x25xi32> -builtin.func @fuse_indexed(%arg0: tensor<24x12xi32>, 
+func.func @fuse_indexed(%arg0: tensor<24x12xi32>, %arg1: tensor<12x25xi32>, %arg2: tensor<24x25xi32>) -> tensor<24x25xi32> { %c0 = arith.constant 0 : index diff --git a/mlir/test/Dialect/Linalg/tile-and-fuse-sequence-on-tensors.mlir b/mlir/test/Dialect/Linalg/tile-and-fuse-sequence-on-tensors.mlir --- a/mlir/test/Dialect/Linalg/tile-and-fuse-sequence-on-tensors.mlir +++ b/mlir/test/Dialect/Linalg/tile-and-fuse-sequence-on-tensors.mlir @@ -7,7 +7,7 @@ // CONV-SAME: %[[ARG2:[0-9a-zA-Z]*]]: tensor<10x10xf32> // CONV-SAME: %[[ARG3:[0-9a-zA-Z]*]]: tensor<9x9xf32> // CONV-SAME: %[[ARG4:[0-9a-zA-Z]*]]: tensor<8x8xf32> -builtin.func @fuse_conv_chain(%arg0: tensor<2x2xf32>, +func.func @fuse_conv_chain(%arg0: tensor<2x2xf32>, %arg1: tensor<11x11xf32>, %arg2: tensor<10x10xf32>, %arg3: tensor<9x9xf32>, @@ -52,7 +52,7 @@ // MATMUL: fuse_matmul_chain // MATMUL-SAME: %[[ARG0:[0-9a-zA-Z]*]]: tensor<8x8xf32> -builtin.func @fuse_matmul_chain(%arg0: tensor<8x8xf32>) -> tensor<8x8xf32> { +func.func @fuse_matmul_chain(%arg0: tensor<8x8xf32>) -> tensor<8x8xf32> { %c0 = arith.constant 0 : index %c12 = arith.constant 12 : index %c25 = arith.constant 25 : index diff --git a/mlir/test/Dialect/Quant/canonicalize.mlir b/mlir/test/Dialect/Quant/canonicalize.mlir --- a/mlir/test/Dialect/Quant/canonicalize.mlir +++ b/mlir/test/Dialect/Quant/canonicalize.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -split-input-file -pass-pipeline='builtin.func(canonicalize)' | FileCheck %s +// RUN: mlir-opt %s -split-input-file -pass-pipeline='func.func(canonicalize)' | FileCheck %s // ----- // CHECK-LABEL: redundant_scast diff --git a/mlir/test/Dialect/SCF/canonicalize.mlir b/mlir/test/Dialect/SCF/canonicalize.mlir --- a/mlir/test/Dialect/SCF/canonicalize.mlir +++ b/mlir/test/Dialect/SCF/canonicalize.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline='builtin.func(canonicalize)' -split-input-file | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline='func.func(canonicalize)' -split-input-file | FileCheck %s // 
----- diff --git a/mlir/test/Dialect/SCF/for-loop-to-while-loop.mlir b/mlir/test/Dialect/SCF/for-loop-to-while-loop.mlir --- a/mlir/test/Dialect/SCF/for-loop-to-while-loop.mlir +++ b/mlir/test/Dialect/SCF/for-loop-to-while-loop.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline='builtin.func(scf-for-to-while)' -split-input-file | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline='func.func(scf-for-to-while)' -split-input-file | FileCheck %s // NOTE: Assertions have been autogenerated by utils/generate-test-checks.py // CHECK-LABEL: func @single_loop( diff --git a/mlir/test/Dialect/SCF/loop-range.mlir b/mlir/test/Dialect/SCF/loop-range.mlir --- a/mlir/test/Dialect/SCF/loop-range.mlir +++ b/mlir/test/Dialect/SCF/loop-range.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline='builtin.func(scf-for-loop-range-folding)' -split-input-file | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline='func.func(scf-for-loop-range-folding)' -split-input-file | FileCheck %s func @fold_one_loop(%arg0: memref, %arg1: index, %arg2: index) { %c0 = arith.constant 0 : index diff --git a/mlir/test/Dialect/SCF/parallel-loop-fusion.mlir b/mlir/test/Dialect/SCF/parallel-loop-fusion.mlir --- a/mlir/test/Dialect/SCF/parallel-loop-fusion.mlir +++ b/mlir/test/Dialect/SCF/parallel-loop-fusion.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='builtin.func(scf-parallel-loop-fusion)' -split-input-file | FileCheck %s +// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='func.func(scf-parallel-loop-fusion)' -split-input-file | FileCheck %s func @fuse_empty_loops() { %c2 = arith.constant 2 : index diff --git a/mlir/test/Dialect/SCF/parallel-loop-tiling-inbound-check.mlir b/mlir/test/Dialect/SCF/parallel-loop-tiling-inbound-check.mlir --- a/mlir/test/Dialect/SCF/parallel-loop-tiling-inbound-check.mlir +++ b/mlir/test/Dialect/SCF/parallel-loop-tiling-inbound-check.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s 
-pass-pipeline='builtin.func(scf-parallel-loop-tiling{parallel-loop-tile-sizes=1,4 no-min-max-bounds=true})' -split-input-file | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline='func.func(scf-parallel-loop-tiling{parallel-loop-tile-sizes=1,4 no-min-max-bounds=true})' -split-input-file | FileCheck %s func @parallel_loop(%arg0 : index, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index, %arg5 : index, diff --git a/mlir/test/Dialect/SCF/parallel-loop-tiling.mlir b/mlir/test/Dialect/SCF/parallel-loop-tiling.mlir --- a/mlir/test/Dialect/SCF/parallel-loop-tiling.mlir +++ b/mlir/test/Dialect/SCF/parallel-loop-tiling.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline='builtin.func(scf-parallel-loop-tiling{parallel-loop-tile-sizes=1,4})' -split-input-file | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline='func.func(scf-parallel-loop-tiling{parallel-loop-tile-sizes=1,4})' -split-input-file | FileCheck %s func @parallel_loop(%arg0 : index, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index, %arg5 : index, diff --git a/mlir/test/Dialect/SPIRV/Transforms/canonicalize.mlir b/mlir/test/Dialect/SPIRV/Transforms/canonicalize.mlir --- a/mlir/test/Dialect/SPIRV/Transforms/canonicalize.mlir +++ b/mlir/test/Dialect/SPIRV/Transforms/canonicalize.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -split-input-file -pass-pipeline='builtin.func(canonicalize)' | FileCheck %s +// RUN: mlir-opt %s -split-input-file -pass-pipeline='func.func(canonicalize)' | FileCheck %s //===----------------------------------------------------------------------===// // spv.AccessChain diff --git a/mlir/test/Dialect/Shape/invalid.mlir b/mlir/test/Dialect/Shape/invalid.mlir --- a/mlir/test/Dialect/Shape/invalid.mlir +++ b/mlir/test/Dialect/Shape/invalid.mlir @@ -172,7 +172,7 @@ shape.function_library @shape_lib { // Test shape function that returns the shape of input arg as result shape. 
- builtin.func @same_result_shape(%arg: !shape.value_shape) -> !shape.shape { + func.func @same_result_shape(%arg: !shape.value_shape) -> !shape.shape { %0 = shape.shape_of %arg : !shape.value_shape -> !shape.shape return %0 : !shape.shape } @@ -192,7 +192,7 @@ shape.function_library @shape_lib { // Test shape function that returns the shape of input arg as result shape. - builtin.func @same_result_shape(%arg: !shape.value_shape) -> !shape.shape { + func.func @same_result_shape(%arg: !shape.value_shape) -> !shape.shape { %0 = shape.shape_of %arg : !shape.value_shape -> !shape.shape return %0 : !shape.shape } @@ -212,7 +212,7 @@ shape.function_library @shape_lib { // Test shape function that returns the shape of input arg as result shape. - builtin.func @same_result_shape(%arg: !shape.value_shape) -> !shape.shape { + func.func @same_result_shape(%arg: !shape.value_shape) -> !shape.shape { %0 = shape.shape_of %arg : !shape.value_shape -> !shape.shape return %0 : !shape.shape } diff --git a/mlir/test/Dialect/Tensor/invalid.mlir b/mlir/test/Dialect/Tensor/invalid.mlir --- a/mlir/test/Dialect/Tensor/invalid.mlir +++ b/mlir/test/Dialect/Tensor/invalid.mlir @@ -91,7 +91,7 @@ func @tensor.generate(%m : index, %n : index) -> tensor { - // expected-error @+4 {{'func.return' op expects parent op 'builtin.func'}} + // expected-error @+4 {{'func.return' op expects parent op 'func.func'}} %tnsr = tensor.generate %m, %n { ^bb0(%i : index, %j : index, %k : index): %elem = arith.constant 8.0 : f32 diff --git a/mlir/test/Dialect/Vector/canonicalize.mlir b/mlir/test/Dialect/Vector/canonicalize.mlir --- a/mlir/test/Dialect/Vector/canonicalize.mlir +++ b/mlir/test/Dialect/Vector/canonicalize.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline='builtin.func(canonicalize)' -split-input-file -allow-unregistered-dialect | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline='func.func(canonicalize)' -split-input-file -allow-unregistered-dialect | FileCheck %s // ----- diff --git 
a/mlir/test/IR/diagnostic-handler-filter.mlir b/mlir/test/IR/diagnostic-handler-filter.mlir --- a/mlir/test/IR/diagnostic-handler-filter.mlir +++ b/mlir/test/IR/diagnostic-handler-filter.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(test-diagnostic-filter{filters=mysource1})" -split-input-file -o - 2>&1 | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="func.func(test-diagnostic-filter{filters=mysource1})" -split-input-file -o - 2>&1 | FileCheck %s // This test verifies that diagnostic handler can emit the call stack successfully. // CHECK-LABEL: Test 'test1' diff --git a/mlir/test/IR/generic-visitors-interrupt.mlir b/mlir/test/IR/generic-visitors-interrupt.mlir --- a/mlir/test/IR/generic-visitors-interrupt.mlir +++ b/mlir/test/IR/generic-visitors-interrupt.mlir @@ -8,7 +8,7 @@ } // CHECK: step 0 op 'builtin.module' before all regions -// CHECK: step 1 op 'builtin.func' before all regions +// CHECK: step 1 op 'func.func' before all regions // CHECK: step 2 walk was interrupted // ----- @@ -21,7 +21,7 @@ } // CHECK: step 0 op 'builtin.module' before all regions -// CHECK: step 1 op 'builtin.func' before all regions +// CHECK: step 1 op 'func.func' before all regions // CHECK: step 2 op 'foo' before all regions // CHECK: step 3 op 'bar' before all regions // CHECK: step 4 walk was interrupted @@ -40,7 +40,7 @@ } // CHECK: step 0 op 'builtin.module' before all regions -// CHECK: step 1 op 'builtin.func' before all regions +// CHECK: step 1 op 'func.func' before all regions // CHECK: step 2 op 'foo' before all regions // CHECK: step 3 op 'bar0' before all regions // CHECK: step 4 walk was interrupted @@ -59,7 +59,7 @@ } // CHECK: step 0 op 'builtin.module' before all regions -// CHECK: step 1 op 'builtin.func' before all regions +// CHECK: step 1 op 'func.func' before all regions // CHECK: step 2 op 'foo' before all regions // CHECK: step 3 op 'test.two_region_op' before all regions // CHECK: step 4 op 'work' before all regions @@ -83,7 +83,7 
@@ } // CHECK: step 0 op 'builtin.module' before all regions -// CHECK: step 1 op 'builtin.func' before all regions +// CHECK: step 1 op 'func.func' before all regions // CHECK: step 2 op 'foo' before all regions // CHECK: step 3 op 'test.two_region_op' before all regions // CHECK: step 4 op 'work' before all regions @@ -106,10 +106,10 @@ } // CHECK: step 0 op 'builtin.module' before all regions -// CHECK: step 1 op 'builtin.func' before all regions +// CHECK: step 1 op 'func.func' before all regions // CHECK: step 2 op 'arith.addf' before all regions // CHECK: step 3 op 'func.return' before all regions -// CHECK: step 4 op 'builtin.func' after all regions +// CHECK: step 4 op 'func.func' after all regions // CHECK: step 5 op 'builtin.module' after all regions // ----- @@ -125,14 +125,14 @@ } // CHECK: step 0 op 'builtin.module' before all regions -// CHECK: step 1 op 'builtin.func' before all regions +// CHECK: step 1 op 'func.func' before all regions // CHECK: step 2 op 'foo' before all regions // CHECK: step 3 op 'bar0' before all regions // CHECK: step 4 op 'foo' before region #1 // CHECK: step 5 op 'bar1' before all regions // CHECK: step 6 op 'arith.addf' before all regions // CHECK: step 7 op 'func.return' before all regions -// CHECK: step 8 op 'builtin.func' after all regions +// CHECK: step 8 op 'func.func' after all regions // CHECK: step 9 op 'builtin.module' after all regions // ----- @@ -148,10 +148,10 @@ } // CHECK: step 0 op 'builtin.module' before all regions -// CHECK: step 1 op 'builtin.func' before all regions +// CHECK: step 1 op 'func.func' before all regions // CHECK: step 2 op 'foo' before all regions // CHECK: step 3 op 'bar0' before all regions // CHECK: step 4 op 'arith.addf' before all regions // CHECK: step 5 op 'func.return' before all regions -// CHECK: step 6 op 'builtin.func' after all regions +// CHECK: step 6 op 'func.func' after all regions // CHECK: step 7 op 'builtin.module' after all regions diff --git 
a/mlir/test/IR/generic-visitors.mlir b/mlir/test/IR/generic-visitors.mlir --- a/mlir/test/IR/generic-visitors.mlir +++ b/mlir/test/IR/generic-visitors.mlir @@ -20,7 +20,7 @@ } // CHECK: step 0 op 'builtin.module' before all regions -// CHECK: step 1 op 'builtin.func' before all regions +// CHECK: step 1 op 'func.func' before all regions // CHECK: step 2 op 'arith.constant' before all regions // CHECK: step 3 op 'arith.constant' before all regions // CHECK: step 4 op 'arith.constant' before all regions @@ -37,7 +37,7 @@ // CHECK: step 15 op 'scf.yield' before all regions // CHECK: step 16 op 'scf.for' after all regions // CHECK: step 17 op 'func.return' before all regions -// CHECK: step 18 op 'builtin.func' after all regions +// CHECK: step 18 op 'func.func' after all regions // CHECK: step 19 op 'builtin.module' after all regions // ----- diff --git a/mlir/test/IR/invalid-func-op.mlir b/mlir/test/IR/invalid-func-op.mlir --- a/mlir/test/IR/invalid-func-op.mlir +++ b/mlir/test/IR/invalid-func-op.mlir @@ -4,7 +4,7 @@ func @func_op() { // expected-error@+1 {{expected valid '@'-identifier for symbol name}} - builtin.func missingsigil() -> (i1, index, f32) + func.func missingsigil() -> (i1, index, f32) return } @@ -12,7 +12,7 @@ func @func_op() { // expected-error@+1 {{expected type instead of SSA identifier}} - builtin.func @mixed_named_arguments(f32, %a : i32) { + func.func @mixed_named_arguments(f32, %a : i32) { return } return @@ -22,7 +22,7 @@ func @func_op() { // expected-error@+1 {{expected SSA identifier}} - builtin.func @mixed_named_arguments(%a : i32, f32) -> () { + func.func @mixed_named_arguments(%a : i32, f32) -> () { return } return @@ -32,7 +32,7 @@ func @func_op() { // expected-error@+1 {{entry block must have 1 arguments to match function signature}} - builtin.func @mixed_named_arguments(f32) { + func.func @mixed_named_arguments(f32) { ^entry: return } @@ -43,7 +43,7 @@ func @func_op() { // expected-error@+1 {{type of entry block argument #0('i32') must 
match the type of the corresponding argument in function signature('f32')}} - builtin.func @mixed_named_arguments(f32) { + func.func @mixed_named_arguments(f32) { ^entry(%arg : i32): return } diff --git a/mlir/test/IR/invalid-ops.mlir b/mlir/test/IR/invalid-ops.mlir --- a/mlir/test/IR/invalid-ops.mlir +++ b/mlir/test/IR/invalid-ops.mlir @@ -98,7 +98,7 @@ func @return_not_in_function() { "foo.region"() ({ - // expected-error@+1 {{'func.return' op expects parent op 'builtin.func'}} + // expected-error@+1 {{'func.return' op expects parent op 'func.func'}} return }): () -> () return diff --git a/mlir/test/IR/invalid.mlir b/mlir/test/IR/invalid.mlir --- a/mlir/test/IR/invalid.mlir +++ b/mlir/test/IR/invalid.mlir @@ -542,7 +542,7 @@ func @return_inside_loop() { affine.for %i = 1 to 100 { - // expected-error@+1 {{'func.return' op expects parent op 'builtin.func'}} + // expected-error@+1 {{'func.return' op expects parent op 'func.func'}} return } return diff --git a/mlir/test/IR/print-ir-invalid.mlir b/mlir/test/IR/print-ir-invalid.mlir --- a/mlir/test/IR/print-ir-invalid.mlir +++ b/mlir/test/IR/print-ir-invalid.mlir @@ -8,7 +8,7 @@ // The operation is invalid because the body does not have a terminator, print // the generic form. 
// CHECK: Invalid operation: -// CHECK-NEXT: "builtin.func"() ({ +// CHECK-NEXT: "func.func"() ({ // CHECK-NEXT: ^bb0: // CHECK-NEXT: }) // CHECK-SAME: sym_name = "test" diff --git a/mlir/test/IR/test-matchers.mlir b/mlir/test/IR/test-matchers.mlir --- a/mlir/test/IR/test-matchers.mlir +++ b/mlir/test/IR/test-matchers.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline="builtin.func(test-matchers)" -o /dev/null 2>&1 | FileCheck %s +// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline="func.func(test-matchers)" -o /dev/null 2>&1 | FileCheck %s func @test1(%a: f32, %b: f32, %c: f32) { %0 = arith.addf %a, %b: f32 diff --git a/mlir/test/IR/traits.mlir b/mlir/test/IR/traits.mlir --- a/mlir/test/IR/traits.mlir +++ b/mlir/test/IR/traits.mlir @@ -575,7 +575,7 @@ // checked for dominance func @illegalInsideDominanceFreeScope() -> () { test.graph_region { - builtin.func @test() -> i1 { + func.func @test() -> i1 { ^bb1: // expected-error @+1 {{operand #0 does not dominate this use}} %2:3 = "bar"(%1) : (i64) -> (i1,i1,i1) @@ -594,7 +594,7 @@ // checked for dominance func @illegalCDFGInsideDominanceFreeScope() -> () { test.graph_region { - builtin.func @test() -> i1 { + func.func @test() -> i1 { ^bb1: // expected-error @+1 {{operand #0 does not dominate this use}} %2:3 = "bar"(%1) : (i64) -> (i1,i1,i1) diff --git a/mlir/test/IR/visitors.mlir b/mlir/test/IR/visitors.mlir --- a/mlir/test/IR/visitors.mlir +++ b/mlir/test/IR/visitors.mlir @@ -23,7 +23,7 @@ // CHECK-LABEL: Op pre-order visit // CHECK: Visiting op 'builtin.module' -// CHECK: Visiting op 'builtin.func' +// CHECK: Visiting op 'func.func' // CHECK: Visiting op 'scf.for' // CHECK: Visiting op 'use0' // CHECK: Visiting op 'scf.if' @@ -34,14 +34,14 @@ // CHECK-LABEL: Block pre-order visits // CHECK: Visiting block ^bb0 from region 0 from operation 'builtin.module' -// CHECK: Visiting block ^bb0 from region 0 from operation 'builtin.func' +// CHECK: Visiting block ^bb0 from 
region 0 from operation 'func.func' // CHECK: Visiting block ^bb0 from region 0 from operation 'scf.for' // CHECK: Visiting block ^bb0 from region 0 from operation 'scf.if' // CHECK: Visiting block ^bb0 from region 1 from operation 'scf.if' // CHECK-LABEL: Region pre-order visits // CHECK: Visiting region 0 from operation 'builtin.module' -// CHECK: Visiting region 0 from operation 'builtin.func' +// CHECK: Visiting region 0 from operation 'func.func' // CHECK: Visiting region 0 from operation 'scf.for' // CHECK: Visiting region 0 from operation 'scf.if' // CHECK: Visiting region 1 from operation 'scf.if' @@ -54,21 +54,21 @@ // CHECK: Visiting op 'use3' // CHECK: Visiting op 'scf.for' // CHECK: Visiting op 'func.return' -// CHECK: Visiting op 'builtin.func' +// CHECK: Visiting op 'func.func' // CHECK: Visiting op 'builtin.module' // CHECK-LABEL: Block post-order visits // CHECK: Visiting block ^bb0 from region 0 from operation 'scf.if' // CHECK: Visiting block ^bb0 from region 1 from operation 'scf.if' // CHECK: Visiting block ^bb0 from region 0 from operation 'scf.for' -// CHECK: Visiting block ^bb0 from region 0 from operation 'builtin.func' +// CHECK: Visiting block ^bb0 from region 0 from operation 'func.func' // CHECK: Visiting block ^bb0 from region 0 from operation 'builtin.module' // CHECK-LABEL: Region post-order visits // CHECK: Visiting region 0 from operation 'scf.if' // CHECK: Visiting region 1 from operation 'scf.if' // CHECK: Visiting region 0 from operation 'scf.for' -// CHECK: Visiting region 0 from operation 'builtin.func' +// CHECK: Visiting region 0 from operation 'func.func' // CHECK: Visiting region 0 from operation 'builtin.module' // CHECK-LABEL: Op pre-order erasures @@ -100,14 +100,14 @@ // CHECK: Erasing op 'use3' // CHECK: Erasing op 'scf.for' // CHECK: Erasing op 'func.return' -// CHECK: Erasing op 'builtin.func' +// CHECK: Erasing op 'func.func' // CHECK: Erasing op 'builtin.module' // CHECK-LABEL: Block post-order erasures (no skip) 
// CHECK: Erasing block ^bb0 from region 0 from operation 'scf.if' // CHECK: Erasing block ^bb0 from region 1 from operation 'scf.if' // CHECK: Erasing block ^bb0 from region 0 from operation 'scf.for' -// CHECK: Erasing block ^bb0 from region 0 from operation 'builtin.func' +// CHECK: Erasing block ^bb0 from region 0 from operation 'func.func' // CHECK: Erasing block ^bb0 from region 0 from operation 'builtin.module' // ----- @@ -128,7 +128,7 @@ // CHECK-LABEL: Op pre-order visits // CHECK: Visiting op 'builtin.module' -// CHECK: Visiting op 'builtin.func' +// CHECK: Visiting op 'func.func' // CHECK: Visiting op 'regionOp0' // CHECK: Visiting op 'op0' // CHECK: Visiting op 'cf.br' @@ -139,14 +139,14 @@ // CHECK-LABEL: Block pre-order visits // CHECK: Visiting block ^bb0 from region 0 from operation 'builtin.module' -// CHECK: Visiting block ^bb0 from region 0 from operation 'builtin.func' +// CHECK: Visiting block ^bb0 from region 0 from operation 'func.func' // CHECK: Visiting block ^bb0 from region 0 from operation 'regionOp0' // CHECK: Visiting block ^bb1 from region 0 from operation 'regionOp0' // CHECK: Visiting block ^bb2 from region 0 from operation 'regionOp0' // CHECK-LABEL: Region pre-order visits // CHECK: Visiting region 0 from operation 'builtin.module' -// CHECK: Visiting region 0 from operation 'builtin.func' +// CHECK: Visiting region 0 from operation 'func.func' // CHECK: Visiting region 0 from operation 'regionOp0' // CHECK-LABEL: Op post-order visits @@ -157,19 +157,19 @@ // CHECK: Visiting op 'op2' // CHECK: Visiting op 'regionOp0' // CHECK: Visiting op 'func.return' -// CHECK: Visiting op 'builtin.func' +// CHECK: Visiting op 'func.func' // CHECK: Visiting op 'builtin.module' // CHECK-LABEL: Block post-order visits // CHECK: Visiting block ^bb0 from region 0 from operation 'regionOp0' // CHECK: Visiting block ^bb1 from region 0 from operation 'regionOp0' // CHECK: Visiting block ^bb2 from region 0 from operation 'regionOp0' -// CHECK: Visiting 
block ^bb0 from region 0 from operation 'builtin.func' +// CHECK: Visiting block ^bb0 from region 0 from operation 'func.func' // CHECK: Visiting block ^bb0 from region 0 from operation 'builtin.module' // CHECK-LABEL: Region post-order visits // CHECK: Visiting region 0 from operation 'regionOp0' -// CHECK: Visiting region 0 from operation 'builtin.func' +// CHECK: Visiting region 0 from operation 'func.func' // CHECK: Visiting region 0 from operation 'builtin.module' // CHECK-LABEL: Op pre-order erasures (skip) @@ -208,5 +208,5 @@ // CHECK: Erasing block ^bb0 from region 0 from operation 'regionOp0' // CHECK: Erasing block ^bb0 from region 0 from operation 'regionOp0' // CHECK: Erasing block ^bb0 from region 0 from operation 'regionOp0' -// CHECK: Erasing block ^bb0 from region 0 from operation 'builtin.func' +// CHECK: Erasing block ^bb0 from region 0 from operation 'func.func' // CHECK: Erasing block ^bb0 from region 0 from operation 'builtin.module' diff --git a/mlir/test/IR/wrapping_op.mlir b/mlir/test/IR/wrapping_op.mlir --- a/mlir/test/IR/wrapping_op.mlir +++ b/mlir/test/IR/wrapping_op.mlir @@ -2,7 +2,7 @@ // RUN: mlir-opt -allow-unregistered-dialect -mlir-print-op-generic -mlir-print-debuginfo -mlir-print-local-scope %s | FileCheck %s --check-prefix=CHECK-GENERIC // CHECK-LABEL: func @wrapping_op -// CHECK-GENERIC: "builtin.func" +// CHECK-GENERIC: "func.func" func @wrapping_op(%arg0 : i32, %arg1 : f32) -> (i3, i2, i1) { // CHECK: %0:3 = test.wrapping_region wraps "some.op"(%arg1, %arg0) {test.attr = "attr"} : (f32, i32) -> (i1, i2, i3) // CHECK-GENERIC: "test.wrapping_region"() ({ diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/benchmark_matmul.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/benchmark_matmul.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/benchmark_matmul.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/benchmark_matmul.mlir @@ -4,8 +4,8 @@ // RUN: mlir-opt -test-linalg-codegen-strategy="anchor-func=matmul 
anchor-op=linalg.fill register-tile-sizes=4,32 vectorize" | \ // RUN: mlir-opt -test-linalg-codegen-strategy="anchor-func=matmul anchor-op=memref.copy register-tile-sizes=4,32 vectorize" | \ -// RUN: mlir-opt -pass-pipeline="builtin.func(canonicalize,convert-vector-to-scf,lower-affine,convert-linalg-to-loops)" | \ -// RUN: mlir-opt -pass-pipeline="builtin.func(canonicalize,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ +// RUN: mlir-opt -pass-pipeline="func.func(canonicalize,convert-vector-to-scf,lower-affine,convert-linalg-to-loops)" | \ +// RUN: mlir-opt -pass-pipeline="func.func(canonicalize,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -O3 -e main -entry-point-result=void \ // Activate to dump assembly // R_UN: -dump-object-file -object-filename=/tmp/a.o \ diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-comprehensive-bufferize.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-comprehensive-bufferize.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-comprehensive-bufferize.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-comprehensive-bufferize.mlir @@ -1,6 +1,6 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(canonicalize,cse),linalg-comprehensive-module-bufferize" |\ -// RUN: mlir-opt -pass-pipeline="builtin.func(buffer-deallocation,convert-vector-to-scf,lower-affine,convert-linalg-to-loops)" |\ -// RUN: mlir-opt -pass-pipeline="builtin.func(canonicalize,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ +// RUN: mlir-opt %s -pass-pipeline="func.func(canonicalize,cse),linalg-comprehensive-module-bufferize" |\ +// RUN: mlir-opt -pass-pipeline="func.func(buffer-deallocation,convert-vector-to-scf,lower-affine,convert-linalg-to-loops)" |\ +// RUN: mlir-opt 
-pass-pipeline="func.func(canonicalize,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -O3 -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext,%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext |\ diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py --- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py +++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py @@ -12,6 +12,7 @@ from mlir.dialects import sparse_tensor as st from mlir.dialects import builtin +from mlir.dialects import func from mlir.dialects.linalg.opdsl import lang as dsl _SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) @@ -44,7 +45,7 @@ arguments = [a, b, s, c] with ir.InsertionPoint(module.body): - @builtin.FuncOp.from_py_func(*arguments) + @func.FuncOp.from_py_func(*arguments) def sddmm(*args): return sddmm_dsl(args[0], args[1], args[2], outs=[args[3]]) diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py --- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py +++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py @@ -12,6 +12,7 @@ from mlir.dialects import sparse_tensor as st from mlir.dialects import builtin +from mlir.dialects import func from mlir.dialects.linalg.opdsl import lang as dsl _SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__)) @@ -41,7 +42,7 @@ arguments = [a, b, c] with ir.InsertionPoint(module.body): - @builtin.FuncOp.from_py_func(*arguments) + @func.FuncOp.from_py_func(*arguments) def spMxM(*args): return matmul_dsl(args[0], args[1], outs=[args[2]]) diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py 
b/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py --- a/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py +++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py @@ -110,7 +110,7 @@ # TODO: assert dense? assert element type is recognised by the TypeConverter? types.append(tp0) funcTp = ir.FunctionType.get(inputs=[tp0], results=[tp0]) - funcOp = builtin.FuncOp(name='main', type=funcTp) + funcOp = func.FuncOp(name='main', type=funcTp) funcOp.attributes['llvm.emit_c_interface'] = ir.UnitAttr.get() with ir.InsertionPoint(funcOp.add_entry_block()): arg0 = funcOp.entry_block.arguments[0] diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py --- a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py +++ b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py @@ -670,7 +670,7 @@ # Build the kernel for the operations. with ir.InsertionPoint(module.body): - @builtin.FuncOp.from_py_func(*input_types, name=_ENTRY_NAME) + @func.FuncOp.from_py_func(*input_types, name=_ENTRY_NAME) def linalg_funcop(*args): # Set up the mapping from the Access nodes to their MLIR values. 
for e, mlir in zip(input_accesses, args): diff --git a/mlir/test/Integration/Dialect/Standard/CPU/test-ceil-floor-pos-neg.mlir b/mlir/test/Integration/Dialect/Standard/CPU/test-ceil-floor-pos-neg.mlir --- a/mlir/test/Integration/Dialect/Standard/CPU/test-ceil-floor-pos-neg.mlir +++ b/mlir/test/Integration/Dialect/Standard/CPU/test-ceil-floor-pos-neg.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf,memref-expand,arith-expand),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf,memref-expand,arith-expand),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-1d.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-1d.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-1d.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-1d.mlir @@ -1,19 +1,19 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s 
-pass-pipeline="builtin.func(convert-vector-to-scf{lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-2d.mlir 
b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-2d.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-2d.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-2d.mlir @@ -1,19 +1,19 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: 
-shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-3d.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-3d.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-3d.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-3d.mlir @@ -1,19 +1,19 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ +// RUN: mlir-opt %s 
-pass-pipeline="func.func(convert-vector-to-scf{lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read.mlir @@ -1,9 +1,9 @@ -// RUN: mlir-opt %s 
-pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-to-loops.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-to-loops.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-to-loops.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-to-loops.mlir @@ -1,9 +1,9 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: 
-shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext,%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext,%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-vector-distribute.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-vector-distribute.mlir --- a/mlir/test/Integration/Dialect/Vector/CPU/test-vector-distribute.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-vector-distribute.mlir @@ -1,14 +1,14 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(test-vector-to-forloop,convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ +// RUN: mlir-opt %s -pass-pipeline="func.func(test-vector-to-forloop,convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e main \ +// RUN: mlir-opt %s 
-pass-pipeline="func.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e main \ // RUN: -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -pass-pipeline="builtin.func(test-vector-to-forloop)" | FileCheck %s -check-prefix=TRANSFORM +// RUN: mlir-opt %s -pass-pipeline="func.func(test-vector-to-forloop)" | FileCheck %s -check-prefix=TRANSFORM func private @print_memref_f32(memref<*xf32>) diff --git a/mlir/test/Pass/dynamic-pipeline.mlir b/mlir/test/Pass/dynamic-pipeline.mlir --- a/mlir/test/Pass/dynamic-pipeline.mlir +++ b/mlir/test/Pass/dynamic-pipeline.mlir @@ -1,7 +1,7 @@ -// RUN: mlir-opt %s -pass-pipeline='builtin.module(test-dynamic-pipeline{op-name=inner_mod1, dynamic-pipeline=builtin.func(cse,canonicalize)})' --mlir-disable-threading -print-ir-before-all 2>&1 | FileCheck %s --check-prefix=MOD1 --check-prefix=MOD1-ONLY --check-prefix=CHECK -// RUN: mlir-opt %s -pass-pipeline='builtin.module(test-dynamic-pipeline{op-name=inner_mod2, dynamic-pipeline=builtin.func(cse,canonicalize)})' --mlir-disable-threading -print-ir-before-all 2>&1 | FileCheck %s --check-prefix=MOD2 --check-prefix=MOD2-ONLY --check-prefix=CHECK -// RUN: mlir-opt %s -pass-pipeline='builtin.module(test-dynamic-pipeline{op-name=inner_mod1,inner_mod2, dynamic-pipeline=builtin.func(cse,canonicalize)})' --mlir-disable-threading -print-ir-before-all 2>&1 | FileCheck %s --check-prefix=MOD1 --check-prefix=MOD2 --check-prefix=CHECK -// RUN: mlir-opt %s -pass-pipeline='builtin.module(test-dynamic-pipeline{dynamic-pipeline=builtin.func(cse,canonicalize)})' --mlir-disable-threading -print-ir-before-all 2>&1 | FileCheck %s --check-prefix=MOD1 --check-prefix=MOD2 --check-prefix=CHECK +// RUN: mlir-opt %s -pass-pipeline='builtin.module(test-dynamic-pipeline{op-name=inner_mod1, 
dynamic-pipeline=func.func(cse,canonicalize)})' --mlir-disable-threading -print-ir-before-all 2>&1 | FileCheck %s --check-prefix=MOD1 --check-prefix=MOD1-ONLY --check-prefix=CHECK +// RUN: mlir-opt %s -pass-pipeline='builtin.module(test-dynamic-pipeline{op-name=inner_mod2, dynamic-pipeline=func.func(cse,canonicalize)})' --mlir-disable-threading -print-ir-before-all 2>&1 | FileCheck %s --check-prefix=MOD2 --check-prefix=MOD2-ONLY --check-prefix=CHECK +// RUN: mlir-opt %s -pass-pipeline='builtin.module(test-dynamic-pipeline{op-name=inner_mod1,inner_mod2, dynamic-pipeline=func.func(cse,canonicalize)})' --mlir-disable-threading -print-ir-before-all 2>&1 | FileCheck %s --check-prefix=MOD1 --check-prefix=MOD2 --check-prefix=CHECK +// RUN: mlir-opt %s -pass-pipeline='builtin.module(test-dynamic-pipeline{dynamic-pipeline=func.func(cse,canonicalize)})' --mlir-disable-threading -print-ir-before-all 2>&1 | FileCheck %s --check-prefix=MOD1 --check-prefix=MOD2 --check-prefix=CHECK func @f() { diff --git a/mlir/test/Pass/interface-pass.mlir b/mlir/test/Pass/interface-pass.mlir --- a/mlir/test/Pass/interface-pass.mlir +++ b/mlir/test/Pass/interface-pass.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -verify-diagnostics -pass-pipeline='builtin.func(test-interface-pass)' -o /dev/null +// RUN: mlir-opt %s -verify-diagnostics -pass-pipeline='func.func(test-interface-pass)' -o /dev/null // Test that we run the interface pass on the function. 
diff --git a/mlir/test/Pass/ir-printing.mlir b/mlir/test/Pass/ir-printing.mlir --- a/mlir/test/Pass/ir-printing.mlir +++ b/mlir/test/Pass/ir-printing.mlir @@ -1,10 +1,10 @@ -// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline='builtin.func(cse,canonicalize)' -print-ir-before=cse -o /dev/null 2>&1 | FileCheck -check-prefix=BEFORE %s -// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline='builtin.func(cse,canonicalize)' -print-ir-before-all -o /dev/null 2>&1 | FileCheck -check-prefix=BEFORE_ALL %s -// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline='builtin.func(cse,canonicalize)' -print-ir-after=cse -o /dev/null 2>&1 | FileCheck -check-prefix=AFTER %s -// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline='builtin.func(cse,canonicalize)' -print-ir-after-all -o /dev/null 2>&1 | FileCheck -check-prefix=AFTER_ALL %s -// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline='builtin.func(cse,canonicalize)' -print-ir-before=cse -print-ir-module-scope -o /dev/null 2>&1 | FileCheck -check-prefix=BEFORE_MODULE %s -// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline='builtin.func(cse,cse)' -print-ir-after-all -print-ir-after-change -o /dev/null 2>&1 | FileCheck -check-prefix=AFTER_ALL_CHANGE %s -// RUN: not mlir-opt %s -mlir-disable-threading=true -pass-pipeline='builtin.func(cse,test-pass-failure)' -print-ir-after-failure -o /dev/null 2>&1 | FileCheck -check-prefix=AFTER_FAILURE %s +// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline='func.func(cse,canonicalize)' -print-ir-before=cse -o /dev/null 2>&1 | FileCheck -check-prefix=BEFORE %s +// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline='func.func(cse,canonicalize)' -print-ir-before-all -o /dev/null 2>&1 | FileCheck -check-prefix=BEFORE_ALL %s +// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline='func.func(cse,canonicalize)' -print-ir-after=cse -o /dev/null 2>&1 | FileCheck -check-prefix=AFTER %s +// RUN: mlir-opt %s 
-mlir-disable-threading=true -pass-pipeline='func.func(cse,canonicalize)' -print-ir-after-all -o /dev/null 2>&1 | FileCheck -check-prefix=AFTER_ALL %s +// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline='func.func(cse,canonicalize)' -print-ir-before=cse -print-ir-module-scope -o /dev/null 2>&1 | FileCheck -check-prefix=BEFORE_MODULE %s +// RUN: mlir-opt %s -mlir-disable-threading=true -pass-pipeline='func.func(cse,cse)' -print-ir-after-all -print-ir-after-change -o /dev/null 2>&1 | FileCheck -check-prefix=AFTER_ALL_CHANGE %s +// RUN: not mlir-opt %s -mlir-disable-threading=true -pass-pipeline='func.func(cse,test-pass-failure)' -print-ir-after-failure -o /dev/null 2>&1 | FileCheck -check-prefix=AFTER_FAILURE %s func @foo() { %0 = arith.constant 0 : i32 @@ -49,10 +49,10 @@ // AFTER_ALL: // -----// IR Dump After{{.*}}Canonicalizer //----- // // AFTER_ALL-NEXT: func @bar() -// BEFORE_MODULE: // -----// IR Dump Before{{.*}}CSE ('builtin.func' operation: @foo) //----- // +// BEFORE_MODULE: // -----// IR Dump Before{{.*}}CSE ('func.func' operation: @foo) //----- // // BEFORE_MODULE: func @foo() // BEFORE_MODULE: func @bar() -// BEFORE_MODULE: // -----// IR Dump Before{{.*}}CSE ('builtin.func' operation: @bar) //----- // +// BEFORE_MODULE: // -----// IR Dump Before{{.*}}CSE ('func.func' operation: @bar) //----- // // BEFORE_MODULE: func @foo() // BEFORE_MODULE: func @bar() diff --git a/mlir/test/Pass/pass-timing.mlir b/mlir/test/Pass/pass-timing.mlir --- a/mlir/test/Pass/pass-timing.mlir +++ b/mlir/test/Pass/pass-timing.mlir @@ -1,7 +1,7 @@ -// RUN: mlir-opt %s -mlir-disable-threading=true -verify-each=true -pass-pipeline='builtin.func(cse,canonicalize,cse)' -mlir-timing -mlir-timing-display=list 2>&1 | FileCheck -check-prefix=LIST %s -// RUN: mlir-opt %s -mlir-disable-threading=true -verify-each=true -pass-pipeline='builtin.func(cse,canonicalize,cse)' -mlir-timing -mlir-timing-display=tree 2>&1 | FileCheck -check-prefix=PIPELINE %s -// RUN: mlir-opt %s 
-mlir-disable-threading=false -verify-each=true -pass-pipeline='builtin.func(cse,canonicalize,cse)' -mlir-timing -mlir-timing-display=list 2>&1 | FileCheck -check-prefix=MT_LIST %s -// RUN: mlir-opt %s -mlir-disable-threading=false -verify-each=true -pass-pipeline='builtin.func(cse,canonicalize,cse)' -mlir-timing -mlir-timing-display=tree 2>&1 | FileCheck -check-prefix=MT_PIPELINE %s +// RUN: mlir-opt %s -mlir-disable-threading=true -verify-each=true -pass-pipeline='func.func(cse,canonicalize,cse)' -mlir-timing -mlir-timing-display=list 2>&1 | FileCheck -check-prefix=LIST %s +// RUN: mlir-opt %s -mlir-disable-threading=true -verify-each=true -pass-pipeline='func.func(cse,canonicalize,cse)' -mlir-timing -mlir-timing-display=tree 2>&1 | FileCheck -check-prefix=PIPELINE %s +// RUN: mlir-opt %s -mlir-disable-threading=false -verify-each=true -pass-pipeline='func.func(cse,canonicalize,cse)' -mlir-timing -mlir-timing-display=list 2>&1 | FileCheck -check-prefix=MT_LIST %s +// RUN: mlir-opt %s -mlir-disable-threading=false -verify-each=true -pass-pipeline='func.func(cse,canonicalize,cse)' -mlir-timing -mlir-timing-display=tree 2>&1 | FileCheck -check-prefix=MT_PIPELINE %s // RUN: mlir-opt %s -mlir-disable-threading=true -verify-each=false -test-pm-nested-pipeline -mlir-timing -mlir-timing-display=tree 2>&1 | FileCheck -check-prefix=NESTED_PIPELINE %s // LIST: Execution time report @@ -16,7 +16,7 @@ // PIPELINE: Total Execution Time: // PIPELINE: Name // PIPELINE-NEXT: Parser -// PIPELINE-NEXT: 'builtin.func' Pipeline +// PIPELINE-NEXT: 'func.func' Pipeline // PIPELINE-NEXT: CSE // PIPELINE-NEXT: (A) DominanceInfo // PIPELINE-NEXT: Canonicalizer @@ -38,7 +38,7 @@ // MT_PIPELINE: Total Execution Time: // MT_PIPELINE: Name // MT_PIPELINE-NEXT: Parser -// MT_PIPELINE-NEXT: 'builtin.func' Pipeline +// MT_PIPELINE-NEXT: 'func.func' Pipeline // MT_PIPELINE-NEXT: CSE // MT_PIPELINE-NEXT: (A) DominanceInfo // MT_PIPELINE-NEXT: Canonicalizer @@ -52,12 +52,12 @@ // NESTED_PIPELINE: 
Total Execution Time: // NESTED_PIPELINE: Name // NESTED_PIPELINE-NEXT: Parser -// NESTED_PIPELINE-NEXT: Pipeline Collection : ['builtin.func', 'builtin.module'] -// NESTED_PIPELINE-NEXT: 'builtin.func' Pipeline +// NESTED_PIPELINE-NEXT: Pipeline Collection : ['builtin.module', 'func.func'] +// NESTED_PIPELINE-NEXT: 'func.func' Pipeline // NESTED_PIPELINE-NEXT: TestFunctionPass // NESTED_PIPELINE-NEXT: 'builtin.module' Pipeline // NESTED_PIPELINE-NEXT: TestModulePass -// NESTED_PIPELINE-NEXT: 'builtin.func' Pipeline +// NESTED_PIPELINE-NEXT: 'func.func' Pipeline // NESTED_PIPELINE-NEXT: TestFunctionPass // NESTED_PIPELINE-NEXT: Output // NESTED_PIPELINE-NEXT: Rest diff --git a/mlir/test/Pass/pipeline-options-parsing.mlir b/mlir/test/Pass/pipeline-options-parsing.mlir --- a/mlir/test/Pass/pipeline-options-parsing.mlir +++ b/mlir/test/Pass/pipeline-options-parsing.mlir @@ -1,11 +1,11 @@ // RUN: not mlir-opt %s -pass-pipeline='builtin.module(test-module-pass{)' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_1 %s // RUN: not mlir-opt %s -pass-pipeline='builtin.module(test-module-pass{test-option=3})' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_2 %s -// RUN: not mlir-opt %s -pass-pipeline='builtin.module(builtin.func(test-options-pass{list=3}), test-module-pass{invalid-option=3})' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_3 %s +// RUN: not mlir-opt %s -pass-pipeline='builtin.module(func.func(test-options-pass{list=3}), test-module-pass{invalid-option=3})' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_3 %s // RUN: not mlir-opt %s -pass-pipeline='test-options-pass{list=3 list=notaninteger}' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_4 %s -// RUN: mlir-opt %s -pass-pipeline='builtin.func(test-options-pass{list=1,2,3,4 list=5 string=value1 string=value2})' -// RUN: mlir-opt %s -verify-each=false -pass-pipeline='builtin.func(test-options-pass{string-list=a list=1,2,3,4 string-list=b,c list=5 string-list=d string=nested_pipeline{arg1=10 arg2=" {} " arg3=true}})' 
-test-dump-pipeline 2>&1 | FileCheck --check-prefix=CHECK_1 %s +// RUN: mlir-opt %s -pass-pipeline='func.func(test-options-pass{list=1,2,3,4 list=5 string=value1 string=value2})' +// RUN: mlir-opt %s -verify-each=false -pass-pipeline='func.func(test-options-pass{string-list=a list=1,2,3,4 string-list=b,c list=5 string-list=d string=nested_pipeline{arg1=10 arg2=" {} " arg3=true}})' -test-dump-pipeline 2>&1 | FileCheck --check-prefix=CHECK_1 %s // RUN: mlir-opt %s -verify-each=false -test-options-pass-pipeline='list=1 string-list=a,b' -test-dump-pipeline 2>&1 | FileCheck --check-prefix=CHECK_2 %s -// RUN: mlir-opt %s -verify-each=false -pass-pipeline='builtin.module(builtin.func(test-options-pass{list=3}), builtin.func(test-options-pass{list=1,2,3,4}))' -test-dump-pipeline 2>&1 | FileCheck --check-prefix=CHECK_3 %s +// RUN: mlir-opt %s -verify-each=false -pass-pipeline='builtin.module(func.func(test-options-pass{list=3}), func.func(test-options-pass{list=1,2,3,4}))' -test-dump-pipeline 2>&1 | FileCheck --check-prefix=CHECK_3 %s // CHECK_ERROR_1: missing closing '}' while processing pass options // CHECK_ERROR_2: no such option test-option @@ -14,4 +14,4 @@ // CHECK_1: test-options-pass{list=1,2,3,4,5 string=nested_pipeline{arg1=10 arg2=" {} " arg3=true} string-list=a,b,c,d} // CHECK_2: test-options-pass{list=1 string= string-list=a,b} -// CHECK_3: builtin.module(builtin.func(test-options-pass{list=3 string= }), builtin.func(test-options-pass{list=1,2,3,4 string= })) +// CHECK_3: builtin.module(func.func(test-options-pass{list=3 string= }), func.func(test-options-pass{list=1,2,3,4 string= })) diff --git a/mlir/test/Pass/pipeline-parsing.mlir b/mlir/test/Pass/pipeline-parsing.mlir --- a/mlir/test/Pass/pipeline-parsing.mlir +++ b/mlir/test/Pass/pipeline-parsing.mlir @@ -1,16 +1,16 @@ -// RUN: mlir-opt %s -mlir-disable-threading -pass-pipeline='builtin.module(test-module-pass,builtin.func(test-function-pass)),builtin.func(test-function-pass)' 
-pass-pipeline="builtin.func(cse,canonicalize)" -verify-each=false -mlir-timing -mlir-timing-display=tree 2>&1 | FileCheck %s +// RUN: mlir-opt %s -mlir-disable-threading -pass-pipeline='builtin.module(test-module-pass,func.func(test-function-pass)),func.func(test-function-pass)' -pass-pipeline="func.func(cse,canonicalize)" -verify-each=false -mlir-timing -mlir-timing-display=tree 2>&1 | FileCheck %s // RUN: mlir-opt %s -mlir-disable-threading -test-textual-pm-nested-pipeline -verify-each=false -mlir-timing -mlir-timing-display=tree 2>&1 | FileCheck %s --check-prefix=TEXTUAL_CHECK // RUN: not mlir-opt %s -pass-pipeline='builtin.module(test-module-pass' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_1 %s // RUN: not mlir-opt %s -pass-pipeline='builtin.module(test-module-pass))' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_2 %s // RUN: not mlir-opt %s -pass-pipeline='builtin.module()(' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_3 %s // RUN: not mlir-opt %s -pass-pipeline=',' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_4 %s -// RUN: not mlir-opt %s -pass-pipeline='builtin.func(test-module-pass)' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_5 %s +// RUN: not mlir-opt %s -pass-pipeline='func.func(test-module-pass)' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_5 %s // CHECK_ERROR_1: encountered unbalanced parentheses while parsing pipeline // CHECK_ERROR_2: encountered extra closing ')' creating unbalanced parentheses while parsing pipeline // CHECK_ERROR_3: expected ',' after parsing pipeline // CHECK_ERROR_4: does not refer to a registered pass or pass pipeline -// CHECK_ERROR_5: Can't add pass '{{.*}}TestModulePass' restricted to 'builtin.module' on a PassManager intended to run on 'builtin.func', did you intend to nest? +// CHECK_ERROR_5: Can't add pass '{{.*}}TestModulePass' restricted to 'builtin.module' on a PassManager intended to run on 'func.func', did you intend to nest? 
func @foo() { return } @@ -21,21 +21,21 @@ } } -// CHECK: Pipeline Collection : ['builtin.func', 'builtin.module'] -// CHECK-NEXT: 'builtin.func' Pipeline +// CHECK: Pipeline Collection : ['builtin.module', 'func.func'] +// CHECK-NEXT: 'func.func' Pipeline // CHECK-NEXT: TestFunctionPass // CHECK-NEXT: CSE // CHECK-NEXT: DominanceInfo // CHECK-NEXT: Canonicalizer // CHECK-NEXT: 'builtin.module' Pipeline // CHECK-NEXT: TestModulePass -// CHECK-NEXT: 'builtin.func' Pipeline +// CHECK-NEXT: 'func.func' Pipeline // CHECK-NEXT: TestFunctionPass -// TEXTUAL_CHECK: Pipeline Collection : ['builtin.func', 'builtin.module'] -// TEXTUAL_CHECK-NEXT: 'builtin.func' Pipeline +// TEXTUAL_CHECK: Pipeline Collection : ['builtin.module', 'func.func'] +// TEXTUAL_CHECK-NEXT: 'func.func' Pipeline // TEXTUAL_CHECK-NEXT: TestFunctionPass // TEXTUAL_CHECK-NEXT: 'builtin.module' Pipeline // TEXTUAL_CHECK-NEXT: TestModulePass -// TEXTUAL_CHECK-NEXT: 'builtin.func' Pipeline +// TEXTUAL_CHECK-NEXT: 'func.func' Pipeline // TEXTUAL_CHECK-NEXT: TestFunctionPass diff --git a/mlir/test/Pass/pipeline-stats.mlir b/mlir/test/Pass/pipeline-stats.mlir --- a/mlir/test/Pass/pipeline-stats.mlir +++ b/mlir/test/Pass/pipeline-stats.mlir @@ -1,6 +1,6 @@ // REQUIRES: asserts -// RUN: mlir-opt %s -verify-each=true -pass-pipeline='builtin.func(test-stats-pass,test-stats-pass)' -pass-statistics -pass-statistics-display=list 2>&1 | FileCheck -check-prefix=LIST %s -// RUN: mlir-opt %s -verify-each=true -pass-pipeline='builtin.func(test-stats-pass,test-stats-pass)' -pass-statistics -pass-statistics-display=pipeline 2>&1 | FileCheck -check-prefix=PIPELINE %s +// RUN: mlir-opt %s -verify-each=true -pass-pipeline='func.func(test-stats-pass,test-stats-pass)' -pass-statistics -pass-statistics-display=list 2>&1 | FileCheck -check-prefix=LIST %s +// RUN: mlir-opt %s -verify-each=true -pass-pipeline='func.func(test-stats-pass,test-stats-pass)' -pass-statistics -pass-statistics-display=pipeline 2>&1 | FileCheck 
-check-prefix=PIPELINE %s // LIST: Pass statistics report // LIST: TestStatisticPass @@ -8,7 +8,7 @@ // LIST-NOT: Verifier // PIPELINE: Pass statistics report -// PIPELINE: 'builtin.func' Pipeline +// PIPELINE: 'func.func' Pipeline // PIPELINE-NEXT: TestStatisticPass // PIPELINE-NEXT: (S) {{0|4}} num-ops - Number of operations counted // PIPELINE-NEXT: TestStatisticPass diff --git a/mlir/test/Pass/run-reproducer.mlir b/mlir/test/Pass/run-reproducer.mlir --- a/mlir/test/Pass/run-reproducer.mlir +++ b/mlir/test/Pass/run-reproducer.mlir @@ -1,4 +1,4 @@ -// configuration: -mlir-disable-threading=true -pass-pipeline='builtin.func(cse,canonicalize)' -print-ir-before=cse +// configuration: -mlir-disable-threading=true -pass-pipeline='func.func(cse,canonicalize)' -print-ir-before=cse // Test of the reproducer run option. The first line has to be the // configuration (matching what is produced by reproducer). diff --git a/mlir/test/Target/Cpp/invalid.mlir b/mlir/test/Target/Cpp/invalid.mlir --- a/mlir/test/Target/Cpp/invalid.mlir +++ b/mlir/test/Target/Cpp/invalid.mlir @@ -1,6 +1,6 @@ // RUN: mlir-translate -split-input-file -mlir-to-cpp -verify-diagnostics %s -// expected-error@+1 {{'builtin.func' op with multiple blocks needs variables declared at top}} +// expected-error@+1 {{'func.func' op with multiple blocks needs variables declared at top}} func @multiple_blocks() { ^bb1: cf.br ^bb2 diff --git a/mlir/test/Target/LLVMIR/arm-neon-2d.mlir b/mlir/test/Target/LLVMIR/arm-neon-2d.mlir --- a/mlir/test/Target/LLVMIR/arm-neon-2d.mlir +++ b/mlir/test/Target/LLVMIR/arm-neon-2d.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -pass-pipeline="builtin.func(arm-neon-2d-to-intr)" %s | FileCheck %s +// RUN: mlir-opt -pass-pipeline="func.func(arm-neon-2d-to-intr)" %s | FileCheck %s // CHECK-LABEL: arm_neon_sdot2d_4x4_i8i8 func @arm_neon_sdot2d_4x4_i8i8(%a: vector<4xi32>, %b: vector<4x4xi8>, %c: vector<4x4xi8>) -> vector<4xi32> { diff --git a/mlir/test/Target/LLVMIR/vector-to-llvm-ir.mlir 
b/mlir/test/Target/LLVMIR/vector-to-llvm-ir.mlir --- a/mlir/test/Target/LLVMIR/vector-to-llvm-ir.mlir +++ b/mlir/test/Target/LLVMIR/vector-to-llvm-ir.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="convert-vector-to-llvm,builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" | mlir-translate -mlir-to-llvmir | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="convert-vector-to-llvm,func.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" | mlir-translate -mlir-to-llvmir | FileCheck %s func @genbool_1d() -> vector<8xi1> { %0 = vector.constant_mask [4] : vector<8xi1> diff --git a/mlir/test/Transforms/canonicalize-block-merge.mlir b/mlir/test/Transforms/canonicalize-block-merge.mlir --- a/mlir/test/Transforms/canonicalize-block-merge.mlir +++ b/mlir/test/Transforms/canonicalize-block-merge.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='builtin.func(canonicalize)' -split-input-file | FileCheck %s +// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='func.func(canonicalize)' -split-input-file | FileCheck %s // Check the simple case of single operation blocks with a return. diff --git a/mlir/test/Transforms/canonicalize-dce.mlir b/mlir/test/Transforms/canonicalize-dce.mlir --- a/mlir/test/Transforms/canonicalize-dce.mlir +++ b/mlir/test/Transforms/canonicalize-dce.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -pass-pipeline='builtin.func(canonicalize)' | FileCheck %s +// RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -pass-pipeline='func.func(canonicalize)' | FileCheck %s // Test case: Simple case of deleting a dead pure op. 
@@ -82,7 +82,7 @@ // CHECK-NEXT: return func @f(%arg0: f32) { - builtin.func @g(%arg1: f32) { + func.func @g(%arg1: f32) { %0 = "arith.addf"(%arg1, %arg1) : (f32, f32) -> f32 return } diff --git a/mlir/test/Transforms/canonicalize-td.mlir b/mlir/test/Transforms/canonicalize-td.mlir --- a/mlir/test/Transforms/canonicalize-td.mlir +++ b/mlir/test/Transforms/canonicalize-td.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='builtin.func(canonicalize{top-down=true})' | FileCheck %s --check-prefix=TD -// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='builtin.func(canonicalize)' | FileCheck %s --check-prefix=BU +// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='func.func(canonicalize{top-down=true})' | FileCheck %s --check-prefix=TD +// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='func.func(canonicalize)' | FileCheck %s --check-prefix=BU // BU-LABEL: func @default_insertion_position diff --git a/mlir/test/Transforms/canonicalize.mlir b/mlir/test/Transforms/canonicalize.mlir --- a/mlir/test/Transforms/canonicalize.mlir +++ b/mlir/test/Transforms/canonicalize.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='builtin.func(canonicalize)' -split-input-file | FileCheck %s +// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='func.func(canonicalize)' -split-input-file | FileCheck %s // CHECK-LABEL: func @test_subi_zero func @test_subi_zero(%arg0: i32) -> i32 { @@ -424,7 +424,7 @@ // CHECK-LABEL: func @dead_block_elim func @dead_block_elim() { // CHECK-NOT: ^bb - builtin.func @nested() { + func.func @nested() { return ^bb1: diff --git a/mlir/test/Transforms/constant-fold.mlir b/mlir/test/Transforms/constant-fold.mlir --- a/mlir/test/Transforms/constant-fold.mlir +++ b/mlir/test/Transforms/constant-fold.mlir @@ -758,7 +758,7 @@ func @nested_isolated_region() { // CHECK-NEXT: func @isolated_op // CHECK-NEXT: arith.constant 2 - builtin.func @isolated_op() { 
+ func.func @isolated_op() { %0 = arith.constant 1 : i32 %2 = arith.addi %0, %0 : i32 "foo.yield"(%2) : (i32) -> () diff --git a/mlir/test/Transforms/cse.mlir b/mlir/test/Transforms/cse.mlir --- a/mlir/test/Transforms/cse.mlir +++ b/mlir/test/Transforms/cse.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='builtin.func(cse)' | FileCheck %s +// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='func.func(cse)' | FileCheck %s // CHECK-DAG: #[[$MAP:.*]] = affine_map<(d0) -> (d0 mod 2)> #map0 = affine_map<(d0) -> (d0 mod 2)> @@ -229,7 +229,7 @@ %0 = arith.constant 1 : i32 // CHECK-NEXT: @nested_func - builtin.func @nested_func() { + func.func @nested_func() { // CHECK-NEXT: arith.constant 1 %foo = arith.constant 1 : i32 "foo.yield"(%foo) : (i32) -> () diff --git a/mlir/test/Transforms/parallel-loop-collapsing.mlir b/mlir/test/Transforms/parallel-loop-collapsing.mlir --- a/mlir/test/Transforms/parallel-loop-collapsing.mlir +++ b/mlir/test/Transforms/parallel-loop-collapsing.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='builtin.func(scf-parallel-loop-collapsing{collapsed-indices-0=0,3 collapsed-indices-1=1,4 collapsed-indices-2=2}, canonicalize)' | FileCheck %s +// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='func.func(scf-parallel-loop-collapsing{collapsed-indices-0=0,3 collapsed-indices-1=1,4 collapsed-indices-2=2}, canonicalize)' | FileCheck %s // CHECK-LABEL: func @parallel_many_dims() { func @parallel_many_dims() { diff --git a/mlir/test/Transforms/parametric-mapping.mlir b/mlir/test/Transforms/parametric-mapping.mlir --- a/mlir/test/Transforms/parametric-mapping.mlir +++ b/mlir/test/Transforms/parametric-mapping.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -allow-unregistered-dialect -pass-pipeline="builtin.func(test-mapping-to-processing-elements)" %s | FileCheck %s +// RUN: mlir-opt -allow-unregistered-dialect 
-pass-pipeline="func.func(test-mapping-to-processing-elements)" %s | FileCheck %s // CHECK: #[[mul_map:.+]] = affine_map<()[s0, s1] -> (s0 * s1)> // CHECK: #[[add_map:.+]] = affine_map<()[s0, s1] -> (s0 + s1)> diff --git a/mlir/test/Transforms/print-op-graph.mlir b/mlir/test/Transforms/print-op-graph.mlir --- a/mlir/test/Transforms/print-op-graph.mlir +++ b/mlir/test/Transforms/print-op-graph.mlir @@ -4,7 +4,7 @@ // DFG-LABEL: digraph G { // DFG: subgraph {{.*}} { // DFG: subgraph {{.*}} -// DFG: label = "builtin.func{{.*}}merge_blocks +// DFG: label = "func.func{{.*}}merge_blocks // DFG: subgraph {{.*}} { // DFG: v[[ARG0:.*]] [label = "arg0" // DFG: v[[CONST10:.*]] [label ={{.*}}10 : i32 @@ -26,7 +26,7 @@ // CFG-LABEL: digraph G { // CFG: subgraph {{.*}} { // CFG: subgraph {{.*}} -// CFG: label = "builtin.func{{.*}}merge_blocks +// CFG: label = "func.func{{.*}}merge_blocks // CFG: subgraph {{.*}} { // CFG: v[[C1:.*]] [label = "arith.constant // CFG: v[[C2:.*]] [label = "arith.constant diff --git a/mlir/test/Transforms/sccp-structured.mlir b/mlir/test/Transforms/sccp-structured.mlir --- a/mlir/test/Transforms/sccp-structured.mlir +++ b/mlir/test/Transforms/sccp-structured.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline="builtin.func(sccp)" -split-input-file | FileCheck %s +// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline="func.func(sccp)" -split-input-file | FileCheck %s /// Check that a constant is properly propagated when only one edge is taken. diff --git a/mlir/test/Transforms/sccp.mlir b/mlir/test/Transforms/sccp.mlir --- a/mlir/test/Transforms/sccp.mlir +++ b/mlir/test/Transforms/sccp.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline="builtin.func(sccp)" -split-input-file | FileCheck %s +// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline="func.func(sccp)" -split-input-file | FileCheck %s /// Check simple forward constant propagation without any control flow. 
diff --git a/mlir/test/Transforms/single-parallel-loop-collapsing.mlir b/mlir/test/Transforms/single-parallel-loop-collapsing.mlir --- a/mlir/test/Transforms/single-parallel-loop-collapsing.mlir +++ b/mlir/test/Transforms/single-parallel-loop-collapsing.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='builtin.func(scf-parallel-loop-collapsing{collapsed-indices-0=0,1}, canonicalize)' | FileCheck %s +// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='func.func(scf-parallel-loop-collapsing{collapsed-indices-0=0,1}, canonicalize)' | FileCheck %s func @collapse_to_single() { %c0 = arith.constant 3 : index diff --git a/mlir/test/Transforms/test-canonicalize-filter.mlir b/mlir/test/Transforms/test-canonicalize-filter.mlir --- a/mlir/test/Transforms/test-canonicalize-filter.mlir +++ b/mlir/test/Transforms/test-canonicalize-filter.mlir @@ -1,6 +1,6 @@ -// RUN: mlir-opt %s -pass-pipeline='builtin.func(canonicalize)' | FileCheck %s --check-prefix=NO_FILTER -// RUN: mlir-opt %s -pass-pipeline='builtin.func(canonicalize{enable-patterns=TestRemoveOpWithInnerOps})' | FileCheck %s --check-prefix=FILTER_ENABLE -// RUN: mlir-opt %s -pass-pipeline='builtin.func(canonicalize{disable-patterns=TestRemoveOpWithInnerOps})' | FileCheck %s --check-prefix=FILTER_DISABLE +// RUN: mlir-opt %s -pass-pipeline='func.func(canonicalize)' | FileCheck %s --check-prefix=NO_FILTER +// RUN: mlir-opt %s -pass-pipeline='func.func(canonicalize{enable-patterns=TestRemoveOpWithInnerOps})' | FileCheck %s --check-prefix=FILTER_ENABLE +// RUN: mlir-opt %s -pass-pipeline='func.func(canonicalize{disable-patterns=TestRemoveOpWithInnerOps})' | FileCheck %s --check-prefix=FILTER_DISABLE // NO_FILTER-LABEL: func @remove_op_with_inner_ops_pattern // NO_FILTER-NEXT: return diff --git a/mlir/test/Transforms/test-canonicalize.mlir b/mlir/test/Transforms/test-canonicalize.mlir --- a/mlir/test/Transforms/test-canonicalize.mlir +++ 
b/mlir/test/Transforms/test-canonicalize.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline='builtin.func(canonicalize)' | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline='func.func(canonicalize)' | FileCheck %s // CHECK-LABEL: func @remove_op_with_inner_ops_pattern func @remove_op_with_inner_ops_pattern() { diff --git a/mlir/test/Transforms/test-legalizer-analysis.mlir b/mlir/test/Transforms/test-legalizer-analysis.mlir --- a/mlir/test/Transforms/test-legalizer-analysis.mlir +++ b/mlir/test/Transforms/test-legalizer-analysis.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt -allow-unregistered-dialect -test-legalize-patterns -verify-diagnostics -test-legalize-mode=analysis %s | FileCheck %s // expected-remark@-2 {{op 'builtin.module' is legalizable}} -// expected-remark@+1 {{op 'builtin.func' is legalizable}} +// expected-remark@+1 {{op 'func.func' is legalizable}} func @test(%arg0: f32) { // expected-remark@+1 {{op 'test.illegal_op_a' is legalizable}} %result = "test.illegal_op_a"() : () -> (i32) diff --git a/mlir/test/Transforms/test-legalizer-full.mlir b/mlir/test/Transforms/test-legalizer-full.mlir --- a/mlir/test/Transforms/test-legalizer-full.mlir +++ b/mlir/test/Transforms/test-legalizer-full.mlir @@ -37,7 +37,7 @@ } /// Operation that is dynamically legal, i.e. the function has a pattern /// applied to legalize the argument type before it becomes recursively legal. - builtin.func @dynamic_func(%arg: i64) attributes {test.recursively_legal} { + func.func @dynamic_func(%arg: i64) attributes {test.recursively_legal} { %ignored = "test.illegal_op_f"() : () -> (i32) "test.return"() : () -> () } diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgCodegenStrategy.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgCodegenStrategy.cpp --- a/mlir/test/lib/Dialect/Linalg/TestLinalgCodegenStrategy.cpp +++ b/mlir/test/lib/Dialect/Linalg/TestLinalgCodegenStrategy.cpp @@ -218,7 +218,7 @@ .enableTransferToSCFConversion()); // Created a nested OpPassManager and run. 
FuncOp funcOp = getOperation(); - OpPassManager dynamicPM("builtin.func"); + OpPassManager dynamicPM("func.func"); strategy.configurePassPipeline(dynamicPM, funcOp.getContext(), runEnablePass); if (failed(runPipeline(dynamicPM, funcOp))) return signalPassFailure(); diff --git a/mlir/test/lib/Dialect/Tosa/CMakeLists.txt b/mlir/test/lib/Dialect/Tosa/CMakeLists.txt --- a/mlir/test/lib/Dialect/Tosa/CMakeLists.txt +++ b/mlir/test/lib/Dialect/Tosa/CMakeLists.txt @@ -10,6 +10,7 @@ MLIRTosaPassIncGen LINK_LIBS PUBLIC + MLIRFunc MLIRPass MLIRTosa MLIRTransformUtils diff --git a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp --- a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp +++ b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp @@ -239,7 +239,7 @@ .lower8x8xf32())); } - OpPassManager dynamicPM("builtin.func"); + OpPassManager dynamicPM("func.func"); dynamicPM.addPass(createLinalgStrategyLowerVectorsPass(options)); if (failed(runPipeline(dynamicPM, getOperation()))) return signalPassFailure(); diff --git a/mlir/test/mlir-cpu-runner/async-error.mlir b/mlir/test/mlir-cpu-runner/async-error.mlir --- a/mlir/test/mlir-cpu-runner/async-error.mlir +++ b/mlir/test/mlir-cpu-runner/async-error.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="async-to-async-runtime,builtin.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,builtin.func(convert-linalg-to-loops,convert-scf-to-cf),convert-linalg-to-llvm,convert-vector-to-llvm,builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \ +// RUN: mlir-opt %s -pass-pipeline="async-to-async-runtime,func.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,func.func(convert-linalg-to-loops,convert-scf-to-cf),convert-linalg-to-llvm,convert-vector-to-llvm,func.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \ // RUN: | mlir-cpu-runner \ // 
RUN: -e main -entry-point-result=void -O0 \ // RUN: -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext \ diff --git a/mlir/test/mlir-cpu-runner/async-group.mlir b/mlir/test/mlir-cpu-runner/async-group.mlir --- a/mlir/test/mlir-cpu-runner/async-group.mlir +++ b/mlir/test/mlir-cpu-runner/async-group.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="async-to-async-runtime,builtin.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \ +// RUN: mlir-opt %s -pass-pipeline="async-to-async-runtime,func.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,func.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \ // RUN: | mlir-cpu-runner \ // RUN: -e main -entry-point-result=void -O0 \ // RUN: -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext \ diff --git a/mlir/test/mlir-cpu-runner/async-value.mlir b/mlir/test/mlir-cpu-runner/async-value.mlir --- a/mlir/test/mlir-cpu-runner/async-value.mlir +++ b/mlir/test/mlir-cpu-runner/async-value.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="async-to-async-runtime,builtin.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,builtin.func(convert-arith-to-llvm),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" \ +// RUN: mlir-opt %s -pass-pipeline="async-to-async-runtime,func.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,func.func(convert-arith-to-llvm),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" \ // RUN: | mlir-cpu-runner \ // RUN: -e main -entry-point-result=void -O0 \ // RUN: -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext \ diff --git a/mlir/test/mlir-cpu-runner/async.mlir b/mlir/test/mlir-cpu-runner/async.mlir --- 
a/mlir/test/mlir-cpu-runner/async.mlir +++ b/mlir/test/mlir-cpu-runner/async.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="async-to-async-runtime,builtin.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,builtin.func(convert-linalg-to-loops,convert-scf-to-cf),convert-linalg-to-llvm,convert-memref-to-llvm,builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \ +// RUN: mlir-opt %s -pass-pipeline="async-to-async-runtime,func.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,func.func(convert-linalg-to-loops,convert-scf-to-cf),convert-linalg-to-llvm,convert-memref-to-llvm,func.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \ // RUN: | mlir-cpu-runner \ // RUN: -e main -entry-point-result=void -O0 \ // RUN: -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext \ diff --git a/mlir/test/mlir-cpu-runner/bare-ptr-call-conv.mlir b/mlir/test/mlir-cpu-runner/bare-ptr-call-conv.mlir --- a/mlir/test/mlir-cpu-runner/bare-ptr-call-conv.mlir +++ b/mlir/test/mlir-cpu-runner/bare-ptr-call-conv.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-scf-to-cf,convert-arith-to-llvm),convert-memref-to-llvm,convert-func-to-llvm{use-bare-ptr-memref-call-conv=1}" -reconcile-unrealized-casts | mlir-cpu-runner -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext -entry-point-result=void | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-scf-to-cf,convert-arith-to-llvm),convert-memref-to-llvm,convert-func-to-llvm{use-bare-ptr-memref-call-conv=1}" -reconcile-unrealized-casts | mlir-cpu-runner -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext -entry-point-result=void | FileCheck %s // Verify bare pointer memref calling convention. 
`simple_add1_add2_test` // gets two 2xf32 memrefs, adds 1.0f to the first one and 2.0f to the second diff --git a/mlir/test/mlir-cpu-runner/copy.mlir b/mlir/test/mlir-cpu-runner/copy.mlir --- a/mlir/test/mlir-cpu-runner/copy.mlir +++ b/mlir/test/mlir-cpu-runner/copy.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-scf-to-cf,convert-arith-to-llvm),convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" \ +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-scf-to-cf,convert-arith-to-llvm),convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" \ // RUN: | mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext \ // RUN: | FileCheck %s diff --git a/mlir/test/mlir-cpu-runner/global-memref.mlir b/mlir/test/mlir-cpu-runner/global-memref.mlir --- a/mlir/test/mlir-cpu-runner/global-memref.mlir +++ b/mlir/test/mlir-cpu-runner/global-memref.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-arith-to-llvm),convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e main -entry-point-result=void -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-arith-to-llvm),convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e main -entry-point-result=void -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext | FileCheck %s func private @print_memref_f32(memref<*xf32>) attributes { llvm.emit_c_interface } func private @print_memref_i32(memref<*xi32>) attributes { llvm.emit_c_interface } diff --git a/mlir/test/mlir-cpu-runner/math-polynomial-approx.mlir 
b/mlir/test/mlir-cpu-runner/math-polynomial-approx.mlir --- a/mlir/test/mlir-cpu-runner/math-polynomial-approx.mlir +++ b/mlir/test/mlir-cpu-runner/math-polynomial-approx.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(test-math-polynomial-approximation,convert-arith-to-llvm),convert-vector-to-llvm,builtin.func(convert-math-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \ +// RUN: mlir-opt %s -pass-pipeline="func.func(test-math-polynomial-approximation,convert-arith-to-llvm),convert-vector-to-llvm,func.func(convert-math-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \ // RUN: | mlir-cpu-runner \ // RUN: -e main -entry-point-result=void -O0 \ // RUN: -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext \ diff --git a/mlir/test/mlir-cpu-runner/memref-reinterpret-cast.mlir b/mlir/test/mlir-cpu-runner/memref-reinterpret-cast.mlir --- a/mlir/test/mlir-cpu-runner/memref-reinterpret-cast.mlir +++ b/mlir/test/mlir-cpu-runner/memref-reinterpret-cast.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-scf-to-cf),convert-memref-to-llvm,builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \ +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-scf-to-cf),convert-memref-to-llvm,func.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \ // RUN: | mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext \ // RUN: | FileCheck %s diff --git a/mlir/test/mlir-cpu-runner/memref-reshape.mlir b/mlir/test/mlir-cpu-runner/memref-reshape.mlir --- a/mlir/test/mlir-cpu-runner/memref-reshape.mlir +++ b/mlir/test/mlir-cpu-runner/memref-reshape.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s 
-pass-pipeline="builtin.func(convert-scf-to-cf,memref-expand,convert-arith-to-llvm),convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" \ +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-scf-to-cf,memref-expand,convert-arith-to-llvm),convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" \ // RUN: | mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext \ // RUN: | FileCheck %s diff --git a/mlir/test/mlir-cpu-runner/sgemm-naive-codegen.mlir b/mlir/test/mlir-cpu-runner/sgemm-naive-codegen.mlir --- a/mlir/test/mlir-cpu-runner/sgemm-naive-codegen.mlir +++ b/mlir/test/mlir-cpu-runner/sgemm-naive-codegen.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -pass-pipeline="builtin.func(convert-linalg-to-loops,lower-affine,convert-scf-to-cf,convert-arith-to-llvm),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" %s | mlir-cpu-runner -O3 -e main -entry-point-result=void -shared-libs=%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext | FileCheck %s +// RUN: mlir-opt -pass-pipeline="func.func(convert-linalg-to-loops,lower-affine,convert-scf-to-cf,convert-arith-to-llvm),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" %s | mlir-cpu-runner -O3 -e main -entry-point-result=void -shared-libs=%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext | FileCheck %s func @main() { %A = memref.alloc() : memref<16x16xf32> diff --git a/mlir/test/mlir-cpu-runner/unranked-memref.mlir b/mlir/test/mlir-cpu-runner/unranked-memref.mlir --- a/mlir/test/mlir-cpu-runner/unranked-memref.mlir +++ b/mlir/test/mlir-cpu-runner/unranked-memref.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s 
-pass-pipeline="builtin.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),convert-linalg-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),convert-linalg-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext | FileCheck %s diff --git a/mlir/test/mlir-cpu-runner/utils.mlir b/mlir/test/mlir-cpu-runner/utils.mlir --- a/mlir/test/mlir-cpu-runner/utils.mlir +++ b/mlir/test/mlir-cpu-runner/utils.mlir @@ -1,7 +1,7 @@ -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),convert-linalg-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e print_0d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-0D -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),convert-linalg-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e print_1d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-1D -// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),convert-linalg-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e print_3d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-3D -// RUN: mlir-opt %s 
-pass-pipeline="builtin.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),convert-linalg-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e vector_splat_2d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-VECTOR-SPLAT-2D +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),convert-linalg-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e print_0d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-0D +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),convert-linalg-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e print_1d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-1D +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),convert-linalg-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e print_3d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-3D +// RUN: mlir-opt %s -pass-pipeline="func.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),convert-linalg-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e vector_splat_2d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-VECTOR-SPLAT-2D func @print_0d() { %f = arith.constant 2.00000e+00 : f32 diff --git a/mlir/test/mlir-lsp-server/diagnostics.test 
b/mlir/test/mlir-lsp-server/diagnostics.test --- a/mlir/test/mlir-lsp-server/diagnostics.test +++ b/mlir/test/mlir-lsp-server/diagnostics.test @@ -12,7 +12,7 @@ // CHECK-NEXT: "diagnostics": [ // CHECK-NEXT: { // CHECK-NEXT: "category": "Parse Error", -// CHECK-NEXT: "message": "custom op 'builtin.func' expected valid '@'-identifier for symbol name", +// CHECK-NEXT: "message": "custom op 'func.func' expected valid '@'-identifier for symbol name", // CHECK-NEXT: "range": { // CHECK-NEXT: "end": { // CHECK-NEXT: "character": 7, diff --git a/mlir/test/mlir-lsp-server/hover.test b/mlir/test/mlir-lsp-server/hover.test --- a/mlir/test/mlir-lsp-server/hover.test +++ b/mlir/test/mlir-lsp-server/hover.test @@ -66,7 +66,7 @@ // CHECK-NEXT: "result": { // CHECK-NEXT: "contents": { // CHECK-NEXT: "kind": "markdown", -// CHECK-NEXT: "value": "Operation: \"builtin.func\"\n\nBlock #1\n\nPredecessors: \n\n" +// CHECK-NEXT: "value": "Operation: \"func.func\"\n\nBlock #1\n\nPredecessors: \n\n" // CHECK-NEXT: }, // CHECK-NEXT: "range": { // CHECK-NEXT: "end": { @@ -90,7 +90,7 @@ // CHECK-NEXT: "result": { // CHECK-NEXT: "contents": { // CHECK-NEXT: "kind": "markdown", -// CHECK-NEXT: "value": "Operation: \"builtin.func\"\n\nBlock: \n\nArgument #0\n\nType: `i1`\n\n" +// CHECK-NEXT: "value": "Operation: \"func.func\"\n\nBlock: \n\nArgument #0\n\nType: `i1`\n\n" // CHECK-NEXT: }, // CHECK-NEXT: "range": { // CHECK-NEXT: "end": { @@ -114,7 +114,7 @@ // CHECK-NEXT: "result": { // CHECK-NEXT: "contents": { // CHECK-NEXT: "kind": "markdown", -// CHECK-NEXT: "value": "\"builtin.func\" : public @foo\n\nGeneric Form:\n\n```mlir\n\"builtin.func\"() ({\n}) {sym_name = \"foo\", type = (i1) -> ()} : () -> ()\n```\n" +// CHECK-NEXT: "value": "\"func.func\" : public @foo\n\nGeneric Form:\n\n```mlir\n\"func.func\"() ({\n}) {sym_name = \"foo\", type = (i1) -> ()} : () -> ()\n```\n" // CHECK-NEXT: }, // CHECK-NEXT: "range": { // CHECK-NEXT: "end": { @@ -138,7 +138,7 @@ // CHECK-NEXT: "result": { // 
CHECK-NEXT: "contents": { // CHECK-NEXT: "kind": "markdown", -// CHECK-NEXT: "value": "\"builtin.func\" : public @foo\n\nGeneric Form:\n\n```mlir\n\"builtin.func\"() ({\n}) {sym_name = \"foo\", type = (i1) -> ()} : () -> ()\n```\n" +// CHECK-NEXT: "value": "\"func.func\" : public @foo\n\nGeneric Form:\n\n```mlir\n\"func.func\"() ({\n}) {sym_name = \"foo\", type = (i1) -> ()} : () -> ()\n```\n" // CHECK-NEXT: }, // CHECK-NEXT: "range": { // CHECK-NEXT: "end": { diff --git a/mlir/test/mlir-opt/async.mlir b/mlir/test/mlir-opt/async.mlir --- a/mlir/test/mlir-opt/async.mlir +++ b/mlir/test/mlir-opt/async.mlir @@ -1,6 +1,6 @@ // Check if mlir marks the corresponding function with required coroutine attribute. // -// RUN: mlir-opt %s -pass-pipeline="async-to-async-runtime,builtin.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,builtin.func(convert-linalg-to-loops,convert-scf-to-cf),convert-linalg-to-llvm,convert-memref-to-llvm,builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \ +// RUN: mlir-opt %s -pass-pipeline="async-to-async-runtime,func.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,func.func(convert-linalg-to-loops,convert-scf-to-cf),convert-linalg-to-llvm,convert-memref-to-llvm,func.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \ // RUN: | FileCheck %s // CHECK: llvm.func @async_execute_fn{{.*}}attributes{{.*}}"coroutine.presplit", "0" diff --git a/mlir/test/mlir-pdll/Parser/expr-failure.pdll b/mlir/test/mlir-pdll/Parser/expr-failure.pdll --- a/mlir/test/mlir-pdll/Parser/expr-failure.pdll +++ b/mlir/test/mlir-pdll/Parser/expr-failure.pdll @@ -206,7 +206,7 @@ Pattern { // CHECK: expected `)` after operation operand list let value: Value; - let foo = op(value<; + let foo = op(value<; } // ----- @@ -214,7 +214,7 @@ Pattern { // CHECK: unable to convert expression of type `Attr` to the expected type of `ValueRange` let attr: 
Attr; - let foo = op(attr); + let foo = op(attr); } // ----- diff --git a/mlir/test/mlir-pdll/Parser/stmt-failure.pdll b/mlir/test/mlir-pdll/Parser/stmt-failure.pdll --- a/mlir/test/mlir-pdll/Parser/stmt-failure.pdll +++ b/mlir/test/mlir-pdll/Parser/stmt-failure.pdll @@ -92,7 +92,7 @@ Pattern { // CHECK: expected `>` after operation name - let foo: Op f64 # CHECK: return %arg0 : f64 - @builtin.FuncOp.from_py_func(f64) + @func.FuncOp.from_py_func(f64) def unary_return(a): return a # CHECK-LABEL: func @binary_return(%arg0: f32, %arg1: f64) -> (f32, f64) # CHECK: return %arg0, %arg1 : f32, f64 - @builtin.FuncOp.from_py_func(f32, f64) + @func.FuncOp.from_py_func(f32, f64) def binary_return(a, b): return a, b # CHECK-LABEL: func @none_return(%arg0: f32, %arg1: f64) # CHECK: return - @builtin.FuncOp.from_py_func(f32, f64) + @func.FuncOp.from_py_func(f32, f64) def none_return(a, b): pass # CHECK-LABEL: func @call_unary # CHECK: %0 = call @unary_return(%arg0) : (f64) -> f64 # CHECK: return %0 : f64 - @builtin.FuncOp.from_py_func(f64) + @func.FuncOp.from_py_func(f64) def call_unary(a): return unary_return(a) # CHECK-LABEL: func @call_binary # CHECK: %0:2 = call @binary_return(%arg0, %arg1) : (f32, f64) -> (f32, f64) # CHECK: return %0#0, %0#1 : f32, f64 - @builtin.FuncOp.from_py_func(f32, f64) + @func.FuncOp.from_py_func(f32, f64) def call_binary(a, b): return binary_return(a, b) @@ -56,41 +56,41 @@ # CHECK-LABEL: func @single_result_op # CHECK: %0 = "custom.op1"() : () -> f32 # CHECK: return %0 : f32 - @builtin.FuncOp.from_py_func() + @func.FuncOp.from_py_func() def single_result_op(): return Operation.create("custom.op1", results=[f32]) # CHECK-LABEL: func @call_none # CHECK: call @none_return(%arg0, %arg1) : (f32, f64) -> () # CHECK: return - @builtin.FuncOp.from_py_func(f32, f64) + @func.FuncOp.from_py_func(f32, f64) def call_none(a, b): return none_return(a, b) ## Variants and optional feature tests. 
# CHECK-LABEL: func @from_name_arg - @builtin.FuncOp.from_py_func(f32, f64, name="from_name_arg") + @func.FuncOp.from_py_func(f32, f64, name="from_name_arg") def explicit_name(a, b): return b - @builtin.FuncOp.from_py_func(f32, f64) + @func.FuncOp.from_py_func(f32, f64) def positional_func_op(a, b, func_op): - assert isinstance(func_op, builtin.FuncOp) + assert isinstance(func_op, func.FuncOp) return b - @builtin.FuncOp.from_py_func(f32, f64) + @func.FuncOp.from_py_func(f32, f64) def kw_func_op(a, b=None, func_op=None): - assert isinstance(func_op, builtin.FuncOp) + assert isinstance(func_op, func.FuncOp) return b - @builtin.FuncOp.from_py_func(f32, f64) + @func.FuncOp.from_py_func(f32, f64) def kwargs_func_op(a, b=None, **kwargs): - assert isinstance(kwargs["func_op"], builtin.FuncOp) + assert isinstance(kwargs["func_op"], func.FuncOp) return b # CHECK-LABEL: func @explicit_results(%arg0: f32, %arg1: f64) -> f64 # CHECK: return %arg1 : f64 - @builtin.FuncOp.from_py_func(f32, f64, results=[f64]) + @func.FuncOp.from_py_func(f32, f64, results=[f64]) def explicit_results(a, b): func.ReturnOp([b]) @@ -107,7 +107,7 @@ with InsertionPoint(m.body): try: - @builtin.FuncOp.from_py_func(f64, results=[f64]) + @func.FuncOp.from_py_func(f64, results=[f64]) def unary_return(a): return a except AssertionError as e: @@ -125,7 +125,7 @@ f32 = F32Type.get() tensor_type = RankedTensorType.get((2, 3, 4), f32) with InsertionPoint.at_block_begin(m.body): - f = builtin.FuncOp(name="some_func", + f = func.FuncOp(name="some_func", type=FunctionType.get( inputs=[tensor_type, tensor_type], results=[tensor_type]), @@ -156,7 +156,7 @@ print(e) # Try the callback builder and passing type as tuple. 
- f = builtin.FuncOp(name="some_other_func", + f = func.FuncOp(name="some_other_func", type=([tensor_type, tensor_type], [tensor_type]), visibility="nested", body_builder=lambda f: func.ReturnOp( @@ -181,7 +181,7 @@ f32 = F32Type.get() f64 = F64Type.get() with InsertionPoint(module.body): - f = builtin.FuncOp("some_func", ([f32, f32], [f32, f32])) + f = func.FuncOp("some_func", ([f32, f32], [f32, f32])) with InsertionPoint(f.add_entry_block()): func.ReturnOp(f.arguments) f.arg_attrs = ArrayAttr.get([ @@ -196,7 +196,7 @@ DictAttr.get({"custom_dialect.res2": FloatAttr.get(f64, 256.0)}) ]) - other = builtin.FuncOp("other_func", ([f32, f32], [])) + other = func.FuncOp("other_func", ([f32, f32], [])) with InsertionPoint(other.add_entry_block()): func.ReturnOp([]) other.arg_attrs = [ diff --git a/mlir/test/python/dialects/func.py b/mlir/test/python/dialects/func.py --- a/mlir/test/python/dialects/func.py +++ b/mlir/test/python/dialects/func.py @@ -77,14 +77,14 @@ # CHECK-LABEL: TEST: testFunctionCalls @constructAndPrintInModule def testFunctionCalls(): - foo = builtin.FuncOp("foo", ([], [])) + foo = func.FuncOp("foo", ([], [])) foo.sym_visibility = StringAttr.get("private") - bar = builtin.FuncOp("bar", ([], [IndexType.get()])) + bar = func.FuncOp("bar", ([], [IndexType.get()])) bar.sym_visibility = StringAttr.get("private") - qux = builtin.FuncOp("qux", ([], [F32Type.get()])) + qux = func.FuncOp("qux", ([], [F32Type.get()])) qux.sym_visibility = StringAttr.get("private") - with InsertionPoint(builtin.FuncOp("caller", ([], [])).add_entry_block()): + with InsertionPoint(func.FuncOp("caller", ([], [])).add_entry_block()): func.CallOp(foo, []) func.CallOp([IndexType.get()], "bar", []) func.CallOp([F32Type.get()], FlatSymbolRefAttr.get("qux"), []) diff --git a/mlir/test/python/dialects/linalg/opdsl/emit_convolution.py b/mlir/test/python/dialects/linalg/opdsl/emit_convolution.py --- a/mlir/test/python/dialects/linalg/opdsl/emit_convolution.py +++ 
b/mlir/test/python/dialects/linalg/opdsl/emit_convolution.py @@ -46,7 +46,7 @@ # CHECK-NEXT: %[[SUM:.+]] = arith.addi %[[OUT]], %[[PROD]] : i32 # CHECK-NEXT: linalg.yield %[[SUM]] : i32 # CHECK-NEXT: -> tensor<1x2x4x1xi32> - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((1, 4, 16, 1), f32), RankedTensorType.get((2, 2, 1), f32), RankedTensorType.get((1, 2, 4, 1), i32)) diff --git a/mlir/test/python/dialects/linalg/opdsl/emit_fill.py b/mlir/test/python/dialects/linalg/opdsl/emit_fill.py --- a/mlir/test/python/dialects/linalg/opdsl/emit_fill.py +++ b/mlir/test/python/dialects/linalg/opdsl/emit_fill.py @@ -35,7 +35,7 @@ # CHECK: linalg.generic # CHECK-SAME: indexing_maps = [#[[$MAP0]], #[[$MAP0]] # CHECK-SAME: iterator_types = [] - @builtin.FuncOp.from_py_func(f32, RankedTensorType.get([], f32)) + @func.FuncOp.from_py_func(f32, RankedTensorType.get([], f32)) def test_fill_0d(value, init_result): return fill_poly(value, outs=[init_result]) @@ -43,7 +43,7 @@ # CHECK: linalg.generic # CHECK-SAME: indexing_maps = [#[[$MAP1]], #[[$MAP2]]] # CHECK-SAME: iterator_types = ["parallel", "parallel"] - @builtin.FuncOp.from_py_func(f32, RankedTensorType.get([4, 16], f32)) + @func.FuncOp.from_py_func(f32, RankedTensorType.get([4, 16], f32)) def test_fill_2d(value, init_result): return fill_poly(value, outs=[init_result]) @@ -51,7 +51,7 @@ # CHECK: linalg.generic # CHECK-SAME: indexing_maps = [#[[$MAP3]], #[[$MAP4]]] # CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel"] - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get([], f32), RankedTensorType.get([4, 8, 16], f32)) def test_fill_rank_zero_3d(input, init_result): return fill_rank_zero_poly(input, outs=[init_result]) diff --git a/mlir/test/python/dialects/linalg/opdsl/emit_matmul.py b/mlir/test/python/dialects/linalg/opdsl/emit_matmul.py --- a/mlir/test/python/dialects/linalg/opdsl/emit_matmul.py +++ b/mlir/test/python/dialects/linalg/opdsl/emit_matmul.py 
@@ -56,7 +56,7 @@ # CHECK-SAME: iterator_types = ["parallel", "parallel", "reduction"] # CHECK-SAME: ins(%[[A]], %[[B]] # CHECK-SAME: outs(%[[INITC]] - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((4, 16), f32), RankedTensorType.get((16, 8), f32)) def test_matmul_mono(lhs, rhs): init_result = linalg.InitTensorOp([4, 8], f32) @@ -70,7 +70,7 @@ # CHECK-NEXT: %[[ADD:.+]] = arith.addi %[[C_ARG]], %[[MUL]] : i32 # CHECK-NEXT: linalg.yield %[[ADD]] : i32 # CHECK-NEXT: -> tensor<4x8xi32> - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((4, 16), i8), RankedTensorType.get((16, 8), i8), RankedTensorType.get((4, 8), i32)) def test_i8i8i32_matmul(lhs, rhs, init_result): @@ -79,7 +79,7 @@ # CHECK-LABEL: @test_i8i8i32_matmul_unsigned # CHECK: = arith.extui # CHECK: = arith.extui - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((4, 16), i8), RankedTensorType.get((16, 8), i8), RankedTensorType.get((4, 8), i32)) def test_i8i8i32_matmul_unsigned(lhs, rhs, init_result): @@ -94,7 +94,7 @@ # CHECK-NEXT: %[[ADD:.+]] = arith.addi %[[C_ARG]], %[[MUL]] : i32 # CHECK-NEXT: linalg.yield %[[ADD]] : i32 # CHECK-NEXT: -> tensor<4x8xi32> - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((4, 16), i8), RankedTensorType.get((16, 8), i16), RankedTensorType.get((4, 8), i32)) def test_i8i16i32_matmul(lhs, rhs, init_result): @@ -108,7 +108,7 @@ # CHECK-NEXT: %[[ADD:.+]] = arith.addi %[[C_ARG]], %[[MUL]] : i16 # CHECK-NEXT: linalg.yield %[[ADD]] : i16 # CHECK-NEXT: -> tensor<4x8xi16> - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((4, 16), i32), RankedTensorType.get((16, 8), i32), RankedTensorType.get((4, 8), i16)) def test_i32i32i16_matmul(lhs, rhs, init_result): @@ -122,7 +122,7 @@ # CHECK-NEXT: %[[ADD:.+]] = arith.addf %[[C_ARG]], %[[MUL]] : f32 # CHECK-NEXT: linalg.yield %[[ADD]] : f32 # CHECK-NEXT: -> tensor<4x8xf32> - 
@builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((4, 16), i8), RankedTensorType.get((16, 8), i8), RankedTensorType.get((4, 8), f32)) def test_i8i8f32_matmul(lhs, rhs, init_result): @@ -131,7 +131,7 @@ # CHECK-LABEL: @test_i8i8f32_matmul_unsigned # CHECK: = arith.uitofp # CHECK: = arith.uitofp - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((4, 16), i8), RankedTensorType.get((16, 8), i8), RankedTensorType.get((4, 8), f32)) def test_i8i8f32_matmul_unsigned(lhs, rhs, init_result): @@ -146,7 +146,7 @@ # CHECK-NEXT: %[[ADD:.+]] = arith.addf %[[C_ARG]], %[[MUL]] : f32 # CHECK-NEXT: linalg.yield %[[ADD]] : f32 # CHECK-NEXT: -> tensor<4x8xf32> - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((4, 16), f16), RankedTensorType.get((16, 8), f16), RankedTensorType.get((4, 8), f32)) def test_f16f16f32_matmul(lhs, rhs, init_result): @@ -160,7 +160,7 @@ # CHECK-NEXT: %[[ADD:.+]] = arith.addf %[[C_ARG]], %[[MUL]] : f32 # CHECK-NEXT: linalg.yield %[[ADD]] : f32 # CHECK-NEXT: -> tensor<4x8xf32> - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((4, 16), f64), RankedTensorType.get((16, 8), f64), RankedTensorType.get((4, 8), f32)) def test_f64f64f32_matmul(lhs, rhs, init_result): diff --git a/mlir/test/python/dialects/linalg/opdsl/emit_misc.py b/mlir/test/python/dialects/linalg/opdsl/emit_misc.py --- a/mlir/test/python/dialects/linalg/opdsl/emit_misc.py +++ b/mlir/test/python/dialects/linalg/opdsl/emit_misc.py @@ -54,7 +54,7 @@ # CHECK-DAG: %[[CST1_CAST:.+]] = arith.truncf %[[CST1]] : f64 to f32 # CHECK-DAG: %[[SUM:.+]] = arith.addf %[[CST0_CAST]], %[[CST1_CAST]] : f32 # CHECK-NEXT: linalg.yield %[[SUM]] : f32 - @builtin.FuncOp.from_py_func(RankedTensorType.get((4, 16), f32)) + @func.FuncOp.from_py_func(RankedTensorType.get((4, 16), f32)) def test_f32_const(init_result): return test_const(outs=[init_result]) @@ -65,7 +65,7 @@ # CHECK-DAG: %[[IDX1_CAST:.+]] = 
arith.index_cast %[[IDX1]] : index to i32 # CHECK-DAG: %[[SUM:.+]] = arith.addi %[[IDX0_CAST]], %[[IDX1_CAST]] : i32 # CHECK-NEXT: linalg.yield %[[SUM]] : i32 - @builtin.FuncOp.from_py_func(RankedTensorType.get((4, 16), i32)) + @func.FuncOp.from_py_func(RankedTensorType.get((4, 16), i32)) def test_i32_index(init_result): return test_index(outs=[init_result]) @@ -74,7 +74,7 @@ # CHECK-NEXT: %[[EXP:.+]] = math.exp %[[IN]] : f32 # CHECK-NEXT: linalg.yield %[[EXP]] : f32 # CHECK-NEXT: -> tensor<4x16xf32> - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((4, 16), f32), RankedTensorType.get((4, 16), f32)) def test_f32_elemwise_exp(input, init_result): return elemwise_unary_poly(input, outs=[init_result], fun=UnaryFn.exp) @@ -84,7 +84,7 @@ # CHECK-NEXT: %[[LOG:.+]] = math.log %[[IN]] : f32 # CHECK-NEXT: linalg.yield %[[LOG]] : f32 # CHECK-NEXT: -> tensor<4x16xf32> - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((4, 16), f32), RankedTensorType.get((4, 16), f32)) def test_f32_elemwise_log(input, init_result): return elemwise_unary_poly(input, outs=[init_result], fun=UnaryFn.log) @@ -94,7 +94,7 @@ # CHECK-NEXT: %[[EXP:.+]] = math.abs %[[IN]] : f32 # CHECK-NEXT: linalg.yield %[[EXP]] : f32 # CHECK-NEXT: -> tensor<4x16xf32> - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((4, 16), f32), RankedTensorType.get((4, 16), f32)) def test_f32_elemwise_abs(input, init_result): return elemwise_unary_poly(input, outs=[init_result], fun=UnaryFn.abs) @@ -104,7 +104,7 @@ # CHECK-NEXT: %[[EXP:.+]] = math.ceil %[[IN]] : f32 # CHECK-NEXT: linalg.yield %[[EXP]] : f32 # CHECK-NEXT: -> tensor<4x16xf32> - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((4, 16), f32), RankedTensorType.get((4, 16), f32)) def test_f32_elemwise_ceil(input, init_result): return elemwise_unary_poly(input, outs=[init_result], fun=UnaryFn.ceil) @@ -114,7 +114,7 @@ # CHECK-NEXT: %[[EXP:.+]] = 
math.floor %[[IN]] : f32 # CHECK-NEXT: linalg.yield %[[EXP]] : f32 # CHECK-NEXT: -> tensor<4x16xf32> - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((4, 16), f32), RankedTensorType.get((4, 16), f32)) def test_f32_elemwise_floor(input, init_result): return elemwise_unary_poly(input, outs=[init_result], fun=UnaryFn.floor) @@ -124,14 +124,14 @@ # CHECK-NEXT: %[[EXP:.+]] = arith.negf %[[IN]] : f32 # CHECK-NEXT: linalg.yield %[[EXP]] : f32 # CHECK-NEXT: -> tensor<4x16xf32> - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((4, 16), f32), RankedTensorType.get((4, 16), f32)) def test_f32_elemwise_neg(input, init_result): return elemwise_unary_poly(input, outs=[init_result], fun=UnaryFn.negf) # Just check that we don't assert out on name mismatch. # CHECK-LABEL: @test_non_default_op_name - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((42,), f32), RankedTensorType.get((42,), f32)) def test_non_default_op_name(input, init_result): return non_default_op_name(input, outs=[init_result]) diff --git a/mlir/test/python/dialects/linalg/opdsl/emit_pooling.py b/mlir/test/python/dialects/linalg/opdsl/emit_pooling.py --- a/mlir/test/python/dialects/linalg/opdsl/emit_pooling.py +++ b/mlir/test/python/dialects/linalg/opdsl/emit_pooling.py @@ -46,7 +46,7 @@ # CHECK-NEXT: %[[MAX:.+]] = arith.maxsi %[[OUT]], %[[IN_CAST:.+]] : i32 # CHECK-NEXT: linalg.yield %[[MAX]] : i32 # CHECK-NEXT: -> tensor<1x2x4x1xi32> - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((1, 4, 16, 1), f32), RankedTensorType.get((2, 2), f32), RankedTensorType.get((1, 2, 4, 1), i32)) @@ -57,7 +57,7 @@ # CHECK-LABEL: @test_f32i32_max_unsigned_pooling # CHECK: = arith.fptoui # CHECK: = arith.maxui - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((1, 4, 16, 1), f32), RankedTensorType.get((2, 2), f32), RankedTensorType.get((1, 2, 4, 1), i32)) @@ -79,7 +79,7 @@ # 
CHECK-NEXT: %[[MAX:.+]] = arith.maxf %[[OUT]], %[[IN:.+]] : f32 # CHECK-NEXT: linalg.yield %[[MAX]] : f32 # CHECK-NEXT: -> tensor<1x2x4x1xf32> - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((1, 4, 16, 1), f32), RankedTensorType.get((2, 2), f32), RankedTensorType.get((1, 2, 4, 1), f32)) @@ -90,7 +90,7 @@ # CHECK-LABEL: @test_f32i32_min_pooling # CHECK: = arith.fptosi # CHECK: = arith.minsi - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((1, 4, 16, 1), f32), RankedTensorType.get((2, 2), f32), RankedTensorType.get((1, 2, 4, 1), i32)) @@ -106,7 +106,7 @@ # CHECK-LABEL: @test_f32i32_min_unsigned_pooling # CHECK: = arith.fptoui # CHECK: = arith.minui - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((1, 4, 16, 1), f32), RankedTensorType.get((2, 2), f32), RankedTensorType.get((1, 2, 4, 1), i32)) @@ -122,7 +122,7 @@ # CHECK-LABEL: @test_f32f32_min_pooling # CHECK: = arith.minf - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((1, 4, 16, 1), f32), RankedTensorType.get((2, 2), f32), RankedTensorType.get((1, 2, 4, 1), f32)) diff --git a/mlir/test/python/dialects/linalg/ops.py b/mlir/test/python/dialects/linalg/ops.py --- a/mlir/test/python/dialects/linalg/ops.py +++ b/mlir/test/python/dialects/linalg/ops.py @@ -24,19 +24,19 @@ with InsertionPoint(module.body): # CHECK-LABEL: func @static_sizes # CHECK: %0 = linalg.init_tensor [3, 4] : tensor<3x4xf32> - @builtin.FuncOp.from_py_func() + @func.FuncOp.from_py_func() def static_sizes(): return linalg.InitTensorOp([3, 4], f32) # CHECK-LABEL: func @dynamic_sizes # CHECK: %0 = linalg.init_tensor [%arg0, %arg1] : tensor - @builtin.FuncOp.from_py_func(IndexType.get(), IndexType.get()) + @func.FuncOp.from_py_func(IndexType.get(), IndexType.get()) def dynamic_sizes(d0, d1): return linalg.InitTensorOp([d0, d1], f32) # CHECK-LABEL: func @zero_d # CHECK: %0 = linalg.init_tensor [] : tensor - 
@builtin.FuncOp.from_py_func() + @func.FuncOp.from_py_func() def zero_d(): return linalg.InitTensorOp([], f32) @@ -67,7 +67,7 @@ # CHECK-NEXT: %[[CST:.*]] = arith.constant 0.0{{.*}} : f32 # CHECK-NEXT: %[[RES:.*]] = linalg.fill ins(%[[CST]] : f32) outs(%[[OUT]] : tensor<12x?xf32>) -> tensor<12x?xf32> # CHECK-NEXT: return %[[RES]] : tensor<12x?xf32> - @builtin.FuncOp.from_py_func(RankedTensorType.get((12, -1), f32)) + @func.FuncOp.from_py_func(RankedTensorType.get((12, -1), f32)) def fill_tensor(out): zero = arith.ConstantOp(value=FloatAttr.get(f32, 0.), result=f32).result return linalg.fill(zero, outs=[out]) @@ -77,7 +77,7 @@ # CHECK-NEXT: %[[CST:.*]] = arith.constant 0.0{{.*}} : f32 # CHECK-NEXT: linalg.fill ins(%[[CST]] : f32) outs(%[[OUT]] : memref<12x?xf32>) # CHECK-NEXT: return - @builtin.FuncOp.from_py_func(MemRefType.get((12, -1), f32)) + @func.FuncOp.from_py_func(MemRefType.get((12, -1), f32)) def fill_buffer(out): zero = arith.ConstantOp(value=FloatAttr.get(f32, 0.), result=f32).result linalg.fill(zero, outs=[out]) @@ -93,7 +93,7 @@ f32 = F32Type.get() with InsertionPoint(module.body): - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((4, 8), f32), RankedTensorType.get((4, 8), f32)) def named_form(lhs, rhs): init_result = linalg.InitTensorOp([4, 8], f32) @@ -127,7 +127,7 @@ f32 = F32Type.get() with InsertionPoint(module.body): - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((4, 16), f32), RankedTensorType.get((16, 8), f32)) def named_form(lhs, rhs): @@ -153,7 +153,7 @@ f32 = F32Type.get() with InsertionPoint(module.body): - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((4, 16), f32), RankedTensorType.get((16, 8), f32)) def generic_form(lhs, rhs): @@ -173,7 +173,7 @@ f32 = F32Type.get() with InsertionPoint(module.body): - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((4, 16), f32), RankedTensorType.get((16, 8), f32)) def 
pass_an_op_directly(arg0, arg1): diff --git a/mlir/test/python/dialects/math_dialect.py b/mlir/test/python/dialects/math_dialect.py --- a/mlir/test/python/dialects/math_dialect.py +++ b/mlir/test/python/dialects/math_dialect.py @@ -4,7 +4,7 @@ # python package's math module (coming in from random.py). from mlir.ir import * -import mlir.dialects.builtin as builtin +import mlir.dialects.func as func import mlir.dialects.math as mlir_math def run(f): @@ -17,7 +17,7 @@ with Context() as ctx, Location.unknown(): module = Module.create() with InsertionPoint(module.body): - @builtin.FuncOp.from_py_func(F32Type.get()) + @func.FuncOp.from_py_func(F32Type.get()) def emit_sqrt(arg): return mlir_math.SqrtOp(arg) diff --git a/mlir/test/python/dialects/scf.py b/mlir/test/python/dialects/scf.py --- a/mlir/test/python/dialects/scf.py +++ b/mlir/test/python/dialects/scf.py @@ -22,7 +22,7 @@ def testSimpleLoop(): index_type = IndexType.get() - @builtin.FuncOp.from_py_func(index_type, index_type, index_type) + @func.FuncOp.from_py_func(index_type, index_type, index_type) def simple_loop(lb, ub, step): loop = scf.ForOp(lb, ub, step, [lb, lb]) with InsertionPoint(loop.body): @@ -41,7 +41,7 @@ def testInductionVar(): index_type = IndexType.get() - @builtin.FuncOp.from_py_func(index_type, index_type, index_type) + @func.FuncOp.from_py_func(index_type, index_type, index_type) def induction_var(lb, ub, step): loop = scf.ForOp(lb, ub, step, [lb]) with InsertionPoint(loop.body): @@ -57,9 +57,9 @@ @constructAndPrintInModule def testOpsAsArguments(): index_type = IndexType.get() - callee = builtin.FuncOp( + callee = func.FuncOp( "callee", ([], [index_type, index_type]), visibility="private") - f = builtin.FuncOp("ops_as_arguments", ([], [])) + f = func.FuncOp("ops_as_arguments", ([], [])) with InsertionPoint(f.add_entry_block()): lb = arith.ConstantOp.create_index(0) ub = arith.ConstantOp.create_index(42) @@ -89,7 +89,7 @@ bool = IntegerType.get_signless(1) i32 = IntegerType.get_signless(32) - 
@builtin.FuncOp.from_py_func(bool) + @func.FuncOp.from_py_func(bool) def simple_if(cond): if_op = scf.IfOp(cond) with InsertionPoint(if_op.then_block): @@ -111,7 +111,7 @@ bool = IntegerType.get_signless(1) i32 = IntegerType.get_signless(32) - @builtin.FuncOp.from_py_func(bool) + @func.FuncOp.from_py_func(bool) def simple_if_else(cond): if_op = scf.IfOp(cond, [i32, i32], hasElse=True) with InsertionPoint(if_op.then_block): diff --git a/mlir/test/python/dialects/shape.py b/mlir/test/python/dialects/shape.py --- a/mlir/test/python/dialects/shape.py +++ b/mlir/test/python/dialects/shape.py @@ -2,7 +2,7 @@ from mlir.ir import * import numpy as np -import mlir.dialects.builtin as builtin +import mlir.dialects.func as func import mlir.dialects.shape as shape @@ -19,7 +19,7 @@ module = Module.create() f32 = F32Type.get() with InsertionPoint(module.body): - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( RankedTensorType.get((12, -1), f32)) def const_shape_tensor(arg): return shape.ConstShapeOp( diff --git a/mlir/test/python/dialects/tensor.py b/mlir/test/python/dialects/tensor.py --- a/mlir/test/python/dialects/tensor.py +++ b/mlir/test/python/dialects/tensor.py @@ -2,7 +2,7 @@ from mlir.ir import * import mlir.dialects.arith as arith -import mlir.dialects.builtin as builtin +import mlir.dialects.func as func import mlir.dialects.tensor as tensor @@ -21,7 +21,7 @@ indexType = IndexType.get() with InsertionPoint(module.body): - @builtin.FuncOp.from_py_func(RankedTensorType.get((-1, -1), f32Type)) + @func.FuncOp.from_py_func(RankedTensorType.get((-1, -1), f32Type)) # CHECK: func @tensor_static_dim # CHECK-SAME: %[[ARG0:.+]]: tensor # CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index diff --git a/mlir/test/python/dialects/vector.py b/mlir/test/python/dialects/vector.py --- a/mlir/test/python/dialects/vector.py +++ b/mlir/test/python/dialects/vector.py @@ -17,7 +17,7 @@ module = Module.create() with InsertionPoint(module.body): - 
@builtin.FuncOp.from_py_func(VectorType.get((12, 5), F32Type.get())) + @func.FuncOp.from_py_func(VectorType.get((12, 5), F32Type.get())) def print_vector(arg): return vector.PrintOp(arg) @@ -40,7 +40,7 @@ mask_type = VectorType.get(vector_type.shape, IntegerType.get_signless(1)) identity_map = AffineMap.get_identity(vector_type.rank) identity_map_attr = AffineMapAttr.get(identity_map) - f = builtin.FuncOp("transfer_read", + f = func.FuncOp("transfer_read", ([memref_type, index_type, F32Type.get(), mask_type], [])) with InsertionPoint(f.add_entry_block()): diff --git a/mlir/test/python/integration/dialects/linalg/opsrun.py b/mlir/test/python/integration/dialects/linalg/opsrun.py --- a/mlir/test/python/integration/dialects/linalg/opsrun.py +++ b/mlir/test/python/integration/dialects/linalg/opsrun.py @@ -196,7 +196,7 @@ mod = Module.parse("\n".join([str(op) for op in ops]) + boilerplate) pm = PassManager.parse( - "builtin.func(convert-linalg-to-loops, lower-affine, " + + "func.func(convert-linalg-to-loops, lower-affine, " + "convert-math-to-llvm, convert-scf-to-cf, arith-expand, memref-expand), " + "convert-vector-to-llvm, convert-memref-to-llvm, convert-func-to-llvm," + "reconcile-unrealized-casts") @@ -211,14 +211,14 @@ i8 = IntegerType.get_signless(8) with InsertionPoint(module.body): - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( MemRefType.get((), f32), MemRefType.get((4, 8), f32), MemRefType.get((4, 8), f32)) def elemwise_exp_add_on_buffers(lhs, rhs, out): linalg.elemwise_unary(lhs, outs=[out]) linalg.elemwise_binary(out, rhs, outs=[out]) - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( MemRefType.get((), f32), MemRefType.get((4, 8), f32), MemRefType.get((4, 8), f32)) def elemwise_log_mul_on_buffers(lhs, rhs, out): @@ -250,14 +250,14 @@ i8 = IntegerType.get_signless(8) with InsertionPoint(module.body): - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( MemRefType.get((), f32), MemRefType.get((4, 8), f32), MemRefType.get((4, 
8), f32)) def elemwise_exp_add_on_buffers(lhs, rhs, out): linalg.elemwise_unary(lhs, outs=[out], emit_generic=True) linalg.elemwise_binary(out, rhs, outs=[out], emit_generic=True) - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( MemRefType.get((), f32), MemRefType.get((4, 8), f32), MemRefType.get((4, 8), f32)) def elemwise_log_mul_on_buffers(lhs, rhs, out): @@ -291,13 +291,13 @@ i8 = IntegerType.get_signless(8) with InsertionPoint(module.body): - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( MemRefType.get((4, 16), i8), MemRefType.get((16, 8), f32), MemRefType.get((4, 8), f32)) def matmul_signed_on_buffers(lhs, rhs, out): linalg.matmul(lhs, rhs, outs=[out]) - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( MemRefType.get((4, 16), i8), MemRefType.get((16, 8), f32), MemRefType.get((4, 8), f32)) def matmul_unsigned_on_buffers(lhs, rhs, out): @@ -328,13 +328,13 @@ i8 = IntegerType.get_signless(8) with InsertionPoint(module.body): - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( MemRefType.get((4, 16), i8), MemRefType.get((16, 8), f32), MemRefType.get((4, 8), f32)) def matmul_signed_on_buffers(lhs, rhs, out): linalg.matmul(lhs, rhs, outs=[out], emit_generic=True) - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( MemRefType.get((4, 16), i8), MemRefType.get((16, 8), f32), MemRefType.get((4, 8), f32)) def matmul_unsigned_on_buffers(lhs, rhs, out): @@ -366,15 +366,15 @@ i32 = IntegerType.get_signless(32) with InsertionPoint(module.body): - @builtin.FuncOp.from_py_func(f32, MemRefType.get([], i32)) + @func.FuncOp.from_py_func(f32, MemRefType.get([], i32)) def fill_0d_on_buffers(value, out): linalg.fill(value, outs=[out]) - @builtin.FuncOp.from_py_func(f32, MemRefType.get([16], i32)) + @func.FuncOp.from_py_func(f32, MemRefType.get([16], i32)) def fill_1d_on_buffers(value, out): linalg.fill(value, outs=[out]) - @builtin.FuncOp.from_py_func(f32, MemRefType.get([4, 16], i32)) + @func.FuncOp.from_py_func(f32, 
MemRefType.get([4, 16], i32)) def fill_2d_on_buffers(value, out): linalg.fill(value, outs=[out]) @@ -401,15 +401,15 @@ i32 = IntegerType.get_signless(32) with InsertionPoint(module.body): - @builtin.FuncOp.from_py_func(f32, MemRefType.get([], i32)) + @func.FuncOp.from_py_func(f32, MemRefType.get([], i32)) def fill_0d_on_buffers(value, out): linalg.fill(value, outs=[out], emit_generic=True) - @builtin.FuncOp.from_py_func(f32, MemRefType.get([16], i32)) + @func.FuncOp.from_py_func(f32, MemRefType.get([16], i32)) def fill_1d_on_buffers(value, out): linalg.fill(value, outs=[out], emit_generic=True) - @builtin.FuncOp.from_py_func(f32, MemRefType.get([4, 16], i32)) + @func.FuncOp.from_py_func(f32, MemRefType.get([4, 16], i32)) def fill_2d_on_buffers(value, out): linalg.fill(value, outs=[out], emit_generic=True) @@ -436,7 +436,7 @@ i32 = IntegerType.get_signless(32) with InsertionPoint(module.body): - @builtin.FuncOp.from_py_func(f64, f64, i32, MemRefType.get((4, 16), i32)) + @func.FuncOp.from_py_func(f64, f64, i32, MemRefType.get((4, 16), i32)) def fill_rng_on_buffers(min, max, seed, out): linalg.fill_rng_2d(min, max, seed, outs=[out]) @@ -463,7 +463,7 @@ i32 = IntegerType.get_signless(32) with InsertionPoint(module.body): - @builtin.FuncOp.from_py_func(f64, f64, i32, MemRefType.get((4, 16), i32)) + @func.FuncOp.from_py_func(f64, f64, i32, MemRefType.get((4, 16), i32)) def fill_rng_on_buffers(min, max, seed, out): linalg.fill_rng_2d(min, max, seed, outs=[out], emit_generic=True) @@ -490,7 +490,7 @@ i32 = IntegerType.get_signless(32) with InsertionPoint(module.body): - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( MemRefType.get((1, 4, 16, 1), f64), MemRefType.get((2, 2), f64), MemRefType.get((1, 2, 4, 1), i32)) def pooling_on_buffers(input, shape, output): @@ -521,7 +521,7 @@ i32 = IntegerType.get_signless(32) with InsertionPoint(module.body): - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( MemRefType.get((1, 4, 16, 1), f64), MemRefType.get((2, 
2), f64), MemRefType.get((1, 2, 4, 1), i32)) def pooling_on_buffers(input, shape, output): @@ -557,7 +557,7 @@ i32 = IntegerType.get_signless(32) with InsertionPoint(module.body): - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( MemRefType.get((1, 4, 16, 1), f64), MemRefType.get((2, 2), f64), MemRefType.get((1, 2, 4, 1), i32)) # Set the strides and use the default dilations. @@ -587,7 +587,7 @@ i32 = IntegerType.get_signless(32) with InsertionPoint(module.body): - @builtin.FuncOp.from_py_func( + @func.FuncOp.from_py_func( MemRefType.get((1, 4, 16, 1), f64), MemRefType.get((2, 2), f64), MemRefType.get((1, 2, 4, 1), i32)) # Set the strides and use the default dilations. diff --git a/mlir/test/python/ir/blocks.py b/mlir/test/python/ir/blocks.py --- a/mlir/test/python/ir/blocks.py +++ b/mlir/test/python/ir/blocks.py @@ -32,7 +32,7 @@ f_type = FunctionType.get( [IntegerType.get_signless(32), IntegerType.get_signless(16)], []) - f_op = builtin.FuncOp("test", f_type) + f_op = func.FuncOp("test", f_type) entry_block = f_op.add_entry_block() i32_arg, i16_arg = entry_block.arguments successor_block = entry_block.create_after(i32_arg.type) @@ -62,7 +62,7 @@ module = Module.create() f32 = F32Type.get() with InsertionPoint(module.body): - f = builtin.FuncOp("test", ([f32], [])) + f = func.FuncOp("test", ([f32], [])) entry_block = Block.create_at_start(f.operation.regions[0], [f32]) with InsertionPoint(entry_block): func.ReturnOp([]) diff --git a/mlir/test/python/ir/operation.py b/mlir/test/python/ir/operation.py --- a/mlir/test/python/ir/operation.py +++ b/mlir/test/python/ir/operation.py @@ -113,9 +113,9 @@ # CHECK: REGION 0: # CHECK: BLOCK 0: # CHECK: OP 0: %0 = "custom.addi" - # CHECK: OP 0: parent builtin.func + # CHECK: OP 0: parent func.func # CHECK: OP 1: return - # CHECK: OP 1: parent builtin.func + # CHECK: OP 1: parent func.func walk_operations("", module.operation) @@ -127,7 +127,7 @@ module = Module.parse( r""" builtin.module { - builtin.func @f() { + 
func.func @f() { func.return } } diff --git a/mlir/test/python/pass_manager.py b/mlir/test/python/pass_manager.py --- a/mlir/test/python/pass_manager.py +++ b/mlir/test/python/pass_manager.py @@ -36,19 +36,19 @@ # A first import is expected to fail because the pass isn't registered # until we import mlir.transforms try: - pm = PassManager.parse("builtin.module(builtin.func(print-op-stats))") + pm = PassManager.parse("builtin.module(func.func(print-op-stats))") # TODO: this error should be propagate to Python but the C API does not help right now. # CHECK: error: 'print-op-stats' does not refer to a registered pass or pass pipeline except ValueError as e: - # CHECK: ValueError exception: invalid pass pipeline 'builtin.module(builtin.func(print-op-stats))'. + # CHECK: ValueError exception: invalid pass pipeline 'builtin.module(func.func(print-op-stats))'. log("ValueError exception:", e) else: log("Exception not produced") # This will register the pass and round-trip should be possible now. import mlir.transforms - pm = PassManager.parse("builtin.module(builtin.func(print-op-stats))") - # CHECK: Roundtrip: builtin.module(builtin.func(print-op-stats)) + pm = PassManager.parse("builtin.module(func.func(print-op-stats))") + # CHECK: Roundtrip: builtin.module(func.func(print-op-stats)) log("Roundtrip: ", pm) run(testParseSuccess) @@ -72,10 +72,10 @@ with Context(): try: import mlir.all_passes_registration - pm = PassManager.parse("builtin.func(normalize-memrefs)") + pm = PassManager.parse("func.func(normalize-memrefs)") except ValueError as e: - # CHECK: Can't add pass 'NormalizeMemRefs' restricted to 'builtin.module' on a PassManager intended to run on 'builtin.func', did you intend to nest? - # CHECK: ValueError exception: invalid pass pipeline 'builtin.func(normalize-memrefs)'. + # CHECK: Can't add pass 'NormalizeMemRefs' restricted to 'builtin.module' on a PassManager intended to run on 'func.func', did you intend to nest? 
+ # CHECK: ValueError exception: invalid pass pipeline 'func.func(normalize-memrefs)'. log("ValueError exception:", e) else: log("Exception not produced") @@ -90,7 +90,7 @@ module = Module.parse(r"""func @successfulParse() { return }""") pm.run(module) # CHECK: Operations encountered: -# CHECK: builtin.func , 1 # CHECK: builtin.module , 1 +# CHECK: func.func , 1 # CHECK: func.return , 1 run(testRunPipeline)