diff --git a/mlir/docs/Tutorials/Toy/Ch-2.md b/mlir/docs/Tutorials/Toy/Ch-2.md --- a/mlir/docs/Tutorials/Toy/Ch-2.md +++ b/mlir/docs/Tutorials/Toy/Ch-2.md @@ -120,7 +120,8 @@ the above fundamental concepts. This allows MLIR to parse, represent, and [round-trip](../../../getting_started/Glossary.md/#round-trip) IR for *any* operation. For example, we could place our Toy operation from above into an -`.mlir` file and round-trip through *mlir-opt* without registering any dialect: +`.mlir` file and round-trip through *mlir-opt* without registering any `toy` +related dialect: ```mlir func @toy_func(%tensor: tensor<2x3xf64>) -> tensor<3x2xf64> { @@ -558,13 +559,14 @@ ```mlir module { - func @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> { + "toy.func"() ({ + ^bb0(%arg0: tensor<*xf64> loc("test/Examples/Toy/Ch2/codegen.toy":4:1), %arg1: tensor<*xf64> loc("test/Examples/Toy/Ch2/codegen.toy":4:1)): %0 = "toy.transpose"(%arg0) : (tensor<*xf64>) -> tensor<*xf64> loc("test/Examples/Toy/Ch2/codegen.toy":5:10) %1 = "toy.transpose"(%arg1) : (tensor<*xf64>) -> tensor<*xf64> loc("test/Examples/Toy/Ch2/codegen.toy":5:25) %2 = "toy.mul"(%0, %1) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64> loc("test/Examples/Toy/Ch2/codegen.toy":5:25) "toy.return"(%2) : (tensor<*xf64>) -> () loc("test/Examples/Toy/Ch2/codegen.toy":5:3) - } loc("test/Examples/Toy/Ch2/codegen.toy":4:1) - func @main() { + }) {sym_name = "multiply_transpose", type = (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>} : () -> () loc("test/Examples/Toy/Ch2/codegen.toy":4:1) + "toy.func"() ({ %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64> loc("test/Examples/Toy/Ch2/codegen.toy":9:17) %1 = "toy.reshape"(%0) : (tensor<2x3xf64>) -> tensor<2x3xf64> loc("test/Examples/Toy/Ch2/codegen.toy":9:3) %2 = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64> loc("test/Examples/Toy/Ch2/codegen.toy":10:17) @@ -573,7 +575,7 @@ %5 = "toy.generic_call"(%3, %1) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> loc("test/Examples/Toy/Ch2/codegen.toy":12:11) "toy.print"(%5) : (tensor<*xf64>) -> () loc("test/Examples/Toy/Ch2/codegen.toy":13:3) "toy.return"() : () -> () loc("test/Examples/Toy/Ch2/codegen.toy":8:1) - } loc("test/Examples/Toy/Ch2/codegen.toy":8:1) + }) {sym_name = "main", type = () -> ()} : () -> () loc("test/Examples/Toy/Ch2/codegen.toy":8:1) } loc(unknown) ``` @@ -686,13 +688,13 @@ ```mlir module { - func @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> { + toy.func @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> { %0 = toy.transpose(%arg0 : tensor<*xf64>) to tensor<*xf64> loc("test/Examples/Toy/Ch2/codegen.toy":5:10) %1 = toy.transpose(%arg1 : tensor<*xf64>) to tensor<*xf64> loc("test/Examples/Toy/Ch2/codegen.toy":5:25) %2 = toy.mul %0, %1 : tensor<*xf64> loc("test/Examples/Toy/Ch2/codegen.toy":5:25) toy.return %2 : tensor<*xf64> loc("test/Examples/Toy/Ch2/codegen.toy":5:3) } loc("test/Examples/Toy/Ch2/codegen.toy":4:1) - func @main() { + toy.func @main() { %0 = toy.constant dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64> loc("test/Examples/Toy/Ch2/codegen.toy":9:17) %1 = toy.reshape(%0 : tensor<2x3xf64>) to tensor<2x3xf64> 
loc("test/Examples/Toy/Ch2/codegen.toy":9:3) %2 = toy.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64> loc("test/Examples/Toy/Ch2/codegen.toy":10:17) diff --git a/mlir/docs/Tutorials/Toy/Ch-3.md b/mlir/docs/Tutorials/Toy/Ch-3.md --- a/mlir/docs/Tutorials/Toy/Ch-3.md +++ b/mlir/docs/Tutorials/Toy/Ch-3.md @@ -37,7 +37,7 @@ Which corresponds to the following IR: ```mlir -func @transpose_transpose(%arg0: tensor<*xf64>) -> tensor<*xf64> { +toy.func @transpose_transpose(%arg0: tensor<*xf64>) -> tensor<*xf64> { %0 = toy.transpose(%arg0 : tensor<*xf64>) to tensor<*xf64> %1 = toy.transpose(%0 : tensor<*xf64>) to tensor<*xf64> toy.return %1 : tensor<*xf64> @@ -125,14 +125,14 @@ ```c++ mlir::PassManager pm(module.getContext()); - pm.addNestedPass(mlir::createCanonicalizerPass()); + pm.addNestedPass(mlir::createCanonicalizerPass()); ``` Finally, we can run `toyc-ch3 test/Examples/Toy/Ch3/transpose_transpose.toy -emit=mlir -opt` and observe our pattern in action: ```mlir -func @transpose_transpose(%arg0: tensor<*xf64>) -> tensor<*xf64> { +toy.func @transpose_transpose(%arg0: tensor<*xf64>) -> tensor<*xf64> { %0 = toy.transpose(%arg0 : tensor<*xf64>) to tensor<*xf64> toy.return %arg0 : tensor<*xf64> } @@ -153,7 +153,7 @@ Let's retry now `toyc-ch3 test/transpose_transpose.toy -emit=mlir -opt`: ```mlir -func @transpose_transpose(%arg0: tensor<*xf64>) -> tensor<*xf64> { +toy.func @transpose_transpose(%arg0: tensor<*xf64>) -> tensor<*xf64> { toy.return %arg0 : tensor<*xf64> } ``` @@ -228,7 +228,7 @@ ```mlir module { - func @main() { + toy.func @main() { %0 = toy.constant dense<[1.000000e+00, 2.000000e+00]> : tensor<2xf64> %1 = toy.reshape(%0 : tensor<2xf64>) to tensor<2x1xf64> %2 = toy.reshape(%1 : tensor<2x1xf64>) to tensor<2x1xf64> @@ -244,7 +244,7 @@ ```mlir module { - func @main() { + toy.func @main() { %0 = toy.constant dense<[[1.000000e+00], [2.000000e+00]]> : tensor<2x1xf64> toy.print %0 : tensor<2x1xf64> toy.return diff --git a/mlir/docs/Tutorials/Toy/Ch-4.md b/mlir/docs/Tutorials/Toy/Ch-4.md --- a/mlir/docs/Tutorials/Toy/Ch-4.md +++ b/mlir/docs/Tutorials/Toy/Ch-4.md @@ -77,6 +77,14 @@ return true; } + /// This hook cheks if the given 'src' region can be inlined into the 'dest' + /// region. The regions here are the bodies of the callable functions. For + /// Toy, any function can be inlined, so we simply return true. + bool isLegalToInline(Region *dest, Region *src, bool wouldBeCloned, + BlockAndValueMapping &valueMapping) const final { + return true; + } + /// This hook is called when a terminator operation has been inlined. The only /// terminator that we have in the Toy dialect is the return /// operation(toy.return). We handle the return by replacing the values @@ -101,7 +109,7 @@ ```c++ /// Emit a new function and add it to the MLIR module. -mlir::FuncOp mlirGen(FunctionAST &funcAST) { +mlir::toy::FuncOp mlirGen(FunctionAST &funcAST) { ... // If this function isn't main, then set the visibility to private. if (funcAST.getProto()->getName() != "main") @@ -121,12 +129,12 @@ ``` Next, we need to provide a way for the inliner to know that `toy.generic_call` -represents a call to a function. MLIR provides an -[operation interface](../../Interfaces.md/#attributeoperationtype-interfaces) that can be used -to mark an operation as being "call-like". Unlike dialect interfaces, operation -interfaces provide a more refined granularity of information that is specific -and core to a single operation. 
-`CallOpInterface`.
+represents a call, and `toy.func` represents a function. MLIR provides
+[operation interfaces](../../Interfaces.md/#attributeoperationtype-interfaces) that can be used
+to mark an operation as being "call-like" or "callable-like". Unlike dialect interfaces,
+operation interfaces provide a more refined granularity of information that is specific
+and core to a single operation. The interfaces that we will be adding here are the
+`CallOpInterface` and `CallableOpInterface`.
 
 To add this interface we just need to include the definition into our operation
 specification file (`Ops.td`):
@@ -138,6 +146,11 @@
 
 and add it to the traits list of `GenericCallOp`:
 
 ```tablegen
+def FuncOp : Toy_Op<"func",
+    [DeclareOpInterfaceMethods<CallableOpInterface>]> {
+  ...
+}
+
 def GenericCallOp : Toy_Op<"generic_call",
     [DeclareOpInterfaceMethods<CallOpInterface>]> {
   ...
@@ -149,6 +162,15 @@
 GenericCallOp. This means that we just need to provide a definition:
 
 ```c++
+/// Returns the region on the function operation that is callable.
+Region *FuncOp::getCallableRegion() { return &getBody(); }
+
+/// Returns the result types that the callable region produces when
+/// executed.
+ArrayRef<Type> FuncOp::getCallableResults() { return getType().getResults(); }
+
+// ....
+
 /// Return the callee of the generic call operation, this is required by the
 /// call interface.
 CallInterfaceCallable GenericCallOp::getCallableForCallee() {
@@ -170,13 +192,13 @@
 Now let's look at a working example:
 
 ```mlir
-func @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> {
+toy.func @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> {
   %0 = toy.transpose(%arg0 : tensor<*xf64>) to tensor<*xf64>
   %1 = toy.transpose(%arg1 : tensor<*xf64>) to tensor<*xf64>
   %2 = toy.mul %0, %1 : tensor<*xf64>
   toy.return %2 : tensor<*xf64>
 }
-func @main() {
+toy.func @main() {
   %0 = toy.constant dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
   %1 = toy.reshape(%0 : tensor<2x3xf64>) to tensor<2x3xf64>
   %2 = toy.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>
@@ -214,6 +236,7 @@
 
   let arguments = (ins F64Tensor:$input);
   let results = (outs F64Tensor:$output);
+  let assemblyFormat = "$input attr-dict `:` type($input) `to` type($output)";
 }
 ```
 
@@ -263,14 +286,14 @@
 If we run the working example through the pipeline again, we get the expected:
 
 ```mlir
-func @main() {
-  %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
-  %1 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
-  %2 = "toy.cast"(%1) : (tensor<2x3xf64>) -> tensor<*xf64>
-  %3 = "toy.cast"(%0) : (tensor<2x3xf64>) -> tensor<*xf64>
-  %4 = "toy.transpose"(%2) : (tensor<*xf64>) -> tensor<*xf64>
-  %5 = "toy.transpose"(%3) : (tensor<*xf64>) -> tensor<*xf64>
-  %6 = "toy.mul"(%4, %5) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
+toy.func @main() {
+  %0 = toy.constant dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
+  %1 = toy.constant dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
+  %2 = toy.cast %1 : tensor<2x3xf64> to tensor<*xf64>
+  %3
= toy.cast %0 : tensor<2x3xf64> to tensor<*xf64> + %4 = toy.transpose(%2 : tensor<*xf64>) to tensor<*xf64> + %5 = toy.transpose(%3 : tensor<*xf64>) to tensor<*xf64> + %6 = toy.mul %4, %5 : tensor<*xf64> toy.print %6 : tensor<*xf64> toy.return } @@ -357,10 +380,10 @@ At this point, each of the necessary Toy operations provide a mechanism by which to infer their output shapes. The ShapeInferencePass will operate on functions: -it will run on each Function in isolation. MLIR also supports general +it will run on each function in isolation. MLIR also supports general [OperationPasses](../../PassManagement.md#operation-pass) that run on any -isolated operation (i.e. other function-like operations), but here our module -only contains functions, so there is no need to generalize to all operations. +isolated operation, but here our module only contains functions, so there is no +need to generalize to all operations. Implementing such a pass is done by creating a class inheriting from `mlir::OperationPass` and overriding the `runOnOperation()` method. @@ -421,10 +444,10 @@ If we rerun our original example, we now get the following: ```mlir -func @main() { - %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64> - %1 = "toy.transpose"(%0) : (tensor<2x3xf64>) -> tensor<3x2xf64> - %2 = "toy.mul"(%1, %1) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64> +toy.func @main() { + %0 = toy.constant dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64> + %1 = toy.transpose(%0 : tensor<2x3xf64>) to tensor<3x2xf64> + %2 = toy.mul %1, %1 : tensor<3x2xf64> toy.print %2 : tensor<3x2xf64> toy.return } diff --git a/mlir/docs/Tutorials/Toy/Ch-5.md b/mlir/docs/Tutorials/Toy/Ch-5.md --- a/mlir/docs/Tutorials/Toy/Ch-5.md +++ b/mlir/docs/Tutorials/Toy/Ch-5.md @@ -172,8 +172,7 @@ // With the target and rewrite patterns defined, we can now attempt the // conversion. The conversion will signal failure if any of our *illegal* // operations were not converted successfully. 
-  mlir::FuncOp function = getOperation();
-  if (mlir::failed(mlir::applyPartialConversion(function, target, patterns)))
+  if (mlir::failed(mlir::applyPartialConversion(getOperation(), target, patterns)))
     signalPassFailure();
 }
 ```
@@ -232,7 +231,7 @@
 Let's take a concrete example:
 
 ```mlir
-func @main() {
+toy.func @main() {
   %0 = toy.constant dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
   %2 = toy.transpose(%0 : tensor<2x3xf64>) to tensor<3x2xf64>
   %3 = toy.mul %2, %2 : tensor<3x2xf64>
diff --git a/mlir/docs/Tutorials/Toy/Ch-6.md b/mlir/docs/Tutorials/Toy/Ch-6.md
--- a/mlir/docs/Tutorials/Toy/Ch-6.md
+++ b/mlir/docs/Tutorials/Toy/Ch-6.md
@@ -119,7 +119,7 @@
 Looking back at our current working example:
 
 ```mlir
-func @main() {
+toy.func @main() {
   %0 = toy.constant dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
   %2 = toy.transpose(%0 : tensor<2x3xf64>) to tensor<3x2xf64>
   %3 = toy.mul %2, %2 : tensor<3x2xf64>
diff --git a/mlir/docs/Tutorials/Toy/Ch-7.md b/mlir/docs/Tutorials/Toy/Ch-7.md
--- a/mlir/docs/Tutorials/Toy/Ch-7.md
+++ b/mlir/docs/Tutorials/Toy/Ch-7.md
@@ -327,7 +327,7 @@
 
 ```mlir
 module {
-  func @multiply_transpose(%arg0: !toy.struct<tensor<*xf64>, tensor<*xf64>>) {
+  toy.func @multiply_transpose(%arg0: !toy.struct<tensor<*xf64>, tensor<*xf64>>) {
     toy.return
   }
 }
@@ -405,7 +405,7 @@
 
 ```mlir
 module {
-  func @multiply_transpose(%arg0: !toy.struct<tensor<*xf64>, tensor<*xf64>>) -> tensor<*xf64> {
+  toy.func @multiply_transpose(%arg0: !toy.struct<tensor<*xf64>, tensor<*xf64>>) -> tensor<*xf64> {
     %0 = toy.struct_access %arg0[0] : !toy.struct<tensor<*xf64>, tensor<*xf64>> -> tensor<*xf64>
     %1 = toy.transpose(%0 : tensor<*xf64>) to tensor<*xf64>
     %2 = toy.struct_access %arg0[1] : !toy.struct<tensor<*xf64>, tensor<*xf64>> -> tensor<*xf64>
@@ -413,7 +413,7 @@
     %4 = toy.mul %1, %3 : tensor<*xf64>
     toy.return %4 : tensor<*xf64>
   }
-  func @main() {
+  toy.func @main() {
     %0 = toy.struct_constant [
       dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>,
       dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
@@ -434,7 +434,7 @@
 
 ```mlir
 module {
-  func @main() {
+  toy.func @main() {
     %0 = toy.struct_constant [
       dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>,
       dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
@@ -500,7 +500,7 @@
 
 ```mlir
 module {
-  func @main() {
+  toy.func @main() {
     %0 = toy.constant dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
     %1 = toy.transpose(%0 : tensor<2x3xf64>) to tensor<3x2xf64>
     %2 = toy.mul %1, %1 : tensor<3x2xf64>
diff --git a/mlir/examples/toy/Ch2/include/toy/Dialect.h b/mlir/examples/toy/Ch2/include/toy/Dialect.h
--- a/mlir/examples/toy/Ch2/include/toy/Dialect.h
+++ b/mlir/examples/toy/Ch2/include/toy/Dialect.h
@@ -14,8 +14,10 @@
 #ifndef MLIR_TUTORIAL_TOY_DIALECT_H_
 #define MLIR_TUTORIAL_TOY_DIALECT_H_
 
-#include "mlir/IR/BuiltinOps.h"
 #include "mlir/IR/Dialect.h"
+#include "mlir/IR/FunctionInterfaces.h"
+#include "mlir/IR/SymbolTable.h"
+#include "mlir/Interfaces/CallInterfaces.h"
 #include "mlir/Interfaces/SideEffectInterfaces.h"
 
 /// Include the auto-generated header file containing the declaration of the toy
diff --git a/mlir/examples/toy/Ch2/include/toy/Ops.td b/mlir/examples/toy/Ch2/include/toy/Ops.td
--- a/mlir/examples/toy/Ch2/include/toy/Ops.td
+++ b/mlir/examples/toy/Ch2/include/toy/Ops.td
@@ -14,6 +14,8 @@
 #define TOY_OPS
 
 include "mlir/IR/OpBase.td"
+include "mlir/IR/FunctionInterfaces.td"
+include "mlir/IR/SymbolInterfaces.td"
 include "mlir/Interfaces/SideEffectInterfaces.td"
 
 // Provide a definition of the 'toy' dialect in the ODS framework so that we
@@ -106,6 +108,72 @@
   ];
 }
 
+//===----------------------------------------------------------------------===//
+// FuncOp
+//===----------------------------------------------------------------------===//
+
+def FuncOp : Toy_Op<"func", [
+    FunctionOpInterface, IsolatedFromAbove, Symbol
+  ]> {
+  let summary = "user defined function operation";
+  let description = [{
+    The "toy.func" operation represents a user defined function. These are
+    callable SSA-region operations that contain toy computations.
+
+    Example:
+
+    ```mlir
+    toy.func @main() {
+      %0 = toy.constant dense<5.500000e+00> : tensor<f64>
+      %1 = toy.reshape(%0 : tensor<f64>) to tensor<2x2xf64>
+      toy.print %1 : tensor<2x2xf64>
+      toy.return
+    }
+    ```
+  }];
+
+  let arguments = (ins
+    SymbolNameAttr:$sym_name,
+    TypeAttrOf<FunctionType>:$type
+  );
+  let regions = (region AnyRegion:$body);
+
+  let builders = [OpBuilder<(ins
+    "StringRef":$name, "FunctionType":$type,
+    CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)
+  >];
+  let extraClassDeclaration = [{
+    /// Returns the type of this function.
+    /// FIXME: We should drive this via the ODS `type` param.
+    FunctionType getType() {
+      return getTypeAttr().getValue().cast<FunctionType>();
+    }
+
+    //===------------------------------------------------------------------===//
+    // FunctionOpInterface Methods
+    //===------------------------------------------------------------------===//
+
+    /// Returns the argument types of this function.
+    ArrayRef<Type> getArgumentTypes() { return type().getInputs(); }
+
+    /// Returns the result types of this function.
+    ArrayRef<Type> getResultTypes() { return type().getResults(); }
+
+    /// Verify the type attribute of this function. Returns failure and emits
+    /// an error if the attribute is invalid.
+    LogicalResult verifyType() {
+      auto type = getTypeAttr().getValue();
+      if (!type.isa<FunctionType>())
+        return emitOpError("requires '" + FunctionOpInterface::getTypeAttrName() +
+                           "' attribute of function type");
+      return success();
+    }
+  }];
+  let hasCustomAssemblyFormat = 1;
+  let skipDefaultBuilders = 1;
+  let hasVerifier = 1;
+}
+
 //===----------------------------------------------------------------------===//
 // GenericCallOp
 //===----------------------------------------------------------------------===//
diff --git a/mlir/examples/toy/Ch2/mlir/Dialect.cpp b/mlir/examples/toy/Ch2/mlir/Dialect.cpp
--- a/mlir/examples/toy/Ch2/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch2/mlir/Dialect.cpp
@@ -15,6 +15,7 @@
 
 #include "mlir/IR/Builders.h"
 #include "mlir/IR/BuiltinTypes.h"
+#include "mlir/IR/FunctionImplementation.h"
 #include "mlir/IR/OpImplementation.h"
 
 using namespace mlir;
@@ -187,6 +188,57 @@
                        mlir::SymbolRefAttr::get(builder.getContext(), callee));
 }
 
+//===----------------------------------------------------------------------===//
+// FuncOp
+//===----------------------------------------------------------------------===//
+
+void FuncOp::build(mlir::OpBuilder &builder, mlir::OperationState &state,
+                   llvm::StringRef name, mlir::FunctionType type,
+                   llvm::ArrayRef<mlir::NamedAttribute> attrs) {
+  // FunctionOpInterface provides a convenient `build` method that will populate
+  // the state of our FuncOp, and create an entry block.
+  buildWithEntryBlock(builder, state, name, type, attrs, type.getInputs());
+}
+
+mlir::ParseResult FuncOp::parse(mlir::OpAsmParser &parser,
+                                mlir::OperationState &result) {
+  // Dispatch to the FunctionOpInterface provided utility method that parses the
+  // function operation.
+  auto buildFuncType =
+      [](mlir::Builder &builder, llvm::ArrayRef<mlir::Type> argTypes,
+         llvm::ArrayRef<mlir::Type> results,
+         mlir::function_interface_impl::VariadicFlag,
+         std::string &) { return builder.getFunctionType(argTypes, results); };
+
+  return mlir::function_interface_impl::parseFunctionOp(
+      parser, result, /*allowVariadic=*/false, buildFuncType);
+}
+
+void FuncOp::print(mlir::OpAsmPrinter &p) {
+  // Dispatch to the FunctionOpInterface provided utility method that prints the
+  // function operation.
+  mlir::function_interface_impl::printFunctionOp(p, *this,
+                                                 /*isVariadic=*/false);
+}
+
+mlir::LogicalResult FuncOp::verify() {
+  // Verify that the argument list of the function and the arg list of the entry
+  // block line up. The trait already verified that the number of arguments is
+  // the same between the signature and the block.
+  llvm::ArrayRef<mlir::Type> fnInputTypes = getType().getInputs();
+  mlir::Block &entryBlock = front();
+  for (unsigned i = 0, e = entryBlock.getNumArguments(); i != e; ++i) {
+    if (fnInputTypes[i] != entryBlock.getArgument(i).getType()) {
+      return emitOpError("type of entry block argument #")
+             << i << '(' << entryBlock.getArgument(i).getType()
+             << ") must match the type of the corresponding argument in "
+             << "function signature(" << fnInputTypes[i] << ')';
+    }
+  }
+
+  return success();
+}
+
 //===----------------------------------------------------------------------===//
 // MulOp
 //===----------------------------------------------------------------------===//
diff --git a/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
--- a/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch2/mlir/MLIRGen.cpp
@@ -58,12 +58,8 @@
   // add them to the module.
   theModule = mlir::ModuleOp::create(builder.getUnknownLoc());
 
-  for (FunctionAST &f : moduleAST) {
-    auto func = mlirGen(f);
-    if (!func)
-      return nullptr;
-    theModule.push_back(func);
-  }
+  for (FunctionAST &f : moduleAST)
+    mlirGen(f);
 
   // Verify the module after we have finished constructing it, this will check
   // the structural properties of the IR and invoke any specific verifiers we
@@ -108,7 +104,7 @@
   /// Create the prototype for an MLIR function with as many arguments as the
   /// provided Toy AST prototype.
-  mlir::FuncOp mlirGen(PrototypeAST &proto) {
+  mlir::toy::FuncOp mlirGen(PrototypeAST &proto) {
     auto location = loc(proto.loc());
 
     // This is a generic function, the return type will be inferred later.
@@ -116,23 +112,23 @@
     llvm::SmallVector<mlir::Type, 4> argTypes(proto.getArgs().size(),
                                               getType(VarType{}));
     auto funcType = builder.getFunctionType(argTypes, llvm::None);
-    return mlir::FuncOp::create(location, proto.getName(), funcType);
+    return builder.create<mlir::toy::FuncOp>(location, proto.getName(),
+                                             funcType);
   }
 
   /// Emit a new function and add it to the MLIR module.
-  mlir::FuncOp mlirGen(FunctionAST &funcAST) {
+  mlir::toy::FuncOp mlirGen(FunctionAST &funcAST) {
     // Create a scope in the symbol table to hold variable declarations.
     ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);
 
    // Create an MLIR function for the given prototype.
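    // Note: the prototype is now built with builder.create<mlir::toy::FuncOp>,
    // which inserts the new function at the builder's current insertion point
    // (set below to the end of the module body), so the module-level loop no
    // longer needs to push_back each generated function explicitly.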
- mlir::FuncOp function(mlirGen(*funcAST.getProto())); + builder.setInsertionPointToEnd(theModule.getBody()); + mlir::toy::FuncOp function = mlirGen(*funcAST.getProto()); if (!function) return nullptr; // Let's start the body of the function now! - // In MLIR the entry block of the function is special: it must have the same - // argument list as the function itself. - auto &entryBlock = *function.addEntryBlock(); + mlir::Block &entryBlock = function.front(); auto protoArgs = funcAST.getProto()->getArgs(); // Declare all the function arguments in the symbol table. diff --git a/mlir/examples/toy/Ch3/include/toy/Dialect.h b/mlir/examples/toy/Ch3/include/toy/Dialect.h --- a/mlir/examples/toy/Ch3/include/toy/Dialect.h +++ b/mlir/examples/toy/Ch3/include/toy/Dialect.h @@ -14,8 +14,10 @@ #ifndef MLIR_TUTORIAL_TOY_DIALECT_H_ #define MLIR_TUTORIAL_TOY_DIALECT_H_ -#include "mlir/IR/BuiltinOps.h" #include "mlir/IR/Dialect.h" +#include "mlir/IR/FunctionInterfaces.h" +#include "mlir/IR/SymbolTable.h" +#include "mlir/Interfaces/CallInterfaces.h" #include "mlir/Interfaces/SideEffectInterfaces.h" /// Include the auto-generated header file containing the declaration of the toy diff --git a/mlir/examples/toy/Ch3/include/toy/Ops.td b/mlir/examples/toy/Ch3/include/toy/Ops.td --- a/mlir/examples/toy/Ch3/include/toy/Ops.td +++ b/mlir/examples/toy/Ch3/include/toy/Ops.td @@ -13,6 +13,8 @@ #ifndef TOY_OPS #define TOY_OPS +include "mlir/IR/FunctionInterfaces.td" +include "mlir/IR/SymbolInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td" // Provide a definition of the 'toy' dialect in the ODS framework so that we @@ -105,6 +107,72 @@ ]; } +//===----------------------------------------------------------------------===// +// FuncOp +//===----------------------------------------------------------------------===// + +def FuncOp : Toy_Op<"func", [ + FunctionOpInterface, IsolatedFromAbove, Symbol + ]> { + let summary = "user defined function operation"; + let description = [{ + The "toy.func" operation represents a user defined function. These are + callable SSA-region operations that contain toy computations. + + Example: + + ```mlir + toy.func @main() { + %0 = toy.constant dense<5.500000e+00> : tensor + %1 = toy.reshape(%0 : tensor) to tensor<2x2xf64> + toy.print %1 : tensor<2x2xf64> + toy.return + } + ``` + }]; + + let arguments = (ins + SymbolNameAttr:$sym_name, + TypeAttrOf:$type + ); + let regions = (region AnyRegion:$body); + + let builders = [OpBuilder<(ins + "StringRef":$name, "FunctionType":$type, + CArg<"ArrayRef", "{}">:$attrs) + >]; + let extraClassDeclaration = [{ + /// Returns the type of this function. + /// FIXME: We should drive this via the ODS `type` param. + FunctionType getType() { + return getTypeAttr().getValue().cast(); + } + + //===------------------------------------------------------------------===// + // FunctionOpInterface Methods + //===------------------------------------------------------------------===// + + /// Returns the argument types of this function. + ArrayRef getArgumentTypes() { return type().getInputs(); } + + /// Returns the result types of this function. + ArrayRef getResultTypes() { return type().getResults(); } + + /// Verify the type attribute of this function. Returns failure and emits + /// an error if the attribute is invalid. 
+ LogicalResult verifyType() { + auto type = getTypeAttr().getValue(); + if (!type.isa()) + return emitOpError("requires '" + FunctionOpInterface::getTypeAttrName() + + "' attribute of function type"); + return success(); + } + }]; + let hasCustomAssemblyFormat = 1; + let skipDefaultBuilders = 1; + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // GenericCallOp //===----------------------------------------------------------------------===// diff --git a/mlir/examples/toy/Ch3/mlir/Dialect.cpp b/mlir/examples/toy/Ch3/mlir/Dialect.cpp --- a/mlir/examples/toy/Ch3/mlir/Dialect.cpp +++ b/mlir/examples/toy/Ch3/mlir/Dialect.cpp @@ -15,6 +15,7 @@ #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/FunctionImplementation.h" #include "mlir/IR/OpImplementation.h" using namespace mlir; @@ -174,6 +175,57 @@ void AddOp::print(mlir::OpAsmPrinter &p) { printBinaryOp(p, *this); } +//===----------------------------------------------------------------------===// +// FuncOp +//===----------------------------------------------------------------------===// + +void FuncOp::build(mlir::OpBuilder &builder, mlir::OperationState &state, + llvm::StringRef name, mlir::FunctionType type, + llvm::ArrayRef attrs) { + // FunctionOpInterface provides a convenient `build` method that will populate + // the state of our FuncOp, and create an entry block. + buildWithEntryBlock(builder, state, name, type, attrs, type.getInputs()); +} + +mlir::ParseResult FuncOp::parse(mlir::OpAsmParser &parser, + mlir::OperationState &result) { + // Dispatch to the FunctionOpInterface provided utility method that parses the + // function operation. + auto buildFuncType = + [](mlir::Builder &builder, llvm::ArrayRef argTypes, + llvm::ArrayRef results, + mlir::function_interface_impl::VariadicFlag, + std::string &) { return builder.getFunctionType(argTypes, results); }; + + return mlir::function_interface_impl::parseFunctionOp( + parser, result, /*allowVariadic=*/false, buildFuncType); +} + +void FuncOp::print(mlir::OpAsmPrinter &p) { + // Dispatch to the FunctionOpInterface provided utility method that prints the + // function operation. + mlir::function_interface_impl::printFunctionOp(p, *this, + /*isVariadic=*/false); +} + +mlir::LogicalResult FuncOp::verify() { + // Verify that the argument list of the function and the arg list of the entry + // block line up. The trait already verified that the number of arguments is + // the same between the signature and the block. + llvm::ArrayRef fnInputTypes = getType().getInputs(); + mlir::Block &entryBlock = front(); + for (unsigned i = 0, e = entryBlock.getNumArguments(); i != e; ++i) { + if (fnInputTypes[i] != entryBlock.getArgument(i).getType()) { + return emitOpError("type of entry block argument #") + << i << '(' << entryBlock.getArgument(i).getType() + << ") must match the type of the corresponding argument in " + << "function signature(" << fnInputTypes[i] << ')'; + } + } + + return success(); +} + //===----------------------------------------------------------------------===// // GenericCallOp //===----------------------------------------------------------------------===// diff --git a/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp --- a/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp +++ b/mlir/examples/toy/Ch3/mlir/MLIRGen.cpp @@ -58,12 +58,8 @@ // add them to the module. 
theModule = mlir::ModuleOp::create(builder.getUnknownLoc()); - for (FunctionAST &f : moduleAST) { - auto func = mlirGen(f); - if (!func) - return nullptr; - theModule.push_back(func); - } + for (FunctionAST &f : moduleAST) + mlirGen(f); // Verify the module after we have finished constructing it, this will check // the structural properties of the IR and invoke any specific verifiers we @@ -108,7 +104,7 @@ /// Create the prototype for an MLIR function with as many arguments as the /// provided Toy AST prototype. - mlir::FuncOp mlirGen(PrototypeAST &proto) { + mlir::toy::FuncOp mlirGen(PrototypeAST &proto) { auto location = loc(proto.loc()); // This is a generic function, the return type will be inferred later. @@ -116,23 +112,23 @@ llvm::SmallVector argTypes(proto.getArgs().size(), getType(VarType{})); auto funcType = builder.getFunctionType(argTypes, llvm::None); - return mlir::FuncOp::create(location, proto.getName(), funcType); + return builder.create(location, proto.getName(), + funcType); } /// Emit a new function and add it to the MLIR module. - mlir::FuncOp mlirGen(FunctionAST &funcAST) { + mlir::toy::FuncOp mlirGen(FunctionAST &funcAST) { // Create a scope in the symbol table to hold variable declarations. ScopedHashTableScope varScope(symbolTable); // Create an MLIR function for the given prototype. - mlir::FuncOp function(mlirGen(*funcAST.getProto())); + builder.setInsertionPointToEnd(theModule.getBody()); + mlir::toy::FuncOp function = mlirGen(*funcAST.getProto()); if (!function) return nullptr; // Let's start the body of the function now! - // In MLIR the entry block of the function is special: it must have the same - // argument list as the function itself. - auto &entryBlock = *function.addEntryBlock(); + mlir::Block &entryBlock = function.front(); auto protoArgs = funcAST.getProto()->getArgs(); // Declare all the function arguments in the symbol table. diff --git a/mlir/examples/toy/Ch3/toyc.cpp b/mlir/examples/toy/Ch3/toyc.cpp --- a/mlir/examples/toy/Ch3/toyc.cpp +++ b/mlir/examples/toy/Ch3/toyc.cpp @@ -118,7 +118,7 @@ applyPassManagerCLOptions(pm); // Add a run of the canonicalizer to optimize the mlir module. 
- pm.addNestedPass(mlir::createCanonicalizerPass()); + pm.addNestedPass(mlir::createCanonicalizerPass()); if (mlir::failed(pm.run(*module))) return 4; } diff --git a/mlir/examples/toy/Ch4/include/toy/Dialect.h b/mlir/examples/toy/Ch4/include/toy/Dialect.h --- a/mlir/examples/toy/Ch4/include/toy/Dialect.h +++ b/mlir/examples/toy/Ch4/include/toy/Dialect.h @@ -14,9 +14,11 @@ #ifndef MLIR_TUTORIAL_TOY_DIALECT_H_ #define MLIR_TUTORIAL_TOY_DIALECT_H_ -#include "mlir/IR/BuiltinOps.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Dialect.h" +#include "mlir/IR/FunctionInterfaces.h" +#include "mlir/IR/SymbolTable.h" +#include "mlir/Interfaces/CallInterfaces.h" #include "mlir/Interfaces/CastInterfaces.h" #include "mlir/Interfaces/SideEffectInterfaces.h" #include "toy/ShapeInferenceInterface.h" diff --git a/mlir/examples/toy/Ch4/include/toy/Ops.td b/mlir/examples/toy/Ch4/include/toy/Ops.td --- a/mlir/examples/toy/Ch4/include/toy/Ops.td +++ b/mlir/examples/toy/Ch4/include/toy/Ops.td @@ -13,6 +13,8 @@ #ifndef TOY_OPS #define TOY_OPS +include "mlir/IR/FunctionInterfaces.td" +include "mlir/IR/SymbolInterfaces.td" include "mlir/Interfaces/CallInterfaces.td" include "mlir/Interfaces/CastInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td" @@ -134,6 +136,73 @@ let assemblyFormat = "$input attr-dict `:` type($input) `to` type($output)"; } +//===----------------------------------------------------------------------===// +// FuncOp +//===----------------------------------------------------------------------===// + +def FuncOp : Toy_Op<"func", [ + DeclareOpInterfaceMethods, FunctionOpInterface, + IsolatedFromAbove, Symbol + ]> { + let summary = "user defined function operation"; + let description = [{ + The "toy.func" operation represents a user defined function. These are + callable SSA-region operations that contain toy computations. + + Example: + + ```mlir + toy.func @main() { + %0 = toy.constant dense<5.500000e+00> : tensor + %1 = toy.reshape(%0 : tensor) to tensor<2x2xf64> + toy.print %1 : tensor<2x2xf64> + toy.return + } + ``` + }]; + + let arguments = (ins + SymbolNameAttr:$sym_name, + TypeAttrOf:$type + ); + let regions = (region AnyRegion:$body); + + let builders = [OpBuilder<(ins + "StringRef":$name, "FunctionType":$type, + CArg<"ArrayRef", "{}">:$attrs) + >]; + let extraClassDeclaration = [{ + /// Returns the type of this function. + /// FIXME: We should drive this via the ODS `type` param. + FunctionType getType() { + return getTypeAttr().getValue().cast(); + } + + //===------------------------------------------------------------------===// + // FunctionOpInterface Methods + //===------------------------------------------------------------------===// + + /// Returns the argument types of this function. + ArrayRef getArgumentTypes() { return type().getInputs(); } + + /// Returns the result types of this function. + ArrayRef getResultTypes() { return type().getResults(); } + + /// Verify the type attribute of this function. Returns failure and emits + /// an error if the attribute is invalid. 
+ LogicalResult verifyType() { + auto type = getTypeAttr().getValue(); + if (!type.isa()) + return emitOpError("requires '" + FunctionOpInterface::getTypeAttrName() + + "' attribute of function type"); + return success(); + } + }]; + let hasCustomAssemblyFormat = 1; + let skipDefaultBuilders = 1; + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // GenericCallOp //===----------------------------------------------------------------------===// diff --git a/mlir/examples/toy/Ch4/mlir/Dialect.cpp b/mlir/examples/toy/Ch4/mlir/Dialect.cpp --- a/mlir/examples/toy/Ch4/mlir/Dialect.cpp +++ b/mlir/examples/toy/Ch4/mlir/Dialect.cpp @@ -15,6 +15,7 @@ #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/FunctionImplementation.h" #include "mlir/IR/OpImplementation.h" #include "mlir/Transforms/InliningUtils.h" @@ -48,6 +49,12 @@ return true; } + // All functions within toy can be inlined. + bool isLegalToInline(Region *, Region *, bool, + BlockAndValueMapping &) const final { + return true; + } + //===--------------------------------------------------------------------===// // Transformation Hooks //===--------------------------------------------------------------------===// @@ -257,6 +264,66 @@ return !input.hasRank() || !output.hasRank() || input == output; } +//===----------------------------------------------------------------------===// +// FuncOp +//===----------------------------------------------------------------------===// + +void FuncOp::build(mlir::OpBuilder &builder, mlir::OperationState &state, + llvm::StringRef name, mlir::FunctionType type, + llvm::ArrayRef attrs) { + // FunctionOpInterface provides a convenient `build` method that will populate + // the state of our FuncOp, and create an entry block. + buildWithEntryBlock(builder, state, name, type, attrs, type.getInputs()); +} + +mlir::ParseResult FuncOp::parse(mlir::OpAsmParser &parser, + mlir::OperationState &result) { + // Dispatch to the FunctionOpInterface provided utility method that parses the + // function operation. + auto buildFuncType = + [](mlir::Builder &builder, llvm::ArrayRef argTypes, + llvm::ArrayRef results, + mlir::function_interface_impl::VariadicFlag, + std::string &) { return builder.getFunctionType(argTypes, results); }; + + return mlir::function_interface_impl::parseFunctionOp( + parser, result, /*allowVariadic=*/false, buildFuncType); +} + +void FuncOp::print(mlir::OpAsmPrinter &p) { + // Dispatch to the FunctionOpInterface provided utility method that prints the + // function operation. + mlir::function_interface_impl::printFunctionOp(p, *this, + /*isVariadic=*/false); +} + +mlir::LogicalResult FuncOp::verify() { + // Verify that the argument list of the function and the arg list of the entry + // block line up. The trait already verified that the number of arguments is + // the same between the signature and the block. + llvm::ArrayRef fnInputTypes = getType().getInputs(); + mlir::Block &entryBlock = front(); + for (unsigned i = 0, e = entryBlock.getNumArguments(); i != e; ++i) { + if (fnInputTypes[i] != entryBlock.getArgument(i).getType()) { + return emitOpError("type of entry block argument #") + << i << '(' << entryBlock.getArgument(i).getType() + << ") must match the type of the corresponding argument in " + << "function signature(" << fnInputTypes[i] << ')'; + } + } + + return success(); +} + +/// Returns the region on the function operation that is callable. 
+mlir::Region *FuncOp::getCallableRegion() { return &getBody(); } + +/// Returns the results types that the callable region produces when +/// executed. +llvm::ArrayRef FuncOp::getCallableResults() { + return getType().getResults(); +} + //===----------------------------------------------------------------------===// // GenericCallOp //===----------------------------------------------------------------------===// diff --git a/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp --- a/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp +++ b/mlir/examples/toy/Ch4/mlir/MLIRGen.cpp @@ -58,12 +58,8 @@ // add them to the module. theModule = mlir::ModuleOp::create(builder.getUnknownLoc()); - for (FunctionAST &f : moduleAST) { - auto func = mlirGen(f); - if (!func) - return nullptr; - theModule.push_back(func); - } + for (FunctionAST &f : moduleAST) + mlirGen(f); // Verify the module after we have finished constructing it, this will check // the structural properties of the IR and invoke any specific verifiers we @@ -108,7 +104,7 @@ /// Create the prototype for an MLIR function with as many arguments as the /// provided Toy AST prototype. - mlir::FuncOp mlirGen(PrototypeAST &proto) { + mlir::toy::FuncOp mlirGen(PrototypeAST &proto) { auto location = loc(proto.loc()); // This is a generic function, the return type will be inferred later. @@ -116,23 +112,23 @@ llvm::SmallVector argTypes(proto.getArgs().size(), getType(VarType{})); auto funcType = builder.getFunctionType(argTypes, llvm::None); - return mlir::FuncOp::create(location, proto.getName(), funcType); + return builder.create(location, proto.getName(), + funcType); } /// Emit a new function and add it to the MLIR module. - mlir::FuncOp mlirGen(FunctionAST &funcAST) { + mlir::toy::FuncOp mlirGen(FunctionAST &funcAST) { // Create a scope in the symbol table to hold variable declarations. ScopedHashTableScope varScope(symbolTable); // Create an MLIR function for the given prototype. - mlir::FuncOp function(mlirGen(*funcAST.getProto())); + builder.setInsertionPointToEnd(theModule.getBody()); + mlir::toy::FuncOp function = mlirGen(*funcAST.getProto()); if (!function) return nullptr; // Let's start the body of the function now! - // In MLIR the entry block of the function is special: it must have the same - // argument list as the function itself. - auto &entryBlock = *function.addEntryBlock(); + mlir::Block &entryBlock = function.front(); auto protoArgs = funcAST.getProto()->getArgs(); // Declare all the function arguments in the symbol table. diff --git a/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp b/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp --- a/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp +++ b/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp @@ -45,7 +45,7 @@ /// 3) If the worklist is empty, the algorithm succeeded. /// class ShapeInferencePass - : public mlir::PassWrapper> { + : public mlir::PassWrapper> { public: void runOnOperation() override { auto f = getOperation(); diff --git a/mlir/examples/toy/Ch4/toyc.cpp b/mlir/examples/toy/Ch4/toyc.cpp --- a/mlir/examples/toy/Ch4/toyc.cpp +++ b/mlir/examples/toy/Ch4/toyc.cpp @@ -123,7 +123,7 @@ // Now that there is only one function, we can infer the shapes of each of // the operations. 
- mlir::OpPassManager &optPM = pm.nest(); + mlir::OpPassManager &optPM = pm.nest(); optPM.addPass(mlir::toy::createShapeInferencePass()); optPM.addPass(mlir::createCanonicalizerPass()); optPM.addPass(mlir::createCSEPass()); diff --git a/mlir/examples/toy/Ch5/include/toy/Dialect.h b/mlir/examples/toy/Ch5/include/toy/Dialect.h --- a/mlir/examples/toy/Ch5/include/toy/Dialect.h +++ b/mlir/examples/toy/Ch5/include/toy/Dialect.h @@ -14,9 +14,11 @@ #ifndef MLIR_TUTORIAL_TOY_DIALECT_H_ #define MLIR_TUTORIAL_TOY_DIALECT_H_ -#include "mlir/IR/BuiltinOps.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Dialect.h" +#include "mlir/IR/FunctionInterfaces.h" +#include "mlir/IR/SymbolTable.h" +#include "mlir/Interfaces/CallInterfaces.h" #include "mlir/Interfaces/CastInterfaces.h" #include "mlir/Interfaces/SideEffectInterfaces.h" #include "toy/ShapeInferenceInterface.h" diff --git a/mlir/examples/toy/Ch5/include/toy/Ops.td b/mlir/examples/toy/Ch5/include/toy/Ops.td --- a/mlir/examples/toy/Ch5/include/toy/Ops.td +++ b/mlir/examples/toy/Ch5/include/toy/Ops.td @@ -13,6 +13,8 @@ #ifndef TOY_OPS #define TOY_OPS +include "mlir/IR/FunctionInterfaces.td" +include "mlir/IR/SymbolInterfaces.td" include "mlir/Interfaces/CallInterfaces.td" include "mlir/Interfaces/CastInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td" @@ -134,6 +136,73 @@ let assemblyFormat = "$input attr-dict `:` type($input) `to` type($output)"; } +//===----------------------------------------------------------------------===// +// FuncOp +//===----------------------------------------------------------------------===// + +def FuncOp : Toy_Op<"func", [ + DeclareOpInterfaceMethods, FunctionOpInterface, + IsolatedFromAbove, Symbol + ]> { + let summary = "user defined function operation"; + let description = [{ + The "toy.func" operation represents a user defined function. These are + callable SSA-region operations that contain toy computations. + + Example: + + ```mlir + toy.func @main() { + %0 = toy.constant dense<5.500000e+00> : tensor + %1 = toy.reshape(%0 : tensor) to tensor<2x2xf64> + toy.print %1 : tensor<2x2xf64> + toy.return + } + ``` + }]; + + let arguments = (ins + SymbolNameAttr:$sym_name, + TypeAttrOf:$type + ); + let regions = (region AnyRegion:$body); + + let builders = [OpBuilder<(ins + "StringRef":$name, "FunctionType":$type, + CArg<"ArrayRef", "{}">:$attrs) + >]; + let extraClassDeclaration = [{ + /// Returns the type of this function. + /// FIXME: We should drive this via the ODS `type` param. + FunctionType getType() { + return getTypeAttr().getValue().cast(); + } + + //===------------------------------------------------------------------===// + // FunctionOpInterface Methods + //===------------------------------------------------------------------===// + + /// Returns the argument types of this function. + ArrayRef getArgumentTypes() { return type().getInputs(); } + + /// Returns the result types of this function. + ArrayRef getResultTypes() { return type().getResults(); } + + /// Verify the type attribute of this function. Returns failure and emits + /// an error if the attribute is invalid. 
+ LogicalResult verifyType() { + auto type = getTypeAttr().getValue(); + if (!type.isa()) + return emitOpError("requires '" + FunctionOpInterface::getTypeAttrName() + + "' attribute of function type"); + return success(); + } + }]; + let hasCustomAssemblyFormat = 1; + let skipDefaultBuilders = 1; + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // GenericCallOp //===----------------------------------------------------------------------===// diff --git a/mlir/examples/toy/Ch5/mlir/Dialect.cpp b/mlir/examples/toy/Ch5/mlir/Dialect.cpp --- a/mlir/examples/toy/Ch5/mlir/Dialect.cpp +++ b/mlir/examples/toy/Ch5/mlir/Dialect.cpp @@ -15,6 +15,7 @@ #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/FunctionImplementation.h" #include "mlir/IR/OpImplementation.h" #include "mlir/Transforms/InliningUtils.h" @@ -48,6 +49,12 @@ return true; } + // All functions within toy can be inlined. + bool isLegalToInline(Region *, Region *, bool, + BlockAndValueMapping &) const final { + return true; + } + //===--------------------------------------------------------------------===// // Transformation Hooks //===--------------------------------------------------------------------===// @@ -257,6 +264,66 @@ return !input.hasRank() || !output.hasRank() || input == output; } +//===----------------------------------------------------------------------===// +// FuncOp +//===----------------------------------------------------------------------===// + +void FuncOp::build(mlir::OpBuilder &builder, mlir::OperationState &state, + llvm::StringRef name, mlir::FunctionType type, + llvm::ArrayRef attrs) { + // FunctionOpInterface provides a convenient `build` method that will populate + // the state of our FuncOp, and create an entry block. + buildWithEntryBlock(builder, state, name, type, attrs, type.getInputs()); +} + +mlir::ParseResult FuncOp::parse(mlir::OpAsmParser &parser, + mlir::OperationState &result) { + // Dispatch to the FunctionOpInterface provided utility method that parses the + // function operation. + auto buildFuncType = + [](mlir::Builder &builder, llvm::ArrayRef argTypes, + llvm::ArrayRef results, + mlir::function_interface_impl::VariadicFlag, + std::string &) { return builder.getFunctionType(argTypes, results); }; + + return mlir::function_interface_impl::parseFunctionOp( + parser, result, /*allowVariadic=*/false, buildFuncType); +} + +void FuncOp::print(mlir::OpAsmPrinter &p) { + // Dispatch to the FunctionOpInterface provided utility method that prints the + // function operation. + mlir::function_interface_impl::printFunctionOp(p, *this, + /*isVariadic=*/false); +} + +mlir::LogicalResult FuncOp::verify() { + // Verify that the argument list of the function and the arg list of the entry + // block line up. The trait already verified that the number of arguments is + // the same between the signature and the block. + llvm::ArrayRef fnInputTypes = getType().getInputs(); + mlir::Block &entryBlock = front(); + for (unsigned i = 0, e = entryBlock.getNumArguments(); i != e; ++i) { + if (fnInputTypes[i] != entryBlock.getArgument(i).getType()) { + return emitOpError("type of entry block argument #") + << i << '(' << entryBlock.getArgument(i).getType() + << ") must match the type of the corresponding argument in " + << "function signature(" << fnInputTypes[i] << ')'; + } + } + + return success(); +} + +/// Returns the region on the function operation that is callable. 
+mlir::Region *FuncOp::getCallableRegion() { return &getBody(); } + +/// Returns the results types that the callable region produces when +/// executed. +llvm::ArrayRef FuncOp::getCallableResults() { + return getType().getResults(); +} + //===----------------------------------------------------------------------===// // GenericCallOp //===----------------------------------------------------------------------===// diff --git a/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp --- a/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp +++ b/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp @@ -12,6 +12,7 @@ // //===----------------------------------------------------------------------===// +#include "mlir/IR/BuiltinDialect.h" #include "toy/Dialect.h" #include "toy/Passes.h" @@ -197,6 +198,37 @@ } }; +//===----------------------------------------------------------------------===// +// ToyToAffine RewritePatterns: Func operations +//===----------------------------------------------------------------------===// + +struct FuncOpLowering : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(toy::FuncOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const final { + // We only lower the main function as we expect that all other functions + // have been inlined. + if (op.getName() != "main") + return failure(); + + // Verify that the given main has no inputs and results. + if (op.getNumArguments() || op.getType().getNumResults()) { + return rewriter.notifyMatchFailure(op, [](Diagnostic &diag) { + diag << "expected 'main' to have 0 inputs and 0 results"; + }); + } + + // Create a new non-toy function, with the same region. + auto func = + rewriter.create(op.getLoc(), op.getName(), op.getType()); + rewriter.inlineRegionBefore(op.getRegion(), func.getBody(), func.end()); + rewriter.eraseOp(op); + return success(); + } +}; + //===----------------------------------------------------------------------===// // ToyToAffine RewritePatterns: Print operations //===----------------------------------------------------------------------===// @@ -277,7 +309,7 @@ /// rest of the code in the Toy dialect. namespace { struct ToyToAffineLoweringPass - : public PassWrapper> { + : public PassWrapper> { void getDependentDialects(DialectRegistry ®istry) const override { registry.insert(); } @@ -286,19 +318,6 @@ } // namespace void ToyToAffineLoweringPass::runOnOperation() { - FuncOp function = getOperation(); - - // We only lower the main function as we expect that all other functions have - // been inlined. - if (function.getName() != "main") - return; - - // Verify that the given main has no inputs and results. - if (function.getNumArguments() || function.getType().getNumResults()) { - function.emitError("expected 'main' to have 0 inputs and 0 results"); - return signalPassFailure(); - } - // The first thing to define is the conversion target. This will define the // final target for this lowering. ConversionTarget target(getContext()); @@ -306,8 +325,9 @@ // We define the specific operations, or dialects, that are legal targets for // this lowering. In our case, we are lowering to a combination of the // `Affine`, `Arithmetic`, `Func`, and `MemRef` dialects. - target.addLegalDialect(); + target + .addLegalDialect(); // We also define the Toy dialect as Illegal so that the conversion will fail // if any of these operations are *not* converted. 
Given that we actually want @@ -324,7 +344,7 @@ // Now that the conversion target has been defined, we just need to provide // the set of patterns that will lower the Toy operations. RewritePatternSet patterns(&getContext()); - patterns.add( &getContext()); diff --git a/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp --- a/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp +++ b/mlir/examples/toy/Ch5/mlir/MLIRGen.cpp @@ -58,12 +58,8 @@ // add them to the module. theModule = mlir::ModuleOp::create(builder.getUnknownLoc()); - for (FunctionAST &f : moduleAST) { - auto func = mlirGen(f); - if (!func) - return nullptr; - theModule.push_back(func); - } + for (FunctionAST &f : moduleAST) + mlirGen(f); // Verify the module after we have finished constructing it, this will check // the structural properties of the IR and invoke any specific verifiers we @@ -108,7 +104,7 @@ /// Create the prototype for an MLIR function with as many arguments as the /// provided Toy AST prototype. - mlir::FuncOp mlirGen(PrototypeAST &proto) { + mlir::toy::FuncOp mlirGen(PrototypeAST &proto) { auto location = loc(proto.loc()); // This is a generic function, the return type will be inferred later. @@ -116,23 +112,23 @@ llvm::SmallVector argTypes(proto.getArgs().size(), getType(VarType{})); auto funcType = builder.getFunctionType(argTypes, llvm::None); - return mlir::FuncOp::create(location, proto.getName(), funcType); + return builder.create(location, proto.getName(), + funcType); } /// Emit a new function and add it to the MLIR module. - mlir::FuncOp mlirGen(FunctionAST &funcAST) { + mlir::toy::FuncOp mlirGen(FunctionAST &funcAST) { // Create a scope in the symbol table to hold variable declarations. ScopedHashTableScope varScope(symbolTable); // Create an MLIR function for the given prototype. - mlir::FuncOp function(mlirGen(*funcAST.getProto())); + builder.setInsertionPointToEnd(theModule.getBody()); + mlir::toy::FuncOp function = mlirGen(*funcAST.getProto()); if (!function) return nullptr; // Let's start the body of the function now! - // In MLIR the entry block of the function is special: it must have the same - // argument list as the function itself. - auto &entryBlock = *function.addEntryBlock(); + mlir::Block &entryBlock = function.front(); auto protoArgs = funcAST.getProto()->getArgs(); // Declare all the function arguments in the symbol table. diff --git a/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp b/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp --- a/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp +++ b/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp @@ -45,7 +45,7 @@ /// 3) If the worklist is empty, the algorithm succeeded. /// class ShapeInferencePass - : public mlir::PassWrapper> { + : public mlir::PassWrapper> { public: void runOnOperation() override { auto f = getOperation(); diff --git a/mlir/examples/toy/Ch5/toyc.cpp b/mlir/examples/toy/Ch5/toyc.cpp --- a/mlir/examples/toy/Ch5/toyc.cpp +++ b/mlir/examples/toy/Ch5/toyc.cpp @@ -130,17 +130,18 @@ // Now that there is only one function, we can infer the shapes of each of // the operations. - mlir::OpPassManager &optPM = pm.nest(); + mlir::OpPassManager &optPM = pm.nest(); optPM.addPass(mlir::toy::createShapeInferencePass()); optPM.addPass(mlir::createCanonicalizerPass()); optPM.addPass(mlir::createCSEPass()); } if (isLoweringToAffine) { - mlir::OpPassManager &optPM = pm.nest(); + // Partially lower the toy dialect. 
+ pm.addPass(mlir::toy::createLowerToAffinePass()); - // Partially lower the toy dialect with a few cleanups afterwards. - optPM.addPass(mlir::toy::createLowerToAffinePass()); + // Add a few cleanups post lowering. + mlir::OpPassManager &optPM = pm.nest(); optPM.addPass(mlir::createCanonicalizerPass()); optPM.addPass(mlir::createCSEPass()); diff --git a/mlir/examples/toy/Ch6/include/toy/Dialect.h b/mlir/examples/toy/Ch6/include/toy/Dialect.h --- a/mlir/examples/toy/Ch6/include/toy/Dialect.h +++ b/mlir/examples/toy/Ch6/include/toy/Dialect.h @@ -14,9 +14,11 @@ #ifndef MLIR_TUTORIAL_TOY_DIALECT_H_ #define MLIR_TUTORIAL_TOY_DIALECT_H_ -#include "mlir/IR/BuiltinOps.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Dialect.h" +#include "mlir/IR/FunctionInterfaces.h" +#include "mlir/IR/SymbolTable.h" +#include "mlir/Interfaces/CallInterfaces.h" #include "mlir/Interfaces/CastInterfaces.h" #include "mlir/Interfaces/SideEffectInterfaces.h" #include "toy/ShapeInferenceInterface.h" diff --git a/mlir/examples/toy/Ch6/include/toy/Ops.td b/mlir/examples/toy/Ch6/include/toy/Ops.td --- a/mlir/examples/toy/Ch6/include/toy/Ops.td +++ b/mlir/examples/toy/Ch6/include/toy/Ops.td @@ -13,6 +13,8 @@ #ifndef TOY_OPS #define TOY_OPS +include "mlir/IR/FunctionInterfaces.td" +include "mlir/IR/SymbolInterfaces.td" include "mlir/Interfaces/CallInterfaces.td" include "mlir/Interfaces/CastInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td" @@ -134,6 +136,73 @@ let assemblyFormat = "$input attr-dict `:` type($input) `to` type($output)"; } +//===----------------------------------------------------------------------===// +// FuncOp +//===----------------------------------------------------------------------===// + +def FuncOp : Toy_Op<"func", [ + DeclareOpInterfaceMethods, FunctionOpInterface, + IsolatedFromAbove, Symbol + ]> { + let summary = "user defined function operation"; + let description = [{ + The "toy.func" operation represents a user defined function. These are + callable SSA-region operations that contain toy computations. + + Example: + + ```mlir + toy.func @main() { + %0 = toy.constant dense<5.500000e+00> : tensor + %1 = toy.reshape(%0 : tensor) to tensor<2x2xf64> + toy.print %1 : tensor<2x2xf64> + toy.return + } + ``` + }]; + + let arguments = (ins + SymbolNameAttr:$sym_name, + TypeAttrOf:$type + ); + let regions = (region AnyRegion:$body); + + let builders = [OpBuilder<(ins + "StringRef":$name, "FunctionType":$type, + CArg<"ArrayRef", "{}">:$attrs) + >]; + let extraClassDeclaration = [{ + /// Returns the type of this function. + /// FIXME: We should drive this via the ODS `type` param. + FunctionType getType() { + return getTypeAttr().getValue().cast(); + } + + //===------------------------------------------------------------------===// + // FunctionOpInterface Methods + //===------------------------------------------------------------------===// + + /// Returns the argument types of this function. + ArrayRef getArgumentTypes() { return type().getInputs(); } + + /// Returns the result types of this function. + ArrayRef getResultTypes() { return type().getResults(); } + + /// Verify the type attribute of this function. Returns failure and emits + /// an error if the attribute is invalid. 
diff --git a/mlir/examples/toy/Ch6/mlir/Dialect.cpp b/mlir/examples/toy/Ch6/mlir/Dialect.cpp
--- a/mlir/examples/toy/Ch6/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch6/mlir/Dialect.cpp
@@ -15,6 +15,7 @@
 #include "mlir/IR/Builders.h"
 #include "mlir/IR/BuiltinTypes.h"
+#include "mlir/IR/FunctionImplementation.h"
 #include "mlir/IR/OpImplementation.h"
 #include "mlir/Transforms/InliningUtils.h"
@@ -48,6 +49,12 @@
     return true;
   }

+  // All functions within toy can be inlined.
+  bool isLegalToInline(Region *, Region *, bool,
+                       BlockAndValueMapping &) const final {
+    return true;
+  }
+
   //===--------------------------------------------------------------------===//
   // Transformation Hooks
   //===--------------------------------------------------------------------===//
@@ -257,6 +264,66 @@
   return !input.hasRank() || !output.hasRank() || input == output;
 }

+//===----------------------------------------------------------------------===//
+// FuncOp
+//===----------------------------------------------------------------------===//
+
+void FuncOp::build(mlir::OpBuilder &builder, mlir::OperationState &state,
+                   llvm::StringRef name, mlir::FunctionType type,
+                   llvm::ArrayRef<mlir::NamedAttribute> attrs) {
+  // FunctionOpInterface provides a convenient `build` method that will populate
+  // the state of our FuncOp, and create an entry block.
+  buildWithEntryBlock(builder, state, name, type, attrs, type.getInputs());
+}
+
+mlir::ParseResult FuncOp::parse(mlir::OpAsmParser &parser,
+                                mlir::OperationState &result) {
+  // Dispatch to the FunctionOpInterface provided utility method that parses the
+  // function operation.
+  auto buildFuncType =
+      [](mlir::Builder &builder, llvm::ArrayRef<mlir::Type> argTypes,
+         llvm::ArrayRef<mlir::Type> results,
+         mlir::function_interface_impl::VariadicFlag,
+         std::string &) { return builder.getFunctionType(argTypes, results); };
+
+  return mlir::function_interface_impl::parseFunctionOp(
+      parser, result, /*allowVariadic=*/false, buildFuncType);
+}
+
+void FuncOp::print(mlir::OpAsmPrinter &p) {
+  // Dispatch to the FunctionOpInterface provided utility method that prints the
+  // function operation.
+  mlir::function_interface_impl::printFunctionOp(p, *this,
+                                                 /*isVariadic=*/false);
+}
+
+mlir::LogicalResult FuncOp::verify() {
+  // Verify that the argument list of the function and the arg list of the entry
+  // block line up. The trait already verified that the number of arguments is
+  // the same between the signature and the block.
+  llvm::ArrayRef<mlir::Type> fnInputTypes = getType().getInputs();
+  mlir::Block &entryBlock = front();
+  for (unsigned i = 0, e = entryBlock.getNumArguments(); i != e; ++i) {
+    if (fnInputTypes[i] != entryBlock.getArgument(i).getType()) {
+      return emitOpError("type of entry block argument #")
+             << i << '(' << entryBlock.getArgument(i).getType()
+             << ") must match the type of the corresponding argument in "
+             << "function signature(" << fnInputTypes[i] << ')';
+    }
+  }
+
+  return success();
+}
+
+/// Returns the region on the function operation that is callable.
+mlir::Region *FuncOp::getCallableRegion() { return &getBody(); }
+
+/// Returns the result types that the callable region produces when
+/// executed.
+llvm::ArrayRef<mlir::Type> FuncOp::getCallableResults() {
+  return getType().getResults();
+}
+
 //===----------------------------------------------------------------------===//
 // GenericCallOp
 //===----------------------------------------------------------------------===//
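The two `CallableOpInterface` methods at the end are what let generic, dialect-agnostic clients such as the inliner look through a call without knowing anything about Toy. A hedged sketch of that interplay, using the standard interface APIs (the helper name is illustrative):

```c++
#include "mlir/Interfaces/CallInterfaces.h"

// Sketch: walk from a call-like op (e.g. toy.generic_call) to the region that
// implements its callee. This mirrors what inlining utilities do internally.
mlir::Region *resolveCalleeBody(mlir::CallOpInterface call) {
  // resolveCallable() chases the callee (here, the `callee` symbol reference
  // attribute) to its defining operation, e.g. a toy.func.
  mlir::Operation *callee = call.resolveCallable();
  auto callable = llvm::dyn_cast_or_null<mlir::CallableOpInterface>(callee);
  if (!callable)
    return nullptr;
  // For toy.func this returns &getBody(), as defined above.
  return callable.getCallableRegion();
}
```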
diff --git a/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
--- a/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
+++ b/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
@@ -12,6 +12,7 @@
 //
 //===----------------------------------------------------------------------===//

+#include "mlir/IR/BuiltinDialect.h"
 #include "toy/Dialect.h"
 #include "toy/Passes.h"
@@ -197,6 +198,37 @@
   }
 };

+//===----------------------------------------------------------------------===//
+// ToyToAffine RewritePatterns: Func operations
+//===----------------------------------------------------------------------===//
+
+struct FuncOpLowering : public OpConversionPattern<toy::FuncOp> {
+  using OpConversionPattern<toy::FuncOp>::OpConversionPattern;
+
+  LogicalResult
+  matchAndRewrite(toy::FuncOp op, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const final {
+    // We only lower the main function as we expect that all other functions
+    // have been inlined.
+    if (op.getName() != "main")
+      return failure();
+
+    // Verify that the given main has no inputs and results.
+    if (op.getNumArguments() || op.getType().getNumResults()) {
+      return rewriter.notifyMatchFailure(op, [](Diagnostic &diag) {
+        diag << "expected 'main' to have 0 inputs and 0 results";
+      });
+    }
+
+    // Create a new non-toy function, with the same region.
+    auto func = rewriter.create<mlir::FuncOp>(op.getLoc(), op.getName(),
+                                              op.getType());
+    rewriter.inlineRegionBefore(op.getRegion(), func.getBody(), func.end());
+    rewriter.eraseOp(op);
+    return success();
+  }
+};
+
 //===----------------------------------------------------------------------===//
 // ToyToAffine RewritePatterns: Print operations
 //===----------------------------------------------------------------------===//
@@ -277,7 +309,7 @@
 /// rest of the code in the Toy dialect.
 namespace {
 struct ToyToAffineLoweringPass
-    : public PassWrapper<ToyToAffineLoweringPass, OperationPass<FuncOp>> {
+    : public PassWrapper<ToyToAffineLoweringPass, OperationPass<ModuleOp>> {
   void getDependentDialects(DialectRegistry &registry) const override {
     registry.insert<AffineDialect, func::FuncDialect, memref::MemRefDialect>();
   }
@@ -286,19 +318,6 @@
 } // namespace

 void ToyToAffineLoweringPass::runOnOperation() {
-  auto function = getOperation();
-
-  // We only lower the main function as we expect that all other functions have
-  // been inlined.
-  if (function.getName() != "main")
-    return;
-
-  // Verify that the given main has no inputs and results.
-  if (function.getNumArguments() || function.getType().getNumResults()) {
-    function.emitError("expected 'main' to have 0 inputs and 0 results");
-    return signalPassFailure();
-  }
-
   // The first thing to define is the conversion target. This will define the
   // final target for this lowering.
   ConversionTarget target(getContext());
@@ -306,8 +325,9 @@
   // We define the specific operations, or dialects, that are legal targets for
   // this lowering. In our case, we are lowering to a combination of the
   // `Affine`, `Arithmetic`, `Func`, and `MemRef` dialects.
-  target.addLegalDialect<AffineDialect, arith::ArithmeticDialect,
-                         func::FuncDialect, memref::MemRefDialect>();
+  target
+      .addLegalDialect<AffineDialect, BuiltinDialect, arith::ArithmeticDialect,
+                       func::FuncDialect, memref::MemRefDialect>();

   // We also define the Toy dialect as Illegal so that the conversion will fail
   // if any of these operations are *not* converted. Given that we actually want
@@ -324,7 +344,7 @@
   // Now that the conversion target has been defined, we just need to provide
   // the set of patterns that will lower the Toy operations.
   RewritePatternSet patterns(&getContext());
-  patterns.add<AddOpLowering, ConstantOpLowering, MulOpLowering,
-               PrintOpLowering, ReturnOpLowering, TransposeOpLowering>(
-      &getContext());
+  patterns.add<AddOpLowering, ConstantOpLowering, FuncOpLowering, MulOpLowering,
+               PrintOpLowering, ReturnOpLowering, TransposeOpLowering>(
+      &getContext());
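With `FuncOpLowering` registered, the surviving `runOnOperation` body is a plain partial-conversion driver over the module. A condensed free-standing sketch of that driver; the dialect list follows the comment in the hunk above, and since the original template arguments were lost to formatting, treat the exact lists and include paths as a best-effort reconstruction of the commit-era API:

```c++
#include "llvm/ADT/STLExtras.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/IR/BuiltinDialect.h"
#include "mlir/Transforms/DialectConversion.h"
#include "toy/Dialect.h"

// Sketch: the module-level partial lowering. Everything Toy is illegal except
// toy.print, which is allowed to survive until the later LLVM lowering.
mlir::LogicalResult lowerToyToAffine(mlir::ModuleOp module,
                                     mlir::RewritePatternSet &&patterns) {
  mlir::ConversionTarget target(*module.getContext());
  target.addLegalDialect<mlir::AffineDialect, mlir::BuiltinDialect,
                         mlir::arith::ArithmeticDialect,
                         mlir::func::FuncDialect, mlir::memref::MemRefDialect>();
  target.addIllegalDialect<mlir::toy::ToyDialect>();
  target.addDynamicallyLegalOp<mlir::toy::PrintOp>([](mlir::toy::PrintOp op) {
    // toy.print becomes legal once its operands no longer carry tensor types.
    return llvm::none_of(op->getOperandTypes(), [](mlir::Type type) {
      return type.isa<mlir::TensorType>();
    });
  });

  // Partial conversion: ops not covered by the target/patterns may remain.
  return mlir::applyPartialConversion(module, target, std::move(patterns));
}
```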
diff --git a/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp
--- a/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch6/mlir/MLIRGen.cpp
@@ -58,12 +58,8 @@
   // add them to the module.
   theModule = mlir::ModuleOp::create(builder.getUnknownLoc());

-  for (FunctionAST &f : moduleAST) {
-    auto func = mlirGen(f);
-    if (!func)
-      return nullptr;
-    theModule.push_back(func);
-  }
+  for (FunctionAST &f : moduleAST)
+    mlirGen(f);

   // Verify the module after we have finished constructing it, this will check
   // the structural properties of the IR and invoke any specific verifiers we
@@ -108,7 +104,7 @@
   /// Create the prototype for an MLIR function with as many arguments as the
   /// provided Toy AST prototype.
-  mlir::FuncOp mlirGen(PrototypeAST &proto) {
+  mlir::toy::FuncOp mlirGen(PrototypeAST &proto) {
     auto location = loc(proto.loc());

     // This is a generic function, the return type will be inferred later.
@@ -116,23 +112,23 @@
     llvm::SmallVector<mlir::Type, 4> argTypes(proto.getArgs().size(),
                                               getType(VarType{}));
     auto funcType = builder.getFunctionType(argTypes, llvm::None);
-    return mlir::FuncOp::create(location, proto.getName(), funcType);
+    return builder.create<mlir::toy::FuncOp>(location, proto.getName(),
+                                             funcType);
   }

   /// Emit a new function and add it to the MLIR module.
-  mlir::FuncOp mlirGen(FunctionAST &funcAST) {
+  mlir::toy::FuncOp mlirGen(FunctionAST &funcAST) {
     // Create a scope in the symbol table to hold variable declarations.
     ScopedHashTableScope<llvm::StringRef, mlir::Value> varScope(symbolTable);

     // Create an MLIR function for the given prototype.
-    mlir::FuncOp function(mlirGen(*funcAST.getProto()));
+    builder.setInsertionPointToEnd(theModule.getBody());
+    mlir::toy::FuncOp function = mlirGen(*funcAST.getProto());
     if (!function)
       return nullptr;

     // Let's start the body of the function now!
-    // In MLIR the entry block of the function is special: it must have the same
-    // argument list as the function itself.
-    auto &entryBlock = *function.addEntryBlock();
+    mlir::Block &entryBlock = function.front();
     auto protoArgs = funcAST.getProto()->getArgs();

     // Declare all the function arguments in the symbol table.
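The MLIRGen change is subtle: `mlir::FuncOp::create` used to build a detached function that had to be `push_back`ed into the module, whereas `builder.create<...>` inserts at the builder's insertion point, which is why `setInsertionPointToEnd(theModule.getBody())` now precedes the prototype emission. A small illustrative sketch (the helper name is not from the patch):

```c++
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "toy/Dialect.h"

// Sketch: builder.create<OpTy>(...) both constructs an operation and splices
// it in at the current insertion point, so no explicit push_back is needed.
mlir::toy::FuncOp emitPrototype(mlir::OpBuilder &builder, mlir::ModuleOp module,
                                llvm::StringRef name,
                                mlir::FunctionType funcType,
                                mlir::Location loc) {
  // Functions always live at module scope; the builder may currently point
  // into the body of the previously emitted function, so re-anchor it first.
  builder.setInsertionPointToEnd(module.getBody());
  return builder.create<mlir::toy::FuncOp>(loc, name, funcType);
}
```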
diff --git a/mlir/examples/toy/Ch6/mlir/ShapeInferencePass.cpp b/mlir/examples/toy/Ch6/mlir/ShapeInferencePass.cpp
--- a/mlir/examples/toy/Ch6/mlir/ShapeInferencePass.cpp
+++ b/mlir/examples/toy/Ch6/mlir/ShapeInferencePass.cpp
@@ -45,7 +45,7 @@
 ///   3) If the worklist is empty, the algorithm succeeded.
 ///
 class ShapeInferencePass
-    : public mlir::PassWrapper<ShapeInferencePass, OperationPass<FuncOp>> {
+    : public mlir::PassWrapper<ShapeInferencePass, OperationPass<toy::FuncOp>> {
 public:
   void runOnOperation() override {
     auto f = getOperation();
diff --git a/mlir/examples/toy/Ch6/toyc.cpp b/mlir/examples/toy/Ch6/toyc.cpp
--- a/mlir/examples/toy/Ch6/toyc.cpp
+++ b/mlir/examples/toy/Ch6/toyc.cpp
@@ -146,17 +146,18 @@
     // Now that there is only one function, we can infer the shapes of each of
     // the operations.
-    mlir::OpPassManager &optPM = pm.nest<mlir::FuncOp>();
+    mlir::OpPassManager &optPM = pm.nest<mlir::toy::FuncOp>();
     optPM.addPass(mlir::toy::createShapeInferencePass());
     optPM.addPass(mlir::createCanonicalizerPass());
     optPM.addPass(mlir::createCSEPass());
   }

   if (isLoweringToAffine) {
-    mlir::OpPassManager &optPM = pm.nest<mlir::FuncOp>();
+    // Partially lower the toy dialect.
+    pm.addPass(mlir::toy::createLowerToAffinePass());

-    // Partially lower the toy dialect with a few cleanups afterwards.
-    optPM.addPass(mlir::toy::createLowerToAffinePass());
+    // Add a few cleanups post lowering.
+    mlir::OpPassManager &optPM = pm.nest<mlir::FuncOp>();
     optPM.addPass(mlir::createCanonicalizerPass());
     optPM.addPass(mlir::createCSEPass());
diff --git a/mlir/examples/toy/Ch7/include/toy/Dialect.h b/mlir/examples/toy/Ch7/include/toy/Dialect.h
--- a/mlir/examples/toy/Ch7/include/toy/Dialect.h
+++ b/mlir/examples/toy/Ch7/include/toy/Dialect.h
@@ -14,9 +14,11 @@
 #ifndef MLIR_TUTORIAL_TOY_DIALECT_H_
 #define MLIR_TUTORIAL_TOY_DIALECT_H_

-#include "mlir/IR/BuiltinOps.h"
 #include "mlir/IR/BuiltinTypes.h"
 #include "mlir/IR/Dialect.h"
+#include "mlir/IR/FunctionInterfaces.h"
+#include "mlir/IR/SymbolTable.h"
+#include "mlir/Interfaces/CallInterfaces.h"
 #include "mlir/Interfaces/CastInterfaces.h"
 #include "mlir/Interfaces/SideEffectInterfaces.h"
 #include "toy/ShapeInferenceInterface.h"
diff --git a/mlir/examples/toy/Ch7/include/toy/Ops.td b/mlir/examples/toy/Ch7/include/toy/Ops.td
--- a/mlir/examples/toy/Ch7/include/toy/Ops.td
+++ b/mlir/examples/toy/Ch7/include/toy/Ops.td
@@ -13,6 +13,8 @@
 #ifndef TOY_OPS
 #define TOY_OPS

+include "mlir/IR/FunctionInterfaces.td"
+include "mlir/IR/SymbolInterfaces.td"
 include "mlir/Interfaces/CallInterfaces.td"
 include "mlir/Interfaces/CastInterfaces.td"
 include "mlir/Interfaces/SideEffectInterfaces.td"
@@ -153,6 +155,73 @@
   let assemblyFormat = "$input attr-dict `:` type($input) `to` type($output)";
 }

+//===----------------------------------------------------------------------===//
+// FuncOp
+//===----------------------------------------------------------------------===//
+
+def FuncOp : Toy_Op<"func", [
+    DeclareOpInterfaceMethods<CallableOpInterface>, FunctionOpInterface,
+    IsolatedFromAbove, Symbol
+  ]> {
+  let summary = "user defined function operation";
+  let description = [{
+    The "toy.func" operation represents a user defined function. These are
+    callable SSA-region operations that contain toy computations.
+
+    Example:
+
+    ```mlir
+    toy.func @main() {
+      %0 = toy.constant dense<5.500000e+00> : tensor<f64>
+      %1 = toy.reshape(%0 : tensor<f64>) to tensor<2x2xf64>
+      toy.print %1 : tensor<2x2xf64>
+      toy.return
+    }
+    ```
+  }];
+
+  let arguments = (ins
+    SymbolNameAttr:$sym_name,
+    TypeAttrOf<FunctionType>:$type
+  );
+  let regions = (region AnyRegion:$body);
+
+  let builders = [OpBuilder<(ins
+    "StringRef":$name, "FunctionType":$type,
+    CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)
+  >];
+  let extraClassDeclaration = [{
+    /// Returns the type of this function.
+    /// FIXME: We should drive this via the ODS `type` param.
+    FunctionType getType() {
+      return getTypeAttr().getValue().cast<FunctionType>();
+    }
+
+    //===------------------------------------------------------------------===//
+    // FunctionOpInterface Methods
+    //===------------------------------------------------------------------===//
+
+    /// Returns the argument types of this function.
+    ArrayRef<Type> getArgumentTypes() { return type().getInputs(); }
+
+    /// Returns the result types of this function.
+    ArrayRef<Type> getResultTypes() { return type().getResults(); }
+
+    /// Verify the type attribute of this function. Returns failure and emits
+    /// an error if the attribute is invalid.
+    LogicalResult verifyType() {
+      auto type = getTypeAttr().getValue();
+      if (!type.isa<FunctionType>())
+        return emitOpError("requires '" +
+                           FunctionOpInterface::getTypeAttrName() +
+                           "' attribute of function type");
+      return success();
+    }
+  }];
+  let hasCustomAssemblyFormat = 1;
+  let skipDefaultBuilders = 1;
+  let hasVerifier = 1;
+}
+
 //===----------------------------------------------------------------------===//
 // GenericCallOp
 //===----------------------------------------------------------------------===//
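The Ch7 definition is identical to the Ch6 one. One consequence worth spelling out: `sym_name` and `type` are ordinary attributes, which is exactly why the generic form of the IR prints them alongside the other attributes, and why they remain accessible without any generated accessors. A hedged sketch (the helper name is illustrative):

```c++
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Operation.h"

// Sketch: read a toy.func signature through the raw attribute dictionary.
mlir::FunctionType getToyFuncType(mlir::Operation *op) {
  // Per the ODS above, `type` holds a TypeAttr wrapping the FunctionType.
  auto typeAttr = op->getAttrOfType<mlir::TypeAttr>("type");
  return typeAttr.getValue().cast<mlir::FunctionType>();
}
```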
diff --git a/mlir/examples/toy/Ch7/mlir/Dialect.cpp b/mlir/examples/toy/Ch7/mlir/Dialect.cpp
--- a/mlir/examples/toy/Ch7/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch7/mlir/Dialect.cpp
@@ -16,6 +16,7 @@
 #include "mlir/IR/Builders.h"
 #include "mlir/IR/BuiltinTypes.h"
 #include "mlir/IR/DialectImplementation.h"
+#include "mlir/IR/FunctionImplementation.h"
 #include "mlir/IR/OpImplementation.h"
 #include "mlir/Transforms/InliningUtils.h"
@@ -49,6 +50,12 @@
     return true;
   }

+  // All functions within toy can be inlined.
+  bool isLegalToInline(Region *, Region *, bool,
+                       BlockAndValueMapping &) const final {
+    return true;
+  }
+
   //===--------------------------------------------------------------------===//
   // Transformation Hooks
   //===--------------------------------------------------------------------===//
@@ -284,6 +291,66 @@
   return !input.hasRank() || !output.hasRank() || input == output;
 }

+//===----------------------------------------------------------------------===//
+// FuncOp
+//===----------------------------------------------------------------------===//
+
+void FuncOp::build(mlir::OpBuilder &builder, mlir::OperationState &state,
+                   llvm::StringRef name, mlir::FunctionType type,
+                   llvm::ArrayRef<mlir::NamedAttribute> attrs) {
+  // FunctionOpInterface provides a convenient `build` method that will populate
+  // the state of our FuncOp, and create an entry block.
+  buildWithEntryBlock(builder, state, name, type, attrs, type.getInputs());
+}
+
+mlir::ParseResult FuncOp::parse(mlir::OpAsmParser &parser,
+                                mlir::OperationState &result) {
+  // Dispatch to the FunctionOpInterface provided utility method that parses the
+  // function operation.
+  auto buildFuncType =
+      [](mlir::Builder &builder, llvm::ArrayRef<mlir::Type> argTypes,
+         llvm::ArrayRef<mlir::Type> results,
+         mlir::function_interface_impl::VariadicFlag,
+         std::string &) { return builder.getFunctionType(argTypes, results); };
+
+  return mlir::function_interface_impl::parseFunctionOp(
+      parser, result, /*allowVariadic=*/false, buildFuncType);
+}
+
+void FuncOp::print(mlir::OpAsmPrinter &p) {
+  // Dispatch to the FunctionOpInterface provided utility method that prints the
+  // function operation.
+  mlir::function_interface_impl::printFunctionOp(p, *this,
+                                                 /*isVariadic=*/false);
+}
+
+mlir::LogicalResult FuncOp::verify() {
+  // Verify that the argument list of the function and the arg list of the entry
+  // block line up. The trait already verified that the number of arguments is
+  // the same between the signature and the block.
+  llvm::ArrayRef<mlir::Type> fnInputTypes = getType().getInputs();
+  mlir::Block &entryBlock = front();
+  for (unsigned i = 0, e = entryBlock.getNumArguments(); i != e; ++i) {
+    if (fnInputTypes[i] != entryBlock.getArgument(i).getType()) {
+      return emitOpError("type of entry block argument #")
+             << i << '(' << entryBlock.getArgument(i).getType()
+             << ") must match the type of the corresponding argument in "
+             << "function signature(" << fnInputTypes[i] << ')';
+    }
+  }
+
+  return success();
+}
+
+/// Returns the region on the function operation that is callable.
+mlir::Region *FuncOp::getCallableRegion() { return &getBody(); }
+
+/// Returns the result types that the callable region produces when
+/// executed.
+llvm::ArrayRef<mlir::Type> FuncOp::getCallableResults() {
+  return getType().getResults();
+}
+
 //===----------------------------------------------------------------------===//
 // GenericCallOp
 //===----------------------------------------------------------------------===//
diff --git a/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
--- a/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
+++ b/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
@@ -12,6 +12,7 @@
 //
 //===----------------------------------------------------------------------===//

+#include "mlir/IR/BuiltinDialect.h"
 #include "toy/Dialect.h"
 #include "toy/Passes.h"
@@ -197,6 +198,37 @@
   }
 };

+//===----------------------------------------------------------------------===//
+// ToyToAffine RewritePatterns: Func operations
+//===----------------------------------------------------------------------===//
+
+struct FuncOpLowering : public OpConversionPattern<toy::FuncOp> {
+  using OpConversionPattern<toy::FuncOp>::OpConversionPattern;
+
+  LogicalResult
+  matchAndRewrite(toy::FuncOp op, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const final {
+    // We only lower the main function as we expect that all other functions
+    // have been inlined.
+    if (op.getName() != "main")
+      return failure();
+
+    // Verify that the given main has no inputs and results.
+    if (op.getNumArguments() || op.getType().getNumResults()) {
+      return rewriter.notifyMatchFailure(op, [](Diagnostic &diag) {
+        diag << "expected 'main' to have 0 inputs and 0 results";
+      });
+    }
+
+    // Create a new non-toy function, with the same region.
+    auto func = rewriter.create<mlir::FuncOp>(op.getLoc(), op.getName(),
+                                              op.getType());
+    rewriter.inlineRegionBefore(op.getRegion(), func.getBody(), func.end());
+    rewriter.eraseOp(op);
+    return success();
+  }
+};
+
 //===----------------------------------------------------------------------===//
 // ToyToAffine RewritePatterns: Print operations
 //===----------------------------------------------------------------------===//
@@ -277,7 +309,7 @@
 /// rest of the code in the Toy dialect.
 namespace {
 struct ToyToAffineLoweringPass
-    : public PassWrapper<ToyToAffineLoweringPass, OperationPass<FuncOp>> {
+    : public PassWrapper<ToyToAffineLoweringPass, OperationPass<ModuleOp>> {
   void getDependentDialects(DialectRegistry &registry) const override {
     registry.insert<AffineDialect, func::FuncDialect, memref::MemRefDialect>();
   }
@@ -286,19 +318,6 @@
 } // namespace

 void ToyToAffineLoweringPass::runOnOperation() {
-  auto function = getOperation();
-
-  // We only lower the main function as we expect that all other functions have
-  // been inlined.
-  if (function.getName() != "main")
-    return;
-
-  // Verify that the given main has no inputs and results.
-  if (function.getNumArguments() || function.getType().getNumResults()) {
-    function.emitError("expected 'main' to have 0 inputs and 0 results");
-    return signalPassFailure();
-  }
-
   // The first thing to define is the conversion target. This will define the
   // final target for this lowering.
   ConversionTarget target(getContext());
@@ -306,8 +325,9 @@
   // We define the specific operations, or dialects, that are legal targets for
   // this lowering. In our case, we are lowering to a combination of the
   // `Affine`, `Arithmetic`, `Func`, and `MemRef` dialects.
-  target.addLegalDialect<AffineDialect, arith::ArithmeticDialect,
-                         func::FuncDialect, memref::MemRefDialect>();
+  target
+      .addLegalDialect<AffineDialect, BuiltinDialect, arith::ArithmeticDialect,
+                       func::FuncDialect, memref::MemRefDialect>();

   // We also define the Toy dialect as Illegal so that the conversion will fail
   // if any of these operations are *not* converted. Given that we actually want
@@ -324,7 +344,7 @@
   // Now that the conversion target has been defined, we just need to provide
   // the set of patterns that will lower the Toy operations.
   RewritePatternSet patterns(&getContext());
-  patterns.add<AddOpLowering, ConstantOpLowering, MulOpLowering,
-               PrintOpLowering, ReturnOpLowering, TransposeOpLowering>(
-      &getContext());
+  patterns.add<AddOpLowering, ConstantOpLowering, FuncOpLowering, MulOpLowering,
+               PrintOpLowering, ReturnOpLowering, TransposeOpLowering>(
+      &getContext());
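A note on the rewrite itself: the new function is created with an equivalent signature but no blocks, and `inlineRegionBefore` then moves the old region's blocks (entry-block arguments included) into it, so the body is spliced rather than cloned. A minimal sketch of this signature-preserving idiom, assuming the commit-era `mlir::FuncOp` target op:

```c++
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Transforms/DialectConversion.h"
#include "toy/Dialect.h"

// Sketch: replace toy.func with an equivalent function op while reusing the
// existing body region wholesale.
void replaceKeepingBody(mlir::toy::FuncOp op,
                        mlir::ConversionPatternRewriter &rewriter) {
  // Created with a type but no blocks; its body region starts out empty.
  auto func = rewriter.create<mlir::FuncOp>(op.getLoc(), op.getName(),
                                            op.getType());
  // Splice every block of the old body into the new one. Operations inside
  // are untouched, so their results and uses remain valid.
  rewriter.inlineRegionBefore(op.getRegion(), func.getBody(), func.end());
  rewriter.eraseOp(op);
}
```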
diff --git a/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp b/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
--- a/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
+++ b/mlir/examples/toy/Ch7/mlir/MLIRGen.cpp
@@ -60,11 +60,9 @@

   for (auto &record : moduleAST) {
     if (FunctionAST *funcAST = llvm::dyn_cast<FunctionAST>(record.get())) {
-      auto func = mlirGen(*funcAST);
+      mlir::toy::FuncOp func = mlirGen(*funcAST);
       if (!func)
         return nullptr;
-
-      theModule.push_back(func);
       functionMap.insert({func.getName(), func});
     } else if (StructAST *str = llvm::dyn_cast<StructAST>(record.get())) {
       if (failed(mlirGen(*str)))
@@ -105,7 +103,7 @@
                      std::pair<mlir::Value, VarDeclExprAST *>>;

   /// A mapping for the functions that have been code generated to MLIR.
-  llvm::StringMap<mlir::FuncOp> functionMap;
+  llvm::StringMap<mlir::toy::FuncOp> functionMap;

   /// A mapping for named struct types to the underlying MLIR type and the
   /// original AST node.
@@ -157,7 +155,7 @@
   /// Create the prototype for an MLIR function with as many arguments as the
   /// provided Toy AST prototype.
-  mlir::FuncOp mlirGen(PrototypeAST &proto) {
+  mlir::toy::FuncOp mlirGen(PrototypeAST &proto) {
     auto location = loc(proto.loc());

     // This is a generic function, the return type will be inferred later.
@@ -170,23 +168,23 @@
       argTypes.push_back(type);
     }
     auto funcType = builder.getFunctionType(argTypes, llvm::None);
-    return mlir::FuncOp::create(location, proto.getName(), funcType);
+    return builder.create<mlir::toy::FuncOp>(location, proto.getName(),
+                                             funcType);
   }

   /// Emit a new function and add it to the MLIR module.
-  mlir::FuncOp mlirGen(FunctionAST &funcAST) {
+  mlir::toy::FuncOp mlirGen(FunctionAST &funcAST) {
     // Create a scope in the symbol table to hold variable declarations.
     SymbolTableScopeT varScope(symbolTable);

     // Create an MLIR function for the given prototype.
-    mlir::FuncOp function(mlirGen(*funcAST.getProto()));
+    builder.setInsertionPointToEnd(theModule.getBody());
+    mlir::toy::FuncOp function = mlirGen(*funcAST.getProto());
     if (!function)
       return nullptr;

     // Let's start the body of the function now!
-    // In MLIR the entry block of the function is special: it must have the same
-    // argument list as the function itself.
-    auto &entryBlock = *function.addEntryBlock();
+    mlir::Block &entryBlock = function.front();
     auto protoArgs = funcAST.getProto()->getArgs();

     // Declare all the function arguments in the symbol table.
@@ -519,7 +517,7 @@
       emitError(location) << "no defined function found for '" << callee << "'";
       return nullptr;
     }
-    mlir::FuncOp calledFunc = calledFuncIt->second;
+    mlir::toy::FuncOp calledFunc = calledFuncIt->second;
     return builder.create<GenericCallOp>(
         location, calledFunc.getType().getResult(0),
         mlir::SymbolRefAttr::get(builder.getContext(), callee), operands);
diff --git a/mlir/examples/toy/Ch7/mlir/ShapeInferencePass.cpp b/mlir/examples/toy/Ch7/mlir/ShapeInferencePass.cpp
--- a/mlir/examples/toy/Ch7/mlir/ShapeInferencePass.cpp
+++ b/mlir/examples/toy/Ch7/mlir/ShapeInferencePass.cpp
@@ -45,7 +45,7 @@
 ///   3) If the worklist is empty, the algorithm succeeded.
 ///
 class ShapeInferencePass
-    : public mlir::PassWrapper<ShapeInferencePass, OperationPass<FuncOp>> {
+    : public mlir::PassWrapper<ShapeInferencePass, OperationPass<toy::FuncOp>> {
 public:
   void runOnOperation() override {
     auto f = getOperation();
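Re-anchoring ShapeInferencePass on `toy::FuncOp` is all it takes for the pass manager to schedule it per function again. For readers writing their own passes over the new op, a minimal illustrative skeleton (`ExamplePass` is not part of the patch):

```c++
#include "mlir/Pass/Pass.h"
#include "toy/Dialect.h"

namespace {
// Sketch: a pass anchored on toy.func. getOperation() is typed to the anchor
// op, and the pass manager may run instances on different functions in
// parallel.
struct ExamplePass
    : public mlir::PassWrapper<ExamplePass,
                               mlir::OperationPass<mlir::toy::FuncOp>> {
  void runOnOperation() override {
    mlir::toy::FuncOp func = getOperation();
    // Analyze or rewrite `func` here; it is IsolatedFromAbove, so nothing
    // defined outside the function can be referenced inside it.
    (void)func;
  }
};
} // namespace
```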
diff --git a/mlir/examples/toy/Ch7/toyc.cpp b/mlir/examples/toy/Ch7/toyc.cpp
--- a/mlir/examples/toy/Ch7/toyc.cpp
+++ b/mlir/examples/toy/Ch7/toyc.cpp
@@ -146,7 +146,7 @@
     // Now that there is only one function, we can infer the shapes of each of
     // the operations.
-    mlir::OpPassManager &optPM = pm.nest<mlir::FuncOp>();
+    mlir::OpPassManager &optPM = pm.nest<mlir::toy::FuncOp>();
     optPM.addPass(mlir::createCanonicalizerPass());
     optPM.addPass(mlir::toy::createShapeInferencePass());
     optPM.addPass(mlir::createCanonicalizerPass());
@@ -154,10 +154,11 @@
   }

   if (isLoweringToAffine) {
-    mlir::OpPassManager &optPM = pm.nest<mlir::FuncOp>();
+    // Partially lower the toy dialect.
+    pm.addPass(mlir::toy::createLowerToAffinePass());

-    // Partially lower the toy dialect with a few cleanups afterwards.
-    optPM.addPass(mlir::toy::createLowerToAffinePass());
+    // Add a few cleanups post lowering.
+    mlir::OpPassManager &optPM = pm.nest<mlir::FuncOp>();
     optPM.addPass(mlir::createCanonicalizerPass());
     optPM.addPass(mlir::createCSEPass());
diff --git a/mlir/test/Examples/Toy/Ch2/codegen.toy b/mlir/test/Examples/Toy/Ch2/codegen.toy
--- a/mlir/test/Examples/Toy/Ch2/codegen.toy
+++ b/mlir/test/Examples/Toy/Ch2/codegen.toy
@@ -13,14 +13,14 @@
   print(d);
 }

-# CHECK-LABEL: func @multiply_transpose(
-# CHECK-SAME:  [[VAL_0:%.*]]: tensor<*xf64>, [[VAL_1:%.*]]: tensor<*xf64>) -> tensor<*xf64>
+# CHECK-LABEL: toy.func @multiply_transpose(
+# CHECK-SAME:  [[VAL_0:%.*]]: tensor<*xf64>, [[VAL_1:%.*]]: tensor<*xf64>) -> tensor<*xf64>
 # CHECK:       [[VAL_2:%.*]] = toy.transpose([[VAL_0]] : tensor<*xf64>) to tensor<*xf64>
 # CHECK-NEXT:  [[VAL_3:%.*]] = toy.transpose([[VAL_1]] : tensor<*xf64>) to tensor<*xf64>
 # CHECK-NEXT:  [[VAL_4:%.*]] = toy.mul [[VAL_2]], [[VAL_3]] : tensor<*xf64>
 # CHECK-NEXT:  toy.return [[VAL_4]] : tensor<*xf64>

-# CHECK-LABEL: func @main()
+# CHECK-LABEL: toy.func @main()
 # CHECK-NEXT:  [[VAL_5:%.*]] = toy.constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
 # CHECK-NEXT:  [[VAL_6:%.*]] = toy.reshape([[VAL_5]] : tensor<2x3xf64>) to tensor<2x3xf64>
 # CHECK-NEXT:  [[VAL_7:%.*]] = toy.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>
diff --git a/mlir/test/Examples/Toy/Ch2/invalid.mlir b/mlir/test/Examples/Toy/Ch2/invalid.mlir
--- a/mlir/test/Examples/Toy/Ch2/invalid.mlir
+++ b/mlir/test/Examples/Toy/Ch2/invalid.mlir
@@ -4,6 +4,6 @@
 // - toy.print should not return a value.
 // - toy.print should take an argument.
 // - There should be a block terminator.
-func @main() {
+toy.func @main() {
   %0 = "toy.print"() : () -> tensor<2x3xf64>
 }
diff --git a/mlir/test/Examples/Toy/Ch2/scalar.toy b/mlir/test/Examples/Toy/Ch2/scalar.toy
--- a/mlir/test/Examples/Toy/Ch2/scalar.toy
+++ b/mlir/test/Examples/Toy/Ch2/scalar.toy
@@ -5,7 +5,7 @@
   print(a);
 }

-# CHECK-LABEL: func @main() {
+# CHECK-LABEL: toy.func @main() {
 # CHECK-NEXT:  %0 = toy.constant dense<5.500000e+00> : tensor<f64>
 # CHECK-NEXT:  %1 = toy.reshape(%0 : tensor<f64>) to tensor<2x2xf64>
 # CHECK-NEXT:  toy.print %1 : tensor<2x2xf64>
diff --git a/mlir/test/Examples/Toy/Ch3/codegen.toy b/mlir/test/Examples/Toy/Ch3/codegen.toy
--- a/mlir/test/Examples/Toy/Ch3/codegen.toy
+++ b/mlir/test/Examples/Toy/Ch3/codegen.toy
@@ -13,14 +13,14 @@
   print(d);
 }

-# CHECK-LABEL: func @multiply_transpose(
-# CHECK-SAME:  [[VAL_0:%.*]]: tensor<*xf64>, [[VAL_1:%.*]]: tensor<*xf64>) -> tensor<*xf64>
+# CHECK-LABEL: toy.func @multiply_transpose(
+# CHECK-SAME:  [[VAL_0:%.*]]: tensor<*xf64>, [[VAL_1:%.*]]: tensor<*xf64>) -> tensor<*xf64>
 # CHECK:       [[VAL_2:%.*]] = toy.transpose([[VAL_0]] : tensor<*xf64>) to tensor<*xf64>
 # CHECK-NEXT:  [[VAL_3:%.*]] = toy.transpose([[VAL_1]] : tensor<*xf64>) to tensor<*xf64>
 # CHECK-NEXT:  [[VAL_4:%.*]] = toy.mul [[VAL_2]], [[VAL_3]] : tensor<*xf64>
 # CHECK-NEXT:  toy.return [[VAL_4]] : tensor<*xf64>

-# CHECK-LABEL: func @main()
+# CHECK-LABEL: toy.func @main()
 # CHECK-NEXT:  [[VAL_5:%.*]] = toy.constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
 # CHECK-NEXT:  [[VAL_6:%.*]] = toy.reshape([[VAL_5]] : tensor<2x3xf64>) to tensor<2x3xf64>
 # CHECK-NEXT:  [[VAL_7:%.*]] = toy.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>
-func @main() {
+toy.func @main() {
   %0 = "toy.print"() : () -> tensor<2x3xf64>
 }
diff --git a/mlir/test/Examples/Toy/Ch3/scalar.toy b/mlir/test/Examples/Toy/Ch3/scalar.toy
--- a/mlir/test/Examples/Toy/Ch3/scalar.toy
+++ b/mlir/test/Examples/Toy/Ch3/scalar.toy
@@ -5,7 +5,7 @@
   print(a);
 }

-# CHECK-LABEL: func @main() {
+# CHECK-LABEL: toy.func @main() {
 # CHECK-NEXT:  %0 = toy.constant dense<5.500000e+00> : tensor<f64>
 # CHECK-NEXT:  %1 = toy.reshape(%0 : tensor<f64>) to tensor<2x2xf64>
 # CHECK-NEXT:  toy.print %1 : tensor<2x2xf64>
diff --git a/mlir/test/Examples/Toy/Ch3/transpose_transpose.toy b/mlir/test/Examples/Toy/Ch3/transpose_transpose.toy
--- a/mlir/test/Examples/Toy/Ch3/transpose_transpose.toy
+++ b/mlir/test/Examples/Toy/Ch3/transpose_transpose.toy
@@ -11,12 +11,12 @@
   print(b);
 }

-# CHECK-LABEL: func @transpose_transpose(
-# CHECK-SAME:  [[VAL_0:%.*]]: tensor<*xf64>) -> tensor<*xf64>
+# CHECK-LABEL: toy.func @transpose_transpose(
+# CHECK-SAME:  [[VAL_0:%.*]]: tensor<*xf64>) -> tensor<*xf64>
 # CHECK-NEXT:  toy.return [[VAL_0]] : tensor<*xf64>

-# CHECK-LABEL: func @main()
+# CHECK-LABEL: toy.func @main()
 # CHECK-NEXT:  [[VAL_1:%.*]] = toy.constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
 # CHECK-NEXT:  [[VAL_2:%.*]] = toy.generic_call @transpose_transpose([[VAL_1]]) : (tensor<2x3xf64>) -> tensor<*xf64>
 # CHECK-NEXT:  toy.print [[VAL_2]] : tensor<*xf64>
 # CHECK-NEXT:  toy.return
\ No newline at end of file
diff --git a/mlir/test/Examples/Toy/Ch3/trivial_reshape.toy b/mlir/test/Examples/Toy/Ch3/trivial_reshape.toy
--- a/mlir/test/Examples/Toy/Ch3/trivial_reshape.toy
+++ b/mlir/test/Examples/Toy/Ch3/trivial_reshape.toy
@@ -7,10 +7,10 @@
   print(c);
 }

-# CHECK-LABEL: func @main()
+# CHECK-LABEL: toy.func @main()
 # CHECK-NEXT:  [[VAL_0:%.*]] = toy.constant
 # CHECK-SAME:  dense<[
 # CHECK-SAME:  [1.000000e+00], [2.000000e+00]
 # CHECK-SAME:  ]> : tensor<2x1xf64>
 # CHECK-NEXT:  toy.print [[VAL_0]] : tensor<2x1xf64>
 # CHECK-NEXT:  toy.return
\ No newline at end of file
diff --git a/mlir/test/Examples/Toy/Ch4/codegen.toy b/mlir/test/Examples/Toy/Ch4/codegen.toy
--- a/mlir/test/Examples/Toy/Ch4/codegen.toy
+++ b/mlir/test/Examples/Toy/Ch4/codegen.toy
@@ -13,14 +13,14 @@
   print(d);
 }

-# CHECK-LABEL: func private @multiply_transpose(
+# CHECK-LABEL: toy.func private @multiply_transpose(
 # CHECK-SAME:  [[VAL_0:%.*]]: tensor<*xf64>, [[VAL_1:%.*]]: tensor<*xf64>) -> tensor<*xf64>
 # CHECK:       [[VAL_2:%.*]] = toy.transpose([[VAL_0]] : tensor<*xf64>) to tensor<*xf64>
 # CHECK-NEXT:  [[VAL_3:%.*]] = toy.transpose([[VAL_1]] : tensor<*xf64>) to tensor<*xf64>
 # CHECK-NEXT:  [[VAL_4:%.*]] = toy.mul [[VAL_2]], [[VAL_3]] : tensor<*xf64>
 # CHECK-NEXT:  toy.return [[VAL_4]] : tensor<*xf64>

-# CHECK-LABEL: func @main()
+# CHECK-LABEL: toy.func @main()
 # CHECK-NEXT:  [[VAL_5:%.*]] = toy.constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
 # CHECK-NEXT:  [[VAL_6:%.*]] = toy.reshape([[VAL_5]] : tensor<2x3xf64>) to tensor<2x3xf64>
 # CHECK-NEXT:  [[VAL_7:%.*]] = toy.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>
-func @main() {
+toy.func @main() {
   %0 = "toy.print"() : () -> tensor<2x3xf64>
 }
diff --git a/mlir/test/Examples/Toy/Ch4/scalar.toy b/mlir/test/Examples/Toy/Ch4/scalar.toy
--- a/mlir/test/Examples/Toy/Ch4/scalar.toy
+++ b/mlir/test/Examples/Toy/Ch4/scalar.toy
@@ -5,7 +5,7 @@
   print(a);
 }

-# CHECK-LABEL: func @main() {
+# CHECK-LABEL: toy.func @main() {
 # CHECK-NEXT:  %0 = toy.constant dense<5.500000e+00> : tensor<f64>
 # CHECK-NEXT:  %1 = toy.reshape(%0 : tensor<f64>) to tensor<2x2xf64>
 # CHECK-NEXT:  toy.print %1 : tensor<2x2xf64>
diff --git a/mlir/test/Examples/Toy/Ch4/shape_inference.mlir b/mlir/test/Examples/Toy/Ch4/shape_inference.mlir
--- a/mlir/test/Examples/Toy/Ch4/shape_inference.mlir
+++ b/mlir/test/Examples/Toy/Ch4/shape_inference.mlir
@@ -2,13 +2,13 @@

 // Check the result of inlining+shape inference on an input module.

-func private @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> {
+toy.func private @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> {
   %0 = toy.transpose(%arg0 : tensor<*xf64>) to tensor<*xf64>
   %1 = toy.transpose(%arg1 : tensor<*xf64>) to tensor<*xf64>
   %2 = toy.mul %0, %1 : tensor<*xf64>
   toy.return %2 : tensor<*xf64>
 }
-func @main() {
+toy.func @main() {
   %0 = toy.constant dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
   %1 = toy.reshape(%0 : tensor<2x3xf64>) to tensor<2x3xf64>
   %2 = toy.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>
@@ -19,10 +19,10 @@
   toy.return
 }

-// CHECK-NOT: func private @multiply_transpose
+// CHECK-NOT: toy.func private @multiply_transpose
 // CHECK-NOT: tensor<*xf64>

-// CHECK-LABEL: func @main()
+// CHECK-LABEL: toy.func @main()
 // CHECK:       [[VAL_0:%.*]] = toy.constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
 // CHECK:       [[VAL_1:%.*]] = toy.transpose([[VAL_0]] : tensor<2x3xf64>) to tensor<3x2xf64>
 // CHECK:       [[VAL_2:%.*]] = toy.mul [[VAL_1]], [[VAL_1]] : tensor<3x2xf64>
diff --git a/mlir/test/Examples/Toy/Ch4/transpose_transpose.toy b/mlir/test/Examples/Toy/Ch4/transpose_transpose.toy
--- a/mlir/test/Examples/Toy/Ch4/transpose_transpose.toy
+++ b/mlir/test/Examples/Toy/Ch4/transpose_transpose.toy
@@ -11,7 +11,7 @@
   print(b);
 }

-# CHECK-LABEL: func @main()
+# CHECK-LABEL: toy.func @main()
 # CHECK-NEXT:  [[VAL_1:%.*]] = toy.constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
 # CHECK-NEXT:  toy.print [[VAL_1]] : tensor<2x3xf64>
 # CHECK-NEXT:  toy.return
\ No newline at end of file
diff --git a/mlir/test/Examples/Toy/Ch4/trivial_reshape.toy b/mlir/test/Examples/Toy/Ch4/trivial_reshape.toy
--- a/mlir/test/Examples/Toy/Ch4/trivial_reshape.toy
+++ b/mlir/test/Examples/Toy/Ch4/trivial_reshape.toy
@@ -7,10 +7,10 @@
   print(c);
 }

-# CHECK-LABEL: func @main()
+# CHECK-LABEL: toy.func @main()
 # CHECK-NEXT:  [[VAL_0:%.*]] = toy.constant
 # CHECK-SAME:  dense<[
 # CHECK-SAME:  [1.000000e+00], [2.000000e+00]
 # CHECK-SAME:  ]> : tensor<2x1xf64>
 # CHECK-NEXT:  toy.print [[VAL_0]] : tensor<2x1xf64>
 # CHECK-NEXT:  toy.return
\ No newline at end of file
diff --git a/mlir/test/Examples/Toy/Ch5/affine-lowering.mlir b/mlir/test/Examples/Toy/Ch5/affine-lowering.mlir
--- a/mlir/test/Examples/Toy/Ch5/affine-lowering.mlir
+++ b/mlir/test/Examples/Toy/Ch5/affine-lowering.mlir
@@ -1,7 +1,7 @@
 // RUN: toyc-ch5 %s -emit=mlir-affine 2>&1 | FileCheck %s
 //
RUN: toyc-ch5 %s -emit=mlir-affine -opt 2>&1 | FileCheck %s --check-prefix=OPT -func @main() { +toy.func @main() { %0 = toy.constant dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64> %2 = toy.transpose(%0 : tensor<2x3xf64>) to tensor<3x2xf64> %3 = toy.mul %2, %2 : tensor<3x2xf64> diff --git a/mlir/test/Examples/Toy/Ch5/codegen.toy b/mlir/test/Examples/Toy/Ch5/codegen.toy --- a/mlir/test/Examples/Toy/Ch5/codegen.toy +++ b/mlir/test/Examples/Toy/Ch5/codegen.toy @@ -13,14 +13,14 @@ print(d); } -# CHECK-LABEL: func private @multiply_transpose( +# CHECK-LABEL: toy.func private @multiply_transpose( # CHECK-SAME: [[VAL_0:%.*]]: tensor<*xf64>, [[VAL_1:%.*]]: tensor<*xf64>) -> tensor<*xf64> # CHECK: [[VAL_2:%.*]] = toy.transpose([[VAL_0]] : tensor<*xf64>) to tensor<*xf64> # CHECK-NEXT: [[VAL_3:%.*]] = toy.transpose([[VAL_1]] : tensor<*xf64>) to tensor<*xf64> # CHECK-NEXT: [[VAL_4:%.*]] = toy.mul [[VAL_2]], [[VAL_3]] : tensor<*xf64> # CHECK-NEXT: toy.return [[VAL_4]] : tensor<*xf64> -# CHECK-LABEL: func @main() +# CHECK-LABEL: toy.func @main() # CHECK-NEXT: [[VAL_5:%.*]] = toy.constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64> # CHECK-NEXT: [[VAL_6:%.*]] = toy.reshape([[VAL_5]] : tensor<2x3xf64>) to tensor<2x3xf64> # CHECK-NEXT: [[VAL_7:%.*]] = toy.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64> diff --git a/mlir/test/Examples/Toy/Ch5/invalid.mlir b/mlir/test/Examples/Toy/Ch5/invalid.mlir --- a/mlir/test/Examples/Toy/Ch5/invalid.mlir +++ b/mlir/test/Examples/Toy/Ch5/invalid.mlir @@ -4,6 +4,6 @@ // - toy.print should not return a value. // - toy.print should take an argument. // - There should be a block terminator. -func @main() { +toy.func @main() { %0 = "toy.print"() : () -> tensor<2x3xf64> } diff --git a/mlir/test/Examples/Toy/Ch5/shape_inference.mlir b/mlir/test/Examples/Toy/Ch5/shape_inference.mlir --- a/mlir/test/Examples/Toy/Ch5/shape_inference.mlir +++ b/mlir/test/Examples/Toy/Ch5/shape_inference.mlir @@ -2,13 +2,13 @@ // Check the result of inlining+shape inference on an input module. 
-func private @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> { +toy.func private @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> { %0 = toy.transpose(%arg0 : tensor<*xf64>) to tensor<*xf64> %1 = toy.transpose(%arg1 : tensor<*xf64>) to tensor<*xf64> %2 = toy.mul %0, %1 : tensor<*xf64> toy.return %2 : tensor<*xf64> } -func @main() { +toy.func @main() { %0 = toy.constant dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64> %1 = toy.reshape(%0 : tensor<2x3xf64>) to tensor<2x3xf64> %2 = toy.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64> @@ -19,10 +19,10 @@ toy.return } -// CHECK-NOT: func @multiply_transpose +// CHECK-NOT: toy.func @multiply_transpose // CHECK-NOT: tensor<*xf64> -// CHECK-LABEL: func @main() +// CHECK-LABEL: toy.func @main() // CHECK: [[VAL_0:%.*]] = toy.constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64> // CHECK: [[VAL_1:%.*]] = toy.transpose([[VAL_0]] : tensor<2x3xf64>) to tensor<3x2xf64> // CHECK: [[VAL_2:%.*]] = toy.mul [[VAL_1]], [[VAL_1]] : tensor<3x2xf64> diff --git a/mlir/test/Examples/Toy/Ch5/transpose_transpose.toy b/mlir/test/Examples/Toy/Ch5/transpose_transpose.toy --- a/mlir/test/Examples/Toy/Ch5/transpose_transpose.toy +++ b/mlir/test/Examples/Toy/Ch5/transpose_transpose.toy @@ -11,7 +11,7 @@ print(b); } -# CHECK-LABEL: func @main() +# CHECK-LABEL: toy.func @main() # CHECK-NEXT: [[VAL_1:%.*]] = toy.constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64> # CHECK-NEXT: toy.print [[VAL_1]] : tensor<2x3xf64> # CHECK-NEXT: toy.return \ No newline at end of file diff --git a/mlir/test/Examples/Toy/Ch5/trivial_reshape.toy b/mlir/test/Examples/Toy/Ch5/trivial_reshape.toy --- a/mlir/test/Examples/Toy/Ch5/trivial_reshape.toy +++ b/mlir/test/Examples/Toy/Ch5/trivial_reshape.toy @@ -7,7 +7,7 @@ print(c); } -# CHECK-LABEL: func @main() +# CHECK-LABEL: toy.func @main() # CHECK-NEXT: [[VAL_0:%.*]] = toy.constant # CHECK-SAME: dense<[ # CHECK-SAME: [1.000000e+00], [2.000000e+00] diff --git a/mlir/test/Examples/Toy/Ch6/affine-lowering.mlir b/mlir/test/Examples/Toy/Ch6/affine-lowering.mlir --- a/mlir/test/Examples/Toy/Ch6/affine-lowering.mlir +++ b/mlir/test/Examples/Toy/Ch6/affine-lowering.mlir @@ -1,7 +1,7 @@ // RUN: toyc-ch6 %s -emit=mlir-affine 2>&1 | FileCheck %s // RUN: toyc-ch6 %s -emit=mlir-affine -opt 2>&1 | FileCheck %s --check-prefix=OPT -func @main() { +toy.func @main() { %0 = toy.constant dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64> %2 = toy.transpose(%0 : tensor<2x3xf64>) to tensor<3x2xf64> %3 = toy.mul %2, %2 : tensor<3x2xf64> diff --git a/mlir/test/Examples/Toy/Ch6/codegen.toy b/mlir/test/Examples/Toy/Ch6/codegen.toy --- a/mlir/test/Examples/Toy/Ch6/codegen.toy +++ b/mlir/test/Examples/Toy/Ch6/codegen.toy @@ -13,14 +13,14 @@ print(d); } -# CHECK-LABEL: func private @multiply_transpose( +# CHECK-LABEL: toy.func private @multiply_transpose( # CHECK-SAME: [[VAL_0:%.*]]: tensor<*xf64>, [[VAL_1:%.*]]: tensor<*xf64>) -> tensor<*xf64> # CHECK: [[VAL_2:%.*]] = toy.transpose([[VAL_0]] : tensor<*xf64>) to tensor<*xf64> # CHECK-NEXT: [[VAL_3:%.*]] = toy.transpose([[VAL_1]] : tensor<*xf64>) to tensor<*xf64> # CHECK-NEXT: [[VAL_4:%.*]] = toy.mul [[VAL_2]], [[VAL_3]] : 
tensor<*xf64>
 # CHECK-NEXT:  toy.return [[VAL_4]] : tensor<*xf64>

-# CHECK-LABEL: func @main()
+# CHECK-LABEL: toy.func @main()
 # CHECK-NEXT:  [[VAL_5:%.*]] = toy.constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
 # CHECK-NEXT:  [[VAL_6:%.*]] = toy.reshape([[VAL_5]] : tensor<2x3xf64>) to tensor<2x3xf64>
 # CHECK-NEXT:  [[VAL_7:%.*]] = toy.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>
diff --git a/mlir/test/Examples/Toy/Ch6/invalid.mlir b/mlir/test/Examples/Toy/Ch6/invalid.mlir
--- a/mlir/test/Examples/Toy/Ch6/invalid.mlir
+++ b/mlir/test/Examples/Toy/Ch6/invalid.mlir
@@ -4,6 +4,6 @@
 // - toy.print should not return a value.
 // - toy.print should take an argument.
 // - There should be a block terminator.
-func @main() {
+toy.func @main() {
   %0 = "toy.print"() : () -> tensor<2x3xf64>
 }
diff --git a/mlir/test/Examples/Toy/Ch6/llvm-lowering.mlir b/mlir/test/Examples/Toy/Ch6/llvm-lowering.mlir
--- a/mlir/test/Examples/Toy/Ch6/llvm-lowering.mlir
+++ b/mlir/test/Examples/Toy/Ch6/llvm-lowering.mlir
@@ -1,6 +1,6 @@
 // RUN: toyc-ch6 %s -emit=llvm -opt

-func @main() {
+toy.func @main() {
   %0 = toy.constant dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
   %2 = toy.transpose(%0 : tensor<2x3xf64>) to tensor<3x2xf64>
   %3 = toy.mul %2, %2 : tensor<3x2xf64>
diff --git a/mlir/test/Examples/Toy/Ch6/scalar.toy b/mlir/test/Examples/Toy/Ch6/scalar.toy
--- a/mlir/test/Examples/Toy/Ch6/scalar.toy
+++ b/mlir/test/Examples/Toy/Ch6/scalar.toy
@@ -5,7 +5,7 @@
   print(a);
 }

-# CHECK-LABEL: func @main() {
+# CHECK-LABEL: toy.func @main() {
 # CHECK-NEXT:  %0 = toy.constant dense<5.500000e+00> : tensor<f64>
 # CHECK-NEXT:  %1 = toy.reshape(%0 : tensor<f64>) to tensor<2x2xf64>
 # CHECK-NEXT:  toy.print %1 : tensor<2x2xf64>
diff --git a/mlir/test/Examples/Toy/Ch6/shape_inference.mlir b/mlir/test/Examples/Toy/Ch6/shape_inference.mlir
--- a/mlir/test/Examples/Toy/Ch6/shape_inference.mlir
+++ b/mlir/test/Examples/Toy/Ch6/shape_inference.mlir
@@ -2,13 +2,13 @@

 // Check the result of inlining+shape inference on an input module.
-func private @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> { +toy.func private @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> { %0 = toy.transpose(%arg0 : tensor<*xf64>) to tensor<*xf64> %1 = toy.transpose(%arg1 : tensor<*xf64>) to tensor<*xf64> %2 = toy.mul %0, %1 : tensor<*xf64> toy.return %2 : tensor<*xf64> } -func @main() { +toy.func @main() { %0 = toy.constant dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64> %1 = toy.reshape(%0 : tensor<2x3xf64>) to tensor<2x3xf64> %2 = toy.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64> @@ -19,10 +19,10 @@ toy.return } -// CHECK-NOT: func @multiply_transpose +// CHECK-NOT: toy.func @multiply_transpose // CHECK-NOT: tensor<*xf64> -// CHECK-LABEL: func @main() +// CHECK-LABEL: toy.func @main() // CHECK: [[VAL_0:%.*]] = toy.constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64> // CHECK: [[VAL_1:%.*]] = toy.transpose([[VAL_0]] : tensor<2x3xf64>) to tensor<3x2xf64> // CHECK: [[VAL_2:%.*]] = toy.mul [[VAL_1]], [[VAL_1]] : tensor<3x2xf64> diff --git a/mlir/test/Examples/Toy/Ch6/transpose_transpose.toy b/mlir/test/Examples/Toy/Ch6/transpose_transpose.toy --- a/mlir/test/Examples/Toy/Ch6/transpose_transpose.toy +++ b/mlir/test/Examples/Toy/Ch6/transpose_transpose.toy @@ -11,7 +11,7 @@ print(b); } -# CHECK-LABEL: func @main() +# CHECK-LABEL: toy.func @main() # CHECK-NEXT: [[VAL_1:%.*]] = toy.constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64> # CHECK-NEXT: toy.print [[VAL_1]] : tensor<2x3xf64> # CHECK-NEXT: toy.return \ No newline at end of file diff --git a/mlir/test/Examples/Toy/Ch6/trivial_reshape.toy b/mlir/test/Examples/Toy/Ch6/trivial_reshape.toy --- a/mlir/test/Examples/Toy/Ch6/trivial_reshape.toy +++ b/mlir/test/Examples/Toy/Ch6/trivial_reshape.toy @@ -7,7 +7,7 @@ print(c); } -# CHECK-LABEL: func @main() +# CHECK-LABEL: toy.func @main() # CHECK-NEXT: [[VAL_0:%.*]] = toy.constant # CHECK-SAME: dense<[ # CHECK-SAME: [1.000000e+00], [2.000000e+00] diff --git a/mlir/test/Examples/Toy/Ch7/affine-lowering.mlir b/mlir/test/Examples/Toy/Ch7/affine-lowering.mlir --- a/mlir/test/Examples/Toy/Ch7/affine-lowering.mlir +++ b/mlir/test/Examples/Toy/Ch7/affine-lowering.mlir @@ -1,7 +1,7 @@ // RUN: toyc-ch7 %s -emit=mlir-affine 2>&1 | FileCheck %s // RUN: toyc-ch7 %s -emit=mlir-affine -opt 2>&1 | FileCheck %s --check-prefix=OPT -func @main() { +toy.func @main() { %0 = toy.constant dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64> %2 = toy.transpose(%0 : tensor<2x3xf64>) to tensor<3x2xf64> %3 = toy.mul %2, %2 : tensor<3x2xf64> diff --git a/mlir/test/Examples/Toy/Ch7/codegen.toy b/mlir/test/Examples/Toy/Ch7/codegen.toy --- a/mlir/test/Examples/Toy/Ch7/codegen.toy +++ b/mlir/test/Examples/Toy/Ch7/codegen.toy @@ -13,14 +13,14 @@ print(d); } -# CHECK-LABEL: func private @multiply_transpose( +# CHECK-LABEL: toy.func private @multiply_transpose( # CHECK-SAME: [[VAL_0:%.*]]: tensor<*xf64>, [[VAL_1:%.*]]: tensor<*xf64>) -> tensor<*xf64> # CHECK: [[VAL_2:%.*]] = toy.transpose([[VAL_0]] : tensor<*xf64>) to tensor<*xf64> # CHECK-NEXT: [[VAL_3:%.*]] = toy.transpose([[VAL_1]] : tensor<*xf64>) to tensor<*xf64> # CHECK-NEXT: [[VAL_4:%.*]] = toy.mul [[VAL_2]], [[VAL_3]] : 
tensor<*xf64>
 # CHECK-NEXT:  toy.return [[VAL_4]] : tensor<*xf64>

-# CHECK-LABEL: func @main()
+# CHECK-LABEL: toy.func @main()
 # CHECK-NEXT:  [[VAL_5:%.*]] = toy.constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
 # CHECK-NEXT:  [[VAL_6:%.*]] = toy.reshape([[VAL_5]] : tensor<2x3xf64>) to tensor<2x3xf64>
 # CHECK-NEXT:  [[VAL_7:%.*]] = toy.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>
diff --git a/mlir/test/Examples/Toy/Ch7/invalid.mlir b/mlir/test/Examples/Toy/Ch7/invalid.mlir
--- a/mlir/test/Examples/Toy/Ch7/invalid.mlir
+++ b/mlir/test/Examples/Toy/Ch7/invalid.mlir
@@ -4,6 +4,6 @@
 // - toy.print should not return a value.
 // - toy.print should take an argument.
 // - There should be a block terminator.
-func @main() {
+toy.func @main() {
   %0 = "toy.print"() : () -> tensor<2x3xf64>
 }
diff --git a/mlir/test/Examples/Toy/Ch7/llvm-lowering.mlir b/mlir/test/Examples/Toy/Ch7/llvm-lowering.mlir
--- a/mlir/test/Examples/Toy/Ch7/llvm-lowering.mlir
+++ b/mlir/test/Examples/Toy/Ch7/llvm-lowering.mlir
@@ -1,6 +1,6 @@
 // RUN: toyc-ch7 %s -emit=llvm -opt

-func @main() {
+toy.func @main() {
   %0 = toy.constant dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
   %2 = toy.transpose(%0 : tensor<2x3xf64>) to tensor<3x2xf64>
   %3 = toy.mul %2, %2 : tensor<3x2xf64>
diff --git a/mlir/test/Examples/Toy/Ch7/scalar.toy b/mlir/test/Examples/Toy/Ch7/scalar.toy
--- a/mlir/test/Examples/Toy/Ch7/scalar.toy
+++ b/mlir/test/Examples/Toy/Ch7/scalar.toy
@@ -5,7 +5,7 @@
   print(a);
 }

-# CHECK-LABEL: func @main() {
+# CHECK-LABEL: toy.func @main() {
 # CHECK-NEXT:  %0 = toy.constant dense<5.500000e+00> : tensor<f64>
 # CHECK-NEXT:  %1 = toy.reshape(%0 : tensor<f64>) to tensor<2x2xf64>
 # CHECK-NEXT:  toy.print %1 : tensor<2x2xf64>
diff --git a/mlir/test/Examples/Toy/Ch7/shape_inference.mlir b/mlir/test/Examples/Toy/Ch7/shape_inference.mlir
--- a/mlir/test/Examples/Toy/Ch7/shape_inference.mlir
+++ b/mlir/test/Examples/Toy/Ch7/shape_inference.mlir
@@ -2,13 +2,13 @@

 // Check the result of inlining+shape inference on an input module.
-func private @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> {
+toy.func private @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> {
   %0 = toy.transpose(%arg0 : tensor<*xf64>) to tensor<*xf64>
   %1 = toy.transpose(%arg1 : tensor<*xf64>) to tensor<*xf64>
   %2 = toy.mul %0, %1 : tensor<*xf64>
   toy.return %2 : tensor<*xf64>
 }
-func @main() {
+toy.func @main() {
   %0 = toy.constant dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
   %1 = toy.reshape(%0 : tensor<2x3xf64>) to tensor<2x3xf64>
   %2 = toy.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>
diff --git a/mlir/test/Examples/Toy/Ch7/struct-codegen.toy b/mlir/test/Examples/Toy/Ch7/struct-codegen.toy
--- a/mlir/test/Examples/Toy/Ch7/struct-codegen.toy
+++ b/mlir/test/Examples/Toy/Ch7/struct-codegen.toy
@@ -21,7 +21,7 @@
   print(c);
 }

-# CHECK-LABEL: func private @multiply_transpose(
+# CHECK-LABEL: toy.func private @multiply_transpose(
 # CHECK-SAME:  [[VAL_0:%.*]]: !toy.struct<tensor<*xf64>, tensor<*xf64>>) -> tensor<*xf64>
 # CHECK-NEXT:  [[VAL_1:%.*]] = toy.struct_access [[VAL_0]][0] : !toy.struct<tensor<*xf64>, tensor<*xf64>> -> tensor<*xf64>
 # CHECK-NEXT:  [[VAL_2:%.*]] = toy.transpose([[VAL_1]] : tensor<*xf64>) to tensor<*xf64>
@@ -30,13 +30,13 @@
 # CHECK-NEXT:  [[VAL_5:%.*]] = toy.mul [[VAL_2]], [[VAL_4]] : tensor<*xf64>
 # CHECK-NEXT:  toy.return [[VAL_5]] : tensor<*xf64>

-# CHECK-LABEL: func @main()
+# CHECK-LABEL: toy.func @main()
 # CHECK-NEXT:  [[VAL_6:%.*]] = toy.struct_constant [dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>, dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>] : !toy.struct<tensor<*xf64>, tensor<*xf64>>
 # CHECK-NEXT:  [[VAL_7:%.*]] = toy.generic_call @multiply_transpose([[VAL_6]]) : (!toy.struct<tensor<*xf64>, tensor<*xf64>>) -> tensor<*xf64>
 # CHECK-NEXT:  toy.print [[VAL_7]] : tensor<*xf64>
 # CHECK-NEXT:  toy.return

-# OPT-LABEL: func @main()
+# OPT-LABEL: toy.func @main()
 # OPT-NEXT:  [[VAL_0:%.*]] = toy.constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
 # OPT-NEXT:  [[VAL_1:%.*]] = toy.transpose([[VAL_0]] : tensor<2x3xf64>) to tensor<3x2xf64>
 # OPT-NEXT:  [[VAL_2:%.*]] = toy.mul [[VAL_1]], [[VAL_1]] : tensor<3x2xf64>
diff --git a/mlir/test/Examples/Toy/Ch7/struct-opt.mlir b/mlir/test/Examples/Toy/Ch7/struct-opt.mlir
--- a/mlir/test/Examples/Toy/Ch7/struct-opt.mlir
+++ b/mlir/test/Examples/Toy/Ch7/struct-opt.mlir
@@ -1,6 +1,6 @@
 // RUN: toyc-ch7 %s -emit=mlir -opt 2>&1 | FileCheck %s

-func @main() {
+toy.func @main() {
   %0 = toy.struct_constant [
     [dense<4.000000e+00> : tensor<2x2xf64>], dense<4.000000e+00> : tensor<2x2xf64>
   ] : !toy.struct<struct<tensor<*xf64>>, tensor<*xf64>>
@@ -10,6 +10,6 @@
   toy.return
 }

-// CHECK-LABEL: func @main
+// CHECK-LABEL: toy.func @main
 // CHECK-NEXT: %[[CST:.*]] = toy.constant dense<4.0
 // CHECK-NEXT: toy.print %[[CST]]
diff --git a/mlir/test/Examples/Toy/Ch7/transpose_transpose.toy b/mlir/test/Examples/Toy/Ch7/transpose_transpose.toy
--- a/mlir/test/Examples/Toy/Ch7/transpose_transpose.toy
+++ b/mlir/test/Examples/Toy/Ch7/transpose_transpose.toy
@@ -11,7 +11,7 @@
   print(b);
 }

-# CHECK-LABEL: func @main()
+# CHECK-LABEL: toy.func @main()
 # CHECK-NEXT:  [[VAL_1:%.*]] = toy.constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00,
6.000000e+00]]> : tensor<2x3xf64> # CHECK-NEXT: toy.print [[VAL_1]] : tensor<2x3xf64> # CHECK-NEXT: toy.return \ No newline at end of file diff --git a/mlir/test/Examples/Toy/Ch7/trivial_reshape.toy b/mlir/test/Examples/Toy/Ch7/trivial_reshape.toy --- a/mlir/test/Examples/Toy/Ch7/trivial_reshape.toy +++ b/mlir/test/Examples/Toy/Ch7/trivial_reshape.toy @@ -7,7 +7,7 @@ print(c); } -# CHECK-LABEL: func @main() +# CHECK-LABEL: toy.func @main() # CHECK-NEXT: [[VAL_0:%.*]] = toy.constant # CHECK-SAME: dense<[ # CHECK-SAME: [1.000000e+00], [2.000000e+00]