diff --git a/mlir/docs/Tutorials/Toy/Ch-2.md b/mlir/docs/Tutorials/Toy/Ch-2.md
--- a/mlir/docs/Tutorials/Toy/Ch-2.md
+++ b/mlir/docs/Tutorials/Toy/Ch-2.md
@@ -517,6 +517,183 @@
 }
 ```
 
+#### Specifying a Custom Assembly Format
+
+At this point we can generate our "Toy IR". A simplified version of the previous
+example:
+
+```toy
+# User defined generic function that operates on unknown shaped arguments.
+def multiply_transpose(a, b) {
+  return transpose(a) * transpose(b);
+}
+
+def main() {
+  var a<2, 3> = [[1, 2, 3], [4, 5, 6]];
+  var b<2, 3> = [1, 2, 3, 4, 5, 6];
+  var c = multiply_transpose(a, b);
+  var d = multiply_transpose(b, a);
+  print(d);
+}
+```
+
+Results in the following IR:
+
+```mlir
+module {
+  func @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> {
+    %0 = "toy.transpose"(%arg0) : (tensor<*xf64>) -> tensor<*xf64> loc("test/codegen.toy":5:10)
+    %1 = "toy.transpose"(%arg1) : (tensor<*xf64>) -> tensor<*xf64> loc("test/codegen.toy":5:25)
+    %2 = "toy.mul"(%0, %1) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64> loc("test/codegen.toy":5:25)
+    "toy.return"(%2) : (tensor<*xf64>) -> () loc("test/codegen.toy":5:3)
+  } loc("test/codegen.toy":4:1)
+  func @main() {
+    %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64> loc("test/codegen.toy":9:17)
+    %1 = "toy.reshape"(%0) : (tensor<2x3xf64>) -> tensor<2x3xf64> loc("test/codegen.toy":9:3)
+    %2 = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64> loc("test/codegen.toy":10:17)
+    %3 = "toy.reshape"(%2) : (tensor<6xf64>) -> tensor<2x3xf64> loc("test/codegen.toy":10:3)
+    %4 = "toy.generic_call"(%1, %3) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> loc("test/codegen.toy":11:11)
+    %5 = "toy.generic_call"(%3, %1) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> loc("test/codegen.toy":12:11)
+    "toy.print"(%5) : (tensor<*xf64>) -> () loc("test/codegen.toy":13:3)
+    "toy.return"() : () -> () loc("test/codegen.toy":8:1)
+  } loc("test/codegen.toy":8:1)
+} loc("test/codegen.toy":0:0)
+```
+
+One thing to notice here is that all of our Toy operations are printed using the
+generic assembly format. This format is the one shown when breaking down
+`toy.transpose` at the beginning of this chapter. MLIR allows for operations to
+define their own custom assembly format, either
+[declaratively](../../OpDefinitions.md#declarative-assembly-format) or
+imperatively via C++. Defining a custom assembly format allows for tailoring the
+generated IR into something a bit more readable by removing a lot of the fluff
+that is required by the generic format. Let's walk through an example of an
+operation format that we would like to simplify.
+
+##### `toy.print`
+
+The current form of `toy.print` is a little verbose. There are a lot of
+additional characters that we would like to strip away. Let's begin by thinking
+of what a good format of `toy.print` would be, and see how we can implement it.
+Looking at the basics of `toy.print` we get:
+
+```mlir
+toy.print %5 : tensor<*xf64> loc(...)
+```
+
+Here we have stripped much of the format down to the bare essentials, and it has
+become much more readable.
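+For comparison, the generic form of the same operation, copied verbatim from
+the dump above, is:
+
+```mlir
+"toy.print"(%5) : (tensor<*xf64>) -> () loc("test/codegen.toy":13:3)
+```
+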
+To provide a custom assembly format, an operation can either override the
+`parser` and `printer` fields for a C++ format, or the `assemblyFormat` field
+for the declarative format. Let's look at the C++ variant first, as this is
+what the declarative format maps to internally.
+
+```tablegen
+/// Consider a stripped definition of `toy.print` here.
+def PrintOp : Toy_Op<"print"> {
+  let arguments = (ins F64Tensor:$input);
+
+  // Divert the printer and parser to static functions in our .cpp
+  // file that correspond to 'print' and 'parsePrintOp'. 'printer' and 'parser'
+  // here correspond to an instance of an 'OpAsmPrinter' and 'OpAsmParser'. More
+  // details on these classes are shown below.
+  let printer = [{ return ::print(printer, *this); }];
+  let parser = [{ return ::parse$cppClass(parser, result); }];
+}
+```
+
+A C++ implementation for the printer and parser is shown below:
+
+```c++
+/// The 'OpAsmPrinter' class is a stream that allows for formatting
+/// strings, attributes, operands, types, etc.
+static void print(mlir::OpAsmPrinter &printer, PrintOp op) {
+  printer << "toy.print " << op.input();
+  printer.printOptionalAttrDict(op.getAttrs());
+  printer << " : " << op.input().getType();
+}
+
+/// The 'OpAsmParser' class provides a collection of methods for parsing
+/// various punctuation, as well as attributes, operands, types, etc. Each of
+/// these methods returns a `ParseResult`. This class is a wrapper around
+/// `LogicalResult` that can be converted to a boolean `true` value on failure,
+/// or `false` on success. This allows for easily chaining together a set of
+/// parser rules. These rules are used to populate an `mlir::OperationState`
+/// similarly to the `build` methods described above.
+static mlir::ParseResult parsePrintOp(mlir::OpAsmParser &parser,
+                                      mlir::OperationState &result) {
+  // Parse the input operand, the attribute dictionary, and the type of the
+  // input.
+  OpAsmParser::OperandType inputOperand;
+  Type inputType;
+  if (parser.parseOperand(inputOperand) ||
+      parser.parseOptionalAttrDict(result.attributes) || parser.parseColon() ||
+      parser.parseType(inputType))
+    return mlir::failure();
+
+  // Resolve the input operand to the type we parsed in.
+  if (parser.resolveOperand(inputOperand, inputType, result.operands))
+    return mlir::failure();
+
+  return mlir::success();
+}
+```
+
+With the C++ implementation defined, let's see how this can be mapped to the
+[declarative format](../../OpDefinitions.md#declarative-assembly-format). The
+declarative format is largely composed of three different components:
+
+*   Directives
+    -   A type of builtin function, with an optional set of arguments.
+*   Literals
+    -   A keyword or punctuation surrounded by \`\`.
+*   Variables
+    -   An entity that has been registered on the operation itself, i.e. an
+        argument (attribute or operand), result, etc. In the `PrintOp` example
+        above, a variable would be `$input`.
+
+A direct mapping of our C++ format looks something like:
+
+```tablegen
+/// Consider a stripped definition of `toy.print` here.
+def PrintOp : Toy_Op<"print"> {
+  let arguments = (ins F64Tensor:$input);
+
+  // In the following format we have two directives, `attr-dict` and `type`.
+  // These correspond to the attribute dictionary and the type of a given
+  // variable respectively.
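+  // Note that `attr-dict` prints nothing when the attribute dictionary is
+  // empty, which is what keeps the common printed form of this op compact.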
+  let assemblyFormat = "$input attr-dict `:` type($input)";
+}
+```
+
+The [declarative format](../../OpDefinitions.md#declarative-assembly-format) has
+many more interesting features, so be sure to check it out before implementing a
+custom format in C++. After beautifying the format of a few of our operations we
+now get much more readable IR:
+
+```mlir
+module {
+  func @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64> {
+    %0 = "toy.transpose"(%arg0) : (tensor<*xf64>) -> tensor<*xf64> loc("test/codegen.toy":5:10)
+    %1 = "toy.transpose"(%arg1) : (tensor<*xf64>) -> tensor<*xf64> loc("test/codegen.toy":5:25)
+    %2 = "toy.mul"(%0, %1) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64> loc("test/codegen.toy":5:25)
+    toy.return %2 : tensor<*xf64> loc("test/codegen.toy":5:3)
+  } loc("test/codegen.toy":4:1)
+  func @main() {
+    %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64> loc("test/codegen.toy":9:17)
+    %1 = "toy.reshape"(%0) : (tensor<2x3xf64>) -> tensor<2x3xf64> loc("test/codegen.toy":9:3)
+    %2 = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64> loc("test/codegen.toy":10:17)
+    %3 = "toy.reshape"(%2) : (tensor<6xf64>) -> tensor<2x3xf64> loc("test/codegen.toy":10:3)
+    %4 = toy.generic_call @multiply_transpose(%1, %3) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> loc("test/codegen.toy":11:11)
+    %5 = toy.generic_call @multiply_transpose(%3, %1) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> loc("test/codegen.toy":12:11)
+    toy.print %5 : tensor<*xf64> loc("test/codegen.toy":13:3)
+    toy.return loc("test/codegen.toy":8:1)
+  } loc("test/codegen.toy":8:1)
+} loc("test/codegen.toy":0:0)
+```
+
+Adding a custom assembly format to the rest of the Toy operations is left as an
+exercise for the reader.
+
 Above we introduce several of the concepts for defining operations in the ODS
 framework, but there are many more that we haven't had a chance to: regions,
 variadic operands, etc.
 Check out the
@@ -550,17 +727,17 @@
     %0 = "toy.transpose"(%arg0) : (tensor<*xf64>) -> tensor<*xf64> loc("test/codegen.toy":5:10)
     %1 = "toy.transpose"(%arg1) : (tensor<*xf64>) -> tensor<*xf64> loc("test/codegen.toy":5:25)
     %2 = "toy.mul"(%0, %1) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64> loc("test/codegen.toy":5:25)
-    "toy.return"(%2) : (tensor<*xf64>) -> () loc("test/codegen.toy":5:3)
+    toy.return %2 : tensor<*xf64> loc("test/codegen.toy":5:3)
   } loc("test/codegen.toy":4:1)
   func @main() {
     %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64> loc("test/codegen.toy":9:17)
     %1 = "toy.reshape"(%0) : (tensor<2x3xf64>) -> tensor<2x3xf64> loc("test/codegen.toy":9:3)
     %2 = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64> loc("test/codegen.toy":10:17)
     %3 = "toy.reshape"(%2) : (tensor<6xf64>) -> tensor<2x3xf64> loc("test/codegen.toy":10:3)
-    %4 = "toy.generic_call"(%1, %3) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> loc("test/codegen.toy":11:11)
-    %5 = "toy.generic_call"(%3, %1) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> loc("test/codegen.toy":12:11)
-    "toy.print"(%5) : (tensor<*xf64>) -> () loc("test/codegen.toy":13:3)
-    "toy.return"() : () -> () loc("test/codegen.toy":8:1)
+    %4 = toy.generic_call @multiply_transpose(%1, %3) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> loc("test/codegen.toy":11:11)
+    %5 = toy.generic_call @multiply_transpose(%3, %1) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> loc("test/codegen.toy":12:11)
+    toy.print %5 : tensor<*xf64> loc("test/codegen.toy":13:3)
+    toy.return loc("test/codegen.toy":8:1)
   } loc("test/codegen.toy":8:1)
 } loc("test/codegen.toy":0:0)
 ```
diff --git a/mlir/docs/Tutorials/Toy/Ch-3.md b/mlir/docs/Tutorials/Toy/Ch-3.md
--- a/mlir/docs/Tutorials/Toy/Ch-3.md
+++ b/mlir/docs/Tutorials/Toy/Ch-3.md
@@ -40,7 +40,7 @@
 func @transpose_transpose(%arg0: tensor<*xf64>) -> tensor<*xf64> {
   %0 = "toy.transpose"(%arg0) : (tensor<*xf64>) -> tensor<*xf64>
   %1 = "toy.transpose"(%0) : (tensor<*xf64>) -> tensor<*xf64>
-  "toy.return"(%1) : (tensor<*xf64>) -> ()
+  toy.return %1 : tensor<*xf64>
 }
 ```
@@ -234,8 +234,8 @@
   %1 = "toy.reshape"(%0) : (tensor<2xf64>) -> tensor<2x1xf64>
   %2 = "toy.reshape"(%1) : (tensor<2x1xf64>) -> tensor<2x1xf64>
   %3 = "toy.reshape"(%2) : (tensor<2x1xf64>) -> tensor<2x1xf64>
-  "toy.print"(%3) : (tensor<2x1xf64>) -> ()
-  "toy.return"() : () -> ()
+  toy.print %3 : tensor<2x1xf64>
+  toy.return
  }
}
```
@@ -248,8 +248,8 @@
 func @main() {
   %0 = "toy.constant"() {value = dense<[[1.000000e+00], [2.000000e+00]]> \
                        : tensor<2x1xf64>} : () -> tensor<2x1xf64>
-  "toy.print"(%0) : (tensor<2x1xf64>) -> ()
-  "toy.return"() : () -> ()
+  toy.print %0 : tensor<2x1xf64>
+  toy.return
  }
}
```
diff --git a/mlir/docs/Tutorials/Toy/Ch-4.md b/mlir/docs/Tutorials/Toy/Ch-4.md
--- a/mlir/docs/Tutorials/Toy/Ch-4.md
+++ b/mlir/docs/Tutorials/Toy/Ch-4.md
@@ -153,17 +153,17 @@
   %0 = "toy.transpose"(%arg0) : (tensor<*xf64>) -> tensor<*xf64>
   %1 = "toy.transpose"(%arg1) : (tensor<*xf64>) -> tensor<*xf64>
   %2 = "toy.mul"(%0, %1) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
-  "toy.return"(%2) : (tensor<*xf64>) -> ()
+  toy.return %2 : tensor<*xf64>
 }
 func @main() {
   %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
   %1 = "toy.reshape"(%0) : (tensor<2x3xf64>) -> tensor<2x3xf64>
   %2 = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
   %3 = "toy.reshape"(%2) : (tensor<6xf64>) -> tensor<2x3xf64>
-  %4 = "toy.generic_call"(%1, %3) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-  %5 = "toy.generic_call"(%3, %1) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-  "toy.print"(%5) : (tensor<*xf64>) -> ()
-  "toy.return"() : () -> ()
+  %4 = toy.generic_call @multiply_transpose(%1, %3) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+  %5 = toy.generic_call @multiply_transpose(%3, %1) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+  toy.print %5 : tensor<*xf64>
+  toy.return
 }
```
@@ -226,8 +226,8 @@
   %4 = "toy.transpose"(%2) : (tensor<*xf64>) -> tensor<*xf64>
   %5 = "toy.transpose"(%3) : (tensor<*xf64>) -> tensor<*xf64>
   %6 = "toy.mul"(%4, %5) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
-  "toy.print"(%6) : (tensor<*xf64>) -> ()
-  "toy.return"() : () -> ()
+  toy.print %6 : tensor<*xf64>
+  toy.return
 }
```
@@ -374,8 +374,8 @@
   %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
   %1 = "toy.transpose"(%0) : (tensor<2x3xf64>) -> tensor<3x2xf64>
   %2 = "toy.mul"(%1, %1) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64>
-  "toy.print"(%2) : (tensor<3x2xf64>) -> ()
-  "toy.return"() : () -> ()
+  toy.print %2 : tensor<3x2xf64>
+  toy.return
 }
```
diff --git a/mlir/docs/Tutorials/Toy/Ch-5.md b/mlir/docs/Tutorials/Toy/Ch-5.md
--- a/mlir/docs/Tutorials/Toy/Ch-5.md
+++ b/mlir/docs/Tutorials/Toy/Ch-5.md
@@ -242,8 +242,8 @@
   %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
   %2 = "toy.transpose"(%0) : (tensor<2x3xf64>) -> tensor<3x2xf64>
   %3 = "toy.mul"(%2, %2) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64>
-  "toy.print"(%3) : (tensor<3x2xf64>) -> ()
-  "toy.return"() : () -> ()
+  toy.print %3 : tensor<3x2xf64>
+  toy.return
 }
```
@@ -291,7 +291,7 @@
   }
 
   // Print the value held by the buffer.
-  "toy.print"(%0) : (memref<3x2xf64>) -> ()
+  toy.print %0 : memref<3x2xf64>
   dealloc %2 : memref<2x3xf64>
   dealloc %1 : memref<3x2xf64>
   dealloc %0 : memref<3x2xf64>
@@ -340,7 +340,7 @@
   }
 
   // Print the value held by the buffer.
- "toy.print"(%0) : (memref<3x2xf64>) -> () + toy.print %0 : memref<3x2xf64> dealloc %1 : memref<2x3xf64> dealloc %0 : memref<3x2xf64> return diff --git a/mlir/docs/Tutorials/Toy/Ch-6.md b/mlir/docs/Tutorials/Toy/Ch-6.md --- a/mlir/docs/Tutorials/Toy/Ch-6.md +++ b/mlir/docs/Tutorials/Toy/Ch-6.md @@ -118,8 +118,8 @@ %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64> %2 = "toy.transpose"(%0) : (tensor<2x3xf64>) -> tensor<3x2xf64> %3 = "toy.mul"(%2, %2) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64> - "toy.print"(%3) : (tensor<3x2xf64>) -> () - "toy.return"() : () -> () + toy.print %3 : tensor<3x2xf64> + toy.return } ``` diff --git a/mlir/docs/Tutorials/Toy/Ch-7.md b/mlir/docs/Tutorials/Toy/Ch-7.md --- a/mlir/docs/Tutorials/Toy/Ch-7.md +++ b/mlir/docs/Tutorials/Toy/Ch-7.md @@ -342,7 +342,7 @@ ```mlir module { func @multiply_transpose(%arg0: !toy.struct, tensor<*xf64>>) { - "toy.return"() : () -> () + toy.return } } ``` @@ -441,13 +441,13 @@ %2 = "toy.struct_access"(%arg0) {index = 1 : i64} : (!toy.struct, tensor<*xf64>>) -> tensor<*xf64> %3 = "toy.transpose"(%2) : (tensor<*xf64>) -> tensor<*xf64> %4 = "toy.mul"(%1, %3) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64> - "toy.return"(%4) : (tensor<*xf64>) -> () + toy.return %4 : tensor<*xf64> } func @main() { %0 = "toy.struct_constant"() {value = [dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>, dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>]} : () -> !toy.struct, tensor<*xf64>> - %1 = "toy.generic_call"(%0) {callee = @multiply_transpose} : (!toy.struct, tensor<*xf64>>) -> tensor<*xf64> - "toy.print"(%1) : (tensor<*xf64>) -> () - "toy.return"() : () -> () + %1 = toy.generic_call @multiply_transpose(%0) : (!toy.struct, tensor<*xf64>>) -> tensor<*xf64> + toy.print %1 : tensor<*xf64> + toy.return } } ``` @@ -468,8 +468,8 @@ %3 = "toy.struct_access"(%0) {index = 1 : i64} : (!toy.struct, tensor<*xf64>>) -> tensor<*xf64> %4 = "toy.transpose"(%3) : (tensor<*xf64>) -> tensor<*xf64> %5 = "toy.mul"(%2, %4) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64> - "toy.print"(%5) : (tensor<*xf64>) -> () - "toy.return"() : () -> () + toy.print %5 : tensor<*xf64> + toy.return } } ``` @@ -527,8 +527,8 @@ %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64> %1 = "toy.transpose"(%0) : (tensor<2x3xf64>) -> tensor<3x2xf64> %2 = "toy.mul"(%1, %1) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64> - "toy.print"(%2) : (tensor<3x2xf64>) -> () - "toy.return"() : () -> () + toy.print %2 : tensor<3x2xf64> + toy.return } } ``` diff --git a/mlir/examples/toy/Ch2/include/toy/Ops.td b/mlir/examples/toy/Ch2/include/toy/Ops.td --- a/mlir/examples/toy/Ch2/include/toy/Ops.td +++ b/mlir/examples/toy/Ch2/include/toy/Ops.td @@ -102,7 +102,7 @@ arguments expected by the callee. For example: ```mlir - %4 = "toy.generic_call"(%1, %3) {callee = @my_func} + %4 = toy.generic_call @my_func(%1, %3) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64> ``` @@ -117,6 +117,11 @@ // The generic call operation returns a single value of TensorType. let results = (outs F64Tensor); + // The return operation only emits the input in the format if it is present. 
+  let assemblyFormat = [{
+    $callee `(` $inputs `)` attr-dict `:` functional-type($inputs, results)
+  }];
+
   // Add custom build methods for the generic call operation.
   let builders = [
     OpBuilder<"Builder *builder, OperationState &state, "
@@ -149,6 +154,8 @@
   // The print operation takes an input tensor to print.
   let arguments = (ins F64Tensor:$input);
+
+  let assemblyFormat = "$input attr-dict `:` type($input)";
 }
 
 def ReshapeOp : Toy_Op<"reshape"> {
@@ -188,6 +195,9 @@
   // value must match the return type of the enclosing function.
   let arguments = (ins Variadic<F64Tensor>:$input);
 
+  // The return operation only emits the input in the format if it is present.
+  let assemblyFormat = "($input^ `:` type($input))? attr-dict";
+
   // Allow building a ReturnOp with no return operand.
   let builders = [OpBuilder<
     "Builder *b, OperationState &state", [{ build(b, state, llvm::None); }]
diff --git a/mlir/examples/toy/Ch2/mlir/Dialect.cpp b/mlir/examples/toy/Ch2/mlir/Dialect.cpp
--- a/mlir/examples/toy/Ch2/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch2/mlir/Dialect.cpp
@@ -14,6 +14,7 @@
 #include "toy/Dialect.h"
 
 #include "mlir/IR/Builders.h"
+#include "mlir/IR/OpImplementation.h"
 #include "mlir/IR/StandardTypes.h"
 
 using namespace mlir;
diff --git a/mlir/examples/toy/Ch3/include/toy/Ops.td b/mlir/examples/toy/Ch3/include/toy/Ops.td
--- a/mlir/examples/toy/Ch3/include/toy/Ops.td
+++ b/mlir/examples/toy/Ch3/include/toy/Ops.td
@@ -102,7 +102,7 @@
     arguments expected by the callee. For example:
 
     ```mlir
-     %4 = "toy.generic_call"(%1, %3) {callee = @my_func}
+     %4 = toy.generic_call @my_func(%1, %3)
           : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
     ```
@@ -117,6 +117,11 @@
   // The generic call operation returns a single value of TensorType.
   let results = (outs F64Tensor);
 
+  // Specify the assembly format of the generic call operation.
+  let assemblyFormat = [{
+    $callee `(` $inputs `)` attr-dict `:` functional-type($inputs, results)
+  }];
+
   // Add custom build methods for the generic call operation.
   let builders = [
     OpBuilder<"Builder *builder, OperationState &state, "
@@ -149,6 +154,8 @@
   // The print operation takes an input tensor to print.
   let arguments = (ins F64Tensor:$input);
+
+  let assemblyFormat = "$input attr-dict `:` type($input)";
 }
 
 def ReshapeOp : Toy_Op<"reshape", [NoSideEffect]> {
@@ -191,6 +198,9 @@
   // value must match the return type of the enclosing function.
   let arguments = (ins Variadic<F64Tensor>:$input);
 
+  // The return operation only emits the input in the format if it is present.
+  let assemblyFormat = "($input^ `:` type($input))? attr-dict";
+
   // Allow building a ReturnOp with no return operand.
   let builders = [OpBuilder<
     "Builder *b, OperationState &state", [{ build(b, state, llvm::None); }]
diff --git a/mlir/examples/toy/Ch3/mlir/Dialect.cpp b/mlir/examples/toy/Ch3/mlir/Dialect.cpp
--- a/mlir/examples/toy/Ch3/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch3/mlir/Dialect.cpp
@@ -14,6 +14,7 @@
 #include "toy/Dialect.h"
 
 #include "mlir/IR/Builders.h"
+#include "mlir/IR/OpImplementation.h"
 #include "mlir/IR/StandardTypes.h"
 
 using namespace mlir;
diff --git a/mlir/examples/toy/Ch4/include/toy/Ops.td b/mlir/examples/toy/Ch4/include/toy/Ops.td
--- a/mlir/examples/toy/Ch4/include/toy/Ops.td
+++ b/mlir/examples/toy/Ch4/include/toy/Ops.td
@@ -124,7 +124,7 @@
     arguments expected by the callee. For example:
 
    ```mlir
-     %4 = "toy.generic_call"(%1, %3) {callee = @my_func}
+     %4 = toy.generic_call @my_func(%1, %3)
           : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
     ```
@@ -139,6 +139,11 @@
   // The generic call operation returns a single value of TensorType.
   let results = (outs F64Tensor);
 
+  // Specify the assembly format of the generic call operation.
+  let assemblyFormat = [{
+    $callee `(` $inputs `)` attr-dict `:` functional-type($inputs, results)
+  }];
+
   // Add custom build methods for the generic call operation.
   let builders = [
     OpBuilder<"Builder *builder, OperationState &state, "
@@ -172,6 +177,8 @@
   // The print operation takes an input tensor to print.
   let arguments = (ins F64Tensor:$input);
+
+  let assemblyFormat = "$input attr-dict `:` type($input)";
 }
 
 def ReshapeOp : Toy_Op<"reshape", [NoSideEffect]> {
@@ -212,6 +219,9 @@
   // value must match the return type of the enclosing function.
   let arguments = (ins Variadic<F64Tensor>:$input);
 
+  // The return operation only emits the input in the format if it is present.
+  let assemblyFormat = "($input^ `:` type($input))? attr-dict";
+
   // Allow building a ReturnOp with no return operand.
   let builders = [OpBuilder<
     "Builder *b, OperationState &state", [{ build(b, state, llvm::None); }]
diff --git a/mlir/examples/toy/Ch4/mlir/Dialect.cpp b/mlir/examples/toy/Ch4/mlir/Dialect.cpp
--- a/mlir/examples/toy/Ch4/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch4/mlir/Dialect.cpp
@@ -14,6 +14,7 @@
 #include "toy/Dialect.h"
 
 #include "mlir/IR/Builders.h"
+#include "mlir/IR/OpImplementation.h"
 #include "mlir/IR/StandardTypes.h"
 #include "mlir/Transforms/InliningUtils.h"
 
diff --git a/mlir/examples/toy/Ch5/include/toy/Ops.td b/mlir/examples/toy/Ch5/include/toy/Ops.td
--- a/mlir/examples/toy/Ch5/include/toy/Ops.td
+++ b/mlir/examples/toy/Ch5/include/toy/Ops.td
@@ -124,7 +124,7 @@
     arguments expected by the callee. For example:
 
     ```mlir
-     %4 = "toy.generic_call"(%1, %3) {callee = @my_func}
+     %4 = toy.generic_call @my_func(%1, %3)
           : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
     ```
@@ -139,6 +139,11 @@
   // The generic call operation returns a single value of TensorType.
   let results = (outs F64Tensor);
 
+  // Specify the assembly format of the generic call operation.
+  let assemblyFormat = [{
+    $callee `(` $inputs `)` attr-dict `:` functional-type($inputs, results)
+  }];
+
   // Add custom build methods for the generic call operation.
   let builders = [
     OpBuilder<"Builder *builder, OperationState &state, "
@@ -173,6 +178,8 @@
   // The print operation takes an input tensor to print.
   // We also allow a F64MemRef to enable interop during partial lowering.
   let arguments = (ins AnyTypeOf<[F64Tensor, F64MemRef]>:$input);
+
+  let assemblyFormat = "$input attr-dict `:` type($input)";
 }
 
 def ReshapeOp : Toy_Op<"reshape", [NoSideEffect]> {
@@ -213,6 +220,9 @@
   // value must match the return type of the enclosing function.
   let arguments = (ins Variadic<F64Tensor>:$input);
 
+  // The return operation only emits the input in the format if it is present.
+  let assemblyFormat = "($input^ `:` type($input))? attr-dict";
+
   // Allow building a ReturnOp with no return operand.
   let builders = [OpBuilder<
     "Builder *b, OperationState &state", [{ build(b, state, llvm::None); }]
diff --git a/mlir/examples/toy/Ch5/mlir/Dialect.cpp b/mlir/examples/toy/Ch5/mlir/Dialect.cpp
--- a/mlir/examples/toy/Ch5/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch5/mlir/Dialect.cpp
@@ -14,6 +14,7 @@
 #include "toy/Dialect.h"
 
 #include "mlir/IR/Builders.h"
+#include "mlir/IR/OpImplementation.h"
 #include "mlir/IR/StandardTypes.h"
 #include "mlir/Transforms/InliningUtils.h"
 
diff --git a/mlir/examples/toy/Ch6/include/toy/Ops.td b/mlir/examples/toy/Ch6/include/toy/Ops.td
--- a/mlir/examples/toy/Ch6/include/toy/Ops.td
+++ b/mlir/examples/toy/Ch6/include/toy/Ops.td
@@ -124,7 +124,7 @@
     arguments expected by the callee. For example:
 
     ```mlir
-     %4 = "toy.generic_call"(%1, %3) {callee = @my_func}
+     %4 = toy.generic_call @my_func(%1, %3)
           : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
     ```
@@ -139,6 +139,11 @@
   // The generic call operation returns a single value of TensorType.
   let results = (outs F64Tensor);
 
+  // Specify the assembly format of the generic call operation.
+  let assemblyFormat = [{
+    $callee `(` $inputs `)` attr-dict `:` functional-type($inputs, results)
+  }];
+
   // Add custom build methods for the generic call operation.
   let builders = [
     OpBuilder<"Builder *builder, OperationState &state, "
@@ -173,6 +178,8 @@
   // The print operation takes an input tensor to print.
   // We also allow a F64MemRef to enable interop during partial lowering.
   let arguments = (ins AnyTypeOf<[F64Tensor, F64MemRef]>:$input);
+
+  let assemblyFormat = "$input attr-dict `:` type($input)";
 }
 
 def ReshapeOp : Toy_Op<"reshape", [NoSideEffect]> {
@@ -213,6 +220,9 @@
   // value must match the return type of the enclosing function.
   let arguments = (ins Variadic<F64Tensor>:$input);
 
+  // The return operation only emits the input in the format if it is present.
+  let assemblyFormat = "($input^ `:` type($input))? attr-dict";
+
   // Allow building a ReturnOp with no return operand.
   let builders = [OpBuilder<
     "Builder *b, OperationState &state", [{ build(b, state, llvm::None); }]
diff --git a/mlir/examples/toy/Ch6/mlir/Dialect.cpp b/mlir/examples/toy/Ch6/mlir/Dialect.cpp
--- a/mlir/examples/toy/Ch6/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch6/mlir/Dialect.cpp
@@ -14,6 +14,7 @@
 #include "toy/Dialect.h"
 
 #include "mlir/IR/Builders.h"
+#include "mlir/IR/OpImplementation.h"
 #include "mlir/IR/StandardTypes.h"
 #include "mlir/Transforms/InliningUtils.h"
 
diff --git a/mlir/examples/toy/Ch7/include/toy/Ops.td b/mlir/examples/toy/Ch7/include/toy/Ops.td
--- a/mlir/examples/toy/Ch7/include/toy/Ops.td
+++ b/mlir/examples/toy/Ch7/include/toy/Ops.td
@@ -136,7 +136,7 @@
     arguments expected by the callee. For example:
 
     ```mlir
-     %4 = "toy.generic_call"(%1, %3) {callee = @my_func}
+     %4 = toy.generic_call @my_func(%1, %3)
           : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
     ```
@@ -152,6 +152,11 @@
   // StructType.
   let results = (outs Toy_Type);
 
+  // Specify the assembly format of the generic call operation.
+  let assemblyFormat = [{
+    $callee `(` $inputs `)` attr-dict `:` functional-type($inputs, results)
+  }];
+
   // Add custom build methods for the generic call operation.
   let builders = [
     OpBuilder<"Builder *builder, OperationState &state, "
@@ -186,6 +191,8 @@
   // The print operation takes an input tensor to print.
   // We also allow a F64MemRef to enable interop during partial lowering.
   let arguments = (ins AnyTypeOf<[F64Tensor, F64MemRef]>:$input);
+
+  let assemblyFormat = "$input attr-dict `:` type($input)";
 }
 
 def ReshapeOp : Toy_Op<"reshape", [NoSideEffect]> {
@@ -226,6 +233,9 @@
   // value must match the return type of the enclosing function.
   let arguments = (ins Variadic<Toy_Type>:$input);
 
+  // The return operation only emits the input in the format if it is present.
+  let assemblyFormat = "($input^ `:` type($input))? attr-dict";
+
   // Allow building a ReturnOp with no return operand.
   let builders = [OpBuilder<
     "Builder *b, OperationState &state", [{ build(b, state, llvm::None); }]
diff --git a/mlir/examples/toy/Ch7/mlir/Dialect.cpp b/mlir/examples/toy/Ch7/mlir/Dialect.cpp
--- a/mlir/examples/toy/Ch7/mlir/Dialect.cpp
+++ b/mlir/examples/toy/Ch7/mlir/Dialect.cpp
@@ -15,6 +15,7 @@
 
 #include "mlir/IR/Builders.h"
 #include "mlir/IR/DialectImplementation.h"
+#include "mlir/IR/OpImplementation.h"
 #include "mlir/IR/StandardTypes.h"
 #include "mlir/Transforms/InliningUtils.h"
 
diff --git a/mlir/test/Examples/Toy/Ch2/codegen.toy b/mlir/test/Examples/Toy/Ch2/codegen.toy
--- a/mlir/test/Examples/Toy/Ch2/codegen.toy
+++ b/mlir/test/Examples/Toy/Ch2/codegen.toy
@@ -18,14 +18,14 @@
 # CHECK:         [[VAL_2:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<*xf64>) -> tensor<*xf64>
 # CHECK-NEXT:    [[VAL_3:%.*]] = "toy.transpose"([[VAL_1]]) : (tensor<*xf64>) -> tensor<*xf64>
 # CHECK-NEXT:    [[VAL_4:%.*]] = "toy.mul"([[VAL_2]], [[VAL_3]]) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
-# CHECK-NEXT:    "toy.return"([[VAL_4]]) : (tensor<*xf64>) -> ()
+# CHECK-NEXT:    toy.return [[VAL_4]] : tensor<*xf64>
 
 # CHECK-LABEL: func @main()
 # CHECK-NEXT:    [[VAL_5:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
 # CHECK-NEXT:    [[VAL_6:%.*]] = "toy.reshape"([[VAL_5]]) : (tensor<2x3xf64>) -> tensor<2x3xf64>
 # CHECK-NEXT:    [[VAL_7:%.*]] = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
 # CHECK-NEXT:    [[VAL_8:%.*]] = "toy.reshape"([[VAL_7]]) : (tensor<6xf64>) -> tensor<2x3xf64>
-# CHECK-NEXT:    [[VAL_9:%.*]] = "toy.generic_call"([[VAL_6]], [[VAL_8]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-# CHECK-NEXT:    [[VAL_10:%.*]] = "toy.generic_call"([[VAL_8]], [[VAL_6]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-# CHECK-NEXT:    "toy.print"([[VAL_10]]) : (tensor<*xf64>) -> ()
-# CHECK-NEXT:    "toy.return"() : () -> ()
+# CHECK-NEXT:    [[VAL_9:%.*]] = toy.generic_call @multiply_transpose([[VAL_6]], [[VAL_8]]) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+# CHECK-NEXT:    [[VAL_10:%.*]] = toy.generic_call @multiply_transpose([[VAL_8]], [[VAL_6]]) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+# CHECK-NEXT:    toy.print [[VAL_10]] : tensor<*xf64>
+# CHECK-NEXT:    toy.return
diff --git a/mlir/test/Examples/Toy/Ch2/scalar.toy b/mlir/test/Examples/Toy/Ch2/scalar.toy
--- a/mlir/test/Examples/Toy/Ch2/scalar.toy
+++ b/mlir/test/Examples/Toy/Ch2/scalar.toy
@@ -8,7 +8,7 @@
 # CHECK-LABEL: func @main() {
 # CHECK-NEXT:    %0 = "toy.constant"() {value = dense<5.500000e+00> : tensor<f64>} : () -> tensor<f64>
 # CHECK-NEXT:    %1 = "toy.reshape"(%0) : (tensor<f64>) -> tensor<2x2xf64>
-# CHECK-NEXT:    "toy.print"(%1) : (tensor<2x2xf64>) -> ()
-# CHECK-NEXT:    "toy.return"() : () -> ()
+# CHECK-NEXT:    toy.print %1 : tensor<2x2xf64>
+# CHECK-NEXT:    toy.return
 # CHECK-NEXT:  }
diff --git a/mlir/test/Examples/Toy/Ch3/codegen.toy b/mlir/test/Examples/Toy/Ch3/codegen.toy
--- a/mlir/test/Examples/Toy/Ch3/codegen.toy
+++ b/mlir/test/Examples/Toy/Ch3/codegen.toy
@@ -18,14 +18,14 @@
 # CHECK:         [[VAL_2:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<*xf64>) -> tensor<*xf64>
 # CHECK-NEXT:    [[VAL_3:%.*]] = "toy.transpose"([[VAL_1]]) : (tensor<*xf64>) -> tensor<*xf64>
 # CHECK-NEXT:    [[VAL_4:%.*]] = "toy.mul"([[VAL_2]], [[VAL_3]]) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
-# CHECK-NEXT:    "toy.return"([[VAL_4]]) : (tensor<*xf64>) -> ()
+# CHECK-NEXT:    toy.return [[VAL_4]] : tensor<*xf64>
 
 # CHECK-LABEL: func @main()
 # CHECK-NEXT:    [[VAL_5:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
 # CHECK-NEXT:    [[VAL_6:%.*]] = "toy.reshape"([[VAL_5]]) : (tensor<2x3xf64>) -> tensor<2x3xf64>
 # CHECK-NEXT:    [[VAL_7:%.*]] = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
 # CHECK-NEXT:    [[VAL_8:%.*]] = "toy.reshape"([[VAL_7]]) : (tensor<6xf64>) -> tensor<2x3xf64>
-# CHECK-NEXT:    [[VAL_9:%.*]] = "toy.generic_call"([[VAL_6]], [[VAL_8]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-# CHECK-NEXT:    [[VAL_10:%.*]] = "toy.generic_call"([[VAL_8]], [[VAL_6]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-# CHECK-NEXT:    "toy.print"([[VAL_10]]) : (tensor<*xf64>) -> ()
-# CHECK-NEXT:    "toy.return"() : () -> ()
+# CHECK-NEXT:    [[VAL_9:%.*]] = toy.generic_call @multiply_transpose([[VAL_6]], [[VAL_8]]) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+# CHECK-NEXT:    [[VAL_10:%.*]] = toy.generic_call @multiply_transpose([[VAL_8]], [[VAL_6]]) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+# CHECK-NEXT:    toy.print [[VAL_10]] : tensor<*xf64>
+# CHECK-NEXT:    toy.return
diff --git a/mlir/test/Examples/Toy/Ch3/scalar.toy b/mlir/test/Examples/Toy/Ch3/scalar.toy
--- a/mlir/test/Examples/Toy/Ch3/scalar.toy
+++ b/mlir/test/Examples/Toy/Ch3/scalar.toy
@@ -8,7 +8,7 @@
 # CHECK-LABEL: func @main() {
 # CHECK-NEXT:    %0 = "toy.constant"() {value = dense<5.500000e+00> : tensor<f64>} : () -> tensor<f64>
 # CHECK-NEXT:    %1 = "toy.reshape"(%0) : (tensor<f64>) -> tensor<2x2xf64>
-# CHECK-NEXT:    "toy.print"(%1) : (tensor<2x2xf64>) -> ()
-# CHECK-NEXT:    "toy.return"() : () -> ()
+# CHECK-NEXT:    toy.print %1 : tensor<2x2xf64>
+# CHECK-NEXT:    toy.return
 # CHECK-NEXT:  }
diff --git a/mlir/test/Examples/Toy/Ch4/codegen.toy b/mlir/test/Examples/Toy/Ch4/codegen.toy
--- a/mlir/test/Examples/Toy/Ch4/codegen.toy
+++ b/mlir/test/Examples/Toy/Ch4/codegen.toy
@@ -18,14 +18,14 @@
 # CHECK:         [[VAL_2:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<*xf64>) -> tensor<*xf64>
 # CHECK-NEXT:    [[VAL_3:%.*]] = "toy.transpose"([[VAL_1]]) : (tensor<*xf64>) -> tensor<*xf64>
 # CHECK-NEXT:    [[VAL_4:%.*]] = "toy.mul"([[VAL_2]], [[VAL_3]]) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
-# CHECK-NEXT:    "toy.return"([[VAL_4]]) : (tensor<*xf64>) -> ()
+# CHECK-NEXT:    toy.return [[VAL_4]] : tensor<*xf64>
 
 # CHECK-LABEL: func @main()
 # CHECK-NEXT:    [[VAL_5:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
 # CHECK-NEXT:    [[VAL_6:%.*]] = "toy.reshape"([[VAL_5]]) : (tensor<2x3xf64>) -> tensor<2x3xf64>
 # CHECK-NEXT:    [[VAL_7:%.*]] = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
 # CHECK-NEXT:    [[VAL_8:%.*]] = "toy.reshape"([[VAL_7]]) : (tensor<6xf64>) -> tensor<2x3xf64>
-# CHECK-NEXT:    [[VAL_9:%.*]] = "toy.generic_call"([[VAL_6]], [[VAL_8]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-# CHECK-NEXT:    [[VAL_10:%.*]] = "toy.generic_call"([[VAL_8]], [[VAL_6]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-# CHECK-NEXT:    "toy.print"([[VAL_10]]) : (tensor<*xf64>) -> ()
-# CHECK-NEXT:    "toy.return"() : () -> ()
+# CHECK-NEXT:    [[VAL_9:%.*]] = toy.generic_call @multiply_transpose([[VAL_6]], [[VAL_8]]) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+# CHECK-NEXT:    [[VAL_10:%.*]] = toy.generic_call @multiply_transpose([[VAL_8]], [[VAL_6]]) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+# CHECK-NEXT:    toy.print [[VAL_10]] : tensor<*xf64>
+# CHECK-NEXT:    toy.return
diff --git a/mlir/test/Examples/Toy/Ch4/scalar.toy b/mlir/test/Examples/Toy/Ch4/scalar.toy
--- a/mlir/test/Examples/Toy/Ch4/scalar.toy
+++ b/mlir/test/Examples/Toy/Ch4/scalar.toy
@@ -8,7 +8,7 @@
 # CHECK-LABEL: func @main() {
 # CHECK-NEXT:    %0 = "toy.constant"() {value = dense<5.500000e+00> : tensor<f64>} : () -> tensor<f64>
 # CHECK-NEXT:    %1 = "toy.reshape"(%0) : (tensor<f64>) -> tensor<2x2xf64>
-# CHECK-NEXT:    "toy.print"(%1) : (tensor<2x2xf64>) -> ()
-# CHECK-NEXT:    "toy.return"() : () -> ()
+# CHECK-NEXT:    toy.print %1 : tensor<2x2xf64>
+# CHECK-NEXT:    toy.return
 # CHECK-NEXT:  }
diff --git a/mlir/test/Examples/Toy/Ch4/shape_inference.mlir b/mlir/test/Examples/Toy/Ch4/shape_inference.mlir
--- a/mlir/test/Examples/Toy/Ch4/shape_inference.mlir
+++ b/mlir/test/Examples/Toy/Ch4/shape_inference.mlir
@@ -7,17 +7,17 @@
   %0 = "toy.transpose"(%arg0) : (tensor<*xf64>) -> tensor<*xf64>
   %1 = "toy.transpose"(%arg1) : (tensor<*xf64>) -> tensor<*xf64>
   %2 = "toy.mul"(%0, %1) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
-  "toy.return"(%2) : (tensor<*xf64>) -> ()
+  toy.return %2 : tensor<*xf64>
 }
 func @main() {
   %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
   %1 = "toy.reshape"(%0) : (tensor<2x3xf64>) -> tensor<2x3xf64>
   %2 = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
   %3 = "toy.reshape"(%2) : (tensor<6xf64>) -> tensor<2x3xf64>
-  %4 = "toy.generic_call"(%1, %3) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-  %5 = "toy.generic_call"(%3, %1) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-  "toy.print"(%5) : (tensor<*xf64>) -> ()
-  "toy.return"() : () -> ()
+  %4 = toy.generic_call @multiply_transpose(%1, %3) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+  %5 = toy.generic_call @multiply_transpose(%3, %1) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+  toy.print %5 : tensor<*xf64>
+  toy.return
 }
 
 // CHECK-NOT: func @multiply_transpose
@@ -27,5 +27,5 @@
 // CHECK: [[VAL_0:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
 // CHECK: [[VAL_1:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<2x3xf64>) -> tensor<3x2xf64>
 // CHECK: [[VAL_2:%.*]] = "toy.mul"([[VAL_1]], [[VAL_1]]) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64>
-// CHECK: "toy.print"([[VAL_2]]) : (tensor<3x2xf64>) -> ()
-// CHECK: "toy.return"() : () -> ()
+// CHECK: toy.print [[VAL_2]] : tensor<3x2xf64>
+// CHECK: toy.return
diff --git a/mlir/test/Examples/Toy/Ch5/affine-lowering.mlir b/mlir/test/Examples/Toy/Ch5/affine-lowering.mlir
--- a/mlir/test/Examples/Toy/Ch5/affine-lowering.mlir
+++ b/mlir/test/Examples/Toy/Ch5/affine-lowering.mlir
@@ -5,8 +5,8 @@
   %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
   %2 = "toy.transpose"(%0) : (tensor<2x3xf64>) -> tensor<3x2xf64>
   %3 = "toy.mul"(%2, %2) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64>
-  "toy.print"(%3) : (tensor<3x2xf64>) -> ()
-  "toy.return"() : () -> ()
+  toy.print %3 : tensor<3x2xf64>
+  toy.return
 }
 
 // CHECK-LABEL: func @main()
@@ -35,7 +35,7 @@
 // CHECK:           [[VAL_15:%.*]] = affine.load [[VAL_7]]{{\[}}[[VAL_12]], [[VAL_13]]] : memref<3x2xf64>
 // CHECK:           [[VAL_16:%.*]] = mulf [[VAL_14]], [[VAL_15]] : f64
 // CHECK:           affine.store [[VAL_16]], [[VAL_6]]{{\[}}[[VAL_12]], [[VAL_13]]] : memref<3x2xf64>
-// CHECK:         "toy.print"([[VAL_6]]) : (memref<3x2xf64>) -> ()
+// CHECK:         toy.print [[VAL_6]] : memref<3x2xf64>
 // CHECK:         dealloc [[VAL_8]] : memref<2x3xf64>
 // CHECK:         dealloc [[VAL_7]] : memref<3x2xf64>
 // CHECK:         dealloc [[VAL_6]] : memref<3x2xf64>
@@ -60,6 +60,6 @@
 // OPT:           [[VAL_10:%.*]] = affine.load [[VAL_7]]{{\[}}[[VAL_9]], [[VAL_8]]] : memref<2x3xf64>
 // OPT:           [[VAL_11:%.*]] = mulf [[VAL_10]], [[VAL_10]] : f64
 // OPT:           affine.store [[VAL_11]], [[VAL_6]]{{\[}}[[VAL_8]], [[VAL_9]]] : memref<3x2xf64>
-// OPT:         "toy.print"([[VAL_6]]) : (memref<3x2xf64>) -> ()
+// OPT:         toy.print [[VAL_6]] : memref<3x2xf64>
 // OPT:         dealloc [[VAL_7]] : memref<2x3xf64>
 // OPT:         dealloc [[VAL_6]] : memref<3x2xf64>
diff --git a/mlir/test/Examples/Toy/Ch5/codegen.toy b/mlir/test/Examples/Toy/Ch5/codegen.toy
--- a/mlir/test/Examples/Toy/Ch5/codegen.toy
+++ b/mlir/test/Examples/Toy/Ch5/codegen.toy
@@ -18,14 +18,14 @@
 # CHECK:         [[VAL_2:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<*xf64>) -> tensor<*xf64>
 # CHECK-NEXT:    [[VAL_3:%.*]] = "toy.transpose"([[VAL_1]]) : (tensor<*xf64>) -> tensor<*xf64>
 # CHECK-NEXT:    [[VAL_4:%.*]] = "toy.mul"([[VAL_2]], [[VAL_3]]) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
-# CHECK-NEXT:    "toy.return"([[VAL_4]]) : (tensor<*xf64>) -> ()
+# CHECK-NEXT:    toy.return [[VAL_4]] : tensor<*xf64>
 
 # CHECK-LABEL: func @main()
 # CHECK-NEXT:    [[VAL_5:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
 # CHECK-NEXT:    [[VAL_6:%.*]] = "toy.reshape"([[VAL_5]]) : (tensor<2x3xf64>) -> tensor<2x3xf64>
 # CHECK-NEXT:    [[VAL_7:%.*]] = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
 # CHECK-NEXT:    [[VAL_8:%.*]] = "toy.reshape"([[VAL_7]]) : (tensor<6xf64>) -> tensor<2x3xf64>
-# CHECK-NEXT:    [[VAL_9:%.*]] = "toy.generic_call"([[VAL_6]], [[VAL_8]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-# CHECK-NEXT:    [[VAL_10:%.*]] = "toy.generic_call"([[VAL_8]], [[VAL_6]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-# CHECK-NEXT:    "toy.print"([[VAL_10]]) : (tensor<*xf64>) -> ()
-# CHECK-NEXT:    "toy.return"() : () -> ()
+# CHECK-NEXT:    [[VAL_9:%.*]] = toy.generic_call @multiply_transpose([[VAL_6]], [[VAL_8]]) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+# CHECK-NEXT:    [[VAL_10:%.*]] = toy.generic_call @multiply_transpose([[VAL_8]], [[VAL_6]]) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+# CHECK-NEXT:    toy.print [[VAL_10]] : tensor<*xf64>
+# CHECK-NEXT:    toy.return
diff --git a/mlir/test/Examples/Toy/Ch5/scalar.toy b/mlir/test/Examples/Toy/Ch5/scalar.toy
--- a/mlir/test/Examples/Toy/Ch5/scalar.toy
+++ b/mlir/test/Examples/Toy/Ch5/scalar.toy
@@ -8,7 +8,7 @@
 # CHECK-LABEL: func @main() {
 # CHECK-NEXT:    %0 = "toy.constant"() {value = dense<5.500000e+00> : tensor<f64>} : () -> tensor<f64>
 # CHECK-NEXT:    %1 = "toy.reshape"(%0) : (tensor<f64>) -> tensor<2x2xf64>
-# CHECK-NEXT:    "toy.print"(%1) : (tensor<2x2xf64>) -> ()
-# CHECK-NEXT:    "toy.return"() : () -> ()
+# CHECK-NEXT:    toy.print %1 : tensor<2x2xf64>
+# CHECK-NEXT:    toy.return
 # CHECK-NEXT:  }
diff --git a/mlir/test/Examples/Toy/Ch5/shape_inference.mlir b/mlir/test/Examples/Toy/Ch5/shape_inference.mlir
--- a/mlir/test/Examples/Toy/Ch5/shape_inference.mlir
+++ b/mlir/test/Examples/Toy/Ch5/shape_inference.mlir
@@ -7,17 +7,17 @@
   %0 = "toy.transpose"(%arg0) : (tensor<*xf64>) -> tensor<*xf64>
   %1 = "toy.transpose"(%arg1) : (tensor<*xf64>) -> tensor<*xf64>
   %2 = "toy.mul"(%0, %1) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
-  "toy.return"(%2) : (tensor<*xf64>) -> ()
+  toy.return %2 : tensor<*xf64>
 }
 func @main() {
   %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
   %1 = "toy.reshape"(%0) : (tensor<2x3xf64>) -> tensor<2x3xf64>
   %2 = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
   %3 = "toy.reshape"(%2) : (tensor<6xf64>) -> tensor<2x3xf64>
-  %4 = "toy.generic_call"(%1, %3) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-  %5 = "toy.generic_call"(%3, %1) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-  "toy.print"(%5) : (tensor<*xf64>) -> ()
-  "toy.return"() : () -> ()
+  %4 = toy.generic_call @multiply_transpose(%1, %3) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+  %5 = toy.generic_call @multiply_transpose(%3, %1) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+  toy.print %5 : tensor<*xf64>
+  toy.return
 }
 
 // CHECK-NOT: func @multiply_transpose
@@ -27,5 +27,5 @@
 // CHECK: [[VAL_0:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
 // CHECK: [[VAL_1:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<2x3xf64>) -> tensor<3x2xf64>
 // CHECK: [[VAL_2:%.*]] = "toy.mul"([[VAL_1]], [[VAL_1]]) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64>
-// CHECK: "toy.print"([[VAL_2]]) : (tensor<3x2xf64>) -> ()
-// CHECK: "toy.return"() : () -> ()
+// CHECK: toy.print [[VAL_2]] : tensor<3x2xf64>
+// CHECK: toy.return
diff --git a/mlir/test/Examples/Toy/Ch6/affine-lowering.mlir b/mlir/test/Examples/Toy/Ch6/affine-lowering.mlir
--- a/mlir/test/Examples/Toy/Ch6/affine-lowering.mlir
+++ b/mlir/test/Examples/Toy/Ch6/affine-lowering.mlir
@@ -5,8 +5,8 @@
   %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
   %2 = "toy.transpose"(%0) : (tensor<2x3xf64>) -> tensor<3x2xf64>
   %3 = "toy.mul"(%2, %2) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64>
-  "toy.print"(%3) : (tensor<3x2xf64>) -> ()
-  "toy.return"() : () -> ()
+  toy.print %3 : tensor<3x2xf64>
+  toy.return
 }
 
 // CHECK-LABEL: func @main()
@@ -35,7 +35,7 @@
 // CHECK:           [[VAL_15:%.*]] = affine.load [[VAL_7]]{{\[}}[[VAL_12]], [[VAL_13]]] : memref<3x2xf64>
 // CHECK:           [[VAL_16:%.*]] = mulf [[VAL_14]], [[VAL_15]] : f64
 // CHECK:           affine.store [[VAL_16]], [[VAL_6]]{{\[}}[[VAL_12]], [[VAL_13]]] : memref<3x2xf64>
-// CHECK:         "toy.print"([[VAL_6]]) : (memref<3x2xf64>) -> ()
+// CHECK:         toy.print [[VAL_6]] : memref<3x2xf64>
 // CHECK:         dealloc [[VAL_8]] : memref<2x3xf64>
 // CHECK:         dealloc [[VAL_7]] : memref<3x2xf64>
 // CHECK:         dealloc [[VAL_6]] : memref<3x2xf64>
@@ -60,6 +60,6 @@
 // OPT:           [[VAL_10:%.*]] = affine.load [[VAL_7]]{{\[}}[[VAL_9]], [[VAL_8]]] : memref<2x3xf64>
 // OPT:           [[VAL_11:%.*]] = mulf [[VAL_10]], [[VAL_10]] : f64
 // OPT:           affine.store [[VAL_11]], [[VAL_6]]{{\[}}[[VAL_8]], [[VAL_9]]] : memref<3x2xf64>
-// OPT:         "toy.print"([[VAL_6]]) : (memref<3x2xf64>) -> ()
+// OPT:         toy.print [[VAL_6]] : memref<3x2xf64>
 // OPT:         dealloc [[VAL_7]] : memref<2x3xf64>
 // OPT:         dealloc [[VAL_6]] : memref<3x2xf64>
diff --git a/mlir/test/Examples/Toy/Ch6/codegen.toy b/mlir/test/Examples/Toy/Ch6/codegen.toy
--- a/mlir/test/Examples/Toy/Ch6/codegen.toy
+++ b/mlir/test/Examples/Toy/Ch6/codegen.toy
@@ -18,14 +18,14 @@
 # CHECK:         [[VAL_2:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<*xf64>) -> tensor<*xf64>
 # CHECK-NEXT:    [[VAL_3:%.*]] = "toy.transpose"([[VAL_1]]) : (tensor<*xf64>) -> tensor<*xf64>
 # CHECK-NEXT:    [[VAL_4:%.*]] = "toy.mul"([[VAL_2]], [[VAL_3]]) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
-# CHECK-NEXT:    "toy.return"([[VAL_4]]) : (tensor<*xf64>) -> ()
+# CHECK-NEXT:    toy.return [[VAL_4]] : tensor<*xf64>
 
 # CHECK-LABEL: func @main()
 # CHECK-NEXT:    [[VAL_5:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
 # CHECK-NEXT:    [[VAL_6:%.*]] = "toy.reshape"([[VAL_5]]) : (tensor<2x3xf64>) -> tensor<2x3xf64>
 # CHECK-NEXT:    [[VAL_7:%.*]] = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
 # CHECK-NEXT:    [[VAL_8:%.*]] = "toy.reshape"([[VAL_7]]) : (tensor<6xf64>) -> tensor<2x3xf64>
-# CHECK-NEXT:    [[VAL_9:%.*]] = "toy.generic_call"([[VAL_6]], [[VAL_8]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-# CHECK-NEXT:    [[VAL_10:%.*]] = "toy.generic_call"([[VAL_8]], [[VAL_6]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-# CHECK-NEXT:    "toy.print"([[VAL_10]]) : (tensor<*xf64>) -> ()
-# CHECK-NEXT:    "toy.return"() : () -> ()
+# CHECK-NEXT:    [[VAL_9:%.*]] = toy.generic_call @multiply_transpose([[VAL_6]], [[VAL_8]]) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+# CHECK-NEXT:    [[VAL_10:%.*]] = toy.generic_call @multiply_transpose([[VAL_8]], [[VAL_6]]) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+# CHECK-NEXT:    toy.print [[VAL_10]] : tensor<*xf64>
+# CHECK-NEXT:    toy.return
diff --git a/mlir/test/Examples/Toy/Ch6/scalar.toy b/mlir/test/Examples/Toy/Ch6/scalar.toy
--- a/mlir/test/Examples/Toy/Ch6/scalar.toy
+++ b/mlir/test/Examples/Toy/Ch6/scalar.toy
@@ -8,7 +8,7 @@
 # CHECK-LABEL: func @main() {
 # CHECK-NEXT:    %0 = "toy.constant"() {value = dense<5.500000e+00> : tensor<f64>} : () -> tensor<f64>
 # CHECK-NEXT:    %1 = "toy.reshape"(%0) : (tensor<f64>) -> tensor<2x2xf64>
-# CHECK-NEXT:    "toy.print"(%1) : (tensor<2x2xf64>) -> ()
-# CHECK-NEXT:    "toy.return"() : () -> ()
+# CHECK-NEXT:    toy.print %1 : tensor<2x2xf64>
+# CHECK-NEXT:    toy.return
 # CHECK-NEXT:  }
diff --git a/mlir/test/Examples/Toy/Ch6/shape_inference.mlir b/mlir/test/Examples/Toy/Ch6/shape_inference.mlir
--- a/mlir/test/Examples/Toy/Ch6/shape_inference.mlir
+++ b/mlir/test/Examples/Toy/Ch6/shape_inference.mlir
@@ -7,17 +7,17 @@
   %0 = "toy.transpose"(%arg0) : (tensor<*xf64>) -> tensor<*xf64>
   %1 = "toy.transpose"(%arg1) : (tensor<*xf64>) -> tensor<*xf64>
   %2 = "toy.mul"(%0, %1) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
-  "toy.return"(%2) : (tensor<*xf64>) -> ()
+  toy.return %2 : tensor<*xf64>
 }
 func @main() {
   %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
   %1 = "toy.reshape"(%0) : (tensor<2x3xf64>) -> tensor<2x3xf64>
   %2 = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
   %3 = "toy.reshape"(%2) : (tensor<6xf64>) -> tensor<2x3xf64>
-  %4 = "toy.generic_call"(%1, %3) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-  %5 = "toy.generic_call"(%3, %1) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-  "toy.print"(%5) : (tensor<*xf64>) -> ()
-  "toy.return"() : () -> ()
+  %4 = toy.generic_call @multiply_transpose(%1, %3) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+  %5 = toy.generic_call @multiply_transpose(%3, %1) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+  toy.print %5 : tensor<*xf64>
+  toy.return
 }
 
 // CHECK-NOT: func @multiply_transpose
@@ -27,5 +27,5 @@
 // CHECK: [[VAL_0:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
 // CHECK: [[VAL_1:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<2x3xf64>) -> tensor<3x2xf64>
 // CHECK: [[VAL_2:%.*]] = "toy.mul"([[VAL_1]], [[VAL_1]]) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64>
-// CHECK: "toy.print"([[VAL_2]]) : (tensor<3x2xf64>) -> ()
-// CHECK: "toy.return"() : () -> ()
+// CHECK: toy.print [[VAL_2]] : tensor<3x2xf64>
+// CHECK: toy.return
diff --git a/mlir/test/Examples/Toy/Ch7/affine-lowering.mlir b/mlir/test/Examples/Toy/Ch7/affine-lowering.mlir
--- a/mlir/test/Examples/Toy/Ch7/affine-lowering.mlir
+++ b/mlir/test/Examples/Toy/Ch7/affine-lowering.mlir
@@ -5,8 +5,8 @@
   %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
   %2 = "toy.transpose"(%0) : (tensor<2x3xf64>) -> tensor<3x2xf64>
   %3 = "toy.mul"(%2, %2) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64>
-  "toy.print"(%3) : (tensor<3x2xf64>) -> ()
-  "toy.return"() : () -> ()
+  toy.print %3 : tensor<3x2xf64>
+  toy.return
 }
 
 // CHECK-LABEL: func @main()
@@ -35,7 +35,7 @@
 // CHECK:           [[VAL_15:%.*]] = affine.load [[VAL_7]]{{\[}}[[VAL_12]], [[VAL_13]]] : memref<3x2xf64>
 // CHECK:           [[VAL_16:%.*]] = mulf [[VAL_14]], [[VAL_15]] : f64
 // CHECK:           affine.store [[VAL_16]], [[VAL_6]]{{\[}}[[VAL_12]], [[VAL_13]]] : memref<3x2xf64>
-// CHECK:         "toy.print"([[VAL_6]]) : (memref<3x2xf64>) -> ()
+// CHECK:         toy.print [[VAL_6]] : memref<3x2xf64>
 // CHECK:         dealloc [[VAL_8]] : memref<2x3xf64>
 // CHECK:         dealloc [[VAL_7]] : memref<3x2xf64>
 // CHECK:         dealloc [[VAL_6]] : memref<3x2xf64>
@@ -60,6 +60,6 @@
 // OPT:           [[VAL_10:%.*]] = affine.load [[VAL_7]]{{\[}}[[VAL_9]], [[VAL_8]]] : memref<2x3xf64>
 // OPT:           [[VAL_11:%.*]] = mulf [[VAL_10]], [[VAL_10]] : f64
 // OPT:           affine.store [[VAL_11]], [[VAL_6]]{{\[}}[[VAL_8]], [[VAL_9]]] : memref<3x2xf64>
-// OPT:         "toy.print"([[VAL_6]]) : (memref<3x2xf64>) -> ()
+// OPT:         toy.print [[VAL_6]] : memref<3x2xf64>
 // OPT:         dealloc [[VAL_7]] : memref<2x3xf64>
 // OPT:         dealloc [[VAL_6]] : memref<3x2xf64>
diff --git a/mlir/test/Examples/Toy/Ch7/codegen.toy b/mlir/test/Examples/Toy/Ch7/codegen.toy
--- a/mlir/test/Examples/Toy/Ch7/codegen.toy
+++ b/mlir/test/Examples/Toy/Ch7/codegen.toy
@@ -18,14 +18,14 @@
 # CHECK:         [[VAL_2:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<*xf64>) -> tensor<*xf64>
 # CHECK-NEXT:    [[VAL_3:%.*]] = "toy.transpose"([[VAL_1]]) : (tensor<*xf64>) -> tensor<*xf64>
 # CHECK-NEXT:    [[VAL_4:%.*]] = "toy.mul"([[VAL_2]], [[VAL_3]]) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
-# CHECK-NEXT:    "toy.return"([[VAL_4]]) : (tensor<*xf64>) -> ()
+# CHECK-NEXT:    toy.return [[VAL_4]] : tensor<*xf64>
 
 # CHECK-LABEL: func @main()
 # CHECK-NEXT:    [[VAL_5:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
 # CHECK-NEXT:    [[VAL_6:%.*]] = "toy.reshape"([[VAL_5]]) : (tensor<2x3xf64>) -> tensor<2x3xf64>
 # CHECK-NEXT:    [[VAL_7:%.*]] = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
 # CHECK-NEXT:    [[VAL_8:%.*]] = "toy.reshape"([[VAL_7]]) : (tensor<6xf64>) -> tensor<2x3xf64>
-# CHECK-NEXT:    [[VAL_9:%.*]] = "toy.generic_call"([[VAL_6]], [[VAL_8]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-# CHECK-NEXT:    [[VAL_10:%.*]] = "toy.generic_call"([[VAL_8]], [[VAL_6]]) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-# CHECK-NEXT:    "toy.print"([[VAL_10]]) : (tensor<*xf64>) -> ()
-# CHECK-NEXT:    "toy.return"() : () -> ()
+# CHECK-NEXT:    [[VAL_9:%.*]] = toy.generic_call @multiply_transpose([[VAL_6]], [[VAL_8]]) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+# CHECK-NEXT:    [[VAL_10:%.*]] = toy.generic_call @multiply_transpose([[VAL_8]], [[VAL_6]]) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+# CHECK-NEXT:    toy.print [[VAL_10]] : tensor<*xf64>
+# CHECK-NEXT:    toy.return
diff --git a/mlir/test/Examples/Toy/Ch7/scalar.toy b/mlir/test/Examples/Toy/Ch7/scalar.toy
--- a/mlir/test/Examples/Toy/Ch7/scalar.toy
+++ b/mlir/test/Examples/Toy/Ch7/scalar.toy
@@ -8,7 +8,7 @@
 # CHECK-LABEL: func @main() {
 # CHECK-NEXT:    %0 = "toy.constant"() {value = dense<5.500000e+00> : tensor<f64>} : () -> tensor<f64>
 # CHECK-NEXT:    %1 = "toy.reshape"(%0) : (tensor<f64>) -> tensor<2x2xf64>
-# CHECK-NEXT:    "toy.print"(%1) : (tensor<2x2xf64>) -> ()
-# CHECK-NEXT:    "toy.return"() : () -> ()
+# CHECK-NEXT:    toy.print %1 : tensor<2x2xf64>
+# CHECK-NEXT:    toy.return
 # CHECK-NEXT:  }
diff --git a/mlir/test/Examples/Toy/Ch7/shape_inference.mlir b/mlir/test/Examples/Toy/Ch7/shape_inference.mlir
--- a/mlir/test/Examples/Toy/Ch7/shape_inference.mlir
+++ b/mlir/test/Examples/Toy/Ch7/shape_inference.mlir
@@ -7,17 +7,17 @@
   %0 = "toy.transpose"(%arg0) : (tensor<*xf64>) -> tensor<*xf64>
   %1 = "toy.transpose"(%arg1) : (tensor<*xf64>) -> tensor<*xf64>
   %2 = "toy.mul"(%0, %1) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
-  "toy.return"(%2) : (tensor<*xf64>) -> ()
+  toy.return %2 : tensor<*xf64>
 }
 func @main() {
   %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
   %1 = "toy.reshape"(%0) : (tensor<2x3xf64>) -> tensor<2x3xf64>
   %2 = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
   %3 = "toy.reshape"(%2) : (tensor<6xf64>) -> tensor<2x3xf64>
-  %4 = "toy.generic_call"(%1, %3) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-  %5 = "toy.generic_call"(%3, %1) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-  "toy.print"(%5) : (tensor<*xf64>) -> ()
-  "toy.return"() : () -> ()
+  %4 = toy.generic_call @multiply_transpose(%1, %3) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+  %5 = toy.generic_call @multiply_transpose(%3, %1) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+  toy.print %5 : tensor<*xf64>
+  toy.return
 }
 
 // CHECK-NOT: func @multiply_transpose
@@ -27,5 +27,5 @@
 // CHECK: [[VAL_0:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
 // CHECK: [[VAL_1:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<2x3xf64>) -> tensor<3x2xf64>
 // CHECK: [[VAL_2:%.*]] = "toy.mul"([[VAL_1]], [[VAL_1]]) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64>
-// CHECK: "toy.print"([[VAL_2]]) : (tensor<3x2xf64>) -> ()
-// CHECK: "toy.return"() : () -> ()
+// CHECK: toy.print [[VAL_2]] : tensor<3x2xf64>
+// CHECK: toy.return
diff --git a/mlir/test/Examples/Toy/Ch7/struct-codegen.toy b/mlir/test/Examples/Toy/Ch7/struct-codegen.toy
--- a/mlir/test/Examples/Toy/Ch7/struct-codegen.toy
+++ b/mlir/test/Examples/Toy/Ch7/struct-codegen.toy
@@ -29,17 +29,17 @@
 # CHECK-NEXT:    [[VAL_3:%.*]] = "toy.struct_access"([[VAL_0]]) {index = 1 : i64} : (!toy.struct<tensor<*xf64>, tensor<*xf64>>) -> tensor<*xf64>
 # CHECK-NEXT:    [[VAL_4:%.*]] = "toy.transpose"([[VAL_3]]) : (tensor<*xf64>) -> tensor<*xf64>
 # CHECK-NEXT:    [[VAL_5:%.*]] = "toy.mul"([[VAL_2]], [[VAL_4]]) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
-# CHECK-NEXT:    "toy.return"([[VAL_5]]) : (tensor<*xf64>) -> ()
+# CHECK-NEXT:    toy.return [[VAL_5]] : tensor<*xf64>
 
 # CHECK-LABEL: func @main()
 # CHECK-NEXT:    [[VAL_6:%.*]] = "toy.struct_constant"() {value = [dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>, dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>]} : () -> !toy.struct<tensor<*xf64>, tensor<*xf64>>
-# CHECK-NEXT:    [[VAL_7:%.*]] = "toy.generic_call"([[VAL_6]]) {callee = @multiply_transpose} : (!toy.struct<tensor<*xf64>, tensor<*xf64>>) -> tensor<*xf64>
-# CHECK-NEXT:    "toy.print"([[VAL_7]]) : (tensor<*xf64>) -> ()
-# CHECK-NEXT:    "toy.return"() : () -> ()
+# CHECK-NEXT:    [[VAL_7:%.*]] = toy.generic_call @multiply_transpose([[VAL_6]]) : (!toy.struct<tensor<*xf64>, tensor<*xf64>>) -> tensor<*xf64>
+# CHECK-NEXT:    toy.print [[VAL_7]] : tensor<*xf64>
+# CHECK-NEXT:    toy.return
 
 # OPT-LABEL: func @main()
 # OPT-NEXT:    [[VAL_0:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
 # OPT-NEXT:    [[VAL_1:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<2x3xf64>) -> tensor<3x2xf64>
 # OPT-NEXT:    [[VAL_2:%.*]] = "toy.mul"([[VAL_1]], [[VAL_1]]) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64>
-# OPT-NEXT:    "toy.print"([[VAL_2]]) : (tensor<3x2xf64>) -> ()
-# OPT-NEXT:    "toy.return"() : () -> ()
+# OPT-NEXT:    toy.print [[VAL_2]] : tensor<3x2xf64>
+# OPT-NEXT:    toy.return
diff --git a/mlir/test/Examples/Toy/Ch7/struct-opt.mlir b/mlir/test/Examples/Toy/Ch7/struct-opt.mlir
--- a/mlir/test/Examples/Toy/Ch7/struct-opt.mlir
+++ b/mlir/test/Examples/Toy/Ch7/struct-opt.mlir
@@ -13,4 +13,4 @@
 // CHECK-LABEL: func @main
 // CHECK-NEXT: %[[CST:.*]] = "toy.constant"
 // CHECK-SAME: dense<4.0
-// CHECK-NEXT: "toy.print"(%[[CST]])
+// CHECK-NEXT: toy.print %[[CST]]