diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -1707,15 +1707,15 @@
        << ")";
   if (!op.inputs().empty())
-    p << " ins (" << op.inputs() << ")";
+    p << " ins (" << op.inputs() << ": " << TypeRange(op.inputs()) << ")";
   if (!op.outputs().empty())
-    p << " outs (" << op.outputs() << ")";
+    p << " outs (" << op.outputs() << ":" << TypeRange(op.outputs()) << ")";
 
   if (llvm::any_of(op.iterator_types(), [](Attribute attr) {
         return attr.cast<StringAttr>().getValue() !=
                getParallelIteratorTypeName();
       })) {
-    p << " iterators(" << op.iterator_types() << ")";
+    p << " iterators" << op.iterator_types() << "";
   }
 
   p.printRegion(op.region(), /*printEntryBlockArgs=*/false);
@@ -1792,7 +1792,7 @@
   if (succeeded(parser.parseOptionalKeyword("iterators"))) {
     StringAttr iterType;
-    if (parser.parseLParen() || parser.parseAttribute(iterType))
+    if (parser.parseLSquare() || parser.parseAttribute(iterType))
       return failure();
     iterTypes.push_back(iterType);
     for (int i = 1, e = ivs.size(); i < e; ++i) {
@@ -1800,7 +1800,7 @@
         return failure();
       iterTypes.push_back(iterType);
     }
-    if (parser.parseRParen())
+    if (parser.parseRSquare())
      return failure();
   } else {
     auto parallelIter = builder.getStringAttr(getParallelIteratorTypeName());
diff --git a/mlir/test/Dialect/Linalg/roundtrip.mlir b/mlir/test/Dialect/Linalg/roundtrip.mlir
--- a/mlir/test/Dialect/Linalg/roundtrip.mlir
+++ b/mlir/test/Dialect/Linalg/roundtrip.mlir
@@ -1,10 +1,32 @@
-// RUN: mlir-opt -split-input-file %s | FileCheck %s
+// RUN: mlir-opt %s | mlir-opt | FileCheck %s
+// RUN: mlir-opt %s --mlir-print-op-generic | mlir-opt | FileCheck %s
 
 // TODO: Re-enable LLVM lowering test after IndexedGenericOp is lowered.
 //
 // Test that we can lower all the way to LLVM without crashing, don't check results here.
 // DISABLED: mlir-opt %s --convert-linalg-to-llvm -o=/dev/null 2>&1
 
+// CHECK-DAG: #[[$permute_0:.*]] = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
+// CHECK-DAG: #[[$permute_1:.*]] = affine_map<(d0, d1, d2) -> (d2, d1, d0)>
+// CHECK-DAG: #[[$reshape5D01:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>
+// CHECK-DAG: #[[$reshape5D0:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d0)>
+// CHECK-DAG: #[[$reshape5D1:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d1)>
+// CHECK-DAG: #[[$reshape5D2:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d2)>
+// CHECK-DAG: #[[$reshape5D345:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>
+// CHECK-DAG: #[[$reshape5D34:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
+// CHECK-DAG: #[[$reshapeD012:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+// CHECK-DAG: #[[$reshapeD01:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
+// CHECK-DAG: #[[$reshapeD0:.*]] = affine_map<(d0, d1, d2) -> (d0)>
+// CHECK-DAG: #[[$reshapeD12:.*]] = affine_map<(d0, d1, d2) -> (d1, d2)>
+// CHECK-DAG: #[[$reshapeD2:.*]] = affine_map<(d0, d1, d2) -> (d2)>
+// CHECK-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
+// CHECK-DAG: #[[$strided2DOFF0:.*]] = affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>
+// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
+// CHECK-DAG: #[[$strided3DOFF0:.*]] = affine_map<(d0, d1, d2)[s0, s1] -> (d0 * s0 + d1 * s1 + d2)>
+// CHECK-DAG: #[[$strided3DT:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d2 * s1 + s0 + d1 * s2 + d0)>
+// CHECK-DAG: #[[$strided6D:.*]] = affine_map<(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3, s4, s5] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3 * s4 + d4 * s5 + d5)>
+
 func @pad_dynamic(%arg0: tensor<1x2x2x?xf32>, %low: index, %high: index,
                   %pad_value: f32) -> tensor<6x?x?x?xf32> {
   %0 = linalg.pad_tensor %arg0 low[2, %low, 3, 3] high[3, 3, %high, 2] {
@@ -107,9 +129,6 @@
 
 // -----
 
-// CHECK-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
-// CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-
 func @ops(%arg0: memref,
           %arg1: memref,
           %arg2: memref,
@@ -141,7 +160,6 @@
 
 // -----
 
-// CHECK-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
 
 func @fill_view(%arg0: memref, %arg1: f32) {
   linalg.fill(%arg0, %arg1) : memref, f32
@@ -153,9 +171,6 @@
 
 // -----
 
-// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
-// CHECK-DAG: #[[$strided3DT:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d2 * s1 + s0 + d1 * s2 + d0)>
-
 func @transpose(%arg0: memref) {
   %0 = transpose %arg0 (i, j, k) -> (k, j, i) : memref to memref (d2 * s1 + s0 + d1 * s2 + d0)>>
   return
@@ -166,7 +181,6 @@
 
 // -----
 
-// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
 
 func @fill_view3(%arg0: memref, %arg1: f32) {
   linalg.fill(%arg0, %arg1) : memref, f32
@@ -178,7 +192,6 @@
 
 // -----
 
-// CHECK-DAG: #[[$strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)>
 
 func @copy_view(%arg0: memref,
                 %arg1: memref) {
@@ -192,9 +205,6 @@
 
 // -----
 
-// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
-// CHECK-DAG: #[[$map0:.*]] = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
-// CHECK-DAG: #[[$map1:.*]] = affine_map<(d0, d1, d2) -> (d2, d1, d0)>
 
 func @copy_view3(%arg0: memref,
                  %arg1: memref) {
@@ -206,14 +216,13 @@
 
 // CHECK-LABEL: func @copy_view3(
 // CHECK: %{{.*}}: memref, %{{.*}}: memref) {
 // CHECK: linalg.copy(%{{.*}}, %{{.*}}) {
-// CHECK-SAME: inputPermutation = #[[$map0]],
-// CHECK-SAME: outputPermutation = #[[$map1]]} :
+// CHECK-SAME: inputPermutation = #[[$permute_0]],
+// CHECK-SAME: outputPermutation = #[[$permute_1]]} :
 // CHECK-SAME: memref,
 // CHECK-SAME: memref
 
 // -----
 
-// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
 
 func @conv_view3(%arg0: memref,
                  %arg1: memref,
@@ -231,7 +240,6 @@
 
 // -----
 
-// CHECK-DAG: #[[$strided6D:.*]] = affine_map<(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3, s4, s5] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3 * s4 + d4 * s5 + d5)>
 
 func @conv_view6(%arg0: memref,
                  %arg1: memref,
@@ -315,23 +323,20 @@
 
 // -----
 
-// CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
-
-#accesses = [
+#accesses_0 = [
   affine_map<(i, j, k) -> (j, i)>,
   affine_map<(i, j, k) -> (i, k, i + j)>
 ]
 
-#trait = {
-  indexing_maps = #accesses,
+#trait_0 = {
+  indexing_maps = #accesses_0,
   iterator_types = ["parallel", "parallel", "parallel"],
   library_call = "some_external_function_name_1"
 }
 
 func @generic(%arg0: memref, offset: ?, strides: [?, 1]>,
               %arg1: memref) {
-  linalg.generic #trait
+  linalg.generic #trait_0
       ins(%arg0 : memref, offset: ?, strides: [?, 1]>)
      outs(%arg1 : memref)
      attrs = {foo = 1} {
@@ -352,7 +357,7 @@
 func @generic_with_tensor_input(%arg0: tensor>,
                                 %arg1: memref) {
-  linalg.generic #trait
+  linalg.generic #trait_0
       ins(%arg0 : tensor>)
      outs(%arg1 : memref)
      attrs = {foo = 1} {
@@ -390,14 +395,14 @@
 
 // -----
 
-#accesses2 = [
+#accesses_1 = [
   affine_map<(i, j, k) -> (j, i)>,
   affine_map<(i, j, k) -> (i, k, i + j)>,
   affine_map<(i, j, k) -> (i, k, i + j)>
 ]
 
-#trait2 = {
-  indexing_maps = #accesses2,
+#trait_1 = {
+  indexing_maps = #accesses_1,
   iterator_types = ["parallel", "parallel", "parallel"],
   library_call = "some_external_function_name_1"
 }
@@ -405,7 +410,7 @@
 func @generic_with_tensor_input_and_output(
     %arg0: tensor>, %arg1: tensor)
     -> (tensor) {
-  %0 = linalg.generic #trait2
+  %0 = linalg.generic #trait_1
       ins(%arg0, %arg1 : tensor>, tensor)
      outs(%arg1 : tensor)
      attrs = {foo = 1} {
@@ -427,14 +432,14 @@
 
 // -----
 
-#accesses3 = [
+#accesses_2 = [
   affine_map<(i, j, k) -> (j, i)>,
   affine_map<(i, j, k) -> (i, k, i + j)>,
   affine_map<(i, j, k) -> (i, k, i + j)>
 ]
 
-#trait3 = {
-  indexing_maps = #accesses3,
+#trait_2 = {
+  indexing_maps = #accesses_2,
   iterator_types = ["parallel", "parallel", "parallel"],
   library_call = "some_external_function_name_1"
 }
@@ -442,7 +447,7 @@
 func @indexed_generic_with_tensor_input_and_output(
     %arg0: tensor>, %arg1: tensor)
     -> (tensor) {
-  %0 = linalg.indexed_generic #trait3
+  %0 = linalg.indexed_generic #trait_2
       ins(%arg0, %arg1 : tensor>, tensor)
      outs(%arg1 : tensor)
      attrs = {foo = 1} {
@@ -499,23 +504,21 @@
 
 // -----
 
-// CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
-#accesses = [
+#accesses_3 = [
   affine_map<(i, j, k) -> (j, i)>,
   affine_map<(i, j, k) -> (i, k, i + j)>
 ]
 
-#trait3 = {
-  indexing_maps = #accesses,
+#trait_3 = {
+  indexing_maps = #accesses_3,
   iterator_types = ["parallel", "parallel", "parallel"],
   library_call = "some_external_function_name_2"
 }
 
 func @generic_region(%arg0: memref, offset: ?, strides: [?, 1]>,
                      %arg1: memref) {
-  linalg.generic #trait3
+  linalg.generic #trait_3
      ins(%arg0 : memref, offset: ?, strides: [?, 1]>)
     outs(%arg1 : memref)
      attrs = {foo = 1} {
@@ -537,7 +540,7 @@
 
 func @indexed_generic(%arg0: memref, offset: ?, strides: [?, 1]>,
                       %arg1: memref) {
-  linalg.indexed_generic #trait3
+  linalg.indexed_generic #trait_3
       ins(%arg0 : memref, offset: ?, strides: [?, 1]>)
      outs(%arg1 : memref)
      attrs = {foo = 1} {
@@ -560,15 +563,6 @@
 
 // -----
 
-// CHECK-DAG: #[[$reshapeD01:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
-// CHECK-DAG: #[[$reshapeD2:.*]] = affine_map<(d0, d1, d2) -> (d2)>
-// CHECK-DAG: #[[$reshapeD0:.*]] = affine_map<(d0, d1, d2) -> (d0)>
-// CHECK-DAG: #[[$reshapeD12:.*]] = affine_map<(d0, d1, d2) -> (d1, d2)>
-// CHECK-DAG: #[[$reshapeD012:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-// CHECK-DAG: #[[$reshape5D01:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d0, d1)>
-// CHECK-DAG: #[[$reshape5D2:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d2)>
-// CHECK-DAG: #[[$reshape5D34:.*]] = affine_map<(d0, d1, d2, d3, d4) -> (d3, d4)>
-
 func @reshape_static(%arg0: memref<3x4x5xf32>, %arg1: tensor<3x4x5xf32>, %arg2: tensor<3x?x5xf32>) {
   // Reshapes that collapse and expand back a contiguous buffer.
   %0 = linalg.reshape %arg0 [affine_map<(i, j, k) -> (i, j)>,
@@ -640,11 +634,6 @@
 
 // -----
 
-// CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
-// CHECK-DAG: #[[$strided2DOFF0:.*]] = affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>
-// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
-// CHECK-DAG: #[[$strided3DOFF0:.*]] = affine_map<(d0, d1, d2)[s0, s1] -> (d0 * s0 + d1 * s1 + d2)>
-
 func @reshape_dynamic(%arg0: memref,
                       %arg1: memref,
                       %arg2: memref) {
@@ -673,9 +662,6 @@
   return
 }
 
-// CHECK-DAG: #[[$reshapeD01:.*]] = affine_map<(d0, d1, d2) -> (d0, d1)>
-// CHECK-DAG: #[[$reshapeD2:.*]] = affine_map<(d0, d1, d2) -> (d2)>
-
 // CHECK-LABEL: func @reshape
 // CHECK: linalg.reshape {{.*}} [#[[$reshapeD01]], #[[$reshapeD2]]]
 // CHECK-SAME: memref into memref
@@ -762,11 +748,8 @@
       tensor into tensor
   return %0 : tensor
 }
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d0)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d1)>
-// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>
 // CHECK: func @legal_collapsing_reshape_dynamic_tensor
-// CHECK: linalg.tensor_reshape %{{.+}} [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
+// CHECK: linalg.tensor_reshape %{{.+}} [#[[$reshape5D0]], #[[$reshape5D1]], #[[$reshape5D345]]]
 
 // -----
 
@@ -780,11 +763,8 @@
       memref into memref
   return %0 : memref
 }
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d0)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d1)>
-// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3, d4) -> (d2, d3, d4)>
 // CHECK: func @legal_collapsing_reshape_dynamic_memref
-// CHECK: linalg.reshape %{{.+}} [#[[MAP0]], #[[MAP1]], #[[MAP2]]]
+// CHECK: linalg.reshape %{{.+}} [#[[$reshape5D0]], #[[$reshape5D1]], #[[$reshape5D345]]]
 
 // -----
 
@@ -797,14 +777,14 @@
 
 // -----
 
-#accesses = [
+#accesses_4 = [
  affine_map<(i, j) -> (i, j)>,
  affine_map<(i, j) -> (i, j)>,
  affine_map<(i, j) -> (i, j)>
 ]
 
-#trait = {
-  indexing_maps = #accesses,
+#trait_4 = {
+  indexing_maps = #accesses_4,
   iterator_types = ["parallel", "parallel"]
 }
 
@@ -825,7 +805,7 @@
     %out_sub = subtensor %out[%i, 0] [%c4, %c64] [1, 1]
       : tensor<24x64xi8> to tensor
-    %sum = linalg.generic #trait
+    %sum = linalg.generic #trait_4
       ins(%lhs_sub, %rhs_sub : tensor, tensor)
       outs(%out_sub : tensor) {
     ^bb(%l: i8, %r: i8, %o: i8) :
@@ -840,7 +820,7 @@
   return %prod : tensor<24x64xi8>
 }
 // CHECK-LABEL: func @tiled_loop
-// CHECK-NOT: iterators(
+// CHECK-NOT: iterators[
 
 // -----
 
@@ -848,7 +828,7 @@
 #id_2d = affine_map<(d0, d1, d2) -> (d0, d2)>
 #id_1d = affine_map<(d0, d1, d2) -> (d1)>
 
-#trait = {
+#trait_5 = {
   indexing_maps = [
     #id_3d,
     #id_2d,
@@ -874,7 +854,7 @@
       = (%c0, %c0, %c0) to (%X, %Y, %Z) step (%c2, %c4, %c8)
       ins(%input_3d, %input_2d: tensor<16x24x32xf32>, tensor<16x32xf32>)
      outs( %output: tensor<24xf32>)
-      iterators("reduction", "parallel", "reduction") {
+      iterators["reduction", "parallel", "reduction"] {
     %sub_3d = subtensor %input_3d[%i, %j, %k][2, 4, 8][1, 1, 1]
       : tensor<16x24x32xf32> to tensor<2x4x8xf32>
     %sub_2d = subtensor %input_2d[%i, %k][2, 8][1, 1]
@@ -883,7 +863,7 @@
       : tensor<24xf32> to tensor<4xf32>
     %sub_out = subtensor %output[%j] [4] [1]
       : tensor<24xf32> to tensor<4xf32>
-    %acc = linalg.generic #trait
+    %acc = linalg.generic #trait_5
       ins(%sub_3d, %sub_2d, %sub_1d
         : tensor<2x4x8xf32>, tensor<2x8xf32>, tensor<4xf32>)
       outs(%sub_out : tensor<4xf32>) {
@@ -900,4 +880,4 @@
   return %result : tensor<24xf32>
 }
 // CHECK-LABEL: func @tiled_loop_reduction
-// CHECK: iterators(
+// CHECK: iterators[
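
For orientation: after this change, the custom form exercised by the @tiled_loop_reduction test above round-trips roughly as follows (illustrative sketch only, loop body elided):

    %result = linalg.tiled_loop (%i, %j, %k) = (%c0, %c0, %c0) to (%X, %Y, %Z) step (%c2, %c4, %c8)
        ins(%input_3d, %input_2d: tensor<16x24x32xf32>, tensor<16x32xf32>)
        outs(%output: tensor<24xf32>)
        iterators["reduction", "parallel", "reduction"] {
      ...
    }

That is, ins/outs now print explicit operand types, and the iterator types are printed as a plain attribute array in square brackets instead of the previous iterators(...) form, matching what the parser now expects via parseLSquare/parseRSquare.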