diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
--- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
+++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
@@ -217,7 +217,7 @@
 
   // Compute the sum padding:
   // CHECK: %[[KERNEL:.+]] = tensor.empty() : tensor<4x4xf32>
-  // CHECK: %[[POOL:.+]] = linalg.pooling_nhwc_sum 
+  // CHECK: %[[POOL:.+]] = linalg.pooling_nhwc_sum
   // CHECK-SAME: dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>}
   // CHECK-SAME: ins(%[[PAD]], %[[KERNEL]] : tensor<1x8x36x62xf32>, tensor<4x4xf32>)
   // CHECK-SAME: outs(%[[FILL]] : tensor<1x5x33x62xf32>)
@@ -233,7 +233,7 @@
 
   // Divide the sum pooling by the number of summed values.
   // CHECK: %[[EMPTY:.+]] = tensor.empty() : tensor<1x5x33x62xf32>
-  // CHECK: %[[GENERIC:.+]] = linalg.generic 
+  // CHECK: %[[GENERIC:.+]] = linalg.generic
   // CHECK-SAME: indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel"]}
   // CHECK-SAME: ins(%[[POOL]] : tensor<1x5x33x62xf32>)
   // CHECK-SAME: outs(%[[EMPTY]] : tensor<1x5x33x62xf32>)
@@ -292,12 +292,12 @@
 
 // -----
 
-// CHECK-LABLE: @avg_pool_i8
+// CHECK-LABEL: @avg_pool_i8
 func.func @avg_pool_i8(%arg0: tensor<1x6x34x62xi8>) -> (tensor<1x5x33x62xi8>) {
-  // CHECK: %[[GENERIC:.+]] = linalg.generic 
+  // CHECK: %[[GENERIC:.+]] = linalg.generic
   // CHECK-SAME: indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel"]}
-  // CHECK-SAME: ins(%[[POOL]] : tensor<1x5x33x62xi32>)
-  // CHECK-SAME: outs(%[[EMPTY]] : tensor<1x5x33x62xi8>)
+  // CHECK-SAME: ins(%[[POOL:.+]] : tensor<1x5x33x62xi32>)
+  // CHECK-SAME: outs(%[[EMPTY:.+]] : tensor<1x5x33x62xi8>)
   // CHECK: ^bb0(%[[IN:.+]]: i32, %{{.+}}: i8)
 
   // Only different behavior is how the division is performed.
@@ -346,7 +346,7 @@
   // CHECK: %[[EMPTY:.+]] = tensor.empty(%[[BATCH]]) : tensor<?x5x33x62xf32>
   // CHECK: %[[FILL:.+]] = linalg.fill ins(%[[F0]] : f32) outs(%[[EMPTY]] : tensor<?x5x33x62xf32>)
   // CHECK: %[[KERNEL:.+]] = tensor.empty() : tensor<4x4xf32>
-  // CHECK: %[[POOL:.+]] = linalg.pooling_nhwc_sum 
+  // CHECK: %[[POOL:.+]] = linalg.pooling_nhwc_sum
   // CHECK-SAME: dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>
   // CHECK-SAME: ins(%[[PADDED]], %[[KERNEL]] : tensor<?x8x36x62xf32>, tensor<4x4xf32>)
   // CHECK-SAME: outs(%[[FILL]] : tensor<?x5x33x62xf32>) -> tensor<?x5x33x62xf32>
@@ -637,7 +637,7 @@
   // CHECK: %[[GENERIC:.+]] = linalg.generic
   // CHECK-SAME: {indexing_maps = [#map, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]}
   // CHECK-SAME: ins(%arg2, %[[CONV3D]] : tensor<28xf32>, tensor<1x47x45x43x28xf32>)
-  // CHECK--SAME: outs(%[[EMPTY]] : tensor<1x47x45x43x28xf32>) {
+  // CHECK-SAME: outs(%[[EMPTY]] : tensor<1x47x45x43x28xf32>) {
   // CHECK: ^bb0(%[[A1:.+]]: f32, %[[A2:.+]]: f32, %{{.+}}: f32):
   // CHECK: %[[ADD:.+]] = arith.addf %[[A1]], %[[A2]] : f32
   // CHECK: linalg.yield %[[ADD]]
@@ -664,7 +664,7 @@
   // CHECK: %[[GENERIC:.+]] = linalg.generic
   // CHECK-SAME: {indexing_maps = [#map, #map1, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel", "parallel"]}
   // CHECK-SAME: ins(%arg2, %[[CONV3D]] : tensor<28xi32>, tensor<1x47x45x43x28xi32>)
-  // CHECK--SAME: outs(%[[EMPTY]] : tensor<1x47x45x43x28xi32>) {
+  // CHECK-SAME: outs(%[[EMPTY]] : tensor<1x47x45x43x28xi32>) {
   // CHECK: ^bb0(%[[A1:.+]]: i32, %[[A2:.+]]: i32, %{{.+}}: i32):
   // CHECK: %[[ADD:.+]] = arith.addi %[[A1]], %[[A2]] : i32
   // CHECK: linalg.yield %[[ADD]]
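
Note: FileCheck treats an unrecognized directive spelling such as "CHECK-LABLE:" or
"CHECK--SAME:" as ordinary comment text and silently skips it, so the lines fixed
above were never actually being verified. With the label restored, %[[POOL]] and
%[[EMPTY]] in @avg_pool_i8 would be undefined inside their own CHECK-LABEL section,
which is why those uses become local captures (%[[POOL:.+]], %[[EMPTY:.+]]). To
re-verify locally, an invocation along these lines should work (a sketch; the RUN
line at the top of the test file is authoritative):

  mlir-opt --split-input-file \
    -pass-pipeline="builtin.module(func.func(tosa-to-linalg-named))" \
    mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir \
  | FileCheck mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir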