diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -3134,6 +3134,52 @@
     return success();
   }
 };
+
+/// Forces `outs` operands of linalg operations to use `linalg.init_tensor` if
+/// the value of the `outs` operand is not used within the op. This is only
+/// implemented for `linalg.generic` operations for now, but should hold for
+/// all linalg structured ops.
+struct RemoveOutsDependency : public OpRewritePattern<GenericOp> {
+  using OpRewritePattern<GenericOp>::OpRewritePattern;
+
+  LogicalResult matchAndRewrite(GenericOp op,
+                                PatternRewriter &rewriter) const override {
+    rewriter.startRootUpdate(op);
+    bool modifiedOutput = false;
+    Location loc = op.getLoc();
+    for (OpOperand &opOperand : op.getOutputOpOperands()) {
+      if (!op.payloadUsesValueFromOpOperand(&opOperand)) {
+        Value operandVal = opOperand.get();
+        auto operandType = operandVal.getType().dyn_cast<RankedTensorType>();
+        if (!operandType)
+          continue;
+
+        // If outs is already an `init_tensor` operation, nothing to do.
+        auto definingOp = operandVal.getDefiningOp<InitTensorOp>();
+        if (definingOp)
+          continue;
+        modifiedOutput = true;
+        SmallVector<Value> dynamicDims;
+        for (auto dim : llvm::enumerate(operandType.getShape())) {
+          if (dim.value() != ShapedType::kDynamicSize)
+            continue;
+          dynamicDims.push_back(rewriter.createOrFold<memref::DimOp>(
+              loc, operandVal, dim.index()));
+        }
+        Value initTensor = rewriter.create<InitTensorOp>(
+            loc, dynamicDims, operandType.getShape(),
+            operandType.getElementType());
+        op->setOperand(opOperand.getOperandNumber(), initTensor);
+      }
+    }
+    if (!modifiedOutput) {
+      rewriter.cancelRootUpdate(op);
+      return failure();
+    }
+    rewriter.finalizeRootUpdate(op);
+    return success();
+  }
+};
 } // namespace
 
 #define CANONICALIZERS_AND_FOLDERS(XXX)                                        \
@@ -3154,7 +3200,17 @@
 CANONICALIZERS_AND_FOLDERS(PoolingSumOp)
 CANONICALIZERS_AND_FOLDERS(CopyOp)
 CANONICALIZERS_AND_FOLDERS(FillOp)
-CANONICALIZERS_AND_FOLDERS(GenericOp)
+
+void GenericOp::getCanonicalizationPatterns(RewritePatternSet &results,
+                                            MLIRContext *context) {
+  results.add<DeduplicateInputs, EraseDeadLinalgOp, FoldTensorCastOp,
+              RemoveOutsDependency>(context);
+}
+
+LogicalResult GenericOp::fold(ArrayRef<Attribute>,
+                              SmallVectorImpl<OpFoldResult> &) {
+  return foldMemRefCast(*this);
+}
 
 // All named ops canonicalizers and folders are auto-generated in the
 // .cpp.inc.
diff --git a/mlir/test/Dialect/Linalg/canonicalize.mlir b/mlir/test/Dialect/Linalg/canonicalize.mlir
--- a/mlir/test/Dialect/Linalg/canonicalize.mlir
+++ b/mlir/test/Dialect/Linalg/canonicalize.mlir
@@ -1132,3 +1132,39 @@
 // CHECK-NEXT: %[[SUM1:.+]] = addi %[[SUM0]], %[[ARG2]] : index
 // CHECK-NEXT: %[[SUM2:.+]] = addi %[[SUM1]], %[[ARG3]] : index
 // CHECK-NEXT: linalg.yield %[[SUM2]] : index
+
+// -----
+
+#map = affine_map<(d0, d1) -> (d0, d1)>
+#trait = {
+  indexing_maps = [#map, #map],
+  iterator_types = ["parallel", "parallel"]
+}
+func @break_outs_dependency(%arg0 : tensor<?x?xf32>) -> tensor<?x?xf32>
+{
+  %0 = linalg.generic #trait ins(%arg0 : tensor<?x?xf32>) outs(%arg0 : tensor<?x?xf32>) {
+       ^bb0(%arg1 : f32, %arg2 : f32) :
+         %1 = addf %arg1, %arg1 : f32
+         linalg.yield %1 : f32
+       } -> tensor<?x?xf32>
+  %2 = linalg.generic #trait ins(%0 : tensor<?x?xf32>) outs(%0 : tensor<?x?xf32>) {
+       ^bb0(%arg1 : f32, %arg2 : f32) :
+         %3 = mulf %arg1, %arg1 : f32
+         linalg.yield %3 : f32
+       } -> tensor<?x?xf32>
+  return %2 : tensor<?x?xf32>
+}
+// CHECK: func @break_outs_dependency(
+// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?xf32>)
+// CHECK-DAG: %[[C0:.+]] = constant 0 : index
+// CHECK-DAG: %[[C1:.+]] = constant 1 : index
+// CHECK-DAG: %[[D0:.+]] = memref.dim %[[ARG0]], %[[C0]]
+// CHECK-DAG: %[[D1:.+]] = memref.dim %[[ARG0]], %[[C1]]
+// CHECK-DAG: %[[INIT:.+]] = linalg.init_tensor [%[[D0]], %[[D1]]]
+// CHECK: %[[GENERIC1:.+]] = linalg.generic
+// CHECK-SAME: outs(%[[INIT]] : tensor<?x?xf32>)
+// CHECK-DAG: %[[D0:.+]] = memref.dim %[[ARG0]], %[[C0]]
+// CHECK-DAG: %[[D1:.+]] = memref.dim %[[ARG0]], %[[C1]]
+// CHECK-DAG: %[[INIT:.+]] = linalg.init_tensor [%[[D0]], %[[D1]]]
+// CHECK: %[[RESULT:.+]] = linalg.generic
+// CHECK-SAME: outs(%[[INIT]] : tensor<?x?xf32>)
diff --git a/mlir/test/Dialect/Linalg/fusion-sequence.mlir b/mlir/test/Dialect/Linalg/fusion-sequence.mlir
--- a/mlir/test/Dialect/Linalg/fusion-sequence.mlir
+++ b/mlir/test/Dialect/Linalg/fusion-sequence.mlir
@@ -176,16 +176,16 @@
 // CHECK: %[[R0:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[ARG5:.+]] = %[[INIT]]) -> (tensor<?x?xf32>) {
 // CHECK: %[[R1:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[ARG7:.+]] = %[[ARG5]]) -> (tensor<?x?xf32>) {
 // CHECK-DAG: %[[STARG3:.+]] = subtensor %[[ARG3]]
-// CHECK-DAG: %[[STARG7:.+]] = subtensor %[[ARG7]]
 // CHECK-DAG: %[[STARG0:.+]] = subtensor %[[ARG0]]
 // CHECK-DAG: %[[STARG1:.+]] = subtensor %[[ARG1]]
 // CHECK-DAG: %[[STARG2:.+]] = subtensor %[[ARG2]]
 // CHECK: %[[T0:.+]] = linalg.matmul
 // CHECK-SAME: ins(%[[STARG0]], %[[STARG1]] : tensor<?x?xf32>, tensor<?x?xf32>)
 // CHECK-SAME: outs(%[[STARG2]] : tensor<?x?xf32>) -> tensor<?x?xf32>
+// CHECK: %[[INIT_SUBTENSOR:.+]] = linalg.init_tensor
 // CHECK: %[[T1:.+]] = linalg.generic
 // CHECK-SAME: ins(%[[T0:.+]], %[[STARG3]] : tensor<?x?xf32>, tensor<?x?xf32>)
-// CHECK-SAME: outs(%[[STARG7]] : tensor<?x?xf32>)
+// CHECK-SAME: outs(%[[INIT_SUBTENSOR]] : tensor<?x?xf32>)
 // CHECK: %[[RESULT:.+]] = subtensor_insert %[[T1]] into %[[ARG7]]
 // CHECK: scf.yield %[[RESULT]]
 // CHECK: }
diff --git a/mlir/test/Dialect/Linalg/fusion-tensor-pattern.mlir b/mlir/test/Dialect/Linalg/fusion-tensor-pattern.mlir
--- a/mlir/test/Dialect/Linalg/fusion-tensor-pattern.mlir
+++ b/mlir/test/Dialect/Linalg/fusion-tensor-pattern.mlir
@@ -127,7 +127,6 @@
 // CHECK-SAME: iter_args(%[[ARG4:.+]] = %{{[a-zA-Z0-9_]+}})
 // CHECK: %[[YIELD:.+]] = scf.for %[[IV1:[a-zA-Z0-9_]+]]
 // CHECK-SAME: iter_args(%[[ARG6:.+]] = %[[ARG4]])
-// CHECK: %[[ST_ARG6:.+]] = subtensor %[[ARG6]][%[[IV0]], %[[IV1]]]
 // CHECK: %[[ST_ARG0:.+]] = subtensor %[[ARG0]][%[[IV0]], 0]
 // CHECK: %[[ST_ARG1:.+]] = subtensor %[[ARG1]][0, %[[IV1]]]
 // CHECK: %[[ST_ARG2:.+]] = subtensor %[[ARG2]][%[[IV0]], %[[IV1]]]
@@ -135,9 +134,10 @@
 // CHECK-SAME: ins(%[[ST_ARG0]], %[[ST_ARG1]]
 // CHECK-SAME: : tensor<?x?xf32>, tensor<?x?xf32>)
 // CHECK-SAME: outs(%[[ST_ARG2]] : tensor<?x?xf32>)
+// CHECK: %[[INIT:.+]] = linalg.init_tensor
 // CHECK: %[[ST_RESULT:.+]] = linalg.generic
 // CHECK-SAME: ins(%[[LHS]] : tensor<?x?xf32>)
-// CHECK-SAME: outs(%[[ST_ARG6]] : tensor<?x?xf32>)
+// CHECK-SAME: outs(%[[INIT]] : tensor<?x?xf32>)
 // CHECK: %[[UPDATE:.+]] = subtensor_insert %[[ST_RESULT]]
 // CHECK-SAME: into %[[ARG6]][%[[IV0]], %[[IV1]]]
 // CHECK: scf.yield %[[UPDATE]]
diff --git a/mlir/test/Dialect/Linalg/reshape_fusion.mlir b/mlir/test/Dialect/Linalg/reshape_fusion.mlir
--- a/mlir/test/Dialect/Linalg/reshape_fusion.mlir
+++ b/mlir/test/Dialect/Linalg/reshape_fusion.mlir
@@ -26,17 +26,24 @@
 // CHECK: func @generic_op_reshape_producer_fusion
 // CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?x4x?xf32>
 // CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
+// CHECK-DAG: %[[C0:.+]] = constant 0 : index
+// CHECK-DAG: %[[C1:.+]] = constant 1 : index
+// CHECK-DAG: %[[C2:.+]] = constant 2 : index
 // CHECK: %[[T0:.+]] = linalg.tensor_reshape %[[ARG0]]
 // CHECK-SAME: [0], [1, 2], [3]
 // CHECK: %[[T1:.+]] = linalg.tensor_reshape %[[ARG1]]
 // CHECK-SAME: [0], [1], [2, 3]
 // CHECK: %[[T2:.+]] = linalg.tensor_reshape %[[T0]]
 // CHECK-SAME: [0], [1], [2, 3]
+// CHECK-DAG: %[[D0:.+]] = memref.dim %[[T2]], %[[C0]]
+// CHECK-DAG: %[[D1:.+]] = memref.dim %[[T2]], %[[C1]]
+// CHECK-DAG: %[[D2:.+]] = memref.dim %[[T2]], %[[C2]]
+// CHECK: %[[INIT:.+]] = linalg.init_tensor [%[[D0]], %[[D1]], %[[D2]], 4]
 // CHECK: %[[T3:.+]] = linalg.generic
 // CHECK-SAME: indexing_maps = [#[[MAP5]], #[[MAP6]], #[[MAP6]]]
 // CHECK-SAME: ["parallel", "parallel", "parallel", "parallel"]
 // CHECK-SAME: ins(%[[ARG0]], %[[T1]] : tensor<?x?x4x?xf32>, tensor<?x?x?x4xf32>)
-// CHECK-SAME: outs(%[[T2]] : tensor<?x?x?x4xf32>)
+// CHECK-SAME: outs(%[[INIT]] : tensor<?x?x?x4xf32>)
 // CHECK: %[[T4:.+]] = linalg.tensor_reshape %[[T3]]
 // CHECK-SAME: [0], [1], [2, 3]
 // CHECK-SAME: tensor<?x?x?x4xf32> into tensor<?x?x?xf32>
@@ -67,6 +74,8 @@
 // CHECK: func @generic_op_reshape_consumer_fusion
 // CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
 // CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
+// CHECK-DAG: %[[C0:.+]] = constant 0 : index
+// CHECK-DAG: %[[C2:.+]] = constant 2 : index
 // CHECK: %[[T0:.+]] = linalg.tensor_reshape %[[ARG0]]
 // CHECK-SAME: [0], [1, 2, 3]
 // CHECK-SAME: tensor<?x?xf32> into tensor<?x4x?x5xf32>
@@ -75,14 +84,16 @@
 // CHECK-SAME: tensor<?x?xf32> into tensor<?x4x?x5xf32>
 // CHECK: %[[T2:.+]] = linalg.tensor_reshape %[[ARG0]]
 // CHECK-SAME: [0], [1, 2, 3]
+// CHECK-DAG: %[[D0:.+]] = memref.dim %[[T2]], %[[C0]]
+// CHECK-DAG: %[[D2:.+]] = memref.dim %[[T2]], %[[C2]]
+// CHECK: %[[INIT:.+]] = linalg.init_tensor [%[[D0]], 4, %[[D2]], 5]
 // CHECK: %[[T3:.+]] = linalg.generic
 // CHECK-SAME: indexing_maps = [#[[MAP2]], #[[MAP2]], #[[MAP2]]]
 // CHECK-SAME: ["parallel", "parallel", "parallel", "parallel"]
 // CHECK-SAME: ins(%[[T0]], %[[T1]] : tensor<?x4x?x5xf32>, tensor<?x4x?x5xf32>)
-// CHECK-SAME: outs(%[[T2]] : tensor<?x4x?x5xf32>)
+// CHECK-SAME: outs(%[[INIT]] : tensor<?x4x?x5xf32>)
 // CHECK: return %[[T3]] : tensor<?x4x?x5xf32>
-
 // -----
 
 func @reshape_as_consumer_permutation
@@ -109,6 +120,9 @@
 // CHECK: func @reshape_as_consumer_permutation
 // CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
 // CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
+// CHECK-DAG: %[[C0:.+]] = constant 0 : index
+// CHECK-DAG: %[[C2:.+]] = constant 2 : index
+// CHECK-DAG: %[[C5:.+]] = constant 5 : index
 // CHECK: %[[T0:.+]] = linalg.tensor_reshape %[[ARG0]]
 // CHECK-SAME: [0, 1, 2], [3, 4], [5]
 // CHECK-SAME: tensor<?x?x?xf32> into tensor<3x4x?x?x2x?xf32>
@@ -117,11 +131,15 @@
 // CHECK-SAME: tensor<?x?xf32> into tensor<3x4x?x?xf32>
 // CHECK: %[[T2:.+]] = linalg.tensor_reshape %[[ARG0]]
 // CHECK-SAME: [0, 1], [2], [3, 4, 5]
+// CHECK-DAG: %[[D0:.+]] = memref.dim %[[T2]], %[[C0]]
+// CHECK-DAG: %[[D2:.+]] = memref.dim %[[T2]], %[[C2]]
+// CHECK-DAG: %[[D5:.+]] = memref.dim %[[T2]], %[[C5]]
+// CHECK: %[[INIT:.+]] = linalg.init_tensor [%[[D0]], 2, %[[D2]], 3, 4, %[[D5]]]
 // CHECK: %[[T3:.+]] = linalg.generic
 // CHECK-SAME: indexing_maps = [#[[MAP8]], #[[MAP9]], #[[MAP10]]]
 // CHECK-SAME: ["parallel", "parallel", "parallel", "parallel", "parallel", "parallel"]
 // CHECK-SAME: ins(%[[T0]], %[[T1]] : tensor<3x4x?x?x2x?xf32>, tensor<3x4x?x?xf32>)
-// CHECK-SAME: outs(%[[T2]] : tensor<?x2x?x3x4x?xf32>)
+// CHECK-SAME: outs(%[[INIT]] : tensor<?x2x?x3x4x?xf32>)
 // CHECK: return %[[T3]] : tensor<?x2x?x3x4x?xf32>
 
 // -----
@@ -361,6 +379,11 @@
 // CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1) -> (d0 + d1 * 8)>
 // CHECK: @reshape_as_producer_projected_permutation
 // CHECK-SAME: %[[ARG0:.+]]: tensor<33x8x?xi32>
+// CHECK-SAME: %[[ARG1:.+]]: tensor<264x?x4xi32>
+// CHECK-DAG: %[[C2:.+]] = constant 2 : index
+// CHECK: %[[RESHAPE:.+]] = linalg.tensor_reshape %[[ARG1]] {{\[}}[0, 1], [2], [3]{{\]}}
+// CHECK-DAG: %[[D2:.+]] = memref.dim %[[RESHAPE]], %[[C2]]
+// CHECK: %[[INIT:.+]] = linalg.init_tensor [33, 8, %[[D2]], 4]
 // CHECK: %[[RES:.+]] = linalg.generic
 // CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP1]]]
 // CHECK-SAME: ins(%[[ARG0]] : tensor<33x8x?xi32>)
@@ -411,6 +434,8 @@
 // CHECK: func @generic_op_reshape_consumer_fusion_projected
 // CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
 // CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x?xf32>
+// CHECK-DAG: %[[C0:.+]] = constant 0 : index
+// CHECK-DAG: %[[C1:.+]] = constant 1 : index
 // CHECK: %[[T0:.+]] = linalg.tensor_reshape %[[ARG0]]
 // CHECK-SAME: [0, 1, 2], [3]
 // CHECK-SAME: tensor<?x?xf32> into tensor<?x4x5x?xf32>
@@ -419,11 +444,14 @@
 // CHECK-SAME: tensor<?x?xf32> into tensor<?x4x5x?xf32>
 // CHECK: %[[T2:.+]] = linalg.tensor_reshape %[[ARG0]]
 // CHECK-SAME: [0], [1, 2, 3]
+// CHECK-DAG: %[[D0:.+]] = memref.dim %[[T2]], %[[C0]]
+// CHECK-DAG: %[[D1:.+]] = memref.dim %[[T2]], %[[C1]]
+// CHECK: %[[INIT:.+]] = linalg.init_tensor [%[[D0]], %[[D1]], 4, 5]
 // CHECK: %[[T3:.+]] = linalg.generic
 // CHECK-SAME: indexing_maps = [#[[MAP4]], #[[MAP4]], #[[MAP5]]]
 // CHECK-SAME: ["parallel", "parallel", "parallel", "parallel"]
 // CHECK-SAME: ins(%[[T0]], %[[T1]] : tensor<?x4x5x?xf32>, tensor<?x4x5x?xf32>)
-// CHECK-SAME: outs(%[[T2]] : tensor<?x?x4x5xf32>)
+// CHECK-SAME: outs(%[[INIT]] : tensor<?x?x4x5xf32>)
 // CHECK: return %[[T3]] : tensor<?x?x4x5xf32>
 
 // -----
@@ -501,8 +529,6 @@
 // FOLDUNITDIM-SAME: %[[ARG0:.+]]: tensor<1x?x1x2x1x4xf32>
 // FOLDUNITDIM-SAME: %[[ARG1:.+]]: tensor<?x2x4xf32>
 // FOLDUNITDIM-DAG: %[[RESHAPE:.+]] = linalg.tensor_reshape %[[ARG1]]
-// FOLDUNITDIM-DAG: %[[INIT:.+]] = linalg.init_tensor [1, %{{.+}}, 1, 2, 1, 4]
 // FOLDUNITDIM: linalg.generic
 // FOLDUNITDIM-SAME: ins(%[[ARG0]], %[[RESHAPE]] : tensor<1x?x1x2x1x4xf32>, tensor<1x?x1x2x1x4xf32>)
-// FOLDUNITDIM-SAME: outs(%[[INIT]] : tensor<1x?x1x2x1x4xf32>)
-
+// FOLDUNITDIM-SAME: outs(%{{.+}} : tensor<1x?x1x2x1x4xf32>)
diff --git a/mlir/test/Dialect/Linalg/tile-and-fuse-tensors.mlir b/mlir/test/Dialect/Linalg/tile-and-fuse-tensors.mlir
--- a/mlir/test/Dialect/Linalg/tile-and-fuse-tensors.mlir
+++ b/mlir/test/Dialect/Linalg/tile-and-fuse-tensors.mlir
@@ -122,6 +122,7 @@
 // CHECK: %[[INIT:.+]] = linalg.init_tensor [1, 112, 112, 32] : tensor<1x112x112x32xf32>
 // CHECK-NEXT: %[[FILL:.+]] = linalg.fill(%[[INIT]], %cst) : tensor<1x112x112x32xf32>, f32 -> tensor<1x112x112x32xf32>
+// CHECK: %[[INIT2:.+]] = linalg.init_tensor [1, 8, 16, 4]
 // CHECK-NEXT: scf.for %[[IV0:.+]] = %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[ARG0:.+]] = %[[FILL]])
 // CHECK-NEXT: %[[OFFSET_H:.+]] = affine.apply #[[MAP0]](%[[IV0]])
@@ -130,7 +131,6 @@
 // CHECK-NEXT: %[[ST_INPUT:.+]] = subtensor %arg0[0, %[[OFFSET_H]], %[[OFFSET_W]], 0] [1, 17, 33, 3] [1, 1, 1, 1] : tensor<1x225x225x3xf32> to tensor<1x17x33x3xf32>
 // CHECK-NEXT: scf.for %[[IV2:.+]] = %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[ARG2:.+]] = %[[ARG1]])
 // CHECK-NEXT: %[[ST_ELEM:.+]] = subtensor %[[ELEM]][0, %[[IV0]], %[[IV1]], %[[IV2]]] [1, 8, 16, 4] [1, 1, 1, 1] : tensor<1x112x112x32xf32> to tensor<1x8x16x4xf32>
-// CHECK-NEXT: %[[ST_ARG2:.+]] = subtensor %[[ARG2]][0, %[[IV0]], %[[IV1]], %[[IV2]]] [1, 8, 16, 4] [1, 1, 1, 1] : tensor<1x112x112x32xf32> to tensor<1x8x16x4xf32>
 // CHECK-NEXT: %[[ST_FILTER:.+]] = subtensor %[[FILTER]][0, 0, 0, %[[IV2]]] [3, 3, 3, 4] [1, 1, 1, 1] : tensor<3x3x3x32xf32> to tensor<3x3x3x4xf32>
 // CHECK-NEXT: %[[ST_FILL:.+]] = subtensor %[[FILL]][0, %[[IV0]], %[[IV1]], %[[IV2]]] [1, 8, 16, 4] [1, 1, 1, 1] : tensor<1x112x112x32xf32> to tensor<1x8x16x4xf32>
 // CHECK-NEXT: %[[ST_CONV:.+]] = linalg.conv_2d_input_nhwc_filter_hwcf
@@ -138,7 +138,7 @@
 // CHECK-SAME: outs(%[[ST_FILL]] : tensor<1x8x16x4xf32>)
 // CHECK-NEXT: %[[ADD:.+]] = linalg.generic
 // CHECK-SAME: ins(%[[ST_CONV]], %[[ST_ELEM]] : tensor<1x8x16x4xf32>, tensor<1x8x16x4xf32>)
-// CHECK-SAME: outs(%[[ST_ARG2]] : tensor<1x8x16x4xf32>)
+// CHECK-SAME: outs(%[[INIT2]] : tensor<1x8x16x4xf32>)
 // CHECK: subtensor_insert %[[ADD]] into %[[ARG2]][0, %[[IV0]], %[[IV1]], %[[IV2]]] [1, 8, 16, 4]
 
 // -----
@@ -260,11 +260,10 @@
 // CHECK-NEXT: %[[ST_INPUT:.+]] = subtensor %[[INPUT]][%[[IV0]], %[[OFFSET_OH]], %[[OFFSET_OW]], 0]
 // CHECK-SAME: [%[[SIZE_INPUT_N]], %[[SIZE_INPUT_H]], %[[SIZE_INPUT_W]], %[[INPUT_C]]]
 // CHECK-NEXT: %[[SIZE_ELEM_OW_2:.+]] = affine.min #[[BOUND4_MAP_2]](%[[IV2]])[%[[ELEM_OW]]]
+// CHECK-NEXT: %[[INIT_SUBTENSOR:.+]] = linalg.init_tensor [%[[SIZE_ELEM_N]], %[[SIZE_ELEM_OH]], %[[SIZE_ELEM_OW]], %[[SIZE_ELEM_OC]]]
 // CHECK-NEXT: scf.for %[[IV3:.+]] = %{{.+}} to %[[ELEM_OC]] step %{{.+}} iter_args(%[[ARG:[a-z0-9]+]]
 // CHECK-NEXT: %[[ST_ELEM:.+]] = subtensor %[[ELEM]][%[[IV0]], %[[IV1]], %[[IV2]], %[[IV3]]]
 // CHECK-SAME: [%[[SIZE_ELEM_N]], %[[SIZE_ELEM_OH]], %[[SIZE_ELEM_OW]], %[[SIZE_ELEM_OC]]]
-// CHECK-NEXT: %[[ST_ARG:.+]] = subtensor %[[ARG]][%[[IV0]], %[[IV1]], %[[IV2]], %[[IV3]]]
-// CHECK-SAME: [%[[SIZE_ELEM_N]], %[[SIZE_ELEM_OH]], %[[SIZE_ELEM_OW]], %[[SIZE_ELEM_OC]]]
 // CHECK-NEXT: %[[SIZE_ELEM_OC_2:.+]] = affine.min #[[BOUND2_MAP_2]](%[[IV3]], %[[IV2]])[%[[FILTER_OC]], %[[ELEM_OC]]]
 // CHECK-NEXT: %[[ST_FILTER:.+]] = subtensor %[[FILTER]][0, 0, 0, %[[IV3]]]
 // CHECK-SAME: [%[[FILTER_H]], %[[FILTER_W]], %[[FILTER_IC]], %[[SIZE_ELEM_OC_2]]]
@@ -276,6 +275,6 @@
 // CHECK-SAME: outs(%[[ST_FILL]] : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32>
 // CHECK-NEXT: %[[ST_ADD:.+]] = linalg.generic
 // CHECK-SAME: ins(%[[ST_CONV]], %[[ST_ELEM]] : tensor<?x?x?x?xf32>, tensor<?x?x?x?xf32>)
-// CHECK-SAME: outs(%[[ST_ARG]] : tensor<?x?x?x?xf32>)
+// CHECK-SAME: outs(%[[INIT_SUBTENSOR]] : tensor<?x?x?x?xf32>)
 // CHECK: subtensor_insert %[[ST_ADD]] into %[[ARG]][%[[IV0]], %[[IV1]], %[[IV2]], %[[IV3]]]
 // CHECK-SAME: [%[[SIZE_ELEM_N]], %[[SIZE_ELEM_OH]], %[[SIZE_ELEM_OW]], %[[SIZE_ELEM_OC]]]
diff --git a/mlir/test/Dialect/Linalg/tile-tensors.mlir b/mlir/test/Dialect/Linalg/tile-tensors.mlir
--- a/mlir/test/Dialect/Linalg/tile-tensors.mlir
+++ b/mlir/test/Dialect/Linalg/tile-tensors.mlir
@@ -94,10 +94,10 @@
 // CHECK: %[[TD2:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[TC2:.+]] = %[[TC1]]) -> (tensor<?x?x?xf32>) {
 // CHECK: %[[STARG0:.+]] = subtensor %[[ARG0]][{{.+}}] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
 // CHECK: %[[STARG1:.+]] = subtensor %[[ARG1]][{{.+}}] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
-// CHECK: %[[STARG2:.+]] = subtensor %[[TC2]][{{.+}}] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
+// CHECK: %[[INIT_SUBTENSOR:.+]] = linalg.init_tensor
 // CHECK: %[[STRETURN:.+]] = linalg.generic
 // CHECK-SAME: ins(%[[STARG0]], %[[STARG1]] : tensor<?x?x?xf32>, tensor<?x?x?xf32>)
-// CHECK-SAME: outs(%[[STARG2]] : tensor<?x?x?xf32>)
+// CHECK-SAME: outs(%[[INIT_SUBTENSOR]] : tensor<?x?x?xf32>)
 // CHECK: %[[TD:.+]] = subtensor_insert %[[STRETURN]] into %[[TC2]]
 // CHECK: scf.yield %[[TD]]
 // CHECK: }
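For reference, the rewrite performed by `RemoveOutsDependency` on the `@break_outs_dependency` test above looks roughly like this. This is a minimal sketch mirroring the updated CHECK lines in canonicalize.mlir; the SSA names (`%init`, `%d0`, `%d1`, `%c0`, `%c1`) are illustrative, not taken from the patch:

  // Before: %arg0 feeds `outs` even though the payload never reads %arg2,
  // so %0 carries a false value dependence on %arg0.
  %0 = linalg.generic #trait ins(%arg0 : tensor<?x?xf32>) outs(%arg0 : tensor<?x?xf32>) {
  ^bb0(%arg1 : f32, %arg2 : f32) :
    %1 = addf %arg1, %arg1 : f32
    linalg.yield %1 : f32
  } -> tensor<?x?xf32>

  // After: `outs` comes from a fresh linalg.init_tensor of the same shape,
  // so the op depends only on the shape of %arg0, not on its value.
  %c0 = constant 0 : index
  %c1 = constant 1 : index
  %d0 = memref.dim %arg0, %c0 : tensor<?x?xf32>
  %d1 = memref.dim %arg0, %c1 : tensor<?x?xf32>
  %init = linalg.init_tensor [%d0, %d1] : tensor<?x?xf32>
  %0 = linalg.generic #trait ins(%arg0 : tensor<?x?xf32>) outs(%init : tensor<?x?xf32>) {
  ^bb0(%arg1 : f32, %arg2 : f32) :
    %1 = addf %arg1, %arg1 : f32
    linalg.yield %1 : f32
  } -> tensor<?x?xf32>

Breaking this dependence is what allows the tiling and fusion tests above to drop the `subtensor` of the loop-carried argument (e.g. `%[[ST_ARG6]]`, `%[[ST_ARG2]]`) and use a tile-sized `init_tensor` for the fused elementwise op instead.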