diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -257,11 +257,8 @@
     return failure();
 
   // Setup RAII guard to return properly.
-  bool succeeded = true;
   LinalgOp tiledOp = res->op;
   auto guard = llvm::make_scope_exit([&]() {
-    if (!succeeded)
-      return;
     // Return relevant information to derived pattern.
     result = *res;
     // Replace filter on both tiledOp and tiledAndPaddedOp, if necessary.
@@ -278,7 +275,6 @@
 
   // Try to pad on the fly by rewriting res->op as a padded op.
   if (failed(rewriteAsPaddedOp(rewriter, *res, options))) {
     // Set so RAII guard does not propagate TiledLinalgOp to `result`.
-    succeeded = false;
     return failure();
   }
diff --git a/mlir/test/Dialect/Linalg/tile-and-pad-tensors.mlir b/mlir/test/Dialect/Linalg/tile-and-pad-tensors.mlir
--- a/mlir/test/Dialect/Linalg/tile-and-pad-tensors.mlir
+++ b/mlir/test/Dialect/Linalg/tile-and-pad-tensors.mlir
@@ -1,4 +1,5 @@
-// RUN: mlir-opt %s -test-linalg-transform-patterns=test-tile-and-pad-pattern -canonicalize | FileCheck %s
+// RUN: mlir-opt %s -test-linalg-transform-patterns="test-tile-and-pad-pattern tile-sizes-for-padding=2,3,4" -canonicalize | FileCheck %s
+// RUN: mlir-opt %s -test-linalg-transform-patterns="test-tile-and-pad-pattern tile-sizes-for-padding=2,3" -canonicalize | FileCheck %s -check-prefix=CHECK-1DIM-TILE
 
 // CHECK-LABEL: func @matmul_tensors(
 //  CHECK-SAME:    %[[TA:[0-9a-z]+]]: tensor<?x?xi8>
@@ -39,3 +40,10 @@
 //      CHECK: return %[[TD0]] : tensor<?x?xi32>
   return %0 : tensor<?x?xi32>
 }
+
+// CHECK-1DIM-TILE: func @matmul_tensors(
+//  CHECK-1DIM-TILE:    %[[TA:[0-9a-z]+]]: tensor<?x?xi8>
+//  CHECK-1DIM-TILE:    %[[TB:[0-9a-z]+]]: tensor<?x?xi8>
+//  CHECK-1DIM-TILE:    %[[TC:[0-9a-z]+]]: tensor<?x?xi32>) -> tensor<?x?xi32> {
+// CHECK-1DIM-TILE-NOT: scf.for
+// CHECK-1DIM-TILE: linalg.matmul_i8_i8_i32 ins(%[[TA]], %[[TB]] : tensor<?x?xi8>, tensor<?x?xi8>) outs(%[[TC]] : tensor<?x?xi32>) -> tensor<?x?xi32>
diff --git a/mlir/test/lib/Transforms/TestLinalgTransforms.cpp b/mlir/test/lib/Transforms/TestLinalgTransforms.cpp
--- a/mlir/test/lib/Transforms/TestLinalgTransforms.cpp
+++ b/mlir/test/lib/Transforms/TestLinalgTransforms.cpp
@@ -87,6 +87,10 @@
   Option<int> testHoistPadding{*this, "test-hoist-padding",
                                llvm::cl::desc("Test hoist padding"),
                                llvm::cl::init(0)};
+  ListOption<int64_t> tileSizesForPadding{
+      *this, "tile-sizes-for-padding",
+      llvm::cl::desc("Linalg tile sizes when tile+pad"), llvm::cl::ZeroOrMore,
+      llvm::cl::MiscFlags::CommaSeparated};
 };
 } // end anonymous namespace
 
@@ -522,12 +526,12 @@
   return b.create<ConstantOp>(op.getOwner()->getLoc(), t, b.getZeroAttr(t));
 }
 
-static void applyTileAndPadPattern(FuncOp funcOp) {
+static void applyTileAndPadPattern(FuncOp funcOp, ArrayRef<int64_t> tileSizes) {
   MLIRContext *context = funcOp.getContext();
   RewritePatternSet tilingPattern(context);
   auto linalgTilingOptions =
       linalg::LinalgTilingOptions()
-          .setTileSizes({2, 3, 4})
+          .setTileSizes(tileSizes)
           .setPaddingValueComputationFunction(getNeutralOfLinalgOp);
   tilingPattern.add<linalg::LinalgTilingPattern<linalg::MatmulI8I8I32Op>>(
       context, linalgTilingOptions,
@@ -570,7 +574,7 @@
   if (testAffineMinSCFCanonicalizationPatterns)
     return applyAffineMinSCFCanonicalizationPatterns(getFunction());
   if (testTileAndPadPattern)
-    return applyTileAndPadPattern(getFunction());
+    return applyTileAndPadPattern(getFunction(), tileSizesForPadding);
   if (testHoistPadding) {
    getFunction().walk([&](linalg::PadTensorOp padTensorOp) {
       (void)linalg::hoistPaddingOnTensors(padTensorOp, testHoistPadding);
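
Note for readers unfamiliar with the idiom the first hunk simplifies: llvm::make_scope_exit installs a callback that runs on every exit from the enclosing scope, including early `return failure()` paths, which is what the removed `succeeded` flag used to guard against. A minimal, self-contained sketch of that behavior follows (hypothetical names, not part of this patch):

// Sketch of the llvm::make_scope_exit RAII idiom: the guard's callback
// fires on *every* scope exit, early returns included.
#include "llvm/ADT/ScopeExit.h"
#include <cstdio>

static bool process(bool fail) {
  auto guard = llvm::make_scope_exit([] { std::puts("guard ran"); });
  if (fail)
    return false; // early exit: guard still fires
  return true;    // normal exit: guard fires here too
}

int main() {
  process(true);  // prints "guard ran"
  process(false); // prints "guard ran"
  return 0;
}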