diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/CodegenStrategy.h b/mlir/include/mlir/Dialect/Linalg/Transforms/CodegenStrategy.h
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/CodegenStrategy.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/CodegenStrategy.h
@@ -268,7 +268,6 @@
   vectorize(StringRef opName,
             LinalgTransformationFilter::FilterFunction f = nullptr,
             bool vectorizePadding = false) {
-    assert(!opName.empty() && "expected an op name");
     transformationSequence.emplace_back(std::make_unique<Vectorize>(
         opName, linalg::LinalgVectorizationOptions(), f, vectorizePadding));
     return *this;
diff --git a/mlir/test/Dialect/Linalg/codegen-strategy.mlir b/mlir/test/Dialect/Linalg/codegen-strategy.mlir
--- a/mlir/test/Dialect/Linalg/codegen-strategy.mlir
+++ b/mlir/test/Dialect/Linalg/codegen-strategy.mlir
@@ -2,6 +2,7 @@
 // RUN: mlir-opt %s -test-linalg-codegen-strategy="anchor-func=matmul anchor-op=linalg.matmul tile-sizes=16,32,64 promote promote-full-tile-pad register-tile-sizes=2,4,8 vectorize vectorize-contraction-to=outerproduct split-transfers=true unroll-vector-transfers=false" -split-input-file | FileCheck %s --check-prefix=CHECK-OUTER
 // RUN: mlir-opt %s -test-linalg-codegen-strategy="anchor-func=matmul anchor-op=linalg.matmul tile-sizes=16,32,64 tile-interchange=1,2,0 generalize iterator-interchange=0,2,1" -split-input-file | FileCheck %s --check-prefix=CHECK-INTERCHANGE
 // RUN: mlir-opt %s -test-linalg-codegen-strategy="anchor-func=matmul anchor-op=linalg.matmul tile-sizes=16,32,64 pad pack-paddings=1,1,0 hoist-paddings=3,3,0" -split-input-file | FileCheck %s --check-prefix=CHECK-PAD
+// RUN: mlir-opt %s -test-linalg-codegen-strategy="anchor-func=matmul anchor-op=linalg.matmul tile-sizes=16,32,64 fuse pad vectorize" -split-input-file | FileCheck %s --check-prefix=CHECK-FUSE
 
 // CHECK-INTRINSIC: func @matmul(
 // CHECK-OUTER: func @matmul(
@@ -56,3 +57,20 @@
   %0 = linalg.matmul ins(%arg0, %arg1: tensor<72x72xf32>, tensor<72x72xf32>) outs(%arg2: tensor<72x72xf32>) -> tensor<72x72xf32>
   return %0 : tensor<72x72xf32>
 }
+
+// -----
+
+// CHECK-FUSE: func @matmul(
+func @matmul(%arg0: tensor<72x72xf32>, %arg1: tensor<72x72xf32>, %arg2: tensor<72x72xf32>) -> tensor<72x72xf32> {
+
+  // Check the padding and vectorization apply to the fill operation due to the empty anchor op string.
+  // CHECK-FUSE: %[[CST:.*]] = arith.constant dense<0.000000e+00>
+  // CHECK-FUSE: vector.transfer_write %[[CST]]
+  %cst = arith.constant 0.0 : f32
+  %0 = linalg.fill(%cst, %arg0) : f32, tensor<72x72xf32> -> tensor<72x72xf32>
+
+  // Check the matmul is padded and vectorized despite the empty anchor op string.
+  // CHECK-FUSE: vector.outerproduct
+  %1 = linalg.matmul ins(%arg0, %arg1: tensor<72x72xf32>, tensor<72x72xf32>) outs(%0: tensor<72x72xf32>) -> tensor<72x72xf32>
+  return %1 : tensor<72x72xf32>
+}
diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgCodegenStrategy.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgCodegenStrategy.cpp
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgCodegenStrategy.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgCodegenStrategy.cpp
@@ -165,24 +165,24 @@
     vector::VectorTransferSplit vectorTransferSplit) {
   assert(!anchorOpName.empty());
   CodegenStrategy strategy;
-  StringRef genericOpName = GenericOp::getOperationName();
   strategy
       .tileAndFuseIf(fuse && !tileSizes.empty(), anchorOpName,
                      tilingAndFusionOptions)
       .tileIf(!fuse && !tileSizes.empty(), anchorOpName, tilingOptions)
-      .promoteIf(promote, anchorOpName,
+      .promoteIf(!fuse && promote, anchorOpName,
                  LinalgPromotionOptions()
                      .setAlignment(16)
                      .setUseFullTileBuffersByDefault(promoteFullTile))
-      .tileIf(!registerTileSizes.empty(), anchorOpName, registerTilingOptions)
-      .promoteIf(registerPromote, anchorOpName,
+      .tileIf(!fuse && !registerTileSizes.empty(), anchorOpName,
+              registerTilingOptions)
+      .promoteIf(!fuse && registerPromote, anchorOpName,
                  LinalgPromotionOptions()
                      .setAlignment(16)
                      .setUseFullTileBuffersByDefault(registerPromoteFullTile))
-      .padIf(pad, anchorOpName, paddingOptions)
-      .generalizeIf(generalize, anchorOpName)
+      .padIf(pad, "", paddingOptions)
+      .generalizeIf(generalize, "")
       .interchangeIf(!iteratorInterchange.empty(), iteratorInterchange)
-      .vectorizeIf(vectorize, generalize ? genericOpName : anchorOpName)
+      .vectorizeIf(vectorize, "")
       .vectorLowering(
           LinalgVectorLoweringOptions()
              .setVectorTransformsOptions(
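
Note: with the assertion removed, an empty anchor-op name acts as a wildcard, so padding, generalization, and vectorization apply to every Linalg op (including producers such as linalg.fill that tile-and-fuse pulls in), while tiling and promotion remain anchored on a concrete op name. A minimal usage sketch, not part of the patch; it only uses the builder calls visible above, and the option types passed in (LinalgTilingOptions, LinalgPaddingOptions) are assumed to be configured by the caller:

// Usage sketch: mirrors the call pattern in TestLinalgCodegenStrategy.cpp.
#include "mlir/Dialect/Linalg/Transforms/CodegenStrategy.h"

using namespace mlir;
using namespace mlir::linalg;

static void buildStrategy(CodegenStrategy &strategy,
                          const LinalgTilingOptions &tilingOptions,
                          const LinalgPaddingOptions &paddingOptions) {
  strategy
      // Tiling still anchors on a concrete op name.
      .tileIf(/*condition=*/true, "linalg.matmul", tilingOptions)
      // An empty anchor-op name now means "any Linalg op", so padding and
      // vectorization also reach fused producers such as linalg.fill.
      .padIf(/*condition=*/true, /*opName=*/"", paddingOptions)
      .vectorizeIf(/*condition=*/true, /*opName=*/"");
}

This is also why the test driver above drops the generalize ? genericOpName : anchorOpName selection: anchoring vectorizeIf on the empty string covers both the generalized and the named form.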