diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
--- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
@@ -1594,6 +1594,7 @@
   let arguments = (ins PDL_Operation:$target,
                        Variadic<PDL_Operation>:$vector_sizes,
+                       UnitAttr:$vectorize_nd_extract,
                        DefaultValuedOptionalAttr<DenseI64ArrayAttr, "{}">:
                           $static_vector_sizes);
   let results = (outs);

diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -2986,7 +2986,8 @@
            << "cannot vectorize non-Linalg op";
   }

-  if (failed(linalg::vectorize(rewriter, linalgOp, vectorSizes))) {
+  if (failed(linalg::vectorize(rewriter, linalgOp, vectorSizes,
+                               getVectorizeNdExtract()))) {
     return mlir::emitSilenceableFailure(target->getLoc())
            << "failed to vectorize op";
   }
diff --git a/mlir/test/Dialect/Linalg/vectorization-unsupported.mlir b/mlir/test/Dialect/Linalg/vectorization-unsupported.mlir
new file mode 100644
--- /dev/null
+++ b/mlir/test/Dialect/Linalg/vectorization-unsupported.mlir
@@ -0,0 +1,29 @@
+// RUN: mlir-opt %s -test-transform-dialect-interpreter -split-input-file -verify-diagnostics
+
+// Masked vectorisation of `tensor.extract`:
+//   * requires the `{ vectorize_nd_extract }` attribute,
+//   * has not been implemented yet (hence the attribute is absent).
+// TODO: Implement masked vectorization for `tensor.extract`
+
+#map1 = affine_map<(d0, d1) -> (d0, d1)>
+func.func @extract_masked_vectorize(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
+  %c0 = arith.constant 1 : index
+  %c1 = arith.constant 2 : index
+  // expected-error@+1 {{failed to vectorize op}}
+  %2 = linalg.generic {
+    indexing_maps = [#map1],
+    iterator_types = ["parallel", "parallel"]
+  } outs(%arg1 : tensor<?x?xf32>) {
+  ^bb0(%arg3: f32):
+    %7 = tensor.extract %arg0[%c0, %c1] : tensor<?x?xf32>
+    linalg.yield %7 : f32
+  } -> tensor<?x?xf32>
+  return %2 : tensor<?x?xf32>
+}
+
+
+transform.sequence failures(propagate) {
+  ^bb1(%arg1: !pdl.operation):
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!pdl.operation) -> !pdl.operation
+    transform.structured.masked_vectorize %0 vector_sizes [3, 3]
+  }
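
For context, a minimal sketch (not part of this patch) of how the new unit attribute is intended to be requested once masked vectorisation of `tensor.extract` lands; the trailing placement of the attribute in the printed form is an assumption based on the op's plain attr-dict, not something this patch tests:

transform.sequence failures(propagate) {
  ^bb1(%arg1: !pdl.operation):
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!pdl.operation) -> !pdl.operation
    // Hypothetical usage: opt in to vectorising n-D `tensor.extract` inside the
    // matched linalg.generic. With the current patch this still fails to vectorize,
    // as exercised by the negative test above.
    transform.structured.masked_vectorize %0 vector_sizes [3, 3] { vectorize_nd_extract }
}

The attribute mirrors the existing `vectorize_nd_extract` unit attribute on `transform.structured.vectorize`; here it is simply forwarded to `linalg::vectorize` as an extra argument.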