diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -1284,10 +1284,6 @@
   if (!isa<linalg::GenericOp>(op))
     return failure();
 
-  // TODO: Index vectorization assumes static shape.
-  if (op.hasIndexSemantics())
-    return failure();
-
   LDBG("Dynamically-shaped op meets vectorization pre-conditions\n");
   return success();
 }
diff --git a/mlir/test/Dialect/Linalg/vectorization.mlir b/mlir/test/Dialect/Linalg/vectorization.mlir
--- a/mlir/test/Dialect/Linalg/vectorization.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization.mlir
@@ -1834,6 +1834,39 @@
 // -----
 
+func.func @masked_vectorize_nd_tensor_extract_with_affine_apply_contiguous(%6: tensor<80x16xf32>, %arg0: index, %extracted_slice : tensor<1x3xf32>) -> tensor<1x3xf32> {
+  %c79 = arith.constant 79 : index
+  %1 = linalg.generic {
+    indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>],
+    iterator_types = ["parallel", "parallel"]
+  } outs(%extracted_slice : tensor<1x3xf32>) {
+  ^bb0(%out: f32):
+    %2 = linalg.index 1 : index
+    %3 = affine.apply affine_map<(d0, d1) -> (d0 + d1)>(%2, %arg0)
+    %extracted = tensor.extract %6[%c79, %3] : tensor<80x16xf32>
+    linalg.yield %extracted : f32
+  } -> tensor<1x3xf32>
+  return %1 : tensor<1x3xf32>
+}
+
+// CHECK-LABEL: func.func @masked_vectorize_nd_tensor_extract_with_affine_apply_contiguous
+// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 3 : index
+// CHECK: %[[VAL_8:.*]] = vector.create_mask %[[VAL_4]], %[[VAL_5]] : vector<1x4xi1>
+// CHECK: %[[VAL_9:.*]] = vector.mask %[[VAL_8]] { vector.transfer_read {{.*}} {in_bounds = [true, true]} : tensor<1x3xf32>, vector<1x4xf32> } : vector<1x4xi1> -> vector<1x4xf32>
+// CHECK: %[[VAL_11:.*]] = vector.broadcast {{.*}} : index to vector<4xindex>
+// CHECK: %[[VAL_12:.*]] = arith.addi {{.*}} : vector<4xindex>
+// CHECK: %[[VAL_20:.*]] = vector.mask %[[VAL_8]] { vector.transfer_read {{.*}} {in_bounds = [true, true]} : tensor<80x16xf32>, vector<1x4xf32> } : vector<1x4xi1> -> vector<1x4xf32>
+// CHECK: %[[VAL_22:.*]] = vector.mask %[[VAL_8]] { vector.transfer_write {{.*}} {in_bounds = [true, true]} : vector<1x4xf32>, tensor<1x3xf32> } : vector<1x4xi1> -> tensor<1x3xf32>
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!pdl.operation) -> !pdl.operation
+  transform.structured.masked_vectorize %0 vector_sizes [1, 4] { vectorize_nd_extract }
+}
+
+// -----
+
 // The vectorizer converts `affine.apply` so that the subsequent Ops can be vectorised based on the converted ops. Gather load.
 func.func @vectorize_nd_tensor_extract_with_affine_apply_gather(%6: tensor<80x16xf32>, %arg0: index, %extracted_slice : tensor<1x4xf32>) -> tensor<1x4xf32> {
   %c16 = arith.constant 16 : index
@@ -1878,6 +1911,44 @@
 // -----
 
+func.func @masked_vectorize_nd_tensor_extract_with_affine_apply_gather(%6: tensor<80x16xf32>, %arg0: index, %extracted_slice : tensor<1x3xf32>) -> tensor<1x3xf32> {
+  %c16 = arith.constant 16 : index
+  %1 = linalg.generic {
+    indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>],
+    iterator_types = ["parallel", "parallel"]
+  } outs(%extracted_slice : tensor<1x3xf32>) {
+  ^bb0(%out: f32):
+    %2 = linalg.index 1 : index
+    %3 = affine.apply affine_map<(d0, d1) -> (d0 + d1)>(%2, %arg0)
+    %extracted = tensor.extract %6[%3, %c16] : tensor<80x16xf32>
+    linalg.yield %extracted : f32
+  } -> tensor<1x3xf32>
+  return %1 : tensor<1x3xf32>
+}
+
+// CHECK-LABEL: func.func @masked_vectorize_nd_tensor_extract_with_affine_apply_gather
+// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 3 : index
+// CHECK: %[[VAL_8:.*]] = vector.create_mask %[[VAL_4]], %[[VAL_5]] : vector<1x4xi1>
+// CHECK: %[[VAL_9:.*]] = vector.mask %[[VAL_8]] { vector.transfer_read {{.*}} {in_bounds = [true, true]} : tensor<1x3xf32>, vector<1x4xf32> } : vector<1x4xi1> -> vector<1x4xf32>
+// CHECK: %[[VAL_11:.*]] = vector.broadcast {{.*}} : index to vector<4xindex>
+// CHECK: %[[VAL_12:.*]] = arith.addi {{.*}} : vector<4xindex>
+// CHECK: %[[VAL_16:.*]] = vector.broadcast {{.*}} : vector<4xindex> to vector<1x4xindex>
+// CHECK: %[[VAL_18:.*]] = tensor.dim {{.*}} : tensor<80x16xf32>
+// CHECK: %[[VAL_19:.*]] = vector.broadcast {{.*}} : index to vector<1x4xindex>
+// CHECK: %[[VAL_20:.*]] = arith.muli {{.*}} : vector<1x4xindex>
+// CHECK: %[[VAL_22:.*]] = arith.addi {{.*}} : vector<1x4xindex>
+// CHECK: %[[VAL_23:.*]] = vector.mask %[[VAL_8]] { vector.gather {{.*}} : tensor<80x16xf32>, vector<1x4xindex>, vector<1x4xi1>, vector<1x4xf32> into vector<1x4xf32> } : vector<1x4xi1> -> vector<1x4xf32>
+// CHECK: %[[VAL_25:.*]] = vector.mask %[[VAL_8]] { vector.transfer_write {{.*}} {in_bounds = [true, true]} : vector<1x4xf32>, tensor<1x3xf32> } : vector<1x4xi1> -> tensor<1x3xf32>
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!pdl.operation) -> !pdl.operation
+  transform.structured.masked_vectorize %0 vector_sizes [1, 4] { vectorize_nd_extract }
+}
+
+// -----
+
 // Make sure that non-linear arithmetic operations (e.g. arith.maxsi) are allowed when calculating indices for load operations. Gather load.
 func.func @vectorize_nd_tensor_extract_with_maxsi_gather(%arg0: tensor<80x16xf32>, %extracted_slice : tensor<1x4xf32>) -> tensor<1x4xf32> {
   %c79 = arith.constant 79 : index
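
The updated FileCheck tests can be exercised locally through lit; a sketch, assuming a standard CMake/Ninja build tree at `build/` (adjust the path to your build directory, and defer to the RUN line inside vectorization.mlir for the exact mlir-opt invocation):

  # Run only the modified test file, verbose on failure.
  build/bin/llvm-lit -sv mlir/test/Dialect/Linalg/vectorization.mlir

  # Or run the full MLIR test suite.
  ninja -C build check-mlir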