diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -1285,10 +1285,6 @@
   if (!isa<linalg::GenericOp>(op))
     return failure();
 
-  // TODO: Index vectorization assumes static shape.
-  if (op.hasIndexSemantics())
-    return failure();
-
   LDBG("Dynamically-shaped op meets vectorization pre-conditions\n");
   return success();
 }
diff --git a/mlir/test/Dialect/Linalg/vectorization.mlir b/mlir/test/Dialect/Linalg/vectorization.mlir
--- a/mlir/test/Dialect/Linalg/vectorization.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization.mlir
@@ -1834,6 +1834,94 @@
 
 // -----
 
+func.func @masked_static_vectorize_nd_tensor_extract_with_affine_apply_contiguous(%6: tensor<80x16xf32>, %arg0: index, %extracted_slice : tensor<1x3xf32>) -> tensor<1x3xf32> {
+  %c79 = arith.constant 79 : index
+  %1 = linalg.generic {
+    indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>],
+    iterator_types = ["parallel", "parallel"]
+  } outs(%extracted_slice : tensor<1x3xf32>) {
+  ^bb0(%out: f32):
+    %2 = linalg.index 1 : index
+    %3 = affine.apply affine_map<(d0, d1) -> (d0 + d1)>(%2, %arg0)
+    %extracted = tensor.extract %6[%c79, %3] : tensor<80x16xf32>
+    linalg.yield %extracted : f32
+  } -> tensor<1x3xf32>
+  return %1 : tensor<1x3xf32>
+}
+
+// CHECK-LABEL: func.func @masked_static_vectorize_nd_tensor_extract_with_affine_apply_contiguous
+// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 3 : index
+// CHECK: %[[VAL_8:.*]] = vector.create_mask %[[VAL_4]], %[[VAL_5]] : vector<1x4xi1>
+// CHECK: %[[VAL_9:.*]] = vector.mask %[[VAL_8]] { vector.transfer_read {{.*}} {in_bounds = [true, true]} : tensor<1x3xf32>, vector<1x4xf32> } : vector<1x4xi1> -> vector<1x4xf32>
+// CHECK: %[[VAL_11:.*]] = vector.broadcast {{.*}} : index to vector<4xindex>
+// CHECK: %[[VAL_12:.*]] = arith.addi {{.*}} : vector<4xindex>
+// CHECK: %[[VAL_20:.*]] = vector.mask %[[VAL_8]] { vector.transfer_read {{.*}} {in_bounds = [true, true]} : tensor<80x16xf32>, vector<1x4xf32> } : vector<1x4xi1> -> vector<1x4xf32>
+// CHECK: %[[VAL_22:.*]] = vector.mask %[[VAL_8]] { vector.transfer_write {{.*}} {in_bounds = [true, true]} : vector<1x4xf32>, tensor<1x3xf32> } : vector<1x4xi1> -> tensor<1x3xf32>
+
+transform.sequence failures(propagate) {
+ ^bb1(%arg1: !pdl.operation):
+   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!pdl.operation) -> !pdl.operation
+   transform.structured.masked_vectorize %0 vector_sizes [1, 4] { vectorize_nd_extract }
+ }
+
+// -----
+
+func.func @masked_dynamic_vectorize_nd_tensor_extract_with_affine_apply_contiguous(%6: tensor<?x?xf32>, %arg0: index, %extracted_slice : tensor<?x?xf32>) -> tensor<?x?xf32> {
+  %c79 = arith.constant 79 : index
+  %1 = linalg.generic {
+    indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>],
+    iterator_types = ["parallel", "parallel"]
+  } outs(%extracted_slice : tensor<?x?xf32>) {
+  ^bb0(%out: f32):
+    %2 = linalg.index 1 : index
+    %3 = affine.apply affine_map<(d0, d1) -> (d0 + d1)>(%2, %arg0)
+    %extracted = tensor.extract %6[%c79, %3] : tensor<?x?xf32>
+    linalg.yield %extracted : f32
+  } -> tensor<?x?xf32>
+  return %1 : tensor<?x?xf32>
+}
+
+// CHECK-LABEL: func.func @masked_dynamic_vectorize_nd_tensor_extract_with_affine_apply_contiguous(
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: index,
+// CHECK-SAME: %[[VAL_2:.*]]: tensor<?x?xf32>) -> tensor<?x?xf32> {
+// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 79 : index
+// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_5:.*]] = tensor.dim %[[VAL_2]], %[[VAL_4]] : tensor<?x?xf32>
+// CHECK: %[[VAL_6:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_7:.*]] = tensor.dim %[[VAL_2]], %[[VAL_6]] : tensor<?x?xf32>
+// CHECK-DAG: %[[VAL_8:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[VAL_9:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK: %[[VAL_10:.*]] = vector.create_mask %[[VAL_5]], %[[VAL_7]] : vector<1x4xi1>
+// CHECK: %[[VAL_11:.*]] = vector.mask %[[VAL_10]] { vector.transfer_read %[[VAL_2]]{{\[}}%[[VAL_8]], %[[VAL_8]]], %[[VAL_9]] {in_bounds = [true, true]} : tensor<?x?xf32>, vector<1x4xf32> } : vector<1x4xi1> -> vector<1x4xf32>
+// CHECK: %[[VAL_12:.*]] = arith.constant dense<[0, 1, 2, 3]> : vector<4xindex>
+// CHECK: %[[VAL_13:.*]] = vector.broadcast %[[VAL_1]] : index to vector<4xindex>
+// CHECK: %[[VAL_14:.*]] = arith.addi %[[VAL_12]], %[[VAL_13]] : vector<4xindex>
+// CHECK-DAG: %[[VAL_15:.*]] = arith.constant dense<true> : vector<1x4xi1>
+// CHECK-DAG: %[[VAL_16:.*]] = arith.constant dense<0.000000e+00> : vector<1x4xf32>
+// CHECK-DAG: %[[VAL_17:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[VAL_18:.*]] = arith.constant dense<79> : vector<1x4xindex>
+// CHECK-DAG: %[[VAL_19:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_20:.*]] = tensor.dim %[[VAL_0]], %[[VAL_19]] : tensor<?x?xf32>
+// CHECK: %[[VAL_21:.*]] = vector.broadcast %[[VAL_20]] : index to vector<1x4xindex>
+// CHECK: %[[VAL_22:.*]] = arith.muli %[[VAL_18]], %[[VAL_21]] : vector<1x4xindex>
+// CHECK: %[[VAL_23:.*]] = vector.broadcast %[[VAL_14]] : vector<4xindex> to vector<1x4xindex>
+// CHECK: %[[VAL_24:.*]] = arith.addi %[[VAL_23]], %[[VAL_22]] : vector<1x4xindex>
+// CHECK: %[[VAL_25:.*]] = vector.mask %[[VAL_10]] { vector.gather %[[VAL_0]]{{\[}}%[[VAL_17]], %[[VAL_17]]] {{\[}}%[[VAL_24]]], %[[VAL_15]], %[[VAL_16]] : tensor<?x?xf32>, vector<1x4xindex>, vector<1x4xi1>, vector<1x4xf32> into vector<1x4xf32> } : vector<1x4xi1> -> vector<1x4xf32>
+// CHECK: %[[VAL_26:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_27:.*]] = vector.mask %[[VAL_10]] { vector.transfer_write %[[VAL_25]], %[[VAL_2]]{{\[}}%[[VAL_26]], %[[VAL_26]]] {in_bounds = [true, true]} : vector<1x4xf32>, tensor<?x?xf32> } : vector<1x4xi1> -> tensor<?x?xf32>
+// CHECK: return %[[VAL_27]] : tensor<?x?xf32>
+// CHECK: }
+
+transform.sequence failures(propagate) {
+ ^bb1(%arg1: !pdl.operation):
+   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!pdl.operation) -> !pdl.operation
+   transform.structured.masked_vectorize %0 vector_sizes [1, 4] { vectorize_nd_extract }
+ }
+
+// -----
+
 // The vectorizer converts `affine.apply` so that the subsequent Ops can be vectorised based on the converted ops. Gather load.
 func.func @vectorize_nd_tensor_extract_with_affine_apply_gather(%6: tensor<80x16xf32>, %arg0: index, %extracted_slice : tensor<1x4xf32>) -> tensor<1x4xf32> {
   %c16 = arith.constant 16 : index
@@ -1878,6 +1966,99 @@
 
 // -----
 
+func.func @masked_vectorize_nd_tensor_extract_with_affine_apply_gather(%6: tensor<80x16xf32>, %arg0: index, %extracted_slice : tensor<1x3xf32>) -> tensor<1x3xf32> {
+  %c16 = arith.constant 16 : index
+  %1 = linalg.generic {
+    indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>],
+    iterator_types = ["parallel", "parallel"]
+  } outs(%extracted_slice : tensor<1x3xf32>) {
+  ^bb0(%out: f32):
+    %2 = linalg.index 1 : index
+    %3 = affine.apply affine_map<(d0, d1) -> (d0 + d1)>(%2, %arg0)
+    %extracted = tensor.extract %6[%3, %c16] : tensor<80x16xf32>
+    linalg.yield %extracted : f32
+  } -> tensor<1x3xf32>
+  return %1 : tensor<1x3xf32>
+}
+
+// CHECK-LABEL: func.func @masked_vectorize_nd_tensor_extract_with_affine_apply_gather
+// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 3 : index
+// CHECK: %[[VAL_8:.*]] = vector.create_mask %[[VAL_4]], %[[VAL_5]] : vector<1x4xi1>
+// CHECK: %[[VAL_9:.*]] = vector.mask %[[VAL_8]] { vector.transfer_read {{.*}} {in_bounds = [true, true]} : tensor<1x3xf32>, vector<1x4xf32> } : vector<1x4xi1> -> vector<1x4xf32>
+// CHECK: %[[VAL_11:.*]] = vector.broadcast {{.*}} : index to vector<4xindex>
+// CHECK: %[[VAL_12:.*]] = arith.addi {{.*}} : vector<4xindex>
+// CHECK: %[[VAL_16:.*]] = vector.broadcast {{.*}} : vector<4xindex> to vector<1x4xindex>
+// CHECK: %[[VAL_18:.*]] = tensor.dim {{.*}} : tensor<80x16xf32>
+// CHECK: %[[VAL_19:.*]] = vector.broadcast {{.*}} : index to vector<1x4xindex>
+// CHECK: %[[VAL_20:.*]] = arith.muli {{.*}} : vector<1x4xindex>
+// CHECK: %[[VAL_22:.*]] = arith.addi {{.*}} : vector<1x4xindex>
+// CHECK: %[[VAL_23:.*]] = vector.mask %[[VAL_8]] { vector.gather {{.*}} : tensor<80x16xf32>, vector<1x4xindex>, vector<1x4xi1>, vector<1x4xf32> into vector<1x4xf32> } : vector<1x4xi1> -> vector<1x4xf32>
+// CHECK: %[[VAL_25:.*]] = vector.mask %[[VAL_8]] { vector.transfer_write {{.*}} {in_bounds = [true, true]} : vector<1x4xf32>, tensor<1x3xf32> } : vector<1x4xi1> -> tensor<1x3xf32>
+
+transform.sequence failures(propagate) {
+ ^bb1(%arg1: !pdl.operation):
+   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!pdl.operation) -> !pdl.operation
+   transform.structured.masked_vectorize %0 vector_sizes [1, 4] { vectorize_nd_extract }
+ }
+
+// -----
+
+func.func @masked_dynamic_vectorize_nd_tensor_extract_with_affine_apply_gather(%6: tensor<?x?xf32>, %arg0: index, %extracted_slice : tensor<?x?xf32>) -> tensor<?x?xf32> {
+  %c16 = arith.constant 16 : index
+  %1 = linalg.generic {
+    indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>],
+    iterator_types = ["parallel", "parallel"]
+  } outs(%extracted_slice : tensor<?x?xf32>) {
+  ^bb0(%out: f32):
+    %2 = linalg.index 1 : index
+    %3 = affine.apply affine_map<(d0, d1) -> (d0 + d1)>(%2, %arg0)
+    %extracted = tensor.extract %6[%3, %c16] : tensor<?x?xf32>
+    linalg.yield %extracted : f32
+  } -> tensor<?x?xf32>
+  return %1 : tensor<?x?xf32>
+}
+
+// CHECK-LABEL: func.func @masked_dynamic_vectorize_nd_tensor_extract_with_affine_apply_gather(
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: index,
+// CHECK-SAME: %[[VAL_2:.*]]: tensor<?x?xf32>) -> tensor<?x?xf32> {
+// CHECK: %[[VAL_3:.*]] = arith.constant 16 : index
+// CHECK: %[[VAL_4:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_5:.*]] = tensor.dim %[[VAL_2]], %[[VAL_4]] : tensor<?x?xf32>
+// CHECK: %[[VAL_6:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_7:.*]] = tensor.dim %[[VAL_2]], %[[VAL_6]] : tensor<?x?xf32>
+// CHECK: %[[VAL_8:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_9:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK: %[[VAL_10:.*]] = vector.create_mask %[[VAL_5]], %[[VAL_7]] : vector<1x4xi1>
+// CHECK: %[[VAL_11:.*]] = vector.mask %[[VAL_10]] { vector.transfer_read %[[VAL_2]]{{\[}}%[[VAL_8]], %[[VAL_8]]], %[[VAL_9]] {in_bounds = [true, true]} : tensor<?x?xf32>, vector<1x4xf32> } : vector<1x4xi1> -> vector<1x4xf32>
+// CHECK: %[[VAL_12:.*]] = arith.constant dense<[0, 1, 2, 3]> : vector<4xindex>
+// CHECK: %[[VAL_13:.*]] = vector.broadcast %[[VAL_1]] : index to vector<4xindex>
+// CHECK: %[[VAL_14:.*]] = arith.addi %[[VAL_12]], %[[VAL_13]] : vector<4xindex>
+// CHECK: %[[VAL_15:.*]] = arith.constant dense<true> : vector<1x4xi1>
+// CHECK: %[[VAL_16:.*]] = arith.constant dense<0.000000e+00> : vector<1x4xf32>
+// CHECK: %[[VAL_17:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_18:.*]] = vector.broadcast %[[VAL_14]] : vector<4xindex> to vector<1x4xindex>
+// CHECK: %[[VAL_19:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_20:.*]] = tensor.dim %[[VAL_0]], %[[VAL_19]] : tensor<?x?xf32>
+// CHECK: %[[VAL_21:.*]] = vector.broadcast %[[VAL_20]] : index to vector<1x4xindex>
+// CHECK: %[[VAL_22:.*]] = arith.muli %[[VAL_18]], %[[VAL_21]] : vector<1x4xindex>
+// CHECK: %[[VAL_23:.*]] = arith.constant dense<16> : vector<1x4xindex>
+// CHECK: %[[VAL_24:.*]] = arith.addi %[[VAL_23]], %[[VAL_22]] : vector<1x4xindex>
+// CHECK: %[[VAL_25:.*]] = vector.mask %[[VAL_10]] { vector.gather %[[VAL_0]]{{\[}}%[[VAL_17]], %[[VAL_17]]] {{\[}}%[[VAL_24]]], %[[VAL_15]], %[[VAL_16]] : tensor<?x?xf32>, vector<1x4xindex>, vector<1x4xi1>, vector<1x4xf32> into vector<1x4xf32> } : vector<1x4xi1> -> vector<1x4xf32>
+// CHECK: %[[VAL_26:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_27:.*]] = vector.mask %[[VAL_10]] { vector.transfer_write %[[VAL_25]], %[[VAL_2]]{{\[}}%[[VAL_26]], %[[VAL_26]]] {in_bounds = [true, true]} : vector<1x4xf32>, tensor<?x?xf32> } : vector<1x4xi1> -> tensor<?x?xf32>
+// CHECK: return %[[VAL_27]] : tensor<?x?xf32>
+// CHECK: }
+
+transform.sequence failures(propagate) {
+ ^bb1(%arg1: !pdl.operation):
+   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!pdl.operation) -> !pdl.operation
+   transform.structured.masked_vectorize %0 vector_sizes [1, 4] { vectorize_nd_extract }
+ }
+
+// -----
+
 // Make sure that non-linear arithmetic operations (e.g. arith.maxsi) are allowed when calculating indices for load operations. Gather load.
 func.func @vectorize_nd_tensor_extract_with_maxsi_gather(%arg0: tensor<80x16xf32>, %extracted_slice : tensor<1x4xf32>) -> tensor<1x4xf32> {
   %c79 = arith.constant 79 : index