diff --git a/mlir/docs/Traits/Broadcastable.md b/mlir/docs/Traits/Broadcastable.md --- a/mlir/docs/Traits/Broadcastable.md +++ b/mlir/docs/Traits/Broadcastable.md @@ -69,17 +69,17 @@ ```python InferShape(shape0, shape1): - # Equalize ranks - rank = max(GetRank(shape0), GetRank(shape1)) - ExpandRank(shape0, rank) - ExpandRank(shape1, rank) + # Equalize ranks + rank = max(GetRank(shape0), GetRank(shape1)) + ExpandRank(shape0, rank) + ExpandRank(shape1, rank) - # Infer shape - inferredShape = [] - for (dim0, dim1) in zip(shape0, shape1): - inferredDim = InferDim(dim0, dim1) - inferredShape.append(inferredDim) - return inferredShape + # Infer shape + inferredShape = [] + for (dim0, dim1) in zip(shape0, shape1): + inferredDim = InferDim(dim0, dim1) + inferredShape.append(inferredDim) + return inferredShape ``` The result shape for an operation with an arbitrary number of input operands is then inferred by discarding unranked operands, applying shape inference on the first ranked operand pair, and updating the inferred shape with each additional ranked operand. If the operation has no ranked operands, the result shape cannot be inferred. If the operation has exactly one ranked operand, its shape is directly provided as the inferred result shape. Formally: @@ -111,7 +111,7 @@ | `inferredDim` | `actualDim` | Verification outcome | | ------------- | ----------- | -------------------- | | ? | ? | **OK** | -| ? | static | **Error**
An inferred dimension being dynamic indicates that its size cannot be inferred at compile time from its input operands. The presence of a static dimension in the actual result is counterintuitive and is therefore not allowed. | +| ? | static | **OK**
A failure to guarantee that the runtime dimension size of the result is equal to `actualDim` causes undefined behavior. While unusual, this implicit dynamic-to-static cast is convenient in certain scenarios, such as an intermediate state of a shape inference pass. Ultimately, a static dimension in the result implies that all input dimension sizes are also known at compile time, and it is therefore preferable to make them static as well. | | static | ? | **OK**
The actual result dimension may be dynamic even when a static size can be inferred at compile time. The programmer may choose to relax the specificity of the result dimension for forward compatibility of the result type. | | static | static | **OK if equal**
When both the inferred and actual dimensions are static, they must be set to the same size. | @@ -134,7 +134,6 @@ # Verify for (inferredDim, actualDim) in zip(inferredShape, actualShape): - ERROR_IF(IsDynamic(inferredDim) and IsStatic(actualDim)) ERROR_IF(IsStatic(actualDim) and inferredDim != actualDim) ``` @@ -195,3 +194,5 @@ // tensor<4xi32>. Broadcast semantics are not applicable for results. %result = "test.broadcastable"(%arg0, %arg1) : (tensor<1xi32>, tensor<1xi32>) -> tensor<4xi32> ``` + + diff --git a/mlir/lib/Dialect/Traits.cpp b/mlir/lib/Dialect/Traits.cpp --- a/mlir/lib/Dialect/Traits.cpp +++ b/mlir/lib/Dialect/Traits.cpp @@ -195,17 +195,10 @@ static bool isCompatibleInferredReturnShape(ArrayRef<int64_t> inferred, ArrayRef<int64_t> existing) { +  // If both inferred and existing dimensions are static, they must be equal. auto isCompatible = [](int64_t inferredDim, int64_t existingDim) { -    // The following criterion is used to determine the validity of an existing -    // dimension: -    // -    //   inferredDim  existingDim  Behavior -    //   -----------  -----------  -------- -    //   dynamic      dynamic      OK -    //   dynamic      static       Error -    //   static       dynamic      OK -    //   static       static       OK if equal -    return ShapedType::isDynamic(existingDim) || inferredDim == existingDim; +    return ShapedType::isDynamic(existingDim) + ShapedType::isDynamic(inferredDim) || inferredDim == existingDim; }; if (inferred.size() != existing.size()) return false; diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir --- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir +++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir @@ -20,24 +20,45 @@ // ----- // CHECK: #[[$MAP0:.*]] = affine_map<(d0) -> (d0)> -// CHECK-LABEL: @test_abs_1d_cast_result +// CHECK-LABEL: @test_abs_1d_cast_static_to_dynamic // CHECK-SAME: ([[ARG0:%[0-9a-zA-Z_]*]] -func.func @test_abs_1d_cast_result(%arg0: tensor<5xf32>) -> tensor<?xf32> { +func.func @test_abs_1d_cast_static_to_dynamic(%arg0: 
tensor<5xf32>) -> tensor<?xf32> { // CHECK: [[EMPTY:%.+]] = tensor.empty() : tensor<5xf32> // CHECK: [[RESULT:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP0]]], iterator_types = ["parallel"]} ins([[ARG0]] : tensor<5xf32>) outs([[EMPTY]] : tensor<5xf32>) { // CHECK: ^bb0([[IN0:%.+]]: f32, [[OUT0:%.+]]: f32): // CHECK: [[ABS:%.+]] = math.absf [[IN0]] : f32 // CHECK: linalg.yield [[ABS]] : f32 // CHECK: } -> tensor<5xf32> + // CHECK: [[CAST_RESULT:%.+]] = tensor.cast [[RESULT]] : tensor<5xf32> to tensor<?xf32> %0 = "tosa.abs"(%arg0) : (tensor<5xf32>) -> tensor<?xf32> - // CHECK: [[CAST_RESULT:%.+]] = tensor.cast [[RESULT]] : tensor<5xf32> to tensor<?xf32> // CHECK: return [[CAST_RESULT]] : tensor<?xf32> return %0 : tensor<?xf32> } // ----- +// CHECK: #[[$MAP0:.*]] = affine_map<(d0) -> (d0)> +// CHECK-LABEL: @test_abs_1d_cast_dynamic_to_static +// CHECK-SAME: (%[[ARG0:[0-9a-zA-Z_]*]] +func.func @test_abs_1d_cast_dynamic_to_static(%arg0: tensor<?xf32>) -> tensor<5xf32> { + // CHECK: %[[ZERO:.*]] = arith.constant 0 : index + // CHECK: %[[DIM_SIZE:.*]] = tensor.dim %[[ARG0]], %[[ZERO]] : tensor<?xf32> + // CHECK: %[[EMPTY:.*]] = tensor.empty(%[[DIM_SIZE]]) : tensor<?xf32> + // CHECK: %[[RESULT:.*]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP0]]], iterator_types = ["parallel"]} ins(%[[ARG0]] : tensor<?xf32>) outs(%[[EMPTY]] : tensor<?xf32>) { + // CHECK: ^bb0(%[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: f32): + // CHECK: %[[VAL_2:.*]] = math.absf %[[VAL_0]] : f32 + // CHECK: linalg.yield %[[VAL_2]] : f32 + // CHECK: } -> tensor<?xf32> + // CHECK: %[[CAST_RESULT:.*]] = tensor.cast %[[RESULT]] : tensor<?xf32> to tensor<5xf32> + %0 = "tosa.abs"(%arg0) : (tensor<?xf32>) -> tensor<5xf32> + + // CHECK: return %[[CAST_RESULT]] : tensor<5xf32> + return %0 : tensor<5xf32> +} + +// ----- + // CHECK: #[[$MAP0:.*]] = affine_map<(d0) -> (d0)> // CHECK-LABEL: @test_abs_1d_dynamic // CHECK-SAME: ([[ARG0:%[0-9a-zA-Z_]*]] diff --git a/mlir/test/Dialect/traits.mlir b/mlir/test/Dialect/traits.mlir --- a/mlir/test/Dialect/traits.mlir +++ b/mlir/test/Dialect/traits.mlir @@ 
-111,9 +111,10 @@ // ----- -// Error for inferred dynamic dimension but existing static dimensions +// It is acceptable to have an implicit dynamic-to-static cast in a dimension size +// as long as the runtime result size is consistent with the result tensor's +// static dimension. func.func @broadcast_tensor_tensor_tensor(%arg0: tensor<?xi32>, %arg1: tensor<?xi32>) -> tensor<2xi32> { - // expected-error @+1 {{op result type '2' not broadcast compatible with broadcasted operands's shapes '?'}} %0 = "test.broadcastable"(%arg0, %arg1) : (tensor<?xi32>, tensor<?xi32>) -> tensor<2xi32> return %0 : tensor<2xi32> }