diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -1543,13 +1543,7 @@
   }
 
   for (const auto &[idx, inputDimIdx] : llvm::enumerate(reverseDimMap)) {
-    if (inputDimIdx == kUnmappedDim) {
-      // This dimensions is being added. Should be statically known.
-      if (ShapedType::isDynamic(initShape[idx]))
-        return emitOpError()
-               << "init dim " << idx
-               << " can't be dynamic, because it's not matched to input";
-    } else {
+    if (inputDimIdx != kUnmappedDim) {
       // This dimensions is mapped from the input. Init and input dims should
       // match.
       if (inputShape[inputDimIdx] != initShape[idx])
diff --git a/mlir/test/Dialect/Linalg/invalid.mlir b/mlir/test/Dialect/Linalg/invalid.mlir
--- a/mlir/test/Dialect/Linalg/invalid.mlir
+++ b/mlir/test/Dialect/Linalg/invalid.mlir
@@ -728,19 +728,6 @@
 
 // -----
 
-func.func @broadcast_added_dynamic_mismatch(
-    %input: tensor<4x16xf32>, %init: tensor<4x?x16xf32>)
-    -> tensor<4x?x16xf32> {
-  // expected-error @+1 {{'linalg.broadcast' op init dim 1 can't be dynamic, because it's not matched to input}}
-  %bcast = linalg.broadcast
-      ins(%input:tensor<4x16xf32>)
-      outs(%init:tensor<4x?x16xf32>)
-      dimensions = [0, 2]
-  func.return %bcast : tensor<4x?x16xf32>
-}
-
-// -----
-
 func.func @broadcast_size_1_extension_not_supported(
     %input: tensor<1x16xf32>, %init: tensor<4x?x16xf32>)
     -> tensor<4x?x16xf32> {
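
With the verifier check removed, a `linalg.broadcast` whose init has a dynamic size in a dimension that is not mapped from the input now passes verification; the remaining check only compares the dims that are mapped. A minimal sketch of IR that should now verify, adapted from the deleted `broadcast_added_dynamic_mismatch` test with the `expected-error` annotation dropped (the function name `broadcast_added_dynamic` is illustrative, not part of the patch):

func.func @broadcast_added_dynamic(
    %input: tensor<4x16xf32>, %init: tensor<4x?x16xf32>)
    -> tensor<4x?x16xf32> {
  // Init dim 1 is dynamic and has no matching input dim; the verifier
  // previously rejected this and now accepts it. Mapped dims still must
  // agree (input dim 0 <-> init dim 0, input dim 1 <-> init dim 2).
  %bcast = linalg.broadcast
      ins(%input : tensor<4x16xf32>)
      outs(%init : tensor<4x?x16xf32>)
      dimensions = [0, 2]
  func.return %bcast : tensor<4x?x16xf32>
}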