diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
--- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
@@ -1077,7 +1077,7 @@
     if (isa<arith::MinFOp>(op))
       return b.getFloatAttr(
           resultType, llvm::APFloat::getInf(semantic, /*Negative=*/false));
-    return Attribute();
+    return std::nullopt;
   }
   if (isa<arith::AddIOp, arith::OrIOp, arith::XOrIOp>(op))
     return b.getIntegerAttr(resultType, 0);
diff --git a/mlir/test/Dialect/Linalg/transform-tile-reduction.mlir b/mlir/test/Dialect/Linalg/transform-tile-reduction.mlir
--- a/mlir/test/Dialect/Linalg/transform-tile-reduction.mlir
+++ b/mlir/test/Dialect/Linalg/transform-tile-reduction.mlir
@@ -330,3 +330,28 @@
       by num_threads = [5], tile_sizes = [3], mapping = [#gpu.thread<x>]
   }
 }
+
+// -----
+
+#map = affine_map<(d0, d1) -> (d0, d1)>
+#map1 = affine_map<(d0, d1) -> (d0)>
+
+module {
+  func.func @fail_for_float_neutral(%arg0: tensor<?x?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
+    // expected-error @below {{'linalg.generic' op Failed to get an identity value for the reduction operation.}}
+    // expected-note @below {{when applied to this op}}
+    %0 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%arg0 : tensor<?x?xf32>) outs(%arg1 : tensor<?xf32>) {
+    ^bb0(%in: f32, %out: f32):
+      %1 = llvm.fmul %in, %in : f32
+      %2 = llvm.fadd %1, %out : f32
+      linalg.yield %2 : f32
+    } -> tensor<?xf32>
+    return %0 : tensor<?xf32>
+  }
+  transform.sequence failures(propagate) {
+  ^bb0(%arg0: !pdl.operation):
+    %0 = transform.structured.match ops{["linalg.generic"]} in %arg0 : (!pdl.operation) -> !pdl.operation
+    // expected-error @below {{transform.structured.tile_reduction_using_scf failed to apply}}
+    %for_op, %fill_op, %split_linalg_op, %combining_linalg_op = transform.structured.tile_reduction_using_scf %0 by tile_sizes = [0, 5]
+  }
+}