diff --git a/mlir/include/mlir/Dialect/Transform/IR/TransformInterfaces.h b/mlir/include/mlir/Dialect/Transform/IR/TransformInterfaces.h
--- a/mlir/include/mlir/Dialect/Transform/IR/TransformInterfaces.h
+++ b/mlir/include/mlir/Dialect/Transform/IR/TransformInterfaces.h
@@ -126,7 +126,7 @@
   }
 
   /// Take the diagnostic and silence.
-  SmallVector<Diagnostic> &&takeDiagnostics() {
+  SmallVector<Diagnostic> takeDiagnostics() {
     assert(!diagnostics.empty() && "expected a diagnostic to be present");
     auto guard = llvm::make_scope_exit([&]() { diagnostics.clear(); });
     return std::move(diagnostics);
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -1346,8 +1346,8 @@
 
   gpu::LaunchOp gpuLaunch = dyn_cast<gpu::LaunchOp>(target);
   if (!gpuLaunch) {
-    target->emitError("Given target is not gpu.launch");
-    return DiagnosedSilenceableFailure::definiteFailure();
+    results.assign({target});
+    return emitSilenceableError() << "Given target is not gpu.launch";
   }
 
   SmallVector<int64_t> blockDim = extractFromI64ArrayAttr(getBlockDim());
diff --git a/mlir/test/Dialect/Linalg/transform-gpu.mlir b/mlir/test/Dialect/Linalg/transform-gpu.mlir
--- a/mlir/test/Dialect/Linalg/transform-gpu.mlir
+++ b/mlir/test/Dialect/Linalg/transform-gpu.mlir
@@ -174,3 +174,17 @@
     transform.structured.map_nested_foreach_thread_to_gpu_threads %funcop { blockDim = [12, 9, 1], syncAfterDistribute = false }
   }
 }
+
+// -----
+
+func.func @foo() -> () {
+  %1 = linalg.init_tensor [4] : tensor<4xf32>
+  return
+}
+
+transform.sequence failures(propagate) {
+^bb0(%arg0: !pdl.operation):
+  %funcop = transform.structured.match ops{["linalg.init_tensor"]} in %arg0
+  // expected-error @+1 {{Given target is not gpu.launch}}
+  %1 = transform.structured.map_nested_foreach_thread_to_gpu_threads %funcop
+}