diff --git a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/ComprehensiveBufferize.cpp b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/ComprehensiveBufferize.cpp
--- a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/ComprehensiveBufferize.cpp
+++ b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/ComprehensiveBufferize.cpp
@@ -1516,6 +1516,46 @@
 namespace scf_ext {
 
+struct ExecuteRegionOpInterface
+    : public BufferizableOpInterface::ExternalModel<ExecuteRegionOpInterface,
+                                                    scf::ExecuteRegionOp> {
+  SmallVector<OpOperand *> getAliasingOpOperand(Operation *op,
+                                                OpResult opResult) const {
+    // ExecuteRegionOps do not have tensor OpOperands. The yielded value can be
+    // any SSA value that is in scope. To allow for use-def chain traversal
+    // through ExecuteRegionOps in the analysis, the corresponding yield value
+    // is considered to be aliasing with the result.
+    auto executeRegionOp = cast<scf::ExecuteRegionOp>(op);
+    size_t resultNum = std::distance(op->getOpResults().begin(),
+                                     llvm::find(op->getOpResults(), opResult));
+    assert(executeRegionOp.region().getBlocks().size() == 1 &&
+           "expected exactly 1 block");
+    auto yieldOp = dyn_cast<scf::YieldOp>(
+        executeRegionOp.region().front().getTerminator());
+    assert(yieldOp && "expected scf.yield terminator in scf.execute_region");
+    return {&yieldOp->getOpOperand(resultNum)};
+  }
+
+  bool mustBufferizeInPlace(Operation *op, OpResult opResult) const {
+    // ExecuteRegionOp results always bufferize in-place. Since they have no
+    // OpOperands, they are mostly ignored by the analysis once alias sets are
+    // set up.
+    return true;
+  }
+
+  LogicalResult bufferize(Operation *op, OpBuilder &b,
+                          BufferizationState &state) const {
+    // TODO: Add bufferization support when needed. scf.execute_region should be
+    // bufferized similar to scf.if.
+    bool hasTensorReturnType = any_of(
+        op->getResultTypes(), [](Type t) { return t.isa<TensorType>(); });
+    if (hasTensorReturnType)
+      return op->emitError(
+          "scf.execute_region with tensor result not supported");
+    return success();
+  }
+};
+
 struct IfOpInterface
     : public BufferizableOpInterface::ExternalModel<IfOpInterface, scf::IfOp> {
   SmallVector<OpOperand *> getAliasingOpOperand(Operation *op,
                                                 OpResult opResult) const {
@@ -2490,6 +2530,8 @@
 void registerBufferizableOpInterfaceExternalModels(DialectRegistry &registry) {
   registry.addOpInterface();
+  registry.addOpInterface<scf::ExecuteRegionOp,
+                          scf_ext::ExecuteRegionOpInterface>();
   registry.addOpInterface();
   registry.addOpInterface();
   registry.addOpInterface();
diff --git a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-invalid.mlir b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-invalid.mlir
--- a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-invalid.mlir
+++ b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-invalid.mlir
@@ -160,7 +160,7 @@
 // -----
 
 func @main() -> tensor<4xi32> {
-  // expected-error @+1 {{unsupported op with tensors}}
+  // expected-error @+1 {{scf.execute_region with tensor result not supported}}
   %r = scf.execute_region -> tensor<4xi32> {
     %A = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi32>
     scf.yield %A: tensor<4xi32>