diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td
@@ -15,9 +15,29 @@
   let description = [{
     An op interface for One-Shot Bufferize. Ops that implement this interface
     can be analyzed and bufferized using One-Shot Bufferize.
+
+    Note: All "bufferizesTo*" and "getAliasing*" interface methods must be
+    implemented conservatively. If it is not statically known whether an
+    OpOperand/OpResult bufferizes in a certain way (e.g., to a memory write),
+    the worst case must be assumed (e.g., that it does). Similarly,
+    "getAliasing*" interface methods may always return additional OpOperands
+    or OpResults, but must not miss an OpOperand or OpResult that could
+    potentially alias at runtime.
   }];
   let cppNamespace = "::mlir::bufferization";
   let methods = [
+      InterfaceMethod<
+        /*desc=*/[{
+          Return `true` if the given OpResult may bufferize to a new buffer
+          allocation. If it is statically unknown whether the given OpResult
+          bufferizes to a buffer allocation, `true` should be returned.
+        }],
+        /*retType=*/"bool",
+        /*methodName=*/"bufferizesToAllocation",
+        /*args=*/(ins "OpResult":$opResult),
+        /*methodBody=*/"",
+        /*defaultImplementation=*/"return false;"
+      >,
       InterfaceMethod<
         /*desc=*/[{
           Return `true` if the given OpOperand bufferizes to a memory read. This
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationBase.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationBase.td
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationBase.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationBase.td
@@ -39,6 +39,16 @@
     /// arguments during One-Shot Module Bufferize.
     constexpr const static ::llvm::StringLiteral
         kBufferLayoutAttrName = "bufferization.buffer_layout";
+
+    /// Attribute name used to mark escaping behavior of buffer allocations.
+    /// Escaping allocations cannot be deallocated in the same block and must
+    /// be treated specially: they are currently deallocated by the
+    /// BufferDeallocation pass.
+    ///
+    /// Note: Only ops with at least one OpResult that bufferizes to a buffer
+    /// allocation (as per BufferizableOpInterface) may have this attribute.
+    constexpr const static ::llvm::StringLiteral
+        kEscapeAttrName = "bufferization.escape";
   }];
   let hasOperationAttrVerify = 1;
   let emitAccessorPrefix = kEmitAccessorPrefix_Prefixed;
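The new dialect attribute encodes escaping behavior as one boolean per op result. A minimal sketch of the two cases on `bufferization.alloc_tensor` (illustrative IR, not taken verbatim from the patch):

  // Result does not escape: the buffer can be deallocated at the end of its
  // block during bufferization.
  %0 = bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<5xf32>

  // Result escapes (e.g., it is returned): deallocation is deferred to the
  // BufferDeallocation pass.
  %1 = bufferization.alloc_tensor() {bufferization.escape = [true]} : tensor<5xf32>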
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
@@ -38,11 +38,6 @@
     If `copy` is specified, no dynamic sizes should be passed, since they are
     the same as the dynamic sizes of the `copy` operand.
 
-    The optional `escape` attribute indicates whether the buffer escapes the
-    parent block or not. In the latter case, the buffer is deallocated at the
-    end of the block (during bufferization). In the former case, the buffer is
-    not deallocated and must be deallocated through some other mechanism.
-
     `alloc_tensor` is a helper op for bufferization. The operation is provided
     as an anchor that marks the beginning of a new tensor SSA use-def chain.
     It can be used to control in-place bufferization decisions during One-Shot
@@ -65,8 +60,7 @@
   }];
 
   let arguments = (ins Variadic<Index>:$dynamic_sizes,
-                       Optional<AnyTensor>:$copy,
-                       OptionalAttr<BoolAttr>:$escape);
+                       Optional<AnyTensor>:$copy);
 
   let results = (outs AnyTensor:$result);
 
@@ -76,6 +70,8 @@
     bool isMemoryWrite(OpResult opResult, const AnalysisState &state);
 
+    bool bufferizesToAllocation(OpResult opResult) { return true; }
+
     bool bufferizesToMemoryRead(OpOperand &opOperand,
                                 const AnalysisState &state);
 
@@ -119,16 +115,8 @@
   }];
 
   let builders = [
-    // Build an op without `copy` operand and `escape` attribute.
+    // Build an op without `copy` operand.
    OpBuilder<(ins "RankedTensorType":$type, "ValueRange":$dynamicSizes)>,
-
-    // Build an op without `escape` attribute.
-    OpBuilder<(ins "RankedTensorType":$type, "ValueRange":$dynamicSizes,
-                   "Value":$copy)>,
-
-    // Build an op with `copy` and `escape` attribute.
-    OpBuilder<(ins "RankedTensorType":$type, "ValueRange":$dynamicSizes,
-                   "Value":$copy, "bool":$escape)>,
   ];
 
   let hasCanonicalizer = 1;
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
@@ -84,8 +84,11 @@
     populateDynamicDimSizes(b, loc, tensor, dynamicSizes);
   }
-  return b.create<AllocTensorOp>(loc, tensorType, dynamicSizes,
-                                 copy ? tensor : Value(), escape);
+  auto allocTensorOp = b.create<AllocTensorOp>(loc, tensorType, dynamicSizes,
+                                               copy ? tensor : Value());
+  allocTensorOp->setAttr(BufferizationDialect::kEscapeAttrName,
+                         b.getBoolArrayAttr({escape}));
+  return allocTensorOp;
 }
 
 LogicalResult BufferizableOpInterface::resolveTensorOpOperandConflicts(
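With this change, `allocateTensorForShapedValue` no longer threads `escape` through the op's builder; it creates the `alloc_tensor` op and then attaches the dialect attribute. For a copy of a `tensor<5xf32>` value `%t` with `escape` set to `true`, the helper now produces IR along these lines (sketch, mirroring the CHECK-FUNC lines in the updated tests below):

  %alloc = bufferization.alloc_tensor() copy(%t) {bufferization.escape = [true]} : tensor<5xf32>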
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationDialect.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationDialect.cpp
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizationDialect.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationDialect.cpp
@@ -7,6 +7,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "mlir/Dialect/Affine/IR/AffineOps.h"
+#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
 #include "mlir/Dialect/Bufferization/IR/Bufferization.h"
 #include "mlir/Dialect/MemRef/IR/MemRef.h"
 #include "mlir/Dialect/Tensor/IR/Tensor.h"
@@ -27,6 +28,9 @@
 constexpr const ::llvm::StringLiteral
     BufferizationDialect::kBufferLayoutAttrName;
 
+/// Attribute name used to mark escaping behavior of buffer allocations.
+constexpr const ::llvm::StringLiteral BufferizationDialect::kEscapeAttrName;
+
 //===----------------------------------------------------------------------===//
 // Bufferization Dialect Interfaces
 //===----------------------------------------------------------------------===//
@@ -80,6 +84,37 @@
            << " to be used on function-like operations";
     return success();
   }
+  if (attr.getName() == kEscapeAttrName) {
+    auto arrayAttr = attr.getValue().dyn_cast<ArrayAttr>();
+    if (!arrayAttr)
+      return op->emitError() << "'" << kEscapeAttrName
+                             << "' is expected to be a bool array attribute";
+    if (arrayAttr.size() != op->getNumResults())
+      return op->emitError()
+             << "'" << kEscapeAttrName
+             << "' has wrong number of elements, expected "
+             << op->getNumResults() << ", got " << arrayAttr.size();
+    auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op);
+    if (!bufferizableOp)
+      return op->emitError()
+             << "'" << kEscapeAttrName << "' only valid on bufferizable ops";
+    for (const auto &it : llvm::enumerate(arrayAttr)) {
+      auto attr = it.value();
+      auto boolAttr = attr.dyn_cast<BoolAttr>();
+      if (!boolAttr)
+        return op->emitError() << "'" << kEscapeAttrName
+                               << "' is expected to be a bool array attribute";
+      if (!boolAttr.getValue())
+        continue;
+      if (!op->getResult(it.index()).getType().isa<TensorType>())
+        return op->emitError()
+               << "'" << kEscapeAttrName << "' only valid for tensor results";
+      if (!bufferizableOp.bufferizesToAllocation(op->getOpResult(it.index())))
+        return op->emitError() << "'" << kEscapeAttrName
+                               << "' only valid for allocation results";
+    }
+    return success();
+  }
   return op->emitError() << "attribute '" << attr.getName()
                          << "' not supported by the bufferization dialect";
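The verifier above requires exactly one boolean per op result, and a `true` entry is only accepted on a tensor result for which `bufferizesToAllocation` returns `true`. For a hypothetical two-result bufferizable op whose first result allocates, a well-formed annotation would look like this (sketch; `test.alloc_and_forward` is an assumed op, not part of the patch):

  %a, %b = "test.alloc_and_forward"(%t) {bufferization.escape = [true, false]}
      : (tensor<5xf32>) -> (tensor<5xf32>, tensor<5xf32>)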
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
@@ -152,6 +152,7 @@
 LogicalResult AllocTensorOp::bufferize(RewriterBase &rewriter,
                                        const BufferizationOptions &options) {
   OpBuilder::InsertionGuard g(rewriter);
+  Operation *op = this->getOperation();
   Location loc = getLoc();
 
   // Nothing to do for dead AllocTensorOps.
@@ -185,8 +186,11 @@
   // Should the buffer be deallocated?
   AnalysisState analysisState(options);
   bool dealloc;
-  if (getEscape()) {
-    dealloc = !*getEscape();
+  if (op->hasAttr(BufferizationDialect::kEscapeAttrName)) {
+    // AllocTensorOp has one result.
+    ArrayAttr escapeAttr =
+        op->getAttr(BufferizationDialect::kEscapeAttrName).cast<ArrayAttr>();
+    dealloc = !escapeAttr[0].cast<BoolAttr>().getValue();
   } else {
     // No "escape" annotation found.
     if (options.createDeallocs) {
@@ -251,20 +255,7 @@
 
 void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
                           RankedTensorType type, ValueRange dynamicSizes) {
-  build(builder, result, type, dynamicSizes, /*copy=*/Value(),
-        /*escape=*/BoolAttr());
-}
-
-void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
-                          RankedTensorType type, ValueRange dynamicSizes,
-                          Value copy) {
-  build(builder, result, type, dynamicSizes, copy, /*escape=*/BoolAttr());
-}
-
-void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
-                          RankedTensorType type, ValueRange dynamicSizes,
-                          Value copy, bool escape) {
-  build(builder, result, type, dynamicSizes, copy, builder.getBoolAttr(escape));
+  build(builder, result, type, dynamicSizes, /*copy=*/Value());
 }
 
 namespace {
@@ -305,8 +296,7 @@
     if (newType == op.getType())
       return failure();
     auto newOp = rewriter.create<AllocTensorOp>(
-        op.getLoc(), newType, newDynamicSizes, /*copy=*/Value(),
-        /*escape=*/op.getEscapeAttr());
+        op.getLoc(), newType, newDynamicSizes, /*copy=*/Value());
     rewriter.replaceOpWithNewOp<tensor::CastOp>(op, op.getType(), newOp);
     return success();
   }
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/TensorCopyInsertion.cpp b/mlir/lib/Dialect/Bufferization/Transforms/TensorCopyInsertion.cpp
--- a/mlir/lib/Dialect/Bufferization/Transforms/TensorCopyInsertion.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/TensorCopyInsertion.cpp
@@ -44,20 +44,31 @@
 mlir::bufferization::insertTensorCopies(Operation *op,
                                         const AnalysisState &state) {
   IRRewriter rewriter(op->getContext());
+  StringRef escapeAttrName = BufferizationDialect::kEscapeAttrName;
+
   WalkResult result = op->walk([&](Operation *op) {
     auto bufferizableOp = state.getOptions().dynCastBufferizableOp(op);
     if (!bufferizableOp)
       return WalkResult::skip();
 
-    // Find AllocTensorOps without an `escape` attribute and add the attribute
+    // Find allocations without an `escape` attribute and add the attribute
     // based on analysis results.
-    if (auto allocTensorOp = dyn_cast<AllocTensorOp>(op)) {
-      if (allocTensorOp.getEscape())
-        return WalkResult::advance();
-      bool escape = !state.getOptions().createDeallocs ||
-                    state.isTensorYielded(allocTensorOp.getResult());
-      allocTensorOp.setEscapeAttr(rewriter.getBoolAttr(escape));
-      return WalkResult::advance();
+    if (!op->hasAttr(escapeAttrName)) {
+      SmallVector<bool> escapeAttrValue;
+      bool foundTensorResult = false;
+      for (OpResult opResult : op->getOpResults()) {
+        if (!opResult.getType().isa<TensorType>() ||
+            !bufferizableOp.bufferizesToAllocation(opResult)) {
+          escapeAttrValue.push_back(false);
+          continue;
+        }
+        foundTensorResult = true;
+        bool escape = !state.getOptions().createDeallocs ||
+                      state.isTensorYielded(opResult);
+        escapeAttrValue.push_back(escape);
+      }
+      if (foundTensorResult)
+        op->setAttr(escapeAttrName, rewriter.getBoolArrayAttr(escapeAttrValue));
     }
 
     // Find inplacability conflicts and resolve them. (Typically with explicit
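The walk above only annotates ops that have at least one allocating tensor result and that are not yet annotated. For a function returning a fresh allocation (mirroring the `return_alloc_tensor` test below), and assuming function boundaries are bufferized so that the returned value counts as yielded:

  // Before insertTensorCopies:
  %0 = bufferization.alloc_tensor() : tensor<5xf32>
  return %0 : tensor<5xf32>

  // After: the result is yielded from its block, so it is marked as escaping.
  %0 = bufferization.alloc_tensor() {bufferization.escape = [true]} : tensor<5xf32>
  return %0 : tensor<5xf32>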
diff --git a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
--- a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -461,9 +461,8 @@
         yieldValues.push_back(value);
         continue;
       }
-      Value alloc = rewriter.create<bufferization::AllocTensorOp>(
-          yieldOp.getLoc(), value.getType().cast<RankedTensorType>(),
-          /*dynamicSizes=*/ValueRange(), value, /*escape=*/true);
+      Value alloc = allocateTensorForShapedValue(rewriter, yieldOp.getLoc(),
+                                                 value, /*escape=*/true);
       yieldValues.push_back(alloc);
     }
 
@@ -673,9 +672,8 @@
         beforeYieldValues.push_back(value);
         continue;
       }
-      Value alloc = rewriter.create<bufferization::AllocTensorOp>(
-          conditionOp.getLoc(), value.getType().cast<RankedTensorType>(),
-          /*dynamicSizes=*/ValueRange(), value, /*escape=*/true);
+      Value alloc = allocateTensorForShapedValue(
+          rewriter, conditionOp.getLoc(), value, /*escape=*/true);
       beforeYieldValues.push_back(alloc);
     }
     rewriter.updateRootInPlace(conditionOp, [&]() {
@@ -692,9 +690,8 @@
         afterYieldValues.push_back(value);
         continue;
       }
-      Value alloc = rewriter.create<bufferization::AllocTensorOp>(
-          yieldOp.getLoc(), value.getType().cast<RankedTensorType>(),
-          /*dynamicSizes=*/ValueRange(), value, /*escape=*/true);
+      Value alloc = allocateTensorForShapedValue(rewriter, yieldOp.getLoc(),
+                                                 value, /*escape=*/true);
       afterYieldValues.push_back(alloc);
     }
     rewriter.updateRootInPlace(yieldOp, [&]() {
@@ -938,13 +935,11 @@
       if (state.isInPlace(*destOperands.front()))
         continue;
 
-      // Create AllocTensorOp.
+      // Insert tensor allocation.
       bool isYielded = state.isTensorYielded(opResult);
-      auto resultType = opResult.getType().cast<RankedTensorType>();
-      Value alloc = rewriter.create<bufferization::AllocTensorOp>(
-          op->getLoc(), resultType, /*dynamicDims=*/ValueRange(),
-          /*copy=*/destOperands.front()->get(),
-          /*escape=*/isYielded);
+      Value alloc = allocateTensorForShapedValue(rewriter, op->getLoc(),
+                                                 destOperands.front()->get(),
+                                                 /*escape=*/isYielded);
 
       // Update terminator operand.
       rewriter.updateRootInPlace(destOperands.front()->getOwner(),
diff --git a/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir b/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir
--- a/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir
@@ -9,9 +9,9 @@
 func.func @read_after_write_conflict(%t: tensor<?xf32>, %idx: index, %f: f32)
   -> (tensor<?xf32>, tensor<?xf32>)
 {
-  // CHECK: %[[copy:.*]] = bufferization.alloc_tensor() copy(%[[t]]) {escape = false} : tensor<?xf32>
-  // CHECK-FUNC: bufferization.alloc_tensor() copy(%{{.*}}) {escape = true} : tensor<?xf32>
-  // CHECK-NO-DEALLOC: bufferization.alloc_tensor() copy(%{{.*}}) {escape = true} : tensor<?xf32>
+  // CHECK: %[[copy:.*]] = bufferization.alloc_tensor() copy(%[[t]]) {bufferization.escape = [false]} : tensor<?xf32>
+  // CHECK-FUNC: bufferization.alloc_tensor() copy(%{{.*}}) {bufferization.escape = [true]} : tensor<?xf32>
+  // CHECK-NO-DEALLOC: bufferization.alloc_tensor() copy(%{{.*}}) {bufferization.escape = [true]} : tensor<?xf32>
   // CHECK: %[[insert:.*]] = tensor.insert %{{.*}} into %[[copy]]
   %0 = tensor.insert %f into %t[%idx] : tensor<?xf32>
   // CHECK: return %[[insert]], %[[t]]
@@ -24,9 +24,9 @@
 // CHECK-FUNC-LABEL: func @return_alloc_tensor
 // CHECK-NO-DEALLOC-LABEL: func @return_alloc_tensor
 func.func @return_alloc_tensor() -> (tensor<5xf32>) {
-  // CHECK: bufferization.alloc_tensor() {escape = false} : tensor<5xf32>
-  // CHECK-FUNC: bufferization.alloc_tensor() {escape = true} : tensor<5xf32>
-  // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {escape = true} : tensor<5xf32>
+  // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<5xf32>
+  // CHECK-FUNC: bufferization.alloc_tensor() {bufferization.escape = [true]} : tensor<5xf32>
+  // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {bufferization.escape = [true]} : tensor<5xf32>
   %0 = bufferization.alloc_tensor() : tensor<5xf32>
   return %0 : tensor<5xf32>
 }
@@ -38,12 +38,12 @@
 func.func @do_not_copy_undefined_tensor(%f: f32, %idx: index)
   -> (tensor<5xf32>, tensor<5xf32>)
 {
-  // CHECK: bufferization.alloc_tensor() {escape = false} : tensor<5xf32>
+  // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<5xf32>
   // The second alloc_tensor should not have a copy operand.
-  // CHECK: bufferization.alloc_tensor() {escape = false} : tensor<5xf32>
+  // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<5xf32>
-  // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {escape = true} : tensor<5xf32>
-  // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {escape = true} : tensor<5xf32>
+  // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {bufferization.escape = [true]} : tensor<5xf32>
+  // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {bufferization.escape = [true]} : tensor<5xf32>
   %0 = bufferization.alloc_tensor() : tensor<5xf32>
   %1 = tensor.insert %f into %0[%idx] : tensor<5xf32>
   return %0, %1 : tensor<5xf32>, tensor<5xf32>
@@ -55,7 +55,7 @@
 func.func @do_not_copy_when_overwritten(%t: tensor<5xf32>, %f: f32)
   -> (tensor<5xf32>, tensor<5xf32>)
 {
-  // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {escape = false} : tensor<5xf32>
+  // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<5xf32>
   // CHECK: linalg.generic {{.*}} outs(%[[alloc]] : tensor<5xf32>)
   %r = linalg.generic {
     indexing_maps = [affine_map<(d0) -> (d0)>],
@@ -74,7 +74,7 @@
   -> (tensor<3xf32>)
 {
   %0 = tensor.extract_slice %t[0][3][1] : tensor<5xf32> to tensor<3xf32>
-  // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {escape = false} : tensor<3xf32>
+  // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<3xf32>
   // CHECK: linalg.generic {{.*}} outs(%[[alloc]] : tensor<3xf32>)
   %r = linalg.generic {
     indexing_maps = [affine_map<(d0) -> (d0)>],
diff --git a/mlir/test/Dialect/Bufferization/invalid.mlir b/mlir/test/Dialect/Bufferization/invalid.mlir
--- a/mlir/test/Dialect/Bufferization/invalid.mlir
+++ b/mlir/test/Dialect/Bufferization/invalid.mlir
@@ -27,7 +27,31 @@
 // -----
 
 func.func @alloc_tensor_invalid_escape_attr(%sz: index) {
-  // expected-error @+1{{op attribute 'escape' failed to satisfy constraint: bool attribute}}
-  %0 = bufferization.alloc_tensor(%sz) {escape = 5} : tensor<?xf32>
+  // expected-error @+1{{'bufferization.escape' is expected to be a bool array attribute}}
+  %0 = bufferization.alloc_tensor(%sz) {bufferization.escape = 5} : tensor<?xf32>
   return
 }
+
+// -----
+
+func.func @alloc_tensor_invalid_escape_attr_size(%sz: index) {
+  // expected-error @+1{{'bufferization.escape' has wrong number of elements, expected 1, got 2}}
+  %0 = bufferization.alloc_tensor(%sz) {bufferization.escape = [true, false]} : tensor<?xf32>
+  return
+}
+
+// -----
+
+func.func @escape_attr_non_allocating(%t0: tensor<?xf32>) {
+  // expected-error @+1{{'bufferization.escape' only valid for allocation results}}
+  %0 = tensor.extract_slice %t0[0][5][1] {bufferization.escape = [true]} : tensor<?xf32> to tensor<5xf32>
+  return
+}
+
+// -----
+
+func.func @escape_attr_non_bufferizable(%m0: memref<?xf32>) {
+  // expected-error @+1{{'bufferization.escape' only valid on bufferizable ops}}
+  %0 = memref.cast %m0 {bufferization.escape = [true]} : memref<?xf32> to memref<10xf32>
+  return
+}
\ No newline at end of file
diff --git a/mlir/test/Dialect/SCF/one-shot-bufferize-tensor-copy-insertion.mlir b/mlir/test/Dialect/SCF/one-shot-bufferize-tensor-copy-insertion.mlir
--- a/mlir/test/Dialect/SCF/one-shot-bufferize-tensor-copy-insertion.mlir
+++ b/mlir/test/Dialect/SCF/one-shot-bufferize-tensor-copy-insertion.mlir
@@ -7,8 +7,8 @@
                    %lb : index, %ub : index, %step : index)
   -> (tensor<?xf32>, tensor<?xf32>)
 {
-  // CHECK: %[[A_copy:.*]] = bufferization.alloc_tensor() copy(%[[A]]) {escape = false} : tensor<?xf32>
-  // CHECK: %[[B_copy:.*]] = bufferization.alloc_tensor() copy(%[[B]]) {escape = false} : tensor<?xf32>
+  // CHECK: %[[A_copy:.*]] = bufferization.alloc_tensor() copy(%[[A]]) {bufferization.escape = [false]} : tensor<?xf32>
+  // CHECK: %[[B_copy:.*]] = bufferization.alloc_tensor() copy(%[[B]]) {bufferization.escape = [false]} : tensor<?xf32>
   // CHECK: %[[for:.*]]:2 = scf.for {{.*}} iter_args(%[[iter1:.*]] = %[[A_copy]], %[[iter2:.*]] = %[[B_copy]])
   %r0:2 = scf.for %i = %lb to %ub step %step iter_args(%tA = %A, %tB = %B)
       -> (tensor<?xf32>, tensor<?xf32>)
@@ -28,15 +28,15 @@
                    %lb : index, %ub : index, %step : index)
   -> (tensor<?xf32>, tensor<?xf32>)
 {
-  // CHECK: %[[A_copy:.*]] = bufferization.alloc_tensor() copy(%[[A]]) {escape = false} : tensor<?xf32>
-  // CHECK: %[[B_copy:.*]] = bufferization.alloc_tensor() copy(%[[B]]) {escape = false} : tensor<?xf32>
+  // CHECK: %[[A_copy:.*]] = bufferization.alloc_tensor() copy(%[[A]]) {bufferization.escape = [false]} : tensor<?xf32>
+  // CHECK: %[[B_copy:.*]] = bufferization.alloc_tensor() copy(%[[B]]) {bufferization.escape = [false]} : tensor<?xf32>
   // CHECK: %[[for:.*]]:2 = scf.for {{.*}} iter_args(%[[iter1:.*]] = %[[A_copy]], %[[iter2:.*]] = %[[B_copy]])
   %r0:2 = scf.for %i = %lb to %ub step %step iter_args(%tA = %A, %tB = %B)
       -> (tensor<?xf32>, tensor<?xf32>)
   {
     // Yield tensors in different order.
-    // CHECK-DAG: %[[yield1:.*]] = bufferization.alloc_tensor() copy(%[[iter2]]) {escape = true} : tensor<?xf32>
-    // CHECK-DAG: %[[yield2:.*]] = bufferization.alloc_tensor() copy(%[[iter1]]) {escape = true} : tensor<?xf32>
+    // CHECK-DAG: %[[yield1:.*]] = bufferization.alloc_tensor() copy(%[[iter2]]) {bufferization.escape = [true]} : tensor<?xf32>
+    // CHECK-DAG: %[[yield2:.*]] = bufferization.alloc_tensor() copy(%[[iter1]]) {bufferization.escape = [true]} : tensor<?xf32>
     // CHECK: scf.yield %[[yield1]], %[[yield2]]
     scf.yield %tB, %tA : tensor<?xf32>, tensor<?xf32>
   }
@@ -51,8 +51,8 @@
 func.func @scf_while(%A: tensor<5xi1>, %B: tensor<5xi1>, %idx: index)
   -> (tensor<5xi1>, tensor<5xi1>)
 {
-  // CHECK: %[[A_copy:.*]] = bufferization.alloc_tensor() copy(%[[A]]) {escape = false} : tensor<5xi1>
-  // CHECK: %[[B_copy:.*]] = bufferization.alloc_tensor() copy(%[[B]]) {escape = false} : tensor<5xi1>
+  // CHECK: %[[A_copy:.*]] = bufferization.alloc_tensor() copy(%[[A]]) {bufferization.escape = [false]} : tensor<5xi1>
+  // CHECK: %[[B_copy:.*]] = bufferization.alloc_tensor() copy(%[[B]]) {bufferization.escape = [false]} : tensor<5xi1>
   // CHECK: %[[loop:.*]]:2 = scf.while (%[[w0:.*]] = %[[A_copy]], %[[w1:.*]] = %[[B_copy]]) {{.*}} {
   %r0, %r1 = scf.while (%w0 = %A, %w1 = %B)
       : (tensor<5xi1>, tensor<5xi1>) -> (tensor<5xi1>, tensor<5xi1>) {
@@ -82,24 +82,24 @@
     %idx: index)
   -> (tensor<5xi1>, tensor<5xi1>)
 {
-  // CHECK: %[[A_copy:.*]] = bufferization.alloc_tensor() copy(%[[A]]) {escape = false} : tensor<5xi1>
-  // CHECK: %[[B_copy:.*]] = bufferization.alloc_tensor() copy(%[[B]]) {escape = false} : tensor<5xi1>
+  // CHECK: %[[A_copy:.*]] = bufferization.alloc_tensor() copy(%[[A]]) {bufferization.escape = [false]} : tensor<5xi1>
+  // CHECK: %[[B_copy:.*]] = bufferization.alloc_tensor() copy(%[[B]]) {bufferization.escape = [false]} : tensor<5xi1>
   // CHECK: %[[loop:.*]]:2 = scf.while (%[[w0:.*]] = %[[A_copy]], %[[w1:.*]] = %[[B_copy]]) {{.*}} {
   %r0, %r1 = scf.while (%w0 = %A, %w1 = %B)
       : (tensor<5xi1>, tensor<5xi1>) -> (tensor<5xi1>, tensor<5xi1>) {
     // CHECK: %[[condition:.*]] = tensor.extract %[[w0]]
     %condition = tensor.extract %w0[%idx] : tensor<5xi1>
     // Yield tensors in different order.
-    // CHECK-DAG: %[[yield0:.*]] = bufferization.alloc_tensor() copy(%[[w1]]) {escape = true} : tensor<5xi1>
-    // CHECK-DAG: %[[yield1:.*]] = bufferization.alloc_tensor() copy(%[[w0]]) {escape = true} : tensor<5xi1>
+    // CHECK-DAG: %[[yield0:.*]] = bufferization.alloc_tensor() copy(%[[w1]]) {bufferization.escape = [true]} : tensor<5xi1>
+    // CHECK-DAG: %[[yield1:.*]] = bufferization.alloc_tensor() copy(%[[w0]]) {bufferization.escape = [true]} : tensor<5xi1>
     // CHECK: scf.condition(%[[condition]]) %[[yield0]], %[[yield1]]
     scf.condition(%condition) %w1, %w0 : tensor<5xi1>, tensor<5xi1>
   } do {
   ^bb0(%b0: tensor<5xi1>, %b1: tensor<5xi1>):
     // CHECK: } do {
     // CHECK: ^bb0(%[[b0:.*]]: tensor<5xi1>, %[[b1:.*]]: tensor<5xi1>):
-    // CHECK-DAG: %[[yield2:.*]] = bufferization.alloc_tensor() copy(%[[b1]]) {escape = true} : tensor<5xi1>
-    // CHECK-DAG: %[[yield3:.*]] = bufferization.alloc_tensor() copy(%[[b0]]) {escape = true} : tensor<5xi1>
+    // CHECK-DAG: %[[yield2:.*]] = bufferization.alloc_tensor() copy(%[[b1]]) {bufferization.escape = [true]} : tensor<5xi1>
+    // CHECK-DAG: %[[yield3:.*]] = bufferization.alloc_tensor() copy(%[[b0]]) {bufferization.escape = [true]} : tensor<5xi1>
     // CHECK: scf.yield %[[yield2]], %[[yield3]]
     // CHECK: }
    scf.yield %b1, %b0 : tensor<5xi1>, tensor<5xi1>
@@ -119,7 +119,7 @@
   %num_threads = arith.constant 100 : index
 
   // CHECK-FUNC-NOT: alloc_tensor
-  // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() copy(%[[arg1]]) {escape = false} : tensor<100xf32>
+  // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() copy(%[[arg1]]) {bufferization.escape = [false]} : tensor<100xf32>
   // CHECK: scf.foreach_thread
   %result = scf.foreach_thread (%thread_idx) in (%num_threads) -> tensor<100xf32> {
     // CHECK: tensor.extract_slice
diff --git a/mlir/test/Dialect/Tensor/one-shot-bufferize-tensor-copy-insertion.mlir b/mlir/test/Dialect/Tensor/one-shot-bufferize-tensor-copy-insertion.mlir
--- a/mlir/test/Dialect/Tensor/one-shot-bufferize-tensor-copy-insertion.mlir
+++ b/mlir/test/Dialect/Tensor/one-shot-bufferize-tensor-copy-insertion.mlir
@@ -9,8 +9,8 @@
 {
   // CHECK: %[[extract_slice:.*]] = tensor.extract_slice %[[t]][10] [5] [1]
   %0 = tensor.extract_slice %t[10][5][1] : tensor<?xf32> to tensor<5xf32>
-  // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() copy(%[[extract_slice]]) {escape = false} : tensor<5xf32>
-  // CHECK-FUNC: bufferization.alloc_tensor() copy(%{{.*}}) {escape = true} : tensor<5xf32>
+  // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() copy(%[[extract_slice]]) {bufferization.escape = [false]} : tensor<5xf32>
+  // CHECK-FUNC: bufferization.alloc_tensor() copy(%{{.*}}) {bufferization.escape = [true]} : tensor<5xf32>
   // CHECK: %[[insert:.*]] = tensor.insert %{{.*}} into %[[alloc]]
   %1 = tensor.insert %f into %0[%idx] : tensor<5xf32>
   // CHECK: return %[[insert]], %[[t]]