diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h --- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h +++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h @@ -490,6 +490,20 @@ FailureOr getBufferType(Value value, const BufferizationOptions &options); +/// Try to infer the memory space from the given tensor value. +FailureOr inferMemorySpace(Value value, + const BufferizationOptions &options); + +/// Try to infer the memory space from the given tensor value. If no memory +/// space could be inferred, take the default memory space from `options`. If no +/// default memory space is specified, return `failure`. +FailureOr +inferOrDefaultMemorySpace(Value value, const BufferizationOptions &options); + +/// Try to infer the memory space from the given tensor value. +FailureOr inferMemorySpace(OpResult opResult, + const AnalysisState &state); + /// Replace an op with replacement values. The op is deleted. Tensor OpResults /// must be replaced with memref values. void replaceOpWithBufferizedValues(RewriterBase &rewriter, Operation *op, diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td --- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td +++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td @@ -264,6 +264,46 @@ return failure(); }] >, + InterfaceMethod< + /*desc=*/[{ + Try to infer the memory space of the given value after bufferization. + The value must belong to this op, i.e., it is an OpResult of this op + or a BlockArgument of a block of this op. + + Ops that bufferize to an allocation allow users to specify a memory + space directly on the op. Such ops should return that memory space if + specified. 
+ + By default, this method follows the reverse SSA use-def chains, until + a value is found for which a memory space can be inferred. Only values + that are guaranteed to bufferize to equivalent buffers are considered + when following the chains. Following all potentially aliasing + OpOperands is not safe because there could be false positives, i.e., + OpOperands that are not actually aliasing with the given OpResult at + runtime; aliasing is a may-be property in BufferizableOpInterface, + equivalence is a must-be property. + + Ops that do not necessarily bufferize to equivalent buffers but can + provide memory space guarantees with respect to their OpResults or + BlockArguments should override this method for better memory space + inference. + }], + /*retType=*/"FailureOr", + /*methodName=*/"inferMemorySpace", + /*args=*/(ins "Value":$value, "const AnalysisState &":$state), + /*methodBody=*/"", + /*defaultImplementation=*/[{ + if (auto bbArg = value.dyn_cast()) { + assert(bbArg.getOwner()->getParentOp() == $_op.getOperation() && + "block argument must belong to this op"); + return failure(); + } + OpResult opResult = value.cast(); + assert(opResult.getDefiningOp() == $_op.getOperation() && + "op result must belong to this op"); + return bufferization::inferMemorySpace(opResult, state); + }] + >, InterfaceMethod< /*desc=*/[{ Return `true` if the given Value can be written to in-place. 
Value is diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td --- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td +++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td @@ -81,6 +81,9 @@ SmallVector getAliasingOpResult( OpOperand &opOperand, const AnalysisState &state); + FailureOr inferMemorySpace(Value value, + const AnalysisState &state); + RankedTensorType getType() { return getResult().getType().cast(); } @@ -242,6 +245,11 @@ return success(); } + FailureOr inferMemorySpace(Value value, + const AnalysisState &state) { + return getMemref().getType().cast().getMemorySpaceAsInt(); + } + bool isWritable(Value value, const AnalysisState &state) const { // It is unknown whether the memref operand is writable or not. return false; diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td --- a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td +++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td @@ -347,6 +347,10 @@ "Bufferize function boundaries (experimental).">, Option<"createDeallocs", "create-deallocs", "bool", /*default=*/"true", "Specify if new allocations should be deallocated.">, + Option<"mustInferMemorySpace", "must-infer-memory-space", "bool", + /*default=*/"false", + "The memory space of memref types must always be inferred. 
If " + "unset, a default memory space of 0 is used otherwise.">, ]; let constructor = "mlir::bufferization::createTensorCopyInsertionPass()"; } diff --git a/mlir/lib/Dialect/Arithmetic/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Arithmetic/Transforms/BufferizableOpInterfaceImpl.cpp --- a/mlir/lib/Dialect/Arithmetic/Transforms/BufferizableOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Arithmetic/Transforms/BufferizableOpInterfaceImpl.cpp @@ -133,6 +133,26 @@ &op->getOpOperand(2) /*false_value*/}; } + FailureOr inferMemorySpace(Operation *op, Value value, + const AnalysisState &state) const { + const BufferizationOptions &options = state.getOptions(); + auto selectOp = cast(op); +#ifndef NDEBUG + OpResult opResult = value.cast(); + assert(opResult.getOwner() == op && "value must be an OpResult of op"); +#endif // NDEBUG + + FailureOr trueMemorySpace = + bufferization::inferMemorySpace(selectOp.getTrueValue(), options); + FailureOr falseMemorySpace = + bufferization::inferMemorySpace(selectOp.getFalseValue(), options); + + if (failed(trueMemorySpace) || failed(falseMemorySpace) || + *trueMemorySpace != *falseMemorySpace) + return failure(); + return *trueMemorySpace; + } + LogicalResult bufferize(Operation *op, RewriterBase &rewriter, const BufferizationOptions &options) const { auto selectOp = cast(op); @@ -151,6 +171,15 @@ Value trueBuffer = *maybeTrueBuffer; Value falseBuffer = *maybeFalseBuffer; + // Infer memory space. + // Note: We try to infer the memory space here instead of just taking the + // memory space of either true/false operand, so that a better error message + // is produced. + FailureOr memorySpace = + inferOrDefaultMemorySpace(selectOp.getResult(), options); + if (failed(memorySpace)) + return selectOp->emitError("could not infer memory space"); + // The "true" and the "false" operands must have the same type. If the // buffers have different types, they differ only in their layout map. 
Cast // both of them to the most dynamic MemRef type. @@ -163,7 +192,7 @@ dynamicStrides, dynamicOffset, op->getContext()); auto castedType = MemRefType::get(trueType.getShape(), trueType.getElementType(), - stridedLayout, trueType.getMemorySpaceAsInt()); + stridedLayout, *memorySpace); trueBuffer = rewriter.create(loc, castedType, trueBuffer); falseBuffer = rewriter.create(loc, castedType, falseBuffer); diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp --- a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp +++ b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp @@ -84,10 +84,28 @@ populateDynamicDimSizes(b, loc, tensor, dynamicSizes); } + // Create AllocTensorOp. auto allocTensorOp = b.create(loc, tensorType, dynamicSizes, copy ? tensor : Value()); allocTensorOp->setAttr(BufferizationDialect::kEscapeAttrName, b.getBoolArrayAttr({escape})); + + // Add 'memory_space' attribute. Not needed if 'copy' operand is specified. + if (copy) + return allocTensorOp.getResult(); + FailureOr maybeMemorySpace = + inferOrDefaultMemorySpace(tensor, options); + if (failed(maybeMemorySpace)) { + if (OpResult opResult = tensor.dyn_cast()) + return opResult.getDefiningOp()->emitError( + "could not infer memory space"); + return tensor.cast() + .getParentBlock() + ->getParentOp() + ->emitError("could not infer memory space"); + } + allocTensorOp->setAttr(BufferizationDialect::kMemorySpaceAttrName, + b.getI64ArrayAttr({*maybeMemorySpace})); return allocTensorOp.getResult(); } @@ -486,20 +504,24 @@ FailureOr bufferization::getBuffer(RewriterBase &rewriter, Value value, const BufferizationOptions &options) { +#ifndef NDEBUG auto tensorType = value.getType().dyn_cast(); assert(tensorType && "unexpected non-tensor type"); +#endif // NDEBUG // Replace "%t = to_tensor %m" with %m. if (auto toTensorOp = value.getDefiningOp()) return toTensorOp.getMemref(); // Insert to_memref op. 
+ FailureOr memrefType = getBufferType(value, options); + if (failed(memrefType)) + return failure(); + ensureToMemrefOpIsValid(value, *memrefType); OpBuilder::InsertionGuard g(rewriter); setInsertionPointAfter(rewriter, value); - Type memrefType = getMemRefType(tensorType, options); - ensureToMemrefOpIsValid(value, memrefType); return rewriter - .create(value.getLoc(), memrefType, value) + .create(value.getLoc(), *memrefType, value) .getResult(); } @@ -512,7 +534,73 @@ if (auto toTensorOp = value.getDefiningOp()) return toTensorOp.getMemref().getType().cast(); - return getMemRefType(tensorType, options); + FailureOr memorySpace = inferOrDefaultMemorySpace(value, options); + if (failed(memorySpace)) { + Operation *op = + value.isa() + ? value.getDefiningOp() + : value.cast().getParentBlock()->getParentOp(); + return op->emitError("could not infer memory space"); + } + return getMemRefType(tensorType, options, /*layout=*/None, *memorySpace); +} + +FailureOr +bufferization::inferMemorySpace(Value value, + const BufferizationOptions &options) { + auto bufferizableOp = options.dynCastBufferizableOp(value); + if (!bufferizableOp) + return failure(); + + AnalysisState state(options); + return bufferizableOp.inferMemorySpace(value, state); +} + +FailureOr +bufferization::inferOrDefaultMemorySpace(Value value, + const BufferizationOptions &options) { + FailureOr inferred = inferMemorySpace(value, options); + if (succeeded(inferred)) + return inferred; + + if (options.defaultMemorySpace.hasValue()) + return *options.defaultMemorySpace; + + return failure(); +} + +// Follow reverse SSA use-def chains (only taking into account equivalent +// tensors), until an op is found for which the memory space is known. +FailureOr +bufferization::inferMemorySpace(OpResult opResult, const AnalysisState &state) { + auto bufferizableOp = + state.getOptions().dynCastBufferizableOp(opResult.getDefiningOp()); + if (!bufferizableOp) + return failure(); + + // Query aliasing tensor operands. 
+ SmallVector aliasingOpOperands = + bufferizableOp.getAliasingOpOperand(opResult, state); + if (aliasingOpOperands.empty()) + return failure(); + // Only consider equivalent buffers. "Aliasing" is a may-be property in + // the BufferizableOpInterface and we cannot have any false negatives here. + if (bufferizableOp.bufferRelation(opResult, state) != + BufferRelation::Equivalent) + return failure(); + + for (OpOperand *opOperand : aliasingOpOperands) { + Value value = opOperand->get(); + auto nextBufferizableOp = state.getOptions().dynCastBufferizableOp(value); + if (!nextBufferizableOp) + continue; + FailureOr nextResult = + nextBufferizableOp.inferMemorySpace(value, state); + if (succeeded(nextResult)) + return *nextResult; + } + + return failure(); } void bufferization::replaceOpWithBufferizedValues(RewriterBase &rewriter, diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp --- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp +++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp @@ -171,23 +171,15 @@ } // Compute memory space of this allocation. - unsigned memorySpace; - if (op->hasAttr(BufferizationDialect::kMemorySpaceAttrName)) { - memorySpace = op->getAttrOfType( - BufferizationDialect::kMemorySpaceAttrName)[0] - .cast() - .getValue() - .getZExtValue(); - } else if (options.defaultMemorySpace.hasValue()) { - memorySpace = *options.defaultMemorySpace; - } else { - return op->emitError("could not infer memory space"); - } + FailureOr inferredMemSpace = + inferOrDefaultMemorySpace(getResult(), options); + if (failed(inferredMemSpace)) + return emitError("could not infer memory space"); // Create memory allocation. 
auto allocType = MemRefType::get(getType().getShape(), getType().getElementType(), - AffineMap(), memorySpace); + AffineMap(), *inferredMemSpace); SmallVector dynamicDims = getDynamicSizes(); if (getCopy()) { assert(dynamicDims.empty() && "expected either `copy` or `dynamicDims`"); @@ -205,7 +197,6 @@ } // Should the buffer be deallocated? - AnalysisState analysisState(options); bool dealloc; if (op->hasAttr(BufferizationDialect::kEscapeAttrName)) { // AllocTensorOp has one result. @@ -216,6 +207,7 @@ // No "escape" annotation found. if (options.createDeallocs) { // Perform an ad-hoc analysis. + AnalysisState analysisState(options); dealloc = !analysisState.isTensorYielded(getResult()); } else { dealloc = false; @@ -235,6 +227,29 @@ return success(); } +FailureOr +AllocTensorOp::inferMemorySpace(Value value, const AnalysisState &state) { + Operation *op = getOperation(); + if (op->hasAttr(BufferizationDialect::kMemorySpaceAttrName)) + // Memory space was specified as an attribute. + return op + ->getAttrOfType( + BufferizationDialect::kMemorySpaceAttrName)[0] + .cast() + .getValue() + .getZExtValue(); + if (getCopy()) { + // Memory space can be inferred from the "copy" operand. But only if the op + // is bufferizable. + auto bufferizableOp = state.getOptions().dynCastBufferizableOp(getCopy()); + if (!bufferizableOp) + return failure(); + return bufferizableOp.inferMemorySpace(getCopy(), state); + } + + return failure(); +} + bool AllocTensorOp::isMemoryWrite(OpResult opResult, const AnalysisState &state) { // AllocTensorOps do not write unless they have a `copy` value. 
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/TensorCopyInsertion.cpp b/mlir/lib/Dialect/Bufferization/Transforms/TensorCopyInsertion.cpp --- a/mlir/lib/Dialect/Bufferization/Transforms/TensorCopyInsertion.cpp +++ b/mlir/lib/Dialect/Bufferization/Transforms/TensorCopyInsertion.cpp @@ -104,6 +104,8 @@ options.allowReturnAllocs = allowReturnAllocs; options.bufferizeFunctionBoundaries = bufferizeFunctionBoundaries; options.createDeallocs = createDeallocs; + if (mustInferMemorySpace) + options.defaultMemorySpace = None; if (failed(insertTensorCopies(getOperation(), options))) signalPassFailure(); } diff --git a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp --- a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp @@ -275,6 +275,16 @@ return BufferRelation::None; } + FailureOr inferMemorySpace(Operation *op, Value value, + const AnalysisState &state) const { + auto extractSliceOp = cast(op); + FailureOr bufferType = + getBufferType(extractSliceOp.source(), state.getOptions()); + if (failed(bufferType)) + return failure(); + return bufferType->getMemorySpaceAsInt(); + } + LogicalResult bufferize(Operation *op, RewriterBase &rewriter, const BufferizationOptions &options) const { auto extractSliceOp = cast(op); @@ -792,7 +802,9 @@ if (failed(srcBuffer) || failed(shapeBuffer)) return failure(); auto resultTensorType = reshapeOp.getResult().getType().cast(); - auto resultMemRefType = getMemRefType(resultTensorType, options); + auto resultMemRefType = getMemRefType( + resultTensorType, options, /*layout=*/None, + srcBuffer->getType().cast().getMemorySpaceAsInt()); replaceOpWithNewBufferizedOp( rewriter, op, resultMemRefType, *srcBuffer, *shapeBuffer); return success(); diff --git a/mlir/test/Dialect/Arithmetic/one-shot-bufferize-memory-space-invalid.mlir 
b/mlir/test/Dialect/Arithmetic/one-shot-bufferize-memory-space-invalid.mlir new file mode 100644 --- /dev/null +++ b/mlir/test/Dialect/Arithmetic/one-shot-bufferize-memory-space-invalid.mlir @@ -0,0 +1,12 @@ +// RUN: mlir-opt %s -one-shot-bufferize="must-infer-memory-space" -split-input-file -verify-diagnostics + +func.func @inconsistent_memory_space_arith_select(%c: i1) -> tensor<10xf32> { + // Selecting tensors with different memory spaces. Such IR cannot be + // bufferized. + %0 = bufferization.alloc_tensor() {bufferization.memory_space = [0]} : tensor<10xf32> + %1 = bufferization.alloc_tensor() {bufferization.memory_space = [1]} : tensor<10xf32> + // expected-error @+2 {{could not infer memory space}} + // expected-error @+1 {{failed to bufferize op}} + %r = arith.select %c, %0, %1 : tensor<10xf32> + func.return %r : tensor<10xf32> +} diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-memory-space-invalid.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-memory-space-invalid.mlir --- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-memory-space-invalid.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-memory-space-invalid.mlir @@ -6,3 +6,14 @@ %0 = bufferization.alloc_tensor() : tensor<10xf32> return %0 : tensor<10xf32> } + +// ----- + +func.func @memory_space_of_unknown_op() -> f32 { + %c0 = arith.constant 0 : index + // expected-error @+1 {{could not infer memory space}} + %t = "test.dummy_op"() : () -> (tensor<10xf32>) + // expected-error @+1 {{failed to bufferize op}} + %s = tensor.extract %t[%c0] : tensor<10xf32> + return %s : f32 +} diff --git a/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion-memory-space-invalid.mlir b/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion-memory-space-invalid.mlir new file mode 100644 --- /dev/null +++ b/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion-memory-space-invalid.mlir @@ -0,0 +1,12 @@ +// 
RUN: mlir-opt %s -tensor-copy-insertion="must-infer-memory-space" -split-input-file -verify-diagnostics + +// An alloc is inserted but no copy is emitted. Therefore, the memory space +// should be specified on the alloc_tensor op. +func.func @memory_space_of_unknown_op() -> (tensor<10xf32>, tensor<10xf32>) { + %c0 = arith.constant 0 : index + %cst = arith.constant 0.0 : f32 + // expected-error @+1 {{could not infer memory space}} + %t = bufferization.alloc_tensor() : tensor<10xf32> + %s = tensor.insert %cst into %t[%c0] : tensor<10xf32> + return %s, %t : tensor<10xf32>, tensor<10xf32> +} diff --git a/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion-memory-space.mlir b/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion-memory-space.mlir new file mode 100644 --- /dev/null +++ b/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion-memory-space.mlir @@ -0,0 +1,25 @@ +// RUN: mlir-opt %s -tensor-copy-insertion="must-infer-memory-space" -split-input-file | FileCheck %s + +// CHECK-LABEL: func @unknown_op_copy +func.func @unknown_op_copy() -> (tensor<10xf32>, tensor<10xf32>) { + %c0 = arith.constant 0 : index + %cst = arith.constant 0.0 : f32 + // CHECK: %[[dummy:.*]] = "test.dummy_op"() : () -> tensor<10xf32> + %t = "test.dummy_op"() : () -> tensor<10xf32> + // CHECK: %[[copy:.*]] = bufferization.alloc_tensor() copy(%[[dummy]]) {bufferization.escape = [false]} : tensor<10xf32> + %s = tensor.insert %cst into %t[%c0] : tensor<10xf32> + return %s, %t : tensor<10xf32>, tensor<10xf32> +} + +// ----- + +// CHECK-LABEL: func @alloc_tensor_copy +func.func @alloc_tensor_copy() -> (tensor<10xf32>, tensor<10xf32>) { + %c0 = arith.constant 0 : index + %cst = arith.constant 0.0 : f32 + // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false], bufferization.memory_space = [1]} : tensor<10xf32> + %t = bufferization.alloc_tensor() {bufferization.memory_space = [1]} : tensor<10xf32> + // CHECK: bufferization.alloc_tensor() 
{bufferization.escape = [false], bufferization.memory_space = [1]} : tensor<10xf32> + %s = tensor.insert %cst into %t[%c0] : tensor<10xf32> + return %s, %t : tensor<10xf32>, tensor<10xf32> +} diff --git a/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir b/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir --- a/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir @@ -40,10 +40,10 @@ { // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<5xf32> // The second alloc_tensor should not have a copy operand. - // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<5xf32> + // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false], bufferization.memory_space = [0]} : tensor<5xf32> // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {bufferization.escape = [true]} : tensor<5xf32> - // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {bufferization.escape = [true]} : tensor<5xf32> + // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {bufferization.escape = [true], bufferization.memory_space = [0]} : tensor<5xf32> %0 = bufferization.alloc_tensor() : tensor<5xf32> %1 = tensor.insert %f into %0[%idx] : tensor<5xf32> return %0, %1 : tensor<5xf32>, tensor<5xf32> @@ -55,7 +55,7 @@ func.func @do_not_copy_when_overwritten(%t: tensor<5xf32>, %f: f32) -> (tensor<5xf32>, tensor<5xf32>) { - // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<5xf32> + // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false], bufferization.memory_space = [0]} : tensor<5xf32> // CHECK: linalg.generic {{.*}} outs(%[[alloc]] : tensor<5xf32>) %r = linalg.generic { indexing_maps = [affine_map<(d0) -> (d0)>], @@ -74,7 +74,7 @@ -> (tensor<3xf32>) { %0 = tensor.extract_slice %t[0][3][1] : tensor<5xf32> to tensor<3xf32> - // CHECK: %[[alloc:.*]] = 
bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<3xf32> + // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false], bufferization.memory_space = [0]} : tensor<3xf32> // CHECK: linalg.generic {{.*}} outs(%[[alloc]] : tensor<3xf32>) %r = linalg.generic { indexing_maps = [affine_map<(d0) -> (d0)>],