diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/Bufferization.h b/mlir/include/mlir/Dialect/Bufferization/IR/Bufferization.h --- a/mlir/include/mlir/Dialect/Bufferization/IR/Bufferization.h +++ b/mlir/include/mlir/Dialect/Bufferization/IR/Bufferization.h @@ -57,6 +57,15 @@ LogicalResult foldToMemrefToTensorPair(RewriterBase &rewriter, ToMemrefOp toMemref); +/// Return the memory space of the given OpResult as indicated by the optional +/// `bufferization.memory_space` attribute. Return `None` if no memory space is +/// specified. +Optional<unsigned> getMemorySpaceAttr(OpResult result); + +/// Set the memory space of the given OpResult as indicated via the +/// `bufferization.memory_space` attribute. +void setMemorySpaceAttr(OpResult result, Optional<unsigned> value); + } // namespace bufferization } // namespace mlir diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationBase.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationBase.td --- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationBase.td +++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationBase.td @@ -49,6 +49,17 @@ /// allocation (as per BufferizableOpInterface) may have this attribute. constexpr const static ::llvm::StringLiteral kEscapeAttrName = "bufferization.escape"; + + /// Attribute name used to indicate the memory space of buffer allocations. + /// + /// Note: Only ops that bufferize to an allocation may have this attribute. + constexpr const static ::llvm::StringLiteral + kMemorySpaceAttrName = "bufferization.memory_space"; + + /// Attribute value used to indicate that no memory space is specified for + /// the corresponding OpResult.
+ constexpr const static int64_t + kNoMemorySpaceSpecifiedAttrValue = -1; }]; let hasOperationAttrVerify = 1; } diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td --- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td +++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td @@ -46,10 +46,10 @@ conflicts that would have been introduced by the in-place bufferization of another op. - The optional `memory_space` attribute specifies the memory space when - bufferizing this op. The memory space is inferred from `copy` if specified. - If neither `copy` nor `memory_space` is specified, the default memory space - is used during bufferization. + The optional `bufferization.memory_space` attribute specifies the memory + space when bufferizing this op. The memory space is inferred from `copy` if + there is no `memory_space` attribute. If neither `copy` nor `memory_space` + is specified, the default memory space is used during bufferization. Both dense and sparse tensor types are supported. The result of a `bufferization.alloc_tensor` is a tensor value that can be used like any @@ -69,8 +69,7 @@ }]; let arguments = (ins Variadic<Index>:$dynamic_sizes, - Optional<AnyTensor>:$copy, - OptionalAttr<UI64Attr>:$memory_space); + Optional<AnyTensor>:$copy); let results = (outs AnyTensor:$result); @@ -129,12 +128,8 @@ }]; let builders = [ - // Build an op without `copy` or `memory_space`. + // Build an op without `copy`. OpBuilder<(ins "RankedTensorType":$type, "ValueRange":$dynamicSizes)>, - - // Build an op without `memory_space`.
- OpBuilder<(ins "RankedTensorType":$type, "ValueRange":$dynamicSizes, - "Value":$copy)>, ]; let hasCanonicalizer = 1; diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp --- a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp +++ b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp @@ -114,9 +114,8 @@ FailureOr<BaseMemRefType> copyBufferType = getBufferType(tensor, options); if (failed(copyBufferType)) return failure(); - allocTensorOp.setMemorySpaceAttr( - b.getIntegerAttr(b.getIntegerType(64, /*isSigned=*/false), - copyBufferType->getMemorySpaceAsInt())); + setMemorySpaceAttr(allocTensorOp->getOpResult(0), + copyBufferType->getMemorySpaceAsInt()); return allocTensorOp.getResult(); } diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationDialect.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationDialect.cpp --- a/mlir/lib/Dialect/Bufferization/IR/BufferizationDialect.cpp +++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationDialect.cpp @@ -31,6 +31,14 @@ /// Attribute name used to mark escaping behavior of buffer allocations. constexpr const ::llvm::StringLiteral BufferizationDialect::kEscapeAttrName; +/// Attribute name used to indicate the memory space of buffer allocations. +constexpr const ::llvm::StringLiteral + BufferizationDialect::kMemorySpaceAttrName; + +/// Attribute value used to indicate that no memory space is specified for the +/// corresponding OpResult.
+constexpr const int64_t BufferizationDialect::kNoMemorySpaceSpecifiedAttrValue; + //===----------------------------------------------------------------------===// // Bufferization Dialect Interfaces //===----------------------------------------------------------------------===// @@ -115,6 +123,38 @@ } return success(); } + if (attr.getName() == kMemorySpaceAttrName) { + auto arrayAttr = attr.getValue().dyn_cast<ArrayAttr>(); + if (!arrayAttr) + return op->emitError() << "'" << kMemorySpaceAttrName + << "' is expected to be an int array attribute"; + if (arrayAttr.size() != op->getNumResults()) + return op->emitError() + << "'" << kMemorySpaceAttrName + << "' has wrong number of elements, expected " + << op->getNumResults() << ", got " << arrayAttr.size(); + auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op); + if (!bufferizableOp) + return op->emitError() << "'" << kMemorySpaceAttrName + << "' only valid on bufferizable ops"; + for (const auto &it : llvm::enumerate(arrayAttr)) { + auto attr = it.value(); + auto intAttr = attr.dyn_cast<IntegerAttr>(); + if (!intAttr) + return op->emitError() << "'" << kMemorySpaceAttrName + << "' is expected to be an int array attribute"; + if (intAttr.getValue() == + BufferizationDialect::kNoMemorySpaceSpecifiedAttrValue) + continue; + if (!op->getResult(it.index()).getType().isa<TensorType>()) + return op->emitError() << "'" << kMemorySpaceAttrName + << "' only valid for tensor results"; + if (!bufferizableOp.bufferizesToAllocation(op->getOpResult(it.index()))) + return op->emitError() << "'" << kMemorySpaceAttrName + << "' only valid for allocation results"; + } + return success(); + } return op->emitError() << "attribute '" << attr.getName() << "' not supported by the bufferization dialect"; diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp --- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp +++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp @@ -145,6 +145,40 @@ } } +Optional<unsigned>
mlir::bufferization::getMemorySpaceAttr(OpResult result) { + Operation *op = result.getDefiningOp(); + if (!op->hasAttr(BufferizationDialect::kMemorySpaceAttrName)) + return None; + auto arrayAttr = + op->getAttr(BufferizationDialect::kMemorySpaceAttrName).cast<ArrayAttr>(); + int64_t memorySpace = + arrayAttr[result.getResultNumber()].cast<IntegerAttr>().getInt(); + if (memorySpace == BufferizationDialect::kNoMemorySpaceSpecifiedAttrValue) + return None; + return static_cast<unsigned>(memorySpace); +} + +void mlir::bufferization::setMemorySpaceAttr(OpResult result, + Optional<unsigned> value) { + Operation *op = result.getDefiningOp(); + assert(cast<BufferizableOpInterface>(op).bufferizesToAllocation(result) && + "expected that OpResult bufferizes to an allocation"); + SmallVector<int64_t> arrayValues( + op->getNumResults(), + BufferizationDialect::kNoMemorySpaceSpecifiedAttrValue); + if (op->hasAttr(BufferizationDialect::kMemorySpaceAttrName)) + arrayValues = llvm::to_vector(llvm::map_range( + op->getAttr(BufferizationDialect::kMemorySpaceAttrName) + .cast<ArrayAttr>(), + [](Attribute a) { return a.cast<IntegerAttr>().getInt(); })); + arrayValues[result.getResultNumber()] = + value.has_value() + ? value.value() + : BufferizationDialect::kNoMemorySpaceSpecifiedAttrValue; + op->setAttr(BufferizationDialect::kMemorySpaceAttrName, + OpBuilder(op->getContext()).getI64ArrayAttr(arrayValues)); +} + //===----------------------------------------------------------------------===// // AllocTensorOp //===----------------------------------------------------------------------===// @@ -240,8 +274,9 @@ // Compute memory space of this allocation.
unsigned memorySpace; - if (getMemorySpace().has_value()) { - memorySpace = *getMemorySpace(); + if (auto maybeMemorySpace = + getMemorySpaceAttr(getOperation()->getOpResult(0))) { + memorySpace = *maybeMemorySpace; } else if (getCopy()) { auto copyBufferType = bufferization::getBufferType(getCopy(), options, fixedTypes); @@ -281,15 +316,7 @@ void AllocTensorOp::build(OpBuilder &builder, OperationState &result, RankedTensorType type, ValueRange dynamicSizes) { - build(builder, result, type, dynamicSizes, /*copy=*/Value(), - /*memory_space=*/IntegerAttr()); -} - -void AllocTensorOp::build(OpBuilder &builder, OperationState &result, - RankedTensorType type, ValueRange dynamicSizes, - Value copy) { - build(builder, result, type, dynamicSizes, copy, - /*memory_space=*/IntegerAttr()); + build(builder, result, type, dynamicSizes, /*copy=*/Value()); } namespace { @@ -331,6 +358,8 @@ return failure(); auto newOp = rewriter.create<AllocTensorOp>( op.getLoc(), newType, newDynamicSizes, /*copy=*/Value()); + if (auto maybeMemorySpace = getMemorySpaceAttr(op->getOpResult(0))) + setMemorySpaceAttr(newOp->getOpResult(0), *maybeMemorySpace); rewriter.replaceOpWithNewOp<tensor::CastOp>(op, op.getType(), newOp); return success(); } diff --git a/mlir/lib/Dialect/Bufferization/Transforms/TensorCopyInsertion.cpp b/mlir/lib/Dialect/Bufferization/Transforms/TensorCopyInsertion.cpp --- a/mlir/lib/Dialect/Bufferization/Transforms/TensorCopyInsertion.cpp +++ b/mlir/lib/Dialect/Bufferization/Transforms/TensorCopyInsertion.cpp @@ -87,7 +87,7 @@ auto tensorCopy = rewriter.create<AllocTensorOp>( bufferizableOp->getLoc(), operand.getType().cast<RankedTensorType>(), /*dynamicSizes=*/ValueRange(), - /*copy=*/operand, /*memory_space=*/IntegerAttr()); + /*copy=*/operand); for (OpOperand *use : usesInsideRegion) use->set(tensorCopy); } diff --git a/mlir/test/Dialect/Arith/one-shot-bufferize-memory-space-invalid.mlir b/mlir/test/Dialect/Arith/one-shot-bufferize-memory-space-invalid.mlir --- a/mlir/test/Dialect/Arith/one-shot-bufferize-memory-space-invalid.mlir +++
b/mlir/test/Dialect/Arith/one-shot-bufferize-memory-space-invalid.mlir @@ -3,8 +3,8 @@ func.func @inconsistent_memory_space_arith_select(%c: i1) -> tensor<10xf32> { // Selecting tensors with different memory spaces. Such IR cannot be // bufferized. - %0 = bufferization.alloc_tensor() {memory_space = 0 : ui64} : tensor<10xf32> - %1 = bufferization.alloc_tensor() {memory_space = 1 : ui64} : tensor<10xf32> + %0 = bufferization.alloc_tensor() {bufferization.memory_space = [0]} : tensor<10xf32> + %1 = bufferization.alloc_tensor() {bufferization.memory_space = [1]} : tensor<10xf32> // expected-error @+2 {{inconsistent memory space on true/false operands}} // expected-error @+1 {{failed to bufferize op}} %r = arith.select %c, %0, %1 : tensor<10xf32> diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir --- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir @@ -171,7 +171,7 @@ // CHECK-LABEL: func @alloc_tensor_with_memory_space() func.func @alloc_tensor_with_memory_space() -> tensor<5xf32> { // CHECK: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<5xf32, 1> - %0 = bufferization.alloc_tensor() {memory_space = 1 : ui64} : tensor<5xf32> + %0 = bufferization.alloc_tensor() {bufferization.memory_space = [1]} : tensor<5xf32> // CHECK: %[[r:.*]] = bufferization.to_tensor %[[alloc]] // CHECK: memref.dealloc %[[alloc]] // CHECK: return %[[r]] diff --git a/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion-memory-space.mlir b/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion-memory-space.mlir --- a/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion-memory-space.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion-memory-space.mlir @@ -17,9 +17,9 @@ func.func @alloc_tensor_copy() -> (tensor<10xf32>, tensor<10xf32>) { %c0 = 
arith.constant 0 : index %cst = arith.constant 0.0 : f32 - // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 1 : ui64} : tensor<10xf32> - %t = bufferization.alloc_tensor() {memory_space = 1 : ui64} : tensor<10xf32> - // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 1 : ui64} : tensor<10xf32> + // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false], bufferization.memory_space = [1]} : tensor<10xf32> + %t = bufferization.alloc_tensor() {bufferization.memory_space = [1]} : tensor<10xf32> + // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false], bufferization.memory_space = [1]} : tensor<10xf32> %s = tensor.insert %cst into %t[%c0] : tensor<10xf32> return %s, %t : tensor<10xf32>, tensor<10xf32> } diff --git a/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir b/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir --- a/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir @@ -40,10 +40,10 @@ { // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<5xf32> // The second alloc_tensor should not have a copy operand. 
- // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<5xf32> + // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false], bufferization.memory_space = [0]} : tensor<5xf32> // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {bufferization.escape = [true]} : tensor<5xf32> - // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {bufferization.escape = [true], memory_space = 0 : ui64} : tensor<5xf32> + // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {bufferization.escape = [true], bufferization.memory_space = [0]} : tensor<5xf32> %0 = bufferization.alloc_tensor() : tensor<5xf32> %1 = tensor.insert %f into %0[%idx] : tensor<5xf32> return %0, %1 : tensor<5xf32>, tensor<5xf32> @@ -55,7 +55,7 @@ func.func @do_not_copy_when_overwritten(%t: tensor<5xf32>, %f: f32) -> (tensor<5xf32>, tensor<5xf32>) { - // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<5xf32> + // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false], bufferization.memory_space = [0]} : tensor<5xf32> // CHECK: linalg.generic {{.*}} outs(%[[alloc]] : tensor<5xf32>) %r = linalg.generic { indexing_maps = [affine_map<(d0) -> (d0)>], @@ -74,7 +74,7 @@ -> (tensor<3xf32>) { %0 = tensor.extract_slice %t[0][3][1] : tensor<5xf32> to tensor<3xf32> - // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<3xf32> + // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false], bufferization.memory_space = [0]} : tensor<3xf32> // CHECK: linalg.generic {{.*}} outs(%[[alloc]] : tensor<3xf32>) %r = linalg.generic { indexing_maps = [affine_map<(d0) -> (d0)>], diff --git a/mlir/test/Dialect/Bufferization/invalid.mlir b/mlir/test/Dialect/Bufferization/invalid.mlir --- a/mlir/test/Dialect/Bufferization/invalid.mlir +++ b/mlir/test/Dialect/Bufferization/invalid.mlir @@ -81,9 
+81,24 @@ // ----- -func.func @alloc_tensor_invalid_memory_space_attr(%sz: index) { - // expected-error @+1{{'bufferization.alloc_tensor' op attribute 'memory_space' failed to satisfy constraint: 64-bit unsigned integer attribute}} - %0 = bufferization.alloc_tensor(%sz) {memory_space = "foo"} : tensor<?xf32> +func.func @invalid_memory_space_attr_type(%sz: index) { + // expected-error @+1{{'bufferization.memory_space' is expected to be an int array attribute}} + %0 = bufferization.alloc_tensor(%sz) {bufferization.memory_space = "foo"} : tensor<?xf32> return } +// ----- + +func.func @invalid_memory_space_attr_size(%sz: index) { + // expected-error @+1{{'bufferization.memory_space' has wrong number of elements, expected 1, got 2}} + %0 = bufferization.alloc_tensor(%sz) {bufferization.memory_space = [1, 2]} : tensor<?xf32> + return +} + +// ----- + +func.func @invalid_memory_space_attr_op(%t: tensor<?xf32>, %pos: index) { + // expected-error @+1{{'bufferization.memory_space' only valid for allocation results}} + tensor.extract_slice %t[%pos][5][1] {bufferization.memory_space = [1]} : tensor<?xf32> to tensor<5xf32> + return +} diff --git a/mlir/test/Dialect/SCF/one-shot-bufferize-invalid.mlir b/mlir/test/Dialect/SCF/one-shot-bufferize-invalid.mlir --- a/mlir/test/Dialect/SCF/one-shot-bufferize-invalid.mlir +++ b/mlir/test/Dialect/SCF/one-shot-bufferize-invalid.mlir @@ -3,8 +3,8 @@ func.func @inconsistent_memory_space_scf_if(%c: i1) -> tensor<10xf32> { // Yielding tensors with different memory spaces. Such IR cannot be // bufferized.
- %0 = bufferization.alloc_tensor() {memory_space = 0 : ui64} : tensor<10xf32> - %1 = bufferization.alloc_tensor() {memory_space = 1 : ui64} : tensor<10xf32> + %0 = bufferization.alloc_tensor() {bufferization.memory_space = [0]} : tensor<10xf32> + %1 = bufferization.alloc_tensor() {bufferization.memory_space = [1]} : tensor<10xf32> // expected-error @+1 {{inconsistent memory space on then/else branches}} %r = scf.if %c -> tensor<10xf32> { // expected-error @+1 {{failed to bufferize op}} diff --git a/mlir/test/Dialect/SCF/one-shot-bufferize.mlir b/mlir/test/Dialect/SCF/one-shot-bufferize.mlir --- a/mlir/test/Dialect/SCF/one-shot-bufferize.mlir +++ b/mlir/test/Dialect/SCF/one-shot-bufferize.mlir @@ -691,7 +691,7 @@ { %c0 = arith.constant 0 : index // CHECK: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<5xf32, 1> - %0 = bufferization.alloc_tensor() {memory_space = 1 : ui64} : tensor<5xf32> + %0 = bufferization.alloc_tensor() {bufferization.memory_space = [1]} : tensor<5xf32> // CHECK: scf.if %{{.*}} -> (memref<5xf32, 1>) { %1 = scf.if %c -> tensor<5xf32> { // CHECK: %[[cloned:.*]] = bufferization.clone %[[alloc]] @@ -721,7 +721,7 @@ func.func @scf_execute_region_memory_space(%f: f32) -> f32 { %c0 = arith.constant 0 : index %0 = scf.execute_region -> tensor<5xf32> { - %1 = bufferization.alloc_tensor() {memory_space = 1 : ui64} : tensor<5xf32> + %1 = bufferization.alloc_tensor() {bufferization.memory_space = [1]} : tensor<5xf32> %2 = tensor.insert %f into %1[%c0] : tensor<5xf32> scf.yield %2 : tensor<5xf32> } @@ -741,8 +741,8 @@ { // CHECK: memref.alloc(%{{.*}}) {{.*}} : memref<?xf32, 1> // CHECK: memref.alloc(%{{.*}}) {{.*}} : memref<?xf32, 1> - %A = bufferization.alloc_tensor(%sz) {memory_space = 1 : ui64} : tensor<?xf32> - %B = bufferization.alloc_tensor(%sz) {memory_space = 1 : ui64} : tensor<?xf32> + %A = bufferization.alloc_tensor(%sz) {bufferization.memory_space = [1]} : tensor<?xf32> + %B = bufferization.alloc_tensor(%sz) {bufferization.memory_space = [1]} : tensor<?xf32> // CHECK: scf.for {{.*}} { %r0:2 =
scf.for %i = %lb to %ub step %step iter_args(%tA = %A, %tB = %B) diff --git a/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir b/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir @@ -22,7 +22,7 @@ // CHECK-LABEL: func.func @fold_yield_arg_zero() -> tensor<1024x1024xf64> { // CHECK: %[[VAL_0:.*]] = arith.constant dense<0.000000e+00> : tensor<1024x1024xf64> -// CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() copy(%[[VAL_0]]) {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<1024x1024xf64> +// CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() copy(%[[VAL_0]]) {bufferization.escape = [false], bufferization.memory_space = [0]} : tensor<1024x1024xf64> // CHECK: return %[[VAL_1]] : tensor<1024x1024xf64> // CHECK: } func.func @fold_yield_arg_zero() -> tensor<1024x1024xf64> { @@ -41,7 +41,7 @@ // CHECK-LABEL: func.func @fold_yield_direct_zero() -> tensor<32xf64> { // CHECK: %[[VAL_0:.*]] = arith.constant dense<0.000000e+00> : tensor<32xf64> -// CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() copy(%[[VAL_0]]) {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<32xf64> +// CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() copy(%[[VAL_0]]) {bufferization.escape = [false], bufferization.memory_space = [0]} : tensor<32xf64> // CHECK: return %[[VAL_1]] : tensor<32xf64> // CHECK: } func.func @fold_yield_direct_zero() -> tensor<32xf64> { @@ -65,7 +65,7 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant dense<0.000000e+00> : tensor<8x8xf64> // CHECK: %[[VAL_7:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) {bufferization.escape = [false]} : tensor<8x8xf64> -// CHECK: %[[VAL_8:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<8x8xf64> +// CHECK: %[[VAL_8:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) 
{bufferization.escape = [false], bufferization.memory_space = [0]} : tensor<8x8xf64> // CHECK: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<8x8xf64> // CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<8x8xf64> // CHECK: %[[VAL_11:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{.*}}>> to memref<?xindex>