diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
@@ -187,7 +187,7 @@
   /// Tensor -> MemRef type converter.
   /// Parameters: Value, memory space, bufferization options
   using UnknownTypeConverterFn = std::function<BaseMemRefType(
-      Value, unsigned, const BufferizationOptions &)>;
+      Value, Attribute memorySpace, const BufferizationOptions &)>;

   BufferizationOptions();

@@ -234,9 +234,9 @@
   bool bufferizeFunctionBoundaries = false;

   /// The default memory space that should be used when it cannot be inferred
-  /// from the context. If no default memory space is specified, bufferization
-  /// fails when the memory space cannot be inferred at any point.
-  Optional<unsigned> defaultMemorySpace = 0;
+  /// from the context. In case of llvm::None, bufferization fails when the
+  /// memory space cannot be inferred at any point.
+  Optional<Attribute> defaultMemorySpace = Attribute();

   /// Certain ops have aliasing OpOperand/OpResult invariants (e.g., scf.for).
   /// If this flag is set to `false`, those invariants are no longer enforced
@@ -547,17 +547,19 @@
 /// canonicalizations are currently not implemented.
 BaseMemRefType getMemRefType(Value value, const BufferizationOptions &options,
                              MemRefLayoutAttrInterface layout = {},
-                             unsigned memorySpace = 0);
+                             Attribute memorySpace = nullptr);

 /// Return a MemRef type with fully dynamic layout. If the given tensor type
 /// is unranked, return an unranked MemRef type.
-BaseMemRefType getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
-                                                   unsigned memorySpace = 0);
+BaseMemRefType
+getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
+                                    Attribute memorySpace = nullptr);

 /// Return a MemRef type with a static identity layout (i.e., no layout map).
 /// If the given tensor type is unranked, return an unranked MemRef type.
-BaseMemRefType getMemRefTypeWithStaticIdentityLayout(TensorType tensorType,
-                                                     unsigned memorySpace = 0);
+BaseMemRefType
+getMemRefTypeWithStaticIdentityLayout(TensorType tensorType,
+                                      Attribute memorySpace = nullptr);

 /// Return the owner of the given value. In case of a BlockArgument that is the
 /// owner of the block. In case of an OpResult that is the defining op.
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
@@ -81,7 +81,7 @@
   let arguments = (ins Variadic<Index>:$dynamic_sizes,
                        Optional<AnyTensor>:$copy,
                        Optional<Index>:$size_hint,
-                       OptionalAttr<UI64Attr>:$memory_space);
+                       OptionalAttr<AnyAttr>:$memory_space);

   let results = (outs AnyTensor:$result);
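With the two interface changes above, a memory space is an arbitrary `Attribute` rather than an `unsigned`, and the null attribute now stands for "no memory space". A minimal sketch of what this means for client code configuring bufferization (the chosen space value and the `llvm::None` variant are illustrative, not part of the patch):

```cpp
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/IR/Builders.h"

using namespace mlir;
using namespace mlir::bufferization;

// Sketch: choose a default memory space for buffers whose placement cannot
// be inferred. Before this patch the member was `Optional<unsigned>`; now
// any attribute works, e.g. an integer space or a dialect address space.
BufferizationOptions makeOptions(MLIRContext *ctx) {
  BufferizationOptions options;
  options.defaultMemorySpace = Builder(ctx).getI64IntegerAttr(1);
  // Alternatively, make bufferization fail wherever the space is unknown:
  // options.defaultMemorySpace = llvm::None;
  return options;
}
```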
diff --git a/mlir/lib/Dialect/Arith/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Arith/Transforms/BufferizableOpInterfaceImpl.cpp
--- a/mlir/lib/Dialect/Arith/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Arith/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -28,7 +28,7 @@
     // TODO: Implement memory space for this op. E.g., by adding a memory_space
     // attribute to ConstantOp.
-    if (options.defaultMemorySpace != static_cast<unsigned>(0))
+    if (options.defaultMemorySpace != Attribute())
       return op->emitError("memory space not implemented yet");

     // Only ranked tensors are supported.
@@ -188,7 +188,7 @@
       return failure();
     if (*trueType == *falseType)
       return *trueType;
-    if (trueType->getMemorySpaceAsInt() != falseType->getMemorySpaceAsInt())
+    if (trueType->getMemorySpace() != falseType->getMemorySpace())
       return op->emitError("inconsistent memory space on true/false operands");

     // If the buffers have different types, they differ only in their layout
@@ -197,7 +197,7 @@
     return getMemRefTypeWithFullyDynamicLayout(
         RankedTensorType::get(memrefType.getShape(),
                               memrefType.getElementType()),
-        memrefType.getMemorySpaceAsInt());
+        memrefType.getMemorySpace());
   }

   BufferRelation bufferRelation(Operation *op, OpResult opResult,
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
@@ -114,9 +114,10 @@
   FailureOr<BaseMemRefType> copyBufferType = getBufferType(tensor, options);
   if (failed(copyBufferType))
     return failure();
-  allocTensorOp.setMemorySpaceAttr(
-      b.getIntegerAttr(b.getIntegerType(64, /*isSigned=*/false),
-                       copyBufferType->getMemorySpaceAsInt()));
+  Attribute memorySpace = copyBufferType->getMemorySpace();
+  if (!memorySpace)
+    memorySpace = b.getI64IntegerAttr(0);
+  allocTensorOp.setMemorySpaceAttr(memorySpace);
   return allocTensorOp.getResult();
 }

@@ -258,7 +259,7 @@
 /// Default unknown type converter: Use a fully dynamic layout map.
 static BaseMemRefType
-defaultUnknownTypeConverter(Value value, unsigned memorySpace,
+defaultUnknownTypeConverter(Value value, Attribute memorySpace,
                             const BufferizationOptions &options) {
   return getMemRefTypeWithFullyDynamicLayout(value.getType().cast<TensorType>(),
                                              memorySpace);
@@ -731,16 +732,14 @@
 BaseMemRefType bufferization::getMemRefType(Value value,
                                             const BufferizationOptions &options,
                                             MemRefLayoutAttrInterface layout,
-                                            unsigned memorySpace) {
+                                            Attribute memorySpace) {
   auto tensorType = value.getType().cast<TensorType>();
-  auto memorySpaceAttr = IntegerAttr::get(
-      IntegerType::get(tensorType.getContext(), 64), memorySpace);

   // Case 1: Unranked memref type.
   if (auto unrankedTensorType = tensorType.dyn_cast<UnrankedTensorType>()) {
     assert(!layout && "UnrankedTensorType cannot have a layout map");
     return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
-                                   memorySpaceAttr);
+                                   memorySpace);
   }

   // Case 2: Ranked memref type with specified layout.
@@ -748,7 +747,7 @@
   if (layout) {
     return MemRefType::get(rankedTensorType.getShape(),
                            rankedTensorType.getElementType(), layout,
-                           memorySpaceAttr);
+                           memorySpace);
   }

   return options.unknownTypeConverterFn(value, memorySpace, options);
@@ -756,7 +755,7 @@
 BaseMemRefType
 bufferization::getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
-                                                   unsigned memorySpace) {
+                                                   Attribute memorySpace) {
   // Case 1: Unranked memref type.
   if (auto unrankedTensorType = tensorType.dyn_cast<UnrankedTensorType>()) {
     return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
@@ -764,8 +763,6 @@
   }

   // Case 2: Ranked memref type.
-  auto memorySpaceAttr = IntegerAttr::get(
-      IntegerType::get(tensorType.getContext(), 64), memorySpace);
   auto rankedTensorType = tensorType.cast<RankedTensorType>();
   int64_t dynamicOffset = ShapedType::kDynamic;
   SmallVector<int64_t> dynamicStrides(rankedTensorType.getRank(),
@@ -774,14 +771,14 @@
                                       dynamicOffset, dynamicStrides);
   return MemRefType::get(rankedTensorType.getShape(),
                          rankedTensorType.getElementType(), stridedLayout,
-                         memorySpaceAttr);
+                         memorySpace);
 }

 /// Return a MemRef type with a static identity layout (i.e., no layout map).
 /// If the given tensor type is unranked, return an unranked MemRef type.
 BaseMemRefType
 bufferization::getMemRefTypeWithStaticIdentityLayout(TensorType tensorType,
-                                                     unsigned memorySpace) {
+                                                     Attribute memorySpace) {
   // Case 1: Unranked memref type.
   if (auto unrankedTensorType = tensorType.dyn_cast<UnrankedTensorType>()) {
     return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
@@ -790,12 +787,10 @@
   // Case 2: Ranked memref type.
   auto rankedTensorType = tensorType.cast<RankedTensorType>();
-  auto memorySpaceAttr = IntegerAttr::get(
-      IntegerType::get(tensorType.getContext(), 64), memorySpace);
   MemRefLayoutAttrInterface layout = {};
   return MemRefType::get(rankedTensorType.getShape(),
                          rankedTensorType.getElementType(), layout,
-                         memorySpaceAttr);
+                         memorySpace);
 }

 bool bufferization::detail::defaultIsRepetitiveRegion(
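The type helpers above now thread the attribute through unchanged, so a null memory space produces a memref type with no memory-space annotation. A hedged sketch of the resulting types (the expected results are noted in comments):

```cpp
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"

using namespace mlir;

void typeExamples(MLIRContext *ctx) {
  Builder b(ctx);
  auto tensorTy = RankedTensorType::get({5}, b.getF32Type());
  // Null attribute (the default argument): memref<5xf32>.
  BaseMemRefType m0 =
      bufferization::getMemRefTypeWithStaticIdentityLayout(tensorTy);
  // Explicit integer space, passed as an attribute: memref<5xf32, 1>.
  BaseMemRefType m1 = bufferization::getMemRefTypeWithStaticIdentityLayout(
      tensorTy, b.getI64IntegerAttr(1));
  (void)m0;
  (void)m1;
}
```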
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
@@ -238,7 +238,7 @@
   assert(value == getResult() && "invalid value");

   // Compute memory space of this allocation.
-  unsigned memorySpace;
+  Attribute memorySpace;
   if (getMemorySpace().has_value()) {
     memorySpace = *getMemorySpace();
   } else if (getCopy()) {
@@ -246,7 +246,7 @@
         bufferization::getBufferType(getCopy(), options, fixedTypes);
     if (failed(copyBufferType))
       return failure();
-    memorySpace = copyBufferType->getMemorySpaceAsInt();
+    memorySpace = copyBufferType->getMemorySpace();
   } else if (options.defaultMemorySpace.has_value()) {
     memorySpace = *options.defaultMemorySpace;
   } else {
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
--- a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
@@ -218,7 +218,7 @@
   // Configure type converter.
   LayoutMapOption unknownTypeConversionOption =
       parseLayoutMapOption(unknownTypeConversion);
-  opt.unknownTypeConverterFn = [=](Value value, unsigned memorySpace,
+  opt.unknownTypeConverterFn = [=](Value value, Attribute memorySpace,
                                    const BufferizationOptions &options) {
     auto tensorType = value.getType().cast<TensorType>();
     if (unknownTypeConversionOption == LayoutMapOption::IdentityLayoutMap)
@@ -507,7 +507,7 @@
   options.allowUnknownOps = true;
   options.createDeallocs = false;
   options.enforceAliasingInvariants = false;
-  options.unknownTypeConverterFn = [](Value value, unsigned memorySpace,
+  options.unknownTypeConverterFn = [](Value value, Attribute memorySpace,
                                       const BufferizationOptions &options) {
     return getMemRefTypeWithStaticIdentityLayout(
         value.getType().cast<TensorType>(), memorySpace);
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
--- a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
@@ -86,7 +86,7 @@
   assert(rankedMemrefType && "buffer layout not supported on unranked tensors");
   return MemRefType::get(
       rankedMemrefType.getShape(), rankedMemrefType.getElementType(),
-      layoutAttr.getValue(), rankedMemrefType.getMemorySpaceAsInt());
+      layoutAttr.getValue(), rankedMemrefType.getMemorySpace());
 }

 /// Return the FuncOp called by `callOp`.
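Out-of-tree pipelines that install their own `unknownTypeConverterFn` need the same mechanical signature update as the in-tree lambdas above; a sketch (the identity-layout choice simply mirrors the in-tree callbacks):

```cpp
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"

using namespace mlir;
using namespace mlir::bufferization;

void installUnknownTypeConverter(BufferizationOptions &options) {
  // Before: [](Value value, unsigned memorySpace, ...)
  options.unknownTypeConverterFn = [](Value value, Attribute memorySpace,
                                      const BufferizationOptions &options) {
    return getMemRefTypeWithStaticIdentityLayout(
        value.getType().cast<TensorType>(), memorySpace);
  };
}
```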
diff --git a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
--- a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -294,14 +294,12 @@
       return thenBufferType;

     // Memory space mismatch.
-    if (thenBufferType.getMemorySpaceAsInt() !=
-        elseBufferType.getMemorySpaceAsInt())
+    if (thenBufferType.getMemorySpace() != elseBufferType.getMemorySpace())
       return op->emitError("inconsistent memory space on then/else branches");

     // Layout maps are different: Promote to fully dynamic layout map.
     return getMemRefTypeWithFullyDynamicLayout(
-        opResult.getType().cast<TensorType>(),
-        thenBufferType.getMemorySpaceAsInt());
+        opResult.getType().cast<TensorType>(), thenBufferType.getMemorySpace());
   }

   BufferRelation bufferRelation(Operation *op, OpResult opResult,
@@ -445,13 +443,12 @@
   auto iterRanked = initArgBufferType->cast<MemRefType>();
   assert(llvm::equal(yieldedRanked.getShape(), iterRanked.getShape()) &&
          "expected same shape");
-  assert(yieldedRanked.getMemorySpaceAsInt() ==
-         iterRanked.getMemorySpaceAsInt() &&
+  assert(yieldedRanked.getMemorySpace() == iterRanked.getMemorySpace() &&
          "expected same memory space");
 #endif // NDEBUG
   return getMemRefTypeWithFullyDynamicLayout(
       iterArg.getType().cast<RankedTensorType>(),
-      yieldedRanked.getMemorySpaceAsInt());
+      yieldedRanked.getMemorySpace());
 }

 /// Return `true` if the given loop may have 0 iterations.
diff --git a/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp b/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
--- a/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
@@ -33,7 +33,7 @@
   // should be disallowed.
   options.allowReturnAllocs = true;
   options.functionBoundaryTypeConversion = LayoutMapOption::IdentityLayoutMap;
-  options.unknownTypeConverterFn = [](Value value, unsigned memorySpace,
+  options.unknownTypeConverterFn = [](Value value, Attribute memorySpace,
                                       const BufferizationOptions &options) {
     return getMemRefTypeWithStaticIdentityLayout(
         value.getType().cast<TensorType>(), memorySpace);
diff --git a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
--- a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -70,9 +70,8 @@
       layout = rankedMemRefType.getLayout();

     // Compute the new memref type.
-    Type resultMemRefType =
-        getMemRefType(castOp.getResult(), options, layout,
-                      sourceMemRefType.getMemorySpaceAsInt());
+    Type resultMemRefType = getMemRefType(castOp.getResult(), options, layout,
+                                          sourceMemRefType.getMemorySpace());

     // Replace the op with a memref.cast.
     assert(memref::CastOp::areCastCompatible(resultBuffer->getType(),
@@ -127,7 +126,7 @@
       // If dims cannot be collapsed, this op bufferizes to a new allocation.
       RankedTensorType tensorResultType = collapseShapeOp.getResultType();
       return bufferization::getMemRefTypeWithStaticIdentityLayout(
-          tensorResultType, srcBufferType.getMemorySpaceAsInt());
+          tensorResultType, srcBufferType.getMemorySpace());
     }

     return memref::CollapseShapeOp::computeCollapsedType(
@@ -188,7 +187,7 @@
       auto memrefType =
           MemRefType::get(collapseShapeOp.getSrcType().getShape(),
                           collapseShapeOp.getSrcType().getElementType(),
-                          AffineMap(), bufferType.getMemorySpaceAsInt());
+                          AffineMap(), bufferType.getMemorySpace());
       buffer = rewriter.create<bufferization::ToMemrefOp>(
           op->getLoc(), memrefType, *tensorAlloc);
     }
@@ -436,7 +435,7 @@
         fromElementsOp.getResult().cast<OpResult>(), options);

     // TODO: Implement memory space for this op.
-    if (options.defaultMemorySpace != static_cast<unsigned>(0))
+    if (options.defaultMemorySpace != Attribute())
       return op->emitError("memory space not implemented yet");

     // Allocate a buffer for the result.
@@ -556,7 +555,7 @@
         generateOp.getResult().cast<OpResult>(), options);

     // TODO: Implement memory space for this op.
-    if (options.defaultMemorySpace != static_cast<unsigned>(0))
+    if (options.defaultMemorySpace != Attribute())
       return op->emitError("memory space not implemented yet");

     // Allocate memory.
@@ -951,7 +950,7 @@
       return failure();
     auto resultMemRefType = getMemRefType(
         reshapeOp.getResult(), options, /*layout=*/{},
-        srcBuffer->getType().cast<BaseMemRefType>().getMemorySpaceAsInt());
+        srcBuffer->getType().cast<BaseMemRefType>().getMemorySpace());
     replaceOpWithNewBufferizedOp<memref::ReshapeOp>(
         rewriter, op, resultMemRefType, *srcBuffer, *shapeBuffer);
     return success();
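Every `getMemorySpaceAsInt()` call removed above follows the same recipe: compare or forward the raw attribute instead of asserting that it is an integer. Reduced to its core (a hypothetical helper, same idiom as the SCF and Arith changes):

```cpp
#include "mlir/IR/BuiltinTypes.h"

using namespace mlir;

// getMemorySpaceAsInt() asserts on non-integer spaces (e.g. the string
// space exercised by the new tests); comparing attributes handles every
// case, with the null attribute denoting the default space.
bool sameMemorySpace(BaseMemRefType a, BaseMemRefType b) {
  return a.getMemorySpace() == b.getMemorySpace();
}
```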
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
@@ -171,7 +171,7 @@
 // CHECK-LABEL: func @alloc_tensor_with_memory_space()
 func.func @alloc_tensor_with_memory_space() -> tensor<5xf32> {
   // CHECK: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<5xf32, 1>
-  %0 = bufferization.alloc_tensor() {memory_space = 1 : ui64} : tensor<5xf32>
+  %0 = bufferization.alloc_tensor() {memory_space = 1 : i64} : tensor<5xf32>
   // CHECK: %[[r:.*]] = bufferization.to_tensor %[[alloc]]
   // CHECK: memref.dealloc %[[alloc]]
   // CHECK: return %[[r]]
diff --git a/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir b/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir
--- a/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/tensor-copy-insertion.mlir
@@ -40,10 +40,10 @@
 {
   // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<5xf32>
   // The second alloc_tensor should not have a copy operand.
-  // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<5xf32>
+  // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 0 : i64} : tensor<5xf32>
   // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {bufferization.escape = [true]} : tensor<5xf32>
-  // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {bufferization.escape = [true], memory_space = 0 : ui64} : tensor<5xf32>
+  // CHECK-NO-DEALLOC: bufferization.alloc_tensor() {bufferization.escape = [true], memory_space = 0 : i64} : tensor<5xf32>
   %0 = bufferization.alloc_tensor() : tensor<5xf32>
   %1 = tensor.insert %f into %0[%idx] : tensor<5xf32>
   return %0, %1 : tensor<5xf32>, tensor<5xf32>
@@ -55,7 +55,7 @@
 func.func @do_not_copy_when_overwritten(%t: tensor<5xf32>, %f: f32)
   -> (tensor<5xf32>, tensor<5xf32>)
 {
-  // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<5xf32>
+  // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 0 : i64} : tensor<5xf32>
   // CHECK: linalg.generic {{.*}} outs(%[[alloc]] : tensor<5xf32>)
   %r = linalg.generic {
     indexing_maps = [affine_map<(d0) -> (d0)>],
@@ -74,7 +74,7 @@
   -> (tensor<3xf32>)
 {
   %0 = tensor.extract_slice %t[0][3][1] : tensor<5xf32> to tensor<3xf32>
-  // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<3xf32>
+  // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 0 : i64} : tensor<3xf32>
   // CHECK: linalg.generic {{.*}} outs(%[[alloc]] : tensor<3xf32>)
   %r = linalg.generic {
     indexing_maps = [affine_map<(d0) -> (d0)>],
diff --git a/mlir/test/Dialect/Bufferization/invalid.mlir b/mlir/test/Dialect/Bufferization/invalid.mlir
--- a/mlir/test/Dialect/Bufferization/invalid.mlir
+++ b/mlir/test/Dialect/Bufferization/invalid.mlir
@@ -78,12 +78,3 @@
   call @foo(%0) : (tensor<20x40xf32, #DCSR>) -> ()
   return
 }
-
-// -----
-
-func.func @alloc_tensor_invalid_memory_space_attr(%sz: index) {
-  // expected-error @+1{{'bufferization.alloc_tensor' op attribute 'memory_space' failed to satisfy constraint: 64-bit unsigned integer attribute}}
-  %0 = bufferization.alloc_tensor(%sz) {memory_space = "foo"} : tensor<?xf32>
-  return
-}
-
diff --git a/mlir/test/Dialect/Bufferization/ops.mlir b/mlir/test/Dialect/Bufferization/ops.mlir
--- a/mlir/test/Dialect/Bufferization/ops.mlir
+++ b/mlir/test/Dialect/Bufferization/ops.mlir
@@ -46,6 +46,8 @@
   %c100 = arith.constant 100 : index
   // CHECK: bufferization.alloc_tensor() size_hint=
   %6 = bufferization.alloc_tensor() size_hint=%c100 : tensor<100x100xf64, #CSR>
+  // CHECK: bufferization.alloc_tensor(%{{.+}}) {memory_space = "foo"} : tensor<?xf32>
+  %7 = bufferization.alloc_tensor(%sz) {memory_space = "foo"} : tensor<?xf32>

   return %1 : tensor<?x3xf32>
 }
diff --git a/mlir/test/Dialect/SCF/one-shot-bufferize.mlir b/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
--- a/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
@@ -717,7 +717,7 @@
 {
   %c0 = arith.constant 0 : index
   // CHECK: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<5xf32, 1>
-  %0 = bufferization.alloc_tensor() {memory_space = 1 : ui64} : tensor<5xf32>
+  %0 = bufferization.alloc_tensor() {memory_space = 1 : i64} : tensor<5xf32>
   // CHECK: scf.if %{{.*}} -> (memref<5xf32, 1>) {
   %1 = scf.if %c -> tensor<5xf32> {
     // CHECK: %[[cloned:.*]] = bufferization.clone %[[alloc]]
@@ -747,7 +747,7 @@
 func.func @scf_execute_region_memory_space(%f: f32) -> f32 {
   %c0 = arith.constant 0 : index
   %0 = scf.execute_region -> tensor<5xf32> {
-    %1 = bufferization.alloc_tensor() {memory_space = 1 : ui64} : tensor<5xf32>
+    %1 = bufferization.alloc_tensor() {memory_space = 1 : i64} : tensor<5xf32>
     %2 = tensor.insert %f into %1[%c0] : tensor<5xf32>
     scf.yield %2 : tensor<5xf32>
   }
@@ -767,8 +767,8 @@
 {
   // CHECK: memref.alloc(%{{.*}}) {{.*}} : memref<?xf32, 1>
   // CHECK: memref.alloc(%{{.*}}) {{.*}} : memref<?xf32, 1>
-  %A = bufferization.alloc_tensor(%sz) {memory_space = 1 : ui64} : tensor<?xf32>
-  %B = bufferization.alloc_tensor(%sz) {memory_space = 1 : ui64} : tensor<?xf32>
+  %A = bufferization.alloc_tensor(%sz) {memory_space = 1 : i64} : tensor<?xf32>
+  %B = bufferization.alloc_tensor(%sz) {memory_space = 1 : i64} : tensor<?xf32>

   // CHECK: scf.for {{.*}} {
   %r0:2 = scf.for %i = %lb to %ub step %step iter_args(%tA = %A, %tB = %B)
diff --git a/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir b/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir
@@ -22,7 +22,7 @@
 // CHECK-LABEL: func.func @fold_yield_arg_zero() -> tensor<1024x1024xf64> {
 // CHECK: %[[VAL_0:.*]] = arith.constant dense<0.000000e+00> : tensor<1024x1024xf64>
-// CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() copy(%[[VAL_0]]) {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<1024x1024xf64>
+// CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() copy(%[[VAL_0]]) {bufferization.escape = [false], memory_space = 0 : i64} : tensor<1024x1024xf64>
 // CHECK: return %[[VAL_1]] : tensor<1024x1024xf64>
 // CHECK: }
 func.func @fold_yield_arg_zero() -> tensor<1024x1024xf64> {
@@ -41,7 +41,7 @@
 // CHECK-LABEL: func.func @fold_yield_direct_zero() -> tensor<32xf64> {
 // CHECK: %[[VAL_0:.*]] = arith.constant dense<0.000000e+00> : tensor<32xf64>
-// CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() copy(%[[VAL_0]]) {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<32xf64>
+// CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() copy(%[[VAL_0]]) {bufferization.escape = [false], memory_space = 0 : i64} : tensor<32xf64>
 // CHECK: return %[[VAL_1]] : tensor<32xf64>
 // CHECK: }
 func.func @fold_yield_direct_zero() -> tensor<32xf64> {
@@ -65,7 +65,7 @@
 // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[VAL_6:.*]] = arith.constant dense<0.000000e+00> : tensor<8x8xf64>
 // CHECK: %[[VAL_7:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) {bufferization.escape = [false]} : tensor<8x8xf64>
-// CHECK: %[[VAL_8:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) {bufferization.escape = [false], memory_space = 0 : ui64} : tensor<8x8xf64>
+// CHECK: %[[VAL_8:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) {bufferization.escape = [false], memory_space = 0 : i64} : tensor<8x8xf64>
 // CHECK: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<8x8xf64>
 // CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<8x8xf64>
 // CHECK: %[[VAL_11:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{.*}}>> to memref<?xindex>
diff --git a/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir b/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
--- a/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
@@ -261,7 +261,7 @@
   // CHECK: %[[alloc_tensor:.*]] = memref.alloc{{.*}} : memref<?xf32, 3>
   // CHECK: memref.copy %[[t]], %[[alloc_tensor]]
   %0 = bufferization.alloc_tensor() copy(%t)
-      {memory_space = 3 : ui64} : tensor<?xf32>
+      {memory_space = 3 : i64} : tensor<?xf32>
   // CHECK: %[[padded_alloc:.*]] = memref.alloc() {{.*}} : memref<15xf32, 3>
   // CHECK: linalg.map
   // CHECK: outs(%[[padded_alloc]] : memref<15xf32, 3>)
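The updated tests above rely on the widened `AnyAttr` constraint: integer spaces now print as plain `i64` attributes, and non-integer spaces such as `"foo"` round-trip. A builder-side sketch of the same thing (obtaining the op is elided; only the `setMemorySpaceAttr` accessor comes from the ODS definition touched by this patch):

```cpp
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// Sketch: tag an alloc_tensor with a non-integer memory space, matching
// the textual form `{memory_space = "foo"}` in the new round-trip test.
void tagWithNamedSpace(bufferization::AllocTensorOp allocTensorOp) {
  Builder b(allocTensorOp.getContext());
  allocTensorOp.setMemorySpaceAttr(b.getStringAttr("foo"));
}
```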