diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
@@ -51,6 +51,11 @@
     If neither `copy` nor `memory_space` is specified, the default memory space
     is used during bufferization.
 
+    The optional `size_hint` operand specifies the number of non-zero elements
+    for sparse tensors. The value of `size_hint` must be at least 1 and must
+    not exceed the linear size of the corresponding dense tensor type. If this
+    requirement is not met, the behavior of the operation is undefined.
+
     Both dense and sparse tensor types are supported. The result of a
     `bufferization.alloc_tensor` is a tensor value that can be used like any
     other tensor value. In practice, it is often used as the "out" operand of
@@ -66,10 +71,16 @@
       outs(%c: tensor<?x?xf32, #SparseMatrix>) -> tensor<?x?xf32, #SparseMatrix>
     return %0 : tensor<?x?xf32, #SparseMatrix>
     ```
+
+    ```mlir
+    %c = bufferization.alloc_tensor(%d1, %d2) size_hint = %noe
+      : tensor<?x?xf32, #SparseMatrix>
+    ```
   }];
 
   let arguments = (ins Variadic<Index>:$dynamic_sizes,
                        Optional<AnyTensor>:$copy,
+                       Optional<Index>:$size_hint,
                        OptionalAttr<UI64Attr>:$memory_space);
 
   let results = (outs AnyTensor:$result);
@@ -129,12 +140,16 @@
   }];
 
   let builders = [
-    // Build an op without `copy` or `memory_space`.
+    // Build an op without `copy`, `memory_space`, or `size_hint`.
     OpBuilder<(ins "RankedTensorType":$type, "ValueRange":$dynamicSizes)>,
 
-    // Build an op without `memory_space`.
+    // Build an op without `memory_space` or `size_hint`.
     OpBuilder<(ins "RankedTensorType":$type, "ValueRange":$dynamicSizes,
                    "Value":$copy)>,
+
+    // Build an op without `size_hint`.
+    OpBuilder<(ins "TensorType":$type, "ValueRange":$dynamicSizes,
+                   "Value":$copy, "IntegerAttr":$memory_space)>,
   ];
 
   let hasCanonicalizer = 1;
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
@@ -282,16 +282,24 @@
 void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
                           RankedTensorType type, ValueRange dynamicSizes) {
   build(builder, result, type, dynamicSizes, /*copy=*/Value(),
+        /*size_hint=*/Value(),
         /*memory_space=*/IntegerAttr());
 }
 
 void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
                           RankedTensorType type, ValueRange dynamicSizes,
                           Value copy) {
-  build(builder, result, type, dynamicSizes, copy,
+  build(builder, result, type, dynamicSizes, copy, /*size_hint=*/Value(),
         /*memory_space=*/IntegerAttr());
 }
 
+void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
+                          TensorType type, ValueRange dynamicSizes, Value copy,
+                          IntegerAttr memorySpace) {
+  build(builder, result, type, dynamicSizes, copy, /*size_hint=*/Value(),
+        memorySpace);
+}
+
 namespace {
 /// Change the type of the result of a `bufferization.alloc_tensor` by making
 /// the result type statically sized along dimension that in the original
@@ -383,6 +391,11 @@
     if (parser.parseLParen() || parser.parseOperand(copyOperand) ||
         parser.parseRParen())
       return failure();
+  ParseResult sizeHintKeyword = parser.parseOptionalKeyword("size_hint");
+  OpAsmParser::UnresolvedOperand sizeHintOperand;
+  if (sizeHintKeyword.succeeded())
+    if (parser.parseEqual() || parser.parseOperand(sizeHintOperand))
+      return failure();
   if (parser.parseOptionalAttrDict(result.attributes) || parser.parseColon())
     return failure();
 
@@ -397,10 +410,14 @@
   if (copyKeyword.succeeded())
     if (parser.resolveOperand(copyOperand, type, result.operands))
       return failure();
+  if (sizeHintKeyword.succeeded())
+    if (parser.resolveOperand(sizeHintOperand, indexType, result.operands))
+      return failure();
   result.addAttribute(AllocTensorOp::getOperandSegmentSizeAttr(),
                       parser.getBuilder().getDenseI32ArrayAttr(
                           {static_cast<int32_t>(dynamicSizesOperands.size()),
-                           static_cast<int32_t>(copyKeyword.succeeded())}));
+                           static_cast<int32_t>(copyKeyword.succeeded()),
+                           static_cast<int32_t>(sizeHintKeyword.succeeded())}));
   return success();
 }
 
@@ -408,6 +425,8 @@
   p << "(" << getDynamicSizes() << ")";
   if (getCopy())
     p << " copy(" << getCopy() << ")";
+  if (getSizeHint())
+    p << " size_hint=" << getSizeHint();
   p.printOptionalAttrDict((*this)->getAttrs(), /*elidedAttrs=*/{
                               AllocTensorOp::getOperandSegmentSizeAttr()});
   p << " : ";
diff --git a/mlir/test/Dialect/Bufferization/ops.mlir b/mlir/test/Dialect/Bufferization/ops.mlir
--- a/mlir/test/Dialect/Bufferization/ops.mlir
+++ b/mlir/test/Dialect/Bufferization/ops.mlir
@@ -1,6 +1,10 @@
 // RUN: mlir-opt %s | mlir-opt | FileCheck %s
 // RUN: mlir-opt %s --mlir-print-op-generic | mlir-opt | FileCheck %s
 
+#CSR = #sparse_tensor.encoding<{
+  dimLevelType = ["dense", "compressed"]
+}>
+
 // CHECK-LABEL: func @test_clone
 func.func @test_clone(%buf : memref<*xf32>) -> memref<*xf32> {
   %clone = bufferization.clone %buf : memref<*xf32> to memref<*xf32>
@@ -39,6 +43,9 @@
   %4 = bufferization.alloc_tensor() copy(%t) {escape = true} : tensor<?x5xf32>
   // CHECK: bufferization.alloc_tensor() copy(%{{.*}}) {escape = false} : tensor<?x5xf32>
   %5 = bufferization.alloc_tensor() copy(%t) {escape = false} : tensor<?x5xf32>
+  %c100 = arith.constant 100 : index
+  // CHECK: bufferization.alloc_tensor() size_hint=
+  %6 = bufferization.alloc_tensor() size_hint=%c100 : tensor<100x100xf64, #CSR>
   return %1 : tensor<?x5xf32>
 }
 
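For reference, here is a minimal C++ sketch (not part of the patch) of how client code might construct the op with the new operand. It assumes the dialect header `mlir/Dialect/Bufferization/IR/Bufferization.h` and relies on the generated all-operand builder whose ordering (`dynamic_sizes`, `copy`, `size_hint`, `memory_space`) is visible in the C++ wrappers above; the helper name `createAllocTensorWithHint` is purely illustrative.

```cpp
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"  // assumed header path

using namespace mlir;

/// Sketch: create a `bufferization.alloc_tensor` for a statically shaped
/// (typically sparse) tensor type, passing an `index`-typed `sizeHint`
/// SSA value (matching Optional<Index>:$size_hint) and leaving the
/// optional `copy` operand and `memory_space` attribute unset.
static Value createAllocTensorWithHint(OpBuilder &b, Location loc,
                                       RankedTensorType tensorType,
                                       Value sizeHint) {
  // Argument order follows the generated builder that the C++ build()
  // wrappers in the diff forward to.
  return b.create<bufferization::AllocTensorOp>(
      loc, tensorType, /*dynamicSizes=*/ValueRange(), /*copy=*/Value(),
      /*sizeHint=*/sizeHint, /*memorySpace=*/IntegerAttr());
}
```

Round-tripping such an op through `mlir-opt` should print it in the custom form exercised by the test above, e.g. `bufferization.alloc_tensor() size_hint=%c100 : tensor<100x100xf64, #CSR>`.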