diff --git a/mlir/include/mlir/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.h b/mlir/include/mlir/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.h
new file mode
--- /dev/null
+++ b/mlir/include/mlir/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.h
@@ -0,0 +1,20 @@
+//===- BufferizableOpInterfaceImpl.h - Impl. of BufferizableOpInterface ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_BUFFERIZABLEOPINTERFACEIMPL_H_
+#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_BUFFERIZABLEOPINTERFACEIMPL_H_
+
+namespace mlir {
+class DialectRegistry;
+
+namespace sparse_tensor {
+void registerBufferizableOpInterfaceExternalModels(DialectRegistry &registry);
+} // namespace sparse_tensor
+} // namespace mlir
+
+#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_BUFFERIZABLEOPINTERFACEIMPL_H_
diff --git a/mlir/include/mlir/InitAllDialects.h b/mlir/include/mlir/InitAllDialects.h
--- a/mlir/include/mlir/InitAllDialects.h
+++ b/mlir/include/mlir/InitAllDialects.h
@@ -53,6 +53,7 @@
 #include "mlir/Dialect/Shape/IR/Shape.h"
 #include "mlir/Dialect/Shape/Transforms/BufferizableOpInterfaceImpl.h"
 #include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
+#include "mlir/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.h"
 #include "mlir/Dialect/Tensor/IR/Tensor.h"
 #include "mlir/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.h"
 #include "mlir/Dialect/Tensor/IR/TensorTilingInterfaceImpl.h"
@@ -119,6 +120,7 @@
   linalg::registerBufferizableOpInterfaceExternalModels(registry);
   scf::registerBufferizableOpInterfaceExternalModels(registry);
   shape::registerBufferizableOpInterfaceExternalModels(registry);
+  sparse_tensor::registerBufferizableOpInterfaceExternalModels(registry);
   tensor::registerBufferizableOpInterfaceExternalModels(registry);
   tensor::registerInferTypeOpInterfaceExternalModels(registry);
   tensor::registerTilingOpInterfaceExternalModels(registry);
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp
new file mode
--- /dev/null
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -0,0 +1,129 @@
+//===- BufferizableOpInterfaceImpl.cpp - Impl. of BufferizableOpInterface -===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// These BufferizableOpInterface implementations provide analysis-related
+// interface methods only. They are getting bufferized by the
+// SparseTensorConversion pass.
+
+#include "mlir/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.h"
+
+#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
+#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
+#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
+#include "mlir/IR/Dialect.h"
+#include "mlir/IR/Operation.h"
+#include "mlir/IR/PatternMatch.h"
+
+using namespace mlir::bufferization;
+using namespace mlir::sparse_tensor;
+
+namespace mlir {
+namespace sparse_tensor {
+namespace {
+
+struct ConvertOpInterface
+    : public BufferizableOpInterface::ExternalModel<ConvertOpInterface,
+                                                    sparse_tensor::ConvertOp> {
+  bool bufferizesToAllocation(Operation *op, OpResult opResult) const {
+    // ConvertOps may allocate. (Unless they convert between two identical
+    // types, then they fold away.)
+    return true;
+  }
+
+  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
+                              const AnalysisState &state) const {
+    return true;
+  }
+
+  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
+                               const AnalysisState &state) const {
+    return false;
+  }
+
+  SmallVector<OpResult> getAliasingOpResult(Operation *op, OpOperand &opOperand,
+                                            const AnalysisState &state) const {
+    return {};
+  }
+
+  bool isWritable(Operation *op, Value value,
+                  const AnalysisState &state) const {
+    return true;
+  }
+};
+
+struct LoadOpInterface
+    : public BufferizableOpInterface::ExternalModel<LoadOpInterface,
+                                                    sparse_tensor::LoadOp> {
+  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
+                              const AnalysisState &state) const {
+    return false;
+  }
+
+  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
+                               const AnalysisState &state) const {
+    return false;
+  }
+
+  SmallVector<OpResult> getAliasingOpResult(Operation *op, OpOperand &opOperand,
+                                            const AnalysisState &state) const {
+    return {op->getOpResult(0)};
+  }
+
+  BufferRelation bufferRelation(Operation *op, OpResult opResult,
+                                const AnalysisState &state) const {
+    return BufferRelation::Equivalent;
+  }
+};
+
+struct NewOpInterface
+    : public BufferizableOpInterface::ExternalModel<NewOpInterface,
+                                                    sparse_tensor::NewOp> {
+  bool isMemoryWrite(Operation *op, OpResult opResult,
+                     const AnalysisState &state) const {
+    // NewOps allocate but do not write.
+    return false;
+  }
+
+  bool bufferizesToAllocation(Operation *op, OpResult opResult) const {
+    return true;
+  }
+};
+
+struct ReleaseOpInterface
+    : public BufferizableOpInterface::ExternalModel<ReleaseOpInterface,
+                                                    sparse_tensor::ReleaseOp> {
+  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
+                              const AnalysisState &state) const {
+    return false;
+  }
+
+  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
+                               const AnalysisState &state) const {
+    return false;
+  }
+
+  SmallVector<OpResult> getAliasingOpResult(Operation *op, OpOperand &opOperand,
+                                            const AnalysisState &state) const {
+    return {};
+  }
+};
+
+} // namespace
+} // namespace sparse_tensor
+} // namespace mlir
+
+void mlir::sparse_tensor::registerBufferizableOpInterfaceExternalModels(
+    DialectRegistry &registry) {
+  registry.addExtension(
+      +[](MLIRContext *ctx, sparse_tensor::SparseTensorDialect *dialect) {
+        sparse_tensor::ConvertOp::attachInterface<ConvertOpInterface>(*ctx);
+        sparse_tensor::LoadOp::attachInterface<LoadOpInterface>(*ctx);
+        sparse_tensor::NewOp::attachInterface<NewOpInterface>(*ctx);
+        sparse_tensor::ReleaseOp::attachInterface<ReleaseOpInterface>(*ctx);
+      });
+}
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CMakeLists.txt b/mlir/lib/Dialect/SparseTensor/Transforms/CMakeLists.txt
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CMakeLists.txt
@@ -1,4 +1,5 @@
 add_mlir_dialect_library(MLIRSparseTensorTransforms
+  BufferizableOpInterfaceImpl.cpp
   CodegenUtils.cpp
   Sparsification.cpp
   SparseTensorConversion.cpp
@@ -13,6 +14,7 @@
   LINK_LIBS PUBLIC
   MLIRArithmeticDialect
   MLIRBufferizationDialect
+  MLIRBufferizationTransforms
   MLIRComplexDialect
   MLIRFuncDialect
   MLIRIR
diff --git a/mlir/test/Dialect/SparseTensor/one_shot_bufferize_tensor_copy_insertion.mlir b/mlir/test/Dialect/SparseTensor/one_shot_bufferize_tensor_copy_insertion.mlir
new file mode
--- /dev/null
+++ b/mlir/test/Dialect/SparseTensor/one_shot_bufferize_tensor_copy_insertion.mlir
@@ -0,0 +1,42 @@
+// RUN: mlir-opt %s -tensor-copy-insertion="allow-return-allocs" | FileCheck %s
+// RUN: mlir-opt %s -tensor-copy-insertion="bufferize-function-boundaries allow-return-allocs" | FileCheck %s --check-prefix=CHECK-FUNC
+
+#DCSR = #sparse_tensor.encoding<{
+  dimLevelType = [ "compressed", "compressed" ],
+  dimOrdering = affine_map<(i,j) -> (i,j)>
+}>
+
+// CHECK-LABEL: func @bufferization_alloc_tensor
+// CHECK-FUNC-LABEL: func @bufferization_alloc_tensor
+func.func @bufferization_alloc_tensor() -> tensor<20x40xf32, #DCSR> {
+  // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false]}
+  // CHECK-FUNC: bufferization.alloc_tensor() {bufferization.escape = [true]}
+  %0 = bufferization.alloc_tensor() : tensor<20x40xf32, #DCSR>
+  %1 = sparse_tensor.load %0 : tensor<20x40xf32, #DCSR>
+  return %1 : tensor<20x40xf32, #DCSR>
+}
+
+!Filename = !llvm.ptr<i8>
+// CHECK-LABEL: func @sparse_tensor_new
+// CHECK-FUNC-LABEL: func @sparse_tensor_new
+func.func @sparse_tensor_new(%file: !Filename) -> tensor<20x40xf32, #DCSR> {
+  // CHECK: sparse_tensor.new {{.*}} {bufferization.escape = [false]}
+  // CHECK-FUNC: sparse_tensor.new {{.*}} {bufferization.escape = [true]}
+  %0 = sparse_tensor.new %file : !Filename to tensor<20x40xf32, #DCSR>
+  return %0 : tensor<20x40xf32, #DCSR>
+}
+
+// CHECK-LABEL: func @sparse_tensor_convert
+// CHECK-FUNC-LABEL: func @sparse_tensor_convert
+func.func @sparse_tensor_convert() -> tensor<20x40xf32> {
+  // CHECK: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false]}
+  // CHECK-FUNC: %[[alloc:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false]}
+  %0 = bufferization.alloc_tensor() : tensor<20x40xf32, #DCSR>
+  // CHECK: %[[loaded:.*]] = sparse_tensor.load %[[alloc]]
+  // CHECK-FUNC: %[[loaded:.*]] = sparse_tensor.load %[[alloc]]
+  %1 = sparse_tensor.load %0 : tensor<20x40xf32, #DCSR>
+  // CHECK: sparse_tensor.convert %[[loaded]] {bufferization.escape = [false]}
+  // CHECK-FUNC: sparse_tensor.convert %[[loaded]] {bufferization.escape = [true]}
+  %2 = sparse_tensor.convert %1 : tensor<20x40xf32, #DCSR> to tensor<20x40xf32>
+  return %2 : tensor<20x40xf32>
+}
diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
--- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -2082,6 +2082,7 @@
         "lib/Dialect/SparseTensor/Transforms/*.h",
     ]),
    hdrs = [
+        "include/mlir/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.h",
        "include/mlir/Dialect/SparseTensor/Transforms/Passes.h",
        "include/mlir/ExecutionEngine/SparseTensorUtils.h",
    ],
@@ -2090,6 +2091,7 @@
        ":AffineDialect",
        ":ArithmeticDialect",
        ":BufferizationDialect",
+        ":BufferizationTransforms",
        ":ComplexDialect",
        ":FuncDialect",
        ":FuncTransforms",