diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -26,9 +26,19 @@ namespace sparse_tensor {
 
 namespace {
 
+template <typename ConcreteModel, typename ConcreteOp>
+struct SparseBufferizableOpInterfaceExternalModel
+    : public BufferizableOpInterface::ExternalModel<ConcreteModel, ConcreteOp> {
+  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
+                          const BufferizationOptions &options) const {
+    return op->emitError(
+        "sparse_tensor ops must be bufferized with the sparse compiler");
+  }
+};
+
 struct ConcatenateOpInterface
-    : public BufferizableOpInterface::ExternalModel<
-          ConcatenateOpInterface, sparse_tensor::ConcatenateOp> {
+    : SparseBufferizableOpInterfaceExternalModel<ConcatenateOpInterface,
+                                                 sparse_tensor::ConcatenateOp> {
   bool bufferizesToAllocation(Operation *op, Value value) const { return true; }
 
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
@@ -52,9 +62,8 @@
   }
 };
 
-struct ConvertOpInterface
-    : public BufferizableOpInterface::ExternalModel<ConvertOpInterface,
-                                                    sparse_tensor::ConvertOp> {
+struct ConvertOpInterface : public SparseBufferizableOpInterfaceExternalModel<
+                                ConvertOpInterface, sparse_tensor::ConvertOp> {
   bool bufferizesToAllocation(Operation *op, Value value) const {
     // ConvertOps may allocate. (Unless they convert between two identical
     // types, then they fold away.)
@@ -83,8 +92,8 @@
 };
 
 struct LoadOpInterface
-    : public BufferizableOpInterface::ExternalModel<LoadOpInterface,
-                                                    sparse_tensor::LoadOp> {
+    : public SparseBufferizableOpInterfaceExternalModel<LoadOpInterface,
+                                                        sparse_tensor::LoadOp> {
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
     return false;
@@ -102,8 +111,8 @@
 };
 
 struct NewOpInterface
-    : public BufferizableOpInterface::ExternalModel<NewOpInterface,
-                                                    sparse_tensor::NewOp> {
+    : public SparseBufferizableOpInterfaceExternalModel<NewOpInterface,
+                                                        sparse_tensor::NewOp> {
   bool resultBufferizesToMemoryWrite(Operation *op, OpResult opResult,
                                      const AnalysisState &state) const {
     // NewOps allocate but do not write.
@@ -114,8 +123,8 @@
 };
 
 struct PackOpInterface
-    : public BufferizableOpInterface::ExternalModel<PackOpInterface,
-                                                    sparse_tensor::PackOp> {
+    : public SparseBufferizableOpInterfaceExternalModel<PackOpInterface,
+                                                        sparse_tensor::PackOp> {
   bool bufferizesToAllocation(Operation *op, Value value) const {
     // PackOp reuses all the buffers instead of allocating new ones
     return false;
@@ -145,9 +154,8 @@
   }
 };
 
-struct UnpackOpInterface
-    : public BufferizableOpInterface::ExternalModel<UnpackOpInterface,
-                                                    sparse_tensor::UnpackOp> {
+struct UnpackOpInterface : public SparseBufferizableOpInterfaceExternalModel<
+                               UnpackOpInterface, sparse_tensor::UnpackOp> {
   bool bufferizesToAllocation(Operation *op, Value value) const {
     // The output buffer is pre-allocated by the user.
     return false;
@@ -178,9 +186,8 @@
   }
 };
 
-struct InsertOpInterface
-    : public BufferizableOpInterface::ExternalModel<InsertOpInterface,
-                                                    sparse_tensor::InsertOp> {
+struct InsertOpInterface : public SparseBufferizableOpInterfaceExternalModel<
+                               InsertOpInterface, sparse_tensor::InsertOp> {
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
     return true;
@@ -201,7 +208,7 @@
 };
 
 struct NumberOfEntriesOpInterface
-    : public BufferizableOpInterface::ExternalModel<
+    : public SparseBufferizableOpInterfaceExternalModel<
           NumberOfEntriesOpInterface, sparse_tensor::NumberOfEntriesOp> {
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
@@ -220,7 +227,7 @@
 };
 
 struct ToCoordinatesBufferOpInterface
-    : public BufferizableOpInterface::ExternalModel<
+    : public SparseBufferizableOpInterfaceExternalModel<
           ToCoordinatesBufferOpInterface,
           sparse_tensor::ToCoordinatesBufferOp> {
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
@@ -242,7 +249,7 @@
 };
 
 struct ToCoordinatesOpInterface
-    : public BufferizableOpInterface::ExternalModel<
+    : public SparseBufferizableOpInterfaceExternalModel<
           ToCoordinatesOpInterface, sparse_tensor::ToCoordinatesOp> {
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
@@ -263,7 +270,7 @@
 };
 
 struct ToPositionsOpInterface
-    : public BufferizableOpInterface::ExternalModel<
+    : public SparseBufferizableOpInterfaceExternalModel<
           ToPositionsOpInterface, sparse_tensor::ToPositionsOp> {
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
@@ -284,8 +291,8 @@
 };
 
 struct ToValuesOpInterface
-    : public BufferizableOpInterface::ExternalModel<ToValuesOpInterface,
-                                                    sparse_tensor::ToValuesOp> {
+    : public SparseBufferizableOpInterfaceExternalModel<
+          ToValuesOpInterface, sparse_tensor::ToValuesOp> {
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
     return true;
diff --git a/mlir/test/Dialect/SparseTensor/one_shot_bufferize_invalid.mlir b/mlir/test/Dialect/SparseTensor/one_shot_bufferize_invalid.mlir
new file mode 100644
--- /dev/null
+++ b/mlir/test/Dialect/SparseTensor/one_shot_bufferize_invalid.mlir
@@ -0,0 +1,12 @@
+// RUN: mlir-opt %s -one-shot-bufferize -verify-diagnostics
+
+#SparseVector = #sparse_tensor.encoding<{
+  lvlTypes = ["compressed"]
+}>
+
+func.func @sparse_tensor_op(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
+  // expected-error @below{{sparse_tensor ops must be bufferized with the sparse compiler}}
+  // expected-error @below{{failed to bufferize op}}
+  %0 = sparse_tensor.convert %arg0 : tensor<64xf32, #SparseVector> to tensor<64xf32, #SparseVector>
+  return %0 : tensor<64xf32, #SparseVector>
+}