diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
@@ -86,7 +86,6 @@
 
   }];
   let assemblyFormat = "$source attr-dict `:` type($source) `to` type($dest)";
-  let hasFolder = 1;
   let hasVerifier = 1;
 }
 
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -421,12 +421,6 @@
   return emitError("unexpected type in convert");
 }
 
-OpFoldResult ConvertOp::fold(ArrayRef<Attribute> operands) {
-  if (getType() == getSource().getType())
-    return getSource();
-  return {};
-}
-
 LogicalResult ToPointersOp::verify() {
   auto e = getSparseTensorEncoding(getTensor().getType());
   if (failed(isInBounds(getDimension().getZExtValue(), getTensor())))
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -706,6 +706,22 @@
   }
 };
 
+/// Sparse codegen rule for the convert operator.
+class SparseConvertConverter : public OpConversionPattern<ConvertOp> {
+public:
+  using OpConversionPattern::OpConversionPattern;
+  LogicalResult
+  matchAndRewrite(ConvertOp op, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    if (op.getType() != op.getSource().getType()) {
+      // This should be handled by rewriting before codegen.
+      return failure();
+    }
+    rewriter.replaceOp(op, adaptor.getSource());
+    return success();
+  }
+};
+
 } // namespace
 
 //===----------------------------------------------------------------------===//
@@ -730,6 +746,7 @@
            SparseTensorDeallocConverter, SparseTensorLoadConverter,
            SparseExpandConverter, SparseCompressConverter,
            SparseInsertConverter, SparseToPointersConverter,
-           SparseToIndicesConverter, SparseToValuesConverter>(
+           SparseToIndicesConverter, SparseToValuesConverter,
+           SparseConvertConverter>(
       typeConverter, patterns.getContext());
 }
diff --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir
--- a/mlir/test/Dialect/SparseTensor/codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/codegen.mlir
@@ -449,3 +449,17 @@
   %1 = sparse_tensor.load %0 hasInserts : tensor<128xf64, #SV>
   return %1 : tensor<128xf64, #SV>
 }
+
+// CHECK-LABEL: func.func @sparse_nop_convert(
+// CHECK-SAME:  %[[A0:.*]]: memref<1xindex>,
+// CHECK-SAME:  %[[A1:.*]]: memref<3xindex>,
+// CHECK-SAME:  %[[A2:.*]]: memref<?xi32>,
+// CHECK-SAME:  %[[A3:.*]]: memref<?xi64>,
+// CHECK-SAME:  %[[A4:.*]]: memref<?xf32>)
+// CHECK:       return %[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]] : memref<1xindex>, memref<3xindex>, memref<?xi32>, memref<?xi64>, memref<?xf32>
+func.func @sparse_nop_convert(%arg0: tensor<?xf32, #SparseVector>) -> tensor<?xf32, #SparseVector> {
+  %0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector> to tensor<?xf32, #SparseVector>
+  return %0 : tensor<?xf32, #SparseVector>
+}
+
+
diff --git a/mlir/test/Dialect/SparseTensor/fold.mlir b/mlir/test/Dialect/SparseTensor/fold.mlir
--- a/mlir/test/Dialect/SparseTensor/fold.mlir
+++ b/mlir/test/Dialect/SparseTensor/fold.mlir
@@ -4,8 +4,8 @@
 
 // CHECK-LABEL: func @sparse_nop_convert(
 // CHECK-SAME:  %[[A:.*]]: tensor<64xf32, #sparse_tensor.encoding<{{{.*}}}>>)
-// CHECK-NOT:   sparse_tensor.convert
-// CHECK:       return %[[A]] : tensor<64xf32, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK:       %[[B:.*]] = sparse_tensor.convert %[[A]]
+// CHECK:       return %[[B]] : tensor<64xf32, #sparse_tensor.encoding<{{{.*}}}>>
 func.func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
   %0 = sparse_tensor.convert %arg0 : tensor<64xf32, #SparseVector> to tensor<64xf32, #SparseVector>
   return %0 : tensor<64xf32, #SparseVector>