diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td --- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td @@ -86,7 +86,6 @@ }]; let assemblyFormat = "$source attr-dict `:` type($source) `to` type($dest)"; - let hasFolder = 1; let hasVerifier = 1; } diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp --- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp +++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp @@ -333,12 +333,6 @@ return emitError("unexpected type in convert"); } -OpFoldResult ConvertOp::fold(ArrayRef<Attribute> operands) { - if (getType() == getSource().getType()) - return getSource(); - return {}; -} - LogicalResult ToPointersOp::verify() { auto e = getSparseTensorEncoding(getTensor().getType()); if (failed(isInBounds(getDimension().getZExtValue(), getTensor()))) diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp @@ -720,6 +720,22 @@ } }; +/// Sparse codegen rule for the convert operator. +class SparseConvertConverter : public OpConversionPattern<ConvertOp> {
+public: + using OpConversionPattern<ConvertOp>::OpConversionPattern; + LogicalResult + matchAndRewrite(ConvertOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + if (op.getType() != op.getSource().getType()) { + // This should be handled by rewriting before codegen.
+ return failure(); + } + rewriter.replaceOp(op, adaptor.getSource()); + return success(); + } +}; + } // namespace //===----------------------------------------------------------------------===// @@ -744,6 +760,6 @@ SparseTensorDeallocConverter, SparseTensorLoadConverter, SparseExpandConverter, SparseCompressConverter, SparseInsertConverter, SparseToPointersConverter, - SparseToIndicesConverter, SparseToValuesConverter>( - typeConverter, patterns.getContext()); + SparseToIndicesConverter, SparseToValuesConverter, + SparseConvertConverter>(typeConverter, patterns.getContext()); } diff --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir --- a/mlir/test/Dialect/SparseTensor/codegen.mlir +++ b/mlir/test/Dialect/SparseTensor/codegen.mlir @@ -518,3 +518,15 @@ %1 = sparse_tensor.load %0 hasInserts : tensor<128xf64, #SparseVector> return %1 : tensor<128xf64, #SparseVector> } + +// CHECK-LABEL: func.func @sparse_nop_convert( +// CHECK-SAME: %[[A0:.*]]: memref<1xindex>, +// CHECK-SAME: %[[A1:.*]]: memref<3xindex>, +// CHECK-SAME: %[[A2:.*]]: memref<?xi32>, +// CHECK-SAME: %[[A3:.*]]: memref<?xi64>, +// CHECK-SAME: %[[A4:.*]]: memref<?xf32>) +// CHECK: return %[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]] : memref<1xindex>, memref<3xindex>, memref<?xi32>, memref<?xi64>, memref<?xf32> +func.func @sparse_nop_convert(%arg0: tensor<?xf32, #SparseVector>) -> tensor<?xf32, #SparseVector> { + %0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector> to tensor<?xf32, #SparseVector> + return %0 : tensor<?xf32, #SparseVector> +} diff --git a/mlir/test/Dialect/SparseTensor/fold.mlir b/mlir/test/Dialect/SparseTensor/fold.mlir --- a/mlir/test/Dialect/SparseTensor/fold.mlir +++ b/mlir/test/Dialect/SparseTensor/fold.mlir @@ -2,15 +2,6 @@ #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> -// CHECK-LABEL: func @sparse_nop_convert( -// CHECK-SAME: %[[A:.*]]: tensor<64xf32, #sparse_tensor.encoding<{{{.*}}}>>) -// CHECK-NOT: sparse_tensor.convert -// CHECK: return %[[A]] : tensor<64xf32, #sparse_tensor.encoding<{{{.*}}}>> -func.func @sparse_nop_convert(%arg0:
tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> { - %0 = sparse_tensor.convert %arg0 : tensor<64xf32, #SparseVector> to tensor<64xf32, #SparseVector> - return %0 : tensor<64xf32, #SparseVector> -} - // CHECK-LABEL: func @sparse_dce_convert( // CHECK-SAME: %[[A:.*]]: tensor<64xf32>) // CHECK-NOT: sparse_tensor.convert