diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
@@ -51,7 +51,8 @@
   let assemblyFormat = "$source attr-dict `:` type($source) `to` type($result)";
 }
 
-def SparseTensor_ConvertOp : SparseTensor_Op<"convert", [SameOperandsAndResultType]>,
+def SparseTensor_ConvertOp : SparseTensor_Op<"convert",
+    [NoSideEffect, SameOperandsAndResultType]>,
     Arguments<(ins AnyTensor:$source)>,
     Results<(outs AnyTensor:$dest)> {
   string summary = "Converts between different tensor types";
@@ -81,6 +82,7 @@
   }];
 
   let assemblyFormat = "$source attr-dict `:` type($source) `to` type($dest)";
+  let hasFolder = 1;
 }
 
 def SparseTensor_ToPointersOp : SparseTensor_Op<"pointers", [NoSideEffect]>,
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -230,6 +230,12 @@
   return op.emitError("unexpected type in convert");
 }
 
+OpFoldResult ConvertOp::fold(ArrayRef<Attribute> operands) {
+  if (getType() == source().getType())
+    return source();
+  return {};
+}
+
 static LogicalResult verify(ToPointersOp op) {
   if (auto e = getSparseTensorEncoding(op.tensor().getType())) {
     if (failed(isInBounds(op.dim(), op.tensor())))
diff --git a/mlir/test/Dialect/SparseTensor/conversion.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir
--- a/mlir/test/Dialect/SparseTensor/conversion.mlir
+++ b/mlir/test/Dialect/SparseTensor/conversion.mlir
@@ -112,6 +112,14 @@
   return %0 : tensor
 }
 
+// CHECK-LABEL: func @sparse_nop_convert(
+//  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
+//       CHECK: return %[[A]] : !llvm.ptr<i8>
+func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
+  %0 = sparse_tensor.convert %arg0 : tensor<64xf32, #SparseVector> to tensor<64xf32, #SparseVector>
+  return %0 : tensor<64xf32, #SparseVector>
+}
+
 // CHECK-LABEL: func @sparse_convert_1d(
 //  CHECK-SAME: %[[A:.*]]: tensor<?xi32>) -> !llvm.ptr<i8>
 //   CHECK-DAG: %[[C0:.*]] = constant 0 : index
diff --git a/mlir/test/Dialect/SparseTensor/fold.mlir b/mlir/test/Dialect/SparseTensor/fold.mlir
new file mode 100644
--- /dev/null
+++ b/mlir/test/Dialect/SparseTensor/fold.mlir
@@ -0,0 +1,20 @@
+// RUN: mlir-opt %s --canonicalize --cse | FileCheck %s
+
+#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
+
+// CHECK-LABEL: func @sparse_nop_convert(
+//  CHECK-SAME: %[[A:.*]]: tensor<64xf32, #{{.*}}>)
+//       CHECK: return %[[A]] : tensor<64xf32, #{{.*}}>
+func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
+  %0 = sparse_tensor.convert %arg0 : tensor<64xf32, #SparseVector> to tensor<64xf32, #SparseVector>
+  return %0 : tensor<64xf32, #SparseVector>
+}
+
+// CHECK-LABEL: func @sparse_dce_convert(
+//  CHECK-SAME: %[[A:.*]]: tensor<64xf32>)
+//   CHECK-NOT: sparse_tensor.convert
+//       CHECK: return
+func @sparse_dce_convert(%arg0: tensor<64xf32>) {
+  %0 = sparse_tensor.convert %arg0 : tensor<64xf32> to tensor<64xf32, #SparseVector>
+  return
+}