diff --git a/mlir/test/Dialect/SparseTensor/fold.mlir b/mlir/test/Dialect/SparseTensor/fold.mlir
--- a/mlir/test/Dialect/SparseTensor/fold.mlir
+++ b/mlir/test/Dialect/SparseTensor/fold.mlir
@@ -1,5 +1,6 @@
 // RUN: mlir-opt %s --canonicalize --cse | FileCheck %s
 
+#DenseVector = #sparse_tensor.encoding<{dimLevelType = ["dense"]}>
 #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
 
 // CHECK-LABEL: func @sparse_nop_convert(
@@ -18,3 +19,28 @@
   %0 = sparse_tensor.convert %arg0 : tensor<64xf32> to tensor<64xf32, #SparseVector>
   return
 }
+
+// CHECK-LABEL: func @sparse_dce_getters(
+//  CHECK-SAME: %[[A:.*]]: tensor<64xf32, #sparse_tensor.encoding<{{{.*}}}>>)
+//   CHECK-NOT: sparse_tensor.pointers
+//   CHECK-NOT: sparse_tensor.indices
+//   CHECK-NOT: sparse_tensor.values
+//       CHECK: return
+func @sparse_dce_getters(%arg0: tensor<64xf32, #SparseVector>) {
+  %c = constant 0 : index
+  %0 = sparse_tensor.pointers %arg0, %c : tensor<64xf32, #SparseVector> to memref<?xindex>
+  %1 = sparse_tensor.indices %arg0, %c : tensor<64xf32, #SparseVector> to memref<?xindex>
+  %2 = sparse_tensor.values %arg0 : tensor<64xf32, #SparseVector> to memref<?xf32>
+  return
+}
+
+// CHECK-LABEL: func @sparse_dce_reconstruct(
+//  CHECK-SAME: %[[A:.*]]: tensor<64xf32, #sparse_tensor.encoding<{{{.*}}}>>)
+//   CHECK-NOT: sparse_tensor.values
+//   CHECK-NOT: sparse_tensor.tensor
+//       CHECK: return
+func @sparse_dce_reconstruct(%arg0: tensor<64xf32, #DenseVector>) {
+  %0 = sparse_tensor.values %arg0 : tensor<64xf32, #DenseVector> to memref<?xf32>
+  %1 = sparse_tensor.tensor %0 : memref<?xf32> to tensor<64xf32, #DenseVector>
+  return
+}