diff --git a/mlir/test/Dialect/SparseTensor/conversion.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir --- a/mlir/test/Dialect/SparseTensor/conversion.mlir +++ b/mlir/test/Dialect/SparseTensor/conversion.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s --sparse-tensor-conversion --canonicalize | FileCheck %s +// RUN: mlir-opt %s --sparse-tensor-conversion --canonicalize --cse | FileCheck %s #DenseVector = #sparse_tensor.encoding<{ dimLevelType = ["dense"] }> @@ -46,12 +46,27 @@ // CHECK: %[[D:.*]] = call @sparseDimSize(%[[A]], %[[C]]) // CHECK: return %[[D]] : index func @sparse_dim3d(%arg0: tensor<?x?x?xf64, #SparseTensor>) -> index { - // Needs permuting 1 into 2. + // Querying for dimension 1 in the tensor type needs to be + // permuted into querying for dimension 2 in the stored sparse + // tensor scheme, since the latter honors the dimOrdering. %c = constant 1 : index %0 = tensor.dim %arg0, %c : tensor<?x?x?xf64, #SparseTensor> return %0 : index } +// CHECK-LABEL: func @sparse_dim3d_const( +// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) +// CHECK: %[[C:.*]] = constant 20 : index +// CHECK: return %[[C]] : index +func @sparse_dim3d_const(%arg0: tensor<10x20x30xf64, #SparseTensor>) -> index { + // Querying for dimension 1 in the tensor type can be directly + // folded into the right value (even though it corresponds + // to dimension 2 in the stored sparse tensor scheme). + %c = constant 1 : index + %0 = tensor.dim %arg0, %c : tensor<10x20x30xf64, #SparseTensor> + return %0 : index +} + // CHECK-LABEL: func @sparse_new1d( // CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8> // CHECK-DAG: %[[U:.*]] = constant dense<1> : tensor<1xi8>