diff --git a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
--- a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
@@ -807,7 +807,7 @@
     if (auto c = tensorExps[e].val.getDefiningOp<complex::ConstantOp>()) {
       ArrayAttr arrayAttr = c.getValue();
       return arrayAttr[0].cast<FloatAttr>().getValue().isZero() &&
-             arrayAttr[0].cast<FloatAttr>().getValue().isZero();
+             arrayAttr[1].cast<FloatAttr>().getValue().isZero();
     }
     if (auto c = tensorExps[e].val.getDefiningOp<arith::ConstantIntOp>())
       return c.value() == 0;
diff --git a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
@@ -1,4 +1,3 @@
-// NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
 // RUN: mlir-opt %s -sparsification | FileCheck %s
 
 #SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>
@@ -400,3 +399,40 @@
   } -> tensor<32xf64, #SV>
   return %0 : tensor<32xf64, #SV>
 }
+
+// CHECK-LABEL: func.func @complex_divbyc(
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xcomplex<f64>, #sparse_tensor.encoding<{{.*}}>> {
+// CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[VAL_3:.*]] = complex.constant [0.000000e+00, 1.000000e+00] : complex<f64>
+// CHECK: %[[VAL_4:.*]] = bufferization.alloc_tensor() : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{{.*}}>>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_1]] : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{{.*}}>> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_1]] : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{{.*}}>> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{{.*}}>> to memref<?xcomplex<f64>>
+// CHECK: %[[VAL_8:.*]] = memref.alloca(%[[VAL_2]]) : memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = memref.alloca() : memref<complex<f64>>
+// CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_1]]] : memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_2]]] : memref<?xindex>
+// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_2]] {
+// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref<?xindex>
+// CHECK: memref.store %[[VAL_13]], %[[VAL_8]]{{\[}}%[[VAL_1]]] : memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_12]]] : memref<?xcomplex<f64>>
+// CHECK: %[[VAL_15:.*]] = complex.div %[[VAL_14]], %[[VAL_3]] : complex<f64>
+// CHECK: memref.store %[[VAL_15]], %[[VAL_9]][] : memref<complex<f64>>
+// CHECK: sparse_tensor.lex_insert %[[VAL_4]], %[[VAL_8]], %[[VAL_9]] : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{{.*}}>>, memref<?xindex>, memref<complex<f64>>
+// CHECK: }
+// CHECK: %[[VAL_16:.*]] = sparse_tensor.load %[[VAL_4]] hasInserts : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{{.*}}>>
+// CHECK: return %[[VAL_16]] : tensor<32xcomplex<f64>, #sparse_tensor.encoding<{{.*}}>>
+// CHECK: }
+func.func @complex_divbyc(%arg0: tensor<32xcomplex<f64>, #SV>) -> tensor<32xcomplex<f64>, #SV> {
+  %c = complex.constant [0.0, 1.0] : complex<f64>
+  %init = bufferization.alloc_tensor() : tensor<32xcomplex<f64>, #SV>
+  %0 = linalg.generic #traitc
+     ins(%arg0: tensor<32xcomplex<f64>, #SV>)
+    outs(%init: tensor<32xcomplex<f64>, #SV>) {
+      ^bb(%a: complex<f64>, %x: complex<f64>):
+        %0 = complex.div %a, %c : complex<f64>
+        linalg.yield %0 : complex<f64>
+  } -> tensor<32xcomplex<f64>, #SV>
+  return %0 : tensor<32xcomplex<f64>, #SV>
+}