diff --git a/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h b/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h --- a/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h +++ b/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h @@ -432,8 +432,15 @@ /// Sets the level number and level-type of the `t`th tensor on /// `i`th loop. void setLevelAndType(TensorId t, LoopId i, Level lvl, DimLevelType dlt) { - assert(t < numTensors && i < numLoops && lvl < lvlToLoop[t].size() && - isValidDLT(dlt)); + assert(t < numTensors && i < numLoops && isValidDLT(dlt)); + // In case of an indexing map like (d0) -> (0, d0), there might be more + // levels than loops because of the constant index. + // TODO: Constant indices are currently not supported on sparse tensors, but + // are allowed in non-annotated dense tensors. Supporting them would also be + // required for sparse tensor slice rank reduction. + if (lvl >= lvlToLoop[t].size()) + lvlToLoop[t].resize(lvl + 1); + lvlTypes[t][i] = dlt; loopToLvl[t][i] = lvl; lvlToLoop[t][lvl] = i; diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.cpp @@ -86,10 +86,13 @@ SmallVector tensors; // input tensors passed to loop emitter for (OpOperand &t : linalgOp->getOpOperands()) { tensors.push_back(t.get()); - Level rank = linalgOp.getMatchingIndexingMap(&t).getNumResults(); - for (Level lvl = 0; lvl < rank; lvl++) { - sortArrayBasedOnOrder( - latticeMerger.getDependentLoops(t.getOperandNumber(), lvl), topSort); + if (getSparseTensorEncoding(t.get().getType())) { + Level rank = linalgOp.getMatchingIndexingMap(&t).getNumResults(); + for (Level lvl = 0; lvl < rank; lvl++) { + sortArrayBasedOnOrder( + latticeMerger.getDependentLoops(t.getOperandNumber(), lvl), + topSort); + } } } @@ -103,11 +106,13 @@ // passing in a callback. 
[this](TensorId t, Level lvl) -> std::vector> { // Translates from a list of loop index to a list of [tid, dim] pair. - std::vector rLoops = this->merger().getDependentLoops(t, lvl); std::vector> ret; - ret.reserve(rLoops.size()); - for (LoopId l : rLoops) - ret.emplace_back(this->merger().getLoopDefiningLvl(l)); + if (getSparseTensorEncoding(linalgOp.getOperands()[t].getType())) { + std::vector rLoops = this->merger().getDependentLoops(t, lvl); + ret.reserve(rLoops.size()); + for (LoopId l : rLoops) + ret.emplace_back(this->merger().getLoopDefiningLvl(l)); + } return ret; }); } diff --git a/mlir/test/Dialect/SparseTensor/constant_index_map.mlir b/mlir/test/Dialect/SparseTensor/constant_index_map.mlir new file mode 100644 --- /dev/null +++ b/mlir/test/Dialect/SparseTensor/constant_index_map.mlir @@ -0,0 +1,41 @@ +// Reported by https://github.com/llvm/llvm-project/issues/61530 + +// RUN: mlir-opt %s -sparsification | FileCheck %s + +#map1 = affine_map<(d0) -> (0, d0)> +#map2 = affine_map<(d0) -> (d0)> + +#SpVec = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }> + +// CHECK-LABEL: func.func @main( +// CHECK-SAME: %[[VAL_0:.*0]]: tensor<1x77xi1>, +// CHECK-SAME: %[[VAL_1:.*1]]: tensor<1x77xi1>) -> tensor<77xi1, #{{.*}}> { +// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 77 : index +// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index +// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index +// CHECK-DAG: %[[VAL_5:.*]] = bufferization.alloc_tensor() : tensor<77xi1, #{{.*}}> +// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<1x77xi1> +// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<1x77xi1> +// CHECK: %[[VAL_8:.*]] = scf.for %[[VAL_9:.*]] = %[[VAL_3]] to %[[VAL_2]] step %[[VAL_4]] iter_args(%[[VAL_10:.*]] = %[[VAL_5]]) -> (tensor<77xi1, #{{.*}}>) { +// CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_3]], %[[VAL_9]]] : memref<1x77xi1> +// CHECK: %[[VAL_12:.*]] = memref.load 
%[[VAL_7]]{{\[}}%[[VAL_3]], %[[VAL_9]]] : memref<1x77xi1> +// CHECK: %[[VAL_13:.*]] = arith.addi %[[VAL_11]], %[[VAL_12]] : i1 +// CHECK: %[[VAL_14:.*]] = sparse_tensor.insert %[[VAL_13]] into %[[VAL_10]]{{\[}}%[[VAL_9]]] : tensor<77xi1, #{{.*}}> +// CHECK: scf.yield %[[VAL_14]] : tensor<77xi1, #{{.*}}> +// CHECK: } +// CHECK: %[[VAL_15:.*]] = sparse_tensor.load %[[VAL_16:.*]] hasInserts : tensor<77xi1, #{{.*}}> +// CHECK: return %[[VAL_15]] : tensor<77xi1, #{{.*}}> +// CHECK: } +func.func @main(%arg0: tensor<1x77xi1>, %arg1: tensor<1x77xi1>) -> tensor<77xi1, #SpVec> { + %0 = bufferization.alloc_tensor() : tensor<77xi1, #SpVec> + %1 = linalg.generic { + indexing_maps = [#map1, #map1, #map2], + iterator_types = ["parallel"]} + ins(%arg0, %arg1 : tensor<1x77xi1>, tensor<1x77xi1>) + outs(%0 : tensor<77xi1, #SpVec>) { + ^bb0(%in: i1, %in_0: i1, %out: i1): + %2 = arith.addi %in, %in_0 : i1 + linalg.yield %2 : i1 + } -> tensor<77xi1, #SpVec> + return %1 : tensor<77xi1, #SpVec> +} diff --git a/mlir/test/Dialect/SparseTensor/sparse_affine.mlir b/mlir/test/Dialect/SparseTensor/sparse_affine.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_affine.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_affine.mlir @@ -496,4 +496,3 @@ } -> tensor<32x16xf64> return %0 : tensor<32x16xf64> } -