diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -486,7 +486,7 @@
                           StringRef namePrefix, FuncGeneratorType createFunc) {
   // The mangled name of the function has this format:
-  //   <namePrefix>_[C|S|D]_<shape>_<type>
+  //   <namePrefix>_[C|S|D][NU]_<shape>_<type>
   //   _<indexBW>_<pointerBW>
   RankedTensorType rtp = desc.getTensorType();
   SmallString<32> nameBuffer;
   llvm::raw_svector_ostream nameOstream(nameBuffer);
@@ -496,12 +496,16 @@
   assert(rank == indices.size());
   for (unsigned d = 0; d < rank; d++) {
     if (isCompressedDim(rtp, d)) {
-      nameOstream << "C_";
+      nameOstream << "C";
     } else if (isSingletonDim(rtp, d)) {
-      nameOstream << "S_";
+      nameOstream << "S";
     } else {
-      nameOstream << "D_";
+      nameOstream << "D";
     }
+    if (isUniqueDim(rtp, d))
+      nameOstream << "_";
+    else
+      nameOstream << "NU_";
   }
   // Static dim sizes are used in the generated code while dynamic sizes are
   // loaded from the dimSizes buffer. This is the reason for adding the shape
diff --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir
--- a/mlir/test/Dialect/SparseTensor/codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/codegen.mlir
@@ -46,6 +46,10 @@
   dimOrdering = affine_map<(i, j, k) -> (k, i, j)>
 }>
 
+#Coo = #sparse_tensor.encoding<{
+  dimLevelType = [ "compressed-nu", "singleton" ]
+}>
+
 // CHECK-LABEL: func @sparse_nop(
 //  CHECK-SAME: %[[A0:.*0]]: memref<1xindex>,
 //  CHECK-SAME: %[[A1:.*1]]: memref<3xindex>,
@@ -594,6 +598,37 @@
   return %1 : tensor<128xf64, #SparseVector>
 }
 
+// CHECK-LABEL: func.func private @_insert_CNU_S_5_6_f64_0_0(
+//  CHECK-SAME: %[[A0:.*0]]: memref<2xindex>,
+//  CHECK-SAME: %[[A1:.*1]]: memref<4xindex>,
+//  CHECK-SAME: %[[A2:.*2]]: memref<?xindex>,
+//  CHECK-SAME: %[[A3:.*3]]: memref<?xindex>,
+//  CHECK-SAME: %[[A4:.*4]]: memref<?xindex>,
+//  CHECK-SAME: %[[A5:.*5]]: memref<?xf64>,
+//  CHECK-SAME: %[[A6:.*6]]: index,
+//  CHECK-SAME: %[[A7:.*7]]: index,
+//  CHECK-SAME: %[[A8:.*8]]: f64)
+//       CHECK: %[[P0:.*]] = sparse_tensor.push_back %[[A1]], %[[A3]], %[[A6]]
+//       CHECK: %[[P1:.*]] = sparse_tensor.push_back %[[A1]], %[[A4]], %[[A7]]
+//       CHECK: %[[P2:.*]] = sparse_tensor.push_back %[[A1]], %[[A5]], %[[A8]]
+//       CHECK: return %[[A0]], %[[A1]], %[[A2]], %[[P0]], %[[P1]], %[[P2]]
+//       CHECK: func.func @sparse_insert_coo(
+//  CHECK-SAME: %[[A0:.*0]]: memref<2xindex>,
+//  CHECK-SAME: %[[A1:.*1]]: memref<4xindex>,
+//  CHECK-SAME: %[[A2:.*2]]: memref<?xindex>,
+//  CHECK-SAME: %[[A3:.*3]]: memref<?xindex>,
+//  CHECK-SAME: %[[A4:.*4]]: memref<?xindex>,
+//  CHECK-SAME: %[[A5:.*5]]: memref<?xf64>,
+//  CHECK-SAME: %[[A6:.*6]]: index,
+//  CHECK-SAME: %[[A7:.*7]]: f64)
+//       CHECK: %[[R:.*]]:6 = call @_insert_CNU_S_5_6_f64_0_0(%[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A5]], %[[A6]], %[[A6]], %[[A7]])
+//       CHECK: return %[[R]]#0, %[[R]]#1, %[[R]]#2, %[[R]]#3, %[[R]]#4, %[[R]]#5
+func.func @sparse_insert_coo(%arg0: tensor<5x6xf64, #Coo>, %arg1: index, %arg2: f64) -> tensor<5x6xf64, #Coo> {
+  %0 = sparse_tensor.insert %arg2 into %arg0[%arg1, %arg1] : tensor<5x6xf64, #Coo>
+  %1 = sparse_tensor.load %0 hasInserts : tensor<5x6xf64, #Coo>
+  return %1 : tensor<5x6xf64, #Coo>
+}
+
 // CHECK-LABEL: func.func @sparse_nop_convert(
 //  CHECK-SAME: %[[A0:.*0]]: memref<1xindex>,
 //  CHECK-SAME: %[[A1:.*1]]: memref<3xindex>,
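
Since insertion helpers are created (and later looked up) by their mangled name, appending "NU" for non-unique dimensions keeps a non-unique level (as in unordered COO) from reusing a helper specialized for unique levels. A minimal standalone C++ sketch of the scheme follows, assuming the "_insert_" prefix seen in the test output; mangleInsertFunc, DimSpec, and DimLevel are invented names for illustration, not the MLIR helper itself. It reproduces the name _insert_CNU_S_5_6_f64_0_0 that the new test expects for a compressed-nu x singleton 5x6 f64 tensor.

// Sketch only: mirrors the per-dimension mangling loop above under the
// assumptions stated in the lead-in; not the actual genInsertionCallHelper.
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

enum class DimLevel { Dense, Compressed, Singleton };

struct DimSpec {
  DimLevel level;
  bool unique; // false => append the "NU" (non-unique) marker
};

std::string mangleInsertFunc(const std::vector<DimSpec> &dims,
                             const std::vector<int64_t> &shape,
                             const std::string &eltType,
                             unsigned indexBW, unsigned pointerBW) {
  std::string name = "_insert_"; // assumed namePrefix
  // One letter per dimension level type, plus "NU" when not unique.
  for (const DimSpec &d : dims) {
    switch (d.level) {
    case DimLevel::Compressed: name += "C"; break;
    case DimLevel::Singleton:  name += "S"; break;
    case DimLevel::Dense:      name += "D"; break;
    }
    name += d.unique ? "_" : "NU_";
  }
  // Static dimension sizes are baked into the name, as in the patch.
  for (int64_t s : shape)
    name += std::to_string(s) + "_";
  name += eltType + "_" + std::to_string(indexBW) + "_" +
          std::to_string(pointerBW);
  return name;
}

int main() {
  // The 2-D COO tensor from the test: compressed-nu x singleton, 5x6, f64,
  // default (0) index and pointer bitwidths.
  std::cout << mangleInsertFunc({{DimLevel::Compressed, /*unique=*/false},
                                 {DimLevel::Singleton, /*unique=*/true}},
                                {5, 6}, "f64", 0, 0)
            << "\n"; // prints _insert_CNU_S_5_6_f64_0_0
}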