diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
@@ -1020,7 +1020,8 @@
     //     get the next element from the input file
     //     insert the element to %tmp
     //   %t = sparse_tensor.ConvertOp %tmp
-    RankedTensorType cooTp = getUnorderedCOOFromType(dstTp);
+    RankedTensorType cooTp =
+        getUnorderedCOOFromTypeWithOrdering(dstTp, encDst.getDimOrdering());
     auto cooBuffer =
         rewriter.create<AllocTensorOp>(loc, cooTp, dynSizesArray).getResult();
 
@@ -1050,10 +1051,10 @@
     Value indices = dimSizes; // Reuse the indices memref to store indices.
     createFuncCall(rewriter, loc, getNextFuncName, {}, {reader, indices, value},
                    EmitCInterface::On);
-    SmallVector<Value> indicesArray;
+    SmallVector<Value> indicesArray(rank, Value());
     for (uint64_t i = 0; i < rank; i++) {
-      indicesArray.push_back(rewriter.create<memref::LoadOp>(
-          loc, indices, constantIndex(rewriter, loc, i)));
+      indicesArray[toStoredDim(encDst, i)] = rewriter.create<memref::LoadOp>(
+          loc, indices, constantIndex(rewriter, loc, i));
     }
     Value v = rewriter.create<memref::LoadOp>(loc, value);
     Value t = rewriter.create<InsertOp>(loc, v, forOp.getRegionIterArg(0),
diff --git a/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir b/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
--- a/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
@@ -5,6 +5,11 @@
   dimLevelType = ["dense", "compressed"]
 }>
 
+#CSC = #sparse_tensor.encoding<{
+  dimLevelType = [ "dense", "compressed" ],
+  dimOrdering = affine_map<(i, j) -> (j, i)>
+}>
+
 // CHECK-LABEL: func.func @sparse_new_symmetry(
 // CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> {
 // CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
@@ -75,6 +80,37 @@
   return %0 : tensor<?x?xf32, #CSR>
 }
 
+// CHECK-LABEL: func.func @sparse_new_csc(
+// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> {
+// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
+// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
+// CHECK: %[[R:.*]] = call @createSparseTensorReader(%[[A]])
+// CHECK: %[[DS:.*]] = memref.alloca(%[[C2]]) : memref<?xindex>
+// CHECK: call @copySparseTensorReaderDimSizes(%[[R]], %[[DS]])
+// CHECK: %[[D0:.*]] = memref.load %[[DS]]{{\[}}%[[C0]]]
+// CHECK: %[[D1:.*]] = memref.load %[[DS]]{{\[}}%[[C1]]]
+// CHECK: %[[T:.*]] = bufferization.alloc_tensor(%[[D0]], %[[D1]])
+// CHECK: %[[N:.*]] = call @getSparseTensorReaderNNZ(%[[R]])
+// CHECK: %[[VB:.*]] = memref.alloca()
+// CHECK: %[[T2:.*]] = scf.for %{{.*}} = %[[C0]] to %[[N]] step %[[C1]] iter_args(%[[A2:.*]] = %[[T]])
+// CHECK: func.call @getSparseTensorReaderNextF32(%[[R]], %[[DS]], %[[VB]])
+// CHECK: %[[E0:.*]] = memref.load %[[DS]]{{\[}}%[[C0]]]
+// CHECK: %[[E1:.*]] = memref.load %[[DS]]{{\[}}%[[C1]]]
+// CHECK: %[[V:.*]] = memref.load %[[VB]][]
+// CHECK: %[[T1:.*]] = sparse_tensor.insert %[[V]] into %[[A2]]{{\[}}%[[E1]], %[[E0]]]
+// CHECK: scf.yield %[[T1]]
+// CHECK: }
+// CHECK: call @delSparseTensorReader(%[[R]])
+// CHECK: %[[T4:.*]] = sparse_tensor.load %[[T2]] hasInserts
+// CHECK: %[[R:.*]] = sparse_tensor.convert %[[T4]]
+// CHECK: bufferization.dealloc_tensor %[[T4]]
+// CHECK: return %[[R]]
+func.func @sparse_new_csc(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #CSC> {
+  %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<?x?xf32, #CSC>
+  return %0 : tensor<?x?xf32, #CSC>
+}
+
 // CHECK-LABEL: func.func @sparse_out(
 // CHECK-SAME: %[[A:.*]]: tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>>,
 // CHECK-SAME: %[[B:.*]]: !llvm.ptr<i8>) {
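
The key change above is that element indices read from the file are no longer pushed back in file order; each one is stored at position toStoredDim(encDst, i), i.e. permuted into the target encoding's storage order, which is why the CSC test inserts with swapped indices [%[[E1]], %[[E0]]]. Below is a minimal standalone sketch of that permutation step; toStoredDimSketch and the hard-coded (i, j) -> (j, i) permutation are illustrative stand-ins for the dialect's real toStoredDim and SparseTensorEncodingAttr, not the actual implementation:

  #include <array>
  #include <cstdint>
  #include <cstdio>

  // Illustrative stand-in for sparse_tensor's toStoredDim: given a storage
  // permutation (perm[s] is the logical dimension stored at position s),
  // return the storage position of logical dimension d.
  static uint64_t toStoredDimSketch(const std::array<uint64_t, 2> &perm,
                                    uint64_t d) {
    for (uint64_t s = 0; s < perm.size(); ++s)
      if (perm[s] == d)
        return s;
    return d; // Unreachable for a valid permutation.
  }

  int main() {
    // CSC-style ordering (i, j) -> (j, i): storage position 0 holds dim 1,
    // storage position 1 holds dim 0.
    std::array<uint64_t, 2> perm = {1, 0};
    std::array<uint64_t, 2> fileIndices = {3, 7}; // (row, col) from the file.
    std::array<uint64_t, 2> stored = {0, 0};
    // Mirrors: indicesArray[toStoredDim(encDst, i)] = load(indices, i).
    for (uint64_t i = 0; i < 2; ++i)
      stored[toStoredDimSketch(perm, i)] = fileIndices[i];
    // Prints "stored = [7, 3]": column index first, matching the swapped
    // insert indices in the CSC test above.
    std::printf("stored = [%llu, %llu]\n", (unsigned long long)stored[0],
                (unsigned long long)stored[1]);
    return 0;
  }

Writing into a pre-sized array rather than using push_back is what makes the permutation safe: slot toStoredDim(encDst, i) may be filled out of order, which is also why indicesArray is now constructed as SmallVector<Value> indicesArray(rank, Value()).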