diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -582,8 +582,18 @@
       SmallVector<Value, 8> params;
       sizesFromPtr(rewriter, sizes, op, encSrc, srcType.cast<ShapedType>(),
                    src);
-      newParams(rewriter, params, op, encDst, kToCOO, sizes, src);
+      // Set up encoding with right mix of src and dst so that the two
+      // method calls can share most parameters, while still providing
+      // the correct sparsity information to either of them.
+      auto enc = SparseTensorEncodingAttr::get(
+          op->getContext(), encDst.getDimLevelType(), encDst.getDimOrdering(),
+          encSrc.getPointerBitWidth(), encSrc.getIndexBitWidth());
+      newParams(rewriter, params, op, enc, kToCOO, sizes, src);
       Value coo = genNewCall(rewriter, op, params);
+      params[3] = constantI32(
+          rewriter, loc, getOverheadTypeEncoding(encDst.getPointerBitWidth()));
+      params[4] = constantI32(
+          rewriter, loc, getOverheadTypeEncoding(encDst.getIndexBitWidth()));
       params[6] = constantI32(rewriter, loc, kFromCOO);
       params[7] = coo;
       rewriter.replaceOp(op, genNewCall(rewriter, op, params));
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_convert.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir
rename from mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_convert.mlir
rename to mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_convert.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir
rename from mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_convert.mlir
rename to mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_convert.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir
@@ -12,18 +12,22 @@
 // RUN: FileCheck %s
 
 #DCSR = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed" ]
+  dimLevelType = [ "compressed", "compressed" ],
+  pointerBitWidth = 8,
+  indexBitWidth = 8
 }>
 
 #DCSC = #sparse_tensor.encoding<{
   dimLevelType = [ "compressed", "compressed" ],
-  dimOrdering = affine_map<(i,j) -> (j,i)>
+  dimOrdering = affine_map<(i,j) -> (j,i)>,
+  pointerBitWidth = 64,
+  indexBitWidth = 64
 }>
 
 //
 // Integration test that tests conversions between sparse tensors,
-// where the dynamic sizes of the shape of the enveloping tensor
-// may change (the actual underlying sizes obviously never change).
+// where the pointer and index sizes in the overhead storage change
+// in addition to layout.
 //
 
 module {
@@ -45,46 +49,36 @@
         [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0 ]> : tensor<32x64xf64>
     %t2 = tensor.cast %t1 : tensor<32x64xf64> to tensor<?x?xf64>
 
-    // Four dense to sparse conversions.
-    %1 = sparse_tensor.convert %t1 : tensor<32x64xf64> to tensor<?x?xf64, #DCSR>
-    %2 = sparse_tensor.convert %t1 : tensor<32x64xf64> to tensor<?x?xf64, #DCSC>
-    %3 = sparse_tensor.convert %t2 : tensor<?x?xf64> to tensor<?x?xf64, #DCSR>
-    %4 = sparse_tensor.convert %t2 : tensor<?x?xf64> to tensor<?x?xf64, #DCSC>
+    // Dense to sparse.
+    %1 = sparse_tensor.convert %t1 : tensor<32x64xf64> to tensor<32x64xf64, #DCSR>
+    %2 = sparse_tensor.convert %t1 : tensor<32x64xf64> to tensor<32x64xf64, #DCSC>
 
-    // Two cross conversions.
-    %5 = sparse_tensor.convert %3 : tensor<?x?xf64, #DCSR> to tensor<?x?xf64, #DCSC>
-    %6 = sparse_tensor.convert %4 : tensor<?x?xf64, #DCSC> to tensor<?x?xf64, #DCSR>
+    // Sparse to sparse.
+    %3 = sparse_tensor.convert %1 : tensor<32x64xf64, #DCSR> to tensor<32x64xf64, #DCSC>
+    %4 = sparse_tensor.convert %2 : tensor<32x64xf64, #DCSC> to tensor<32x64xf64, #DCSR>
 
     //
     // All proper row-/column-wise?
     //
     // CHECK: ( 1, 2, 3, 4, 5, 6, 7, -1 )
     // CHECK: ( 1, 4, 6, 2, 5, 3, 7, -1 )
-    // CHECK: ( 1, 2, 3, 4, 5, 6, 7, -1 )
-    // CHECK: ( 1, 4, 6, 2, 5, 3, 7, -1 )
     // CHECK: ( 1, 4, 6, 2, 5, 3, 7, -1 )
     // CHECK: ( 1, 2, 3, 4, 5, 6, 7, -1 )
     //
-    %m1 = sparse_tensor.values %1 : tensor<?x?xf64, #DCSR> to memref<?xf64>
-    %m2 = sparse_tensor.values %2 : tensor<?x?xf64, #DCSC> to memref<?xf64>
-    %m3 = sparse_tensor.values %3 : tensor<?x?xf64, #DCSR> to memref<?xf64>
-    %m4 = sparse_tensor.values %4 : tensor<?x?xf64, #DCSC> to memref<?xf64>
-    %m5 = sparse_tensor.values %5 : tensor<?x?xf64, #DCSC> to memref<?xf64>
-    %m6 = sparse_tensor.values %6 : tensor<?x?xf64, #DCSR> to memref<?xf64>
+    %m1 = sparse_tensor.values %1 : tensor<32x64xf64, #DCSR> to memref<?xf64>
+    %m2 = sparse_tensor.values %2 : tensor<32x64xf64, #DCSC> to memref<?xf64>
+    %m3 = sparse_tensor.values %3 : tensor<32x64xf64, #DCSC> to memref<?xf64>
+    %m4 = sparse_tensor.values %4 : tensor<32x64xf64, #DCSR> to memref<?xf64>
     call @dump(%m1) : (memref<?xf64>) -> ()
     call @dump(%m2) : (memref<?xf64>) -> ()
     call @dump(%m3) : (memref<?xf64>) -> ()
     call @dump(%m4) : (memref<?xf64>) -> ()
-    call @dump(%m5) : (memref<?xf64>) -> ()
-    call @dump(%m6) : (memref<?xf64>) -> ()
 
     // Release the resources.
-    sparse_tensor.release %1 : tensor<?x?xf64, #DCSR>
-    sparse_tensor.release %2 : tensor<?x?xf64, #DCSC>
-    sparse_tensor.release %3 : tensor<?x?xf64, #DCSR>
-    sparse_tensor.release %4 : tensor<?x?xf64, #DCSC>
-    sparse_tensor.release %5 : tensor<?x?xf64, #DCSC>
-    sparse_tensor.release %6 : tensor<?x?xf64, #DCSR>
+    sparse_tensor.release %1 : tensor<32x64xf64, #DCSR>
+    sparse_tensor.release %2 : tensor<32x64xf64, #DCSC>
+    sparse_tensor.release %3 : tensor<32x64xf64, #DCSC>
+    sparse_tensor.release %4 : tensor<32x64xf64, #DCSR>
 
     return
   }
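
Note on the calling scheme in the SparseTensorConversion.cpp hunk above: the lowering builds one parameter array and issues the runtime's tensor-construction call twice. The first call uses action kToCOO and the *source* overhead bit widths to expand the source into a coordinate (COO) tensor; the patch then overwrites only slots 3/4 (pointer/index overhead types), 6 (action, now kFromCOO), and 7 (the COO handle) so the second call materializes the destination with the *destination* bit widths. The standalone C++ sketch below models just this two-call pattern; the enum values, the encoding table, and the newSparseTensor stub are illustrative assumptions for the sketch, not the actual MLIR sparse runtime API.

// Illustrative, self-contained model of the two-call conversion scheme.
// All names and numeric codes here are made up for the sketch.
#include <cstdint>
#include <cstdio>
#include <vector>

// Stand-ins for the lowering's action codes (values are illustrative).
enum Action : int32_t { kFromCOO = 2, kToCOO = 5 };

// Stand-in for getOverheadTypeEncoding: maps a bit width to a type code.
static int32_t overheadTypeEncoding(unsigned width) {
  switch (width) {
  case 64: return 1;
  case 32: return 2;
  case 16: return 3;
  case 8:  return 4;
  default: return 0; // native word width
  }
}

// Stand-in for genNewCall: prints the request, returns a dummy handle.
static intptr_t newSparseTensor(const std::vector<intptr_t> &params) {
  std::printf("newSparseTensor(ptrTp=%lld, idxTp=%lld, action=%lld, ptr=%lld)\n",
              (long long)params[3], (long long)params[4],
              (long long)params[6], (long long)params[7]);
  static intptr_t nextHandle = 1000;
  return ++nextHandle;
}

int main() {
  // Build the parameter array once, as newParams does, using the *source*
  // overhead widths (8-bit for #DCSR in the test above).
  std::vector<intptr_t> params(8, 0);
  params[3] = overheadTypeEncoding(8);  // src pointerBitWidth
  params[4] = overheadTypeEncoding(8);  // src indexBitWidth
  params[6] = kToCOO;                   // first call: expand src to COO
  params[7] = 100;                      // dummy handle of the src tensor
  intptr_t coo = newSparseTensor(params);

  // Overwrite only the slots that differ, as the patch does, switching to
  // the *destination* overhead widths (64-bit for #DCSC) and kFromCOO.
  params[3] = overheadTypeEncoding(64); // dst pointerBitWidth
  params[4] = overheadTypeEncoding(64); // dst indexBitWidth
  params[6] = kFromCOO;                 // second call: build dst from COO
  params[7] = coo;
  intptr_t dst = newSparseTensor(params);
  (void)dst;
  return 0;
}

Run against the test encodings above, the first request carries the 8-bit overhead types of #DCSR and the second the 64-bit types of #DCSC, which is exactly the slot-patching the params[3]/params[4] lines in the hunk perform.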