diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h b/mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h
--- a/mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h
@@ -205,6 +205,62 @@
 MLIR_SPARSETENSOR_FOREVERY_V(DECL_CONVERTFROMMLIRSPARSETENSOR)
 #undef DECL_CONVERTFROMMLIRSPARSETENSOR

+/// Creates a SparseTensorReader for reading a sparse tensor from a file with
+/// the given file name.
+MLIR_CRUNNERUTILS_EXPORT void *createSparseTensorReader(char *filename);
+
+/// Returns the rank of the sparse tensor being read.
+MLIR_CRUNNERUTILS_EXPORT index_type getSparseTensorReaderRank(void *p);
+
+/// Returns the is_symmetric bit for the sparse tensor being read.
+MLIR_CRUNNERUTILS_EXPORT bool getSparseTensorReaderIsSymmetric(void *p);
+
+/// Returns the number of non-zero values for the sparse tensor being read.
+MLIR_CRUNNERUTILS_EXPORT index_type getSparseTensorReaderNNZ(void *p);
+
+/// Returns the size of a dimension for the sparse tensor being read.
+MLIR_CRUNNERUTILS_EXPORT index_type getSparseTensorReaderDimSize(void *p,
+                                                                 index_type d);
+
+/// Returns all dimension sizes for the sparse tensor being read.
+MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_getSparseTensorReaderDimSizes(
+    void *p, StridedMemRefType<index_type, 1> *dref);
+
+/// Releases the SparseTensorReader.
+MLIR_CRUNNERUTILS_EXPORT void delSparseTensorReader(void *p);
+
+/// Returns the next element for the sparse tensor being read.
+#define IMPL_GETNEXT(VNAME, V)                                                \
+  MLIR_CRUNNERUTILS_EXPORT V _mlir_ciface_getSparseTensorReaderNext##VNAME(   \
+      void *p, StridedMemRefType<index_type, 1> *iref);
+MLIR_SPARSETENSOR_FOREVERY_V(IMPL_GETNEXT)
+#undef IMPL_GETNEXT
+
+typedef std::ostream SparseTensorWriter;
+
+/// Creates a SparseTensorWriter for outputting a sparse tensor to a file with
+/// the given file name. When the file name is empty, std::cout is used.
+//
+// Only the extended FROSTT format is supported currently.
+MLIR_CRUNNERUTILS_EXPORT void *createSparseTensorWriter(char *filename);
+
+/// Finalizes the outputting of a sparse tensor to a file and releases the
+/// SparseTensorWriter.
+MLIR_CRUNNERUTILS_EXPORT void delSparseTensorWriter(void *p);
+
+/// Outputs the sparse tensor rank, nnz and shape.
+MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_outSparseTensorWriterMetaData(
+    void *p, index_type rank, index_type nnz,
+    StridedMemRefType<index_type, 1> *dref);
+
+/// Outputs an element for the sparse tensor.
+#define IMPL_OUTNEXT(VNAME, V)                                                \
+  MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_outSparseTensorWriterNext##VNAME(\
+      void *p, index_type rank, StridedMemRefType<index_type, 1> *iref,       \
+      V value);
+MLIR_SPARSETENSOR_FOREVERY_V(IMPL_OUTNEXT)
+#undef IMPL_OUTNEXT
+
 } // extern "C"

 #endif // MLIR_EXECUTIONENGINE_SPARSETENSORUTILS_H
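For illustration only (not part of the patch): a minimal host-side C++ sketch of driving the new reader entry points directly. The input file name "wide.mtx" and the F32 element type are assumptions; the integration test below exercises the same calls through llvm.emit_c_interface wrappers instead.

#include <cstdint>
#include <vector>

#include "mlir/ExecutionEngine/CRunnerUtils.h"
#include "mlir/ExecutionEngine/SparseTensorUtils.h"

int main() {
  char filename[] = "wide.mtx"; // hypothetical input file
  void *reader = createSparseTensorReader(filename);
  index_type rank = getSparseTensorReaderRank(reader);
  index_type nnz = getSparseTensorReaderNNZ(reader);
  index_type rows = getSparseTensorReaderDimSize(reader, 0);
  (void)rows;

  // Wrap a rank-1 buffer in a StridedMemRefType so the reader can return
  // the coordinates of each element through the C interface.
  std::vector<index_type> indices(rank);
  StridedMemRefType<index_type, 1> iref;
  iref.basePtr = iref.data = indices.data();
  iref.offset = 0;
  iref.sizes[0] = rank;
  iref.strides[0] = 1;

  for (index_type k = 0; k < nnz; ++k) {
    float v = _mlir_ciface_getSparseTensorReaderNextF32(reader, &iref);
    // indices[0..rank-1] now hold the zero-based coordinates of value v.
    (void)v;
  }
  delSparseTensorReader(reader);
  return 0;
}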
diff --git a/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp b/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
--- a/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
@@ -533,6 +533,108 @@
 MLIR_SPARSETENSOR_FOREVERY_V(IMPL_CONVERTFROMMLIRSPARSETENSOR)
 #undef IMPL_CONVERTFROMMLIRSPARSETENSOR

+void *createSparseTensorReader(char *filename) {
+  SparseTensorReader *stfile = new SparseTensorReader(filename);
+  stfile->openFile();
+  stfile->readHeader();
+  return static_cast<void *>(stfile);
+}
+
+index_type getSparseTensorReaderRank(void *p) {
+  return static_cast<SparseTensorReader *>(p)->getRank();
+}
+
+bool getSparseTensorReaderIsSymmetric(void *p) {
+  return static_cast<SparseTensorReader *>(p)->isSymmetric();
+}
+
+index_type getSparseTensorReaderNNZ(void *p) {
+  return static_cast<SparseTensorReader *>(p)->getNNZ();
+}
+
+index_type getSparseTensorReaderDimSize(void *p, index_type d) {
+  return static_cast<SparseTensorReader *>(p)->getDimSize(d);
+}
+
+void _mlir_ciface_getSparseTensorReaderDimSizes(
+    void *p, StridedMemRefType<index_type, 1> *dref) {
+  assert(p && dref);
+  assert(dref->strides[0] == 1);
+  index_type *dimSizes = dref->data + dref->offset;
+  SparseTensorReader &file = *static_cast<SparseTensorReader *>(p);
+  const index_type *sizes = file.getDimSizes();
+  index_type rank = file.getRank();
+  for (uint64_t r = 0; r < rank; ++r)
+    dimSizes[r] = sizes[r];
+}
+
+void delSparseTensorReader(void *p) {
+  delete static_cast<SparseTensorReader *>(p);
+}
+
+#define IMPL_GETNEXT(VNAME, V)                                                \
+  V _mlir_ciface_getSparseTensorReaderNext##VNAME(                            \
+      void *p, StridedMemRefType<index_type, 1> *iref) {                      \
+    assert(p && iref);                                                        \
+    assert(iref->strides[0] == 1);                                            \
+    index_type *indices = iref->data + iref->offset;                          \
+    SparseTensorReader *stfile = static_cast<SparseTensorReader *>(p);        \
+    index_type rank = stfile->getRank();                                      \
+    char *linePtr = stfile->readLine();                                       \
+    for (uint64_t r = 0; r < rank; ++r) {                                     \
+      uint64_t idx = strtoul(linePtr, &linePtr, 10);                          \
+      indices[r] = idx - 1;                                                   \
+    }                                                                         \
+    return detail::readCOOValue<V>(&linePtr, stfile->isPattern());            \
+  }
+MLIR_SPARSETENSOR_FOREVERY_V(IMPL_GETNEXT)
+#undef IMPL_GETNEXT
+
+typedef std::ostream SparseTensorWriter;
+
+void *createSparseTensorWriter(char *filename) {
+  SparseTensorWriter *file =
+      (filename[0] == 0) ? &std::cout : new std::ofstream(filename);
+  *file << "# extended FROSTT format\n";
+  return static_cast<void *>(file);
+}
+
+void delSparseTensorWriter(void *p) {
+  SparseTensorWriter *file = static_cast<SparseTensorWriter *>(p);
+  file->flush();
+  assert(file->good());
+  if (file != &std::cout)
+    delete file;
+}
+
+void _mlir_ciface_outSparseTensorWriterMetaData(
+    void *p, index_type rank, index_type nnz,
+    StridedMemRefType<index_type, 1> *dref) {
+  assert(p && dref);
+  assert(dref->strides[0] == 1);
+  index_type *dimSizes = dref->data + dref->offset;
+  SparseTensorWriter &file = *static_cast<SparseTensorWriter *>(p);
+  file << rank << " " << nnz << std::endl;
+  for (uint64_t r = 0; r < rank - 1; ++r)
+    file << dimSizes[r] << " ";
+  file << dimSizes[rank - 1] << std::endl;
+}
+
+#define IMPL_OUTNEXT(VNAME, V)                                                \
+  void _mlir_ciface_outSparseTensorWriterNext##VNAME(                         \
+      void *p, index_type rank, StridedMemRefType<index_type, 1> *iref,       \
+      V value) {                                                              \
+    assert(p && iref);                                                        \
+    assert(iref->strides[0] == 1);                                            \
+    index_type *indices = iref->data + iref->offset;                          \
+    SparseTensorWriter &file = *static_cast<SparseTensorWriter *>(p);         \
+    for (uint64_t r = 0; r < rank; ++r)                                       \
+      file << (indices[r] + 1) << " ";                                        \
+    file << value << std::endl;                                               \
+  }
+MLIR_SPARSETENSOR_FOREVERY_V(IMPL_OUTNEXT)
+#undef IMPL_OUTNEXT
+
 } // extern "C"

 #endif // MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
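For illustration only (not part of the patch): a minimal C++ sketch of driving the writer entry points. The 2x3 matrix with two nonzero entries is made up for this example; passing an empty file name routes the output to std::cout, prefixed with the "# extended FROSTT format" header line emitted by createSparseTensorWriter.

#include <cstdint>

#include "mlir/ExecutionEngine/CRunnerUtils.h"
#include "mlir/ExecutionEngine/SparseTensorUtils.h"

int main() {
  char empty[] = ""; // empty name -> write to std::cout
  void *writer = createSparseTensorWriter(empty);

  // Emit the metadata: "2 2" (rank nnz) followed by "2 3" (shape).
  index_type dimSizes[] = {2, 3};
  StridedMemRefType<index_type, 1> dref;
  dref.basePtr = dref.data = dimSizes;
  dref.offset = 0;
  dref.sizes[0] = 2;
  dref.strides[0] = 1;
  _mlir_ciface_outSparseTensorWriterMetaData(writer, /*rank=*/2, /*nnz=*/2,
                                             &dref);

  // Emit the elements. Indices are zero-based here; the writer prints them
  // one-based, so this produces "1 1 1.5" and "2 3 -2".
  index_type indices[] = {0, 0};
  StridedMemRefType<index_type, 1> iref;
  iref.basePtr = iref.data = indices;
  iref.offset = 0;
  iref.sizes[0] = 2;
  iref.strides[0] = 1;
  _mlir_ciface_outSparseTensorWriterNextF32(writer, 2, &iref, 1.5f);
  indices[0] = 1;
  indices[1] = 2;
  _mlir_ciface_outSparseTensorWriterNextF32(writer, 2, &iref, -2.0f);

  delSparseTensorWriter(writer);
  return 0;
}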
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir
new file mode 100644
--- /dev/null
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir
@@ -0,0 +1,191 @@
+// RUN: mlir-opt %s --sparse-compiler | \
+// RUN: TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" \
+// RUN: TENSOR1="" \
+// RUN: mlir-cpu-runner \
+// RUN:  -e entry -entry-point-result=void \
+// RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
+
+!Filename = !llvm.ptr<i8>
+!TensorReader = !llvm.ptr<i8>
+!TensorWriter = !llvm.ptr<i8>
+
+module {
+
+  func.func private @getTensorFilename(index) -> (!Filename)
+
+  func.func private @createSparseTensorReader(!Filename) -> (!TensorReader)
+  func.func private @delSparseTensorReader(!TensorReader) -> ()
+  func.func private @getSparseTensorReaderRank(!TensorReader) -> (index)
+  func.func private @getSparseTensorReaderNNZ(!TensorReader) -> (index)
+  func.func private @getSparseTensorReaderIsSymmetric(!TensorReader) -> (i1)
+  func.func private @getSparseTensorReaderDimSizes(!TensorReader, memref<?xindex>)
+    -> () attributes { llvm.emit_c_interface }
+  func.func private @getSparseTensorReaderNextF32(!TensorReader, memref<?xindex>)
+    -> (f32) attributes { llvm.emit_c_interface }
+
+  func.func private @createSparseTensorWriter(!Filename) -> (!TensorWriter)
+  func.func private @delSparseTensorWriter(!TensorWriter)
+  func.func private @outSparseTensorWriterMetaData(!TensorWriter, index, index,
+    memref<?xindex>) -> () attributes { llvm.emit_c_interface }
+  func.func private @outSparseTensorWriterNextF32(!TensorWriter, index,
+    memref<?xindex>, f32) -> () attributes { llvm.emit_c_interface }
+
+  func.func @dumpi(%arg0: memref<?xindex>) {
+    %c0 = arith.constant 0 : index
+    %v = vector.transfer_read %arg0[%c0], %c0: memref<?xindex>, vector<17xindex>
+    vector.print %v : vector<17xindex>
+    return
+  }
+
+  func.func @dumpf(%arg0: memref<?xf32>) {
+    %c0 = arith.constant 0 : index
+    %d0 = arith.constant 0.0 : f32
+    %v = vector.transfer_read %arg0[%c0], %d0: memref<?xf32>, vector<17xf32>
+    vector.print %v : vector<17xf32>
+    return
+  }
+
+  // Returns the indices and values of the tensor.
+  func.func @readTensorFile(%tensor: !TensorReader)
+    -> (memref<?xindex>, memref<?xindex>, memref<?xf32>) {
+    %c0 = arith.constant 0 : index
+    %c1 = arith.constant 1 : index
+
+    %rank = call @getSparseTensorReaderRank(%tensor) : (!TensorReader) -> index
+    %nnz = call @getSparseTensorReaderNNZ(%tensor) : (!TensorReader) -> index
+
+    // Assume rank == 2.
+    %x0s = memref.alloc(%nnz) : memref<?xindex>
+    %x1s = memref.alloc(%nnz) : memref<?xindex>
+    %vs = memref.alloc(%nnz) : memref<?xf32>
+    %indices = memref.alloc(%rank) : memref<?xindex>
+    scf.for %i = %c0 to %nnz step %c1 {
+      %v = func.call @getSparseTensorReaderNextF32(%tensor, %indices)
+        : (!TensorReader, memref<?xindex>) -> f32
+      memref.store %v, %vs[%i] : memref<?xf32>
+      %i0 = memref.load %indices[%c0] : memref<?xindex>
+      memref.store %i0, %x0s[%i] : memref<?xindex>
+      %i1 = memref.load %indices[%c1] : memref<?xindex>
+      memref.store %i1, %x1s[%i] : memref<?xindex>
+    }
+
+    // Release the resource for the indices.
+    memref.dealloc %indices : memref<?xindex>
+    return %x0s, %x1s, %vs : memref<?xindex>, memref<?xindex>, memref<?xf32>
+  }
+
+  // Reads a COO tensor from the given file name and prints its content.
+  func.func @readTensorFileAndDump(%fileName: !Filename) {
+    %tensor = call @createSparseTensorReader(%fileName)
+      : (!Filename) -> (!TensorReader)
+    %rank = call @getSparseTensorReaderRank(%tensor) : (!TensorReader) -> index
+    vector.print %rank : index
+    %nnz = call @getSparseTensorReaderNNZ(%tensor) : (!TensorReader) -> index
+    vector.print %nnz : index
+    %symmetric = call @getSparseTensorReaderIsSymmetric(%tensor)
+      : (!TensorReader) -> i1
+    vector.print %symmetric : i1
+    %dimSizes = memref.alloc(%rank) : memref<?xindex>
+    func.call @getSparseTensorReaderDimSizes(%tensor, %dimSizes)
+      : (!TensorReader, memref<?xindex>) -> ()
+    call @dumpi(%dimSizes) : (memref<?xindex>) -> ()
+    %x0s, %x1s, %vs = call @readTensorFile(%tensor)
+      : (!TensorReader) -> (memref<?xindex>, memref<?xindex>, memref<?xf32>)
+
+    call @dumpi(%x0s) : (memref<?xindex>) -> ()
+    call @dumpi(%x1s) : (memref<?xindex>) -> ()
+    call @dumpf(%vs) : (memref<?xf32>) -> ()
+
+    // Release the resources.
+    call @delSparseTensorReader(%tensor) : (!TensorReader) -> ()
+    memref.dealloc %dimSizes : memref<?xindex>
+    memref.dealloc %x0s : memref<?xindex>
+    memref.dealloc %x1s : memref<?xindex>
+    memref.dealloc %vs : memref<?xf32>
+
+    return
+  }
+
+  // Reads a COO tensor from a file with fileName0 and writes its content to
+  // another file with fileName1.
+  func.func @createTensorFileFrom(%fileName0: !Filename, %fileName1: !Filename) {
+    %c0 = arith.constant 0 : index
+    %c1 = arith.constant 1 : index
+
+    %tensor0 = call @createSparseTensorReader(%fileName0)
+      : (!Filename) -> (!TensorReader)
+    %tensor1 = call @createSparseTensorWriter(%fileName1)
+      : (!Filename) -> (!TensorWriter)
+
+    %rank = call @getSparseTensorReaderRank(%tensor0) : (!TensorReader) -> index
+    %nnz = call @getSparseTensorReaderNNZ(%tensor0) : (!TensorReader) -> index
+    %dimSizes = memref.alloc(%rank) : memref<?xindex>
+    func.call @getSparseTensorReaderDimSizes(%tensor0, %dimSizes)
+      : (!TensorReader, memref<?xindex>) -> ()
+    call @outSparseTensorWriterMetaData(%tensor1, %rank, %nnz, %dimSizes)
+      : (!TensorWriter, index, index, memref<?xindex>) -> ()
+
+    // TODO: handle isSymmetric.
+    // Assume rank == 2.
+    %indices = memref.alloc(%rank) : memref<?xindex>
+    scf.for %i = %c0 to %nnz step %c1 {
+      %v = func.call @getSparseTensorReaderNextF32(%tensor0, %indices)
+        : (!TensorReader, memref<?xindex>) -> f32
+      func.call @outSparseTensorWriterNextF32(%tensor1, %rank, %indices, %v)
+        : (!TensorWriter, index, memref<?xindex>, f32) -> ()
+    }
+
+    // Release the resources.
+    call @delSparseTensorReader(%tensor0) : (!TensorReader) -> ()
+    call @delSparseTensorWriter(%tensor1) : (!TensorWriter) -> ()
+    memref.dealloc %dimSizes : memref<?xindex>
+    memref.dealloc %indices : memref<?xindex>
+
+    return
+  }
+
+  func.func @entry() {
+    %c0 = arith.constant 0 : index
+    %c1 = arith.constant 1 : index
+    %fileName0 = call @getTensorFilename(%c0) : (index) -> (!Filename)
+    %fileName1 = call @getTensorFilename(%c1) : (index) -> (!Filename)
+
+    // Read the sparse tensor data from file through the SparseTensorReader and
+    // print the data.
+    // CHECK: 2
+    // CHECK: 17
+    // CHECK: 0
+    // CHECK: ( 4, 256, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+    // CHECK: ( 0, 0, 0, 0, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 )
+    // CHECK: ( 0, 126, 127, 254, 1, 253, 2, 0, 1, 3, 98, 126, 127, 128, 249, 253, 255 )
+    // CHECK: ( -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15, 16, -17 )
+    call @readTensorFileAndDump(%fileName0) : (!Filename) -> ()
+
+    // Write the sparse tensor data to std::cout through the SparseTensorWriter.
+    // CHECK: # extended FROSTT format
+    // CHECK: 2 17
+    // CHECK: 4 256
+    // CHECK: 1 1 -1
+    // CHECK: 1 127 2
+    // CHECK: 1 128 -3
+    // CHECK: 1 255 4
+    // CHECK: 2 2 -5
+    // CHECK: 2 254 6
+    // CHECK: 3 3 -7
+    // CHECK: 4 1 8
+    // CHECK: 4 2 -9
+    // CHECK: 4 4 10
+    // CHECK: 4 99 -11
+    // CHECK: 4 127 12
+    // CHECK: 4 128 -13
+    // CHECK: 4 129 14
+    // CHECK: 4 250 -15
+    // CHECK: 4 254 16
+    // CHECK: 4 256 -17
+    call @createTensorFileFrom(%fileName0, %fileName1)
+      : (!Filename, !Filename) -> ()
+
+    return
+  }
+}
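For reference, the extended FROSTT layout exchanged by these routines, as produced by the writer above and checked by the test, is: a comment header line, a "rank nnz" line, a line of dimension sizes, and then one line per stored element with one-based indices followed by the value. A small hand-written sample for a hypothetical 2x2x3 tensor with three entries (values made up):

# extended FROSTT format
3 3
2 2 3
1 1 1 5.0
1 2 3 -2.5
2 1 2 4.0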