diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
--- a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
@@ -125,9 +125,6 @@
 /// Constructs a new SparseTensorReader object, opens the file, reads the
 /// header, and validates that the actual contents of the file match
 /// the expected `dimShapeRef` and `valTp`.
-//
-// FIXME: update `SparseTensorCodegenPass` to use
-// `_mlir_ciface_createCheckedSparseTensorReader` instead.
 MLIR_CRUNNERUTILS_EXPORT void *_mlir_ciface_createCheckedSparseTensorReader(
     char *filename, StridedMemRefType<index_type, 1> *dimShapeRef,
     PrimaryType valTp);
@@ -142,14 +139,6 @@
     StridedMemRefType<index_type, 1> *dim2lvlRef, OverheadType posTp,
     OverheadType crdTp, PrimaryType valTp);
 
-/// SparseTensorReader method to copy the dimension-sizes into the
-/// provided memref.
-//
-// FIXME: update `SparseTensorCodegenPass` to use
-// `_mlir_ciface_getSparseTensorReaderDimSizes` instead.
-MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_copySparseTensorReaderDimSizes(
-    void *p, StridedMemRefType<index_type, 1> *dref);
-
 /// SparseTensorReader method to obtain direct access to the
 /// dimension-sizes array.
 MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_getSparseTensorReaderDimSizes(
@@ -168,7 +157,7 @@
 /// sorted.
 #define DECL_GETNEXT(VNAME, V, CNAME, C)                                      \
   MLIR_CRUNNERUTILS_EXPORT bool                                               \
-      _mlir_ciface_getSparseTensorReaderRead##CNAME##VNAME(                   \
+      _mlir_ciface_getSparseTensorReaderReadToBuffers##CNAME##VNAME(          \
           void *p, StridedMemRefType<index_type, 1> *dim2lvlRef,             \
           StridedMemRefType<C, 1> *iref, StridedMemRefType<V, 1> *vref);
 MLIR_SPARSETENSOR_FOREVERY_V_O(DECL_GETNEXT)
@@ -286,11 +275,6 @@
 MLIR_SPARSETENSOR_FOREVERY_V(DECL_CONVERTFROMMLIRSPARSETENSOR)
 #undef DECL_CONVERTFROMMLIRSPARSETENSOR
 
-/// Creates a SparseTensorReader for reading a sparse tensor from a file with
-/// the given file name. This opens the file and read the header meta data based
-/// of the sparse tensor format derived from the suffix of the file name.
-MLIR_CRUNNERUTILS_EXPORT void *createSparseTensorReader(char *filename);
-
 /// Returns the rank of the sparse tensor being read.
 MLIR_CRUNNERUTILS_EXPORT index_type getSparseTensorReaderRank(void *p);
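For orientation, the retained checked API can be driven from MLIR through the C interface. The following sketch is illustrative (the function and buffer names are assumptions, not part of this patch): the `i32` argument encodes the expected `PrimaryType` (2 = `kF32`, matching the constant in the codegen test below), and a zero entry in the dim-shape buffer stands for a dynamic dimension whose size is taken from the file.

!Filename = !llvm.ptr<i8>
!TensorReader = !llvm.ptr<i8>

func.func private @createCheckedSparseTensorReader(!Filename, memref<?xindex>, i32)
  -> (!TensorReader) attributes { llvm.emit_c_interface }
func.func private @getSparseTensorReaderDimSizes(!TensorReader)
  -> (memref<?xindex>) attributes { llvm.emit_c_interface }
func.func private @delSparseTensorReader(!TensorReader) -> ()

// Open a rank-2 f32 tensor file, validate it, and print its first dim size.
func.func @peekDims(%fileName: !Filename) {
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %f32Tp = arith.constant 2 : i32  // PrimaryType::kF32
  // Expected dim-shape; zeros accept whatever sizes the file provides.
  %shape = memref.alloca() : memref<2xindex>
  memref.store %c0, %shape[%c0] : memref<2xindex>
  memref.store %c0, %shape[%c1] : memref<2xindex>
  %dynShape = memref.cast %shape : memref<2xindex> to memref<?xindex>
  %reader = call @createCheckedSparseTensorReader(%fileName, %dynShape, %f32Tp)
    : (!Filename, memref<?xindex>, i32) -> (!TensorReader)
  // Direct access: the result aliases the reader's internal dim-sizes array,
  // so no copy is made (unlike the removed copySparseTensorReaderDimSizes).
  %dimSizes = call @getSparseTensorReaderDimSizes(%reader)
    : (!TensorReader) -> (memref<?xindex>)
  %d0 = memref.load %dimSizes[%c0] : memref<?xindex>
  vector.print %d0 : index
  call @delSparseTensorReader(%reader) : (!TensorReader) -> ()
  return
}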
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -1372,7 +1372,7 @@
       return failure();
 
     // Implement the NewOp(filename) as follows:
-    //   %reader = @getSparseTensorReader(%filename)
+    //   %reader = @createCheckedSparseTensorReader(%filename)
    //   %nse = @getSparseTensorNSE(%reader)
    //   %coo = bufferization.alloc_tensor an ordered COO with
    //          dst dim ordering, size_hint = %nse
@@ -1383,15 +1383,23 @@
    //   update storage specifier
    //   @delSparseTensorReader(%reader)
 
-    // Create a sparse tensor reader.
-    const Value fileName = op.getSource();
+    // Allocate `SparseTensorReader` and perform all initial setup that
+    // does not depend on lvlSizes (nor dimToLvl, lvlToDim, etc).
     const Type opaqueTp = getOpaquePointerType(rewriter);
-    // FIXME: use `createCheckedSparseTensorReader` instead, because
-    // `createSparseTensorReader` is unsafe.
-    Value reader = createFuncCall(rewriter, loc, "createSparseTensorReader",
-                                  {opaqueTp}, {fileName}, EmitCInterface::Off)
-                       .getResult(0);
-
+    const Value fileName = op.getSource();
+    SmallVector<Value> dimShapeValues;
+    for (const DynSize sh : dstTp.getDimShape()) {
+      const auto s = ShapedType::isDynamic(sh) ? 0 : sh;
+      dimShapeValues.push_back(constantIndex(rewriter, loc, s));
+    }
+    Value dimShapeBuffer = allocaBuffer(rewriter, loc, dimShapeValues);
+    Value valTp =
+        constantPrimaryTypeEncoding(rewriter, loc, dstTp.getElementType());
+    Value reader =
+        createFuncCall(rewriter, loc, "createCheckedSparseTensorReader",
+                       opaqueTp, {fileName, dimShapeBuffer, valTp},
+                       EmitCInterface::On)
+            .getResult(0);
     const Type indexTp = rewriter.getIndexType();
     const Dimension dimRank = dstTp.getDimRank();
     const Level lvlRank = dstTp.getLvlRank();
@@ -1400,18 +1408,18 @@
     // the sparse tensor reader.
     SmallVector<Value> dynSizes;
     if (dstTp.hasDynamicDimShape()) {
-      // FIXME: call `getSparseTensorReaderDimSizes` instead, because
-      // `copySparseTensorReaderDimSizes` copies the memref over,
-      // instead of just accessing the reader's memory directly.
-      Value dimSizes = genAlloca(rewriter, loc, dimRank, indexTp);
-      createFuncCall(rewriter, loc, "copySparseTensorReaderDimSizes", {},
-                     {reader, dimSizes}, EmitCInterface::On);
+      auto memTp = MemRefType::get({ShapedType::kDynamic}, indexTp);
+      Value dimSizesBuffer =
+          createFuncCall(rewriter, loc, "getSparseTensorReaderDimSizes", memTp,
+                         reader, EmitCInterface::On)
+              .getResult(0);
       for (const auto &d : llvm::enumerate(dstTp.getDimShape()))
         if (ShapedType::isDynamic(d.value()))
           dynSizes.push_back(rewriter.create<memref::LoadOp>(
-              loc, dimSizes, constantIndex(rewriter, loc, d.index())));
+              loc, dimSizesBuffer, constantIndex(rewriter, loc, d.index())));
     }
+
+    // Get the number of stored entries.
     Value nse = createFuncCall(rewriter, loc, "getSparseTensorReaderNSE",
                                {indexTp}, {reader}, EmitCInterface::Off)
                     .getResult(0);
@@ -1422,10 +1430,6 @@
     MutSparseTensorDescriptor desc(dstTp, fields);
 
     // Construct the `dimToLvl` buffer for handing off to the runtime library.
-    // FIXME: This code is (mostly) copied from the SparseTensorConversion.cpp
-    //        handling of `NewOp`, and only handles permutations. Fixing this
-    //        requires waiting for wrengr to finish redoing the CL that handles
-    //        all dim<->lvl stuff more robustly.
     SmallVector<Value> dimToLvlValues(dimRank);
     if (!dstTp.isIdentity()) {
       const auto dimToLvl = dstTp.getDimToLvl();
@@ -1449,9 +1453,7 @@
     const Type boolTp = rewriter.getIntegerType(1);
     const Type elemTp = dstTp.getElementType();
     const Type crdTp = dstTp.getCrdType();
-    // FIXME: This function name is weird; should rename to
-    //        "sparseTensorReaderReadToBuffers".
-    SmallString<32> readToBuffersFuncName{"getSparseTensorReaderRead",
+    SmallString<32> readToBuffersFuncName{"getSparseTensorReaderReadToBuffers",
                                           overheadTypeFunctionSuffix(crdTp),
                                           primaryTypeFunctionSuffix(elemTp)};
     Value isSorted =
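Schematically, the rewritten lowering now emits the following prologue for a dynamically shaped 2-D f32 tensor (hand-condensed from the CHECK lines in the test diff below; SSA names are illustrative):

%c2 = arith.constant 2 : i32              // PrimaryType::kF32
%s0 = memref.alloca() : memref<2xindex>   // expected dim-shape, 0 = dynamic
%s1 = memref.cast %s0 : memref<2xindex> to memref<?xindex>
%reader = call @createCheckedSparseTensorReader(%fname, %s1, %c2)
  : (!llvm.ptr<i8>, memref<?xindex>, i32) -> !llvm.ptr<i8>
%dimSizes = call @getSparseTensorReaderDimSizes(%reader)
  : (!llvm.ptr<i8>) -> memref<?xindex>
%nse = call @getSparseTensorReaderNSE(%reader) : (!llvm.ptr<i8>) -> index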
diff --git a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
--- a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
@@ -605,21 +605,6 @@
   return static_cast<void *>(reader);
 }
 
-// FIXME: update `SparseTensorCodegenPass` to use
-// `_mlir_ciface_getSparseTensorReaderDimSizes` instead.
-void _mlir_ciface_copySparseTensorReaderDimSizes(
-    void *p, StridedMemRefType<index_type, 1> *dimSizesRef) {
-  assert(p);
-  SparseTensorReader &reader = *static_cast<SparseTensorReader *>(p);
-  ASSERT_NO_STRIDE(dimSizesRef);
-  const uint64_t dimRank = MEMREF_GET_USIZE(dimSizesRef);
-  ASSERT_USIZE_EQ(dimSizesRef, reader.getRank());
-  index_type *dimSizes = MEMREF_GET_PAYLOAD(dimSizesRef);
-  const index_type *fileSizes = reader.getDimSizes();
-  for (uint64_t d = 0; d < dimRank; ++d)
-    dimSizes[d] = fileSizes[d];
-}
-
 void _mlir_ciface_getSparseTensorReaderDimSizes(
     StridedMemRefType<index_type, 1> *out, void *p) {
   assert(out && p);
@@ -643,10 +628,8 @@
 MLIR_SPARSETENSOR_FOREVERY_V(IMPL_GETNEXT)
 #undef IMPL_GETNEXT
 
-// FIXME: This function name is weird; should rename to
-// "sparseTensorReaderReadToBuffers".
 #define IMPL_GETNEXT(VNAME, V, CNAME, C)                                      \
-  bool _mlir_ciface_getSparseTensorReaderRead##CNAME##VNAME(                  \
+  bool _mlir_ciface_getSparseTensorReaderReadToBuffers##CNAME##VNAME(         \
       void *p, StridedMemRefType<index_type, 1> *dim2lvlRef,                  \
       StridedMemRefType<C, 1> *cref, StridedMemRefType<V, 1> *vref) {         \
     assert(p);                                                                \
@@ -694,9 +677,6 @@
   const DimLevelType *lvlTypes = MEMREF_GET_PAYLOAD(lvlTypesRef);
   const index_type *lvl2dim = MEMREF_GET_PAYLOAD(lvl2dimRef);
   const index_type *dim2lvl = MEMREF_GET_PAYLOAD(dim2lvlRef);
-  //
-  // FIXME(wrengr): Really need to define a separate x-macro for handling
-  // all this. (Or ideally some better, entirely-different approach)
 #define CASE(p, c, v, P, C, V)                                                \
   if (posTp == OverheadType::p && crdTp == OverheadType::c &&                 \
       valTp == PrimaryType::v)                                                \
@@ -907,15 +887,6 @@
 MLIR_SPARSETENSOR_FOREVERY_V(IMPL_CONVERTFROMMLIRSPARSETENSOR)
 #undef IMPL_CONVERTFROMMLIRSPARSETENSOR
 
-// FIXME: update `SparseTensorCodegenPass` to use
-// `_mlir_ciface_createCheckedSparseTensorReader` instead.
-void *createSparseTensorReader(char *filename) {
-  SparseTensorReader *reader = new SparseTensorReader(filename);
-  reader->openFile();
-  reader->readHeader();
-  return static_cast<void *>(reader);
-}
-
 index_type getSparseTensorReaderRank(void *p) {
   return static_cast<SparseTensorReader *>(p)->getRank();
 }
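The integration test deleted below drove the old `getSparseTensorReaderRead0F32` entry point by hand. Under the new name, the same buffer-filling pattern would look like this sketch (rank-2 f32 with an identity dim2lvl mapping; the declarations mirror those in the deleted test and the function placement is assumed):

!TensorReader = !llvm.ptr<i8>

func.func private @getSparseTensorReaderReadToBuffers0F32(!TensorReader,
  memref<?xindex>, memref<?xindex>, memref<?xf32>)
  -> (i1) attributes { llvm.emit_c_interface }

// Read all entries into preallocated COO buffers; returns whether the
// coordinates in the file were already sorted.
func.func @readAll(%reader: !TensorReader, %nse: index)
    -> (memref<?xindex>, memref<?xf32>, i1) {
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %c2 = arith.constant 2 : index
  %isize = arith.muli %c2, %nse : index
  %xs = memref.alloc(%isize) : memref<?xindex>
  %vs = memref.alloc(%nse) : memref<?xf32>
  %dim2lvl = memref.alloca(%c2) : memref<?xindex>
  memref.store %c0, %dim2lvl[%c0] : memref<?xindex>
  memref.store %c1, %dim2lvl[%c1] : memref<?xindex>
  %isSorted = func.call @getSparseTensorReaderReadToBuffers0F32(
      %reader, %dim2lvl, %xs, %vs)
    : (!TensorReader, memref<?xindex>, memref<?xindex>, memref<?xf32>) -> (i1)
  return %xs, %vs, %isSorted : memref<?xindex>, memref<?xf32>, i1
}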
diff --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir
--- a/mlir/test/Dialect/SparseTensor/codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/codegen.mlir
@@ -685,12 +685,15 @@
 // CHECK-DAG: %[[A2:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[A3:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[A4:.*]] = arith.constant 2 : index
-// CHECK: %[[A5:.*]] = call @createSparseTensorReader(%[[A0]])
-// CHECK: %[[A6:.*]] = memref.alloca() : memref<2xindex>
-// CHECK: %[[A7:.*]] = memref.cast %[[A6]] : memref<2xindex> to memref<?xindex>
-// CHECK: call @copySparseTensorReaderDimSizes(%[[A5]], %[[A7]]) : (!llvm.ptr<i8>, memref<?xindex>) -> ()
-// CHECK: %[[A8:.*]] = memref.load %[[A6]]{{\[}}%[[A3]]] : memref<2xindex>
-// CHECK: %[[A9:.*]] = memref.load %[[A6]]{{\[}}%[[A2]]] : memref<2xindex>
+// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : i32
+// CHECK: %[[D0:.*]] = memref.alloca() : memref<2xindex>
+// CHECK: %[[D1:.*]] = memref.cast %[[D0]] : memref<2xindex> to memref<?xindex>
+// CHECK: memref.store %[[A3]], %[[D0]][%[[A3]]] : memref<2xindex>
+// CHECK: memref.store %[[A3]], %[[D0]][%[[A2]]] : memref<2xindex>
+// CHECK: %[[A5:.*]] = call @createCheckedSparseTensorReader(%[[A0]], %[[D1]], %[[C2]])
+// CHECK: %[[D2:.*]] = call @getSparseTensorReaderDimSizes(%0) : (!llvm.ptr<i8>) -> memref<?xindex>
+// CHECK: %[[A8:.*]] = memref.load %[[D2]]{{\[}}%[[A3]]] : memref<?xindex>
+// CHECK: %[[A9:.*]] = memref.load %[[D2]]{{\[}}%[[A2]]] : memref<?xindex>
 // CHECK: %[[A10:.*]] = call @getSparseTensorReaderNSE(%[[A5]])
 // CHECK: %[[A11:.*]] = arith.muli %[[A10]], %[[A4]] : index
 // CHECK: %[[A12:.*]] = memref.alloc() : memref<2xindex>
@@ -709,7 +712,7 @@
 // CHECK: %[[A32:.*]] = memref.cast %[[A31]] : memref<2xindex> to memref<?xindex>
 // CHECK: memref.store %[[A3]], %[[A31]]{{\[}}%[[A3]]] : memref<2xindex>
 // CHECK: memref.store %[[A2]], %[[A31]]{{\[}}%[[A2]]] : memref<2xindex>
-// CHECK: %[[A33:.*]] = call @getSparseTensorReaderRead0F32(%[[A5]], %[[A32]], %[[A14]], %[[A15]])
+// CHECK: %[[A33:.*]] = call @getSparseTensorReaderReadToBuffers0F32(%[[A5]], %[[A32]], %[[A14]], %[[A15]])
 // CHECK: %[[A34:.*]] = arith.cmpi eq, %[[A33]], %[[A1]] : i1
 // CHECK: scf.if %[[A34]] {
 // CHECK:   sparse_tensor.sort_coo hybrid_quick_sort %[[A10]], %[[A14]] jointly %[[A15]] {nx = 2 : index, ny = 0 : index} : memref<?xindex> jointly memref<?xf32>
@@ -729,12 +732,15 @@
 // CHECK-DAG: %[[A1:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[A2:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[A3:.*]] = arith.constant 2 : index
-// CHECK: %[[A4:.*]] = call @createSparseTensorReader(%[[A0]])
-// CHECK: %[[A5:.*]] = memref.alloca() : memref<2xindex>
-// CHECK: %[[A6:.*]] = memref.cast %[[A5]] : memref<2xindex> to memref<?xindex>
-// CHECK: call @copySparseTensorReaderDimSizes(%[[A4]], %[[A6]])
-// CHECK: %[[A7:.*]] = memref.load %[[A5]]{{\[}}%[[A2]]] : memref<2xindex>
-// CHECK: %[[A8:.*]] = memref.load %[[A5]]{{\[}}%[[A1]]] : memref<2xindex>
+// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : i32
+// CHECK: %[[D0:.*]] = memref.alloca() : memref<2xindex>
+// CHECK: %[[D1:.*]] = memref.cast %[[D0]] : memref<2xindex> to memref<?xindex>
+// CHECK: memref.store %[[A2]], %[[D0]][%[[A2]]] : memref<2xindex>
+// CHECK: memref.store %[[A2]], %[[D0]][%[[A1]]] : memref<2xindex>
+// CHECK: %[[A4:.*]] = call @createCheckedSparseTensorReader(%[[A0]], %[[D1]], %[[C2]])
+// CHECK: %[[D2:.*]] = call @getSparseTensorReaderDimSizes(%0) : (!llvm.ptr<i8>) -> memref<?xindex>
+// CHECK: %[[A7:.*]] = memref.load %[[D2]]{{\[}}%[[A2]]] : memref<?xindex>
+// CHECK: %[[A8:.*]] = memref.load %[[D2]]{{\[}}%[[A1]]] : memref<?xindex>
 // CHECK: %[[A9:.*]] = call @getSparseTensorReaderNSE(%[[A4]])
 // CHECK: %[[A10:.*]] = arith.muli %[[A9]], %[[A3]] : index
 // CHECK: %[[A11:.*]] = memref.alloc() : memref<2xindex>
@@ -753,7 +759,7 @@
 // CHECK: %[[A31:.*]] = memref.cast %[[A30]] : memref<2xindex> to memref<?xindex>
 // CHECK: memref.store %[[A1]], %[[A30]]{{\[}}%[[A2]]] : memref<2xindex>
 // CHECK: memref.store %[[A2]], %[[A30]]{{\[}}%[[A1]]] : memref<2xindex>
-// CHECK: %[[A32:.*]] = call @getSparseTensorReaderRead0F32(%[[A4]], %[[A31]], %[[A13]], %[[A14]])
+// CHECK: %[[A32:.*]] = call @getSparseTensorReaderReadToBuffers0F32(%[[A4]], %[[A31]], %[[A13]], %[[A14]])
 // CHECK: memref.store %[[A9]], %[[A26]]{{\[}}%[[A1]]] : memref<?xindex>
 // CHECK: %[[A34:.*]] = sparse_tensor.storage_specifier.set %[[A29]] crd_mem_sz at 0 with %[[A10]]
 // CHECK: %[[A36:.*]] = sparse_tensor.storage_specifier.set %[[A34]] val_mem_sz with %[[A9]]
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir
deleted file mode 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir
+++ /dev/null
@@ -1,229 +0,0 @@
-//--------------------------------------------------------------------------------------------------
-// WHEN CREATING A NEW TEST, PLEASE JUST COPY & PASTE WITHOUT EDITS.
-//
-// Set-up that's shared across all tests in this directory. In principle, this
-// config could be moved to lit.local.cfg. However, there are downstream users that
-// do not use these LIT config files. Hence why this is kept inline.
-//
-// DEFINE: %{sparse_compiler_opts} = enable-runtime-library=true
-// DEFINE: %{sparse_compiler_opts_sve} = enable-arm-sve=true %{sparse_compiler_opts}
-// DEFINE: %{compile} = mlir-opt %s --sparse-compiler="%{sparse_compiler_opts}"
-// DEFINE: %{compile_sve} = mlir-opt %s --sparse-compiler="%{sparse_compiler_opts_sve}"
-// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
-// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
-// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
-//
-// DEFINE: %{env} =
-//--------------------------------------------------------------------------------------------------
-
-// REDEFINE: %{env} = TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" TENSOR1=""
-// RUN: %{compile} | %{env} %{run} | FileCheck %s
-//
-// Do the same run, but now with direct IR generation.
-// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false
-// RUN: %{compile} | %{env} %{run} | FileCheck %s
-//
-// Do the same run, but now with direct IR generation and vectorization.
-// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true
-// RUN: %{compile} | %{env} %{run} | FileCheck %s
-//
-// Do the same run, but now with direct IR generation and VLA vectorization.
-// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{env} %{run_sve} | FileCheck %s %}
-
-!Filename = !llvm.ptr<i8>
-!TensorReader = !llvm.ptr<i8>
-!TensorWriter = !llvm.ptr<i8>
-
-module {
-
-  func.func private @getTensorFilename(index) -> (!Filename)
-
-  func.func private @createSparseTensorReader(!Filename) -> (!TensorReader)
-  func.func private @delSparseTensorReader(!TensorReader) -> ()
-  func.func private @getSparseTensorReaderRank(!TensorReader) -> (index)
-  func.func private @getSparseTensorReaderNSE(!TensorReader) -> (index)
-  func.func private @getSparseTensorReaderIsSymmetric(!TensorReader) -> (i1)
-  func.func private @copySparseTensorReaderDimSizes(!TensorReader,
-    memref<?xindex>) -> () attributes { llvm.emit_c_interface }
-  func.func private @getSparseTensorReaderRead0F32(!TensorReader,
-    memref<?xindex>, memref<?xindex>, memref<?xf32>)
-    -> (i1) attributes { llvm.emit_c_interface }
-  func.func private @getSparseTensorReaderNextF32(!TensorReader,
-    memref<?xindex>, memref<f32>) -> () attributes { llvm.emit_c_interface }
-
-  func.func private @createSparseTensorWriter(!Filename) -> (!TensorWriter)
-  func.func private @delSparseTensorWriter(!TensorWriter)
-  func.func private @outSparseTensorWriterMetaData(!TensorWriter, index, index,
-    memref<?xindex>) -> () attributes { llvm.emit_c_interface }
-  func.func private @outSparseTensorWriterNextF32(!TensorWriter, index,
-    memref<?xindex>, memref<f32>) -> () attributes { llvm.emit_c_interface }
-
-  func.func @dumpi(%arg0: memref<?xindex>) {
-    %c0 = arith.constant 0 : index
-    %v = vector.transfer_read %arg0[%c0], %c0: memref<?xindex>, vector<17xindex>
-    vector.print %v : vector<17xindex>
-    return
-  }
-
-  func.func @dumpi2(%arg0: memref<?xindex, strided<[?], offset: ?>>) {
-    %c0 = arith.constant 0 : index
-    %v = vector.transfer_read %arg0[%c0], %c0 :
-      memref<?xindex, strided<[?], offset: ?>>, vector<17xindex>
-    vector.print %v : vector<17xindex>
-    return
-  }
-
-  func.func @dumpf(%arg0: memref<?xf32>) {
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant 0.0 : f32
-    %v = vector.transfer_read %arg0[%c0], %d0: memref<?xf32>, vector<17xf32>
-    vector.print %v : vector<17xf32>
-    return
-  }
-
-  // Returns the indices and values of the tensor.
-  func.func @readTensorFile(%tensor: !TensorReader)
-      -> (memref<?xindex>, memref<?xf32>, i1) {
-    %c0 = arith.constant 0 : index
-    %c1 = arith.constant 1 : index
-    %c2 = arith.constant 2 : index
-
-    %rank = call @getSparseTensorReaderRank(%tensor) : (!TensorReader) -> index
-    %nse = call @getSparseTensorReaderNSE(%tensor) : (!TensorReader) -> index
-
-    // Assume rank == 2.
-    %isize = arith.muli %c2, %nse : index
-    %xs = memref.alloc(%isize) : memref<?xindex>
-    %vs = memref.alloc(%nse) : memref<?xf32>
-    %dim2lvl = memref.alloca(%c2) : memref<?xindex>
-    memref.store %c0, %dim2lvl[%c0] : memref<?xindex>
-    memref.store %c1, %dim2lvl[%c1] : memref<?xindex>
-    %isSorted = func.call @getSparseTensorReaderRead0F32(%tensor, %dim2lvl, %xs, %vs)
-      : (!TensorReader, memref<?xindex>, memref<?xindex>, memref<?xf32>) -> (i1)
-    return %xs, %vs, %isSorted : memref<?xindex>, memref<?xf32>, i1
-  }
-
-  // Reads a COO tensor from the given file name and prints its content.
-  func.func @readTensorFileAndDump(%fileName: !Filename) {
-    %c0 = arith.constant 0 : index
-    %c1 = arith.constant 1 : index
-    %c2 = arith.constant 2 : index
-    %tensor = call @createSparseTensorReader(%fileName)
-      : (!Filename) -> (!TensorReader)
-    %rank = call @getSparseTensorReaderRank(%tensor) : (!TensorReader) -> index
-    vector.print %rank : index
-    %nse = call @getSparseTensorReaderNSE(%tensor) : (!TensorReader) -> index
-    vector.print %nse : index
-    %symmetric = call @getSparseTensorReaderIsSymmetric(%tensor)
-      : (!TensorReader) -> i1
-    vector.print %symmetric : i1
-    %dimSizes = memref.alloc(%rank) : memref<?xindex>
-    func.call @copySparseTensorReaderDimSizes(%tensor, %dimSizes)
-      : (!TensorReader, memref<?xindex>) -> ()
-    call @dumpi(%dimSizes) : (memref<?xindex>) -> ()
-
-    %xs, %vs, %isSorted = call @readTensorFile(%tensor)
-      : (!TensorReader) -> (memref<?xindex>, memref<?xf32>, i1)
-    %x0s = memref.subview %xs[%c0][%nse][%c2]
-      : memref<?xindex> to memref<?xindex, strided<[?], offset: ?>>
-    %x1s = memref.subview %xs[%c1][%nse][%c2]
-      : memref<?xindex> to memref<?xindex, strided<[?], offset: ?>>
-    vector.print %isSorted : i1
-    call @dumpi2(%x0s) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
-    call @dumpi2(%x1s) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
-    call @dumpf(%vs) : (memref<?xf32>) -> ()
-
-    // Release the resources.
-    call @delSparseTensorReader(%tensor) : (!TensorReader) -> ()
-    memref.dealloc %dimSizes : memref<?xindex>
-    memref.dealloc %xs : memref<?xindex>
-    memref.dealloc %vs : memref<?xf32>
-
-    return
-  }
-
-  // Reads a COO tensor from a file with fileName0 and writes its content to
-  // another file with fileName1.
-  func.func @createTensorFileFrom(%fileName0: !Filename, %fileName1: !Filename) {
-    %c0 = arith.constant 0 : index
-    %c1 = arith.constant 1 : index
-
-    %tensor0 = call @createSparseTensorReader(%fileName0)
-      : (!Filename) -> (!TensorReader)
-    %tensor1 = call @createSparseTensorWriter(%fileName1)
-      : (!Filename) -> (!TensorWriter)
-
-    %rank = call @getSparseTensorReaderRank(%tensor0) : (!TensorReader) -> index
-    %nse = call @getSparseTensorReaderNSE(%tensor0) : (!TensorReader) -> index
-    %dimSizes = memref.alloc(%rank) : memref<?xindex>
-    func.call @copySparseTensorReaderDimSizes(%tensor0, %dimSizes)
-      : (!TensorReader, memref<?xindex>) -> ()
-    call @outSparseTensorWriterMetaData(%tensor1, %rank, %nse, %dimSizes)
-      : (!TensorWriter, index, index, memref<?xindex>) -> ()
-
-    //TODO: handle isSymmetric.
-    // Assume rank == 2.
-    %indices = memref.alloc(%rank) : memref<?xindex>
-    %value = memref.alloca() : memref<f32>
-    scf.for %i = %c0 to %nse step %c1 {
-      func.call @getSparseTensorReaderNextF32(%tensor0, %indices, %value)
-        : (!TensorReader, memref<?xindex>, memref<f32>) -> ()
-      func.call @outSparseTensorWriterNextF32(%tensor1, %rank, %indices, %value)
-        : (!TensorWriter, index, memref<?xindex>, memref<f32>) -> ()
-    }
-
-    // Release the resources.
-    call @delSparseTensorReader(%tensor0) : (!TensorReader) -> ()
-    call @delSparseTensorWriter(%tensor1) : (!TensorWriter) -> ()
-    memref.dealloc %dimSizes : memref<?xindex>
-    memref.dealloc %indices : memref<?xindex>
-
-    return
-  }
-
-  func.func @entry() {
-    %c0 = arith.constant 0 : index
-    %c1 = arith.constant 1 : index
-    %fileName0 = call @getTensorFilename(%c0) : (index) -> (!Filename)
-    %fileName1 = call @getTensorFilename(%c1) : (index) -> (!Filename)
-
-    // Write the sparse tensor data from file through the SparseTensorReader and
-    // print the data.
-    // CHECK: 2
-    // CHECK: 17
-    // CHECK: 0
-    // CHECK: ( 4, 256, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK: 1
-    // CHECK: ( 0, 0, 0, 0, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 )
-    // CHECK: ( 0, 126, 127, 254, 1, 253, 2, 0, 1, 3, 98, 126, 127, 128, 249, 253, 255 )
-    // CHECK: ( -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15, 16, -17 )
-    call @readTensorFileAndDump(%fileName0) : (!Filename) -> ()
-
-    // Write the sparse tensor data to std::cout through the SparseTensorWriter.
-    // CHECK: # extended FROSTT format
-    // CHECK: 2 17
-    // CHECK: 4 256
-    // CHECK: 1 1 -1
-    // CHECK: 1 127 2
-    // CHECK: 1 128 -3
-    // CHECK: 1 255 4
-    // CHECK: 2 2 -5
-    // CHECK: 2 254 6
-    // CHECK: 3 3 -7
-    // CHECK: 4 1 8
-    // CHECK: 4 2 -9
-    // CHECK: 4 4 10
-    // CHECK: 4 99 -11
-    // CHECK: 4 127 12
-    // CHECK: 4 128 -13
-    // CHECK: 4 129 14
-    // CHECK: 4 250 -15
-    // CHECK: 4 254 16
-    // CHECK: 4 256 -17
-    call @createTensorFileFrom(%fileName0, %fileName1)
-      : (!Filename, !Filename) -> ()
-
-    return
-  }
-}
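With the unsafe helpers gone, file input at the MLIR level goes through `sparse_tensor.new`, which the updated codegen lowers to the checked reader API above. A minimal sketch, assuming a sorted-COO encoding written in the `dimLevelType` syntax of this vintage (the encoding and function name are illustrative):

#SortedCOO = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed-nu", "singleton" ]
}>

// Reading from a file is now expressed with a single op; the prologue shown
// earlier is what this lowers to under direct codegen.
func.func @from_file(%fileName: !llvm.ptr<i8>) -> tensor<?x?xf32, #SortedCOO> {
  %t = sparse_tensor.new %fileName
    : !llvm.ptr<i8> to tensor<?x?xf32, #SortedCOO>
  return %t : tensor<?x?xf32, #SortedCOO>
}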