diff --git a/mlir/lib/ExecutionEngine/SparseUtils.cpp b/mlir/lib/ExecutionEngine/SparseUtils.cpp
--- a/mlir/lib/ExecutionEngine/SparseUtils.cpp
+++ b/mlir/lib/ExecutionEngine/SparseUtils.cpp
@@ -56,18 +56,19 @@
 /// ({i}, a[i])
 /// and a rank-5 tensor element like
 /// ({i,j,k,l,m}, a[i,j,k,l,m])
+template <typename V>
 struct Element {
-  Element(const std::vector<uint64_t> &ind, double val)
-      : indices(ind), value(val){};
+  Element(const std::vector<uint64_t> &ind, V val) : indices(ind), value(val){};
   std::vector<uint64_t> indices;
-  double value;
+  V value;
 };
 
 /// A memory-resident sparse tensor in coordinate scheme (collection of
 /// elements). This data structure is used to read a sparse tensor from
-/// external file format into memory and sort the elements lexicographically
+/// any external format into memory and sort the elements lexicographically
 /// by indices before passing it back to the client (most packed storage
 /// formats require the elements to appear in lexicographic index order).
+template <typename V>
 struct SparseTensor {
 public:
   SparseTensor(const std::vector<uint64_t> &szs, uint64_t capacity)
@@ -75,26 +76,26 @@
     elements.reserve(capacity);
   }
   /// Adds element as indices and value.
-  void add(const std::vector<uint64_t> &ind, double val) {
+  void add(const std::vector<uint64_t> &ind, V val) {
     assert(getRank() == ind.size());
     for (int64_t r = 0, rank = getRank(); r < rank; r++)
       assert(ind[r] < sizes[r]); // within bounds
-    elements.emplace_back(Element(ind, val));
+    elements.emplace_back(Element<V>(ind, val));
   }
   /// Sorts elements lexicographically by index.
   void sort() { std::sort(elements.begin(), elements.end(), lexOrder); }
   /// Primitive one-time iteration.
-  const Element &next() { return elements[pos++]; }
+  const Element<V> &next() { return elements[pos++]; }
   /// Returns rank.
   uint64_t getRank() const { return sizes.size(); }
   /// Getter for sizes array.
   const std::vector<uint64_t> &getSizes() const { return sizes; }
   /// Getter for elements array.
-  const std::vector<Element> &getElements() const { return elements; }
+  const std::vector<Element<V>> &getElements() const { return elements; }
 
 private:
   /// Returns true if indices of e1 < indices of e2.
-  static bool lexOrder(const Element &e1, const Element &e2) {
+  static bool lexOrder(const Element<V> &e1, const Element<V> &e2) {
     assert(e1.indices.size() == e2.indices.size());
     for (int64_t r = 0, rank = e1.indices.size(); r < rank; r++) {
       if (e1.indices[r] == e2.indices[r])
@@ -104,7 +105,7 @@
     return false;
   }
   std::vector<uint64_t> sizes; // per-rank dimension sizes
-  std::vector<Element> elements;
+  std::vector<Element<V>> elements;
   uint64_t pos;
 };
 
@@ -150,12 +151,12 @@
 /// each differently annotated sparse tensor, this method provides a convenient
 /// "one-size-fits-all" solution that simply takes an input tensor and
 /// annotations to implement all required setup in a general manner.
-template <typename P, typename I>
+template <typename P, typename I, typename V>
 class SparseTensorStorage : public SparseTensorStorageBase {
 public:
   /// Constructs sparse tensor storage scheme following the given
   /// per-rank dimension dense/sparse annotations.
-  SparseTensorStorage(SparseTensor *tensor, uint8_t *sparsity)
+  SparseTensorStorage(SparseTensor<V> *tensor, uint8_t *sparsity)
       : sizes(tensor->getSizes()), pointers(getRank()), indices(getRank()) {
     // Provide hints on capacity.
     // TODO: needs fine-tuning based on sparsity
@@ -195,12 +196,12 @@
   /// representation of an external sparse tensor. This method prepares
   /// the pointers and indices arrays under the given per-rank dimension
   /// dense/sparse annotations.
-  void traverse(SparseTensor *tensor, uint8_t *sparsity, uint64_t lo,
+  void traverse(SparseTensor<V> *tensor, uint8_t *sparsity, uint64_t lo,
                 uint64_t hi, uint64_t d) {
-    const std::vector<Element> &elements = tensor->getElements();
+    const std::vector<Element<V>> &elements = tensor->getElements();
     // Once dimensions are exhausted, insert the numerical values.
     if (d == getRank()) {
-      values.push_back(lo < hi ? elements[lo].value : 0.0);
+      values.push_back(lo < hi ? elements[lo].value : 0);
       return;
     }
     // Prepare a sparse pointer structure at this dimension.
@@ -320,8 +321,9 @@
 }
 
 /// Reads a sparse tensor with the given filename into a memory-resident
-/// sparse tensor in coordinate scheme.
-static SparseTensor *openTensor(char *filename, uint64_t *perm) {
+/// sparse tensor in coordinate scheme. The external formats always store
+/// the numerical values with the type double.
+static SparseTensor<double> *openTensor(char *filename, uint64_t *perm) {
   // Open the file.
   FILE *file = fopen(filename, "r");
   if (!file) {
@@ -345,7 +347,7 @@
   std::vector<uint64_t> indices(rank);
   for (uint64_t r = 0; r < rank; r++)
     indices[perm[r]] = idata[2 + r];
-  SparseTensor *tensor = new SparseTensor(indices, nnz);
+  SparseTensor<double> *tensor = new SparseTensor<double>(indices, nnz);
   // Read all nonzero elements.
   for (uint64_t k = 0; k < nnz; k++) {
     uint64_t idx = -1;
@@ -374,10 +376,10 @@
 template <typename P, typename I, typename V>
 void *newSparseTensor(char *filename, uint8_t *sparsity, uint64_t *perm,
                       uint64_t size) {
-  SparseTensor *t = openTensor(filename, perm);
+  SparseTensor<double> *t = openTensor(filename, perm);
   assert(size == t->getRank()); // sparsity array must match rank
   SparseTensorStorageBase *tensor =
-      new SparseTensorStorage<P, I>(t, sparsity);
+      new SparseTensorStorage<P, I, V>(t, sparsity);
   delete t;
   return tensor;
 }
@@ -521,8 +523,6 @@
   exit(1);
 }
 
-#undef CASE
-
 uint64_t sparseDimSize(void *tensor, uint64_t d) {
   return static_cast<SparseTensorStorageBase *>(tensor)->getDimSize(d);
 }
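
Note (not part of the patch): the following is a minimal, self-contained C++ sketch that mirrors the templatized coordinate-scheme structures introduced above, simplified for illustration (no rank getter, no one-time iterator, lambda comparator instead of lexOrder) and exercised with a float value type to show what the value-type parameter V enables. All names and layout choices here are illustrative, not the library's actual API.

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

// Simplified restatement of the patched Element<V>/SparseTensor<V> pair.
template <typename V>
struct Element {
  Element(const std::vector<uint64_t> &ind, V val) : indices(ind), value(val) {}
  std::vector<uint64_t> indices;
  V value;
};

template <typename V>
struct SparseTensor {
  SparseTensor(const std::vector<uint64_t> &szs, uint64_t capacity) : sizes(szs) {
    elements.reserve(capacity);
  }
  // Adds an element; indices must stay within the declared dimension sizes.
  void add(const std::vector<uint64_t> &ind, V val) {
    assert(ind.size() == sizes.size());
    for (uint64_t r = 0; r < sizes.size(); r++)
      assert(ind[r] < sizes[r]); // within bounds
    elements.emplace_back(ind, val);
  }
  // Sorts elements lexicographically by index, as packed formats expect.
  void sort() {
    std::sort(elements.begin(), elements.end(),
              [](const Element<V> &e1, const Element<V> &e2) {
                return e1.indices < e2.indices; // vectors compare lexicographically
              });
  }
  std::vector<uint64_t> sizes;
  std::vector<Element<V>> elements;
};

int main() {
  // A 4x4 matrix in coordinate scheme with float (not double) values,
  // which is exactly the kind of instantiation the value-type
  // templatization in this patch is meant to allow.
  SparseTensor<float> coo({4, 4}, /*capacity=*/3);
  coo.add({3, 1}, 2.0f);
  coo.add({0, 2}, 1.5f);
  coo.add({3, 0}, 4.0f);
  coo.sort(); // elements now ordered (0,2), (3,0), (3,1)
  for (const auto &e : coo.elements)
    std::cout << "(" << e.indices[0] << "," << e.indices[1] << ") = "
              << e.value << "\n";
  return 0;
}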