diff --git a/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp b/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
--- a/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
@@ -149,6 +149,8 @@
   /// the given ordering and expects subsequent add() calls to honor
   /// that same ordering for the given indices. The result is a
   /// fully permuted coordinate scheme.
+  ///
+  /// Precondition: `sizes` and `perm` must be valid for `rank`.
   static SparseTensorCOO<V> *newSparseTensorCOO(uint64_t rank,
                                                 const uint64_t *sizes,
                                                 const uint64_t *perm,
@@ -168,12 +170,65 @@
   unsigned iteratorPos;
 };
 
-/// Abstract base class of sparse tensor storage. Note that we use
-/// function overloading to implement "partial" method specialization.
+/// Abstract base class for `SparseTensorStorage<P,I,V>`. This class
+/// takes responsibility for all the `<P,I,V>`-independent aspects
+/// of the tensor (e.g., shape, sparsity, permutation). In addition,
+/// we use function overloading to implement "partial" method
+/// specialization, which the C-API relies on to catch type errors
+/// arising from our use of opaque pointers.
 class SparseTensorStorageBase {
 public:
-  /// Dimension size query.
-  virtual uint64_t getDimSize(uint64_t) const = 0;
+  /// Constructs a new storage object. The `perm` maps the tensor's
+  /// semantic-ordering of dimensions to this object's storage-order.
+  /// The `szs` and `sparsity` arrays are already in storage-order.
+  ///
+  /// Precondition: `perm` and `sparsity` must be valid for `szs.size()`.
+  SparseTensorStorageBase(const std::vector<uint64_t> &szs,
+                          const uint64_t *perm, const DimLevelType *sparsity)
+      : dimSizes(szs), rev(getRank()),
+        dimTypes(sparsity, sparsity + getRank()) {
+    const uint64_t rank = getRank();
+    // Validate parameters.
+    assert(rank > 0 && "Trivial shape is unsupported");
+    for (uint64_t r = 0; r < rank; r++) {
+      assert(dimSizes[r] > 0 && "Dimension size zero has trivial storage");
+      assert((dimTypes[r] == DimLevelType::kDense ||
+              dimTypes[r] == DimLevelType::kCompressed) &&
+             "Unsupported DimLevelType");
+    }
+    // Construct the "reverse" (i.e., inverse) permutation.
+    for (uint64_t r = 0; r < rank; r++)
+      rev[perm[r]] = r;
+  }
+
+  virtual ~SparseTensorStorageBase() = default;
+
+  /// Get the rank of the tensor.
+  inline uint64_t getRank() const { return dimSizes.size(); }
+
+  /// Getter for the dimension-sizes array, in storage-order.
+  inline const std::vector<uint64_t> &getDimSizes() const { return dimSizes; }
+
+  /// Safely look up the size of the given (storage-order) dimension.
+  inline uint64_t getDimSize(uint64_t d) const {
+    assert(d < getRank());
+    return dimSizes[d];
+  }
+
+  /// Getter for the "reverse" permutation, which maps this object's
+  /// storage-order to the tensor's semantic-order.
+  inline const std::vector<uint64_t> &getRev() const { return rev; }
+
+  /// Getter for the dimension-types array, in storage-order.
+  inline const std::vector<DimLevelType> &getDimTypes() const {
+    return dimTypes;
+  }
+
+  /// Safely check if the (storage-order) dimension uses compressed storage.
+  inline bool isCompressedDim(uint64_t d) const {
+    assert(d < getRank());
+    return (dimTypes[d] == DimLevelType::kCompressed);
+  }
 
   /// Overhead storage.
   virtual void getPointers(std::vector<uint64_t> **, uint64_t) { fatal("p64"); }
@@ -224,13 +279,15 @@
   /// Finishes insertion.
   virtual void endInsert() = 0;
 
-  virtual ~SparseTensorStorageBase() = default;
-
 private:
   static void fatal(const char *tp) {
     fprintf(stderr, "unsupported %s\n", tp);
     exit(1);
   }
+
+  const std::vector<uint64_t> dimSizes;
+  std::vector<uint64_t> rev;
+  const std::vector<DimLevelType> dimTypes;
 };
 
 /// A memory-resident sparse tensor using a storage scheme based on
@@ -245,44 +302,37 @@
   /// Constructs a sparse tensor storage scheme with the given dimensions,
   /// permutation, and per-dimension dense/sparse annotations, using
   /// the coordinate scheme tensor for the initial contents if provided.
+  ///
+  /// Precondition: `perm` and `sparsity` must be valid for `szs.size()`.
   SparseTensorStorage(const std::vector<uint64_t> &szs, const uint64_t *perm,
                       const DimLevelType *sparsity,
-                      SparseTensorCOO<V> *tensor = nullptr)
-      : sizes(szs), rev(getRank()), idx(getRank()), pointers(getRank()),
-        indices(getRank()) {
-    uint64_t rank = getRank();
-    // Store "reverse" permutation.
-    for (uint64_t r = 0; r < rank; r++)
-      rev[perm[r]] = r;
+                      SparseTensorCOO<V> *coo = nullptr)
+      : SparseTensorStorageBase(szs, perm, sparsity), pointers(getRank()),
+        indices(getRank()), idx(getRank()) {
+    const uint64_t rank = getRank();
     // Provide hints on capacity of pointers and indices.
     // TODO: needs fine-tuning based on sparsity
     bool allDense = true;
     uint64_t sz = 1;
     for (uint64_t r = 0; r < rank; r++) {
-      assert(sizes[r] > 0 && "Dimension size zero has trivial storage");
-      assert(sizes[r] <= std::numeric_limits<uint64_t>::max() / sz);
-      sz *= sizes[r];
-      if (sparsity[r] == DimLevelType::kCompressed) {
+      const uint64_t sz_r = getDimSizes()[r];
+      assert(sz_r <= std::numeric_limits<uint64_t>::max() / sz);
+      sz *= sz_r;
+      if (isCompressedDim(r)) {
         pointers[r].reserve(sz + 1);
+        pointers[r].push_back(0);
         indices[r].reserve(sz);
         sz = 1;
         allDense = false;
-        // Prepare the pointer structure. We cannot use `appendPointer`
-        // here, because `isCompressedDim` won't work until after this
-        // preparation has been done.
-        pointers[r].push_back(0);
-      } else {
-        assert(sparsity[r] == DimLevelType::kDense &&
-               "singleton not yet supported");
       }
     }
     // Then assign contents from coordinate scheme tensor if provided.
-    if (tensor) {
+    if (coo) {
       // Ensure both preconditions of `fromCOO`.
-      assert(tensor->getSizes() == sizes && "Tensor size mismatch");
-      tensor->sort();
+      assert(coo->getSizes() == getDimSizes() && "Tensor size mismatch");
+      coo->sort();
       // Now actually insert the `elements`.
-      const std::vector<Element<V>> &elements = tensor->getElements();
+      const std::vector<Element<V>> &elements = coo->getElements();
       uint64_t nnz = elements.size();
       values.reserve(nnz);
       fromCOO(elements, 0, nnz, 0);
@@ -293,15 +343,6 @@
 
   ~SparseTensorStorage() override = default;
 
-  /// Get the rank of the tensor.
-  uint64_t getRank() const { return sizes.size(); }
-
-  /// Get the size of the given dimension of the tensor.
-  uint64_t getDimSize(uint64_t d) const override {
-    assert(d < getRank());
-    return sizes[d];
-  }
-
   /// Partially specialize these getter methods based on template types.
   void getPointers(std::vector<P> **out, uint64_t d) override {
     assert(d < getRank());
@@ -366,14 +407,18 @@
 
   /// Returns this sparse tensor storage scheme as a new memory-resident
   /// sparse tensor in coordinate scheme with the given dimension order.
+  ///
+  /// Precondition: `perm` must be valid for `getRank()`.
   SparseTensorCOO<V> *toCOO(const uint64_t *perm) {
     // Restore original order of the dimension sizes and allocate coordinate
     // scheme with desired new ordering specified in perm.
-    uint64_t rank = getRank();
+    const uint64_t rank = getRank();
+    const auto &rev = getRev();
+    const auto &sizes = getDimSizes();
     std::vector<uint64_t> orgsz(rank);
     for (uint64_t r = 0; r < rank; r++)
      orgsz[rev[r]] = sizes[r];
-    SparseTensorCOO<V> *tensor = SparseTensorCOO<V>::newSparseTensorCOO(
+    SparseTensorCOO<V> *coo = SparseTensorCOO<V>::newSparseTensorCOO(
        rank, orgsz.data(), perm, values.size());
     // Populate coordinate scheme restored from old ordering and changed with
     // new ordering. Rather than applying both reorderings during the recursion,
@@ -381,9 +426,9 @@
     std::vector<uint64_t> reord(rank);
     for (uint64_t r = 0; r < rank; r++)
       reord[r] = perm[rev[r]];
-    toCOO(*tensor, reord, 0, 0);
-    assert(tensor->getElements().size() == values.size());
-    return tensor;
+    toCOO(*coo, reord, 0, 0);
+    assert(coo->getElements().size() == values.size());
+    return coo;
   }
 
   /// Factory method. Constructs a sparse tensor storage scheme with the given
@@ -391,16 +436,18 @@
   /// using the coordinate scheme tensor for the initial contents if provided.
   /// In the latter case, the coordinate scheme must respect the same
   /// permutation as is desired for the new sparse tensor storage.
+  ///
+  /// Precondition: `shape`, `perm`, and `sparsity` must be valid for `rank`.
   static SparseTensorStorage<P, I, V> *
   newSparseTensor(uint64_t rank, const uint64_t *shape, const uint64_t *perm,
-                  const DimLevelType *sparsity, SparseTensorCOO<V> *tensor) {
+                  const DimLevelType *sparsity, SparseTensorCOO<V> *coo) {
     SparseTensorStorage<P, I, V> *n = nullptr;
-    if (tensor) {
-      assert(tensor->getRank() == rank);
+    if (coo) {
+      assert(coo->getRank() == rank && "Tensor rank mismatch");
+      const auto &coosz = coo->getSizes();
       for (uint64_t r = 0; r < rank; r++)
-        assert(shape[r] == 0 || shape[r] == tensor->getSizes()[perm[r]]);
-      n = new SparseTensorStorage<P, I, V>(tensor->getSizes(), perm, sparsity,
-                                           tensor);
+        assert(shape[r] == 0 || shape[r] == coosz[perm[r]]);
+      n = new SparseTensorStorage<P, I, V>(coosz, perm, sparsity, coo);
     } else {
       std::vector<uint64_t> permsz(rank);
       for (uint64_t r = 0; r < rank; r++) {
@@ -501,7 +548,9 @@
       }
     } else { // Dense dimension.
-      for (uint64_t i = 0, sz = sizes[d], off = pos * sz; i < sz; i++) {
+      const uint64_t sz = getDimSizes()[d];
+      const uint64_t off = pos * sz;
+      for (uint64_t i = 0; i < sz; i++) {
         idx[reord[d]] = i;
         toCOO(tensor, reord, off + i, d + 1);
       }
     }
@@ -515,7 +564,7 @@
     if (isCompressedDim(d)) {
       appendPointer(d, indices[d].size(), count);
     } else { // Dense dimension.
-      const uint64_t sz = sizes[d];
+      const uint64_t sz = getDimSizes()[d];
       assert(sz >= full && "Segment is overfull");
       // Assuming we check for overflows in the constructor, then this
       // multiply will never overflow.
@@ -565,19 +614,11 @@
     return -1u;
   }
 
-  /// Returns true if dimension is compressed.
-  inline bool isCompressedDim(uint64_t d) const {
-    assert(d < getRank());
-    return (!pointers[d].empty());
-  }
-
 private:
-  const std::vector<uint64_t> sizes; // per-dimension sizes
-  std::vector<uint64_t> rev;         // "reverse" permutation
-  std::vector<uint64_t> idx;         // index cursor
   std::vector<std::vector<P>> pointers;
   std::vector<std::vector<I>> indices;
   std::vector<V> values;
+  std::vector<uint64_t> idx; // index cursor for lexicographic insertion.
 };
 
 /// Helper to convert string to lower case.
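
For reviewers, a small usage sketch of the refactored interface follows. It is illustrative only and not part of the patch: it exercises `SparseTensorCOO<V>::newSparseTensorCOO`, `SparseTensorStorage<P,I,V>::newSparseTensor`, and `toCOO` as shown in the context above, but the helper name `buildCSRExample` and the exact `add()` signature (an index vector plus a value) are assumptions made for the example, and ownership/cleanup is elided.

// Illustrative sketch; assumes the declarations from SparseTensorUtils.cpp
// (SparseTensorCOO<V>, SparseTensorStorage<P,I,V>, DimLevelType) are in
// scope. `buildCSRExample` is a made-up helper name, and the `add()`
// signature used below is assumed, not taken from the patch.
static SparseTensorStorage<uint64_t, uint64_t, double> *buildCSRExample() {
  // A 3x4 matrix with a dense outer dimension and a compressed inner
  // dimension (i.e., CSR), using the identity permutation so that the
  // semantic-order and the storage-order coincide.
  const uint64_t sizes[2] = {3, 4};
  const uint64_t perm[2] = {0, 1};
  const DimLevelType sparsity[2] = {DimLevelType::kDense,
                                    DimLevelType::kCompressed};
  // Gather the nonzeros in coordinate scheme first.
  SparseTensorCOO<double> *coo =
      SparseTensorCOO<double>::newSparseTensorCOO(2, sizes, perm,
                                                  /*capacity=*/2);
  coo->add({0, 1}, 1.0);
  coo->add({2, 3}, 2.0);
  // Pack the coordinates into the storage scheme; the factory checks the
  // COO sizes against `sizes` under `perm` and sorts before insertion.
  SparseTensorStorage<uint64_t, uint64_t, double> *tensor =
      SparseTensorStorage<uint64_t, uint64_t, double>::newSparseTensor(
          2, sizes, perm, sparsity, coo);
  // Converting back to coordinate scheme would be `tensor->toCOO(perm)`;
  // cleanup of `coo` is elided in this sketch.
  return tensor;
}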