diff --git a/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp b/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
--- a/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
@@ -154,8 +154,10 @@
                                                 const uint64_t *perm,
                                                 uint64_t capacity = 0) {
     std::vector<uint64_t> permsz(rank);
-    for (uint64_t r = 0; r < rank; r++)
+    for (uint64_t r = 0; r < rank; r++) {
+      assert(sizes[r] > 0 && "Dimension size zero has trivial storage");
       permsz[perm[r]] = sizes[r];
+    }
     return new SparseTensorCOO<V>(permsz, capacity);
   }
 
@@ -293,7 +295,7 @@
   /// Get the rank of the tensor.
   uint64_t getRank() const { return sizes.size(); }
 
-  /// Get the size in the given dimension of the tensor.
+  /// Get the size of the given dimension of the tensor.
   uint64_t getDimSize(uint64_t d) override {
     assert(d < getRank());
     return sizes[d];
@@ -389,20 +391,22 @@
   /// In the latter case, the coordinate scheme must respect the same
   /// permutation as is desired for the new sparse tensor storage.
   static SparseTensorStorage<P, I, V> *
-  newSparseTensor(uint64_t rank, const uint64_t *sizes, const uint64_t *perm,
+  newSparseTensor(uint64_t rank, const uint64_t *shape, const uint64_t *perm,
                   const DimLevelType *sparsity, SparseTensorCOO<V> *tensor) {
     SparseTensorStorage<P, I, V> *n = nullptr;
     if (tensor) {
       assert(tensor->getRank() == rank);
       for (uint64_t r = 0; r < rank; r++)
-        assert(sizes[r] == 0 || tensor->getSizes()[perm[r]] == sizes[r]);
+        assert(shape[r] == 0 || shape[r] == tensor->getSizes()[perm[r]]);
       n = new SparseTensorStorage<P, I, V>(tensor->getSizes(), perm, sparsity,
                                            tensor);
       delete tensor;
     } else {
       std::vector<uint64_t> permsz(rank);
-      for (uint64_t r = 0; r < rank; r++)
-        permsz[perm[r]] = sizes[r];
+      for (uint64_t r = 0; r < rank; r++) {
+        assert(shape[r] > 0 && "Dimension size zero has trivial storage");
+        permsz[perm[r]] = shape[r];
+      }
       n = new SparseTensorStorage<P, I, V>(permsz, perm, sparsity);
     }
     return n;
   }
@@ -658,7 +662,7 @@
 /// sparse tensor in coordinate scheme.
 template <typename V>
 static SparseTensorCOO<V> *openSparseTensorCOO(char *filename, uint64_t rank,
-                                               const uint64_t *sizes,
+                                               const uint64_t *shape,
                                                const uint64_t *perm) {
   // Open the file.
   FILE *file = fopen(filename, "r");
@@ -684,7 +688,7 @@
   assert(rank == idata[0] && "rank mismatch");
   uint64_t nnz = idata[1];
   for (uint64_t r = 0; r < rank; r++)
-    assert((sizes[r] == 0 || sizes[r] == idata[2 + r]) &&
+    assert((shape[r] == 0 || shape[r] == idata[2 + r]) &&
            "dimension size mismatch");
   SparseTensorCOO<V> *tensor =
       SparseTensorCOO<V>::newSparseTensorCOO(rank, idata + 2, perm, nnz);
@@ -846,17 +850,17 @@
     if (action <= Action::kFromCOO) {                                         \
       if (action == Action::kFromFile) {                                      \
         char *filename = static_cast<char *>(ptr);                            \
-        tensor = openSparseTensorCOO<V>(filename, rank, sizes, perm);         \
+        tensor = openSparseTensorCOO<V>(filename, rank, shape, perm);         \
       } else if (action == Action::kFromCOO) {                                \
         tensor = static_cast<SparseTensorCOO<V> *>(ptr);                      \
       } else {                                                                \
         assert(action == Action::kEmpty);                                     \
       }                                                                       \
-      return SparseTensorStorage<P, I, V>::newSparseTensor(rank, sizes, perm, \
+      return SparseTensorStorage<P, I, V>::newSparseTensor(rank, shape, perm, \
                                                            sparsity, tensor); \
     }                                                                         \
     if (action == Action::kEmptyCOO)                                          \
-      return SparseTensorCOO<V>::newSparseTensorCOO(rank, sizes, perm);       \
+      return SparseTensorCOO<V>::newSparseTensorCOO(rank, shape, perm);       \
     tensor = static_cast<SparseTensorStorage<P, I, V> *>(ptr)->toCOO(perm);   \
     if (action == Action::kToIterator) {                                      \
       tensor->startIterator();                                                \
@@ -985,7 +989,7 @@
          pref->strides[0] == 1);
   assert(aref->sizes[0] == sref->sizes[0] && sref->sizes[0] == pref->sizes[0]);
   const DimLevelType *sparsity = aref->data + aref->offset;
-  const index_type *sizes = sref->data + sref->offset;
+  const index_type *shape = sref->data + sref->offset;
   const index_type *perm = pref->data + pref->offset;
   uint64_t rank = aref->sizes[0];
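
Note (not part of the patch): the hunks above rename the raw dimension-size argument to `shape` and assert that no dimension is zero before building the permuted size vector. The standalone C++ sketch below illustrates the `permsz[perm[r]] = shape[r]` mapping and the effect of the new assertion; `permutedSizes` and the sample values are illustrative only and do not appear in SparseTensorUtils.cpp.

#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

// Mirrors the loop used by newSparseTensorCOO/newSparseTensor in the patch:
// storage level perm[r] receives the size of tensor dimension r.
static std::vector<uint64_t> permutedSizes(uint64_t rank, const uint64_t *shape,
                                           const uint64_t *perm) {
  std::vector<uint64_t> permsz(rank);
  for (uint64_t r = 0; r < rank; r++) {
    // A zero-sized dimension means the tensor holds no elements at all, so
    // the patch rejects it instead of silently building trivial storage.
    assert(shape[r] > 0 && "Dimension size zero has trivial storage");
    permsz[perm[r]] = shape[r];
  }
  return permsz;
}

int main() {
  const uint64_t shape[] = {10, 20, 30}; // sizes in dimension order
  const uint64_t perm[] = {2, 0, 1};     // dimension r maps to level perm[r]
  for (uint64_t sz : permutedSizes(3, shape, perm))
    std::cout << sz << ' '; // prints: 20 30 10
  std::cout << '\n';
  return 0;
}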