diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/COO.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/COO.h
--- a/mlir/include/mlir/ExecutionEngine/SparseTensor/COO.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/COO.h
@@ -53,7 +53,7 @@
 struct ElementLT final {
   ElementLT(uint64_t rank) : rank(rank) {}
 
-  /// Compare two elements a la `operator<`.
+  /// Compares two elements a la `operator<`.
   ///
   /// Precondition: the elements must both be valid for `rank`.
   bool operator()(const Element<V> &e1, const Element<V> &e2) const {
@@ -112,13 +112,13 @@
     return new SparseTensorCOO<V>(permsz, capacity);
   }
 
-  /// Get the rank of the tensor.
+  /// Gets the rank of the tensor.
   uint64_t getRank() const { return dimSizes.size(); }
 
-  /// Get the dimension-sizes array.
+  /// Gets the dimension-sizes array.
   const std::vector<uint64_t> &getDimSizes() const { return dimSizes; }
 
-  /// Get the elements array.
+  /// Gets the elements array.
   const std::vector<Element<V>> &getElements() const { return elements; }
 
   /// Returns the `operator<` closure object for the COO's element type.
@@ -173,14 +173,14 @@
     isSorted = true;
   }
 
-  /// Switch into iterator mode. If already in iterator mode, then
+  /// Switches into iterator mode. If already in iterator mode, then
   /// resets the position to the first element.
   void startIterator() {
     iteratorLocked = true;
     iteratorPos = 0;
   }
 
-  /// Get the next element. If there are no remaining elements, then
+  /// Gets the next element. If there are no remaining elements, then
   /// returns nullptr and switches out of iterator mode.
   ///
   /// Asserts: is in iterator mode.
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h
--- a/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h
@@ -133,10 +133,10 @@
   void assertMatchesShape(uint64_t rank, const uint64_t *shape) const;
 
 private:
-  /// Read the MME header of a general sparse matrix of type real.
+  /// Reads the MME header of a general sparse matrix of type real.
   void readMMEHeader();
 
-  /// Read the "extended" FROSTT header. Although not part of the
+  /// Reads the "extended" FROSTT header. Although not part of the
   /// documented format, we assume that the file starts with optional
   /// comments followed by two lines that define the rank, the number of
   /// nonzeros, and the dimensions sizes (one per rank) of the sparse tensor.
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
--- a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
@@ -83,32 +83,32 @@
   virtual ~SparseTensorStorageBase() = default;
 
-  /// Get the rank of the tensor.
+  /// Gets the rank of the tensor.
   uint64_t getRank() const { return dimSizes.size(); }
 
-  /// Get the dimension-sizes array, in storage-order.
+  /// Gets the dimension-sizes array, in storage-order.
   const std::vector<uint64_t> &getDimSizes() const { return dimSizes; }
 
-  /// Safely lookup the size of the given (storage-order) dimension.
+  /// Safely looks up the size of the given (storage-order) dimension.
   uint64_t getDimSize(uint64_t d) const {
     ASSERT_VALID_DIM(d);
     return dimSizes[d];
   }
 
-  /// Get the "reverse" permutation, which maps this object's
+  /// Gets the "reverse" permutation, which maps this object's
   /// storage-order to the tensor's semantic-order.
   const std::vector<uint64_t> &getRev() const { return rev; }
 
-  /// Get the dimension-types array, in storage-order.
+  /// Gets the dimension-types array, in storage-order.
   const std::vector<DimLevelType> &getDimTypes() const { return dimTypes; }
 
-  /// Safely check if the (storage-order) dimension uses dense storage.
+  /// Safely checks if the (storage-order) dimension uses dense storage.
   bool isDenseDim(uint64_t d) const {
     ASSERT_VALID_DIM(d);
     return dimTypes[d] == DimLevelType::kDense;
   }
 
-  /// Safely check if the (storage-order) dimension uses compressed storage.
+  /// Safely checks if the (storage-order) dimension uses compressed storage.
   bool isCompressedDim(uint64_t d) const {
     ASSERT_VALID_DIM(d);
     switch (dimTypes[d]) {
@@ -122,7 +122,7 @@
     }
   }
 
-  /// Safely check if the (storage-order) dimension uses singleton storage.
+  /// Safely checks if the (storage-order) dimension uses singleton storage.
   bool isSingletonDim(uint64_t d) const {
     ASSERT_VALID_DIM(d);
     switch (dimTypes[d]) {
@@ -136,7 +136,7 @@
     }
   }
 
-  /// Safely check if the (storage-order) dimension is ordered.
+  /// Safely checks if the (storage-order) dimension is ordered.
   bool isOrderedDim(uint64_t d) const {
     ASSERT_VALID_DIM(d);
     switch (dimTypes[d]) {
@@ -150,7 +150,7 @@
     }
   }
 
-  /// Safely check if the (storage-order) dimension is unique.
+  /// Safely checks if the (storage-order) dimension is unique.
   bool isUniqueDim(uint64_t d) const {
     ASSERT_VALID_DIM(d);
     switch (dimTypes[d]) {
@@ -164,26 +164,26 @@
     }
   }
 
-  /// Allocate a new enumerator.
+  /// Allocates a new enumerator.
 #define DECL_NEWENUMERATOR(VNAME, V)                                           \
   virtual void newEnumerator(SparseTensorEnumeratorBase<V> **, uint64_t,       \
                              const uint64_t *) const;
   FOREVERY_V(DECL_NEWENUMERATOR)
 #undef DECL_NEWENUMERATOR
 
-  /// Pointers-overhead storage.
+  /// Gets pointers-overhead storage.
 #define DECL_GETPOINTERS(PNAME, P)                                             \
   virtual void getPointers(std::vector<P> **, uint64_t);
   FOREVERY_FIXED_O(DECL_GETPOINTERS)
 #undef DECL_GETPOINTERS
 
-  /// Indices-overhead storage.
+  /// Gets indices-overhead storage.
 #define DECL_GETINDICES(INAME, I)                                              \
   virtual void getIndices(std::vector<I> **, uint64_t);
   FOREVERY_FIXED_O(DECL_GETINDICES)
 #undef DECL_GETINDICES
 
-  /// Primary storage.
+  /// Gets primary storage.
 #define DECL_GETVALUES(VNAME, V) virtual void getValues(std::vector<V> **);
   FOREVERY_V(DECL_GETVALUES)
 #undef DECL_GETVALUES
@@ -342,7 +342,7 @@
     endPath(0);
   }
 
-  /// Allocate a new enumerator for this classes `<P,I,V>` types and
+  /// Allocates a new enumerator for this class's `<P,I,V>` types and
   /// erase the `<P,I>` parts from the type. Callers must make sure to
   /// delete the enumerator when they're done with it.
   void newEnumerator(SparseTensorEnumeratorBase<V> **out, uint64_t rank,
@@ -476,7 +476,7 @@
     finalizeSegment(d, full);
   }
 
-  /// Finalize the sparse pointer structure at this dimension.
+  /// Finalizes the sparse pointer structure at this dimension.
   void finalizeSegment(uint64_t d, uint64_t full = 0, uint64_t count = 1) {
     if (count == 0)
       return; // Short-circuit, since it'll be a nop.
@@ -698,7 +698,7 @@
 /// those parameters.
 class SparseTensorNNZ final {
 public:
-  /// Allocate the statistics structure for the desired sizes and
+  /// Allocates the statistics structure for the desired sizes and
   /// sparsity (in the target tensor's storage-order). This constructor
   /// does not actually populate the statistics, however; for that see
   /// `initialize`.
@@ -714,7 +714,7 @@
   /// Returns the rank of the target tensor.
   uint64_t getRank() const { return dimSizes.size(); }
 
-  /// Enumerate the source tensor to fill in the statistics. The
+  /// Enumerates the source tensor to fill in the statistics. The
   /// enumerator should already incorporate the permutation (from
   /// semantic-order to the target storage-order).
   template <typename V>
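
Note (not part of the patch): the comments touched above document the storage-order query API on SparseTensorStorageBase, and a short usage sketch may help reviewers see how the renamed accessors fit together. The sketch below assumes the mlir::sparse_tensor namespace used by this runtime library; the dumpDimTypes helper itself is hypothetical and calls only accessors that appear in the hunks above (getRank, getDimSize, isDenseDim, isCompressedDim, isSingletonDim, isOrderedDim, isUniqueDim).

// Hypothetical helper, not from the diff: prints one summary line per
// storage-order dimension using only the SparseTensorStorageBase accessors
// documented in the patch above.
#include "mlir/ExecutionEngine/SparseTensor/Storage.h"

#include <cstdint>
#include <cstdio>

static void dumpDimTypes(const mlir::sparse_tensor::SparseTensorStorageBase &tensor) {
  for (uint64_t d = 0, rank = tensor.getRank(); d < rank; ++d) {
    // Dense, compressed, and singleton are mutually exclusive dimension types.
    const char *kind = tensor.isDenseDim(d)        ? "dense"
                       : tensor.isCompressedDim(d) ? "compressed"
                       : tensor.isSingletonDim(d)  ? "singleton"
                                                   : "unknown";
    std::printf("dim %llu: size=%llu, %s, %s, %s\n",
                static_cast<unsigned long long>(d),
                static_cast<unsigned long long>(tensor.getDimSize(d)), kind,
                tensor.isOrderedDim(d) ? "ordered" : "unordered",
                tensor.isUniqueDim(d) ? "unique" : "non-unique");
  }
}

A real caller would obtain the SparseTensorStorageBase reference from the runtime's own creation routines, which this diff does not touch.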