diff --git a/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp b/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
--- a/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
@@ -112,7 +112,7 @@
 /// vector since that (1) reduces the per-element memory footprint, and
 /// (2) centralizes the memory reservation and (re)allocation to one place.
 template <typename V>
-struct Element {
+struct Element final {
   Element(uint64_t *ind, V val) : indices(ind), value(val){};
   uint64_t *indices; // pointer into shared index pool
   V value;
@@ -131,7 +131,7 @@
 /// by indices before passing it back to the client (most packed storage
 /// formats require the elements to appear in lexicographic index order).
 template <typename V>
-struct SparseTensorCOO {
+struct SparseTensorCOO final {
 public:
   SparseTensorCOO(const std::vector<uint64_t> &szs, uint64_t capacity)
       : sizes(szs) {
@@ -424,7 +424,7 @@
 /// a convenient "one-size-fits-all" solution that simply takes an input tensor
 /// and annotations to implement all required setup in a general manner.
 template <typename P, typename I, typename V>
-class SparseTensorStorage : public SparseTensorStorageBase {
+class SparseTensorStorage final : public SparseTensorStorageBase {
   /// Private constructor to share code between the other constructors.
   /// Beware that the object is not necessarily guaranteed to be in a
   /// valid state after this constructor alone; e.g., `isCompressedDim(d)`
@@ -491,21 +491,21 @@
                       const DimLevelType *sparsity,
                       const SparseTensorStorageBase &tensor);
 
-  ~SparseTensorStorage() override = default;
+  ~SparseTensorStorage() final override = default;
 
   /// Partially specialize these getter methods based on template types.
-  void getPointers(std::vector<P> **out, uint64_t d) override {
+  void getPointers(std::vector<P> **out, uint64_t d) final override {
     assert(d < getRank());
     *out = &pointers[d];
   }
-  void getIndices(std::vector<I> **out, uint64_t d) override {
+  void getIndices(std::vector<I> **out, uint64_t d) final override {
     assert(d < getRank());
     *out = &indices[d];
   }
-  void getValues(std::vector<V> **out) override { *out = &values; }
+  void getValues(std::vector<V> **out) final override { *out = &values; }
 
   /// Partially specialize lexicographical insertions based on template types.
-  void lexInsert(const uint64_t *cursor, V val) override {
+  void lexInsert(const uint64_t *cursor, V val) final override {
     // First, wrap up pending insertion path.
     uint64_t diff = 0;
     uint64_t top = 0;
@@ -522,7 +522,7 @@
   /// Note that this method resets the values/filled-switch array back
   /// to all-zero/false while only iterating over the nonzero elements.
   void expInsert(uint64_t *cursor, V *values, bool *filled, uint64_t *added,
-                 uint64_t count) override {
+                 uint64_t count) final override {
     if (count == 0)
       return;
     // Sort.
@@ -548,7 +548,7 @@
   }
 
   /// Finalizes lexicographic insertions.
-  void endInsert() override {
+  void endInsert() final override {
     if (values.empty())
       finalizeSegment(0);
     else
@@ -556,7 +556,7 @@
   }
 
   void newEnumerator(SparseTensorEnumeratorBase<V> **out, uint64_t rank,
-                     const uint64_t *perm) const override {
+                     const uint64_t *perm) const final override {
     *out = new SparseTensorEnumerator<P, I, V>(*this, rank, perm);
   }
 
@@ -940,7 +940,7 @@
 /// N.B., this class stores references to the parameters passed to
 /// the constructor; thus, objects of this class must not outlive
 /// those parameters.
-class SparseTensorNNZ {
+class SparseTensorNNZ final {
 public:
   /// Allocate the statistics structure for the desired sizes and
   /// sparsity (in the target tensor's storage-order). This constructor
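
For context on what the patch buys: `final` on a class forbids further derivation, and `final` on a virtual member function forbids further overriding, which lets the compiler devirtualize (and potentially inline) such calls whenever the static type is known. Below is a minimal standalone sketch of the idea, not part of the patch; the class names here are hypothetical rather than taken from SparseTensorUtils.cpp.

#include <cstdio>

struct StorageBase {
  virtual ~StorageBase() = default;
  virtual int rank() const = 0;
};

// `final` on the class guarantees no subclass can override rank() any
// further, so calls made through a Storage2D* or Storage2D& may be
// resolved statically instead of through the vtable.
struct Storage2D final : StorageBase {
  int rank() const final override { return 2; }
};

int main() {
  Storage2D s;
  std::printf("%d\n", s.rank());  // devirtualizable: static type is a final class
  StorageBase *b = &s;
  std::printf("%d\n", b->rank()); // still a virtual dispatch in general
  return 0;
}

Note that once a class like SparseTensorStorage is itself declared `final`, the per-method `final` markers are technically redundant; they serve to document intent at each override site.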