diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/CMakeLists.txt b/mlir/include/mlir/Dialect/SparseTensor/IR/CMakeLists.txt
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/CMakeLists.txt
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/CMakeLists.txt
@@ -12,4 +12,3 @@
 mlir_tablegen(SparseTensorTypes.h.inc -gen-typedef-decls)
 mlir_tablegen(SparseTensorTypes.cpp.inc -gen-typedef-defs)
 add_public_tablegen_target(MLIRSparseTensorTypesIncGen)
-
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -42,7 +42,6 @@
 /// values with the built-in type "index". For now, we simply assume that
 /// type is 64-bit, but targets with different "index" bitwidths should
 /// link with an alternatively built runtime support library.
-// TODO: support such targets?
 using index_type = uint64_t;
 
 /// Encoding of overhead types (both position overhead and coordinate
@@ -92,11 +91,6 @@
 };
 
 // This x-macro includes all `V` types.
-// TODO: We currently split out the non-variadic version from the variadic
-// version. Using ##__VA_ARGS__ to avoid the split gives
-//   warning: token pasting of ',' and __VA_ARGS__ is a GNU extension
-//   [-Wgnu-zero-variadic-macro-arguments]
-// and __VA_OPT__(, ) __VA_ARGS__ requires c++20.
 #define MLIR_SPARSETENSOR_FOREVERY_V(DO) \
   DO(F64, double)                        \
   DO(F32, float)                         \
@@ -205,7 +199,6 @@
 /// Returns string representation of the given dimension level type.
 constexpr const char *toMLIRString(DimLevelType dlt) {
   switch (dlt) {
-  // TODO: should probably raise an error instead of printing it...
   case DimLevelType::Undef:
     return "undef";
   case DimLevelType::Dense:
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
@@ -153,9 +153,7 @@
     stored with compression while dense storage is used within each block
     (although hybrid schemes are possible as well).
 
-    TODO: the following example is out-of-date and will be implemented
-    in a different manner than described here.
-    (This will be corrected in an upcoming change that completely
+    (The following will be corrected in an upcoming change that completely
     overhauls the syntax of this attribute.)
 
     The dimToLvl mapping also provides a notion of "counting a
@@ -439,11 +437,11 @@
 // Sparse Tensor Sorting Algorithm Attribute.
 //===----------------------------------------------------------------------===//
 
-// TODO: Currently, we only provide four implementations, and expose the
-// implementations via attribute algorithm. In the future, if we will need
-// to support both stable and non-stable quick sort, we may add
-// quick_sort_nonstable enum to the attribute. Alternative, we may use two
-// attributes, (stable|nonstable, algorithm), to specify a sorting
+// Currently, we provide only four implementations, exposed through the
+// algorithm attribute. In the future, if we need to support both stable
+// and non-stable quick sort, we may add a quick_sort_nonstable enum to
+// the attribute. Alternatively, we may use two attributes,
+// (stable|nonstable, algorithm), to specify a sorting
 // implementation.
 //
 // --------------------------------------------------------------------------
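[Reviewer note] The Enums.h hunk above removes a comment attached to the x-macro MLIR_SPARSETENSOR_FOREVERY_V. For readers unfamiliar with the pattern, here is a minimal, self-contained C++ sketch of how one x-macro type list drives multiple expansions; the FOREVERY_V macro and its shortened type list are illustrative stand-ins, not the actual declarations in Enums.h.

    #include <cstdint>
    #include <iostream>

    // The (name, type) list is written once; each use site supplies DO.
    #define FOREVERY_V(DO) \
      DO(F64, double)      \
      DO(F32, float)       \
      DO(I64, int64_t)     \
      DO(I32, int32_t)

    // Expansion 1: declare one enumerator per V type.
    enum class PrimaryType {
    #define DECL(VNAME, V) k##VNAME,
      FOREVERY_V(DECL)
    #undef DECL
    };

    // Expansion 2: print the width of each V type.
    int main() {
    #define PRINT(VNAME, V) std::cout << #VNAME << ": " << sizeof(V) << " bytes\n";
      FOREVERY_V(PRINT)
    #undef PRINT
      return 0;
    }

The removed TODO explained why the real macro avoids variadic forwarding: ##__VA_ARGS__ is a GNU extension, and __VA_OPT__ requires C++20.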
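[Reviewer note] The sorting-attribute comment retained above contrasts stable and non-stable quick sort and floats a two-attribute (stable|nonstable, algorithm) design. The following C++ sketch illustrates that dispatch idea; the enums and the use of std::sort/std::stable_sort are illustrative stand-ins for the runtime's actual sort implementations.

    #include <algorithm>
    #include <vector>

    // Illustrative stand-ins for the two proposed attributes.
    enum class Stability { kStable, kNonStable };
    enum class Algorithm { kQuickSort, kInsertionSort };

    // Dispatch on (stability, algorithm). Only the stability axis is
    // exercised here; a real dispatcher would branch on the algorithm too.
    void sortValues(std::vector<int> &vals, Stability s, Algorithm /*a*/) {
      if (s == Stability::kStable)
        std::stable_sort(vals.begin(), vals.end()); // preserves equal-key order
      else
        std::sort(vals.begin(), vals.end());        // no stability guarantee
    }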
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
@@ -762,14 +762,6 @@
 //===----------------------------------------------------------------------===//
 
 def SparseTensor_SortOp : SparseTensor_Op<"sort", [AttrSizedOperandSegments]>,
-    // TODO: May want to extend tablegen with
-    //   class NonemptyVariadic<Type type> : Variadic<type> { let minSize = 1; }
-    // and then use NonemptyVariadic<...>:$xs here.
-    //
-    // TODO: Currently tablegen doesn't support the assembly syntax when
-    // `algorithm` is an optional enum attribute. We may want to use an optional
-    // enum attribute when this is fixed in tablegen.
-    //
     Arguments<(ins Index:$n,
                Variadic<StridedMemRefRankOf<[AnyInteger, Index], [1]>>:$xs,
                Variadic<StridedMemRefRankOf<[AnyType], [1]>>:$ys,
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorStorageLayout.h b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorStorageLayout.h
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorStorageLayout.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorStorageLayout.h
@@ -107,7 +107,7 @@
 /// encoding.
 class StorageLayout {
 public:
-  // TODO: Functions/methods marked with [NUMFIELDS] might should use
+  // TODO: Functions/methods marked with [NUMFIELDS] should use
   // `FieldIndex` for their return type, via the same reasoning for why
   // `Dimension`/`Level` are used both for identifiers and ranks.
   explicit StorageLayout(const SparseTensorType &stt)
@@ -154,12 +154,12 @@
 // Wrapper functions to invoke StorageLayout-related method.
 //
-// TODO: See note [NUMFIELDS].
+// See note [NUMFIELDS].
 inline unsigned getNumFieldsFromEncoding(SparseTensorEncodingAttr enc) {
   return StorageLayout(enc).getNumFields();
 }
 
-// TODO: See note [NUMFIELDS].
+// See note [NUMFIELDS].
 inline unsigned getNumDataFieldsFromEncoding(SparseTensorEncodingAttr enc) {
   return StorageLayout(enc).getNumDataFields();
 }
 
diff --git a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h
--- a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h
@@ -41,7 +41,6 @@
   kAnyStorageOuterLoop,
   kDenseAnyLoop,
   kAnyStorageAnyLoop
-  // TODO: support reduction parallelization too?
 };
 
 #define GEN_PASS_DECL
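[Reviewer note] The [NUMFIELDS] note kept in SparseTensorStorageLayout.h argues that field-counting functions should return `FieldIndex`, for the same reason `Dimension`/`Level` serve both as identifiers and as ranks. A minimal C++ sketch of that convention follows; the alias and struct are illustrative stand-ins, not the actual MLIR declarations.

    // One alias serves both as an identifier ("field #3") and as a
    // count ("4 fields"), so loop indices and bounds share a type.
    using FieldIndex = unsigned;

    struct LayoutSketch {
      FieldIndex numFields; // a count...
      void forEachField(void (*visit)(FieldIndex)) const {
        for (FieldIndex f = 0; f < numFields; ++f) // ...and identifiers
          visit(f);
      }
    };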