diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/Enums.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/Enums.h --- a/mlir/include/mlir/ExecutionEngine/SparseTensor/Enums.h +++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/Enums.h @@ -67,7 +67,7 @@ // fixed-width. It excludes `index_type` because that type is often // handled specially (e.g., by translating it into the architecture-dependent // equivalent fixed-width overhead type). -#define FOREVERY_FIXED_O(DO) \ +#define MLIR_SPARSETENSOR_FOREVERY_FIXED_O(DO) \ DO(64, uint64_t) \ DO(32, uint32_t) \ DO(16, uint16_t) \ @@ -75,8 +75,8 @@ // This x-macro calls its argument on every overhead type, including // `index_type`. -#define FOREVERY_O(DO) \ - FOREVERY_FIXED_O(DO) \ +#define MLIR_SPARSETENSOR_FOREVERY_O(DO) \ + MLIR_SPARSETENSOR_FOREVERY_FIXED_O(DO) \ DO(0, index_type) // These are not just shorthands but indicate the particular @@ -100,7 +100,7 @@ }; // This x-macro includes all `V` types. -#define FOREVERY_V(DO) \ +#define MLIR_SPARSETENSOR_FOREVERY_V(DO) \ DO(F64, double) \ DO(F32, float) \ DO(F16, f16) \ diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h --- a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h +++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h @@ -131,35 +131,35 @@ #define DECL_NEWENUMERATOR(VNAME, V) \ virtual void newEnumerator(SparseTensorEnumeratorBase **, uint64_t, \ const uint64_t *) const; - FOREVERY_V(DECL_NEWENUMERATOR) + MLIR_SPARSETENSOR_FOREVERY_V(DECL_NEWENUMERATOR) #undef DECL_NEWENUMERATOR /// Pointers-overhead storage. #define DECL_GETPOINTERS(PNAME, P) \ virtual void getPointers(std::vector

<P> **, uint64_t); - FOREVERY_FIXED_O(DECL_GETPOINTERS) + MLIR_SPARSETENSOR_FOREVERY_FIXED_O(DECL_GETPOINTERS) #undef DECL_GETPOINTERS /// Indices-overhead storage. #define DECL_GETINDICES(INAME, I) \ virtual void getIndices(std::vector<I> **, uint64_t); - FOREVERY_FIXED_O(DECL_GETINDICES) + MLIR_SPARSETENSOR_FOREVERY_FIXED_O(DECL_GETINDICES) #undef DECL_GETINDICES /// Primary storage. #define DECL_GETVALUES(VNAME, V) virtual void getValues(std::vector<V> **); - FOREVERY_V(DECL_GETVALUES) + MLIR_SPARSETENSOR_FOREVERY_V(DECL_GETVALUES) #undef DECL_GETVALUES /// Element-wise insertion in lexicographic index order. #define DECL_LEXINSERT(VNAME, V) virtual void lexInsert(const uint64_t *, V); - FOREVERY_V(DECL_LEXINSERT) + MLIR_SPARSETENSOR_FOREVERY_V(DECL_LEXINSERT) #undef DECL_LEXINSERT /// Expanded insertion. #define DECL_EXPINSERT(VNAME, V) \ virtual void expInsert(uint64_t *, V *, bool *, uint64_t *, uint64_t); - FOREVERY_V(DECL_EXPINSERT) + MLIR_SPARSETENSOR_FOREVERY_V(DECL_EXPINSERT) #undef DECL_EXPINSERT /// Finishes insertion. 
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h b/mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h --- a/mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h +++ b/mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h @@ -65,7 +65,7 @@ #define DECL_SPARSEVALUES(VNAME, V) \ MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_sparseValues##VNAME( \ StridedMemRefType *out, void *tensor); -FOREVERY_V(DECL_SPARSEVALUES) +MLIR_SPARSETENSOR_FOREVERY_V(DECL_SPARSEVALUES) #undef DECL_SPARSEVALUES /// Tensor-storage method to obtain direct access to the pointers array @@ -73,7 +73,7 @@ #define DECL_SPARSEPOINTERS(PNAME, P) \ MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_sparsePointers##PNAME( \ StridedMemRefType *out, void *tensor, index_type d); -FOREVERY_O(DECL_SPARSEPOINTERS) +MLIR_SPARSETENSOR_FOREVERY_O(DECL_SPARSEPOINTERS) #undef DECL_SPARSEPOINTERS /// Tensor-storage method to obtain direct access to the indices array @@ -81,7 +81,7 @@ #define DECL_SPARSEINDICES(INAME, I) \ MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_sparseIndices##INAME( \ StridedMemRefType *out, void *tensor, index_type d); -FOREVERY_O(DECL_SPARSEINDICES) +MLIR_SPARSETENSOR_FOREVERY_O(DECL_SPARSEINDICES) #undef DECL_SPARSEINDICES /// Coordinate-scheme method for adding a new element. @@ -90,7 +90,7 @@ void *coo, StridedMemRefType *vref, \ StridedMemRefType *iref, \ StridedMemRefType *pref); -FOREVERY_V(DECL_ADDELT) +MLIR_SPARSETENSOR_FOREVERY_V(DECL_ADDELT) #undef DECL_ADDELT /// Coordinate-scheme method for getting the next element while iterating. @@ -98,7 +98,7 @@ MLIR_CRUNNERUTILS_EXPORT bool _mlir_ciface_getNext##VNAME( \ void *coo, StridedMemRefType *iref, \ StridedMemRefType *vref); -FOREVERY_V(DECL_GETNEXT) +MLIR_SPARSETENSOR_FOREVERY_V(DECL_GETNEXT) #undef DECL_GETNEXT /// Tensor-storage method to insert elements in lexicographical index order. 
@@ -106,7 +106,7 @@ MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_lexInsert##VNAME( \ void *tensor, StridedMemRefType *cref, \ StridedMemRefType *vref); -FOREVERY_V(DECL_LEXINSERT) +MLIR_SPARSETENSOR_FOREVERY_V(DECL_LEXINSERT) #undef DECL_LEXINSERT /// Tensor-storage method to insert using expansion. @@ -115,7 +115,7 @@ void *tensor, StridedMemRefType *cref, \ StridedMemRefType *vref, StridedMemRefType *fref, \ StridedMemRefType *aref, index_type count); -FOREVERY_V(DECL_EXPINSERT) +MLIR_SPARSETENSOR_FOREVERY_V(DECL_EXPINSERT) #undef DECL_EXPINSERT //===----------------------------------------------------------------------===// @@ -138,7 +138,7 @@ #define DECL_OUTSPARSETENSOR(VNAME, V) \ MLIR_CRUNNERUTILS_EXPORT void outSparseTensor##VNAME(void *coo, void *dest, \ bool sort); -FOREVERY_V(DECL_OUTSPARSETENSOR) +MLIR_SPARSETENSOR_FOREVERY_V(DECL_OUTSPARSETENSOR) #undef DECL_OUTSPARSETENSOR /// Releases the memory for the tensor-storage object. @@ -147,7 +147,7 @@ /// Releases the memory for the coordinate-scheme object. 
#define DECL_DELCOO(VNAME, V) \ MLIR_CRUNNERUTILS_EXPORT void delSparseTensorCOO##VNAME(void *coo); -FOREVERY_V(DECL_DELCOO) +MLIR_SPARSETENSOR_FOREVERY_V(DECL_DELCOO) #undef DECL_DELCOO /// Helper function to read a sparse tensor filename from the environment, @@ -183,7 +183,7 @@ MLIR_CRUNNERUTILS_EXPORT void *convertToMLIRSparseTensor##VNAME( \ uint64_t rank, uint64_t nse, uint64_t *shape, V *values, \ uint64_t *indices, uint64_t *perm, uint8_t *sparse); -FOREVERY_V(DECL_CONVERTTOMLIRSPARSETENSOR) +MLIR_SPARSETENSOR_FOREVERY_V(DECL_CONVERTTOMLIRSPARSETENSOR) #undef DECL_CONVERTTOMLIRSPARSETENSOR /// Converts a sparse tensor to COO-flavored format expressed using @@ -202,7 +202,7 @@ MLIR_CRUNNERUTILS_EXPORT void convertFromMLIRSparseTensor##VNAME( \ void *tensor, uint64_t *pRank, uint64_t *pNse, uint64_t **pShape, \ V **pValues, uint64_t **pIndices); -FOREVERY_V(DECL_CONVERTFROMMLIRSPARSETENSOR) +MLIR_SPARSETENSOR_FOREVERY_V(DECL_CONVERTFROMMLIRSPARSETENSOR) #undef DECL_CONVERTFROMMLIRSPARSETENSOR } // extern "C" diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp @@ -79,9 +79,9 @@ } // TODO: Adjust the naming convention for the constructors of -// `OverheadType` so we can use the `FOREVERY_O` x-macro here instead -// of `FOREVERY_FIXED_O`; to further reduce the possibility of typo bugs -// or things getting out of sync. +// `OverheadType` so we can use the `MLIR_SPARSETENSOR_FOREVERY_O` x-macro +// here instead of `MLIR_SPARSETENSOR_FOREVERY_FIXED_O`; to further reduce +// the possibility of typo bugs or things getting out of sync. 
StringRef mlir::sparse_tensor::overheadTypeFunctionSuffix(OverheadType ot) { switch (ot) { case OverheadType::kIndex: @@ -89,7 +89,7 @@ #define CASE(ONAME, O) \ case OverheadType::kU##ONAME: \ return #ONAME; - FOREVERY_FIXED_O(CASE) + MLIR_SPARSETENSOR_FOREVERY_FIXED_O(CASE) #undef CASE } llvm_unreachable("Unknown OverheadType"); @@ -131,7 +131,7 @@ #define CASE(VNAME, V) \ case PrimaryType::k##VNAME: \ return #VNAME; - FOREVERY_V(CASE) + MLIR_SPARSETENSOR_FOREVERY_V(CASE) #undef CASE } llvm_unreachable("Unknown PrimaryType"); diff --git a/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp b/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp --- a/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp +++ b/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp @@ -59,35 +59,35 @@ SparseTensorEnumeratorBase **, uint64_t, const uint64_t *) const { \ FATAL_PIV("newEnumerator" #VNAME); \ } -FOREVERY_V(IMPL_NEWENUMERATOR) +MLIR_SPARSETENSOR_FOREVERY_V(IMPL_NEWENUMERATOR) #undef IMPL_NEWENUMERATOR #define IMPL_GETPOINTERS(PNAME, P) \ void SparseTensorStorageBase::getPointers(std::vector

<P> **, uint64_t) { \ FATAL_PIV("getPointers" #PNAME); \ } -FOREVERY_FIXED_O(IMPL_GETPOINTERS) +MLIR_SPARSETENSOR_FOREVERY_FIXED_O(IMPL_GETPOINTERS) #undef IMPL_GETPOINTERS #define IMPL_GETINDICES(INAME, I) \ void SparseTensorStorageBase::getIndices(std::vector<I> **, uint64_t) { \ FATAL_PIV("getIndices" #INAME); \ } -FOREVERY_FIXED_O(IMPL_GETINDICES) +MLIR_SPARSETENSOR_FOREVERY_FIXED_O(IMPL_GETINDICES) #undef IMPL_GETINDICES #define IMPL_GETVALUES(VNAME, V) \ void SparseTensorStorageBase::getValues(std::vector<V> **) { \ FATAL_PIV("getValues" #VNAME); \ } -FOREVERY_V(IMPL_GETVALUES) +MLIR_SPARSETENSOR_FOREVERY_V(IMPL_GETVALUES) #undef IMPL_GETVALUES #define IMPL_LEXINSERT(VNAME, V) \ void SparseTensorStorageBase::lexInsert(const uint64_t *, V) { \ FATAL_PIV("lexInsert" #VNAME); \ } -FOREVERY_V(IMPL_LEXINSERT) +MLIR_SPARSETENSOR_FOREVERY_V(IMPL_LEXINSERT) #undef IMPL_LEXINSERT #define IMPL_EXPINSERT(VNAME, V) \ @@ -95,7 +95,7 @@ uint64_t) { \ FATAL_PIV("expInsert" #VNAME); \ } -FOREVERY_V(IMPL_EXPINSERT) +MLIR_SPARSETENSOR_FOREVERY_V(IMPL_EXPINSERT) #undef IMPL_EXPINSERT #undef FATAL_PIV diff --git a/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp b/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp --- a/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp +++ b/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp @@ -351,7 +351,7 @@ ref->sizes[0] = v->size(); \ ref->strides[0] = 1; \ } -FOREVERY_V(IMPL_SPARSEVALUES) +MLIR_SPARSETENSOR_FOREVERY_V(IMPL_SPARSEVALUES) #undef IMPL_SPARSEVALUES #define IMPL_GETOVERHEAD(NAME, TYPE, LIB) \ @@ -367,12 +367,12 @@ } #define IMPL_SPARSEPOINTERS(PNAME, P) \ IMPL_GETOVERHEAD(sparsePointers##PNAME, P, getPointers) -FOREVERY_O(IMPL_SPARSEPOINTERS) +MLIR_SPARSETENSOR_FOREVERY_O(IMPL_SPARSEPOINTERS) #undef IMPL_SPARSEPOINTERS #define IMPL_SPARSEINDICES(INAME, I) \ IMPL_GETOVERHEAD(sparseIndices##INAME, I, getIndices) -FOREVERY_O(IMPL_SPARSEINDICES) +MLIR_SPARSETENSOR_FOREVERY_O(IMPL_SPARSEINDICES) #undef IMPL_SPARSEINDICES #undef IMPL_GETOVERHEAD @@ -393,7 
+393,7 @@ static_cast *>(coo)->add(indices, *value); \ return coo; \ } -FOREVERY_V(IMPL_ADDELT) +MLIR_SPARSETENSOR_FOREVERY_V(IMPL_ADDELT) #undef IMPL_ADDELT #define IMPL_GETNEXT(VNAME, V) \ @@ -414,7 +414,7 @@ *value = elem->value; \ return true; \ } -FOREVERY_V(IMPL_GETNEXT) +MLIR_SPARSETENSOR_FOREVERY_V(IMPL_GETNEXT) #undef IMPL_GETNEXT #define IMPL_LEXINSERT(VNAME, V) \ @@ -428,7 +428,7 @@ V *value = vref->data + vref->offset; \ static_cast(tensor)->lexInsert(cursor, *value); \ } -FOREVERY_V(IMPL_LEXINSERT) +MLIR_SPARSETENSOR_FOREVERY_V(IMPL_LEXINSERT) #undef IMPL_LEXINSERT #define IMPL_EXPINSERT(VNAME, V) \ @@ -449,7 +449,7 @@ static_cast(tensor)->expInsert( \ cursor, values, filled, added, count); \ } -FOREVERY_V(IMPL_EXPINSERT) +MLIR_SPARSETENSOR_FOREVERY_V(IMPL_EXPINSERT) #undef IMPL_EXPINSERT //===----------------------------------------------------------------------===// @@ -475,7 +475,7 @@ coo_.sort(); \ return writeExtFROSTT(coo_, static_cast(dest)); \ } -FOREVERY_V(IMPL_OUTSPARSETENSOR) +MLIR_SPARSETENSOR_FOREVERY_V(IMPL_OUTSPARSETENSOR) #undef IMPL_OUTSPARSETENSOR void delSparseTensor(void *tensor) { @@ -486,7 +486,7 @@ void delSparseTensorCOO##VNAME(void *coo) { \ delete static_cast *>(coo); \ } -FOREVERY_V(IMPL_DELCOO) +MLIR_SPARSETENSOR_FOREVERY_V(IMPL_DELCOO) #undef IMPL_DELCOO char *getTensorFilename(index_type id) { @@ -518,7 +518,7 @@ return toMLIRSparseTensor(rank, nse, shape, values, indices, perm, \ reinterpret_cast(sparse)); \ } -FOREVERY_V(IMPL_CONVERTTOMLIRSPARSETENSOR) +MLIR_SPARSETENSOR_FOREVERY_V(IMPL_CONVERTTOMLIRSPARSETENSOR) #undef IMPL_CONVERTTOMLIRSPARSETENSOR #define IMPL_CONVERTFROMMLIRSPARSETENSOR(VNAME, V) \ @@ -529,7 +529,7 @@ static_cast *>(tensor), \ pRank, pNse, pShape, pValues, pIndices); \ } -FOREVERY_V(IMPL_CONVERTFROMMLIRSPARSETENSOR) +MLIR_SPARSETENSOR_FOREVERY_V(IMPL_CONVERTFROMMLIRSPARSETENSOR) #undef IMPL_CONVERTFROMMLIRSPARSETENSOR } // extern "C"