diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
@@ -9,6 +9,7 @@
 #ifndef MLIR_DIALECT_SPARSETENSOR_IR_SPARSETENSOR_H_
 #define MLIR_DIALECT_SPARSETENSOR_IR_SPARSETENSOR_H_
 
+#include "mlir/ExecutionEngine/SparseTensor/Enums.h"
 #include "mlir/IR/BuiltinTypes.h"
 #include "mlir/IR/Dialect.h"
 #include "mlir/IR/OpDefinition.h"
@@ -36,33 +37,47 @@
 // Dimension level types.
 //
 
-bool isDenseDim(SparseTensorEncodingAttr::DimLevelType dltp);
-bool isCompressedDim(SparseTensorEncodingAttr::DimLevelType dltp);
-bool isSingletonDim(SparseTensorEncodingAttr::DimLevelType dltp);
+// Cannot be constexpr, because `getRank` isn't constexpr.  However,
+// for some strange reason, the wrapper functions below don't trigger
+// the same [-Winvalid-constexpr] warning (despite this function not
+// being constexpr).
+inline DimLevelType getDimLevelType(RankedTensorType type, uint64_t d) {
+  assert(d < static_cast<uint64_t>(type.getRank()));
+  if (auto enc = getSparseTensorEncoding(type))
+    return enc.getDimLevelType()[d];
+  return DimLevelType::Dense; // unannotated tensor is dense
+}
 
-/// Convenience method to test for dense dimension (0 <= d < rank).
-bool isDenseDim(RankedTensorType type, uint64_t d);
+/// Convenience function to test for dense dimension (0 <= d < rank).
+constexpr bool isDenseDim(RankedTensorType type, uint64_t d) {
+  return isDenseDLT(getDimLevelType(type, d));
+}
 
-/// Convenience method to test for compressed dimension (0 <= d < rank).
-bool isCompressedDim(RankedTensorType type, uint64_t d);
+/// Convenience function to test for compressed dimension (0 <= d < rank).
+constexpr bool isCompressedDim(RankedTensorType type, uint64_t d) {
+  return isCompressedDLT(getDimLevelType(type, d));
+}
 
-/// Convenience method to test for singleton dimension (0 <= d < rank).
-bool isSingletonDim(RankedTensorType type, uint64_t d);
+/// Convenience function to test for singleton dimension (0 <= d < rank).
+constexpr bool isSingletonDim(RankedTensorType type, uint64_t d) {
+  return isSingletonDLT(getDimLevelType(type, d));
+}
 
 //
 // Dimension level properties.
 //
 
-bool isOrderedDim(SparseTensorEncodingAttr::DimLevelType dltp);
-bool isUniqueDim(SparseTensorEncodingAttr::DimLevelType dltp);
-
-/// Convenience method to test for ordered property in the
+/// Convenience function to test for ordered property in the
 /// given dimension (0 <= d < rank).
-bool isOrderedDim(RankedTensorType type, uint64_t d);
+constexpr bool isOrderedDim(RankedTensorType type, uint64_t d) {
+  return isOrderedDLT(getDimLevelType(type, d));
+}
 
-/// Convenience method to test for unique property in the
+/// Convenience function to test for unique property in the
 /// given dimension (0 <= d < rank).
-bool isUniqueDim(RankedTensorType type, uint64_t d);
+constexpr bool isUniqueDim(RankedTensorType type, uint64_t d) {
+  return isUniqueDLT(getDimLevelType(type, d));
+}
 
 //
 // Reordering.
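Note: the isDenseDLT/isCompressedDLT/isSingletonDLT/isOrderedDLT/isUniqueDLT predicates used by the new wrappers live in the included Enums.h and are not shown in this patch. The following standalone sketch (not the actual Enums.h) illustrates how the unified DimLevelType — whose values appear verbatim in the enum deleted from SparseTensorAttrDefs.td below — encodes the storage format in the high bits and the not-unique/not-ordered properties in the two low bits, so each predicate reduces to a bit test:

    #include <cstdint>

    enum class DimLevelType : uint8_t {
      Dense = 4,           // 0b001_00
      Compressed = 8,      // 0b010_00
      CompressedNu = 9,    // 0b010_01
      CompressedNo = 10,   // 0b010_10
      CompressedNuNo = 11, // 0b010_11
      Singleton = 16,      // 0b100_00
      SingletonNu = 17,    // 0b100_01
      SingletonNo = 18,    // 0b100_10
      SingletonNuNo = 19,  // 0b100_11
    };

    // Mask off the two property bits (Nu = bit 0, No = bit 1) to recover
    // the format; test the property bits directly for ordered/unique.
    constexpr bool isDenseDLT(DimLevelType dlt) {
      return dlt == DimLevelType::Dense;
    }
    constexpr bool isCompressedDLT(DimLevelType dlt) {
      return (static_cast<uint8_t>(dlt) & ~3) == 8; // 0b010_xx
    }
    constexpr bool isSingletonDLT(DimLevelType dlt) {
      return (static_cast<uint8_t>(dlt) & ~3) == 16; // 0b100_xx
    }
    constexpr bool isOrderedDLT(DimLevelType dlt) {
      return !(static_cast<uint8_t>(dlt) & 2); // No bit clear
    }
    constexpr bool isUniqueDLT(DimLevelType dlt) {
      return !(static_cast<uint8_t>(dlt) & 1); // Nu bit clear
    }

    static_assert(isCompressedDLT(DimLevelType::CompressedNuNo) &&
                      !isUniqueDLT(DimLevelType::CompressedNuNo) &&
                      isOrderedDLT(DimLevelType::Singleton),
                  "bit-level encoding behaves as documented");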
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
@@ -145,7 +145,7 @@
     ins
     // A dimension level type for each dimension of the tensor type.
     ArrayRefParameter<
-      "SparseTensorEncodingAttr::DimLevelType",
+      "::mlir::sparse_tensor::DimLevelType",
       "per dimension level type"
       >: $dimLevelType,
     // A dimension order on the indices of this tensor type.
@@ -160,26 +160,6 @@
 
   let genVerifyDecl = 1;
   let hasCustomAssemblyFormat = 1;
-
-  let extraClassDeclaration = [{
-    // Dimension level types. By default, each type has the unique and
-    // ordered properties. Alternatives properties are indicated by
-    // Nu (not-unique) and No (not-ordered).
-    //
-    // TODO: separate type and property in encoding
-    //
-    enum class DimLevelType : uint8_t {
-      Dense = 4,            // 0b001_00
-      Compressed = 8,       // 0b010_00
-      CompressedNu = 9,     // 0b010_01
-      CompressedNo = 10,    // 0b010_10
-      CompressedNuNo = 11,  // 0b010_11
-      Singleton = 16,       // 0b100_00
-      SingletonNu = 17,     // 0b100_01
-      SingletonNo = 18,     // 0b100_10
-      SingletonNuNo = 19,   // 0b100_11
-    };
-  }];
 }
 
 def IsSparseTensorPred
diff --git a/mlir/lib/CAPI/Dialect/SparseTensor.cpp b/mlir/lib/CAPI/Dialect/SparseTensor.cpp
--- a/mlir/lib/CAPI/Dialect/SparseTensor.cpp
+++ b/mlir/lib/CAPI/Dialect/SparseTensor.cpp
@@ -22,31 +22,23 @@
 // Ensure the C-API enums are int-castable to C++ equivalents.
 static_assert(
     static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_DENSE) ==
-            static_cast<int>(SparseTensorEncodingAttr::DimLevelType::Dense) &&
+            static_cast<int>(DimLevelType::Dense) &&
         static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED) ==
-            static_cast<int>(
-                SparseTensorEncodingAttr::DimLevelType::Compressed) &&
+            static_cast<int>(DimLevelType::Compressed) &&
         static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU) ==
-            static_cast<int>(
-                SparseTensorEncodingAttr::DimLevelType::CompressedNu) &&
+            static_cast<int>(DimLevelType::CompressedNu) &&
        static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NO) ==
-            static_cast<int>(
-                SparseTensorEncodingAttr::DimLevelType::CompressedNo) &&
+            static_cast<int>(DimLevelType::CompressedNo) &&
         static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU_NO) ==
-            static_cast<int>(
-                SparseTensorEncodingAttr::DimLevelType::CompressedNuNo) &&
+            static_cast<int>(DimLevelType::CompressedNuNo) &&
         static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON) ==
-            static_cast<int>(
-                SparseTensorEncodingAttr::DimLevelType::Singleton) &&
+            static_cast<int>(DimLevelType::Singleton) &&
         static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU) ==
-            static_cast<int>(
-                SparseTensorEncodingAttr::DimLevelType::SingletonNu) &&
+            static_cast<int>(DimLevelType::SingletonNu) &&
         static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NO) ==
-            static_cast<int>(
-                SparseTensorEncodingAttr::DimLevelType::SingletonNo) &&
+            static_cast<int>(DimLevelType::SingletonNo) &&
         static_cast<int>(MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU_NO) ==
-            static_cast<int>(
-                SparseTensorEncodingAttr::DimLevelType::SingletonNuNo),
+            static_cast<int>(DimLevelType::SingletonNuNo),
     "MlirSparseTensorDimLevelType (C-API) and DimLevelType (C++) mismatch");
 
 bool mlirAttributeIsASparseTensorEncodingAttr(MlirAttribute attr) {
@@ -58,11 +50,10 @@
     MlirSparseTensorDimLevelType const *dimLevelTypes,
     MlirAffineMap dimOrdering, MlirAffineMap higherOrdering,
     int pointerBitWidth, int indexBitWidth) {
-  SmallVector<SparseTensorEncodingAttr::DimLevelType> cppDimLevelTypes;
+  SmallVector<DimLevelType> cppDimLevelTypes;
   cppDimLevelTypes.resize(numDimLevelTypes);
   for (intptr_t i = 0; i < numDimLevelTypes; ++i)
-    cppDimLevelTypes[i] =
-        static_cast<SparseTensorEncodingAttr::DimLevelType>(dimLevelTypes[i]);
+    cppDimLevelTypes[i] = static_cast<DimLevelType>(dimLevelTypes[i]);
   return wrap(SparseTensorEncodingAttr::get(
       unwrap(ctx), cppDimLevelTypes, unwrap(dimOrdering),
       unwrap(higherOrdering), pointerBitWidth, indexBitWidth));
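Note: the static_assert block above exists so the C API can convert between the two enums with a plain cast. A minimal self-contained sketch of that mirrored-enum pattern (hypothetical names, not the MLIR headers):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // C-side plain enum, as a C header would define it.
    enum MyCDimLevelType { MY_C_DIM_LEVEL_DENSE = 4, MY_C_DIM_LEVEL_COMPRESSED = 8 };
    // C++-side scoped enum with identical underlying values.
    enum class DimLevelType : uint8_t { Dense = 4, Compressed = 8 };

    // Pin the two enums together at compile time; if either side drifts,
    // the build breaks here instead of miscasting at runtime.
    static_assert(static_cast<int>(MY_C_DIM_LEVEL_DENSE) ==
                          static_cast<int>(DimLevelType::Dense) &&
                      static_cast<int>(MY_C_DIM_LEVEL_COMPRESSED) ==
                          static_cast<int>(DimLevelType::Compressed),
                  "C and C++ dim level enums mismatch");

    // With the values pinned, conversion is a bare static_cast per element,
    // exactly as in mlirSparseTensorEncodingAttrGet above.
    std::vector<DimLevelType> toCpp(const MyCDimLevelType *dlts, size_t n) {
      std::vector<DimLevelType> out(n);
      for (size_t i = 0; i < n; ++i)
        out[i] = static_cast<DimLevelType>(dlts[i]);
      return out;
    }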
diff --git a/mlir/lib/Dialect/SparseTensor/IR/CMakeLists.txt b/mlir/lib/Dialect/SparseTensor/IR/CMakeLists.txt
--- a/mlir/lib/Dialect/SparseTensor/IR/CMakeLists.txt
+++ b/mlir/lib/Dialect/SparseTensor/IR/CMakeLists.txt
@@ -14,4 +14,5 @@
   MLIRIR
   MLIRInferTypeOpInterface
   MLIRSupport
+  MLIRSparseTensorRuntime
 )
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -49,7 +49,7 @@
   if (failed(parser.parseGreater()))
     return {};
   // Process the data from the parsed dictionary value into struct-like data.
-  SmallVector<SparseTensorEncodingAttr::DimLevelType> dlt;
+  SmallVector<DimLevelType> dlt;
   AffineMap dimOrd = {};
   AffineMap higherOrd = {};
   unsigned ptr = 0;
@@ -71,23 +71,23 @@
       }
       auto strVal = strAttr.getValue();
       if (strVal == "dense") {
-        dlt.push_back(SparseTensorEncodingAttr::DimLevelType::Dense);
+        dlt.push_back(DimLevelType::Dense);
       } else if (strVal == "compressed") {
-        dlt.push_back(SparseTensorEncodingAttr::DimLevelType::Compressed);
+        dlt.push_back(DimLevelType::Compressed);
       } else if (strVal == "compressed-nu") {
-        dlt.push_back(SparseTensorEncodingAttr::DimLevelType::CompressedNu);
+        dlt.push_back(DimLevelType::CompressedNu);
       } else if (strVal == "compressed-no") {
-        dlt.push_back(SparseTensorEncodingAttr::DimLevelType::CompressedNo);
+        dlt.push_back(DimLevelType::CompressedNo);
       } else if (strVal == "compressed-nu-no") {
-        dlt.push_back(SparseTensorEncodingAttr::DimLevelType::CompressedNuNo);
+        dlt.push_back(DimLevelType::CompressedNuNo);
       } else if (strVal == "singleton") {
-        dlt.push_back(SparseTensorEncodingAttr::DimLevelType::Singleton);
+        dlt.push_back(DimLevelType::Singleton);
       } else if (strVal == "singleton-nu") {
-        dlt.push_back(SparseTensorEncodingAttr::DimLevelType::SingletonNu);
+        dlt.push_back(DimLevelType::SingletonNu);
       } else if (strVal == "singleton-no") {
-        dlt.push_back(SparseTensorEncodingAttr::DimLevelType::SingletonNo);
+        dlt.push_back(DimLevelType::SingletonNo);
       } else if (strVal == "singleton-nu-no") {
-        dlt.push_back(SparseTensorEncodingAttr::DimLevelType::SingletonNuNo);
+        dlt.push_back(DimLevelType::SingletonNuNo);
       } else {
         parser.emitError(parser.getNameLoc(),
                          "unexpected dimension level type: ")
@@ -258,98 +258,6 @@
   return nullptr;
 }
 
-bool mlir::sparse_tensor::isDenseDim(
-    SparseTensorEncodingAttr::DimLevelType dltp) {
-  return dltp == SparseTensorEncodingAttr::DimLevelType::Dense;
-}
-
-bool mlir::sparse_tensor::isCompressedDim(
-    SparseTensorEncodingAttr::DimLevelType dltp) {
-  switch (dltp) {
-  case SparseTensorEncodingAttr::DimLevelType::Compressed:
-  case SparseTensorEncodingAttr::DimLevelType::CompressedNu:
-  case SparseTensorEncodingAttr::DimLevelType::CompressedNo:
-  case SparseTensorEncodingAttr::DimLevelType::CompressedNuNo:
-    return true;
-  default:
-    return false;
-  }
-}
-
-bool mlir::sparse_tensor::isSingletonDim(
-    SparseTensorEncodingAttr::DimLevelType dltp) {
-  switch (dltp) {
-  case SparseTensorEncodingAttr::DimLevelType::Singleton:
-  case SparseTensorEncodingAttr::DimLevelType::SingletonNu:
-  case SparseTensorEncodingAttr::DimLevelType::SingletonNo:
-  case SparseTensorEncodingAttr::DimLevelType::SingletonNuNo:
-    return true;
-  default:
-    return false;
-  }
-}
-
-bool mlir::sparse_tensor::isDenseDim(RankedTensorType type, uint64_t d) {
-  assert(d < static_cast<uint64_t>(type.getRank()));
-  if (auto enc = getSparseTensorEncoding(type))
-    return isDenseDim(enc.getDimLevelType()[d]);
-  return true; // unannotated tensor is dense
-}
-
-bool mlir::sparse_tensor::isCompressedDim(RankedTensorType type, uint64_t d) {
-  assert(d < static_cast<uint64_t>(type.getRank()));
-  if (auto enc = getSparseTensorEncoding(type))
-    return isCompressedDim(enc.getDimLevelType()[d]);
-  return false; // unannotated tensor is dense
-}
-
-bool mlir::sparse_tensor::isSingletonDim(RankedTensorType type, uint64_t d) {
-  assert(d < static_cast<uint64_t>(type.getRank()));
-  if (auto enc = getSparseTensorEncoding(type))
-    return isSingletonDim(enc.getDimLevelType()[d]);
-  return false; // unannotated tensor is dense
-}
-
-bool mlir::sparse_tensor::isOrderedDim(
-    SparseTensorEncodingAttr::DimLevelType dltp) {
-  switch (dltp) {
-  case SparseTensorEncodingAttr::DimLevelType::CompressedNo:
-  case SparseTensorEncodingAttr::DimLevelType::CompressedNuNo:
-  case SparseTensorEncodingAttr::DimLevelType::SingletonNo:
-  case SparseTensorEncodingAttr::DimLevelType::SingletonNuNo:
-    return false;
-  default:
-    return true;
-  }
-}
-
-bool mlir::sparse_tensor::isUniqueDim(
-    SparseTensorEncodingAttr::DimLevelType dltp) {
-  switch (dltp) {
-  case SparseTensorEncodingAttr::DimLevelType::CompressedNu:
-  case SparseTensorEncodingAttr::DimLevelType::CompressedNuNo:
-  case SparseTensorEncodingAttr::DimLevelType::SingletonNu:
-  case SparseTensorEncodingAttr::DimLevelType::SingletonNuNo:
-    return false;
-  default:
-    return true;
-  }
-}
-
-bool mlir::sparse_tensor::isOrderedDim(RankedTensorType type, uint64_t d) {
-  assert(d < static_cast<uint64_t>(type.getRank()));
-  if (auto enc = getSparseTensorEncoding(type))
-    return isOrderedDim(enc.getDimLevelType()[d]);
-  return true; // unannotated tensor is dense (and thus ordered)
-}
-
-bool mlir::sparse_tensor::isUniqueDim(RankedTensorType type, uint64_t d) {
-  assert(d < static_cast<uint64_t>(type.getRank()));
-  if (auto enc = getSparseTensorEncoding(type))
-    return isUniqueDim(enc.getDimLevelType()[d]);
-  return true; // unannotated tensor is dense (and thus unique)
-}
-
 uint64_t mlir::sparse_tensor::toOrigDim(const SparseTensorEncodingAttr &enc,
                                         uint64_t d) {
   if (enc) {
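Note: the parser keyword mapping above is a plain string-to-enum table. An equivalent compact formulation using llvm::StringSwitch, for illustration only — the patch itself keeps the if/else chain, and parseDLT is a hypothetical helper (assumes the unified DimLevelType from Enums.h is in scope):

    #include "llvm/ADT/StringSwitch.h"
    #include <optional>

    static std::optional<DimLevelType> parseDLT(llvm::StringRef str) {
      return llvm::StringSwitch<std::optional<DimLevelType>>(str)
          .Case("dense", DimLevelType::Dense)
          .Case("compressed", DimLevelType::Compressed)
          .Case("compressed-nu", DimLevelType::CompressedNu)
          .Case("compressed-no", DimLevelType::CompressedNo)
          .Case("compressed-nu-no", DimLevelType::CompressedNuNo)
          .Case("singleton", DimLevelType::Singleton)
          .Case("singleton-nu", DimLevelType::SingletonNu)
          .Case("singleton-no", DimLevelType::SingletonNo)
          .Case("singleton-nu-no", DimLevelType::SingletonNuNo)
          .Default(std::nullopt); // caller emits "unexpected dimension level type"
    }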
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
@@ -135,7 +135,7 @@
   /// Input (TODO: and output) tensors.
   std::vector<Value> tensors;
   /// The dim type array for each tensor.
-  std::vector<std::vector<SparseTensorEncodingAttr::DimLevelType>> dims;
+  std::vector<std::vector<DimLevelType>> dims;
   /// Sparse iteration information (by tensor and dim). These arrays
   /// are updated to remain current within the current loop.
   std::vector<std::vector<Value>> pidxs;
@@ -197,9 +197,6 @@
 /// Converts a primary storage type to its function-name suffix.
 StringRef primaryTypeFunctionSuffix(Type elemTp);
 
-/// Converts the IR's dimension level type to its internal type-encoding.
-DimLevelType dimLevelTypeEncoding(SparseTensorEncodingAttr::DimLevelType dlt);
-
 //===----------------------------------------------------------------------===//
 // Misc code generators and utilities.
 //===----------------------------------------------------------------------===//
@@ -345,11 +342,9 @@
 }
 
 /// Generates a constant of the internal dimension level type encoding.
-inline Value
-constantDimLevelTypeEncoding(OpBuilder &builder, Location loc,
-                             SparseTensorEncodingAttr::DimLevelType dlt) {
-  return constantI8(builder, loc,
-                    static_cast<uint8_t>(dimLevelTypeEncoding(dlt)));
+inline Value constantDimLevelTypeEncoding(OpBuilder &builder, Location loc,
+                                          DimLevelType dlt) {
+  return constantI8(builder, loc, static_cast<uint8_t>(dlt));
 }
 
 } // namespace sparse_tensor
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
@@ -63,7 +63,7 @@
       for (auto dimTp : enc.getDimLevelType())
         dims[i].push_back(dimTp);
     else
-      dims[i].assign(rank, SparseTensorEncodingAttr::DimLevelType::Dense);
+      dims[i].assign(rank, DimLevelType::Dense);
 
     // Initialize using empty value.
     pidxs[i].assign(rank, Value());
@@ -94,7 +94,7 @@
     assert(!ptrBuffer[t][d] && !idxBuffer[t][d] && !sizes[t][d] &&
            !highs[t][d]);
     // Handle sparse storage schemes.
-    if (isCompressedDim(dims[t][d])) {
+    if (isCompressedDLT(dims[t][d])) {
       auto ptrTp =
           MemRefType::get(dynShape, getPointerOverheadType(builder, enc));
       auto indTp =
@@ -103,7 +103,7 @@
           MemRefType::get(dynShape, getIndexOverheadType(builder, enc));
       // Generate sparse primitives to obtain pointer and indices.
       ptrBuffer[t][d] = builder.create<ToPointersOp>(loc, ptrTp, tensor, dim);
       idxBuffer[t][d] = builder.create<ToIndicesOp>(loc, indTp, tensor, dim);
-    } else if (isSingletonDim(dims[t][d])) {
+    } else if (isSingletonDLT(dims[t][d])) {
       // Singleton dimension, fetch indices.
       auto indTp =
           MemRefType::get(dynShape, getIndexOverheadType(builder, enc));
@@ -111,7 +111,7 @@
       idxBuffer[t][d] = builder.create<ToIndicesOp>(loc, indTp, tensor, dim);
     } else {
       // Dense dimension, nothing to fetch.
-      assert(isDenseDim(dims[t][d]));
+      assert(isDenseDLT(dims[t][d]));
     }
 
     // Find upper bound in current dimension.
@@ -151,9 +151,9 @@
   assert(!coord[tid][dim]);
   Value step = constantIndex(builder, loc, 1);
   auto dimType = dims[tid][dim];
-  bool isSparse = isCompressedDim(dimType) || isSingletonDim(dimType);
-  assert(isDenseDim(dimType) || isCompressedDim(dimType) ||
-         isSingletonDim(dimType));
+  bool isSparse = isCompressedDLT(dimType) || isSingletonDLT(dimType);
+  assert(isDenseDLT(dimType) || isCompressedDLT(dimType) ||
+         isSingletonDLT(dimType));
 
   Value lo = isSparse ? pidxs[tid][dim] : constantIndex(builder, loc, 0);
   Value hi = highs[tid][dim];
@@ -208,14 +208,14 @@
   assert(dims[tid].size() > dim);
   auto dimType = dims[tid][dim];
 
-  if (isDenseDim(dimType))
+  if (isDenseDLT(dimType))
     return false;
 
   // Either the first dimension, or the previous dimension has been set.
   assert(dim == 0 || pidxs[tid][dim - 1]);
   Value c0 = constantIndex(builder, loc, 0);
   Value c1 = constantIndex(builder, loc, 1);
-  if (isCompressedDim(dimType)) {
+  if (isCompressedDLT(dimType)) {
     Value ptr = ptrBuffer[tid][dim];
 
     Value pLo = dim == 0 ? c0 : pidxs[tid][dim - 1];
@@ -225,7 +225,7 @@
     highs[tid][dim] = genIndexLoad(builder, loc, ptr, pHi);
     return true;
   }
-  if (isSingletonDim(dimType)) {
+  if (isSingletonDLT(dimType)) {
     Value pLo = dim == 0 ? c0 : pidxs[tid][dim - 1];
     Value pHi = builder.create<arith::AddIOp>(loc, pLo, c1);
@@ -254,7 +254,7 @@
     // Reset to null.
     pidxs[tid][dim] = Value();
     coord[tid][dim] = Value();
-    if (!isDenseDim(dims[tid][dim]))
+    if (!isDenseDLT(dims[tid][dim]))
       // Dense dimension, high is fixed.
       highs[tid][dim] = Value();
 }
@@ -388,31 +388,6 @@
   return primaryTypeFunctionSuffix(primaryTypeEncoding(elemTp));
 }
 
-DimLevelType mlir::sparse_tensor::dimLevelTypeEncoding(
-    SparseTensorEncodingAttr::DimLevelType dlt) {
-  switch (dlt) {
-  case SparseTensorEncodingAttr::DimLevelType::Dense:
-    return DimLevelType::Dense;
-  case SparseTensorEncodingAttr::DimLevelType::Compressed:
-    return DimLevelType::Compressed;
-  case SparseTensorEncodingAttr::DimLevelType::CompressedNu:
-    return DimLevelType::CompressedNu;
-  case SparseTensorEncodingAttr::DimLevelType::CompressedNo:
-    return DimLevelType::CompressedNo;
-  case SparseTensorEncodingAttr::DimLevelType::CompressedNuNo:
-    return DimLevelType::CompressedNuNo;
-  case SparseTensorEncodingAttr::DimLevelType::Singleton:
-    return DimLevelType::Singleton;
-  case SparseTensorEncodingAttr::DimLevelType::SingletonNu:
-    return DimLevelType::SingletonNu;
-  case SparseTensorEncodingAttr::DimLevelType::SingletonNo:
-    return DimLevelType::SingletonNo;
-  case SparseTensorEncodingAttr::DimLevelType::SingletonNuNo:
-    return DimLevelType::SingletonNuNo;
-  }
-  llvm_unreachable("Unknown SparseTensorEncodingAttr::DimLevelType");
-}
-
 //===----------------------------------------------------------------------===//
 // Misc code generators.
 //===----------------------------------------------------------------------===//
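Note: the loop-emitter changes above all follow one dispatch: compressed dimensions carry both a pointers array and an indices array, singleton dimensions only an indices array, and dense dimensions no overhead storage at all. A self-contained sketch of that classification (buffersFor is a hypothetical name; the predicates are reproduced from the bit-encoding sketch after the first file):

    #include <cassert>
    #include <cstdint>

    enum class DimLevelType : uint8_t { Dense = 4, Compressed = 8, Singleton = 16 };
    constexpr bool isDenseDLT(DimLevelType d) { return d == DimLevelType::Dense; }
    constexpr bool isCompressedDLT(DimLevelType d) {
      return (static_cast<uint8_t>(d) & ~3) == 8;
    }
    constexpr bool isSingletonDLT(DimLevelType d) {
      return (static_cast<uint8_t>(d) & ~3) == 16;
    }

    struct DimBuffers {
      bool needsPointers; // materialized via ToPointersOp above
      bool needsIndices;  // materialized via ToIndicesOp above
    };

    DimBuffers buffersFor(DimLevelType dlt) {
      if (isCompressedDLT(dlt))
        return {true, true};  // pointers + indices
      if (isSingletonDLT(dlt))
        return {false, true}; // indices only
      assert(isDenseDLT(dlt) && "unhandled dimension level type");
      return {false, false};  // dense: nothing to fetch
    }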
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -204,7 +204,7 @@
                         Location loc, ShapedType stp,
                         SparseTensorEncodingAttr &enc, Action action,
                         ValueRange szs, Value ptr = Value()) {
-  ArrayRef<SparseTensorEncodingAttr::DimLevelType> dlt = enc.getDimLevelType();
+  ArrayRef<DimLevelType> dlt = enc.getDimLevelType();
   unsigned sz = dlt.size();
   // Sparsity annotations.
   SmallVector<Value> attrs;
@@ -407,11 +407,10 @@
 /// Determine if the runtime library supports direct conversion to the
 /// given target `dimTypes`.
-static bool canUseDirectConversion(
-    ArrayRef<SparseTensorEncodingAttr::DimLevelType> dimTypes) {
+static bool canUseDirectConversion(ArrayRef<DimLevelType> dimTypes) {
   bool alreadyCompressed = false;
   for (uint64_t rank = dimTypes.size(), r = 0; r < rank; r++) {
-    const DimLevelType dlt = dimLevelTypeEncoding(dimTypes[r]);
+    const DimLevelType dlt = dimTypes[r];
     if (isCompressedDLT(dlt)) {
       if (alreadyCompressed)
         return false; // Multiple compressed dimensions not yet supported.
@@ -852,10 +851,8 @@
     // The dimLevelTypes aren't actually used by Action::kToIterator.
     encDst = SparseTensorEncodingAttr::get(
         op->getContext(),
-        SmallVector<SparseTensorEncodingAttr::DimLevelType>(
-            rank, SparseTensorEncodingAttr::DimLevelType::Dense),
-        AffineMap(), AffineMap(), encSrc.getPointerBitWidth(),
-        encSrc.getIndexBitWidth());
+        SmallVector<DimLevelType>(rank, DimLevelType::Dense), AffineMap(),
+        AffineMap(), encSrc.getPointerBitWidth(), encSrc.getIndexBitWidth());
     SmallVector<Value> sizes;
    SmallVector<Value> params;
    sizesFromPtr(rewriter, sizes, loc, encSrc, srcTensorTp, src);
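Note: the canUseDirectConversion hunk shows the payoff of the unification — the runtime-facing check now consumes the encoding's level types directly instead of translating each one first. A self-contained sketch of the visible part of that check (the trailing per-format logic is elided in the hunk, so it is elided here too):

    #include <cstdint>
    #include <vector>

    enum class DimLevelType : uint8_t { Dense = 4, Compressed = 8, Singleton = 16 };
    constexpr bool isCompressedDLT(DimLevelType d) {
      return (static_cast<uint8_t>(d) & ~3) == 8;
    }

    bool canUseDirectConversion(const std::vector<DimLevelType> &dimTypes) {
      bool alreadyCompressed = false;
      for (uint64_t rank = dimTypes.size(), r = 0; r < rank; r++) {
        // Previously each entry went through dimLevelTypeEncoding() first;
        // with one shared enum the value is used as-is.
        const DimLevelType dlt = dimTypes[r];
        if (isCompressedDLT(dlt)) {
          if (alreadyCompressed)
            return false; // multiple compressed dimensions not yet supported
          alreadyCompressed = true;
        }
        // ... remaining per-format checks as in the full function ...
      }
      return true;
    }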
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
@@ -41,8 +41,7 @@
 // Helper to detect a sparse tensor type operand.
 static bool isSparseTensor(OpOperand *op) {
   if (auto enc = getSparseTensorEncoding(op->get().getType())) {
-    if (llvm::is_contained(enc.getDimLevelType(),
-                           SparseTensorEncodingAttr::DimLevelType::Compressed))
+    if (llvm::is_contained(enc.getDimLevelType(), DimLevelType::Compressed))
       return true;
   }
   return false;
@@ -132,21 +131,20 @@
 static RankedTensorType getUnorderedCOOFromType(RankedTensorType src) {
   auto *ctx = src.getContext();
   auto rank = src.getRank();
-  SmallVector<SparseTensorEncodingAttr::DimLevelType> dims;
+  SmallVector<DimLevelType> dims;
 
   // An unordered and non-unique compressed dim at beginning unless the tensor
   // is a 1D tensor.
   if (rank > 1)
-    dims.push_back(SparseTensorEncodingAttr::DimLevelType::CompressedNuNo);
+    dims.push_back(DimLevelType::CompressedNuNo);
   // TODO: it is actually ordered at the level for ordered input.
 
   // Followed by unordered non-unique n-2 singleton levels.
-  std::fill_n(std::back_inserter(dims), rank - 2,
-              SparseTensorEncodingAttr::DimLevelType::SingletonNuNo);
+  std::fill_n(std::back_inserter(dims), rank - 2, DimLevelType::SingletonNuNo);
 
   // TODO: only if all the inputs (for concatenate) are unique at the last
   // level should the COO have a unique level at the end. Ends with an
   // unordered unique singleton level.
-  dims.push_back(SparseTensorEncodingAttr::DimLevelType::SingletonNo);
+  dims.push_back(DimLevelType::SingletonNo);
 
   // TODO: Maybe pick the bitwidth based on input/output tensors (probably the
   // largest one among them) in the original operation instead of using the
   // default value.
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -136,23 +136,23 @@
                                       unsigned d) {
   if (enc) {
     switch (enc.getDimLevelType()[d]) {
-    case SparseTensorEncodingAttr::DimLevelType::Dense:
+    case DimLevelType::Dense:
       return DimLevelFormat(DimLvlType::kDense);
-    case SparseTensorEncodingAttr::DimLevelType::Compressed:
+    case DimLevelType::Compressed:
       return DimLevelFormat(DimLvlType::kCompressed);
-    case SparseTensorEncodingAttr::DimLevelType::CompressedNu:
+    case DimLevelType::CompressedNu:
       return DimLevelFormat(DimLvlType::kCompressed, true, false);
-    case SparseTensorEncodingAttr::DimLevelType::CompressedNo:
+    case DimLevelType::CompressedNo:
       return DimLevelFormat(DimLvlType::kCompressed, false, true);
-    case SparseTensorEncodingAttr::DimLevelType::CompressedNuNo:
+    case DimLevelType::CompressedNuNo:
      return DimLevelFormat(DimLvlType::kCompressed, false, false);
-    case SparseTensorEncodingAttr::DimLevelType::Singleton:
+    case DimLevelType::Singleton:
       return DimLevelFormat(DimLvlType::kSingleton);
-    case SparseTensorEncodingAttr::DimLevelType::SingletonNu:
+    case DimLevelType::SingletonNu:
       return DimLevelFormat(DimLvlType::kSingleton, true, false);
-    case SparseTensorEncodingAttr::DimLevelType::SingletonNo:
+    case DimLevelType::SingletonNo:
       return DimLevelFormat(DimLvlType::kSingleton, false, true);
-    case SparseTensorEncodingAttr::DimLevelType::SingletonNuNo:
+    case DimLevelType::SingletonNuNo:
       return DimLevelFormat(DimLvlType::kSingleton, false, false);
     }
   }
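Note: the getUnorderedCOOFromType change above encodes the COO level pattern — one leading compressed(-nu-no) level, rank-2 singleton(-nu-no) levels, and a final singleton(-no) level. A standalone sketch of just that shape computation (buildCOOLevels is a hypothetical name; enum values as in the patch):

    #include <cstdint>
    #include <vector>

    enum class DimLevelType : uint8_t {
      CompressedNuNo = 11,
      SingletonNo = 18,
      SingletonNuNo = 19,
    };

    std::vector<DimLevelType> buildCOOLevels(int64_t rank) {
      std::vector<DimLevelType> dims;
      // Unordered, non-unique compressed level up front, unless rank == 1.
      if (rank > 1)
        dims.push_back(DimLevelType::CompressedNuNo);
      // rank - 2 unordered, non-unique singleton levels in the middle.
      for (int64_t i = 0; i < rank - 2; ++i)
        dims.push_back(DimLevelType::SingletonNuNo);
      // Final singleton level: unordered but unique.
      dims.push_back(DimLevelType::SingletonNo);
      return dims;
    }
    // rank 1 -> {SingletonNo}
    // rank 3 -> {CompressedNuNo, SingletonNuNo, SingletonNo}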
":SparseTensorRuntime", "//llvm:Support", ], )