diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -300,7 +300,7 @@
 /// TODO: factor out a new LevelProperties type so we can add new properties
 /// without changing this function's signature
 constexpr std::optional<DimLevelType>
-getDimLevelType(LevelFormat lf, bool ordered, bool unique) {
+buildLevelType(LevelFormat lf, bool ordered, bool unique) {
   auto dlt = static_cast<DimLevelType>(static_cast<uint8_t>(lf) |
                                        (ordered ? 0 : 2) | (unique ? 0 : 1));
   return isValidDLT(dlt) ? std::optional(dlt) : std::nullopt;
@@ -321,27 +321,27 @@
     "getLevelFormat conversion is broken");
 
 static_assert(
-    (getDimLevelType(LevelFormat::Dense, false, true) == std::nullopt &&
-     getDimLevelType(LevelFormat::Dense, true, false) == std::nullopt &&
-     getDimLevelType(LevelFormat::Dense, false, false) == std::nullopt &&
-     *getDimLevelType(LevelFormat::Dense, true, true) == DimLevelType::Dense &&
-     *getDimLevelType(LevelFormat::Compressed, true, true) ==
+    (buildLevelType(LevelFormat::Dense, false, true) == std::nullopt &&
+     buildLevelType(LevelFormat::Dense, true, false) == std::nullopt &&
+     buildLevelType(LevelFormat::Dense, false, false) == std::nullopt &&
+     *buildLevelType(LevelFormat::Dense, true, true) == DimLevelType::Dense &&
+     *buildLevelType(LevelFormat::Compressed, true, true) ==
         DimLevelType::Compressed &&
-     *getDimLevelType(LevelFormat::Compressed, true, false) ==
+     *buildLevelType(LevelFormat::Compressed, true, false) ==
         DimLevelType::CompressedNu &&
-     *getDimLevelType(LevelFormat::Compressed, false, true) ==
+     *buildLevelType(LevelFormat::Compressed, false, true) ==
         DimLevelType::CompressedNo &&
-     *getDimLevelType(LevelFormat::Compressed, false, false) ==
+     *buildLevelType(LevelFormat::Compressed, false, false) ==
         DimLevelType::CompressedNuNo &&
-     *getDimLevelType(LevelFormat::Singleton, true, true) ==
+     *buildLevelType(LevelFormat::Singleton, true, true) ==
         DimLevelType::Singleton &&
-     *getDimLevelType(LevelFormat::Singleton, true, false) ==
+     *buildLevelType(LevelFormat::Singleton, true, false) ==
         DimLevelType::SingletonNu &&
-     *getDimLevelType(LevelFormat::Singleton, false, true) ==
+     *buildLevelType(LevelFormat::Singleton, false, true) ==
         DimLevelType::SingletonNo &&
-     *getDimLevelType(LevelFormat::Singleton, false, false) ==
+     *buildLevelType(LevelFormat::Singleton, false, false) ==
         DimLevelType::SingletonNuNo),
-    "getDimLevelType conversion is broken");
+    "buildLevelType conversion is broken");
 
 // Ensure the above predicates work as intended.
 static_assert((isValidDLT(DimLevelType::Undef) &&
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
@@ -266,7 +266,7 @@
     ArrayRefParameter<
       "::mlir::sparse_tensor::DimLevelType",
      "level-types"
-      >: $dimLevelType,
+      >: $lvlTypes,
     // A permutation from (higher-ordering)-coordinates to level-coordinates.
     "AffineMap":$dimOrdering,
     // A mapping from dimension-coordinates to (higher-ordering)-coordinates.
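Reviewer note on the `Enums.h` hunk: `buildLevelType` composes a `DimLevelType` by OR-ing the format value with two property bits (bit 0 set for non-unique, bit 1 for non-ordered), which is exactly what the `static_assert` table checks. The standalone sketch below replays that scheme; the enum values are illustrative stand-ins chosen to be consistent with the asserts above, not the authoritative `Enums.h` definitions.

```cpp
#include <cstdint>
#include <cstdio>
#include <optional>

// Hypothetical values mirroring the static_asserts in the hunk above.
enum class LevelFormat : uint8_t { Dense = 4, Compressed = 8, Singleton = 16 };
enum class DimLevelType : uint8_t {
  Dense = 4,
  Compressed = 8, CompressedNu = 9, CompressedNo = 10, CompressedNuNo = 11,
  Singleton = 16, SingletonNu = 17, SingletonNo = 18, SingletonNuNo = 19,
};

constexpr std::optional<DimLevelType>
buildLevelType(LevelFormat lf, bool ordered, bool unique) {
  // Bit 0 encodes "non-unique" (Nu), bit 1 encodes "non-ordered" (No).
  const auto dlt = static_cast<DimLevelType>(
      static_cast<uint8_t>(lf) | (ordered ? 0 : 2) | (unique ? 0 : 1));
  // A dense level carries no property bits, so only (ordered, unique) is
  // valid; this check stands in for the real isValidDLT.
  if (lf == LevelFormat::Dense && !(ordered && unique))
    return std::nullopt;
  return dlt;
}

int main() {
  // "compressed(nonunique)": the head level of a multi-level COO encoding.
  if (auto dlt = buildLevelType(LevelFormat::Compressed, /*ordered=*/true,
                                /*unique=*/false))
    std::printf("dlt = %u\n", static_cast<unsigned>(*dlt)); // prints 9
}
```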
@@ -283,12 +283,12 @@
   );
 
   let builders = [
-    AttrBuilder<(ins "ArrayRef<::mlir::sparse_tensor::DimLevelType>":$dimLevelType,
+    AttrBuilder<(ins "ArrayRef<::mlir::sparse_tensor::DimLevelType>":$lvlTypes,
                      "AffineMap":$dimOrdering,
                      "AffineMap":$higherOrdering,
                      "unsigned":$posWidth,
                      "unsigned":$crdWidth), [{
-      return $_get($_ctxt, dimLevelType,
+      return $_get($_ctxt, lvlTypes,
                    dimOrdering,
                    higherOrdering,
                    posWidth,
diff --git a/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h b/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h
--- a/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h
@@ -377,14 +377,14 @@
   bool hasSparseIdxReduction(const BitVector &bits) const;
 
   /// Gets the level-type of the `t`th tensor on `i`th loop.
-  DimLevelType getDimLevelType(TensorId t, LoopId i) const {
+  DimLevelType getLvlType(TensorId t, LoopId i) const {
     assert(isValidTensorId(t) && isValidLoopId(i));
     return lvlTypes[t][i];
   }
 
   /// Gets the level-type of the TensorLoopId.
-  DimLevelType getDimLevelType(TensorLoopId b) const {
-    return getDimLevelType(tensor(b), loop(b));
+  DimLevelType getLvlType(TensorLoopId b) const {
+    return getLvlType(tensor(b), loop(b));
   }
 
   /// Gets the loop identifier for the `lvl`th level of the `t`th tensor.
@@ -434,7 +434,7 @@
     for (const TensorLoopId b : bits.set_bits()) {
       const TensorId t = tensor(b);
       const auto optLvl = getLvl(b);
-      const auto lvlTp = getDimLevelType(b);
+      const auto lvlTp = getLvlType(b);
       if (isLvlWithNonTrivialIdxExp(b)) {
         // This must be an undefined level.
         assert(!optLvl.has_value());
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -130,23 +130,23 @@
 }
 
 SparseTensorEncodingAttr SparseTensorEncodingAttr::withoutOrdering() const {
-  return SparseTensorEncodingAttr::get(getContext(), getDimLevelType(),
+  return SparseTensorEncodingAttr::get(getContext(), getLvlTypes(),
                                        AffineMap(), AffineMap(), getPosWidth(),
                                        getCrdWidth());
 }
 
 SparseTensorEncodingAttr SparseTensorEncodingAttr::withoutBitWidths() const {
-  return SparseTensorEncodingAttr::get(getContext(), getDimLevelType(),
+  return SparseTensorEncodingAttr::get(getContext(), getLvlTypes(),
                                        getDimOrdering(), getHigherOrdering(),
                                        0, 0);
 }
 
 bool SparseTensorEncodingAttr::isAllDense() const {
-  return !getImpl() || llvm::all_of(getDimLevelType(), isDenseDLT);
+  return !getImpl() || llvm::all_of(getLvlTypes(), isDenseDLT);
 }
 
 bool SparseTensorEncodingAttr::isAllOrdered() const {
-  return !getImpl() || llvm::all_of(getDimLevelType(), isOrderedDLT);
+  return !getImpl() || llvm::all_of(getLvlTypes(), isOrderedDLT);
 }
 
 bool SparseTensorEncodingAttr::hasIdDimOrdering() const {
@@ -155,14 +155,14 @@
 
 Level SparseTensorEncodingAttr::getLvlRank() const {
   assert(getImpl() && "Uninitialized SparseTensorEncodingAttr");
-  return getDimLevelType().size();
+  return getLvlTypes().size();
 }
 
 DimLevelType SparseTensorEncodingAttr::getLvlType(Level l) const {
   if (!getImpl())
     return DimLevelType::Dense;
   assert(l < getLvlRank() && "Level is out of bounds");
-  return getDimLevelType()[l];
+  return getLvlTypes()[l];
 }
 
 std::optional<uint64_t>
@@ -337,7 +337,7 @@
 void SparseTensorEncodingAttr::print(AsmPrinter &printer) const {
   // Print the struct-like storage in dictionary fashion.
   printer << "<{ dimLevelType = [ ";
-  llvm::interleaveComma(getDimLevelType(), printer, [&](DimLevelType dlt) {
+  llvm::interleaveComma(getLvlTypes(), printer, [&](DimLevelType dlt) {
     printer << "\"" << toMLIRString(dlt) << "\"";
   });
   printer << " ]";
@@ -415,7 +415,7 @@
     function_ref<InFlightDiagnostic()> emitError) const {
   // Check structural integrity. In particular, this ensures that the
   // level-rank is coherent across all the fields.
-  RETURN_FAILURE_IF_FAILED(verify(emitError, getDimLevelType(),
+  RETURN_FAILURE_IF_FAILED(verify(emitError, getLvlTypes(),
                                   getDimOrdering(), getHigherOrdering(),
                                   getPosWidth(), getCrdWidth(), getDimSlices()))
   // Check integrity with tensor type specifics. In particular, we
@@ -496,14 +496,14 @@
   // An unordered and non-unique compressed level at beginning.
   // If this is also the last level, then it is unique.
   lvlTypes.push_back(
-      *getDimLevelType(LevelFormat::Compressed, ordered, lvlRank == 1));
+      *buildLevelType(LevelFormat::Compressed, ordered, lvlRank == 1));
   if (lvlRank > 1) {
     // TODO: it is actually ordered at the level for ordered input.
     // Followed by unordered non-unique n-2 singleton levels.
     std::fill_n(std::back_inserter(lvlTypes), lvlRank - 2,
-                *getDimLevelType(LevelFormat::Singleton, ordered, false));
+                *buildLevelType(LevelFormat::Singleton, ordered, false));
     // Ends by a unique singleton level unless the lvlRank is 1.
-    lvlTypes.push_back(*getDimLevelType(LevelFormat::Singleton, ordered, true));
+    lvlTypes.push_back(*buildLevelType(LevelFormat::Singleton, ordered, true));
   }
 
   // TODO: Maybe pick the bitwidth based on input/output tensors (probably the
@@ -580,8 +580,8 @@
 static SparseTensorEncodingAttr
 getNormalizedEncodingForSpecifier(SparseTensorEncodingAttr enc) {
   SmallVector<DimLevelType> dlts;
-  for (auto dlt : enc.getDimLevelType())
-    dlts.push_back(*getDimLevelType(*getLevelFormat(dlt), true, true));
+  for (auto dlt : enc.getLvlTypes())
+    dlts.push_back(*buildLevelType(*getLevelFormat(dlt), true, true));
 
   return SparseTensorEncodingAttr::get(
       enc.getContext(), dlts,
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h
@@ -79,11 +79,9 @@
   const LatPoint &lat(LatPointId l) const { return latticeMerger.lat(l); }
   ArrayRef<LatPointId> set(LatSetId s) const { return latticeMerger.set(s); }
   DimLevelType dlt(TensorId t, LoopId i) const {
-    return latticeMerger.getDimLevelType(t, i);
-  }
-  DimLevelType dlt(TensorLoopId b) const {
-    return latticeMerger.getDimLevelType(b);
+    return latticeMerger.getLvlType(t, i);
   }
+  DimLevelType dlt(TensorLoopId b) const { return latticeMerger.getLvlType(b); }
 
   //
   // LoopEmitter delegates.
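For context on the `@@ -496,14` hunk above (the main call sites where `buildLevelType` replaces the old overload rather than a plain accessor): a COO encoding uses a compressed head level, which is unique only when it is also the last level, then non-unique singletons, then a unique singleton tail. A minimal sketch of that recipe, reusing the hypothetical `LevelFormat`/`DimLevelType`/`buildLevelType` definitions from the earlier sketch:

```cpp
#include <vector>
// Assumes the buildLevelType sketch shown earlier in this patch description.

std::vector<DimLevelType> buildCOOLevelTypes(unsigned lvlRank, bool ordered) {
  std::vector<DimLevelType> lvlTypes;
  // Head: compressed; unique only when it is also the last level.
  lvlTypes.push_back(*buildLevelType(LevelFormat::Compressed, ordered,
                                     /*unique=*/lvlRank == 1));
  // Middle levels: non-unique singletons; tail: a unique singleton.
  for (unsigned l = 1; l < lvlRank; ++l)
    lvlTypes.push_back(*buildLevelType(LevelFormat::Singleton, ordered,
                                       /*unique=*/l == lvlRank - 1));
  return lvlTypes;
}
```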
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
@@ -288,7 +288,7 @@
     if (stt.hasEncoding() && !(isOutputTensor(tid) && isSparseOut)) {
       const auto enc = stt.getEncoding();
       isSparseSlices[tid] = enc.isSlice();
-      for (auto lvlTp : enc.getDimLevelType())
+      for (auto lvlTp : enc.getLvlTypes())
         lvlTypes[tid].push_back(lvlTp);
     } else {
       lvlTypes[tid].assign(lvlRank, DimLevelType::Dense);
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -1187,7 +1187,7 @@
     // TODO: We should check these in ExtractSliceOp::verify.
     if (!srcEnc || !dstEnc || !dstEnc.isSlice())
       return failure();
-    assert(srcEnc.getDimLevelType() == dstEnc.getDimLevelType());
+    assert(srcEnc.getLvlTypes() == dstEnc.getLvlTypes());
     assert(srcEnc.getDimOrdering() == dstEnc.getDimOrdering());
     assert(srcEnc.getHigherOrdering() == dstEnc.getHigherOrdering());
     assert(srcEnc.getPosWidth() == dstEnc.getPosWidth());
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -205,7 +205,7 @@
                              SparseTensorType stt) {
   SmallVector<Value> lvlTypes;
   lvlTypes.reserve(stt.getLvlRank());
-  for (const auto dlt : stt.getEncoding().getDimLevelType())
+  for (const auto dlt : stt.getEncoding().getLvlTypes())
     lvlTypes.push_back(constantDimLevelTypeEncoding(builder, loc, dlt));
   return allocaBuffer(builder, loc, lvlTypes);
 }
@@ -565,7 +565,7 @@
     rewriter.setInsertionPointToStart(after);
 
     const bool hasDenseDim =
-        llvm::any_of(stt.getEncoding().getDimLevelType(), isDenseDLT);
+        llvm::any_of(stt.getEncoding().getLvlTypes(), isDenseDLT);
     if (hasDenseDim) {
       Value elemV = rewriter.create<memref::LoadOp>(loc, elemPtr);
       Value isZero = genIsNonzero(rewriter, loc, elemV);
@@ -880,11 +880,11 @@
       break;
     case SparseToSparseConversionStrategy::kDirect:
       useDirectConversion = true;
-      assert(canUseDirectConversion(dstEnc.getDimLevelType()) &&
+      assert(canUseDirectConversion(dstEnc.getLvlTypes()) &&
             "Unsupported target for direct sparse-to-sparse conversion");
       break;
     case SparseToSparseConversionStrategy::kAuto:
-      useDirectConversion = canUseDirectConversion(dstEnc.getDimLevelType());
+      useDirectConversion = canUseDirectConversion(dstEnc.getLvlTypes());
       break;
     }
 
    if (useDirectConversion) {
@@ -896,7 +896,7 @@
       // method calls can share most parameters, while still providing
       // the correct sparsity information to either of them.
       const auto mixedEnc = SparseTensorEncodingAttr::get(
-          op->getContext(), dstEnc.getDimLevelType(), dstEnc.getDimOrdering(),
+          op->getContext(), dstEnc.getLvlTypes(), dstEnc.getDimOrdering(),
           dstEnc.getHigherOrdering(), srcEnc.getPosWidth(),
           srcEnc.getCrdWidth());
       // TODO: This is the only place where `kToCOO` (or `kToIterator`)
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
@@ -44,8 +44,7 @@
 // Helper to detect a sparse tensor type operand.
 static bool isSparseTensor(OpOperand *op) {
   auto enc = getSparseTensorEncoding(op->get().getType());
-  return enc &&
-         llvm::is_contained(enc.getDimLevelType(), DimLevelType::Compressed);
+  return enc && llvm::is_contained(enc.getLvlTypes(), DimLevelType::Compressed);
 }
 
 // Helper method to find zero/uninitialized allocation.
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.cpp
@@ -134,7 +134,7 @@
     if (!(callback(fidx, kind, dim, dlt)))                                     \
       return;
 
-  const auto lvlTypes = enc.getDimLevelType();
+  const auto lvlTypes = enc.getLvlTypes();
   const Level lvlRank = enc.getLvlRank();
   const Level cooStart = getCOOStart(enc);
   const Level end = cooStart == lvlRank ? cooStart : cooStart + 1;
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -232,7 +232,7 @@
   switch (a.getKind()) {
   case AffineExprKind::DimId: {
     const LoopId idx = merger.makeLoopId(a.cast<AffineDimExpr>().getPosition());
-    if (!isUndefDLT(merger.getDimLevelType(tid, idx)))
+    if (!isUndefDLT(merger.getLvlType(tid, idx)))
       return false; // used more than once
 
     if (setLvlFormat)
@@ -243,7 +243,7 @@
   case AffineExprKind::Mul:
   case AffineExprKind::Constant: {
     if (!isDenseDLT(dlt) && setLvlFormat) {
-      assert(isUndefDLT(merger.getDimLevelType(tid, filterLdx)));
+      assert(isUndefDLT(merger.getLvlType(tid, filterLdx)));
       // Use a filter loop for sparse affine expression.
       merger.setLevelAndType(tid, filterLdx, lvl, dlt);
       ++filterLdx;
@@ -287,7 +287,7 @@
     switch (a.getKind()) {
     case AffineExprKind::DimId: {
       const LoopId ldx = merger.makeLoopId(a.cast<AffineDimExpr>().getPosition());
-      if (!isUndefDLT(merger.getDimLevelType(tensor, ldx)))
+      if (!isUndefDLT(merger.getLvlType(tensor, ldx)))
         return false; // used more than once, e.g., A[i][i]
 
       // TODO: Generalizes the following two cases. A[i] (with trivial index
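Background for the `Sparsification.cpp` hunks above: the merger initializes every (tensor, loop) slot to an undefined level-type, so `isUndefDLT(merger.getLvlType(...))` doubles as a "not yet claimed" test, and a second occurrence of the same loop index (e.g. `A[i][i]`) makes the traversal bail out. A minimal sketch of just that sentinel idea; `MiniMerger` and `claimLoopIndex` are hypothetical stand-ins, not the real `Merger` API:

```cpp
#include <vector>

enum class DLT { Undef, Dense, Compressed, Singleton };

struct MiniMerger {
  // lvlTypes[t][i] starts out Undef for every tensor t and loop i.
  std::vector<std::vector<DLT>> lvlTypes;
  DLT getLvlType(unsigned t, unsigned i) const { return lvlTypes[t][i]; }
  void setLevelAndType(unsigned t, unsigned i, DLT dlt) {
    lvlTypes[t][i] = dlt;
  }
};

// Returns false when loop index `i` was already claimed for tensor `t`,
// mirroring the "used more than once" early exit above.
bool claimLoopIndex(MiniMerger &merger, unsigned t, unsigned i, DLT dlt) {
  if (merger.getLvlType(t, i) != DLT::Undef)
    return false; // used more than once, e.g., A[i][i]
  merger.setLevelAndType(t, i, dlt);
  return true;
}
```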
@@ -625,7 +625,7 @@
     // i.e., d0 + d1 < filter_loop(d0 + d1)
     if (tldx && env.merger().isFilterLoop(*tldx)) {
       assert(!ta.isa<AffineDimExpr>() &&
-             !isDenseDLT(enc.getDimLevelType()[lvl]));
+             !isDenseDLT(enc.getLvlTypes()[lvl]));
       addAffineOrderings(adjM, inDegree, ta, AffineExpr(), std::nullopt, tldx);
       // Now that the ordering of affine expression is captured by filter
       // loop idx, we only need to ensure the affine ordering against filter
@@ -1922,7 +1922,7 @@
     //
     auto srcTp = getRankedTensorType(tval);
     auto dstEnc = SparseTensorEncodingAttr::get(
-        getContext(), srcEnc.getDimLevelType(),
+        getContext(), srcEnc.getLvlTypes(),
         permute(env, env.op().getMatchingIndexingMap(t)), // new order
         srcEnc.getHigherOrdering(), srcEnc.getPosWidth(),
         srcEnc.getCrdWidth());
diff --git a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
--- a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
@@ -405,7 +405,7 @@
   // Starts resetting from a dense level, so that the first bit (if kept)
   // is not undefined level-type.
   for (unsigned b = 0; b < be; b++) {
-    if (simple[b] && isDenseDLT(getDimLevelType(TensorLoopId{b}))) {
+    if (simple[b] && isDenseDLT(getLvlType(TensorLoopId{b}))) {
      offset = be - b - 1; // relative to the end
      break;
    }
@@ -417,7 +417,7 @@
        b = b == 0 ? be - 1 : b - 1, i++) {
     // Slice on dense level has `locate` property as well, and can be optimized.
     if (simple[b] && !isSparseLvlWithNonTrivialIdxExp(b)) {
-      const auto dlt = getDimLevelType(b);
+      const auto dlt = getLvlType(b);
       if (!isCompressedDLT(dlt) && !isSingletonDLT(dlt) &&
           !isCompressedWithHiDLT(dlt)) {
         if (reset)
           simple.reset(b);
@@ -584,7 +584,7 @@
 bool Merger::hasAnySparse(const BitVector &bits) const {
   for (TensorLoopId b : bits.set_bits()) {
-    const auto dlt = getDimLevelType(b);
+    const auto dlt = getLvlType(b);
     if (isCompressedDLT(dlt) || isSingletonDLT(dlt) ||
         isCompressedWithHiDLT(dlt))
       return true;
   }
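Closing note on the `Merger.cpp` hunks: `hasAnySparse` and the simplification loops walk a `BitVector` whose set bits are `TensorLoopId`s, and `getLvlType(b)` unpacks each bit back into a (tensor, loop) pair. The sketch below shows that bookkeeping pattern end to end; the loop-major packing in `MiniMerger` is an assumption for illustration (the real `Merger` hides it behind `tensor(b)`/`loop(b)`):

```cpp
#include "llvm/ADT/BitVector.h"
#include <cstdio>

using TensorId = unsigned;
using LoopId = unsigned;
using TensorLoopId = unsigned;

struct MiniMerger {
  unsigned numTensors;
  // Assumed packing: loop-major, tensor-minor.
  TensorLoopId makeTensorLoopId(TensorId t, LoopId i) const {
    return numTensors * i + t;
  }
  TensorId tensor(TensorLoopId b) const { return b % numTensors; }
  LoopId loop(TensorLoopId b) const { return b / numTensors; }
};

int main() {
  MiniMerger m{/*numTensors=*/3};
  llvm::BitVector bits(12);
  bits.set(m.makeTensorLoopId(/*t=*/1, /*i=*/2)); // tensor 1 used on loop 2
  for (unsigned b : bits.set_bits())              // iterate set TensorLoopIds
    std::printf("tensor %u, loop %u\n", m.tensor(b), m.loop(b));
}
```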