diff --git a/mlir/include/mlir-c/Dialect/SparseTensor.h b/mlir/include/mlir-c/Dialect/SparseTensor.h
--- a/mlir/include/mlir-c/Dialect/SparseTensor.h
+++ b/mlir/include/mlir-c/Dialect/SparseTensor.h
@@ -52,7 +52,7 @@
 /// Creates a `sparse_tensor.encoding` attribute with the given parameters.
 MLIR_CAPI_EXPORTED MlirAttribute mlirSparseTensorEncodingAttrGet(
     MlirContext ctx, intptr_t lvlRank,
-    enum MlirSparseTensorDimLevelType const *dimLevelTypes,
+    enum MlirSparseTensorDimLevelType const *lvlTypes,
     MlirAffineMap dimOrdering, MlirAffineMap higherOrdering, int posWidth,
     int crdWidth);
@@ -62,7 +62,7 @@
 /// Returns a specified level-type of the `sparse_tensor.encoding` attribute.
 MLIR_CAPI_EXPORTED enum MlirSparseTensorDimLevelType
-mlirSparseTensorEncodingAttrGetDimLevelType(MlirAttribute attr, intptr_t lvl);
+mlirSparseTensorEncodingAttrGetLvlType(MlirAttribute attr, intptr_t lvl);
 /// Returns the dimension-ordering of the `sparse_tensor.encoding` attribute.
 MLIR_CAPI_EXPORTED MlirAffineMap
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -300,7 +300,7 @@
 /// TODO: factor out a new LevelProperties type so we can add new properties
 /// without changing this function's signature
 constexpr std::optional<DimLevelType>
-getDimLevelType(LevelFormat lf, bool ordered, bool unique) {
+buildLevelType(LevelFormat lf, bool ordered, bool unique) {
   auto dlt = static_cast<DimLevelType>(static_cast<uint8_t>(lf) |
                                        (ordered ? 0 : 2) | (unique ? 0 : 1));
   return isValidDLT(dlt) ? std::optional(dlt) : std::nullopt;
@@ -321,27 +321,27 @@
              "getLevelFormat conversion is broken");
 static_assert(
-    (getDimLevelType(LevelFormat::Dense, false, true) == std::nullopt &&
-     getDimLevelType(LevelFormat::Dense, true, false) == std::nullopt &&
-     getDimLevelType(LevelFormat::Dense, false, false) == std::nullopt &&
-     *getDimLevelType(LevelFormat::Dense, true, true) == DimLevelType::Dense &&
-     *getDimLevelType(LevelFormat::Compressed, true, true) ==
+    (buildLevelType(LevelFormat::Dense, false, true) == std::nullopt &&
+     buildLevelType(LevelFormat::Dense, true, false) == std::nullopt &&
+     buildLevelType(LevelFormat::Dense, false, false) == std::nullopt &&
+     *buildLevelType(LevelFormat::Dense, true, true) == DimLevelType::Dense &&
+     *buildLevelType(LevelFormat::Compressed, true, true) ==
          DimLevelType::Compressed &&
-     *getDimLevelType(LevelFormat::Compressed, true, false) ==
+     *buildLevelType(LevelFormat::Compressed, true, false) ==
         DimLevelType::CompressedNu &&
-     *getDimLevelType(LevelFormat::Compressed, false, true) ==
+     *buildLevelType(LevelFormat::Compressed, false, true) ==
         DimLevelType::CompressedNo &&
-     *getDimLevelType(LevelFormat::Compressed, false, false) ==
+     *buildLevelType(LevelFormat::Compressed, false, false) ==
        DimLevelType::CompressedNuNo &&
-     *getDimLevelType(LevelFormat::Singleton, true, true) ==
+     *buildLevelType(LevelFormat::Singleton, true, true) ==
        DimLevelType::Singleton &&
-     *getDimLevelType(LevelFormat::Singleton, true, false) ==
+     *buildLevelType(LevelFormat::Singleton, true, false) ==
        DimLevelType::SingletonNu &&
-     *getDimLevelType(LevelFormat::Singleton, false, true) ==
+     *buildLevelType(LevelFormat::Singleton, false, true) ==
        DimLevelType::SingletonNo &&
-     *getDimLevelType(LevelFormat::Singleton, false, false) ==
+     *buildLevelType(LevelFormat::Singleton, false, false) ==
        DimLevelType::SingletonNuNo),
-    "getDimLevelType conversion is broken");
+    "buildLevelType conversion is broken");
conversion is broken"); // Ensure the above predicates work as intended. static_assert((isValidDLT(DimLevelType::Undef) && diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td --- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td @@ -266,7 +266,7 @@ ArrayRefParameter< "::mlir::sparse_tensor::DimLevelType", "level-types" - >: $dimLevelType, + >: $lvlTypes, // A permutation from (higher-ordering)-coordinates to level-coordinates. "AffineMap":$dimOrdering, // A mapping from dimension-coordinates to (higher-ordering)-coordinates. @@ -283,12 +283,12 @@ ); let builders = [ - AttrBuilder<(ins "ArrayRef<::mlir::sparse_tensor::DimLevelType>":$dimLevelType, + AttrBuilder<(ins "ArrayRef<::mlir::sparse_tensor::DimLevelType>":$lvlTypes, "AffineMap":$dimOrdering, "AffineMap":$higherOrdering, "unsigned":$posWidth, "unsigned":$crdWidth), [{ - return $_get($_ctxt, dimLevelType, + return $_get($_ctxt, lvlTypes, dimOrdering, higherOrdering, posWidth, diff --git a/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h b/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h --- a/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h +++ b/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h @@ -377,14 +377,14 @@ bool hasSparseIdxReduction(const BitVector &bits) const; /// Gets the level-type of the `t`th tensor on `i`th loop. - DimLevelType getDimLevelType(TensorId t, LoopId i) const { + DimLevelType getLvlType(TensorId t, LoopId i) const { assert(isValidTensorId(t) && isValidLoopId(i)); return lvlTypes[t][i]; } /// Gets the level-type of the TensorLoopId. - DimLevelType getDimLevelType(TensorLoopId b) const { - return getDimLevelType(tensor(b), loop(b)); + DimLevelType getLvlType(TensorLoopId b) const { + return getLvlType(tensor(b), loop(b)); } /// Gets the loop identifier for the `lvl`th level of the `t`th tensor. @@ -434,7 +434,7 @@ for (const TensorLoopId b : bits.set_bits()) { const TensorId t = tensor(b); const auto optLvl = getLvl(b); - const auto lvlTp = getDimLevelType(b); + const auto lvlTp = getLvlType(b); if (isLvlWithNonTrivialIdxExp(b)) { // This must be an undefined level. assert(!optLvl.has_value()); diff --git a/mlir/lib/Bindings/Python/DialectSparseTensor.cpp b/mlir/lib/Bindings/Python/DialectSparseTensor.cpp --- a/mlir/lib/Bindings/Python/DialectSparseTensor.cpp +++ b/mlir/lib/Bindings/Python/DialectSparseTensor.cpp @@ -39,30 +39,28 @@ mlirAttributeIsASparseTensorEncodingAttr) .def_classmethod( "get", - [](py::object cls, - std::vector dimLevelTypes, + [](py::object cls, std::vector lvlTypes, std::optional dimOrdering, std::optional higherOrdering, int posWidth, int crdWidth, MlirContext context) { return cls(mlirSparseTensorEncodingAttrGet( - context, dimLevelTypes.size(), dimLevelTypes.data(), + context, lvlTypes.size(), lvlTypes.data(), dimOrdering ? *dimOrdering : MlirAffineMap{nullptr}, higherOrdering ? 
                posWidth, crdWidth));
           },
-          py::arg("cls"), py::arg("dim_level_types"), py::arg("dim_ordering"),
+          py::arg("cls"), py::arg("lvl_types"), py::arg("dim_ordering"),
           py::arg("higher_ordering"), py::arg("pos_width"),
           py::arg("crd_width"), py::arg("context") = py::none(),
           "Gets a sparse_tensor.encoding from parameters.")
       .def_property_readonly(
-          "dim_level_types",
+          "lvl_types",
           [](MlirAttribute self) {
             const int lvlRank = mlirSparseTensorEncodingGetLvlRank(self);
             std::vector<MlirSparseTensorDimLevelType> ret;
             ret.reserve(lvlRank);
             for (int l = 0; l < lvlRank; ++l)
-              ret.push_back(
-                  mlirSparseTensorEncodingAttrGetDimLevelType(self, l));
+              ret.push_back(mlirSparseTensorEncodingAttrGetLvlType(self, l));
             return ret;
           })
       .def_property_readonly(
diff --git a/mlir/lib/CAPI/Dialect/SparseTensor.cpp b/mlir/lib/CAPI/Dialect/SparseTensor.cpp
--- a/mlir/lib/CAPI/Dialect/SparseTensor.cpp
+++ b/mlir/lib/CAPI/Dialect/SparseTensor.cpp
@@ -47,16 +47,15 @@
 MlirAttribute mlirSparseTensorEncodingAttrGet(
     MlirContext ctx, intptr_t lvlRank,
-    MlirSparseTensorDimLevelType const *dimLevelTypes,
-    MlirAffineMap dimOrdering, MlirAffineMap higherOrdering, int posWidth,
-    int crdWidth) {
-  SmallVector<DimLevelType> cppDimLevelTypes;
-  cppDimLevelTypes.reserve(lvlRank);
+    MlirSparseTensorDimLevelType const *lvlTypes, MlirAffineMap dimOrdering,
+    MlirAffineMap higherOrdering, int posWidth, int crdWidth) {
+  SmallVector<DimLevelType> cppLvlTypes;
+  cppLvlTypes.reserve(lvlRank);
   for (intptr_t l = 0; l < lvlRank; ++l)
-    cppDimLevelTypes.push_back(static_cast<DimLevelType>(dimLevelTypes[l]));
+    cppLvlTypes.push_back(static_cast<DimLevelType>(lvlTypes[l]));
   return wrap(SparseTensorEncodingAttr::get(
-      unwrap(ctx), cppDimLevelTypes, unwrap(dimOrdering),
-      unwrap(higherOrdering), posWidth, crdWidth));
+      unwrap(ctx), cppLvlTypes, unwrap(dimOrdering), unwrap(higherOrdering),
+      posWidth, crdWidth));
 }
 MlirAffineMap mlirSparseTensorEncodingAttrGetDimOrdering(MlirAttribute attr) {
@@ -74,7 +73,7 @@
 }
 MlirSparseTensorDimLevelType
-mlirSparseTensorEncodingAttrGetDimLevelType(MlirAttribute attr, intptr_t lvl) {
+mlirSparseTensorEncodingAttrGetLvlType(MlirAttribute attr, intptr_t lvl) {
   return static_cast<MlirSparseTensorDimLevelType>(
       unwrap(attr).cast<SparseTensorEncodingAttr>().getLvlType(lvl));
 }
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -130,23 +130,22 @@
 }
 SparseTensorEncodingAttr SparseTensorEncodingAttr::withoutOrdering() const {
-  return SparseTensorEncodingAttr::get(getContext(), getDimLevelType(),
-                                       AffineMap(), AffineMap(), getPosWidth(),
+  return SparseTensorEncodingAttr::get(getContext(), getLvlTypes(), AffineMap(),
+                                       AffineMap(), getPosWidth(),
                                        getCrdWidth());
 }
 SparseTensorEncodingAttr SparseTensorEncodingAttr::withoutBitWidths() const {
-  return SparseTensorEncodingAttr::get(getContext(), getDimLevelType(),
-                                       getDimOrdering(), getHigherOrdering(), 0,
-                                       0);
+  return SparseTensorEncodingAttr::get(
+      getContext(), getLvlTypes(), getDimOrdering(), getHigherOrdering(), 0, 0);
 }
 bool SparseTensorEncodingAttr::isAllDense() const {
-  return !getImpl() || llvm::all_of(getDimLevelType(), isDenseDLT);
+  return !getImpl() || llvm::all_of(getLvlTypes(), isDenseDLT);
 }
 bool SparseTensorEncodingAttr::isAllOrdered() const {
-  return !getImpl() || llvm::all_of(getDimLevelType(), isOrderedDLT);
+  return !getImpl() || llvm::all_of(getLvlTypes(), isOrderedDLT);
 }
 bool SparseTensorEncodingAttr::hasIdDimOrdering() const {
@@ -155,14 +154,14 @@
 Level SparseTensorEncodingAttr::getLvlRank() const {
   assert(getImpl() && "Uninitialized SparseTensorEncodingAttr");
-  return getDimLevelType().size();
+  return getLvlTypes().size();
 }
 DimLevelType SparseTensorEncodingAttr::getLvlType(Level l) const {
   if (!getImpl())
     return DimLevelType::Dense;
   assert(l < getLvlRank() && "Level is out of bounds");
-  return getDimLevelType()[l];
+  return getLvlTypes()[l];
 }
 std::optional
@@ -243,7 +242,7 @@
   StringRef attrName;
   // Exactly 6 keys.
-  SmallVector<StringRef, 6> keys = {"dimLevelType", "dimOrdering",
+  SmallVector<StringRef, 6> keys = {"lvlTypes", "dimOrdering",
                                     "higherOrdering", "posWidth", "crdWidth", "slice"};
   while (succeeded(parser.parseOptionalKeyword(&attrName))) {
@@ -258,7 +257,7 @@
     // cost of the `is_contained` check above. Should instead use some
     // "find" function that returns the index into `keys` so that we can
     // dispatch on that instead.
-    if (attrName == "dimLevelType") {
+    if (attrName == "lvlTypes") {
       Attribute attr;
       RETURN_ON_FAIL(parser.parseAttribute(attr));
       auto arrayAttr = attr.dyn_cast<ArrayAttr>();
@@ -336,8 +335,8 @@
 void SparseTensorEncodingAttr::print(AsmPrinter &printer) const {
   // Print the struct-like storage in dictionary fashion.
-  printer << "<{ dimLevelType = [ ";
-  llvm::interleaveComma(getDimLevelType(), printer, [&](DimLevelType dlt) {
+  printer << "<{ lvlTypes = [ ";
+  llvm::interleaveComma(getLvlTypes(), printer, [&](DimLevelType dlt) {
     printer << "\"" << toMLIRString(dlt) << "\"";
   });
   printer << " ]";
@@ -366,7 +365,7 @@
 LogicalResult SparseTensorEncodingAttr::verify(
     function_ref<InFlightDiagnostic()> emitError,
-    ArrayRef<DimLevelType> dimLevelType, AffineMap dimOrdering,
+    ArrayRef<DimLevelType> lvlTypes, AffineMap dimOrdering,
     AffineMap higherOrdering, unsigned posWidth, unsigned crdWidth,
     ArrayRef<SparseTensorDimSliceAttr> dimSlices) {
   if (!acceptBitWidth(posWidth))
@@ -378,7 +377,7 @@
   // the `getLvlRank` method is the length of the level-types array,
   // since it must always be provided and have full rank; therefore we
   // use that same source-of-truth here.
-  const Level lvlRank = dimLevelType.size();
+  const Level lvlRank = lvlTypes.size();
   if (lvlRank == 0)
     return emitError() << "expected a non-empty array for level types";
   if (dimOrdering) {
@@ -415,9 +414,9 @@
     function_ref<InFlightDiagnostic()> emitError) const {
   // Check structural integrity. In particular, this ensures that the
   // level-rank is coherent across all the fields.
-  RETURN_FAILURE_IF_FAILED(verify(emitError, getDimLevelType(),
-                                  getDimOrdering(), getHigherOrdering(),
-                                  getPosWidth(), getCrdWidth(), getDimSlices()))
+  RETURN_FAILURE_IF_FAILED(verify(emitError, getLvlTypes(), getDimOrdering(),
+                                  getHigherOrdering(), getPosWidth(),
+                                  getCrdWidth(), getDimSlices()))
   // Check integrity with tensor type specifics. In particular, we
   // need only check that the dimension-rank of the tensor agrees with
   // the dimension-rank of the encoding.
@@ -496,14 +495,14 @@
   // An unordered and non-unique compressed level at beginning.
   // If this is also the last level, then it is unique.
   lvlTypes.push_back(
-      *getDimLevelType(LevelFormat::Compressed, ordered, lvlRank == 1));
+      *buildLevelType(LevelFormat::Compressed, ordered, lvlRank == 1));
   if (lvlRank > 1) {
     // TODO: it is actually ordered at the level for ordered input.
     // Followed by unordered non-unique n-2 singleton levels.
     std::fill_n(std::back_inserter(lvlTypes), lvlRank - 2,
-                *getDimLevelType(LevelFormat::Singleton, ordered, false));
+                *buildLevelType(LevelFormat::Singleton, ordered, false));
     // Ends by a unique singleton level unless the lvlRank is 1.
- lvlTypes.push_back(*getDimLevelType(LevelFormat::Singleton, ordered, true)); + lvlTypes.push_back(*buildLevelType(LevelFormat::Singleton, ordered, true)); } // TODO: Maybe pick the bitwidth based on input/output tensors (probably the @@ -580,8 +579,8 @@ static SparseTensorEncodingAttr getNormalizedEncodingForSpecifier(SparseTensorEncodingAttr enc) { SmallVector dlts; - for (auto dlt : enc.getDimLevelType()) - dlts.push_back(*getDimLevelType(*getLevelFormat(dlt), true, true)); + for (auto dlt : enc.getLvlTypes()) + dlts.push_back(*buildLevelType(*getLevelFormat(dlt), true, true)); return SparseTensorEncodingAttr::get( enc.getContext(), dlts, diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h --- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h +++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h @@ -79,11 +79,9 @@ const LatPoint &lat(LatPointId l) const { return latticeMerger.lat(l); } ArrayRef set(LatSetId s) const { return latticeMerger.set(s); } DimLevelType dlt(TensorId t, LoopId i) const { - return latticeMerger.getDimLevelType(t, i); - } - DimLevelType dlt(TensorLoopId b) const { - return latticeMerger.getDimLevelType(b); + return latticeMerger.getLvlType(t, i); } + DimLevelType dlt(TensorLoopId b) const { return latticeMerger.getLvlType(b); } // // LoopEmitter delegates. diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp @@ -288,7 +288,7 @@ if (stt.hasEncoding() && !(isOutputTensor(tid) && isSparseOut)) { const auto enc = stt.getEncoding(); isSparseSlices[tid] = enc.isSlice(); - for (auto lvlTp : enc.getDimLevelType()) + for (auto lvlTp : enc.getLvlTypes()) lvlTypes[tid].push_back(lvlTp); } else { lvlTypes[tid].assign(lvlRank, DimLevelType::Dense); diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp @@ -1187,7 +1187,7 @@ // TODO: We should check these in ExtractSliceOp::verify. 
if (!srcEnc || !dstEnc || !dstEnc.isSlice()) return failure(); - assert(srcEnc.getDimLevelType() == dstEnc.getDimLevelType()); + assert(srcEnc.getLvlTypes() == dstEnc.getLvlTypes()); assert(srcEnc.getDimOrdering() == dstEnc.getDimOrdering()); assert(srcEnc.getHigherOrdering() == dstEnc.getHigherOrdering()); assert(srcEnc.getPosWidth() == dstEnc.getPosWidth()); diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp @@ -205,7 +205,7 @@ SparseTensorType stt) { SmallVector lvlTypes; lvlTypes.reserve(stt.getLvlRank()); - for (const auto dlt : stt.getEncoding().getDimLevelType()) + for (const auto dlt : stt.getEncoding().getLvlTypes()) lvlTypes.push_back(constantDimLevelTypeEncoding(builder, loc, dlt)); return allocaBuffer(builder, loc, lvlTypes); } @@ -565,7 +565,7 @@ rewriter.setInsertionPointToStart(after); const bool hasDenseDim = - llvm::any_of(stt.getEncoding().getDimLevelType(), isDenseDLT); + llvm::any_of(stt.getEncoding().getLvlTypes(), isDenseDLT); if (hasDenseDim) { Value elemV = rewriter.create(loc, elemPtr); Value isZero = genIsNonzero(rewriter, loc, elemV); @@ -880,11 +880,11 @@ break; case SparseToSparseConversionStrategy::kDirect: useDirectConversion = true; - assert(canUseDirectConversion(dstEnc.getDimLevelType()) && + assert(canUseDirectConversion(dstEnc.getLvlTypes()) && "Unsupported target for direct sparse-to-sparse conversion"); break; case SparseToSparseConversionStrategy::kAuto: - useDirectConversion = canUseDirectConversion(dstEnc.getDimLevelType()); + useDirectConversion = canUseDirectConversion(dstEnc.getLvlTypes()); break; } if (useDirectConversion) { @@ -896,7 +896,7 @@ // method calls can share most parameters, while still providing // the correct sparsity information to either of them. const auto mixedEnc = SparseTensorEncodingAttr::get( - op->getContext(), dstEnc.getDimLevelType(), dstEnc.getDimOrdering(), + op->getContext(), dstEnc.getLvlTypes(), dstEnc.getDimOrdering(), dstEnc.getHigherOrdering(), srcEnc.getPosWidth(), srcEnc.getCrdWidth()); // TODO: This is the only place where `kToCOO` (or `kToIterator`) diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp @@ -44,8 +44,7 @@ // Helper to detect a sparse tensor type operand. static bool isSparseTensor(OpOperand *op) { auto enc = getSparseTensorEncoding(op->get().getType()); - return enc && - llvm::is_contained(enc.getDimLevelType(), DimLevelType::Compressed); + return enc && llvm::is_contained(enc.getLvlTypes(), DimLevelType::Compressed); } // Helper method to find zero/uninitialized allocation. 
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.cpp @@ -134,7 +134,7 @@ if (!(callback(fidx, kind, dim, dlt))) \ return; - const auto lvlTypes = enc.getDimLevelType(); + const auto lvlTypes = enc.getLvlTypes(); const Level lvlRank = enc.getLvlRank(); const Level cooStart = getCOOStart(enc); const Level end = cooStart == lvlRank ? cooStart : cooStart + 1; diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp @@ -232,7 +232,7 @@ switch (a.getKind()) { case AffineExprKind::DimId: { const LoopId idx = merger.makeLoopId(a.cast().getPosition()); - if (!isUndefDLT(merger.getDimLevelType(tid, idx))) + if (!isUndefDLT(merger.getLvlType(tid, idx))) return false; // used more than once if (setLvlFormat) @@ -243,7 +243,7 @@ case AffineExprKind::Mul: case AffineExprKind::Constant: { if (!isDenseDLT(dlt) && setLvlFormat) { - assert(isUndefDLT(merger.getDimLevelType(tid, filterLdx))); + assert(isUndefDLT(merger.getLvlType(tid, filterLdx))); // Use a filter loop for sparse affine expression. merger.setLevelAndType(tid, filterLdx, lvl, dlt); ++filterLdx; @@ -287,7 +287,7 @@ switch (a.getKind()) { case AffineExprKind::DimId: { const LoopId ldx = merger.makeLoopId(a.cast().getPosition()); - if (!isUndefDLT(merger.getDimLevelType(tensor, ldx))) + if (!isUndefDLT(merger.getLvlType(tensor, ldx))) return false; // used more than once, e.g., A[i][i] // TODO: Generalizes the following two cases. A[i] (with trivial index @@ -624,8 +624,7 @@ // Filter loops should be constructed after all the dependent loops, // i.e., d0 + d1 < filter_loop(d0 + d1) if (tldx && env.merger().isFilterLoop(*tldx)) { - assert(!ta.isa() && - !isDenseDLT(enc.getDimLevelType()[lvl])); + assert(!ta.isa() && !isDenseDLT(enc.getLvlTypes()[lvl])); addAffineOrderings(adjM, inDegree, ta, AffineExpr(), std::nullopt, tldx); // Now that the ordering of affine expression is captured by filter // loop idx, we only need to ensure the affine ordering against filter @@ -1922,7 +1921,7 @@ // auto srcTp = getRankedTensorType(tval); auto dstEnc = SparseTensorEncodingAttr::get( - getContext(), srcEnc.getDimLevelType(), + getContext(), srcEnc.getLvlTypes(), permute(env, env.op().getMatchingIndexingMap(t)), // new order srcEnc.getHigherOrdering(), srcEnc.getPosWidth(), srcEnc.getCrdWidth()); diff --git a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp --- a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp +++ b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp @@ -405,7 +405,7 @@ // Starts resetting from a dense level, so that the first bit (if kept) // is not undefined level-type. for (unsigned b = 0; b < be; b++) { - if (simple[b] && isDenseDLT(getDimLevelType(TensorLoopId{b}))) { + if (simple[b] && isDenseDLT(getLvlType(TensorLoopId{b}))) { offset = be - b - 1; // relative to the end break; } @@ -417,7 +417,7 @@ b = b == 0 ? be - 1 : b - 1, i++) { // Slice on dense level has `locate` property as well, and can be optimized. 
if (simple[b] && !isSparseLvlWithNonTrivialIdxExp(b)) { - const auto dlt = getDimLevelType(b); + const auto dlt = getLvlType(b); if (!isCompressedDLT(dlt) && !isSingletonDLT(dlt) && !isCompressedWithHiDLT(dlt)) { if (reset) simple.reset(b); @@ -584,7 +584,7 @@ bool Merger::hasAnySparse(const BitVector &bits) const { for (TensorLoopId b : bits.set_bits()) { - const auto dlt = getDimLevelType(b); + const auto dlt = getLvlType(b); if (isCompressedDLT(dlt) || isSingletonDLT(dlt) || isCompressedWithHiDLT(dlt)) return true; } diff --git a/mlir/test/CAPI/sparse_tensor.c b/mlir/test/CAPI/sparse_tensor.c --- a/mlir/test/CAPI/sparse_tensor.c +++ b/mlir/test/CAPI/sparse_tensor.c @@ -25,7 +25,7 @@ // clang-format off const char *originalAsm = "#sparse_tensor.encoding<{ " - "dimLevelType = [ \"dense\", \"compressed\", \"compressed\"], " + "lvlTypes = [ \"dense\", \"compressed\", \"compressed\"], " "dimOrdering = affine_map<(d0, d1, d2) -> (d0, d1, d2)>, " "higherOrdering = affine_map<(d0, d1)[s0] -> (s0, d0, d1)>, " "posWidth = 32, crdWidth = 64 }>"; @@ -47,12 +47,12 @@ // CHECK: level_type: 8 // CHECK: level_type: 8 int lvlRank = mlirSparseTensorEncodingGetLvlRank(originalAttr); - enum MlirSparseTensorDimLevelType *levelTypes = + enum MlirSparseTensorDimLevelType *lvlTypes = malloc(sizeof(enum MlirSparseTensorDimLevelType) * lvlRank); for (int l = 0; l < lvlRank; ++l) { - levelTypes[l] = - mlirSparseTensorEncodingAttrGetDimLevelType(originalAttr, l); - fprintf(stderr, "level_type: %d\n", levelTypes[l]); + lvlTypes[l] = + mlirSparseTensorEncodingAttrGetLvlType(originalAttr, l); + fprintf(stderr, "level_type: %d\n", lvlTypes[l]); } // CHECK: posWidth: 32 int posWidth = mlirSparseTensorEncodingAttrGetPosWidth(originalAttr); @@ -62,13 +62,13 @@ fprintf(stderr, "crdWidth: %d\n", crdWidth); MlirAttribute newAttr = - mlirSparseTensorEncodingAttrGet(ctx, lvlRank, levelTypes, dimOrdering, + mlirSparseTensorEncodingAttrGet(ctx, lvlRank, lvlTypes, dimOrdering, higherOrdering, posWidth, crdWidth); mlirAttributeDump(newAttr); // For debugging filecheck output. 
// CHECK: equal: 1 fprintf(stderr, "equal: %d\n", mlirAttributeEqual(originalAttr, newAttr)); - free(levelTypes); + free(lvlTypes); return 0; } diff --git a/mlir/test/Dialect/Bufferization/invalid.mlir b/mlir/test/Dialect/Bufferization/invalid.mlir --- a/mlir/test/Dialect/Bufferization/invalid.mlir +++ b/mlir/test/Dialect/Bufferization/invalid.mlir @@ -58,7 +58,7 @@ // ----- -#DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }> +#DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> func.func @sparse_alloc_direct_return() -> tensor<20x40xf32, #DCSR> { // expected-error @+1{{sparse tensor allocation should not escape function}} @@ -68,7 +68,7 @@ // ----- -#DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }> +#DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> func.func private @foo(tensor<20x40xf32, #DCSR>) -> () diff --git a/mlir/test/Dialect/Bufferization/ops.mlir b/mlir/test/Dialect/Bufferization/ops.mlir --- a/mlir/test/Dialect/Bufferization/ops.mlir +++ b/mlir/test/Dialect/Bufferization/ops.mlir @@ -2,7 +2,7 @@ // RUN: mlir-opt %s --mlir-print-op-generic | mlir-opt | FileCheck %s #CSR = #sparse_tensor.encoding<{ - dimLevelType = ["dense", "compressed"] + lvlTypes = ["dense", "compressed"] }> // CHECK-LABEL: func @test_clone diff --git a/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir b/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir --- a/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir +++ b/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir @@ -854,7 +854,7 @@ iterator_types = ["parallel", "reduction"] } -#CSR = #sparse_tensor.encoding<{ dimLevelType = ["dense", "compressed"] }> +#CSR = #sparse_tensor.encoding<{ lvlTypes = ["dense", "compressed"] }> func.func @sparse_case(%arg0: tensor<8x8xf32, #CSR>, %arg1: tensor<8xf32>) -> tensor<8xf32> { %0 = tensor.empty() : tensor<8xf32> diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_combi.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_combi.mlir --- a/mlir/test/Dialect/SparseTensor/GPU/gpu_combi.mlir +++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_combi.mlir @@ -3,7 +3,7 @@ // RUN: --sparsification="parallelization-strategy=dense-outer-loop" \ // RUN: --sparse-gpu-codegen | FileCheck %s -#CSR = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }> +#CSR = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }> // // CHECK-LABEL: gpu.module @sparse_kernels diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul.mlir --- a/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul.mlir +++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul.mlir @@ -3,7 +3,7 @@ // RUN: --sparsification="parallelization-strategy=dense-outer-loop" \ // RUN: --sparse-gpu-codegen | FileCheck %s -#CSR = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }> +#CSR = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }> // // Compute matrix matrix C = AB diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec.mlir --- a/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec.mlir +++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec.mlir @@ -3,7 +3,7 @@ // RUN: --sparsification="parallelization-strategy=dense-outer-loop" \ // RUN: --sparse-gpu-codegen | FileCheck %s -#CSR = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }> +#CSR = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] 
}> // // Compute matrix vector y = Ax diff --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir --- a/mlir/test/Dialect/SparseTensor/codegen.mlir +++ b/mlir/test/Dialect/SparseTensor/codegen.mlir @@ -1,62 +1,62 @@ // RUN: mlir-opt %s --sparse-tensor-codegen --canonicalize -cse | FileCheck %s -#SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }> +#SV = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }> #SparseVector = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed" ], + lvlTypes = [ "compressed" ], crdWidth = 64, posWidth = 32 }> #Dense2D = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "dense" ], + lvlTypes = [ "dense", "dense" ], crdWidth = 64, posWidth = 32 }> #Row = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "dense" ], + lvlTypes = [ "compressed", "dense" ], crdWidth = 64, posWidth = 32 }> #CSR = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], crdWidth = 64, posWidth = 32 }> #UCSR = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed-no" ] + lvlTypes = [ "dense", "compressed-no" ] }> #CSC = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(i, j) -> (j, i)> }> #DCSR = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed" ], + lvlTypes = [ "compressed", "compressed" ], crdWidth = 64, posWidth = 32 }> #Dense3D = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "dense", "dense" ], + lvlTypes = [ "dense", "dense", "dense" ], dimOrdering = affine_map<(i, j, k) -> (k, i, j)> }> #Coo = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed-nu", "singleton" ] + lvlTypes = [ "compressed-nu", "singleton" ] }> #CooPNo = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed-nu", "singleton-no" ], + lvlTypes = [ "compressed-nu", "singleton-no" ], dimOrdering = affine_map<(i, j) -> (j, i)> }> #ccoo = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed-nu", "singleton" ] + lvlTypes = [ "compressed", "compressed-nu", "singleton" ] }> // CHECK-LABEL: func @sparse_nop( @@ -680,7 +680,7 @@ } // CHECK-LABEL: func.func @sparse_new_coo( -// CHECK-SAME: %[[A0:.*]]: !llvm.ptr) -> (memref, memref, memref, !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ dimLevelType = [ "compressed", "singleton" ] }>>) { +// CHECK-SAME: %[[A0:.*]]: !llvm.ptr) -> (memref, memref, memref, !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ lvlTypes = [ "compressed", "singleton" ] }>>) { // CHECK-DAG: %[[A1:.*]] = arith.constant false // CHECK-DAG: %[[A2:.*]] = arith.constant 1 : index // CHECK-DAG: %[[A3:.*]] = arith.constant 0 : index @@ -697,7 +697,7 @@ // CHECK: %[[A13:.*]] = memref.cast %[[A12]] : memref<2xindex> to memref // CHECK: %[[A14:.*]] = memref.alloc(%[[A11]]) : memref // CHECK: %[[A15:.*]] = memref.alloc(%[[A10]]) : memref -// CHECK: %[[A16:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ dimLevelType = [ "compressed", "singleton" ] }>> +// CHECK: %[[A16:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ lvlTypes = [ "compressed", "singleton" ] }>> // CHECK: %[[A18:.*]] = sparse_tensor.storage_specifier.set %[[A16]] lvl_sz at 0 with %[[A8]] // CHECK: %[[A19:.*]] = sparse_tensor.storage_specifier.get %[[A18]] pos_mem_sz at 0 // CHECK: %[[A21:.*]], %[[A22:.*]] = sparse_tensor.push_back 
%[[A19]], %[[A13]], %[[A3]] @@ -725,7 +725,7 @@ } // CHECK-LABEL: func.func @sparse_new_coo_permute_no( -// CHECK-SAME: %[[A0:.*]]: !llvm.ptr) -> (memref, memref, memref, !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ dimLevelType = [ "compressed", "singleton" ] }>>) { +// CHECK-SAME: %[[A0:.*]]: !llvm.ptr) -> (memref, memref, memref, !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ lvlTypes = [ "compressed", "singleton" ] }>>) { // CHECK-DAG: %[[A1:.*]] = arith.constant 1 : index // CHECK-DAG: %[[A2:.*]] = arith.constant 0 : index // CHECK-DAG: %[[A3:.*]] = arith.constant 2 : index @@ -741,7 +741,7 @@ // CHECK: %[[A12:.*]] = memref.cast %[[A11]] : memref<2xindex> to memref // CHECK: %[[A13:.*]] = memref.alloc(%[[A10]]) : memref // CHECK: %[[A14:.*]] = memref.alloc(%[[A9]]) : memref -// CHECK: %[[A15:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ dimLevelType = [ "compressed", "singleton" ] }>> +// CHECK: %[[A15:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ lvlTypes = [ "compressed", "singleton" ] }>> // CHECK: %[[A17:.*]] = sparse_tensor.storage_specifier.set %[[A15]] lvl_sz at 0 with %[[A8]] // CHECK: %[[A18:.*]] = sparse_tensor.storage_specifier.get %[[A17]] pos_mem_sz at 0 // CHECK: %[[A20:.*]], %[[A21:.*]] = sparse_tensor.push_back %[[A18]], %[[A12]], %[[A2]] diff --git a/mlir/test/Dialect/SparseTensor/codegen_buffer_initialization.mlir b/mlir/test/Dialect/SparseTensor/codegen_buffer_initialization.mlir --- a/mlir/test/Dialect/SparseTensor/codegen_buffer_initialization.mlir +++ b/mlir/test/Dialect/SparseTensor/codegen_buffer_initialization.mlir @@ -1,6 +1,6 @@ // RUN: mlir-opt %s --sparse-tensor-codegen=enable-buffer-initialization=true --canonicalize --cse | FileCheck %s -#SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }> +#SV = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }> // CHECK-LABEL: func.func @sparse_alloc_sparse_vector( // CHECK-SAME: %[[VAL_0:.*]]: index) -> (memref, memref, memref, !sparse_tensor.storage_specifier diff --git a/mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir b/mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir --- a/mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir +++ b/mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s --sparse-tensor-codegen --canonicalize --cse | FileCheck %s -#CSR = #sparse_tensor.encoding<{ dimLevelType = ["dense", "compressed"]}> -#COO = #sparse_tensor.encoding<{ dimLevelType = ["compressed-nu", "singleton"]}> +#CSR = #sparse_tensor.encoding<{ lvlTypes = ["dense", "compressed"]}> +#COO = #sparse_tensor.encoding<{ lvlTypes = ["compressed-nu", "singleton"]}> // CHECK-LABEL: func.func @sparse_alloc_copy_CSR( // CHECK-SAME: %[[VAL_0:.*0]]: memref, diff --git a/mlir/test/Dialect/SparseTensor/codegen_sparse_dealloc.mlir b/mlir/test/Dialect/SparseTensor/codegen_sparse_dealloc.mlir --- a/mlir/test/Dialect/SparseTensor/codegen_sparse_dealloc.mlir +++ b/mlir/test/Dialect/SparseTensor/codegen_sparse_dealloc.mlir @@ -6,9 +6,9 @@ // RUN: --sparse-tensor-codegen=create-sparse-deallocs=true \ // RUN: --canonicalize --cse | FileCheck %s -check-prefix=CHECK-DEALLOC -#CSR = #sparse_tensor.encoding<{ dimLevelType = ["dense", "compressed"]}> +#CSR = #sparse_tensor.encoding<{ lvlTypes = ["dense", "compressed"]}> #CSC = #sparse_tensor.encoding<{ - dimLevelType = ["dense", "compressed"], + lvlTypes = ["dense", "compressed"], 
dimOrdering = affine_map<(i,j) -> (j,i)> }> diff --git a/mlir/test/Dialect/SparseTensor/codegen_to_llvm.mlir b/mlir/test/Dialect/SparseTensor/codegen_to_llvm.mlir --- a/mlir/test/Dialect/SparseTensor/codegen_to_llvm.mlir +++ b/mlir/test/Dialect/SparseTensor/codegen_to_llvm.mlir @@ -1,6 +1,6 @@ // RUN: mlir-opt %s --sparse-tensor-codegen --sparse-storage-specifier-to-llvm | FileCheck %s -#SparseVector = #sparse_tensor.encoding<{ dimLevelType = ["compressed"] }> +#SparseVector = #sparse_tensor.encoding<{ lvlTypes = ["compressed"] }> // CHECK-LABEL: func @sparse_nop( // CHECK-SAME: %[[A0:.*0]]: memref, diff --git a/mlir/test/Dialect/SparseTensor/constant_index_map.mlir b/mlir/test/Dialect/SparseTensor/constant_index_map.mlir --- a/mlir/test/Dialect/SparseTensor/constant_index_map.mlir +++ b/mlir/test/Dialect/SparseTensor/constant_index_map.mlir @@ -5,7 +5,7 @@ #map1 = affine_map<(d0) -> (0, d0)> #map2 = affine_map<(d0) -> (d0)> -#SpVec = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }> +#SpVec = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }> // CHECK-LABEL: func.func @main( // CHECK-SAME: %[[VAL_0:.*0]]: tensor<1x77xi1>, diff --git a/mlir/test/Dialect/SparseTensor/conversion.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir --- a/mlir/test/Dialect/SparseTensor/conversion.mlir +++ b/mlir/test/Dialect/SparseTensor/conversion.mlir @@ -1,32 +1,32 @@ // RUN: mlir-opt %s --sparse-tensor-conversion --canonicalize --cse | FileCheck %s #SparseVector = #sparse_tensor.encoding<{ - dimLevelType = ["compressed"] + lvlTypes = ["compressed"] }> #SparseVector64 = #sparse_tensor.encoding<{ - dimLevelType = ["compressed"], + lvlTypes = ["compressed"], posWidth = 64, crdWidth = 64 }> #SparseVector32 = #sparse_tensor.encoding<{ - dimLevelType = ["compressed"], + lvlTypes = ["compressed"], posWidth = 32, crdWidth = 32 }> #CSR = #sparse_tensor.encoding<{ - dimLevelType = ["dense", "compressed"] + lvlTypes = ["dense", "compressed"] }> #CSC = #sparse_tensor.encoding<{ - dimLevelType = ["dense", "compressed"], + lvlTypes = ["dense", "compressed"], dimOrdering = affine_map<(i,j) -> (j,i)> }> #SparseTensor = #sparse_tensor.encoding<{ - dimLevelType = ["dense", "compressed", "compressed"], + lvlTypes = ["dense", "compressed", "compressed"], dimOrdering = affine_map<(i,j,k) -> (k,i,j)> }> diff --git a/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir b/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir --- a/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir +++ b/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir @@ -3,20 +3,20 @@ // RUN: --canonicalize --cse | FileCheck %s --check-prefix=CHECK-RWT #SparseVector = #sparse_tensor.encoding<{ - dimLevelType = ["compressed"] + lvlTypes = ["compressed"] }> #CSR = #sparse_tensor.encoding<{ - dimLevelType = ["dense", "compressed"] + lvlTypes = ["dense", "compressed"] }> #CSC = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(i, j) -> (j, i)> }> #SparseTensor = #sparse_tensor.encoding<{ - dimLevelType = ["dense", "compressed", "compressed"], + lvlTypes = ["dense", "compressed", "compressed"], dimOrdering = affine_map<(i,j,k) -> (k,i,j)> }> @@ -113,7 +113,7 @@ // CHECK: return %[[T]] : !llvm.ptr // CHECK-RWT-LABEL: func.func @sparse_convert_2d( -// CHECK-RWT-SAME: %[[T0:.*]]: tensor<2x4xf64>) -> tensor<2x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> { +// CHECK-RWT-SAME: %[[T0:.*]]: tensor<2x4xf64>) -> 
tensor<2x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> { // CHECK-RWT: %[[T1:.*]] = bufferization.alloc_tensor() // CHECK-RWT: %[[T2:.*]] = sparse_tensor.foreach in %[[T0]] init(%[[T1]]) // CHECK-RWT: ^bb0(%[[L0I0:.*]]: index, %[[L0I1:.*]]: index, %[[L0V:.*]]: f64, %[[L0T:.*]]: tensor @@ -164,7 +164,7 @@ // CHECK: call @delSparseTensorCOOF32(%[[C]]) // CHECK: return %[[T]] : !llvm.ptr -// CHECK-RWT-LABEL: func.func @sparse_constant() -> tensor<8x7xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> { +// CHECK-RWT-LABEL: func.func @sparse_constant() -> tensor<8x7xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> { // CHECK-RWT: %[[F0:.*]] = arith.constant sparse<{{\[\[}}0, 0], [1, 6]], [1.000000e+00, 5.000000e+00]> : tensor<8x7xf32> // CHECK-RWT: %[[T0:.*]] = bufferization.alloc_tensor() // CHECK-RWT: %[[T1:.*]] = sparse_tensor.foreach in %[[F0]] init(%[[T0]]) diff --git a/mlir/test/Dialect/SparseTensor/convert_sparse2dense.mlir b/mlir/test/Dialect/SparseTensor/convert_sparse2dense.mlir --- a/mlir/test/Dialect/SparseTensor/convert_sparse2dense.mlir +++ b/mlir/test/Dialect/SparseTensor/convert_sparse2dense.mlir @@ -4,15 +4,15 @@ // RUN: --canonicalize --cse | FileCheck %s --check-prefix=CHECK-RWT #SparseVector = #sparse_tensor.encoding<{ - dimLevelType = ["compressed"] + lvlTypes = ["compressed"] }> #SparseMatrix = #sparse_tensor.encoding<{ - dimLevelType = ["dense", "compressed"] + lvlTypes = ["dense", "compressed"] }> #SparseTensor = #sparse_tensor.encoding<{ - dimLevelType = ["dense", "compressed", "compressed"], + lvlTypes = ["dense", "compressed", "compressed"], dimOrdering = affine_map<(i,j,k) -> (k,i,j)> }> @@ -145,7 +145,7 @@ // CHECK: return %[[T]] : tensor<2x4xf64> // CHECK-RWT-LABEL: func.func @sparse_convert_2d( -// CHECK-RWT-SAME: %[[A:.*]]: tensor<2x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>>) -> tensor<2x4xf64> { +// CHECK-RWT-SAME: %[[A:.*]]: tensor<2x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>) -> tensor<2x4xf64> { // CHECK-RWT: %[[F0:.*]] = arith.constant 0.000000e+00 : f64 // CHECK-RWT: %[[B:.*]] = memref.alloc() : memref<2x4xf64> // CHECK-RWT: linalg.fill ins(%[[F0]] : f64) outs(%[[B]] @@ -301,7 +301,7 @@ // CHECK: return %[[T]] : tensor // CHECK-RWT-LABEL: func.func @sparse_convert_2d_dyn2( -// CHECK-RWT-SAME: %[[A:.*]]: tensor>) -> tensor { +// CHECK-RWT-SAME: %[[A:.*]]: tensor>) -> tensor { // CHECK-RWT-DAG: %[[C0:.*]] = arith.constant 0 : index // CHECK-RWT-DAG: %[[C1:.*]] = arith.constant 1 : index // CHECK-RWT-DAG: %[[F0:.*]] = arith.constant 0.000000e+00 : f64 diff --git a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir --- a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir +++ b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir @@ -10,37 +10,37 @@ // RUN: --canonicalize --cse | FileCheck %s --check-prefix=CHECK-RWT #SparseVector64 = #sparse_tensor.encoding<{ - dimLevelType = ["compressed"], + lvlTypes = ["compressed"], posWidth = 64, crdWidth = 64 }> #SparseVector32 = #sparse_tensor.encoding<{ - dimLevelType = ["compressed"], + lvlTypes = ["compressed"], posWidth = 32, crdWidth = 32 }> #SparseVector = #sparse_tensor.encoding<{ - dimLevelType = ["compressed"] + lvlTypes = ["compressed"] }> #SortedCOO2D = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed-nu", "singleton" ], + lvlTypes = [ "compressed-nu", "singleton" ], }> #SortedCOO3D = 
#sparse_tensor.encoding<{ - dimLevelType = [ "compressed-nu", "singleton-nu", "singleton" ] + lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ] }> #TsssPermuted = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed", "compressed" ], + lvlTypes = [ "compressed", "compressed", "compressed" ], dimOrdering = affine_map<(i,j,k) -> (k,i,j)> }> #COOSlice = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed-nu", "singleton" ], + lvlTypes = [ "compressed-nu", "singleton" ], slice = [ (2, 2, 1), (12, 13, 1) ] }> @@ -115,13 +115,13 @@ } #SparseSingleton64 = #sparse_tensor.encoding<{ - dimLevelType = ["singleton"], + lvlTypes = ["singleton"], posWidth = 64, crdWidth = 64 }> #SparseSingleton32 = #sparse_tensor.encoding<{ - dimLevelType = ["singleton"], + lvlTypes = ["singleton"], posWidth = 32, crdWidth = 32 }> diff --git a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse_element.mlir b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse_element.mlir --- a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse_element.mlir +++ b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse_element.mlir @@ -1,13 +1,13 @@ // RUN: mlir-opt %s --sparse-tensor-codegen --canonicalize --cse | FileCheck %s #SparseVector64 = #sparse_tensor.encoding<{ - dimLevelType = ["compressed"], + lvlTypes = ["compressed"], posWidth = 64, crdWidth = 64 }> #SparseVector32 = #sparse_tensor.encoding<{ - dimLevelType = ["compressed"], + lvlTypes = ["compressed"], posWidth = 32, crdWidth = 32 }> diff --git a/mlir/test/Dialect/SparseTensor/dense.mlir b/mlir/test/Dialect/SparseTensor/dense.mlir --- a/mlir/test/Dialect/SparseTensor/dense.mlir +++ b/mlir/test/Dialect/SparseTensor/dense.mlir @@ -7,7 +7,7 @@ // latter class is linearized into one-dimensional buffers that are backed // by the runtime support library. 
-#DenseMatrix = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense" ] }> +#DenseMatrix = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense" ] }> #trait_2d = { indexing_maps = [ diff --git a/mlir/test/Dialect/SparseTensor/fold.mlir b/mlir/test/Dialect/SparseTensor/fold.mlir --- a/mlir/test/Dialect/SparseTensor/fold.mlir +++ b/mlir/test/Dialect/SparseTensor/fold.mlir @@ -1,6 +1,6 @@ // RUN: mlir-opt %s --canonicalize --cse | FileCheck %s -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> // CHECK-LABEL: func @sparse_nop_dense2dense_convert( // CHECK-SAME: %[[A:.*]]: tensor<64xf32>) diff --git a/mlir/test/Dialect/SparseTensor/invalid.mlir b/mlir/test/Dialect/SparseTensor/invalid.mlir --- a/mlir/test/Dialect/SparseTensor/invalid.mlir +++ b/mlir/test/Dialect/SparseTensor/invalid.mlir @@ -8,7 +8,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], crdWidth=32}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"], crdWidth=32}> func.func @non_static_pack_ret(%values: tensor<6xf64>, %coordinates: tensor<6x1xi32>) -> tensor { @@ -20,7 +20,7 @@ // ----- -#DenseVector = #sparse_tensor.encoding<{dimLevelType = ["dense"], crdWidth=32}> +#DenseVector = #sparse_tensor.encoding<{lvlTypes = ["dense"], crdWidth=32}> func.func @invalid_pack_dense(%values: tensor<6xf64>, %coordinates: tensor<6x1xi32>) -> tensor<100xf64, #DenseVector> { @@ -32,7 +32,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], crdWidth=32}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"], crdWidth=32}> func.func @invalid_pack_data(%values: tensor<6x1xf64>, %coordinates: tensor<6x1xi32>) -> tensor<100xf64, #SparseVector> { @@ -44,7 +44,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], crdWidth=32}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"], crdWidth=32}> func.func @invalid_pack_type(%values: tensor<6xf64>, %coordinates: tensor<6x1xi32>) -> tensor<100xf32, #SparseVector> { @@ -56,7 +56,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], crdWidth=32}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"], crdWidth=32}> func.func @invalid_pack_type(%values: tensor<5xf64>, %coordinates: tensor<6x1xi32>) -> tensor<100xf64, #SparseVector> { @@ -68,7 +68,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], crdWidth=32}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"], crdWidth=32}> func.func @invalid_pack_type(%values: tensor<6xf64>, %coordinates: tensor<6x2xi32>) -> tensor<100xf64, #SparseVector> { @@ -80,7 +80,7 @@ // ----- -#BCOO = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed-hi"], crdWidth=32}> +#BCOO = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed-hi"], crdWidth=32}> func.func @invalid_pack_batched(%values: tensor<2x6xf64>, %coordinates: tensor<3x6x1xi32>) -> tensor<2x100xf64, #BCOO> { @@ -92,7 +92,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], crdWidth=32}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"], crdWidth=32}> func.func @invalid_unpack_type(%sp: tensor<100xf32, #SparseVector>) -> (tensor<6xf64>, tensor<6x1xi32>, i32) { @@ -104,7 +104,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], crdWidth=32}> +#SparseVector = 
#sparse_tensor.encoding<{lvlTypes = ["compressed"], crdWidth=32}> func.func @invalid_unpack_type(%sp: tensor<100xf32, #SparseVector>) -> (tensor<5xf32>, tensor<6x1xi32>, i32) { @@ -116,7 +116,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], crdWidth=32}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"], crdWidth=32}> func.func @invalid_unpack_type(%sp: tensor<100xf32, #SparseVector>) -> (tensor<6xf32>, tensor<6x2xi32>, i32) { @@ -128,7 +128,7 @@ // ----- -#BCOO = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed-hi"], crdWidth=32}> +#BCOO = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed-hi"], crdWidth=32}> func.func @invalid_unpack_type(%sp: tensor<2x100xf32, #BCOO>) -> (tensor<2x6xf32>, tensor<3x6x2xi32>, i32) { @@ -156,7 +156,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], posWidth=32}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"], posWidth=32}> func.func @mismatch_positions_types(%arg0: tensor<128xf64, #SparseVector>) -> memref { // expected-error@+1 {{unexpected type for positions}} @@ -166,7 +166,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> func.func @positions_oob(%arg0: tensor<128xf64, #SparseVector>) -> memref { // expected-error@+1 {{requested level is out of bounds}} @@ -192,7 +192,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> func.func @mismatch_indices_types(%arg0: tensor) -> memref { // expected-error@+1 {{unexpected type for coordinates}} @@ -202,7 +202,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> func.func @indices_oob(%arg0: tensor<128xf64, #SparseVector>) -> memref { // expected-error@+1 {{requested level is out of bounds}} @@ -220,7 +220,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> func.func @indices_buffer_noncoo(%arg0: tensor<128xf64, #SparseVector>) -> memref { // expected-error@+1 {{expected sparse tensor with a COO region}} @@ -238,7 +238,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> func.func @mismatch_values_types(%arg0: tensor) -> memref { // expected-error@+1 {{unexpected mismatch in element types}} @@ -249,7 +249,7 @@ // ----- #CSR_SLICE = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], slice = [ (1, 4, 1), (1, 4, 2) ] }> @@ -262,7 +262,7 @@ // ----- #CSR_SLICE = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], slice = [ (1, 4, 1), (1, 4, 2) ] }> @@ -274,7 +274,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>) -> index { // expected-error@+1 {{redundant level argument for querying value memory size}} @@ -285,7 +285,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = 
["compressed"]}> func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>) -> i64 { // expected-error@+1 {{requested slice data on non-slice tensor}} @@ -296,7 +296,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>) -> index { // expected-error@+1 {{missing level argument}} @@ -307,7 +307,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>) -> index { // expected-error@+1 {{requested level is out of bounds}} @@ -318,7 +318,7 @@ // ----- -#COO = #sparse_tensor.encoding<{dimLevelType = ["compressed-nu", "singleton"]}> +#COO = #sparse_tensor.encoding<{lvlTypes = ["compressed-nu", "singleton"]}> func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#COO>) -> index { // expected-error@+1 {{requested position memory size on a singleton level}} @@ -345,7 +345,7 @@ // ----- -#CSR = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}> +#CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> func.func @sparse_wrong_arity_insert(%arg0: tensor<128x64xf64, #CSR>, %arg1: index, %arg2: f64) { // expected-error@+1 {{'sparse_tensor.insert' op incorrect number of coordinates}} @@ -395,7 +395,7 @@ // ----- -#CSR = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}> +#CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> func.func @sparse_wrong_arity_compression(%arg0: memref, %arg1: memref, @@ -419,7 +419,7 @@ // ----- -#DCSR = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> +#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> func.func @sparse_convert_rank_mismatch(%arg0: tensor<10x10xf64, #DCSR>) -> tensor { // expected-error@+1 {{unexpected conversion mismatch in rank}} @@ -429,7 +429,7 @@ // ----- -#CSR = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}> +#CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> func.func @sparse_convert_dim_mismatch(%arg0: tensor<10x?xf32>) -> tensor<10x10xf32, #CSR> { // expected-error@+1 {{unexpected conversion mismatch in dimension 1}} @@ -448,7 +448,7 @@ // ----- #CSR = #sparse_tensor.encoding<{ - dimLevelType = ["dense", "compressed"], + lvlTypes = ["dense", "compressed"], slice = [ (1, 4, 1), (1, 4, 2) ] }> @@ -680,7 +680,7 @@ // ----- -#DC = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}> +#DC = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> func.func @invalid_concat_less_inputs(%arg: tensor<9x4xf64, #DC>) -> tensor<9x4xf64, #DC> { // expected-error@+1 {{Need at least two tensors to concatenate.}} %0 = sparse_tensor.concatenate %arg {dimension = 1 : index} @@ -690,7 +690,7 @@ // ----- -#DC = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}> +#DC = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> func.func @invalid_concat_dim(%arg0: tensor<2x4xf64, #DC>, %arg1: tensor<3x4xf64, #DC>, %arg2: tensor<4x4xf64, #DC>) -> tensor<9x4xf64, #DC> { @@ -704,9 +704,9 @@ // ----- -#C = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> -#DC = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}> -#DCC = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed", 
"compressed"]}> +#C = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> +#DC = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> +#DCC = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed", "compressed"]}> func.func @invalid_concat_rank_mismatch(%arg0: tensor<2xf64, #C>, %arg1: tensor<3x4xf64, #DC>, %arg2: tensor<4x4x4xf64, #DCC>) -> tensor<9x4xf64, #DC> { @@ -720,7 +720,7 @@ // ----- -#DC = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}> +#DC = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> func.func @invalid_concat_size_mismatch_dyn(%arg0: tensor, %arg1: tensor<5x4xf64, #DC>, %arg2: tensor<4x4xf64, #DC>) -> tensor<9x4xf64, #DC> { @@ -734,7 +734,7 @@ // ----- -#DC = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}> +#DC = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> func.func @invalid_concat_size_mismatch(%arg0: tensor<3x4xf64, #DC>, %arg1: tensor<5x4xf64, #DC>, %arg2: tensor<4x4xf64, #DC>) -> tensor<9x4xf64, #DC> { @@ -748,7 +748,7 @@ // ----- -#DC = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}> +#DC = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> func.func @invalid_concat_size_mismatch(%arg0: tensor<2x4xf64, #DC>, %arg1: tensor<3x3xf64, #DC>, %arg2: tensor<4x4xf64, #DC>) -> tensor<9x4xf64, #DC> { @@ -762,7 +762,7 @@ // ----- -#DCSR = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> +#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>) -> () { // expected-error@+1 {{Unmatched number of arguments in the block}} sparse_tensor.foreach in %arg0 : tensor<2x4xf64, #DCSR> do { @@ -773,7 +773,7 @@ // ----- -#DCSR = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> +#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>) -> () { // expected-error@+1 {{Expecting Index type for argument at index 1}} sparse_tensor.foreach in %arg0 : tensor<2x4xf64, #DCSR> do { @@ -784,7 +784,7 @@ // ----- -#DCSR = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> +#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>) -> () { // expected-error@+1 {{Unmatched element type between input tensor and block argument}} sparse_tensor.foreach in %arg0 : tensor<2x4xf64, #DCSR> do { @@ -795,7 +795,7 @@ // ----- -#DCSR = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> +#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>) -> () { // expected-error@+1 {{Unmatched element type between input tensor and block argument}} sparse_tensor.foreach in %arg0 : tensor<2x4xf64, #DCSR> do { @@ -806,7 +806,7 @@ // ----- -#DCSR = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> +#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>, %arg1: f32) -> () { // expected-error@+1 {{Mismatch in number of init arguments and results}} sparse_tensor.foreach in %arg0 init(%arg1) : tensor<2x4xf64, #DCSR>, f32 do { @@ -817,7 +817,7 @@ // ----- -#DCSR = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> +#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> func.func 
@sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>, %arg1: f32) -> () { // expected-error@+1 {{Mismatch in types of init arguments and results}} %1 = sparse_tensor.foreach in %arg0 init(%arg1) : tensor<2x4xf64, #DCSR>, f32 -> i32 do { @@ -828,7 +828,7 @@ // ----- -#DCSR = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> +#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> func.func @sparse_tensor_foreach(%arg0: tensor<2x4xf64, #DCSR>, %arg1: f32) -> () { // expected-error@+1 {{Mismatch in types of yield values and results}} %1 = sparse_tensor.foreach in %arg0 init(%arg1) : tensor<2x4xf64, #DCSR>, f32 -> f32 do { @@ -892,7 +892,7 @@ // ----- -#CSR = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}> +#CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> func.func @sparse_alloc_escapes(%arg0: index) -> tensor<10x?xf64, #CSR> { // expected-error@+1 {{sparse tensor allocation should not escape function}} diff --git a/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir --- a/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir +++ b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir @@ -1,27 +1,27 @@ // RUN: mlir-opt %s -split-input-file -verify-diagnostics // expected-error@+1 {{expected a non-empty array for level types}} -#a = #sparse_tensor.encoding<{dimLevelType = []}> +#a = #sparse_tensor.encoding<{lvlTypes = []}> func.func private @scalar(%arg0: tensor) -> () // ----- -#a = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}> +#a = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> func.func private @tensor_dimlevel_size_mismatch(%arg0: tensor<8xi32, #a>) -> () // expected-error {{expected an array of size 1 for dimension level types}} // ----- -#a = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"], dimOrdering = affine_map<(i) -> (i)>}> // expected-error {{unexpected mismatch in ordering and dimension level types size}} +#a = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"], dimOrdering = affine_map<(i) -> (i)>}> // expected-error {{unexpected mismatch in ordering and dimension level types size}} func.func private @tensor_sizes_mismatch(%arg0: tensor<8xi32, #a>) -> () // ----- -#a = #sparse_tensor.encoding<{dimLevelType = [1]}> // expected-error {{expected a string value in dimension level types}} +#a = #sparse_tensor.encoding<{lvlTypes = [1]}> // expected-error {{expected a string value in dimension level types}} func.func private @tensor_type_mismatch(%arg0: tensor<8xi32, #a>) -> () // ----- -#a = #sparse_tensor.encoding<{dimLevelType = ["strange"]}> // expected-error {{unexpected dimension level type: strange}} +#a = #sparse_tensor.encoding<{lvlTypes = ["strange"]}> // expected-error {{unexpected dimension level type: strange}} func.func private @tensor_value_mismatch(%arg0: tensor<8xi32, #a>) -> () // ----- @@ -37,7 +37,7 @@ // ----- // expected-error@+1 {{expected a permutation affine map for dimension ordering}} -#a = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"], dimOrdering = affine_map<(i,j) -> (i,i)>}> +#a = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"], dimOrdering = affine_map<(i,j) -> (i,i)>}> func.func private @tensor_no_permutation(%arg0: tensor<16x32xf32, #a>) -> () // ----- @@ -67,13 +67,13 @@ // ----- -#a = #sparse_tensor.encoding<{dimLevelType = [ "compressed", "compressed", "dense", "dense" ], dimOrdering = affine_map<(ii, jj, i, j) -> (ii, jj, i, j)>, 
higherOrdering = affine_map<(i, j) -> (j, i)>}> // expected-error {{unexpected higher ordering mapping from 2 to 2}} +#a = #sparse_tensor.encoding<{lvlTypes = [ "compressed", "compressed", "dense", "dense" ], dimOrdering = affine_map<(ii, jj, i, j) -> (ii, jj, i, j)>, higherOrdering = affine_map<(i, j) -> (j, i)>}> // expected-error {{unexpected higher ordering mapping from 2 to 2}} func.func private @tensor_invalid_key(%arg0: tensor<10x60xf32, #a>) -> () // ----- #CSR_SLICE = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], slice = [ (-1, ?, 1), (?, 4, 2) ] // expected-error{{expect positive value or ? for slice offset/size/stride}} }> func.func private @sparse_slice(tensor) diff --git a/mlir/test/Dialect/SparseTensor/one_shot_bufferize_tensor_copy_insertion.mlir b/mlir/test/Dialect/SparseTensor/one_shot_bufferize_tensor_copy_insertion.mlir --- a/mlir/test/Dialect/SparseTensor/one_shot_bufferize_tensor_copy_insertion.mlir +++ b/mlir/test/Dialect/SparseTensor/one_shot_bufferize_tensor_copy_insertion.mlir @@ -2,7 +2,7 @@ // RUN: mlir-opt %s -test-tensor-copy-insertion="bufferize-function-boundaries allow-return-allocs" | FileCheck %s --check-prefix=CHECK-FUNC #DCSR = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed" ], + lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(i,j) -> (i,j)> }> @@ -41,7 +41,7 @@ return %2 : tensor<20x40xf32> } -#SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }> +#SV = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }> #trait = { indexing_maps = [ diff --git a/mlir/test/Dialect/SparseTensor/one_trip.mlir b/mlir/test/Dialect/SparseTensor/one_trip.mlir --- a/mlir/test/Dialect/SparseTensor/one_trip.mlir +++ b/mlir/test/Dialect/SparseTensor/one_trip.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s -sparsification -cse | FileCheck %s #Dense = #sparse_tensor.encoding<{ - dimLevelType = [ "dense" , "dense" ] + lvlTypes = [ "dense" , "dense" ] }> #trait_scale = { @@ -13,15 +13,15 @@ } // CHECK-LABEL: func.func @sparse_scale( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<1x1xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense" ] }>>) +// CHECK-SAME: %[[VAL_0:.*]]: tensor<1x1xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense" ] }>>) // CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2.000000e+00 : f32 -// CHECK: %[[VAL_3:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<1x1xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense" ] }>> to memref +// CHECK: %[[VAL_3:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<1x1xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense" ] }>> to memref // CHECK: %[[VAL_4:.*]] = memref.load %[[VAL_3]]{{\[}}%[[VAL_1]]] : memref // CHECK: %[[VAL_5:.*]] = arith.mulf %[[VAL_4]], %[[VAL_2]] : f32 // CHECK: memref.store %[[VAL_5]], %[[VAL_3]]{{\[}}%[[VAL_1]]] : memref -// CHECK: %[[VAL_6:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<1x1xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense" ] }>> -// CHECK: return %[[VAL_6]] : tensor<1x1xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense" ] }>> +// CHECK: %[[VAL_6:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<1x1xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense" ] }>> +// CHECK: return %[[VAL_6]] : tensor<1x1xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense" ] }>> func.func @sparse_scale(%argx: tensor<1x1xf32, #Dense>) -> tensor<1x1xf32, #Dense> { %c = arith.constant 
2.0 : f32 %0 = linalg.generic #trait_scale diff --git a/mlir/test/Dialect/SparseTensor/post_rewriting.mlir b/mlir/test/Dialect/SparseTensor/post_rewriting.mlir --- a/mlir/test/Dialect/SparseTensor/post_rewriting.mlir +++ b/mlir/test/Dialect/SparseTensor/post_rewriting.mlir @@ -1,11 +1,11 @@ // RUN: mlir-opt %s -post-sparsification-rewrite | FileCheck %s #SparseVector = #sparse_tensor.encoding<{ - dimLevelType = ["compressed"] + lvlTypes = ["compressed"] }> #SparseMatrix = #sparse_tensor.encoding<{ - dimLevelType = ["compressed", "compressed"] + lvlTypes = ["compressed", "compressed"] }> // CHECK-LABEL: func.func @expand_dense( diff --git a/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir b/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir --- a/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir +++ b/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir @@ -1,15 +1,15 @@ // RUN: mlir-opt %s -pre-sparsification-rewrite | FileCheck %s #SparseVector = #sparse_tensor.encoding<{ - dimLevelType = ["compressed"] + lvlTypes = ["compressed"] }> #SortedCOO = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed-nu", "singleton" ] + lvlTypes = [ "compressed-nu", "singleton" ] }> #Slice = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed-nu", "singleton" ], + lvlTypes = [ "compressed-nu", "singleton" ], slice = [ (?, 1, 1), (?, 3, 1) ] }> diff --git a/mlir/test/Dialect/SparseTensor/rejected.mlir b/mlir/test/Dialect/SparseTensor/rejected.mlir --- a/mlir/test/Dialect/SparseTensor/rejected.mlir +++ b/mlir/test/Dialect/SparseTensor/rejected.mlir @@ -3,7 +3,7 @@ // The file contains examples that will be rejected by sparse compiler // (we expect the linalg.generic unchanged). -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> #trait = { indexing_maps = [ @@ -15,7 +15,7 @@ // CHECK-LABEL: func.func @sparse_reduction_subi( // CHECK-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { +// CHECK-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK: %[[VAL_2:.*]] = linalg.generic // CHECK: ^bb0(%[[VAL_3:.*]]: i32, %[[VAL_4:.*]]: i32): // CHECK: %[[VAL_5:.*]] = arith.subi %[[VAL_3]], %[[VAL_4]] : i32 diff --git a/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir b/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir --- a/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir +++ b/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir @@ -2,21 +2,21 @@ // RUN: FileCheck %s #CSR = #sparse_tensor.encoding<{ - dimLevelType = ["dense", "compressed"] + lvlTypes = ["dense", "compressed"] }> #CSC = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(i, j) -> (j, i)> }> #COO = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed-nu", "singleton" ] + lvlTypes = [ "compressed-nu", "singleton" ] }> // CHECK-LABEL: func.func @sparse_new( -// CHECK-SAME: %[[A:.*]]: !llvm.ptr) -> tensor> { -// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr to tensor> +// CHECK-SAME: %[[A:.*]]: !llvm.ptr) -> tensor> { +// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr to tensor> // CHECK: %[[R:.*]] = sparse_tensor.convert %[[COO]] // CHECK: bufferization.dealloc_tensor %[[COO]] // CHECK: return %[[R]] @@ -26,8 +26,8 @@ } // CHECK-LABEL: func.func @sparse_new_csc( -// CHECK-SAME: %[[A:.*]]: !llvm.ptr) -> tensor (d1, d0)> }>> { -// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr to 
tensor (d1, d0)> }>> +// CHECK-SAME: %[[A:.*]]: !llvm.ptr) -> tensor (d1, d0)> }>> { +// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr to tensor (d1, d0)> }>> // CHECK: %[[R:.*]] = sparse_tensor.convert %[[COO]] // CHECK: bufferization.dealloc_tensor %[[COO]] // CHECK: return %[[R]] @@ -37,8 +37,8 @@ } // CHECK-LABEL: func.func @sparse_new_coo( -// CHECK-SAME: %[[A:.*]]: !llvm.ptr) -> tensor> { -// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr to tensor> +// CHECK-SAME: %[[A:.*]]: !llvm.ptr) -> tensor> { +// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr to tensor> // CHECK: return %[[COO]] func.func @sparse_new_coo(%arg0: !llvm.ptr) -> tensor { %0 = sparse_tensor.new %arg0 : !llvm.ptr to tensor @@ -46,7 +46,7 @@ } // CHECK-LABEL: func.func @sparse_out( -// CHECK-SAME: %[[A:.*]]: tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>>, +// CHECK-SAME: %[[A:.*]]: tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>, // CHECK-SAME: %[[B:.*]]: !llvm.ptr) { // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index diff --git a/mlir/test/Dialect/SparseTensor/roundtrip.mlir b/mlir/test/Dialect/SparseTensor/roundtrip.mlir --- a/mlir/test/Dialect/SparseTensor/roundtrip.mlir +++ b/mlir/test/Dialect/SparseTensor/roundtrip.mlir @@ -1,6 +1,6 @@ // RUN: mlir-opt %s -split-input-file | mlir-opt -split-input-file | FileCheck %s -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> // CHECK-LABEL: func @sparse_new( // CHECK-SAME: %[[A:.*]]: !llvm.ptr) @@ -13,7 +13,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], crdWidth=32}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"], crdWidth=32}> // CHECK-LABEL: func @sparse_pack( // CHECK-SAME: %[[D:.*]]: tensor<6xf64>, @@ -29,7 +29,7 @@ // ----- -#BCOO = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed-hi"], crdWidth=32}> +#BCOO = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed-hi"], crdWidth=32}> // CHECK-LABEL: func @sparse_pack_batched( // CHECK-SAME: %[[D:.*]]: tensor<2x6xf64>, // CHECK-SAME: %[[I:.*]]: tensor<2x6x1xi32>) @@ -44,7 +44,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], crdWidth=32}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"], crdWidth=32}> // CHECK-LABEL: func @sparse_unpack( // CHECK-SAME: %[[T:.*]]: tensor<100xf64, # @@ -59,7 +59,7 @@ // ----- -#BatchedSparseVector = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed-hi"], crdWidth=32}> +#BatchedSparseVector = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed-hi"], crdWidth=32}> // CHECK-LABEL: func @sparse_unpack( // CHECK-SAME: %[[T:.*]]: tensor<2x100xf64, # @@ -74,7 +74,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> // CHECK-LABEL: func @sparse_dealloc( // CHECK-SAME: %[[A:.*]]: tensor<128xf64, #{{.*}}> @@ -87,7 +87,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> // CHECK-LABEL: func @sparse_convert_1d_to_sparse( // CHECK-SAME: %[[A:.*]]: tensor<64xf32>) @@ -100,7 +100,7 @@ // ----- -#SparseTensor = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "compressed" ] }> 
+#SparseTensor = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ] }> // CHECK-LABEL: func @sparse_convert_3d_from_sparse( // CHECK-SAME: %[[A:.*]]: tensor<8x8x8xf64, #{{.*}}>) @@ -113,7 +113,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> // CHECK-LABEL: func @sparse_positions( // CHECK-SAME: %[[A:.*]]: tensor<128xf64, #{{.*}}>) @@ -126,7 +126,7 @@ // ----- -#COO = #sparse_tensor.encoding<{dimLevelType = ["compressed-nu", "singleton"]}> +#COO = #sparse_tensor.encoding<{lvlTypes = ["compressed-nu", "singleton"]}> // CHECK-LABEL: func @sparse_indices_buffer( // CHECK-SAME: %[[A:.*]]: tensor) @@ -139,7 +139,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> // CHECK-LABEL: func @sparse_indices( // CHECK-SAME: %[[A:.*]]: tensor<128xf64, #{{.*}}>) @@ -152,7 +152,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> // CHECK-LABEL: func @sparse_values( // CHECK-SAME: %[[A:.*]]: tensor<128xf64, #{{.*}}>) @@ -166,7 +166,7 @@ // ----- #CSR_SLICE = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], slice = [ (1, 4, 1), (1, 4, 2) ] }> @@ -182,7 +182,7 @@ // ----- #CSR_SLICE = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], slice = [ (1, 4, 1), (1, 4, 2) ] }> @@ -197,7 +197,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> // CHECK-LABEL: func @sparse_metadata_init( // CHECK: %[[T:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier<#{{.*}}> @@ -209,9 +209,9 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> #SparseVector_Slice = #sparse_tensor.encoding<{ - dimLevelType = ["compressed"], + lvlTypes = ["compressed"], slice = [ (?, ?, ?) ] }> @@ -228,7 +228,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> // CHECK-LABEL: func @sparse_get_md( // CHECK-SAME: %[[A:.*]]: !sparse_tensor.storage_specifier<#{{.*}}> @@ -243,7 +243,7 @@ // ----- #SparseVector_Slice = #sparse_tensor.encoding<{ - dimLevelType = ["compressed"], + lvlTypes = ["compressed"], slice = [ (?, ?, ?) ] }> @@ -260,7 +260,7 @@ // ----- #SparseVector = #sparse_tensor.encoding<{ - dimLevelType = ["compressed"], + lvlTypes = ["compressed"], slice = [ (?, ?, ?) 
] }> @@ -277,7 +277,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> // CHECK-LABEL: func @sparse_set_md( // CHECK-SAME: %[[A:.*]]: !sparse_tensor.storage_specifier<#{{.*}}>, @@ -293,7 +293,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> // CHECK-LABEL: func @sparse_noe( // CHECK-SAME: %[[A:.*]]: tensor<128xf64, #{{.*}}>) @@ -306,7 +306,7 @@ // ----- -#DenseMatrix = #sparse_tensor.encoding<{dimLevelType = ["dense","dense"]}> +#DenseMatrix = #sparse_tensor.encoding<{lvlTypes = ["dense","dense"]}> // CHECK-LABEL: func @sparse_load( // CHECK-SAME: %[[A:.*]]: tensor<16x32xf64, #{{.*}}>) @@ -319,7 +319,7 @@ // ----- -#DenseMatrix = #sparse_tensor.encoding<{dimLevelType = ["dense","dense"]}> +#DenseMatrix = #sparse_tensor.encoding<{lvlTypes = ["dense","dense"]}> // CHECK-LABEL: func @sparse_load_ins( // CHECK-SAME: %[[A:.*]]: tensor<16x32xf64, #{{.*}}>) @@ -332,7 +332,7 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> // CHECK-LABEL: func @sparse_insert( // CHECK-SAME: %[[A:.*]]: tensor<128xf64, #sparse_tensor.encoding<{{.*}}>>, @@ -387,7 +387,7 @@ // ----- -#SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> +#SparseMatrix = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> // CHECK-LABEL: func @sparse_expansion( // CHECK-SAME: %[[A:.*]]: tensor<8x8xf64, #sparse_tensor.encoding<{{.*}}>>) @@ -401,7 +401,7 @@ // ----- -#SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> +#SparseMatrix = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> // CHECK-LABEL: func @sparse_compression( // CHECK-SAME: %[[A0:.*0]]: memref, @@ -425,7 +425,7 @@ // ----- -#SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> +#SparseMatrix = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> // CHECK-LABEL: func @sparse_out( // CHECK-SAME: %[[A:.*]]: tensor>, @@ -439,7 +439,7 @@ // ----- -#SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> +#SparseMatrix = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> // CHECK-LABEL: func @sparse_binary( // CHECK-SAME: %[[A:.*]]: f64, %[[B:.*]]: i64) -> f64 { @@ -473,7 +473,7 @@ // ----- -#SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> +#SparseMatrix = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> // CHECK-LABEL: func @sparse_unary( // CHECK-SAME: %[[A:.*]]: f64) -> f64 { @@ -503,7 +503,7 @@ // ----- -#SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> +#SparseMatrix = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> // CHECK-LABEL: func @sparse_unary( // CHECK-SAME: %[[A:.*]]: f64) -> i64 { @@ -530,7 +530,7 @@ // ----- -#SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> +#SparseMatrix = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> // CHECK-LABEL: func @sparse_reduce_2d_to_1d( // CHECK-SAME: %[[A:.*]]: f64, %[[B:.*]]: f64) -> f64 { @@ -552,7 +552,7 @@ // ----- -#SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> +#SparseMatrix = #sparse_tensor.encoding<{lvlTypes = 
["compressed", "compressed"]}> // CHECK-LABEL: func @sparse_select( // CHECK-SAME: %[[A:.*]]: f64) -> f64 { @@ -576,7 +576,7 @@ // ----- -#SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> +#SparseMatrix = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> // CHECK-LABEL: func @concat_sparse_sparse( // CHECK-SAME: %[[A0:.*]]: tensor<2x4xf64 @@ -600,7 +600,7 @@ // ----- -#DCSR = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> +#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> // CHECK-LABEL: func @sparse_tensor_foreach( // CHECK-SAME: %[[A0:.*]]: tensor<2x4xf64 @@ -615,7 +615,7 @@ // ----- -#DCSR = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> +#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> // CHECK-LABEL: func @sparse_tensor_foreach( // CHECK-SAME: %[[A0:.*]]: tensor<2x4xf64, #sparse_tensor.encoding<{{{.*}}}>>, diff --git a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir --- a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir +++ b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir @@ -1,132 +1,132 @@ // RUN: mlir-opt %s -split-input-file | mlir-opt | FileCheck %s // CHECK-LABEL: func private @sparse_1d_tensor( -// CHECK-SAME: tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -func.func private @sparse_1d_tensor(tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = ["compressed"] }>>) +// CHECK-SAME: tensor<32xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>) +func.func private @sparse_1d_tensor(tensor<32xf64, #sparse_tensor.encoding<{ lvlTypes = ["compressed"] }>>) // ----- #CSR = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(i,j) -> (i,j)>, posWidth = 64, crdWidth = 64 }> // CHECK-LABEL: func private @sparse_csr( -// CHECK-SAME: tensor>) +// CHECK-SAME: tensor>) func.func private @sparse_csr(tensor) // ----- #CSC = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(i,j) -> (j,i)>, posWidth = 0, crdWidth = 0 }> // CHECK-LABEL: func private @sparse_csc( -// CHECK-SAME: tensor (d1, d0)> }>>) +// CHECK-SAME: tensor (d1, d0)> }>>) func.func private @sparse_csc(tensor) // ----- #DCSC = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed" ], + lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(i,j) -> (j,i)>, posWidth = 0, crdWidth = 64 }> // CHECK-LABEL: func private @sparse_dcsc( -// CHECK-SAME: tensor (d1, d0)>, crdWidth = 64 }>>) +// CHECK-SAME: tensor (d1, d0)>, crdWidth = 64 }>>) func.func private @sparse_dcsc(tensor) // ----- #COO = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed-nu-no", "singleton-no" ] + lvlTypes = [ "compressed-nu-no", "singleton-no" ] }> // CHECK-LABEL: func private @sparse_coo( -// CHECK-SAME: tensor>) +// CHECK-SAME: tensor>) func.func private @sparse_coo(tensor) // ----- #BCOO = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed-hi-nu", "singleton" ] + lvlTypes = [ "dense", "compressed-hi-nu", "singleton" ] }> // CHECK-LABEL: func private @sparse_bcoo( -// CHECK-SAME: tensor>) +// CHECK-SAME: tensor>) func.func private @sparse_bcoo(tensor) // ----- #SortedCOO = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed-nu", "singleton" ] + lvlTypes = [ "compressed-nu", 
"singleton" ] }> // CHECK-LABEL: func private @sparse_sorted_coo( -// CHECK-SAME: tensor<10x10xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>>) +// CHECK-SAME: tensor<10x10xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>>) func.func private @sparse_sorted_coo(tensor<10x10xf64, #SortedCOO>) // ----- #BCSR = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed", "dense", "dense" ], + lvlTypes = [ "compressed", "compressed", "dense", "dense" ], dimOrdering = affine_map<(ii, jj, i, j) -> (ii, jj, i, j)>, higherOrdering = affine_map<(i, j) -> (i floordiv 2, j floordiv 3, i mod 2, j mod 3)> }> // CHECK-LABEL: func private @sparse_bcsr( -// CHECK-SAME: tensor<10x60xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense", "dense" ], higherOrdering = affine_map<(d0, d1) -> (d0 floordiv 2, d1 floordiv 3, d0 mod 2, d1 mod 3)> }>> +// CHECK-SAME: tensor<10x60xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense", "dense" ], higherOrdering = affine_map<(d0, d1) -> (d0 floordiv 2, d1 floordiv 3, d0 mod 2, d1 mod 3)> }>> func.func private @sparse_bcsr(tensor<10x60xf64, #BCSR>) // ----- #ELL = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "dense", "compressed" ], + lvlTypes = [ "dense", "dense", "compressed" ], dimOrdering = affine_map<(ii, i, j) -> (ii, i, j)>, higherOrdering = affine_map<(i,j)[c] -> (c*4*i, i, j)> }> // CHECK-LABEL: func private @sparse_ell( -// CHECK-SAME: tensor (d0 * (s0 * 4), d0, d1)> }>> +// CHECK-SAME: tensor (d0 * (s0 * 4), d0, d1)> }>> func.func private @sparse_ell(tensor) // ----- #CSR_SLICE = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], slice = [ (1, 4, 1), (1, 4, 2) ] }> // CHECK-LABEL: func private @sparse_slice( -// CHECK-SAME: tensor> +// CHECK-SAME: tensor> func.func private @sparse_slice(tensor) // ----- #CSR_SLICE = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], slice = [ (1, 4, 1), (1, 4, 2) ] }> // CHECK-LABEL: func private @sparse_slice( -// CHECK-SAME: tensor> +// CHECK-SAME: tensor> func.func private @sparse_slice(tensor) // ----- #CSR_SLICE = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], slice = [ (1, ?, 1), (?, 4, 2) ] }> // CHECK-LABEL: func private @sparse_slice( -// CHECK-SAME: tensor> +// CHECK-SAME: tensor> func.func private @sparse_slice(tensor) diff --git a/mlir/test/Dialect/SparseTensor/scf_1_N_conversion.mlir b/mlir/test/Dialect/SparseTensor/scf_1_N_conversion.mlir --- a/mlir/test/Dialect/SparseTensor/scf_1_N_conversion.mlir +++ b/mlir/test/Dialect/SparseTensor/scf_1_N_conversion.mlir @@ -1,6 +1,6 @@ // RUN: mlir-opt %s -sparse-tensor-codegen -cse | FileCheck %s -#SparseVector = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }> +#SparseVector = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }> // CHECK-LABEL: func.func @for( // CHECK-SAME: %[[VAL_1:.*0]]: memref, diff --git a/mlir/test/Dialect/SparseTensor/sorted_coo.mlir b/mlir/test/Dialect/SparseTensor/sorted_coo.mlir --- a/mlir/test/Dialect/SparseTensor/sorted_coo.mlir +++ b/mlir/test/Dialect/SparseTensor/sorted_coo.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s -sparsification --canonicalize | FileCheck %s #SortedCOO = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed-nu", "singleton" ] + lvlTypes = [ "compressed-nu", "singleton" ] }> #trait_scale = { 
@@ -37,14 +37,14 @@ // // CHECK-LABEL: func.func @sparse_scale( -// CHECK-SAME: %[[VAL_0:.*]]: tensor>) -> tensor> { +// CHECK-SAME: %[[VAL_0:.*]]: tensor>) -> tensor> { // CHECK-DAG: %[[VAL_1:.*]] = arith.constant false // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 2.000000e+00 : f32 -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor> to memref> -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor> to memref> +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref // CHECK-DAG: %[[VAL_8:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_2]]] : memref // CHECK-DAG: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_10:.*]] = scf.while (%[[VAL_11:.*]] = %[[VAL_8]]) : (index) -> index { @@ -75,8 +75,8 @@ // CHECK: } {"Emitted from" = "linalg.generic"} // CHECK: scf.yield %[[VAL_28:.*]] : index // CHECK: } attributes {"Emitted from" = "linalg.generic"} -// CHECK: %[[VAL_29:.*]] = sparse_tensor.load %[[VAL_0]] : tensor> -// CHECK: return %[[VAL_29]] : tensor> +// CHECK: %[[VAL_29:.*]] = sparse_tensor.load %[[VAL_0]] : tensor> +// CHECK: return %[[VAL_29]] : tensor> // CHECK: } func.func @sparse_scale(%argx: tensor) -> tensor { %c = arith.constant 2.0 : f32 @@ -90,16 +90,16 @@ } // CHECK-LABEL: func.func @matvec( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<64xf64>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf64>) -> tensor<32xf64> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant false // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref> +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, 
#sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref> +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref // CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64> // CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -155,21 +155,21 @@ } // CHECK-LABEL: func.func @mateltmul( -// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>>, -// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>>, +// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>>, +// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>>, // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x64xf64>) -> tensor<32x64xf64> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant false // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0.000000e+00 : f64 // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> -// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> -// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref> +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref> +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, 
#sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref> +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref> +// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref // CHECK: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x64xf64> // CHECK: linalg.fill ins(%[[VAL_4]] : f64) outs(%[[VAL_15]] : memref<32x64xf64>) // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_1d.mlir b/mlir/test/Dialect/SparseTensor/sparse_1d.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_1d.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_1d.mlir @@ -1,8 +1,8 @@ // NOTE: Assertions have been autogenerated by utils/generate-test-checks.py // RUN: mlir-opt %s -sparsification | FileCheck %s -#DV = #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }> -#SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }> +#DV = #sparse_tensor.encoding<{ lvlTypes = [ "dense" ] }> +#SV = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }> #trait1 = { indexing_maps = [ @@ -14,13 +14,13 @@ } // CHECK-LABEL: func @add_d( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: f32, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 32 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense" ] }>> to memref // CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_2]] // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_8]] : memref<32xf32>) // CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { @@ -43,14 +43,14 @@ } // CHECK-LABEL: func @add_d_init( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: f32) -> tensor<32xf32> { // CHECK: %[[VAL_2:.*]] = arith.constant 32 : index // CHECK: %[[VAL_3:.*]] = arith.constant 0.000000e+00 : f32 // CHECK: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK: %[[VAL_INITTENSOR:.*]] = tensor.empty() : tensor<32xf32> -// CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }>> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, 
#sparse_tensor.encoding<{ lvlTypes = [ "dense" ] }>> to memref // CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_INITTENSOR]] : memref<32xf32> // CHECK: linalg.fill ins(%[[VAL_3]] : f32) outs(%[[VAL_7]] : memref<32xf32>) // CHECK: scf.for %[[VAL_8:.*]] = %[[VAL_4]] to %[[VAL_2]] step %[[VAL_5]] { @@ -74,13 +74,13 @@ } // CHECK-LABEL: func @mul_d( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: f32, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 32 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense" ] }>> to memref // CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_2]] // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_8]] : memref<32xf32>) // CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { @@ -103,16 +103,16 @@ } // CHECK-LABEL: func @add_s( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: f32, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 32 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant true // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref // CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] @@ -158,13 +158,13 @@ } // CHECK-LABEL: func @repeated_add_s( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-DAG: 
%[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] // CHECK-DAG: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK-DAG: %[[VAL_10:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref @@ -197,14 +197,14 @@ } // CHECK-LABEL: func @mul_s( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: f32, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_9]] : memref<32xf32>) // CHECK-DAG: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref @@ -240,13 +240,13 @@ } // CHECK-LABEL: func @add_dd( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 32 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values 
%[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense" ] }>> to memref // CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf32> // CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_9]] : memref<32xf32>) @@ -271,13 +271,13 @@ } // CHECK-LABEL: func @mul_dd( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 32 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense" ] }>> to memref // CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf32> // CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_9]] : memref<32xf32>) @@ -303,16 +303,16 @@ // CHECK-LABEL: func @add_ds( // CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32>, -// CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>, +// CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 32 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant true // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : memref<32xf32> -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_12]] : memref<32xf32>) // CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref @@ -362,14 +362,14 @@ // CHECK-LABEL: func @mul_ds( // CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32>, -// CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf32, 
#sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>, +// CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_5:.*]] = bufferization.to_memref %[[VAL_0]] : memref<32xf32> -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_10]] : memref<32xf32>) // CHECK-DAG: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_3]]] : memref @@ -396,16 +396,16 @@ } // CHECK-LABEL: func @add_sd( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 32 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant true // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf32> // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_12]] : memref<32xf32>) @@ -455,14 +455,14 @@ } // CHECK-LABEL: func @mul_sd( -// CHECK-SAME: %[[VAL_0:.*]]: 
tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf32> // CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_10]] : memref<32xf32>) @@ -490,17 +490,17 @@ } // CHECK-LABEL: func @add_ss( -// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>, -// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>, +// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>, // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : 
index} : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_12]] : memref<32xf32>) // CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref @@ -573,17 +573,17 @@ } // CHECK-LABEL: func @mul_ss( -// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>, -// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>, +// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>, // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32, 
#sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_12]] : memref<32xf32>) // CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref @@ -634,18 +634,18 @@ } // CHECK-LABEL: func @two_way_inv( -// CHECK-SAME: %[[VAL_0:.*0]]: tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>, -// CHECK-SAME: %[[VAL_1:.*1]]: tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*0]]: tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>, +// CHECK-SAME: %[[VAL_1:.*1]]: tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>, // CHECK-SAME: %[[VAL_2:.*2]]: f32, // CHECK-SAME: %[[VAL_3:.*3]]: tensor<16xf32>) -> tensor<16xf32> { // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_3]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_13]] : memref<16xf32>) // CHECK-DAG: %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref @@ -727,18 +727,18 @@ } // CHECK-LABEL: func @two_way_inv_alt( -// CHECK-SAME: %[[VAL_0:.*0]]: tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>, -// CHECK-SAME: %[[VAL_1:.*1]]: tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*0]]: tensor<16xf32, 
#sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>, +// CHECK-SAME: %[[VAL_1:.*1]]: tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>, // CHECK-SAME: %[[VAL_2:.*2]]: f32, // CHECK-SAME: %[[VAL_3:.*3]]: tensor<16xf32>) -> tensor<16xf32> { // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_3]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_13]] : memref<16xf32>) // CHECK-DAG: %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref @@ -828,12 +828,12 @@ } // CHECK-LABEL: func @sum_reduction( -// CHECK-SAME: %[[VAL_0:.*]]: tensor>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor>, // CHECK-SAME: %[[VAL_1:.*]]: tensor) -> tensor { // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref +// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref // CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_1]] : memref // CHECK-DAG: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK-DAG: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref @@ -869,17 +869,17 @@ } // CHECK-LABEL: 
func @sum_reduction_ss( -// CHECK-SAME: %[[VAL_0:.*0]]: tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>, -// CHECK-SAME: %[[VAL_1:.*1]]: tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*0]]: tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>, +// CHECK-SAME: %[[VAL_1:.*1]]: tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>, // CHECK-SAME: %[[VAL_2:.*2]]: tensor) -> tensor { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref // CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_11]][] : memref // CHECK-DAG: %[[VAL_14:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref @@ -975,19 +975,19 @@ } // CHECK-LABEL: func @sum_reduction_inv( -// CHECK-SAME: %[[VAL_0:.*0]]: tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*0]]: tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>, // CHECK-SAME: %[[VAL_1:.*1]]: tensor, -// CHECK-SAME: %[[VAL_2:.*2]]: tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>, +// CHECK-SAME: %[[VAL_2:.*2]]: tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>, // CHECK-SAME: %[[VAL_3:.*3]]: tensor) -> tensor { // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: 
%[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_3]] : memref // CHECK-DAG: %[[VAL_15:.*]] = memref.load %[[VAL_13]][] : memref // CHECK-DAG: %[[VAL_16:.*]] = memref.load %[[VAL_9]][] : memref @@ -1091,21 +1091,21 @@ // CHECK-LABEL: func @four_tensors_op( // CHECK-SAME: %[[VAL_0:.*0]]: tensor, -// CHECK-SAME: %[[VAL_1:.*1]]: tensor>, +// CHECK-SAME: %[[VAL_1:.*1]]: tensor>, // CHECK-SAME: %[[VAL_2:.*2]]: tensor, -// CHECK-SAME: %[[VAL_3:.*3]]: tensor>, +// CHECK-SAME: %[[VAL_3:.*3]]: tensor>, // CHECK-SAME: %[[VAL_4:.*]]: tensor) -> tensor { // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant true // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref -// CHECK-DAG: %[[VAL_13:.*]] 
= sparse_tensor.positions %[[VAL_3]] {level = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.coordinates %[[VAL_3]] {level = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.values %[[VAL_3]] : tensor> to memref +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_3]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.coordinates %[[VAL_3]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.values %[[VAL_3]] : tensor> to memref // CHECK-DAG: %[[VAL_16:.*]] = tensor.dim %[[VAL_0]], %[[VAL_5]] : tensor // CHECK-DAG: %[[VAL_18:.*]] = bufferization.to_memref %[[VAL_4]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f64) outs(%[[VAL_18]] : memref) @@ -1268,21 +1268,21 @@ } // CHECK-LABEL: func @red3s( -// CHECK-SAME: %[[VAL_0:.*0]]: tensor>, -// CHECK-SAME: %[[VAL_1:.*1]]: tensor>, -// CHECK-SAME: %[[VAL_2:.*2]]: tensor>, +// CHECK-SAME: %[[VAL_0:.*0]]: tensor>, +// CHECK-SAME: %[[VAL_1:.*1]]: tensor>, +// CHECK-SAME: %[[VAL_2:.*2]]: tensor>, // CHECK-SAME: %[[VAL_3:.*3]]: tensor) -> tensor { // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_2]] : tensor> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_2]] : tensor> to memref // CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_3]] : memref // CHECK-DAG: %[[VAL_17:.*]] = memref.load %[[VAL_15]][] : memref // CHECK-DAG: %[[VAL_18:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir @@ -1,10 +1,10 @@ // NOTE: Assertions have been autogenerated by 
utils/generate-test-checks.py // RUN: mlir-opt %s -sparsification | FileCheck %s -#Tdd = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense" ] }> -#Tds = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }> -#Tsd = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }> -#Tss = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }> +#Tdd = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense" ] }> +#Tds = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }> +#Tsd = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ] }> +#Tss = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> #trait2 = { indexing_maps = [ @@ -17,14 +17,14 @@ } // CHECK-LABEL: func @add_dd( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf32>) -> tensor<32x16xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 32 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 16 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32> // CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32> // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_10]] : memref<32x16xf32>) @@ -53,14 +53,14 @@ } // CHECK-LABEL: func @mul_dd( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf32>) -> tensor<32x16xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 32 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 16 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32> // CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32> // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_10]] : memref<32x16xf32>) @@ -89,7 +89,7 @@ } // CHECK-LABEL: func @add_ds( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf32>) -> tensor<32x16xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 32 : 
index @@ -97,9 +97,9 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant true // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32> // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32> // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_13]] : memref<32x16xf32>) @@ -152,15 +152,15 @@ } // CHECK-LABEL: func @mul_ds( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf32>) -> tensor<32x16xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 32 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32> // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32> // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_11]] : memref<32x16xf32>) @@ -191,7 +191,7 @@ } // CHECK-LABEL: func @add_sd( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ 
"compressed", "dense" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf32>) -> tensor<32x16xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 32 : index @@ -199,9 +199,9 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant true // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32> // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32> // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_13]] : memref<32x16xf32>) @@ -259,15 +259,15 @@ } // CHECK-LABEL: func @mul_sd( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf32>) -> tensor<32x16xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 16 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : 
memref<32x16xf32> // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32> // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_11]] : memref<32x16xf32>) @@ -299,7 +299,7 @@ } // CHECK-LABEL: func @add_ss( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf32>) -> tensor<32x16xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 32 : index @@ -307,11 +307,11 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant true // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32> // CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32> // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_15]] : memref<32x16xf32>) @@ -393,16 +393,16 @@ } // CHECK-LABEL: func @mul_ss( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf32>) -> tensor<32x16xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions 
%[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32> // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32> // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_12]] : memref<32x16xf32>) @@ -436,21 +436,21 @@ } // CHECK-LABEL: func @add_ss_ss( -// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>, -// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>, // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" 
] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32> // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_16]] : memref<32x16xf32>) // CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref @@ -600,7 +600,7 @@ } #BatchedVector = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed-hi" ], + lvlTypes = [ "dense", "compressed-hi" ], }> // CHECK-LABEL: func.func @sub_ss_batched( // CHECK-SAME: %[[VAL_0:.*]]: tensor<2x3xf64, #{{.*}}>>, @@ -704,21 +704,21 @@ } // CHECK-LABEL: func @mul_ss_ss( -// CHECK-SAME: 
%[[VAL_0:.*0]]: tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>, -// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>, // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : 
index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32> // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_16]] : memref<32x16xf32>) // CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref @@ -800,20 +800,20 @@ } // CHECK-LABEL: func @add_sd_ds( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>>, -// CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ] }>>, +// CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf32>) -> tensor<32x16xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 32 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 16 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant true // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", 
"dense" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32> // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_15]] : memref<32x16xf32>) // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_5]]] : memref @@ -906,18 +906,18 @@ } // CHECK-LABEL: func @mul_sd_ds( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>>, -// CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ] }>>, +// CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf32>) -> tensor<32x16xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 16 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref +// 
CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32> // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_13]] : memref<32x16xf32>) // CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref @@ -962,15 +962,15 @@ } // CHECK-LABEL: func @matvec( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<16x32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<16x32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<16xf32>) -> tensor<16xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 16 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<16x32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<16x32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16x32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<16x32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<16x32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16x32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf32> // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<16xf32> // CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { @@ -1013,13 +1013,13 @@ } // CHECK-LABEL: func @sum_reduction( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor) -> tensor { // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 10 : index // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] 
: memref // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK: %[[VAL_10:.*]] = scf.for %[[VAL_11:.*]] = %[[VAL_4]] to %[[VAL_2]] step %[[VAL_3]] iter_args(%[[VAL_12:.*]] = %[[VAL_9]]) -> (f32) { @@ -1058,14 +1058,14 @@ } // CHECK-LABEL: func @scale( -// CHECK-SAME: %[[VAL_0:.*]]: tensor>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor>, // CHECK-SAME: %[[VAL_1:.*]]: tensor) -> tensor { // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2.000000e+00 : f64 // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref // CHECK-DAG: %[[VAL_8:.*]] = tensor.dim %[[VAL_0]], %[[VAL_3]] : tensor> // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref // CHECK: linalg.fill ins(%{{.*}} : f64) outs(%[[VAL_11]] : memref) @@ -1107,17 +1107,17 @@ } // CHECK-LABEL: func.func @sampled_dense_dense( -// CHECK-SAME: %[[VAL_0:.*0]]: tensor>, +// CHECK-SAME: %[[VAL_0:.*0]]: tensor>, // CHECK-SAME: %[[VAL_1:.*1]]: tensor, // CHECK-SAME: %[[VAL_2:.*2]]: tensor, // CHECK-SAME: %[[VAL_3:.*3]]: tensor) -> tensor { // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref // CHECK-DAG: %[[VAL_11:.*]] = tensor.dim %[[VAL_1]], %[[VAL_4]] : tensor // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : memref // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref @@ -1176,26 +1176,26 @@ } // CHECK-LABEL: func @sum_kernel_with_inv( -// CHECK-SAME: %[[VAL_0:.*0]]: tensor>, -// CHECK-SAME: %[[VAL_1:.*1]]: tensor>, -// CHECK-SAME: %[[VAL_2:.*2]]: tensor>, +// CHECK-SAME: %[[VAL_0:.*0]]: tensor>, +// CHECK-SAME: %[[VAL_1:.*1]]: tensor>, +// CHECK-SAME: %[[VAL_2:.*2]]: tensor>, // CHECK-SAME: %[[VAL_3:.*3]]: tensor, // CHECK-SAME: %[[VAL_4:.*4]]: tensor, // CHECK-SAME: %[[VAL_5:.*5]]: tensor) -> tensor { // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_7:.*]] = 
arith.constant 1 : index // CHECK-DAG: %[[VAL_8:.*]] = arith.constant true -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref -// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor> to memref -// CHECK: %[[VAL_16:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref -// CHECK-DAG: %[[VAL_17:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 1 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_18:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 1 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_19:.*]] = sparse_tensor.values %[[VAL_2]] : tensor> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref +// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor> to memref +// CHECK: %[[VAL_16:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-DAG: %[[VAL_17:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 1 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_18:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 1 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_19:.*]] = sparse_tensor.values %[[VAL_2]] : tensor> to memref // CHECK-DAG: %[[VAL_20:.*]] = bufferization.to_memref %[[VAL_3]] : memref // CHECK-DAG: %[[VAL_21:.*]] = bufferization.to_memref %[[VAL_4]] : memref // CHECK-DAG: %[[VAL_22:.*]] = tensor.dim %[[VAL_2]], %[[VAL_6]] : tensor +#Td = #sparse_tensor.encoding<{ lvlTypes = [ "dense" ] }> -#Tddd = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense" ] }> -#Tdds = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "compressed" ] }> -#Tdsd = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "dense" ] }> -#Tdss = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }> -#Tsdd = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "dense" ] }> -#Tsds = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }> -#Tssd = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }> -#Tsss = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }> +#Tddd = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "dense" ] }> +#Tdds = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ] }> +#Tdsd = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "dense" ] }> 
+#Tdss = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "compressed" ] }>
+#Tsdd = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "dense" ] }>
+#Tsds = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "compressed" ] }>
+#Tssd = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense" ] }>
+#Tsss = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }>
#trait3 = {
indexing_maps = [
@@ -23,7 +23,7 @@
}
// CHECK-LABEL: func @add_ddd(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense" ] }>>,
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "dense" ] }>>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
// CHECK-DAG: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32
@@ -32,7 +32,7 @@
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 8 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index
-// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense" ] }>> to memref
+// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "dense" ] }>> to memref
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32>
// CHECK: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_11]] : memref<32x16x8xf32>)
@@ -65,7 +65,7 @@
}
// CHECK-LABEL: func @mul_ddd(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense" ] }>>,
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "dense" ] }>>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
// CHECK-DAG: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32
@@ -74,7 +74,7 @@
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 8 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index
-// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense" ] }>> to memref
+// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "dense" ] }>> to memref
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32>
// CHECK: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_11]] : memref<32x16x8xf32>)
@@ -107,7 +107,7 @@
}
// CHECK-LABEL: func @add_dds(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "compressed" ] }>>,
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ] }>>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16x8xf32>,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> {
// CHECK-DAG: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32
@@ -117,9 +117,9 @@
// CHECK-DAG:
%[[VAL_7:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_8:.*]] = arith.constant true // CHECK-DAG: %[[VAL_9:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> // CHECK: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_15]] : memref<32x16x8xf32>) @@ -176,7 +176,7 @@ } // CHECK-LABEL: func @mul_dds( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16x8xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> { // CHECK-DAG: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32 @@ -184,9 +184,9 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 16 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> // 
CHECK: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_13]] : memref<32x16x8xf32>) @@ -221,7 +221,7 @@ } // CHECK-LABEL: func @add_dsd( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "dense" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "dense" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16x8xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> { // CHECK-DAG: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32 @@ -231,9 +231,9 @@ // CHECK-DAG: %[[VAL_6:.*]] = arith.constant true // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_8:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> // CHECK: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_14]] : memref<32x16x8xf32>) @@ -294,7 +294,7 @@ } // CHECK-LABEL: func @mul_dsd( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "dense" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "dense" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16x8xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> { // CHECK-DAG: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32 @@ -302,9 +302,9 @@ // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 8 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ 
lvlTypes = [ "dense", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> // CHECK: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_12]] : memref<32x16x8xf32>) @@ -339,7 +339,7 @@ } // CHECK-LABEL: func @add_dss( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "compressed" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16x8xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> { // CHECK-DAG: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32 @@ -349,11 +349,11 @@ // CHECK-DAG: %[[VAL_7:.*]] = arith.constant true // CHECK-DAG: %[[VAL_8:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_9:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> // CHECK: 
linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_17]] : memref<32x16x8xf32>) @@ -438,18 +438,18 @@ } // CHECK-LABEL: func @mul_dss( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "compressed" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16x8xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> { // CHECK-DAG: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32 // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 32 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> // CHECK: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_14]] : memref<32x16x8xf32>) @@ -486,7 +486,7 @@ } // CHECK-LABEL: func @add_sdd( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "dense" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "dense" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16x8xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> { // CHECK-DAG: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32 @@ -496,9 +496,9 @@ // CHECK-DAG: %[[VAL_6:.*]] = 
arith.constant true // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_8:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> // CHECK: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_14]] : memref<32x16x8xf32>) @@ -564,7 +564,7 @@ } // CHECK-LABEL: func @mul_sdd( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "dense" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "dense" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16x8xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> { // CHECK-DAG: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32 @@ -572,9 +572,9 @@ // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 8 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> // CHECK: linalg.fill 
ins(%[[ZERO]] : f32) outs(%[[VAL_12]] : memref<32x16x8xf32>) @@ -610,7 +610,7 @@ } // CHECK-LABEL: func @add_sds( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "compressed" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16x8xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> { // CHECK-DAG: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32 @@ -620,11 +620,11 @@ // CHECK-DAG: %[[VAL_7:.*]] = arith.constant true // CHECK-DAG: %[[VAL_8:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_9:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> // CHECK: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_17]] : memref<32x16x8xf32>) @@ -714,18 +714,18 @@ } // CHECK-LABEL: func @mul_sds( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "compressed" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16x8xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> { // CHECK-DAG: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32 // CHECK-DAG: %[[VAL_4:.*]] = 
arith.constant 16 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> // CHECK: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_14]] : memref<32x16x8xf32>) @@ -763,7 +763,7 @@ } // CHECK-LABEL: func @add_ssd( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16x8xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> { // CHECK-DAG: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32 @@ -773,11 +773,11 @@ // CHECK-DAG: %[[VAL_6:.*]] = arith.constant true // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_8:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : 
index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> // CHECK: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_16]] : memref<32x16x8xf32>) @@ -871,18 +871,18 @@ } // CHECK-LABEL: func @mul_ssd( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16x8xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> { // CHECK-DAG: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32 // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 8 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, 
#sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> // CHECK: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_13]] : memref<32x16x8xf32>) @@ -920,7 +920,7 @@ } // CHECK-LABEL: func @add_sss( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16x8xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> { // CHECK-DAG: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32 @@ -930,13 +930,13 @@ // CHECK-DAG: %[[VAL_7:.*]] = arith.constant true // CHECK-DAG: %[[VAL_8:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_9:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] 
{level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_19:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> // CHECK: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_19]] : memref<32x16x8xf32>) @@ -1054,19 +1054,19 @@ } // CHECK-LABEL: func @mul_sss( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x16x8xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16x8xf32>) -> tensor<32x16x8xf32> { // CHECK-DAG: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32 // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level 
= 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> // CHECK: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_15]] : memref<32x16x8xf32>) @@ -1118,14 +1118,14 @@ // CHECK-LABEL: func @kernel_3d( // CHECK-SAME: %[[VAL_0:.*0]]: tensor, -// CHECK-SAME: %[[VAL_1:.*1]]: tensor>, +// CHECK-SAME: %[[VAL_1:.*1]]: tensor>, // CHECK-SAME: %[[VAL_2:.*2]]: tensor, // CHECK-SAME: %[[VAL_3:.*3]]: tensor) -> tensor { // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 2 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 2 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 2 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 2 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-DAG: %[[VAL_10:.*]] = tensor.dim %[[VAL_1]], %[[VAL_6]] : tensor> // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_3]] : memref @@ -1294,7 +1294,7 @@ } // CHECK-LABEL: func @invariants( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<10xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<10xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<20xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<30xf32>, // CHECK-SAME: %[[VAL_3:.*]]: tensor<10x20x30xf32>) -> tensor<10x20x30xf32> { @@ -1304,7 +1304,7 @@ // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 30 : index // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_8:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }>> 
to memref
+// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense" ] }>> to memref
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<20xf32>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<30xf32>
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_3]] : memref<10x20x30xf32>
diff --git a/mlir/test/Dialect/SparseTensor/sparse_affine.mlir b/mlir/test/Dialect/SparseTensor/sparse_affine.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_affine.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_affine.mlir
@@ -1,10 +1,10 @@
// NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
// RUN: mlir-opt %s -sparsification | FileCheck %s
-#SpVec = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>
-#CSR = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>
-#Row = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>
-#EncDenseVec = #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }>
+#SpVec = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>
+#CSR = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>
+#Row = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ] }>
+#EncDenseVec = #sparse_tensor.encoding<{ lvlTypes = [ "dense" ] }>
#trait1 = {
indexing_maps = [
diff --git a/mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir b/mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s --sparsification --canonicalize --cse | FileCheck %s
-#DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>
-#SparseTensor = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>
+#DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>
+#SparseTensor = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }>
#trait = {
indexing_maps = [
diff --git a/mlir/test/Dialect/SparseTensor/sparse_concat.mlir b/mlir/test/Dialect/SparseTensor/sparse_concat.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_concat.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_concat.mlir
@@ -1,14 +1,14 @@
// RUN: mlir-opt %s --sparse-tensor-conversion --canonicalize --cse | FileCheck %s
-#SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}>
+#SparseMatrix = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}>
#SparseMatrix_P = #sparse_tensor.encoding<{
- dimLevelType = [ "compressed", "compressed" ],
+ lvlTypes = [ "compressed", "compressed" ],
dimOrdering = affine_map<(i,j) -> (j,i)>
}>
#SparseMatrix_D_P = #sparse_tensor.encoding<{
- dimLevelType = [ "dense", "dense" ],
+ lvlTypes = [ "dense", "dense" ],
dimOrdering = affine_map<(i,j) -> (j,i)>
}>
diff --git a/mlir/test/Dialect/SparseTensor/sparse_concat_codegen.mlir b/mlir/test/Dialect/SparseTensor/sparse_concat_codegen.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_concat_codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_concat_codegen.mlir
@@ -1,10 +1,10 @@
// RUN: mlir-opt %s --post-sparsification-rewrite="enable-runtime-library=false enable-convert=false" \
// RUN: | FileCheck %s
-#DCSR = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}>
-#DENSE = #sparse_tensor.encoding<{dimLevelType = ["dense", "dense"]}>
+#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}>
+#DENSE = #sparse_tensor.encoding<{lvlTypes = ["dense", "dense"]}>
#DENSE_P = #sparse_tensor.encoding<{
- dimLevelType = ["dense", "dense"],
+ lvlTypes = ["dense", "dense"],
dimOrdering = affine_map<(i,j) -> (j,i)>
}>
// CHECK-LABEL: @concat_sparse_sparse(
@@ -270,7 +270,7 @@
// CHECK-DAG: %[[TMP_c9:.*]] = arith.constant 9 : index
// CHECK-DAG: %[[TMP_c4:.*]] = arith.constant 4 : index
// CHECK: %[[TMP_0:.*]] = bufferization.alloc_tensor(%[[TMP_c9]], %[[TMP_c4]]) : tensor> to memref
+// CHECK: %[[VAL_0:.*]] = sparse_tensor.values %[[TMP_0]] : tensor> to memref
// CHECK: %[[DIM_0:.*]] = memref.alloca() : memref<2xindex>
// CHECK: memref.store %[[TMP_c9]], %[[DIM_0]][%[[TMP_c0]]] : memref<2xindex>
// CHECK: memref.store %[[TMP_c4]], %[[DIM_0]][%[[TMP_c1]]] : memref<2xindex>
@@ -332,7 +332,7 @@
// CHECK: }
// CHECK: }
// CHECK: %[[R:.*]] = sparse_tensor.convert %[[TMP_0]]
-// CHECK: return %[[R]] : tensor>
+// CHECK: return %[[R]] : tensor>
func.func @concat_sparse_sparse_annotated_dense(%arg0: tensor<2x4xf64, #DCSR>, %arg1: tensor<3x4xf64, #DCSR>, %arg2: tensor<4x4xf64, #DCSR>)
@@ -417,7 +417,7 @@
// CHECK: }
// CHECK: }
// CHECK: %[[R:.*]] = sparse_tensor.convert %[[TMP_0]]
-// CHECK: return %[[R]] : tensor (d1, d0)> }>>
+// CHECK: return %[[R]] : tensor (d1, d0)> }>>
func.func @concat_sparse_sparse_annotated_dense_permute(%arg0: tensor<2x4xf64, #DCSR>, %arg1: tensor<3x4xf64, #DCSR>, %arg2: tensor<4x4xf64, #DCSR>)
diff --git a/mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir b/mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir
@@ -4,7 +4,7 @@
#map1 = affine_map<(d0, d1, d2, d3) -> (d2, d3)>
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
-#DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>
+#DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>
// CHECK-LABEL: func.func @conv2d_all_sparse_CSR(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<8x8xi32, #{{.*}}>,
diff --git a/mlir/test/Dialect/SparseTensor/sparse_expand.mlir b/mlir/test/Dialect/SparseTensor/sparse_expand.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_expand.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_expand.mlir
@@ -8,21 +8,21 @@
// RUN: FileCheck %s --check-prefix=CHECK-CONVERT
#CSR = #sparse_tensor.encoding<{
- dimLevelType = [ "dense", "compressed" ]
+ lvlTypes = [ "dense", "compressed" ]
}>
#CSC = #sparse_tensor.encoding<{
- dimLevelType = [ "dense", "compressed" ],
+ lvlTypes = [ "dense", "compressed" ],
dimOrdering = affine_map<(i,j) -> (j,i)>
}>
#DCSC = #sparse_tensor.encoding<{
- dimLevelType = [ "compressed", "compressed" ],
+ lvlTypes = [ "compressed", "compressed" ],
dimOrdering = affine_map<(i,j) -> (j,i)>
}>
#SV = #sparse_tensor.encoding<{
- dimLevelType = [ "compressed" ]
+ lvlTypes = [ "compressed" ]
}>
#rowsum = {
diff --git a/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir b/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir
@@ -1,11 +1,11 @@
// RUN: mlir-opt %s --sparse-tensor-codegen --cse | FileCheck %s
#CSR = #sparse_tensor.encoding<{
- dimLevelType = [ "dense", "compressed" ]
+ lvlTypes = [ "dense", "compressed" ]
}>
#CSR_SLICE = #sparse_tensor.encoding<{
- dimLevelType = [ "dense", "compressed" ],
+ lvlTypes = [ "dense", "compressed" ],
slice = [ (0, 4, 1), (0, 8, 1) ]
}>
@@ -13,7 +13,7 @@
// CHECK-SAME: %[[VAL_0:.*0]]: memref,
// CHECK-SAME: %[[VAL_1:.*1]]: memref,
// CHECK-SAME: %[[VAL_2:.*2]]: memref,
-// CHECK-SAME: %[[VAL_3:.*3]]: !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>>)
+// CHECK-SAME: %[[VAL_3:.*3]]: !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>)
// CHECK: %[[VAL_4:.*]] = sparse_tensor.storage_specifier.init with %[[VAL_3]]
// CHECK: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK: %[[VAL_6:.*]] = arith.constant 4 : index
diff --git a/mlir/test/Dialect/SparseTensor/sparse_fill_zero.mlir b/mlir/test/Dialect/SparseTensor/sparse_fill_zero.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_fill_zero.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_fill_zero.mlir
@@ -1,6 +1,6 @@
// RUN: mlir-opt %s --linalg-generalize-named-ops --pre-sparsification-rewrite --sparsification --sparse-tensor-conversion --canonicalize --cse | FileCheck %s
-#DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>
+#DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>
// CHECK-LABEL: func.func @fill_zero_after_alloc(
// CHECK-SAME: %[[Arg0:.*]]: !llvm.ptr,
diff --git a/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir b/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
@@ -29,12 +29,12 @@
}
#CSR_SLICE = #sparse_tensor.encoding<{
- dimLevelType = [ "compressed", "compressed" ],
+ lvlTypes = [ "compressed", "compressed" ],
slice = [ (0, 4, 1), (2, 4, 1) ]
}>
#CSR_SLICE_DYN = #sparse_tensor.encoding<{
- dimLevelType = [ "compressed", "compressed" ],
+ lvlTypes = [ "compressed", "compressed" ],
slice = [ (?, ?, ?), (?, ?, ?)
] }> @@ -141,7 +141,7 @@ } #BCOO = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed-hi-nu", "singleton" ], + lvlTypes = [ "dense", "compressed-hi-nu", "singleton" ], }> // CHECK-LABEL: func.func @foreach_bcoo( diff --git a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir @@ -1,6 +1,6 @@ // RUN: mlir-opt %s -sparsification | FileCheck %s -#SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }> +#SV = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }> #trait1 = { indexing_maps = [ @@ -351,13 +351,13 @@ } // CHECK-LABEL: func.func @zero_preserving_math( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> { +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>) -> tensor<32xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> { // CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_3:.*]] = bufferization.alloc_tensor() : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> -// CHECK: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK: %[[VAL_3:.*]] = bufferization.alloc_tensor() : tensor<32xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> +// CHECK: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref // CHECK: %[[VAL_7:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_1]]] : memref // CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[T:.*]] = scf.for %[[VAL_9:.*]] = %[[VAL_7]] to %[[VAL_8]] step %[[VAL_2]] {{.*}} { @@ -371,11 +371,11 @@ // CHECK: %[[VAL_17:.*]] = math.log1p %[[VAL_16]] : f64 // CHECK: %[[VAL_18:.*]] = math.sin %[[VAL_17]] : f64 // CHECK: %[[VAL_19:.*]] = math.tanh %[[VAL_18]] : f64 -// CHECK: %[[Y:.*]] = sparse_tensor.insert %[[VAL_19]] into %{{.*}}[%[[VAL_10]]] : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> +// CHECK: %[[Y:.*]] = sparse_tensor.insert %[[VAL_19]] into %{{.*}}[%[[VAL_10]]] : tensor<32xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> // CHECK: scf.yield %[[Y]] // CHECK: } -// CHECK: %[[VAL_20:.*]] = sparse_tensor.load %[[T]] hasInserts : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> -// CHECK: return %[[VAL_20]] : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> +// CHECK: %[[VAL_20:.*]] = 
sparse_tensor.load %[[T]] hasInserts : tensor<32xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> +// CHECK: return %[[VAL_20]] : tensor<32xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> // CHECK: } func.func @zero_preserving_math(%arga: tensor<32xf64, #SV>) -> tensor<32xf64, #SV> { %c32 = arith.constant 32 : index @@ -398,25 +398,25 @@ } // CHECK-LABEL: func.func @complex_divbyc( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xcomplex, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<32xcomplex, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> { +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xcomplex, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>) -> tensor<32xcomplex, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> { // CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index // CHECK: %[[VAL_3:.*]] = complex.constant [0.000000e+00, 1.000000e+00] : complex -// CHECK: %[[VAL_4:.*]] = bufferization.alloc_tensor() : tensor<32xcomplex, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> -// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xcomplex, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xcomplex, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xcomplex, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref> +// CHECK: %[[VAL_4:.*]] = bufferization.alloc_tensor() : tensor<32xcomplex, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> +// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xcomplex, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xcomplex, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xcomplex, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref> // CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_1]]] : memref // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[T:.*]] = scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_2]] {{.*}} { // CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_10]]] : memref // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_10]]] : memref> // CHECK: %[[VAL_13:.*]] = complex.div %[[VAL_12]], %[[VAL_3]] : complex -// CHECK: %[[Y:.*]] = sparse_tensor.insert %[[VAL_13]] into %{{.*}}[%[[VAL_11]]] : tensor<32xcomplex, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> +// CHECK: %[[Y:.*]] = sparse_tensor.insert %[[VAL_13]] into %{{.*}}[%[[VAL_11]]] : tensor<32xcomplex, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> // CHECK: scf.yield %[[Y]] // CHECK: } -// CHECK: %[[VAL_14:.*]] = sparse_tensor.load %[[T]] hasInserts : tensor<32xcomplex, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> -// CHECK: return %[[VAL_14]] : tensor<32xcomplex, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> +// CHECK: %[[VAL_14:.*]] = sparse_tensor.load %[[T]] hasInserts : tensor<32xcomplex, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> +// CHECK: return %[[VAL_14]] : 
tensor<32xcomplex, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> // CHECK: } func.func @complex_divbyc(%arg0: tensor<32xcomplex, #SV>) -> tensor<32xcomplex, #SV> { %c = complex.constant [0.0, 1.0] : complex diff --git a/mlir/test/Dialect/SparseTensor/sparse_index.mlir b/mlir/test/Dialect/SparseTensor/sparse_index.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_index.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_index.mlir @@ -1,11 +1,11 @@ // RUN: mlir-opt %s -sparsification | FileCheck %s #DenseMatrix = #sparse_tensor.encoding<{ - dimLevelType = ["dense", "dense"] + lvlTypes = ["dense", "dense"] }> #SparseMatrix = #sparse_tensor.encoding<{ - dimLevelType = ["compressed", "compressed"] + lvlTypes = ["compressed", "compressed"] }> #trait = { diff --git a/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir b/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/generate-test-checks.py // RUN: mlir-opt %s -sparsification | FileCheck %s -#SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }> +#SV = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }> #trait2 = { indexing_maps = [ diff --git a/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir b/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir @@ -2,22 +2,22 @@ // RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \ // RUN: --sparsification | FileCheck %s -#SparseVector = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }> +#SparseVector = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }> -#DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }> +#DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> // CHECK-LABEL: func.func @matmul1( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<20x30xf32>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<10x30xf32>) -> tensor<10x30xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 30 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions 
%[[VAL_0]] {level = 0 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref // CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<20x30xf32> // CHECK: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<10x30xf32> // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref @@ -53,7 +53,7 @@ // CHECK-LABEL: func.func @matmul_sparse_rhs( // CHECK-SAME: %[[VAL_0:.*]]: tensor<10x20xf32>, -// CHECK-SAME: %[[VAL_1:.*]]: tensor<20x30xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_1:.*]]: tensor<20x30xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<10x30xf32>) -> tensor<10x30xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 10 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index @@ -102,40 +102,40 @@ // Computes C = A x B with all matrices sparse (SpMSpM) in DCSR. // // CHECK-LABEL: func.func @matmul2( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>, -// CHECK-SAME: %[[VAL_1:.*]]: tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) -> tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> { +// CHECK-SAME: %[[VAL_0:.*]]: tensor<4x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_1:.*]]: tensor<8x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>) -> tensor<4x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> { // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant false // CHECK-DAG: %[[VAL_5:.*]] = arith.constant true -// CHECK-DAG: %[[VAL_6:.*]] = bufferization.alloc_tensor() : tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ 
"compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = bufferization.alloc_tensor() : tensor<4x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<4x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<8x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref // CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_3]]] : memref -// CHECK: %[[VAL_19:.*]] = scf.for %[[VAL_20:.*]] = %[[VAL_17]] to %[[VAL_18]] step %[[VAL_3]] iter_args(%[[VAL_21:.*]] = %[[VAL_6]]) -> (tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) { +// CHECK: %[[VAL_19:.*]] = scf.for 
%[[VAL_20:.*]] = %[[VAL_17]] to %[[VAL_18]] step %[[VAL_3]] iter_args(%[[VAL_21:.*]] = %[[VAL_6]]) -> (tensor<4x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>) { // CHECK: %[[VAL_22:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_20]]] : memref -// CHECK: %[[VAL_23:.*]], %[[VAL_24:.*]], %[[VAL_25:.*]], %[[VAL_26:.*]] = sparse_tensor.expand %[[VAL_6]] : tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref, memref, memref +// CHECK: %[[VAL_23:.*]], %[[VAL_24:.*]], %[[VAL_25:.*]], %[[VAL_26:.*]] = sparse_tensor.expand %[[VAL_6]] : tensor<4x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref, memref, memref // CHECK: %[[VAL_27:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_20]]] : memref // CHECK: %[[VAL_28:.*]] = arith.addi %[[VAL_20]], %[[VAL_3]] : index // CHECK: %[[VAL_29:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_28]]] : memref // CHECK: %[[VAL_30:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[VAL_31:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_3]]] : memref -// CHECK: %[[VAL_32:.*]]:4 = scf.while (%[[VAL_33:.*]] = %[[VAL_27]], %[[VAL_34:.*]] = %[[VAL_30]], %[[VAL_35:.*]] = %[[VAL_26]], %[[VAL_36:.*]] = %[[VAL_21]]) : (index, index, index, tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) -> (index, index, index, tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) { +// CHECK: %[[VAL_32:.*]]:4 = scf.while (%[[VAL_33:.*]] = %[[VAL_27]], %[[VAL_34:.*]] = %[[VAL_30]], %[[VAL_35:.*]] = %[[VAL_26]], %[[VAL_36:.*]] = %[[VAL_21]]) : (index, index, index, tensor<4x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>) -> (index, index, index, tensor<4x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>) { // CHECK: %[[VAL_37:.*]] = arith.cmpi ult, %[[VAL_33]], %[[VAL_29]] : index // CHECK: %[[VAL_38:.*]] = arith.cmpi ult, %[[VAL_34]], %[[VAL_31]] : index // CHECK: %[[VAL_39:.*]] = arith.andi %[[VAL_37]], %[[VAL_38]] : i1 -// CHECK: scf.condition(%[[VAL_39]]) %[[VAL_33]], %[[VAL_34]], %[[VAL_35]], %[[VAL_36]] : index, index, index, tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> +// CHECK: scf.condition(%[[VAL_39]]) %[[VAL_33]], %[[VAL_34]], %[[VAL_35]], %[[VAL_36]] : index, index, index, tensor<4x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> // CHECK: } do { -// CHECK: ^bb0(%[[VAL_40:.*]]: index, %[[VAL_41:.*]]: index, %[[VAL_42:.*]]: index, %[[VAL_43:.*]]: tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>): +// CHECK: ^bb0(%[[VAL_40:.*]]: index, %[[VAL_41:.*]]: index, %[[VAL_42:.*]]: index, %[[VAL_43:.*]]: tensor<4x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>): // CHECK: %[[VAL_44:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_40]]] : memref // CHECK: %[[VAL_45:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_41]]] : memref // CHECK: %[[VAL_46:.*]] = arith.cmpi ult, %[[VAL_45]], %[[VAL_44]] : index @@ -143,7 +143,7 @@ // CHECK: %[[VAL_48:.*]] = arith.cmpi eq, %[[VAL_44]], %[[VAL_47]] : index // CHECK: %[[VAL_49:.*]] = arith.cmpi eq, %[[VAL_45]], %[[VAL_47]] : index // CHECK: %[[VAL_50:.*]] = arith.andi %[[VAL_48]], %[[VAL_49]] : i1 -// CHECK: %[[VAL_51:.*]]:2 = scf.if %[[VAL_50]] -> (index, tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) { +// CHECK: 
%[[VAL_51:.*]]:2 = scf.if %[[VAL_50]] -> (index, tensor<4x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>) { // CHECK: %[[VAL_52:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_40]]] : memref // CHECK: %[[VAL_53:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_41]]] : memref // CHECK: %[[VAL_54:.*]] = arith.addi %[[VAL_41]], %[[VAL_3]] : index @@ -167,9 +167,9 @@ // CHECK: memref.store %[[VAL_63]], %[[VAL_23]]{{\[}}%[[VAL_59]]] : memref // CHECK: scf.yield %[[VAL_68:.*]] : index // CHECK: } -// CHECK: scf.yield %[[VAL_69:.*]], %[[VAL_43]] : index, tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> +// CHECK: scf.yield %[[VAL_69:.*]], %[[VAL_43]] : index, tensor<4x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> // CHECK: } else { -// CHECK: scf.yield %[[VAL_42]], %[[VAL_43]] : index, tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> +// CHECK: scf.yield %[[VAL_42]], %[[VAL_43]] : index, tensor<4x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> // CHECK: } // CHECK: %[[VAL_70:.*]] = arith.cmpi eq, %[[VAL_44]], %[[VAL_47]] : index // CHECK: %[[VAL_71:.*]] = arith.addi %[[VAL_40]], %[[VAL_3]] : index @@ -177,13 +177,13 @@ // CHECK: %[[VAL_73:.*]] = arith.cmpi eq, %[[VAL_45]], %[[VAL_47]] : index // CHECK: %[[VAL_74:.*]] = arith.addi %[[VAL_41]], %[[VAL_3]] : index // CHECK: %[[VAL_75:.*]] = arith.select %[[VAL_73]], %[[VAL_74]], %[[VAL_41]] : index -// CHECK: scf.yield %[[VAL_72]], %[[VAL_75]], %[[VAL_76:.*]]#0, %[[VAL_76]]#1 : index, index, index, tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> +// CHECK: scf.yield %[[VAL_72]], %[[VAL_75]], %[[VAL_76:.*]]#0, %[[VAL_76]]#1 : index, index, index, tensor<4x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> // CHECK: } -// CHECK: %[[VAL_77:.*]] = sparse_tensor.compress %[[VAL_23]], %[[VAL_24]], %[[VAL_25]], %[[VAL_78:.*]]#2 into %[[VAL_78]]#3{{\[}}%[[VAL_22]]] : memref, memref, memref, tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> -// CHECK: scf.yield %[[VAL_77]] : tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> +// CHECK: %[[VAL_77:.*]] = sparse_tensor.compress %[[VAL_23]], %[[VAL_24]], %[[VAL_25]], %[[VAL_78:.*]]#2 into %[[VAL_78]]#3{{\[}}%[[VAL_22]]] : memref, memref, memref, tensor<4x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> +// CHECK: scf.yield %[[VAL_77]] : tensor<4x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> // CHECK: } -// CHECK: %[[VAL_79:.*]] = sparse_tensor.load %[[VAL_80:.*]] hasInserts : tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> -// CHECK: return %[[VAL_79]] : tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> +// CHECK: %[[VAL_79:.*]] = sparse_tensor.load %[[VAL_80:.*]] hasInserts : tensor<4x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> +// CHECK: return %[[VAL_79]] : tensor<4x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> // CHECK: } func.func @matmul2(%A: tensor<4x8xf64, #DCSR>, %B: tensor<8x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> { @@ -197,17 +197,17 @@ // CHECK-LABEL: func.func @conv2d( // CHECK-SAME: %[[VAL_0:.*]]: tensor<8x8xi32>, -// CHECK-SAME: %[[VAL_1:.*]]: tensor<3x3xi32, 
#sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_1:.*]]: tensor<3x3xi32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<6x6xi32>) -> tensor<6x6xi32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 6 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<8x8xi32> -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<3x3xi32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref // CHECK: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<6x6xi32> // CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref @@ -247,18 +247,18 @@ // CHECK-LABEL: func.func @quantized_matmul( // CHECK-SAME: %[[VAL_0:.*]]: tensor<5x3xi8>, -// CHECK-SAME: %[[VAL_1:.*]]: tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_1:.*]]: tensor<3x6xi8, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<5x6xi64>) -> tensor<5x6xi64> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 5 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 2 : i64 // CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : memref<5x3xi8> -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = 
sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<3x6xi8, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref // CHECK: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref<5x6xi64> // CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref @@ -297,17 +297,17 @@ } // CHECK-LABEL: func.func @sparse_dot( -// CHECK-SAME: %[[VAL_0:.*0]]: tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>, -// CHECK-SAME: %[[VAL_1:.*1]]: tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*0]]: tensor<1024xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>, +// CHECK-SAME: %[[VAL_1:.*1]]: tensor<1024xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>, // CHECK-SAME: %[[VAL_2:.*2]]: tensor) -> tensor { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: 
%[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<1024xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<1024xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref // CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_11]][] : memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_lower.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_lower.mlir @@ -8,7 +8,7 @@ // RUN: --tensor-bufferize --finalizing-bufferize | \ // RUN: FileCheck %s --check-prefix=CHECK-LIR -#CSR = #sparse_tensor.encoding<{dimLevelType = [ "dense", "compressed" ]}> +#CSR = #sparse_tensor.encoding<{lvlTypes = [ "dense", "compressed" ]}> #trait_matvec = { indexing_maps = [ diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir @@ -9,7 +9,7 @@ // RUN: FileCheck %s --check-prefix=CHECK-LIR #CSC = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(i,j) -> (j,i)> }> @@ -24,15 +24,15 @@ } // CHECK-HIR-LABEL: func @matvec( -// CHECK-HIR-SAME: %[[VAL_0:.*]]: tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>>, +// CHECK-HIR-SAME: %[[VAL_0:.*]]: tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>>, // CHECK-HIR-SAME: %[[VAL_1:.*]]: tensor<64xf64>, // CHECK-HIR-SAME: %[[VAL_2:.*]]: tensor<32xf64>) -> tensor<32xf64> { // CHECK-HIR-DAG: %[[VAL_3:.*]] = arith.constant 64 : index // CHECK-HIR-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-HIR-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-HIR-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref -// CHECK-HIR-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref -// CHECK-HIR-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> 
}>> to memref +// CHECK-HIR-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref +// CHECK-HIR-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref +// CHECK-HIR-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref // CHECK-HIR-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64> // CHECK-HIR-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64> // CHECK-HIR: scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir @@ -8,7 +8,7 @@ // RUN: --tensor-bufferize --finalizing-bufferize | \ // RUN: FileCheck %s --check-prefix=CHECK-LIR -#CSR = #sparse_tensor.encoding<{dimLevelType = [ "dense", "compressed" ]}> +#CSR = #sparse_tensor.encoding<{lvlTypes = [ "dense", "compressed" ]}> #trait_matvec = { indexing_maps = [ diff --git a/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir b/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir @@ -5,7 +5,7 @@ // RUN: --canonicalize --cse | FileCheck %s #CSR = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(i,j) -> (i,j)> }> diff --git a/mlir/test/Dialect/SparseTensor/sparse_nd.mlir b/mlir/test/Dialect/SparseTensor/sparse_nd.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_nd.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_nd.mlir @@ -5,7 +5,7 @@ // but an acyclic iteration graph using sparse constraints only. 
#SparseTensor = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "dense", "dense", "compressed", + lvlTypes = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ] }> @@ -22,7 +22,7 @@ // CHECK-LABEL: func @mul( // CHECK-SAME: %[[VAL_0:.*]]: tensor<10x20x30x40x50x60x70x80xf32>, -// CHECK-SAME: %[[VAL_1:.*]]: tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ] }>>, +// CHECK-SAME: %[[VAL_1:.*]]: tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ] }>>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<10x20x30x40x50x60x70x80xf32>) -> tensor<10x20x30x40x50x60x70x80xf32> { // CHECK-DAG: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32 // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 10 : index @@ -34,11 +34,11 @@ // CHECK-DAG: %[[VAL_11:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_12:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_0]] : memref<10x20x30x40x50x60x70x80xf32> -// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 3 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 3 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 4 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_17:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 4 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_18:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 3 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 3 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 4 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_17:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 4 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_18:.*]] 
= sparse_tensor.values %[[VAL_1]] : tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_20:.*]] = bufferization.to_memref %[[VAL_2]] : memref<10x20x30x40x50x60x70x80xf32> // CHECK: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_20]] : memref<10x20x30x40x50x60x70x80xf32> // CHECK: scf.for %[[VAL_21:.*]] = %[[VAL_11]] to %[[VAL_10]] step %[[VAL_12]] { diff --git a/mlir/test/Dialect/SparseTensor/sparse_out.mlir b/mlir/test/Dialect/SparseTensor/sparse_out.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_out.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_out.mlir @@ -1,17 +1,17 @@ // RUN: mlir-opt %s -sparsification | FileCheck %s #CSR = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(i,j) -> (i,j)> }> #DCSR = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed" ], + lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(i,j) -> (i,j)> }> #SparseTensor = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed", "compressed" ] + lvlTypes = [ "compressed", "compressed", "compressed" ] }> #trait_scale_inpl = { @@ -23,13 +23,13 @@ } // CHECK-LABEL: func.func @sparse_simply_dynamic1( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) -> tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> { +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>) -> tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> { // CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 2.000000e+00 : f32 -// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref // CHECK: %[[VAL_7:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_1]]] : memref // CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_7]] to %[[VAL_8]] step %[[VAL_2]] { @@ -42,8 +42,8 @@ // CHECK: memref.store %[[VAL_15]], %[[VAL_6]]{{\[}}%[[VAL_13]]] : memref // CHECK: } // CHECK: } -// CHECK: %[[VAL_16:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ 
"compressed", "compressed" ] }>> -// CHECK: return %[[VAL_16]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> +// CHECK: %[[VAL_16:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> +// CHECK: return %[[VAL_16]] : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> // CHECK: } func.func @sparse_simply_dynamic1(%argx: tensor<32x16xf32, #DCSR>) -> tensor<32x16xf32, #DCSR> { %c = arith.constant 2.0 : f32 @@ -57,12 +57,12 @@ } // CHECK-LABEL: func.func @sparse_simply_dynamic2( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) -> tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> { +// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>) -> tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> { // CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_3:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_3:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref // CHECK: %[[VAL_6:.*]] = memref.load %[[VAL_3]]{{\[}}%[[VAL_1]]] : memref // CHECK: %[[VAL_7:.*]] = memref.load %[[VAL_3]]{{\[}}%[[VAL_2]]] : memref // CHECK: scf.for %[[VAL_8:.*]] = %[[VAL_6]] to %[[VAL_7]] step %[[VAL_2]] { @@ -76,8 +76,8 @@ // CHECK: memref.store %[[VAL_15]], %[[VAL_5]]{{\[}}%[[VAL_12]]] : memref // CHECK: } // CHECK: } -// CHECK: %[[VAL_16:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> -// CHECK: return %[[VAL_16]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> +// CHECK: %[[VAL_16:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> +// CHECK: return %[[VAL_16]] : tensor<32x16xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> // CHECK: } func.func @sparse_simply_dynamic2(%argx: tensor<32x16xf32, #DCSR>) -> tensor<32x16xf32, #DCSR> { %0 = linalg.generic #trait_scale_inpl @@ -99,30 +99,30 @@ } // CHECK-LABEL: func.func @sparse_truly_dynamic( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>>) -> tensor<10x20xf32, 
#sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> { +// CHECK-SAME: %[[VAL_0:.*]]: tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>) -> tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> { // CHECK-DAG: %[[VAL_1:.*]] = arith.constant 10 : index // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 2.000000e+00 : f32 -// CHECK-DAG: %[[VAL_5:.*]] = bufferization.alloc_tensor() : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK: %[[VAL_9:.*]] = scf.for %[[VAL_10:.*]] = %[[VAL_2]] to %[[VAL_1]] step %[[VAL_3]] iter_args(%[[VAL_11:.*]] = %[[VAL_5]]) -> (tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) { +// CHECK-DAG: %[[VAL_5:.*]] = bufferization.alloc_tensor() : tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref +// CHECK: %[[VAL_9:.*]] = scf.for %[[VAL_10:.*]] = %[[VAL_2]] to %[[VAL_1]] step %[[VAL_3]] iter_args(%[[VAL_11:.*]] = %[[VAL_5]]) -> (tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>) { // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_10]]] : memref // CHECK: %[[VAL_13:.*]] = arith.addi %[[VAL_10]], %[[VAL_3]] : index // CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_13]]] : memref -// CHECK: %[[VAL_15:.*]] = scf.for %[[VAL_16:.*]] = %[[VAL_12]] to %[[VAL_14]] step %[[VAL_3]] iter_args(%[[VAL_17:.*]] = %[[VAL_11]]) -> (tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) { +// CHECK: %[[VAL_15:.*]] = scf.for %[[VAL_16:.*]] = %[[VAL_12]] to %[[VAL_14]] step %[[VAL_3]] iter_args(%[[VAL_17:.*]] = %[[VAL_11]]) -> (tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>) { // CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_16]]] : memref // CHECK: %[[VAL_19:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_16]]] : memref // CHECK: %[[VAL_20:.*]] = arith.mulf %[[VAL_19]], %[[VAL_4]] : f32 -// CHECK: %[[VAL_21:.*]] = sparse_tensor.insert %[[VAL_20]] into %[[VAL_17]]{{\[}}%[[VAL_10]], %[[VAL_18]]] : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> -// CHECK: scf.yield %[[VAL_21]] : tensor<10x20xf32, 
#sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> +// CHECK: %[[VAL_21:.*]] = sparse_tensor.insert %[[VAL_20]] into %[[VAL_17]]{{\[}}%[[VAL_10]], %[[VAL_18]]] : tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> +// CHECK: scf.yield %[[VAL_21]] : tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> // CHECK: } -// CHECK: scf.yield %[[VAL_22:.*]] : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> +// CHECK: scf.yield %[[VAL_22:.*]] : tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> // CHECK: } -// CHECK: %[[VAL_23:.*]] = sparse_tensor.load %[[VAL_24:.*]] hasInserts : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> -// CHECK: return %[[VAL_23]] : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> +// CHECK: %[[VAL_23:.*]] = sparse_tensor.load %[[VAL_24:.*]] hasInserts : tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> +// CHECK: return %[[VAL_23]] : tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> // CHECK: } func.func @sparse_truly_dynamic(%arga: tensor<10x20xf32, #CSR>) -> tensor<10x20xf32, #DCSR> { %s = arith.constant 2.0 : f32 @@ -148,41 +148,41 @@ } // CHECK-LABEL: func.func @sumred( -// CHECK-SAME: %[[VAL_0:.*]]: tensor>, -// CHECK-SAME: %[[VAL_1:.*]]: tensor>) -> tensor> { +// CHECK-SAME: %[[VAL_0:.*]]: tensor>, +// CHECK-SAME: %[[VAL_1:.*]]: tensor>) -> tensor> { // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : i32 // CHECK-DAG: %[[VAL_FALSE:.*]] = arith.constant false // CHECK-DAG: %[[VAL_TRUE:.*]] = arith.constant true -// CHECK: %[[VAL_5:.*]] = tensor.dim %[[VAL_0]], %[[VAL_2]] : tensor> -// CHECK: %[[VAL_6:.*]] = tensor.dim %[[VAL_0]], %[[VAL_3]] : tensor> -// CHECK: %[[VAL_7:.*]] = bufferization.alloc_tensor(%[[VAL_5]], %[[VAL_6]]) : tensor> -// CHECK: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor> to memref -// CHECK: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor> to memref -// CHECK: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor> to memref -// CHECK: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor> to memref -// CHECK: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor> to memref -// CHECK: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor> to memref -// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref -// CHECK: %[[VAL_15:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK: %[[VAL_16:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK: %[[VAL_17:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor> to memref -// CHECK: %[[VAL_18:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor> to memref -// CHECK: %[[VAL_19:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 2 : index} : tensor> to memref -// CHECK: %[[VAL_20:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 2 : index} : tensor> to memref -// CHECK: %[[VAL_21:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to 
memref +// CHECK: %[[VAL_5:.*]] = tensor.dim %[[VAL_0]], %[[VAL_2]] : tensor> +// CHECK: %[[VAL_6:.*]] = tensor.dim %[[VAL_0]], %[[VAL_3]] : tensor> +// CHECK: %[[VAL_7:.*]] = bufferization.alloc_tensor(%[[VAL_5]], %[[VAL_6]]) : tensor> +// CHECK: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor> to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor> to memref +// CHECK: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor> to memref +// CHECK: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor> to memref +// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref +// CHECK: %[[VAL_15:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK: %[[VAL_16:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK: %[[VAL_17:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor> to memref +// CHECK: %[[VAL_18:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor> to memref +// CHECK: %[[VAL_19:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 2 : index} : tensor> to memref +// CHECK: %[[VAL_20:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 2 : index} : tensor> to memref +// CHECK: %[[VAL_21:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK: %[[VAL_22:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[VAL_23:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_24:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[VAL_25:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_3]]] : memref -// CHECK: %[[VAL_26:.*]]:3 = scf.while (%[[VAL_27:.*]] = %[[VAL_22]], %[[VAL_28:.*]] = %[[VAL_24]], %[[VAL_29:.*]] = %[[VAL_7]]) : (index, index, tensor>) -> (index, index, tensor>) { +// CHECK: %[[VAL_26:.*]]:3 = scf.while (%[[VAL_27:.*]] = %[[VAL_22]], %[[VAL_28:.*]] = %[[VAL_24]], %[[VAL_29:.*]] = %[[VAL_7]]) : (index, index, tensor>) -> (index, index, tensor>) { // CHECK: %[[VAL_30:.*]] = arith.cmpi ult, %[[VAL_27]], %[[VAL_23]] : index // CHECK: %[[VAL_31:.*]] = arith.cmpi ult, %[[VAL_28]], %[[VAL_25]] : index // CHECK: %[[VAL_32:.*]] = arith.andi %[[VAL_30]], %[[VAL_31]] : i1 -// CHECK: scf.condition(%[[VAL_32]]) %[[VAL_27]], %[[VAL_28]], %[[VAL_29]] : index, index, tensor> +// CHECK: scf.condition(%[[VAL_32]]) %[[VAL_27]], %[[VAL_28]], %[[VAL_29]] : index, index, tensor> // CHECK: } do { -// CHECK: ^bb0(%[[VAL_33:.*]]: index, %[[VAL_34:.*]]: index, %[[VAL_35:.*]]: tensor>): +// CHECK: ^bb0(%[[VAL_33:.*]]: index, %[[VAL_34:.*]]: index, %[[VAL_35:.*]]: tensor>): // CHECK: %[[VAL_36:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_33]]] : memref // CHECK: %[[VAL_37:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_34]]] : memref // CHECK: %[[VAL_38:.*]] = arith.cmpi ult, %[[VAL_37]], %[[VAL_36]] : index @@ -190,20 +190,20 @@ // CHECK: %[[VAL_40:.*]] = arith.cmpi eq, %[[VAL_36]], %[[VAL_39]] : index // CHECK: %[[VAL_41:.*]] = arith.cmpi eq, %[[VAL_37]], %[[VAL_39]] : index // CHECK: %[[VAL_42:.*]] = arith.andi %[[VAL_40]], %[[VAL_41]] : i1 -// CHECK: %[[VAL_43:.*]] = scf.if %[[VAL_42]] -> (tensor>) { +// CHECK: %[[VAL_43:.*]] = scf.if %[[VAL_42]] -> (tensor>) { // CHECK: %[[VAL_44:.*]] = 
memref.load %[[VAL_10]]{{\[}}%[[VAL_33]]] : memref // CHECK: %[[VAL_45:.*]] = arith.addi %[[VAL_33]], %[[VAL_3]] : index // CHECK: %[[VAL_46:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_45]]] : memref // CHECK: %[[VAL_47:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_34]]] : memref // CHECK: %[[VAL_48:.*]] = arith.addi %[[VAL_34]], %[[VAL_3]] : index // CHECK: %[[VAL_49:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_48]]] : memref -// CHECK: %[[VAL_50:.*]]:3 = scf.while (%[[VAL_51:.*]] = %[[VAL_44]], %[[VAL_52:.*]] = %[[VAL_47]], %[[VAL_53:.*]] = %[[VAL_35]]) : (index, index, tensor>) -> (index, index, tensor>) { +// CHECK: %[[VAL_50:.*]]:3 = scf.while (%[[VAL_51:.*]] = %[[VAL_44]], %[[VAL_52:.*]] = %[[VAL_47]], %[[VAL_53:.*]] = %[[VAL_35]]) : (index, index, tensor>) -> (index, index, tensor>) { // CHECK: %[[VAL_54:.*]] = arith.cmpi ult, %[[VAL_51]], %[[VAL_46]] : index // CHECK: %[[VAL_55:.*]] = arith.cmpi ult, %[[VAL_52]], %[[VAL_49]] : index // CHECK: %[[VAL_56:.*]] = arith.andi %[[VAL_54]], %[[VAL_55]] : i1 -// CHECK: scf.condition(%[[VAL_56]]) %[[VAL_51]], %[[VAL_52]], %[[VAL_53]] : index, index, tensor> +// CHECK: scf.condition(%[[VAL_56]]) %[[VAL_51]], %[[VAL_52]], %[[VAL_53]] : index, index, tensor> // CHECK: } do { -// CHECK: ^bb0(%[[VAL_57:.*]]: index, %[[VAL_58:.*]]: index, %[[VAL_59:.*]]: tensor>): +// CHECK: ^bb0(%[[VAL_57:.*]]: index, %[[VAL_58:.*]]: index, %[[VAL_59:.*]]: tensor>): // CHECK: %[[VAL_60:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_57]]] : memref // CHECK: %[[VAL_61:.*]] = memref.load %[[VAL_18]]{{\[}}%[[VAL_58]]] : memref // CHECK: %[[VAL_62:.*]] = arith.cmpi ult, %[[VAL_61]], %[[VAL_60]] : index @@ -211,20 +211,20 @@ // CHECK: %[[VAL_64:.*]] = arith.cmpi eq, %[[VAL_60]], %[[VAL_63]] : index // CHECK: %[[VAL_65:.*]] = arith.cmpi eq, %[[VAL_61]], %[[VAL_63]] : index // CHECK: %[[VAL_66:.*]] = arith.andi %[[VAL_64]], %[[VAL_65]] : i1 -// CHECK: %[[VAL_67:.*]] = scf.if %[[VAL_66]] -> (tensor>) { +// CHECK: %[[VAL_67:.*]] = scf.if %[[VAL_66]] -> (tensor>) { // CHECK: %[[VAL_68:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_57]]] : memref // CHECK: %[[VAL_69:.*]] = arith.addi %[[VAL_57]], %[[VAL_3]] : index // CHECK: %[[VAL_70:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_69]]] : memref // CHECK: %[[VAL_71:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_58]]] : memref // CHECK: %[[VAL_72:.*]] = arith.addi %[[VAL_58]], %[[VAL_3]] : index // CHECK: %[[VAL_73:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_72]]] : memref -// CHECK: %[[VAL_74:.*]]:5 = scf.while (%[[VAL_75:.*]] = %[[VAL_68]], %[[VAL_76:.*]] = %[[VAL_71]], %[[VAL_77:.*]] = %[[VAL_4]], %[[VAL_200:.*]] = %[[VAL_FALSE]], %[[VAL_78:.*]] = %[[VAL_59]]) : (index, index, i32, i1, tensor>) -> (index, index, i32, i1, tensor>) { +// CHECK: %[[VAL_74:.*]]:5 = scf.while (%[[VAL_75:.*]] = %[[VAL_68]], %[[VAL_76:.*]] = %[[VAL_71]], %[[VAL_77:.*]] = %[[VAL_4]], %[[VAL_200:.*]] = %[[VAL_FALSE]], %[[VAL_78:.*]] = %[[VAL_59]]) : (index, index, i32, i1, tensor>) -> (index, index, i32, i1, tensor>) { // CHECK: %[[VAL_79:.*]] = arith.cmpi ult, %[[VAL_75]], %[[VAL_70]] : index // CHECK: %[[VAL_80:.*]] = arith.cmpi ult, %[[VAL_76]], %[[VAL_73]] : index // CHECK: %[[VAL_81:.*]] = arith.andi %[[VAL_79]], %[[VAL_80]] : i1 -// CHECK: scf.condition(%[[VAL_81]]) %[[VAL_75]], %[[VAL_76]], %[[VAL_77]], %[[VAL_200]], %[[VAL_78]] : index, index, i32, i1, tensor> +// CHECK: scf.condition(%[[VAL_81]]) %[[VAL_75]], %[[VAL_76]], %[[VAL_77]], %[[VAL_200]], %[[VAL_78]] : index, index, i32, i1, tensor> // CHECK: } do { -// CHECK: ^bb0(%[[VAL_82:.*]]: index, 
%[[VAL_83:.*]]: index, %[[VAL_84:.*]]: i32, %[[VAL_201:.*]]: i1, %[[VAL_85:.*]]: tensor>): +// CHECK: ^bb0(%[[VAL_82:.*]]: index, %[[VAL_83:.*]]: index, %[[VAL_84:.*]]: i32, %[[VAL_201:.*]]: i1, %[[VAL_85:.*]]: tensor>): // CHECK: %[[VAL_86:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_82]]] : memref // CHECK: %[[VAL_87:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_83]]] : memref // CHECK: %[[VAL_88:.*]] = arith.cmpi ult, %[[VAL_87]], %[[VAL_86]] : index @@ -232,14 +232,14 @@ // CHECK: %[[VAL_90:.*]] = arith.cmpi eq, %[[VAL_86]], %[[VAL_89]] : index // CHECK: %[[VAL_91:.*]] = arith.cmpi eq, %[[VAL_87]], %[[VAL_89]] : index // CHECK: %[[VAL_92:.*]] = arith.andi %[[VAL_90]], %[[VAL_91]] : i1 -// CHECK: %[[VAL_93:.*]]:3 = scf.if %[[VAL_92]] -> (i32, i1, tensor>) { +// CHECK: %[[VAL_93:.*]]:3 = scf.if %[[VAL_92]] -> (i32, i1, tensor>) { // CHECK: %[[VAL_94:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_82]]] : memref // CHECK: %[[VAL_95:.*]] = memref.load %[[VAL_21]]{{\[}}%[[VAL_83]]] : memref // CHECK: %[[VAL_96:.*]] = arith.muli %[[VAL_94]], %[[VAL_95]] : i32 // CHECK: %[[VAL_97:.*]] = arith.addi %[[VAL_84]], %[[VAL_96]] : i32 -// CHECK: scf.yield %[[VAL_97]], %[[VAL_TRUE]], %[[VAL_85]] : i32, i1, tensor> +// CHECK: scf.yield %[[VAL_97]], %[[VAL_TRUE]], %[[VAL_85]] : i32, i1, tensor> // CHECK: } else { -// CHECK: scf.yield %[[VAL_84]], %[[VAL_201]], %[[VAL_85]] : i32, i1, tensor> +// CHECK: scf.yield %[[VAL_84]], %[[VAL_201]], %[[VAL_85]] : i32, i1, tensor> // CHECK: } // CHECK: %[[VAL_98:.*]] = arith.cmpi eq, %[[VAL_86]], %[[VAL_89]] : index // CHECK: %[[VAL_99:.*]] = arith.addi %[[VAL_82]], %[[VAL_3]] : index @@ -247,17 +247,17 @@ // CHECK: %[[VAL_101:.*]] = arith.cmpi eq, %[[VAL_87]], %[[VAL_89]] : index // CHECK: %[[VAL_102:.*]] = arith.addi %[[VAL_83]], %[[VAL_3]] : index // CHECK: %[[VAL_103:.*]] = arith.select %[[VAL_101]], %[[VAL_102]], %[[VAL_83]] : index -// CHECK: scf.yield %[[VAL_100]], %[[VAL_103]], %[[VAL_104:.*]]#0, %[[VAL_104]]#1, %[[VAL_104]]#2 : index, index, i32, i1, tensor> +// CHECK: scf.yield %[[VAL_100]], %[[VAL_103]], %[[VAL_104:.*]]#0, %[[VAL_104]]#1, %[[VAL_104]]#2 : index, index, i32, i1, tensor> // CHECK: } -// CHECK: %[[VAL_202:.*]] = scf.if %[[VAL_74]]#3 -> (tensor>) { -// CHECK: %[[VAL_105:.*]] = sparse_tensor.insert %[[VAL_74]]#2 into %[[VAL_74]]#4{{\[}}%[[VAL_39]], %[[VAL_63]]] : tensor> -// CHECK: scf.yield %[[VAL_105]] : tensor> +// CHECK: %[[VAL_202:.*]] = scf.if %[[VAL_74]]#3 -> (tensor>) { +// CHECK: %[[VAL_105:.*]] = sparse_tensor.insert %[[VAL_74]]#2 into %[[VAL_74]]#4{{\[}}%[[VAL_39]], %[[VAL_63]]] : tensor> +// CHECK: scf.yield %[[VAL_105]] : tensor> // CHECK: } else { -// CHECK: scf.yield %[[VAL_74]]#4 : tensor> +// CHECK: scf.yield %[[VAL_74]]#4 : tensor> // CHECK: } -// CHECK: scf.yield %[[VAL_202]] : tensor> +// CHECK: scf.yield %[[VAL_202]] : tensor> // CHECK: } else { -// CHECK: scf.yield %[[VAL_59]] : tensor> +// CHECK: scf.yield %[[VAL_59]] : tensor> // CHECK: } // CHECK: %[[VAL_107:.*]] = arith.cmpi eq, %[[VAL_60]], %[[VAL_63]] : index // CHECK: %[[VAL_108:.*]] = arith.addi %[[VAL_57]], %[[VAL_3]] : index @@ -265,11 +265,11 @@ // CHECK: %[[VAL_110:.*]] = arith.cmpi eq, %[[VAL_61]], %[[VAL_63]] : index // CHECK: %[[VAL_111:.*]] = arith.addi %[[VAL_58]], %[[VAL_3]] : index // CHECK: %[[VAL_112:.*]] = arith.select %[[VAL_110]], %[[VAL_111]], %[[VAL_58]] : index -// CHECK: scf.yield %[[VAL_109]], %[[VAL_112]], %[[VAL_113:.*]] : index, index, tensor> +// CHECK: scf.yield %[[VAL_109]], %[[VAL_112]], %[[VAL_113:.*]] : index, index, tensor> // CHECK: } -// 
CHECK: scf.yield %[[VAL_114:.*]]#2 : tensor> +// CHECK: scf.yield %[[VAL_114:.*]]#2 : tensor> // CHECK: } else { -// CHECK: scf.yield %[[VAL_35]] : tensor> +// CHECK: scf.yield %[[VAL_35]] : tensor> // CHECK: } // CHECK: %[[VAL_115:.*]] = arith.cmpi eq, %[[VAL_36]], %[[VAL_39]] : index // CHECK: %[[VAL_116:.*]] = arith.addi %[[VAL_33]], %[[VAL_3]] : index @@ -277,10 +277,10 @@ // CHECK: %[[VAL_118:.*]] = arith.cmpi eq, %[[VAL_37]], %[[VAL_39]] : index // CHECK: %[[VAL_119:.*]] = arith.addi %[[VAL_34]], %[[VAL_3]] : index // CHECK: %[[VAL_120:.*]] = arith.select %[[VAL_118]], %[[VAL_119]], %[[VAL_34]] : index -// CHECK: scf.yield %[[VAL_117]], %[[VAL_120]], %[[VAL_121:.*]] : index, index, tensor> +// CHECK: scf.yield %[[VAL_117]], %[[VAL_120]], %[[VAL_121:.*]] : index, index, tensor> // CHECK: } -// CHECK: %[[VAL_122:.*]] = sparse_tensor.load %[[VAL_123:.*]]#2 hasInserts : tensor> -// CHECK: return %[[VAL_122]] : tensor> +// CHECK: %[[VAL_122:.*]] = sparse_tensor.load %[[VAL_123:.*]]#2 hasInserts : tensor> +// CHECK: return %[[VAL_122]] : tensor> // CHECK: } func.func @sumred(%arga: tensor, %argb: tensor) -> tensor { @@ -312,42 +312,42 @@ } // CHECK-LABEL: func.func @matmat( -// CHECK-SAME: %[[VAL_0:.*]]: tensor>, -// CHECK-SAME: %[[VAL_1:.*]]: tensor>) -> tensor> { +// CHECK-SAME: %[[VAL_0:.*]]: tensor>, +// CHECK-SAME: %[[VAL_1:.*]]: tensor>) -> tensor> { // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant false // CHECK-DAG: %[[VAL_5:.*]] = arith.constant true -// CHECK: %[[VAL_6:.*]] = tensor.dim %[[VAL_0]], %[[VAL_2]] : tensor> -// CHECK: %[[VAL_7:.*]] = tensor.dim %[[VAL_1]], %[[VAL_3]] : tensor> -// CHECK: %[[VAL_8:.*]] = bufferization.alloc_tensor(%[[VAL_6]], %[[VAL_7]]) : tensor> -// CHECK: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor> to memref -// CHECK: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor> to memref -// CHECK: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor> to memref -// CHECK: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor> to memref -// CHECK: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref -// CHECK: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK: %[[VAL_16:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor> to memref -// CHECK: %[[VAL_17:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor> to memref -// CHECK: %[[VAL_18:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK: %[[VAL_6:.*]] = tensor.dim %[[VAL_0]], %[[VAL_2]] : tensor> +// CHECK: %[[VAL_7:.*]] = tensor.dim %[[VAL_1]], %[[VAL_3]] : tensor> +// CHECK: %[[VAL_8:.*]] = bufferization.alloc_tensor(%[[VAL_6]], %[[VAL_7]]) : tensor> +// CHECK: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor> to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor> to memref +// CHECK: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor> to memref +// CHECK: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref +// CHECK: 
%[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK: %[[VAL_16:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor> to memref +// CHECK: %[[VAL_17:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor> to memref +// CHECK: %[[VAL_18:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK: %[[VAL_19:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[VAL_20:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_3]]] : memref -// CHECK: %[[VAL_21:.*]] = scf.for %[[VAL_22:.*]] = %[[VAL_19]] to %[[VAL_20]] step %[[VAL_3]] iter_args(%[[VAL_23:.*]] = %[[VAL_8]]) -> (tensor>) { +// CHECK: %[[VAL_21:.*]] = scf.for %[[VAL_22:.*]] = %[[VAL_19]] to %[[VAL_20]] step %[[VAL_3]] iter_args(%[[VAL_23:.*]] = %[[VAL_8]]) -> (tensor>) { // CHECK: %[[VAL_24:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_22]]] : memref -// CHECK: %[[VAL_25:.*]], %[[VAL_26:.*]], %[[VAL_27:.*]], %[[VAL_28:.*]] = sparse_tensor.expand %[[VAL_8]] : tensor> to memref, memref, memref +// CHECK: %[[VAL_25:.*]], %[[VAL_26:.*]], %[[VAL_27:.*]], %[[VAL_28:.*]] = sparse_tensor.expand %[[VAL_8]] : tensor> to memref, memref, memref // CHECK: %[[VAL_29:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_22]]] : memref // CHECK: %[[VAL_30:.*]] = arith.addi %[[VAL_22]], %[[VAL_3]] : index // CHECK: %[[VAL_31:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_30]]] : memref // CHECK: %[[VAL_32:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[VAL_33:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_3]]] : memref -// CHECK: %[[VAL_34:.*]]:4 = scf.while (%[[VAL_35:.*]] = %[[VAL_29]], %[[VAL_36:.*]] = %[[VAL_32]], %[[VAL_37:.*]] = %[[VAL_28]], %[[VAL_38:.*]] = %[[VAL_23]]) : (index, index, index, tensor>) -> (index, index, index, tensor>) { +// CHECK: %[[VAL_34:.*]]:4 = scf.while (%[[VAL_35:.*]] = %[[VAL_29]], %[[VAL_36:.*]] = %[[VAL_32]], %[[VAL_37:.*]] = %[[VAL_28]], %[[VAL_38:.*]] = %[[VAL_23]]) : (index, index, index, tensor>) -> (index, index, index, tensor>) { // CHECK: %[[VAL_39:.*]] = arith.cmpi ult, %[[VAL_35]], %[[VAL_31]] : index // CHECK: %[[VAL_40:.*]] = arith.cmpi ult, %[[VAL_36]], %[[VAL_33]] : index // CHECK: %[[VAL_41:.*]] = arith.andi %[[VAL_39]], %[[VAL_40]] : i1 -// CHECK: scf.condition(%[[VAL_41]]) %[[VAL_35]], %[[VAL_36]], %[[VAL_37]], %[[VAL_38]] : index, index, index, tensor> +// CHECK: scf.condition(%[[VAL_41]]) %[[VAL_35]], %[[VAL_36]], %[[VAL_37]], %[[VAL_38]] : index, index, index, tensor> // CHECK: } do { -// CHECK: ^bb0(%[[VAL_42:.*]]: index, %[[VAL_43:.*]]: index, %[[VAL_44:.*]]: index, %[[VAL_45:.*]]: tensor>): +// CHECK: ^bb0(%[[VAL_42:.*]]: index, %[[VAL_43:.*]]: index, %[[VAL_44:.*]]: index, %[[VAL_45:.*]]: tensor>): // CHECK: %[[VAL_46:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_42]]] : memref // CHECK: %[[VAL_47:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_43]]] : memref // CHECK: %[[VAL_48:.*]] = arith.cmpi ult, %[[VAL_47]], %[[VAL_46]] : index @@ -355,7 +355,7 @@ // CHECK: %[[VAL_50:.*]] = arith.cmpi eq, %[[VAL_46]], %[[VAL_49]] : index // CHECK: %[[VAL_51:.*]] = arith.cmpi eq, %[[VAL_47]], %[[VAL_49]] : index // CHECK: %[[VAL_52:.*]] = arith.andi %[[VAL_50]], %[[VAL_51]] : i1 -// CHECK: %[[VAL_53:.*]]:2 = scf.if %[[VAL_52]] -> (index, tensor>) { +// CHECK: %[[VAL_53:.*]]:2 = scf.if %[[VAL_52]] -> (index, tensor>) { // CHECK: %[[VAL_54:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_42]]] : memref // CHECK: 
%[[VAL_55:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_43]]] : memref // CHECK: %[[VAL_56:.*]] = arith.addi %[[VAL_43]], %[[VAL_3]] : index @@ -379,9 +379,9 @@ // CHECK: memref.store %[[VAL_65]], %[[VAL_25]]{{\[}}%[[VAL_61]]] : memref // CHECK: scf.yield %[[VAL_70:.*]] : index // CHECK: } -// CHECK: scf.yield %[[VAL_71:.*]], %[[VAL_45]] : index, tensor> +// CHECK: scf.yield %[[VAL_71:.*]], %[[VAL_45]] : index, tensor> // CHECK: } else { -// CHECK: scf.yield %[[VAL_44]], %[[VAL_45]] : index, tensor> +// CHECK: scf.yield %[[VAL_44]], %[[VAL_45]] : index, tensor> // CHECK: } // CHECK: %[[VAL_72:.*]] = arith.cmpi eq, %[[VAL_46]], %[[VAL_49]] : index // CHECK: %[[VAL_73:.*]] = arith.addi %[[VAL_42]], %[[VAL_3]] : index @@ -389,13 +389,13 @@ // CHECK: %[[VAL_75:.*]] = arith.cmpi eq, %[[VAL_47]], %[[VAL_49]] : index // CHECK: %[[VAL_76:.*]] = arith.addi %[[VAL_43]], %[[VAL_3]] : index // CHECK: %[[VAL_77:.*]] = arith.select %[[VAL_75]], %[[VAL_76]], %[[VAL_43]] : index -// CHECK: scf.yield %[[VAL_74]], %[[VAL_77]], %[[VAL_78:.*]]#0, %[[VAL_78]]#1 : index, index, index, tensor> +// CHECK: scf.yield %[[VAL_74]], %[[VAL_77]], %[[VAL_78:.*]]#0, %[[VAL_78]]#1 : index, index, index, tensor> // CHECK: } -// CHECK: %[[VAL_79:.*]] = sparse_tensor.compress %[[VAL_25]], %[[VAL_26]], %[[VAL_27]], %[[VAL_80:.*]]#2 into %[[VAL_80]]#3{{\[}}%[[VAL_24]]] : memref, memref, memref, tensor> -// CHECK: scf.yield %[[VAL_79]] : tensor> +// CHECK: %[[VAL_79:.*]] = sparse_tensor.compress %[[VAL_25]], %[[VAL_26]], %[[VAL_27]], %[[VAL_80:.*]]#2 into %[[VAL_80]]#3{{\[}}%[[VAL_24]]] : memref, memref, memref, tensor> +// CHECK: scf.yield %[[VAL_79]] : tensor> // CHECK: } -// CHECK: %[[VAL_81:.*]] = sparse_tensor.load %[[VAL_82:.*]] hasInserts : tensor> -// CHECK: return %[[VAL_81]] : tensor> +// CHECK: %[[VAL_81:.*]] = sparse_tensor.load %[[VAL_82:.*]] hasInserts : tensor> +// CHECK: return %[[VAL_81]] : tensor> // CHECK: } func.func @matmat(%arga: tensor, %argb: tensor) -> tensor { diff --git a/mlir/test/Dialect/SparseTensor/sparse_outbuf.mlir b/mlir/test/Dialect/SparseTensor/sparse_outbuf.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_outbuf.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_outbuf.mlir @@ -1,6 +1,6 @@ // RUN: mlir-opt %s -sparsification | FileCheck %s -#SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }> +#SV = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }> #trait = { indexing_maps = [ diff --git a/mlir/test/Dialect/SparseTensor/sparse_pack.mlir b/mlir/test/Dialect/SparseTensor/sparse_pack.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_pack.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_pack.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s --canonicalize --post-sparsification-rewrite="enable-runtime-library=false" --sparse-tensor-codegen -cse | FileCheck %s #COO = #sparse_tensor.encoding<{ - dimLevelType = ["compressed-nu", "singleton"], + lvlTypes = ["compressed-nu", "singleton"], crdWidth=32 }> diff --git a/mlir/test/Dialect/SparseTensor/sparse_parallel.mlir b/mlir/test/Dialect/SparseTensor/sparse_parallel.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_parallel.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_parallel.mlir @@ -10,15 +10,15 @@ // RUN: FileCheck %s --check-prefix=CHECK-PAR4 #DenseMatrix = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "dense" ] + lvlTypes = [ "dense", "dense" ] }> #SparseMatrix = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed" ] + lvlTypes = [ "compressed", "compressed" ] }> #CSR = #sparse_tensor.encoding<{ - dimLevelType 
= [ "dense", "compressed" ] + lvlTypes = [ "dense", "compressed" ] }> #trait_dd = { diff --git a/mlir/test/Dialect/SparseTensor/sparse_parallel_reduce.mlir b/mlir/test/Dialect/SparseTensor/sparse_parallel_reduce.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_parallel_reduce.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_parallel_reduce.mlir @@ -2,7 +2,7 @@ // RUN: FileCheck %s #CSR = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ] + lvlTypes = [ "dense", "compressed" ] }> #trait_matvec = { @@ -15,7 +15,7 @@ doc = "x(i) += A(i,j) * b(j)" } // CHECK-LABEL: func.func @matvec( -// CHECK-SAME: %[[TMP_arg0:.*]]: tensor<16x32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>>, +// CHECK-SAME: %[[TMP_arg0:.*]]: tensor<16x32xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>, // CHECK-SAME: %[[TMP_arg1:.*]]: tensor<32xf32>, // CHECK-SAME: %[[TMP_arg2:.*]]: tensor<16xf32>) -> tensor<16xf32> { // CHECK-DAG: %[[TMP_c16:.*]] = arith.constant 16 : index diff --git a/mlir/test/Dialect/SparseTensor/sparse_perm.mlir b/mlir/test/Dialect/SparseTensor/sparse_perm.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_perm.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_perm.mlir @@ -2,7 +2,7 @@ // RUN: mlir-opt %s -sparsification | FileCheck %s #X = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "dense", "dense" ], + lvlTypes = [ "dense", "dense", "dense" ], dimOrdering = affine_map<(i,j,k) -> (k,i,j)> }> diff --git a/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir b/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir @@ -4,7 +4,7 @@ // RUN: FileCheck %s --check-prefix=CHECK-MIR #X = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "dense", "dense" ], + lvlTypes = [ "dense", "dense", "dense" ], dimOrdering = affine_map<(i,j,k) -> (k,i,j)> }> diff --git a/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir b/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir @@ -3,8 +3,8 @@ // RUN: mlir-opt %s --post-sparsification-rewrite="enable-runtime-library=false enable-convert=false" \ // RUN: --cse --canonicalize | FileCheck %s --check-prefix=CHECK-RWT -#SparseVector = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }> -#SparseMatrix = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }> +#SparseVector = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }> +#SparseMatrix = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> // // roundtrip: @@ -62,7 +62,7 @@ // CHECK-RWT: } // CHECK-RWT: %[[NT1:.*]] = sparse_tensor.load %[[RET]] hasInserts // CHECK-RWT-NOT: sparse_tensor.convert -// CHECK-RWT: return %[[NT1]] : tensor<10x10xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> +// CHECK-RWT: return %[[NT1]] : tensor<10x10xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> // func.func @sparse_expand(%arg0: tensor<100xf64, #SparseVector>) -> tensor<10x10xf64, #SparseMatrix> { %0 = tensor.expand_shape %arg0 [[0, 1]] : @@ -135,7 +135,7 @@ // CHECK-RWT: } // CHECK-RWT: %[[NT1:.*]] = sparse_tensor.load %[[RET]] hasInserts // CHECK-RWT-NOT: sparse_tensor.convert -// CHECK-RWT: return %[[NT1]] : tensor<100xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> +// CHECK-RWT: return %[[NT1]] : 
tensor<100xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> // func.func @sparse_collapse(%arg0: tensor<10x10xf64, #SparseMatrix>) -> tensor<100xf64, #SparseVector> { %0 = tensor.collapse_shape %arg0 [[0, 1]] : @@ -210,7 +210,7 @@ // CHECK-RWT: } // CHECK-RWT: %[[NT1:.*]] = sparse_tensor.load %[[RET]] hasInserts // CHECK-RWT-NOT: sparse_tensor.convert -// CHECK-RWT: return %[[NT1]] : tensor> +// CHECK-RWT: return %[[NT1]] : tensor> // func.func @dynamic_sparse_expand(%arg0: tensor) -> tensor { %0 = tensor.expand_shape %arg0 [[0, 1]] : @@ -292,7 +292,7 @@ // CHECK-RWT: } // CHECK-RWT: %[[NT1:.*]] = sparse_tensor.load %[[RET]] hasInserts // CHECK-RWT-NOT: sparse_tensor.convert -// CHECK-RWT: return %[[NT1]] : tensor> +// CHECK-RWT: return %[[NT1]] : tensor> // func.func @dynamic_sparse_collapse(%arg0: tensor<10x?xf64, #SparseMatrix>) -> tensor { %0 = tensor.collapse_shape %arg0 [[0, 1]] : diff --git a/mlir/test/Dialect/SparseTensor/sparse_reshape_dot.mlir b/mlir/test/Dialect/SparseTensor/sparse_reshape_dot.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_reshape_dot.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_reshape_dot.mlir @@ -1,12 +1,12 @@ // RUN: mlir-opt %s --linalg-generalize-named-ops --sparsification --cse --canonicalize | FileCheck %s -#COO_2D = #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ], posWidth = 32, crdWidth = 32 }> -#COO_3D = #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton-nu", "singleton" ], posWidth = 32, crdWidth = 32 }> +#COO_2D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ], posWidth = 32, crdWidth = 32 }> +#COO_3D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ], posWidth = 32, crdWidth = 32 }> // CHECK-LABEL: func.func @sparse_reshape_fused( // CHECK-SAME: %[[VAL_0:.*]]: tensor<5x6xf32>, -// CHECK-SAME: %[[VAL_1:.*]]: tensor<6x2x3xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton-nu", "singleton" ], posWidth = 32, crdWidth = 32 }>>) -> tensor { +// CHECK-SAME: %[[VAL_1:.*]]: tensor<6x2x3xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ], posWidth = 32, crdWidth = 32 }>>) -> tensor { // CHECK-DAG: %[[VAL_2:.*]] = arith.constant false // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 5 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 3 : index diff --git a/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir b/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/generate-test-checks.py // RUN: mlir-opt %s -sparsification | FileCheck %s -#SparseMatrix = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }> +#SparseMatrix = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> // A contrived example that demonstrates the many different ways // in which scalar values can be involved in a sparse kernel diff --git a/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir b/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir @@ -1,6 +1,6 @@ // RUN: mlir-opt %s --test-tensor-copy-insertion --pre-sparsification-rewrite --sparsification --cse | FileCheck %s -#SM = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }> +#SM = #sparse_tensor.encoding<{ 
lvlTypes = [ "compressed", "compressed" ] }> #trait_matmul = { indexing_maps = [ @@ -57,7 +57,7 @@ } // CHECK-LABEL: func.func @sampled_dd_unfused( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<8x8xf64>, // CHECK-SAME: %[[VAL_2:.*]]: tensor<8x8xf64>) -> tensor<8x8xf64> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 8 : index @@ -123,9 +123,9 @@ } // CHECK-LABEL: func.func @sparse_sampled_dd_unfused( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<8x8xf64>, -// CHECK-SAME: %[[VAL_2:.*]]: tensor<8x8xf64>) -> tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> { +// CHECK-SAME: %[[VAL_2:.*]]: tensor<8x8xf64>) -> tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 8 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index @@ -133,19 +133,19 @@ // CHECK-DAG: %[[VAL_7:.*]] = arith.constant true // CHECK-DAG: %[[VAL_8:.*]] = arith.constant dense<0.000000e+00> : tensor<8x8xf64> // CHECK-DAG: %[[VAL_9:.*]] = bufferization.alloc_tensor() copy(%[[VAL_8]]) {bufferization.escape = [false]} : tensor<8x8xf64> -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<8x8xf64> // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<8x8xf64> -// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_17:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_15:.*]] = 
sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_17:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref // CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_19:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_5]]] : memref -// CHECK: %[[VAL_20:.*]] = scf.for %[[VAL_21:.*]] = %[[VAL_18]] to %[[VAL_19]] step %[[VAL_5]] iter_args(%[[VAL_22:.*]] = %[[VAL_10]]) -> (tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) { +// CHECK: %[[VAL_20:.*]] = scf.for %[[VAL_21:.*]] = %[[VAL_18]] to %[[VAL_19]] step %[[VAL_5]] iter_args(%[[VAL_22:.*]] = %[[VAL_10]]) -> (tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>) { // CHECK: %[[VAL_23:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_21]]] : memref -// CHECK: %[[VAL_24:.*]], %[[VAL_25:.*]], %[[VAL_26:.*]], %[[VAL_27:.*]] = sparse_tensor.expand %[[VAL_10]] : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref, memref, memref +// CHECK: %[[VAL_24:.*]], %[[VAL_25:.*]], %[[VAL_26:.*]], %[[VAL_27:.*]] = sparse_tensor.expand %[[VAL_10]] : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref, memref, memref // CHECK: %[[VAL_28:.*]] = scf.for %[[VAL_29:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] iter_args(%[[VAL_30:.*]] = %[[VAL_27]]) -> (index) { // CHECK: %[[VAL_31:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_23]], %[[VAL_29]]] : memref<8x8xf64> // CHECK: %[[VAL_32:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_21]]] : memref @@ -174,11 +174,11 @@ // CHECK: } // CHECK: scf.yield %[[VAL_50:.*]] : index // CHECK: } -// CHECK: %[[VAL_51:.*]] = sparse_tensor.compress %[[VAL_24]], %[[VAL_25]], %[[VAL_26]], %[[VAL_52:.*]] into %[[VAL_22]]{{\[}}%[[VAL_23]]] : memref, memref, memref, tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> -// CHECK: scf.yield %[[VAL_51]] : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> +// CHECK: %[[VAL_51:.*]] = sparse_tensor.compress %[[VAL_24]], %[[VAL_25]], %[[VAL_26]], %[[VAL_52:.*]] into %[[VAL_22]]{{\[}}%[[VAL_23]]] : memref, memref, memref, tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> +// CHECK: scf.yield %[[VAL_51]] : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> // CHECK: } -// CHECK: %[[VAL_53:.*]] = sparse_tensor.load %[[VAL_54:.*]] hasInserts : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> -// CHECK: return %[[VAL_53]] : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> +// CHECK: %[[VAL_53:.*]] = sparse_tensor.load %[[VAL_54:.*]] hasInserts : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> +// CHECK: return %[[VAL_53]] : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> // CHECK: } func.func @sparse_sampled_dd_unfused(%args: tensor<8x8xf64, #SM>, %arga: tensor<8x8xf64>, diff --git 
a/mlir/test/Dialect/SparseTensor/sparse_sddmm_org.mlir b/mlir/test/Dialect/SparseTensor/sparse_sddmm_org.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_sddmm_org.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_sddmm_org.mlir @@ -1,6 +1,6 @@ // RUN: mlir-opt %s --pre-sparsification-rewrite --sparsification --cse | FileCheck %s -#SM = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }> +#SM = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> #trait_matmul = { indexing_maps = [ @@ -21,27 +21,27 @@ } // CHECK-LABEL: func.func @sparse_sampled_dd_unfused( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>, +// CHECK-SAME: %[[VAL_0:.*]]: tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<8x8xf64>, -// CHECK-SAME: %[[VAL_2:.*]]: tensor<8x8xf64>) -> tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> { +// CHECK-SAME: %[[VAL_2:.*]]: tensor<8x8xf64>) -> tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 8 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant false // CHECK-DAG: %[[VAL_7:.*]] = arith.constant true -// CHECK: %[[VAL_8:.*]] = bufferization.alloc_tensor() : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> +// CHECK: %[[VAL_8:.*]] = bufferization.alloc_tensor() : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> // CHECK: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<8x8xf64> // CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<8x8xf64> -// CHECK: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK: %[[VAL_14:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK: %[[VAL_15:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref +// CHECK: %[[VAL_14:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref 
+// CHECK: %[[VAL_15:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_5]]] : memref -// CHECK: %[[VAL_18:.*]] = scf.for %[[VAL_19:.*]] = %[[VAL_16]] to %[[VAL_17]] step %[[VAL_5]] iter_args(%[[VAL_20:.*]] = %[[VAL_8]]) -> (tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) { +// CHECK: %[[VAL_18:.*]] = scf.for %[[VAL_19:.*]] = %[[VAL_16]] to %[[VAL_17]] step %[[VAL_5]] iter_args(%[[VAL_20:.*]] = %[[VAL_8]]) -> (tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>) { // CHECK: %[[VAL_21:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_19]]] : memref -// CHECK: %[[VAL_22:.*]], %[[VAL_23:.*]], %[[VAL_24:.*]], %[[VAL_25:.*]] = sparse_tensor.expand %[[VAL_8]] : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref, memref, memref +// CHECK: %[[VAL_22:.*]], %[[VAL_23:.*]], %[[VAL_24:.*]], %[[VAL_25:.*]] = sparse_tensor.expand %[[VAL_8]] : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref, memref, memref // CHECK: %[[VAL_26:.*]] = scf.for %[[VAL_27:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] iter_args(%[[VAL_28:.*]] = %[[VAL_25]]) -> (index) { // CHECK: %[[VAL_29:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_21]], %[[VAL_27]]] : memref<8x8xf64> // CHECK: %[[VAL_30:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_19]]] : memref @@ -70,11 +70,11 @@ // CHECK: } {"Emitted from" = "linalg.generic"} // CHECK: scf.yield %[[VAL_48:.*]] : index // CHECK: } {"Emitted from" = "linalg.generic"} -// CHECK: %[[VAL_49:.*]] = sparse_tensor.compress %[[VAL_22]], %[[VAL_23]], %[[VAL_24]], %[[VAL_50:.*]] into %[[VAL_20]]{{\[}}%[[VAL_21]]] : memref, memref, memref, tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> -// CHECK: scf.yield %[[VAL_49]] : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> +// CHECK: %[[VAL_49:.*]] = sparse_tensor.compress %[[VAL_22]], %[[VAL_23]], %[[VAL_24]], %[[VAL_50:.*]] into %[[VAL_20]]{{\[}}%[[VAL_21]]] : memref, memref, memref, tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> +// CHECK: scf.yield %[[VAL_49]] : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> // CHECK: } {"Emitted from" = "linalg.generic"} -// CHECK: %[[VAL_51:.*]] = sparse_tensor.load %[[VAL_52:.*]] hasInserts : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> -// CHECK: return %[[VAL_51]] : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> +// CHECK: %[[VAL_51:.*]] = sparse_tensor.load %[[VAL_52:.*]] hasInserts : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> +// CHECK: return %[[VAL_51]] : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> // CHECK: } func.func @sparse_sampled_dd_unfused(%args: tensor<8x8xf64, #SM>, %arga: tensor<8x8xf64>, diff --git a/mlir/test/Dialect/SparseTensor/sparse_storage.mlir b/mlir/test/Dialect/SparseTensor/sparse_storage.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_storage.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_storage.mlir @@ -1,13 +1,13 @@ // RUN: mlir-opt %s 
-sparsification= | FileCheck %s #SparseVector64 = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed" ], + lvlTypes = [ "compressed" ], posWidth = 64, crdWidth = 64 }> #SparseVector32 = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed" ], + lvlTypes = [ "compressed" ], posWidth = 32, crdWidth = 32 }> diff --git a/mlir/test/Dialect/SparseTensor/sparse_transpose.mlir b/mlir/test/Dialect/SparseTensor/sparse_transpose.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_transpose.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_transpose.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s -sparsification | FileCheck %s #DCSR = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed" ] + lvlTypes = [ "compressed", "compressed" ] }> #transpose_trait = { @@ -16,34 +16,34 @@ // TODO: improve auto-conversion followed by yield // CHECK-LABEL: func.func @sparse_transpose_auto( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) -> tensor<4x3xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> { +// CHECK-SAME: %[[VAL_0:.*]]: tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>) -> tensor<4x3xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> { // CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_3:.*]] = bufferization.alloc_tensor() : tensor<4x3xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> -// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.convert %[[VAL_0]] : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_4]] {level = 0 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_4]] {level = 0 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_4]] {level = 1 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_4]] {level = 1 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_4]] : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref +// CHECK-DAG: %[[VAL_3:.*]] = bufferization.alloc_tensor() : tensor<4x3xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> +// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.convert %[[VAL_0]] : tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> +// CHECK-DAG: %[[VAL_5:.*]] = 
sparse_tensor.positions %[[VAL_4]] {level = 0 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_4]] {level = 0 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_4]] {level = 1 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_4]] {level = 1 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_4]] : tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_1]]] : memref // CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_2]]] : memref -// CHECK: %[[VAL_12:.*]] = scf.for %[[VAL_13:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_2]] iter_args(%[[VAL_14:.*]] = %[[VAL_3]]) -> (tensor<4x3xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) { +// CHECK: %[[VAL_12:.*]] = scf.for %[[VAL_13:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_2]] iter_args(%[[VAL_14:.*]] = %[[VAL_3]]) -> (tensor<4x3xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>) { // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_13]]] : memref // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_13]]] : memref // CHECK: %[[VAL_17:.*]] = arith.addi %[[VAL_13]], %[[VAL_2]] : index // CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_17]]] : memref -// CHECK: %[[VAL_19:.*]] = scf.for %[[VAL_20:.*]] = %[[VAL_16]] to %[[VAL_18]] step %[[VAL_2]] iter_args(%[[VAL_21:.*]] = %[[VAL_14]]) -> (tensor<4x3xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) { +// CHECK: %[[VAL_19:.*]] = scf.for %[[VAL_20:.*]] = %[[VAL_16]] to %[[VAL_18]] step %[[VAL_2]] iter_args(%[[VAL_21:.*]] = %[[VAL_14]]) -> (tensor<4x3xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>) { // CHECK: %[[VAL_22:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_20]]] : memref // CHECK: %[[VAL_23:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_20]]] : memref -// CHECK: %[[VAL_24:.*]] = sparse_tensor.insert %[[VAL_23]] into %[[VAL_21]]{{\[}}%[[VAL_15]], %[[VAL_22]]] : tensor<4x3xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> -// CHECK: scf.yield %[[VAL_24]] : tensor<4x3xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> +// CHECK: %[[VAL_24:.*]] = sparse_tensor.insert %[[VAL_23]] into %[[VAL_21]]{{\[}}%[[VAL_15]], %[[VAL_22]]] : tensor<4x3xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> +// CHECK: scf.yield %[[VAL_24]] : tensor<4x3xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> // CHECK: } -// CHECK: scf.yield %[[VAL_25:.*]] : tensor<4x3xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> +// CHECK: scf.yield %[[VAL_25:.*]] : tensor<4x3xf64, #sparse_tensor.encoding<{ lvlTypes = [ 
"compressed", "compressed" ] }>> // CHECK: } -// CHECK: %[[VAL_26:.*]] = sparse_tensor.load %[[VAL_27:.*]] hasInserts : tensor<4x3xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> -// CHECK: bufferization.dealloc_tensor %[[VAL_4]] : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> -// CHECK: return %[[VAL_26]] : tensor<4x3xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> +// CHECK: %[[VAL_26:.*]] = sparse_tensor.load %[[VAL_27:.*]] hasInserts : tensor<4x3xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> +// CHECK: bufferization.dealloc_tensor %[[VAL_4]] : tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> +// CHECK: return %[[VAL_26]] : tensor<4x3xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> // CHECK: } func.func @sparse_transpose_auto(%arga: tensor<3x4xf64, #DCSR>) -> tensor<4x3xf64, #DCSR> { diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_vector.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_vector.mlir @@ -7,7 +7,7 @@ // RUN: mlir-opt %s -sparsification -cse -sparse-vectorization="vl=4 enable-vla-vectorization=true" -cse -split-input-file | \ // RUN: FileCheck %s --check-prefix=CHECK-VEC4-SVE -#DenseVector = #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }> +#DenseVector = #sparse_tensor.encoding<{ lvlTypes = [ "dense" ] }> #trait_scale_d = { indexing_maps = [ @@ -86,7 +86,7 @@ // ----- #SparseVector = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed" ], + lvlTypes = [ "compressed" ], posWidth = 32, crdWidth = 32 }> @@ -209,7 +209,7 @@ // ----- -#DenseVector = #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }> +#DenseVector = #sparse_tensor.encoding<{ lvlTypes = [ "dense" ] }> #trait_reduction_d = { indexing_maps = [ @@ -309,7 +309,7 @@ // ----- #SparseMatrix = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], posWidth = 32, crdWidth = 32 }> @@ -448,7 +448,7 @@ // ----- -#SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["dense","compressed"]}> +#SparseMatrix = #sparse_tensor.encoding<{lvlTypes = ["dense","compressed"]}> #trait_affine = { indexing_maps = [ diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s -sparsification -cse -sparse-vectorization="vl=8" -cse | \ // RUN: FileCheck %s -#SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["dense","compressed"]}> +#SparseMatrix = #sparse_tensor.encoding<{lvlTypes = ["dense","compressed"]}> #trait = { indexing_maps = [ @@ -18,19 +18,19 @@ // // CHECK-LABEL: func.func @sparse_matrix_sum( // CHECK-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-SAME: %[[VAL_1:.*]]: tensor<64x32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>>, -// CHECK-SAME: %[[VAL_2:.*]]: tensor<64x32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>>) -> tensor { +// CHECK-SAME: %[[VAL_1:.*]]: tensor<64x32xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>, +// CHECK-SAME: 
%[[VAL_2:.*]]: tensor<64x32xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>) -> tensor { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 8 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant dense<0.000000e+00> : vector<8xf64> // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 64 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<64x32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<64x32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<64x32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 1 : index} : tensor<64x32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 1 : index} : tensor<64x32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<64x32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<64x32xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<64x32xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<64x32xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 1 : index} : tensor<64x32xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref +// CHECK: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 1 : index} : tensor<64x32xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref +// CHECK: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<64x32xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> to memref // CHECK: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_14]][] : memref // CHECK: %[[VAL_16:.*]] = scf.for %[[VAL_17:.*]] = %[[VAL_6]] to %[[VAL_5]] step %[[VAL_7]] iter_args(%[[VAL_18:.*]] = %[[VAL_15]]) -> (f64) { diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector_concat.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_concat.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_vector_concat.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_vector_concat.mlir @@ -1,16 +1,16 @@ // RUN: mlir-opt %s --sparse-compiler="enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" #MAT_D_C = #sparse_tensor.encoding<{ - dimLevelType = ["dense", "compressed"] + lvlTypes = ["dense", "compressed"] }> #MAT_C_C_P = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed" ], + lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(i,j) -> (j,i)> }> #MAT_C_D_P = #sparse_tensor.encoding<{ - dimLevelType = [ 
"compressed", "dense" ], + lvlTypes = [ "compressed", "dense" ], dimOrdering = affine_map<(i,j) -> (j,i)> }> diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir @@ -4,7 +4,7 @@ // NOTE: Assertions have been autogenerated by utils/generate-test-checks.py #SparseVector = #sparse_tensor.encoding<{ - dimLevelType = ["compressed"] + lvlTypes = ["compressed"] }> #trait_1d = { @@ -17,7 +17,7 @@ } // CHECK-LABEL: func.func @sparse_index_1d_conj( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<8xi64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<8xi64> { +// CHECK-SAME: %[[VAL_0:.*]]: tensor<8xi64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>) -> tensor<8xi64> { // CHECK-DAG: %[[VAL_1:.*]] = arith.constant 8 : index // CHECK-DAG: %[[VAL_2:.*]] = arith.constant dense<0> : vector<8xi64> // CHECK-DAG: %[[VAL_3:.*]] = arith.constant dense<0> : vector<8xindex> @@ -25,9 +25,9 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_7:.*]] = tensor.empty() : tensor<8xi64> -// CHECK: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8xi64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8xi64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8xi64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8xi64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8xi64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8xi64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref // CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_7]] : memref<8xi64> // CHECK: linalg.fill ins(%[[VAL_4]] : i64) outs(%[[VAL_11]] : memref<8xi64>) // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_5]]] : memref @@ -59,7 +59,7 @@ } // CHECK-LABEL: func.func @sparse_index_1d_disj( -// CHECK-SAME: %[[VAL_0:.*]]: tensor<8xi64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<8xi64> { +// CHECK-SAME: %[[VAL_0:.*]]: tensor<8xi64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>) -> tensor<8xi64> { // CHECK-DAG: %[[VAL_1:.*]] = arith.constant 8 : index // CHECK-DAG: %[[VAL_2:.*]] = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7]> : vector<8xindex> // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : i64 @@ -67,9 +67,9 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant true // CHECK-DAG: %[[VAL_7:.*]] = tensor.empty() : tensor<8xi64> -// CHECK: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8xi64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8xi64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK: %[[VAL_10:.*]] = 
sparse_tensor.values %[[VAL_0]] : tensor<8xi64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8xi64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8xi64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8xi64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>> to memref // CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_7]] : memref<8xi64> // CHECK: linalg.fill ins(%[[VAL_3]] : i64) outs(%[[VAL_11]] : memref<8xi64>) // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector_mv.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_mv.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_vector_mv.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_vector_mv.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s -sparse-compiler="vl=8" | FileCheck %s #Dense = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "dense" ] + lvlTypes = [ "dense", "dense" ] }> #matvec = { diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector_ops.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_ops.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_vector_ops.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_vector_ops.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s -sparsification -cse -sparse-vectorization="vl=8" -cse | \ // RUN: FileCheck %s -#DenseVector = #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }> +#DenseVector = #sparse_tensor.encoding<{ lvlTypes = [ "dense" ] }> #trait = { indexing_maps = [ diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector_peeled.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_peeled.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_vector_peeled.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_vector_peeled.mlir @@ -2,7 +2,7 @@ // RUN: FileCheck %s #SparseVector = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed" ], + lvlTypes = [ "compressed" ], posWidth = 32, crdWidth = 32 }> diff --git a/mlir/test/Dialect/SparseTensor/specifier_to_llvm.mlir b/mlir/test/Dialect/SparseTensor/specifier_to_llvm.mlir --- a/mlir/test/Dialect/SparseTensor/specifier_to_llvm.mlir +++ b/mlir/test/Dialect/SparseTensor/specifier_to_llvm.mlir @@ -1,6 +1,6 @@ // RUN: mlir-opt %s -sparse-storage-specifier-to-llvm --cse --canonicalize | FileCheck %s -#CSR = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}> +#CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> // CHECK-LABEL: func.func @sparse_metadata_init() -> !llvm.struct<(array<2 x i64>, array<3 x i64>)> { // CHECK: %[[VAL_0:.*]] = arith.constant 0 : i64 diff --git a/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir b/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir --- a/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir +++ b/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir @@ -28,15 +28,15 @@ // // CHECK-ON-LABEL: func.func @sparse_product_reduction_dense_sparse( // CHECK-ON-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { +// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-ON-DAG: %[[VAL_2:.*]] = arith.constant 8 : index // CHECK-ON-DAG: %[[VAL_3:.*]] = arith.constant dense<1.000000e+00> : vector<8xf64> // CHECK-ON-DAG: %[[VAL_4:.*]] = arith.constant 
dense<0.000000e+00> : vector<8xf64> // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-ON-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-ON-DAG: %[[VAL_7:.*]] = tensor.dim %[[VAL_1]], %[[VAL_5]] : tensor> -// CHECK-ON: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor> to memref -// CHECK-ON: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-ON-DAG: %[[VAL_7:.*]] = tensor.dim %[[VAL_1]], %[[VAL_5]] : tensor> +// CHECK-ON: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor> to memref +// CHECK-ON: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-ON: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_10]][] : memref // CHECK-ON: %[[VAL_12:.*]] = scf.for %[[VAL_13:.*]] = %[[VAL_5]] to %[[VAL_7]] step %[[VAL_6]] iter_args(%[[VAL_14:.*]] = %[[VAL_11]]) -> (f64) { @@ -62,12 +62,12 @@ // // CHECK-OFF-LABEL: func.func @sparse_product_reduction_dense_sparse( // CHECK-OFF-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { +// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-OFF-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-OFF: %[[VAL_4:.*]] = tensor.dim %[[VAL_1]], %[[VAL_2]] : tensor> -// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor> to memref -// CHECK-OFF: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-OFF: %[[VAL_4:.*]] = tensor.dim %[[VAL_1]], %[[VAL_2]] : tensor> +// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor> to memref +// CHECK-OFF: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-OFF: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_7]][] : memref // CHECK-OFF: %[[VAL_9:.*]] = scf.for %[[VAL_10:.*]] = %[[VAL_2]] to %[[VAL_4]] step %[[VAL_3]] iter_args(%[[VAL_11:.*]] = %[[VAL_8]]) -> (f64) { @@ -86,7 +86,7 @@ // CHECK-OFF: return %[[VAL_22]] : tensor // CHECK-OFF: } -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["dense","compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["dense","compressed"]}> #trait = { indexing_maps = [ @@ -115,15 +115,15 @@ // // CHECK-ON-LABEL: func.func @sparse_product_reduction_sparse_sparse( // CHECK-ON-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { +// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-ON-DAG: %[[VAL_2:.*]] = arith.constant 8 : index // CHECK-ON-DAG: %[[VAL_3:.*]] = arith.constant dense<1.000000e+00> : vector<8xf64> // CHECK-ON-DAG: %[[VAL_4:.*]] = arith.constant dense<0.000000e+00> : vector<8xf64> // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-ON-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-ON: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor> to memref -// CHECK-ON: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-ON: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor> to memref +// CHECK-ON: %[[VAL_9:.*]] = sparse_tensor.values 
%[[VAL_1]] : tensor> to memref // CHECK-ON: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_10]][] : memref // CHECK-ON: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref @@ -151,12 +151,12 @@ // // CHECK-OFF-LABEL: func.func @sparse_product_reduction_sparse_sparse( // CHECK-OFF-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { +// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-OFF-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor> to memref -// CHECK-OFF: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor> to memref +// CHECK-OFF: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-OFF: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_7]][] : memref // CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref @@ -176,7 +176,7 @@ // CHECK-OFF: %[[VAL_24:.*]] = bufferization.to_tensor %[[VAL_7]] : memref // CHECK-OFF: return %[[VAL_24]] : tensor // CHECK-OFF: } -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed","compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed","compressed"]}> #trait = { indexing_maps = [ @@ -211,13 +211,13 @@ // constant type for the pass-through value. 
// CHECK-ON-LABEL: func.func @sparse_reduction_ori( // CHECK-ON-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { +// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-ON-DAG: %[[VAL_2:.*]] = arith.constant 8 : index // CHECK-ON-DAG: %[[VAL_3:.*]] = arith.constant dense<0> : vector<8xi13> // CHECK-ON-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-ON: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref @@ -239,11 +239,11 @@ // // CHECK-OFF-LABEL: func.func @sparse_reduction_ori( // CHECK-OFF-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { +// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-OFF-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref @@ -257,7 +257,7 @@ // CHECK-OFF: %[[VAL_16:.*]] = bufferization.to_tensor %[[VAL_6]] : memref // CHECK-OFF: return %[[VAL_16]] : tensor // CHECK-OFF: } -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> #trait = { indexing_maps = [ @@ -289,13 +289,13 @@ // CHECK-ON-LABEL: func.func @sparse_reduction_ori_accumulator_on_rhs( // CHECK-ON-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { +// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-ON-DAG: %[[VAL_2:.*]] = arith.constant 8 : index // CHECK-ON-DAG: %[[VAL_3:.*]] = arith.constant dense<0> : vector<8xi13> // CHECK-ON-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-ON: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref @@ -317,11 +317,11 @@ // // CHECK-OFF-LABEL: func.func @sparse_reduction_ori_accumulator_on_rhs( // CHECK-OFF-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-OFF-SAME: 
%[[VAL_1:.*]]: tensor>) -> tensor { +// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-OFF-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref @@ -335,7 +335,7 @@ // CHECK-OFF: %[[VAL_16:.*]] = bufferization.to_tensor %[[VAL_6]] : memref // CHECK-OFF: return %[[VAL_16]] : tensor // CHECK-OFF: } -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> #trait = { indexing_maps = [ @@ -364,13 +364,13 @@ // // CHECK-ON-LABEL: func.func @sparse_reduction_subi( // CHECK-ON-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { +// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-ON-DAG: %[[VAL_2:.*]] = arith.constant 8 : index // CHECK-ON-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-ON-DAG: %[[VAL_4:.*]] = arith.constant dense<0> : vector<8xi32> // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-ON: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_3]]] : memref @@ -392,11 +392,11 @@ // // CHECK-OFF-LABEL: func.func @sparse_reduction_subi( // CHECK-OFF-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { +// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-OFF-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref @@ -410,7 +410,7 @@ // CHECK-OFF: %[[VAL_16:.*]] = bufferization.to_tensor %[[VAL_6]] : memref // CHECK-OFF: return %[[VAL_16]] : tensor // CHECK-OFF: } -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> #trait = { indexing_maps = [ @@ -441,13 +441,13 @@ // Check that we vectorize xor. 
// CHECK-ON-LABEL: func.func @sparse_reduction_xor( // CHECK-ON-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { +// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-ON-DAG: %[[VAL_2:.*]] = arith.constant 8 : index // CHECK-ON-DAG: %[[VAL_3:.*]] = arith.constant dense<0> : vector<8xi32> // CHECK-ON-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-ON: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref @@ -469,11 +469,11 @@ // // CHECK-OFF-LABEL: func.func @sparse_reduction_xor( // CHECK-OFF-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { +// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-OFF-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref @@ -488,7 +488,7 @@ // CHECK-OFF: return %[[VAL_16]] : tensor // CHECK-OFF: } -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> #trait = { indexing_maps = [ @@ -515,13 +515,13 @@ // Check that we vectorize and. 
// CHECK-ON-LABEL: func.func @sparse_reduction_and( // CHECK-ON-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { +// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-ON-DAG: %[[VAL_2:.*]] = arith.constant 8 : index // CHECK-ON-DAG: %[[VAL_3:.*]] = arith.constant dense<0> : vector<8xi32> // CHECK-ON-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-ON: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref @@ -543,11 +543,11 @@ // // CHECK-OFF-LABEL: func.func @sparse_reduction_and( // CHECK-OFF-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { +// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-OFF-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref @@ -562,7 +562,7 @@ // CHECK-OFF: return %[[VAL_16]] : tensor // CHECK-OFF: } -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> #trait = { indexing_maps = [ @@ -589,14 +589,14 @@ // Check that we vectorize muli. 
// CHECK-ON-LABEL: func.func @sparse_reduction_muli( // CHECK-ON-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { +// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-ON-DAG: %[[VAL_2:.*]] = arith.constant 8 : index // CHECK-ON-DAG: %[[VAL_3:.*]] = arith.constant dense<1> : vector<8xi32> // CHECK-ON-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant dense<0> : vector<8xi32> // CHECK-ON-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-ON: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-ON: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-ON: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_9]][] : memref // CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref @@ -618,11 +618,11 @@ // // CHECK-OFF-LABEL: func.func @sparse_reduction_muli( // CHECK-OFF-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { +// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-OFF-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref @@ -637,7 +637,7 @@ // CHECK-OFF: return %[[VAL_16]] : tensor // CHECK-OFF: } -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> #trait = { indexing_maps = [ @@ -664,13 +664,13 @@ // Check that we vectorize addi. 
// CHECK-ON-LABEL: func.func @sparse_reduction_addi( // CHECK-ON-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { +// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-ON-DAG: %[[VAL_2:.*]] = arith.constant 8 : index // CHECK-ON-DAG: %[[VAL_3:.*]] = arith.constant dense<0> : vector<8xi32> // CHECK-ON-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-ON: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref @@ -692,11 +692,11 @@ // // CHECK-OFF-LABEL: func.func @sparse_reduction_addi( // CHECK-OFF-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { +// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-OFF-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref @@ -711,7 +711,7 @@ // CHECK-OFF: return %[[VAL_16]] : tensor // CHECK-OFF: } -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> #trait = { indexing_maps = [ @@ -738,13 +738,13 @@ // Check that we vectorize subf. 
// CHECK-ON-LABEL: func.func @sparse_reduction_subf( // CHECK-ON-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { +// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-ON-DAG: %[[VAL_2:.*]] = arith.constant 8 : index // CHECK-ON-DAG: %[[VAL_3:.*]] = arith.constant dense<0.000000e+00> : vector<8xf32> // CHECK-ON-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-ON: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref @@ -766,11 +766,11 @@ // // CHECK-OFF-LABEL: func.func @sparse_reduction_subf( // CHECK-OFF-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { +// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-OFF-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref @@ -785,7 +785,7 @@ // CHECK-OFF: return %[[VAL_16]] : tensor // CHECK-OFF: } -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> #trait = { indexing_maps = [ @@ -812,13 +812,13 @@ // Check that we vectorize addf. 
// CHECK-ON-LABEL: func.func @sparse_reduction_addf( // CHECK-ON-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { +// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-ON-DAG: %[[VAL_2:.*]] = arith.constant 8 : index // CHECK-ON-DAG: %[[VAL_3:.*]] = arith.constant dense<0.000000e+00> : vector<8xf32> // CHECK-ON-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-ON: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref @@ -840,11 +840,11 @@ // // CHECK-OFF-LABEL: func.func @sparse_reduction_addf( // CHECK-OFF-SAME: %[[VAL_0:.*]]: tensor, -// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { +// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-OFF-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref -// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref +// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref @@ -859,7 +859,7 @@ // CHECK-OFF: return %[[VAL_16]] : tensor // CHECK-OFF: } -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> #trait = { indexing_maps = [ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir @@ -26,26 +26,26 @@ // REDEFINE: FileCheck %s // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} -#MAT_C_C = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> -#MAT_D_C = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}> -#MAT_C_D = #sparse_tensor.encoding<{dimLevelType = ["compressed", "dense"]}> +#MAT_C_C = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#MAT_D_C = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> +#MAT_C_D = #sparse_tensor.encoding<{lvlTypes = ["compressed", "dense"]}> #MAT_D_D = #sparse_tensor.encoding<{ - dimLevelType = ["dense", "dense"], + lvlTypes = ["dense", "dense"], dimOrdering = affine_map<(i,j) -> (j,i)> }> #MAT_C_C_P = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed" ], + lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(i,j) -> (j,i)> }> #MAT_C_D_P = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "dense" ], + lvlTypes = [ 
"compressed", "dense" ], dimOrdering = affine_map<(i,j) -> (j,i)> }> #MAT_D_C_P = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(i,j) -> (j,i)> }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir @@ -26,26 +26,26 @@ // REDEFINE: FileCheck %s // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} -#MAT_C_C = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> -#MAT_D_C = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}> -#MAT_C_D = #sparse_tensor.encoding<{dimLevelType = ["compressed", "dense"]}> +#MAT_C_C = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#MAT_D_C = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> +#MAT_C_D = #sparse_tensor.encoding<{lvlTypes = ["compressed", "dense"]}> #MAT_D_D = #sparse_tensor.encoding<{ - dimLevelType = ["dense", "dense"], + lvlTypes = ["dense", "dense"], dimOrdering = affine_map<(i,j) -> (j,i)> }> #MAT_C_C_P = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed" ], + lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(i,j) -> (j,i)> }> #MAT_C_D_P = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "dense" ], + lvlTypes = [ "compressed", "dense" ], dimOrdering = affine_map<(i,j) -> (j,i)> }> #MAT_D_C_P = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(i,j) -> (j,i)> }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir @@ -16,26 +16,26 @@ // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 reassociate-fp-reductions=true enable-index-optimizations=true" // RUN: %{compile} | %{run} -#MAT_C_C = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> -#MAT_D_C = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}> -#MAT_C_D = #sparse_tensor.encoding<{dimLevelType = ["compressed", "dense"]}> +#MAT_C_C = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#MAT_D_C = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> +#MAT_C_D = #sparse_tensor.encoding<{lvlTypes = ["compressed", "dense"]}> #MAT_D_D = #sparse_tensor.encoding<{ - dimLevelType = ["dense", "dense"], + lvlTypes = ["dense", "dense"], dimOrdering = affine_map<(i,j) -> (j,i)> }> #MAT_C_C_P = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed" ], + lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(i,j) -> (j,i)> }> #MAT_C_D_P = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "dense" ], + lvlTypes = [ "compressed", "dense" ], dimOrdering = affine_map<(i,j) -> (j,i)> }> #MAT_D_C_P = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(i,j) -> (j,i)> }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir 
b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir @@ -26,26 +26,26 @@ // REDEFINE: FileCheck %s // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} -#MAT_C_C = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> -#MAT_D_C = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}> -#MAT_C_D = #sparse_tensor.encoding<{dimLevelType = ["compressed", "dense"]}> +#MAT_C_C = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> +#MAT_D_C = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> +#MAT_C_D = #sparse_tensor.encoding<{lvlTypes = ["compressed", "dense"]}> #MAT_D_D = #sparse_tensor.encoding<{ - dimLevelType = ["dense", "dense"], + lvlTypes = ["dense", "dense"], dimOrdering = affine_map<(i,j) -> (j,i)> }> #MAT_C_C_P = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed" ], + lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(i,j) -> (j,i)> }> #MAT_C_D_P = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "dense" ], + lvlTypes = [ "compressed", "dense" ], dimOrdering = affine_map<(i,j) -> (j,i)> }> #MAT_D_C_P = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(i,j) -> (j,i)> }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir @@ -31,12 +31,12 @@ !Filename = !llvm.ptr #DenseMatrix = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "dense" ], + lvlTypes = [ "dense", "dense" ], dimOrdering = affine_map<(i,j) -> (i,j)> }> #SparseMatrix = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(i,j) -> (i,j)> }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir @@ -17,8 +17,8 @@ // UNSUPPORTED: target=aarch64{{.*}} -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> -#DenseVector = #sparse_tensor.encoding<{dimLevelType = ["dense"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> +#DenseVector = #sparse_tensor.encoding<{lvlTypes = ["dense"]}> #trait_vec_op = { indexing_maps = [ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir @@ -26,8 +26,8 @@ // REDEFINE: FileCheck %s // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> -#DenseVector = #sparse_tensor.encoding<{dimLevelType = ["dense"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> +#DenseVector = #sparse_tensor.encoding<{lvlTypes = ["dense"]}> #trait_vec_op = { indexing_maps = [ diff --git 
a/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir @@ -16,8 +16,8 @@ // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" // RUN: %{compile} | %{run} -#COO_2D = #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ], posWidth = 32, crdWidth = 32 }> -#COO_3D = #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton-nu", "singleton" ], posWidth = 32, crdWidth = 32 }> +#COO_2D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ], posWidth = 32, crdWidth = 32 }> +#COO_3D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ], posWidth = 32, crdWidth = 32 }> module { func.func private @printMemref3dF32(%ptr : tensor) attributes { llvm.emit_c_interface } diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir @@ -26,7 +26,7 @@ // REDEFINE: FileCheck %s // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} -#SparseVector = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }> +#SparseVector = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }> #trait_op = { indexing_maps = [ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir @@ -26,8 +26,8 @@ // REDEFINE: FileCheck %s // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> -#DCSR = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> +#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> // // Traits for tensor operations. 
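// Illustrative sketch only (not taken from any of the test files): the updates in this
// patch swap the encoding key `dimLevelType` for `lvlTypes`; the level-type strings and
// the rest of the encoding body stay as they were. The alias name, function name, and
// the `?xf64` element type below are placeholders, assuming the current encoding syntax.
//
//   #SparseVector = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>
//   // previously spelled: #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>
//
//   func.func @identity(%v: tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector> {
//     return %v : tensor<?xf64, #SparseVector>
//   }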
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir @@ -26,7 +26,7 @@ // REDEFINE: FileCheck %s // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} -#SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }> +#SV = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }> #trait_cast = { indexing_maps = [ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir @@ -27,7 +27,7 @@ // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #DCSR = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed"] + lvlTypes = [ "compressed", "compressed"] }> module { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir @@ -27,29 +27,29 @@ // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #Row = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "dense" ] + lvlTypes = [ "compressed", "dense" ] }> #CSR = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ] + lvlTypes = [ "dense", "compressed" ] }> #DCSC = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed" ], + lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(i,j) -> (j,i)> }> #SortedCOO = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed-nu", "singleton" ] + lvlTypes = [ "compressed-nu", "singleton" ] }> #SortedCOOPerm = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed-nu", "singleton" ], + lvlTypes = [ "compressed-nu", "singleton" ], dimOrdering = affine_map<(i,j) -> (j,i)> }> #CCCPerm = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed", "compressed"], + lvlTypes = [ "compressed", "compressed", "compressed"], dimOrdering = affine_map<(d0, d1, d2) -> (d1, d2, d0)> }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir @@ -27,19 +27,19 @@ // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{ - dimLevelType = ["compressed"] + lvlTypes = ["compressed"] }> #SparseMatrix = #sparse_tensor.encoding<{ - dimLevelType = ["compressed", "compressed"] + lvlTypes = ["compressed", "compressed"] }> #Sparse3dTensor = #sparse_tensor.encoding<{ - dimLevelType = ["compressed", "compressed", "compressed"] + lvlTypes = ["compressed", "compressed", "compressed"] }> #Sparse4dTensor = #sparse_tensor.encoding<{ - dimLevelType = ["compressed", "compressed", "compressed", "compressed"] + lvlTypes = ["compressed", "compressed", "compressed", "compressed"] }> // diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir 
b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir @@ -26,7 +26,7 @@ // REDEFINE: FileCheck %s // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> #trait_op = { indexing_maps = [ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir @@ -26,7 +26,7 @@ // REDEFINE: FileCheck %s // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> #trait_op = { indexing_maps = [ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex_ops.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex_ops.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex_ops.mlir @@ -26,7 +26,7 @@ // REDEFINE: FileCheck %s // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> #trait_op1 = { indexing_maps = [ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir @@ -27,7 +27,7 @@ // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #Tensor1 = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed"] + lvlTypes = [ "compressed", "compressed"] }> // diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir @@ -27,10 +27,10 @@ // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #CCC = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed", "compressed" ] }> + lvlTypes = [ "compressed", "compressed", "compressed" ] }> #CDC = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "dense", "compressed" ] + lvlTypes = [ "compressed", "dense", "compressed" ] // FIXME: Still inadmissible might need investigation // dimOrdering = affine_map<(i,j,k) -> (j,k,i)> }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir @@ -26,11 +26,11 @@ // REDEFINE: FileCheck %s // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} -#DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" 
] }> -#CSR = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}> -#CDR = #sparse_tensor.encoding<{dimLevelType = ["compressed", "dense"]}> +#DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> +#CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> +#CDR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "dense"]}> #CSC = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(i,j) -> (j,i)> }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir @@ -27,11 +27,11 @@ // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #CCCC = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed", "compressed", "compressed" ] + lvlTypes = [ "compressed", "compressed", "compressed", "compressed" ] }> #CDCD = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "dense", "compressed", "dense" ] + lvlTypes = [ "compressed", "dense", "compressed", "dense" ] }> // Creates and returns 4-D buffer of size (%s1, %s2, %s3, %s4) filled with the value %f diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir @@ -27,15 +27,15 @@ // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #CCC = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed", "compressed" ] + lvlTypes = [ "compressed", "compressed", "compressed" ] }> #CDC = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "dense", "compressed" ] + lvlTypes = [ "compressed", "dense", "compressed" ] }> #DDC = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed", "compressed" ] + lvlTypes = [ "dense", "compressed", "compressed" ] }> // Creates and returns 3-D buffer of size (%s1, %s2, %s3) filled with the value %f diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir @@ -27,11 +27,11 @@ // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #CCCCC = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed", "compressed", "compressed", "compressed" ] + lvlTypes = [ "compressed", "compressed", "compressed", "compressed", "compressed" ] }> #CDCDC = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "dense", "compressed", "dense", "compressed"] + lvlTypes = [ "compressed", "dense", "compressed", "dense", "compressed"] }> // Creates and returns 5-D buffer of size (%s1, %s2, %s3, %s4, %s5) filled with the value %f diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir @@ -27,17 +27,17 @@ // RUN: 
%{compile} | mlir-translate -mlir-to-llvmir | %{run} #Tensor1 = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed", "compressed" ], + lvlTypes = [ "compressed", "compressed", "compressed" ], dimOrdering = affine_map<(i,j,k) -> (i,j,k)> }> #Tensor2 = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed", "compressed" ], + lvlTypes = [ "compressed", "compressed", "compressed" ], dimOrdering = affine_map<(i,j,k) -> (j,k,i)> }> #Tensor3 = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed", "compressed" ], + lvlTypes = [ "compressed", "compressed", "compressed" ], dimOrdering = affine_map<(i,j,k) -> (k,i,j)> }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir @@ -27,11 +27,11 @@ // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #DCSR = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed" ] + lvlTypes = [ "compressed", "compressed" ] }> #DCSC = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed" ], + lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(i,j) -> (j,i)> }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir @@ -23,15 +23,15 @@ // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #Tensor1 = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed-nu", "singleton-nu", "singleton" ] + lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ] }> #Tensor2 = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed", "dense" ] + lvlTypes = [ "dense", "compressed", "dense" ] }> #Tensor3 = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "dense", "compressed" ], + lvlTypes = [ "dense", "dense", "compressed" ], dimOrdering = affine_map<(i,j,k) -> (i,k,j)> }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir @@ -27,20 +27,20 @@ // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #DCSR = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed" ], + lvlTypes = [ "compressed", "compressed" ], posWidth = 8, crdWidth = 8 }> #DCSC = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed" ], + lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(i,j) -> (j,i)>, posWidth = 64, crdWidth = 64 }> #CSC = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(i,j) -> (j,i)>, posWidth = 16, crdWidth = 32 diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir +++ 
b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir @@ -27,32 +27,32 @@ // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #Tensor1 = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed", "compressed" ], + lvlTypes = [ "compressed", "compressed", "compressed" ], dimOrdering = affine_map<(i,j,k) -> (i,j,k)> }> #Tensor2 = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed", "compressed" ], + lvlTypes = [ "compressed", "compressed", "compressed" ], dimOrdering = affine_map<(i,j,k) -> (j,k,i)> }> #Tensor3 = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed", "compressed", "compressed" ], + lvlTypes = [ "compressed", "compressed", "compressed" ], dimOrdering = affine_map<(i,j,k) -> (k,i,j)> }> #Tensor4 = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed", "compressed" ], + lvlTypes = [ "dense", "compressed", "compressed" ], dimOrdering = affine_map<(i,j,k) -> (i,j,k)> }> #Tensor5 = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed", "compressed" ], + lvlTypes = [ "dense", "compressed", "compressed" ], dimOrdering = affine_map<(i,j,k) -> (j,k,i)> }> #Tensor6 = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed", "compressed" ], + lvlTypes = [ "dense", "compressed", "compressed" ], dimOrdering = affine_map<(i,j,k) -> (k,i,j)> }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir @@ -28,32 +28,32 @@ // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #Tensor1 = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "dense", "compressed" ] + lvlTypes = [ "dense", "dense", "compressed" ] }> // NOTE: dense after compressed is not currently supported for the target // of direct-sparse2sparse conversion. (It's fine for the source though.) #Tensor2 = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed", "dense" ] + lvlTypes = [ "dense", "compressed", "dense" ] }> #Tensor3 = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "dense", "compressed" ], + lvlTypes = [ "dense", "dense", "compressed" ], dimOrdering = affine_map<(i,j,k) -> (i,k,j)> }> #SingletonTensor1 = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed", "singleton" ] + lvlTypes = [ "dense", "compressed", "singleton" ] }> // This also checks the compressed->dense conversion (when there are zeros). #SingletonTensor2 = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "dense", "singleton" ] + lvlTypes = [ "dense", "dense", "singleton" ] }> // This also checks the singleton->compressed conversion. 
#SingletonTensor3 = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "dense", "compressed" ] + lvlTypes = [ "dense", "dense", "compressed" ] }> module { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir @@ -27,11 +27,11 @@ // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SortedCOO = #sparse_tensor.encoding<{ - dimLevelType = [ "compressed-nu", "singleton" ] + lvlTypes = [ "compressed-nu", "singleton" ] }> #CSR = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ] + lvlTypes = [ "dense", "compressed" ] }> #trait = { @@ -181,4 +181,4 @@ return } -} \ No newline at end of file +} diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dot.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dot.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dot.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dot.mlir @@ -26,7 +26,7 @@ // REDEFINE: FileCheck %s // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} -#SparseVector = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }> +#SparseVector = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }> module { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir @@ -26,7 +26,7 @@ // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #CSC = #sparse_tensor.encoding<{ - dimLevelType = [ "dense", "compressed" ], + lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(i,j) -> (j,i)> }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir @@ -27,19 +27,19 @@ // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{ - dimLevelType = ["compressed"] + lvlTypes = ["compressed"] }> #SparseMatrix = #sparse_tensor.encoding<{ - dimLevelType = ["compressed", "compressed"] + lvlTypes = ["compressed", "compressed"] }> #Sparse3dTensor = #sparse_tensor.encoding<{ - dimLevelType = ["compressed", "compressed", "compressed"] + lvlTypes = ["compressed", "compressed", "compressed"] }> #Sparse4dTensor = #sparse_tensor.encoding<{ - dimLevelType = ["compressed", "compressed", "compressed", "compressed"] + lvlTypes = ["compressed", "compressed", "compressed", "compressed"] }> // diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir @@ -26,7 +26,7 @@ // REDEFINE: FileCheck %s // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} -#DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }> +#DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> // An example of a 2D 
convolution with a sparse filter.
 module {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
@@ -31,7 +31,7 @@
 !Filename = !llvm.ptr

 #SparseTensor = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed", "compressed", "compressed",
+  lvlTypes = [ "compressed", "compressed", "compressed", "compressed",
                    "compressed", "compressed", "compressed", "compressed" ],
   // Note that any dimOrdering permutation should give the same results
   // since, even though it impacts the sparse storage scheme layout,
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
@@ -11,30 +11,30 @@
 // TODO: support slices on lib path

 #CSR = #sparse_tensor.encoding<{
-  dimLevelType = [ "dense", "compressed" ]
+  lvlTypes = [ "dense", "compressed" ]
 }>

 #CSR_SLICE = #sparse_tensor.encoding<{
-  dimLevelType = [ "dense", "compressed" ],
+  lvlTypes = [ "dense", "compressed" ],
   slice = [ (1, 4, 1), (1, 4, 2) ]
 }>

 #CSR_SLICE_DYN = #sparse_tensor.encoding<{
-  dimLevelType = [ "dense", "compressed" ],
+  lvlTypes = [ "dense", "compressed" ],
   slice = [ (?, ?, ?), (?, ?, ?) ]
 }>

 #COO = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed-nu", "singleton" ]
+  lvlTypes = [ "compressed-nu", "singleton" ]
 }>

 #COO_SLICE = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed-nu", "singleton" ],
+  lvlTypes = [ "compressed-nu", "singleton" ],
   slice = [ (1, 4, 1), (1, 4, 2) ]
 }>

 #COO_SLICE_DYN = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed-nu", "singleton" ],
+  lvlTypes = [ "compressed-nu", "singleton" ],
   slice = [ (?, ?, ?), (?, ?, ?) ]
 }>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir
@@ -27,11 +27,11 @@
 // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}

 #SparseVector = #sparse_tensor.encoding<{
-  dimLevelType = ["compressed"]
+  lvlTypes = ["compressed"]
 }>

 #SparseMatrix = #sparse_tensor.encoding<{
-  dimLevelType = ["compressed", "compressed"]
+  lvlTypes = ["compressed", "compressed"]
 }>

 #trait_1d = {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir
@@ -27,11 +27,11 @@
 // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}

 #SparseVector = #sparse_tensor.encoding<{
-  dimLevelType = ["compressed"]
+  lvlTypes = ["compressed"]
 }>

 #SparseMatrix = #sparse_tensor.encoding<{
-  dimLevelType = ["compressed", "compressed"]
+  lvlTypes = ["compressed", "compressed"]
 }>

 #trait_1d = {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir
@@ -24,7 +24,7 @@
 // Insertion example using pure codegen (no sparse runtime support lib).

-#SparseVector = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>
+#SparseVector = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>

 #trait_mul_s = {
   indexing_maps = [
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir
@@ -23,23 +23,23 @@
 // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}

 #Dense = #sparse_tensor.encoding<{
-  dimLevelType = ["dense", "dense"]
+  lvlTypes = ["dense", "dense"]
 }>

 #SortedCOO = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed-nu", "singleton" ]
+  lvlTypes = [ "compressed-nu", "singleton" ]
 }>

 #CSR = #sparse_tensor.encoding<{
-  dimLevelType = [ "dense", "compressed" ]
+  lvlTypes = [ "dense", "compressed" ]
 }>

 #DCSR = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed" ]
+  lvlTypes = [ "compressed", "compressed" ]
 }>

 #Row = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "dense" ]
+  lvlTypes = [ "compressed", "dense" ]
 }>

 module {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
@@ -23,19 +23,19 @@
 // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}

 #TensorCSR = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "dense", "compressed" ]
+  lvlTypes = [ "compressed", "dense", "compressed" ]
 }>

 #TensorRow = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed", "dense" ]
+  lvlTypes = [ "compressed", "compressed", "dense" ]
 }>

 #CCoo = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed-nu", "singleton" ]
+  lvlTypes = [ "compressed", "compressed-nu", "singleton" ]
 }>

 #DCoo = #sparse_tensor.encoding<{
-  dimLevelType = [ "dense", "compressed-nu", "singleton" ]
+  lvlTypes = [ "dense", "compressed-nu", "singleton" ]
 }>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
@@ -37,12 +37,12 @@
 // TODO: Investigate the output generated for SVE, see https://github.com/llvm/llvm-project/issues/60626

 #CSR = #sparse_tensor.encoding<{
-  dimLevelType = [ "dense", "compressed" ],
+  lvlTypes = [ "dense", "compressed" ],
   dimOrdering = affine_map<(i,j) -> (i,j)>
 }>

 #DCSR = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed" ],
+  lvlTypes = [ "compressed", "compressed" ],
   dimOrdering = affine_map<(i,j) -> (i,j)>
 }>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
@@ -11,54 +11,54 @@
 // TODO: support lib path.

 #DCSR = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed" ]
+  lvlTypes = [ "compressed", "compressed" ]
 }>

 #DCSR_SLICE = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed" ],
+  lvlTypes = [ "compressed", "compressed" ],
   slice = [ (0, 4, 1), (0, 8, 1) ]
 }>

 #CSR = #sparse_tensor.encoding<{
-  dimLevelType = [ "dense", "compressed" ]
+  lvlTypes = [ "dense", "compressed" ]
 }>

 #CSR_SLICE = #sparse_tensor.encoding<{
-  dimLevelType = [ "dense", "compressed" ],
+  lvlTypes = [ "dense", "compressed" ],
   slice = [ (0, 4, 1), (0, 8, 1) ]
 }>

 #COO = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed-nu", "singleton" ]
+  lvlTypes = [ "compressed-nu", "singleton" ]
 }>

 #CSR_SLICE_1 = #sparse_tensor.encoding<{
-  dimLevelType = [ "dense", "compressed" ],
+  lvlTypes = [ "dense", "compressed" ],
   slice = [ (0, 4, 2), (0, 4, 1) ]
 }>

 #DCSR_SLICE_1 = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed" ],
+  lvlTypes = [ "compressed", "compressed" ],
   slice = [ (0, 4, 2), (1, 4, 1) ]
 }>

 #COO_SLICE_1 = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed-nu", "singleton" ],
+  lvlTypes = [ "compressed-nu", "singleton" ],
   slice = [ (0, 4, 2), (0, 4, 1) ]
 }>

 #COO_SLICE_2 = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed-nu", "singleton" ],
+  lvlTypes = [ "compressed-nu", "singleton" ],
   slice = [ (0, 4, 2), (1, 4, 1) ]
 }>

 #CSR_SLICE_dyn = #sparse_tensor.encoding<{
-  dimLevelType = [ "dense", "compressed" ],
+  lvlTypes = [ "dense", "compressed" ],
   slice = [ (?, 4, ?), (?, 4, ?) ]
 }>

 #DCSR_SLICE_dyn = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed" ],
+  lvlTypes = [ "compressed", "compressed" ],
   slice = [ (?, 4, ?), (?, 4, ?) ]
 }>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir
@@ -26,7 +26,7 @@
 // REDEFINE: FileCheck %s
 // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}

-#DCSR = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}>
+#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}>

 //
 // Traits for 2-d tensor (aka matrix) operations.
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
@@ -38,7 +38,7 @@
 !Filename = !llvm.ptr

 #SparseMatrix = #sparse_tensor.encoding<{
-  dimLevelType = [ "dense", "compressed" ],
+  lvlTypes = [ "dense", "compressed" ],
   posWidth = 8,
   crdWidth = 8
 }>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
@@ -31,7 +31,7 @@
 !Filename = !llvm.ptr

 #SparseTensor = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed", "compressed" ]
+  lvlTypes = [ "compressed", "compressed", "compressed" ]
 }>

 #mttkrp = {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir
@@ -27,7 +27,7 @@
 // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}

 #DCSR = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed" ]
+  lvlTypes = [ "compressed", "compressed" ]
 }>

 #trait_mult_elt = {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
@@ -27,11 +27,11 @@
 // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}

 #SparseMatrix = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed" ]
+  lvlTypes = [ "compressed", "compressed" ]
 }>

 #SparseTensor = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed", "compressed" ]
+  lvlTypes = [ "compressed", "compressed", "compressed" ]
 }>

 #redsum = {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
@@ -31,7 +31,7 @@
 !Filename = !llvm.ptr

 #DCSR = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed" ],
+  lvlTypes = [ "compressed", "compressed" ],
   dimOrdering = affine_map<(i,j) -> (i,j)>
 }>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
@@ -22,17 +22,17 @@
 // TODO: Pack only support CodeGen Path

 #SortedCOO = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed-nu", "singleton" ]
+  lvlTypes = [ "compressed-nu", "singleton" ]
 }>

 #SortedCOOI32 = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed-nu", "singleton" ],
+  lvlTypes = [ "compressed-nu", "singleton" ],
   posWidth = 32,
   crdWidth = 32
 }>

 #BCOO = #sparse_tensor.encoding<{
-  dimLevelType = [ "dense", "compressed-hi-nu", "singleton" ]
+  lvlTypes = [ "dense", "compressed-hi-nu", "singleton" ]
 }>

 module {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
@@ -26,7 +26,7 @@
 // REDEFINE: FileCheck %s
 // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}

-#DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>
+#DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>

 // An example of a quantized sparse matmul. With the zero offset for the
 // sparse input, the sparse compiler generates very efficient code for the
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir
@@ -26,7 +26,7 @@
 // REDEFINE: FileCheck %s
 // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}

-#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
+#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}>

 #trait_op = {
   indexing_maps = [
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir
@@ -28,10 +28,10 @@
 // Reduction in this file _are_ supported by the AArch64 SVE backend

-#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
-#CSR = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}>
+#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}>
+#CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
 #CSC = #sparse_tensor.encoding<{
-  dimLevelType = [ "dense", "compressed" ],
+  lvlTypes = [ "dense", "compressed" ],
   dimOrdering = affine_map<(i,j) -> (j,i)>
 }>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir
@@ -19,10 +19,10 @@
 // the AArch64 SVE backend (so the set-up is a bit different to
 // sparse_reducitons.mlir)

-#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
-#CSR = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}>
+#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}>
+#CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
 #CSC = #sparse_tensor.encoding<{
-  dimLevelType = [ "dense", "compressed" ],
+  lvlTypes = [ "dense", "compressed" ],
   dimOrdering = affine_map<(i,j) -> (j,i)>
 }>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
@@ -27,8 +27,8 @@
 // Reduction in this file _are_ supported by the AArch64 SVE backend

-#SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>
-#DV = #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }>
+#SV = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>
+#DV = #sparse_tensor.encoding<{ lvlTypes = [ "dense" ] }>

 #trait_reduction = {
   indexing_maps = [
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_prod.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_prod.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_prod.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_prod.mlir
@@ -19,8 +19,8 @@
 // the AArch64 SVE backend (so the set-up is a bit different to
 // sparse_reducitons.mlir)

-#SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>
-#DV = #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }>
+#SV = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>
+#DV = #sparse_tensor.encoding<{ lvlTypes = [ "dense" ] }>

 #trait_reduction = {
   indexing_maps = [
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
@@ -31,7 +31,7 @@
 !Filename = !llvm.ptr

 #SparseMatrix = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed" ],
+  lvlTypes = [ "compressed", "compressed" ],
   posWidth = 32,
   crdWidth = 32
 }>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
@@ -26,7 +26,7 @@
 // REDEFINE: FileCheck %s
 // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}

-#SM = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>
+#SM = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>

 #trait_sampled_dense_dense = {
   indexing_maps = [
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
@@ -22,7 +22,7 @@
 // REDEFINE: FileCheck %s
 // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}

-#CSR = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>
+#CSR = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>

 #trait_scale = {
   indexing_maps = [
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir
@@ -23,7 +23,7 @@
 // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}

 #map = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
-#SparseMatrix = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>
+#SparseMatrix = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }>

 module @func_sparse.2 {
   // Do elementwise x+1 when true, x-1 when false
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir
@@ -22,10 +22,10 @@
 // REDEFINE: FileCheck %s
 // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}

-#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
-#CSR = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}>
+#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}>
+#CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}>
 #CSC = #sparse_tensor.encoding<{
-  dimLevelType = [ "dense", "compressed" ],
+  lvlTypes = [ "dense", "compressed" ],
   dimOrdering = affine_map<(i,j) -> (j,i)>
 }>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir
@@ -22,7 +22,7 @@
 // REDEFINE: FileCheck %s
 // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}

-#SparseVector = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>
+#SparseVector = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>

 #trait_op = {
   indexing_maps = [
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
@@ -29,20 +29,20 @@
 !Filename = !llvm.ptr

 #SortedCOO = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed-nu", "singleton" ]
+  lvlTypes = [ "compressed-nu", "singleton" ]
 }>

 #SortedCOOPermuted = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed-nu", "singleton" ],
+  lvlTypes = [ "compressed-nu", "singleton" ],
   dimOrdering = affine_map<(i,j) -> (j,i)>
 }>

 #SortedCOO3D = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed-nu", "singleton-nu", "singleton" ]
+  lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ]
 }>

 #SortedCOO3DPermuted = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed-nu", "singleton-nu", "singleton" ],
+  lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ],
   dimOrdering = affine_map<(i,j,k) -> (k,i,j)>
 }>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
@@ -31,7 +31,7 @@
 !Filename = !llvm.ptr

 #SparseMatrix = #sparse_tensor.encoding<{
-  dimLevelType = [ "dense", "compressed" ]
+  lvlTypes = [ "dense", "compressed" ]
 }>

 #spmm = {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir
@@ -31,33 +31,33 @@
 //

 #Dense = #sparse_tensor.encoding<{
-  dimLevelType = [ "dense", "dense" ]
+  lvlTypes = [ "dense", "dense" ]
 }>

 #CSR = #sparse_tensor.encoding<{
-  dimLevelType = [ "dense", "compressed" ]
+  lvlTypes = [ "dense", "compressed" ]
 }>

 #DCSR = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed" ]
+  lvlTypes = [ "compressed", "compressed" ]
 }>

 #CSC = #sparse_tensor.encoding<{
-  dimLevelType = [ "dense", "compressed" ],
+  lvlTypes = [ "dense", "compressed" ],
   dimOrdering = affine_map<(i,j) -> (j,i)>
 }>

 #DCSC = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed" ],
+  lvlTypes = [ "compressed", "compressed" ],
   dimOrdering = affine_map<(i,j) -> (j,i)>
 }>

 #BlockRow = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "dense" ]
+  lvlTypes = [ "compressed", "dense" ]
 }>

 #BlockCol = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "dense" ],
+  lvlTypes = [ "compressed", "dense" ],
   dimOrdering = affine_map<(i,j) -> (j,i)>
 }>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
@@ -34,7 +34,7 @@
 !Filename = !llvm.ptr

 #SparseMatrix = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed" ]
+  lvlTypes = [ "compressed", "compressed" ]
 }>

 #trait_sum_reduce = {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_bf16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_bf16.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_bf16.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_bf16.mlir
@@ -20,7 +20,7 @@
 !Filename = !llvm.ptr

 #SparseMatrix = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed" ]
+  lvlTypes = [ "compressed", "compressed" ]
 }>

 #trait_sum_reduce = {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir
@@ -34,7 +34,7 @@
 !Filename = !llvm.ptr

 #SparseMatrix = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed" ]
+  lvlTypes = [ "compressed", "compressed" ]
 }>

 #trait_sum_reduce = {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir
@@ -29,7 +29,7 @@
 !Filename = !llvm.ptr

 #SparseMatrix = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed" ]
+  lvlTypes = [ "compressed", "compressed" ]
 }>

 #trait_sum_reduce = {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tanh.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tanh.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tanh.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tanh.mlir
@@ -29,7 +29,7 @@
 // REDEFINE: FileCheck %s
 // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}

-#SparseVector = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>
+#SparseVector = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>

 #trait_op = {
   indexing_maps = [
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir
@@ -26,7 +26,7 @@
 // REDEFINE: FileCheck %s
 // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}

-#ST = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed", "compressed"]}>
+#ST = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed", "compressed"]}>

 //
 // Trait for 3-d tensor element wise multiplication.
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir
@@ -26,8 +26,8 @@
 // REDEFINE: FileCheck %s
 // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}

-#ST1 = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed", "compressed"]}>
-#ST2 = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed", "dense"]}>
+#ST1 = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed", "compressed"]}>
+#ST2 = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed", "dense"]}>

 //
 // Trait for 3-d tensor operation.
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir
@@ -27,11 +27,11 @@
 // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}

 #DCSR = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed" ]
+  lvlTypes = [ "compressed", "compressed" ]
 }>

 #DCSC = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed" ],
+  lvlTypes = [ "compressed", "compressed" ],
   dimOrdering = affine_map<(i,j) -> (j,i)>
 }>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir
@@ -28,7 +28,7 @@
 // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}

 #SortedCOO = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed-nu", "singleton" ]
+  lvlTypes = [ "compressed-nu", "singleton" ]
 }>

 module {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir
@@ -26,8 +26,8 @@
 // REDEFINE: FileCheck %s
 // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}

-#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
-#DCSR = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}>
+#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}>
+#DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}>

 //
 // Traits for tensor operations.
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir
@@ -26,8 +26,8 @@
 // REDEFINE: FileCheck %s
 // RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}

-#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
-#DenseVector = #sparse_tensor.encoding<{dimLevelType = ["dense"]}>
+#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}>
+#DenseVector = #sparse_tensor.encoding<{lvlTypes = ["dense"]}>

 //
 // Traits for 1-d tensor (aka vector) operations.
diff --git a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-const.mlir b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-const.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-const.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-const.mlir
@@ -9,7 +9,7 @@
 // RUN:   --e main --entry-point-result=void \
 // RUN: | FileCheck %s

-#CSR = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>
+#CSR = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>

 module {
   // Compute matrix vector y = Ax
diff --git a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec.mlir
@@ -9,7 +9,7 @@
 // RUN:   --e main --entry-point-result=void \
 // RUN: | FileCheck %s

-#CSR = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>
+#CSR = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>

 module {
   // Compute matrix vector y = Ax
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py
@@ -20,7 +20,7 @@
 # handle sparse tensor outputs.
 _KERNEL_STR = """
 #DCSR = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed", "compressed" ]
+  lvlTypes = [ "compressed", "compressed" ]
 }>

 #trait_add_elt = {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco_utils.py b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco_utils.py
--- a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco_utils.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco_utils.py
@@ -255,7 +255,7 @@
   return f"""
 !Ptr = !llvm.ptr
 #enc = #sparse_tensor.encoding<{{
-  dimLevelType = [ {sparsity} ]
+  lvlTypes = [ {sparsity} ]
 }}>
 func.func @{_ENTRY_NAME}(%filename: !Ptr) -> (tensor<{shape}x{type}, #enc>, memref<{rank}xindex>)
 attributes {{ llvm.emit_c_interface }} {{
@@ -331,7 +331,7 @@
   return f"""
 !Ptr = !llvm.ptr
 #enc = #sparse_tensor.encoding<{{
-  dimLevelType = [ {sparsity} ]
+  lvlTypes = [ {sparsity} ]
 }}>
 func.func @{_ENTRY_NAME}(%t: tensor<{shape}x{type}, #enc>, %filename: !Ptr)
 attributes {{ llvm.emit_c_interface }} {{
diff --git a/mlir/test/python/dialects/sparse_tensor/dialect.py b/mlir/test/python/dialects/sparse_tensor/dialect.py
--- a/mlir/test/python/dialects/sparse_tensor/dialect.py
+++ b/mlir/test/python/dialects/sparse_tensor/dialect.py
@@ -14,19 +14,19 @@
 def testEncodingAttr1D():
   with Context() as ctx:
     parsed = Attribute.parse('#sparse_tensor.encoding<{'
-                             ' dimLevelType = [ "compressed" ],'
+                             ' lvlTypes = [ "compressed" ],'
                              ' posWidth = 16,'
                              ' crdWidth = 32'
                              '}>')
-    # CHECK: #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ], posWidth = 16, crdWidth = 32 }>
+    # CHECK: #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ], posWidth = 16, crdWidth = 32 }>
     print(parsed)

     casted = st.EncodingAttr(parsed)
     # CHECK: equal: True
     print(f"equal: {casted == parsed}")

-    # CHECK: dim_level_types: []
-    print(f"dim_level_types: {casted.dim_level_types}")
+    # CHECK: lvl_types: []
+    print(f"lvl_types: {casted.lvl_types}")
     # CHECK: dim_ordering: None
     print(f"dim_ordering: {casted.dim_ordering}")
     # CHECK: pos_width: 16
@@ -34,8 +34,8 @@
     # CHECK: crd_width: 32
     print(f"crd_width: {casted.crd_width}")

-    created = st.EncodingAttr.get(casted.dim_level_types, None, None, 0, 0)
-    # CHECK: #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>
+    created = st.EncodingAttr.get(casted.lvl_types, None, None, 0, 0)
+    # CHECK: #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>
     print(created)
     # CHECK: created_equal: False
     print(f"created_equal: {created == casted}")
@@ -52,20 +52,20 @@
 def testEncodingAttr2D():
   with Context() as ctx:
     parsed = Attribute.parse('#sparse_tensor.encoding<{'
-                             ' dimLevelType = [ "dense", "compressed" ],'
+                             ' lvlTypes = [ "dense", "compressed" ],'
                              ' dimOrdering = affine_map<(d0, d1) -> (d1, d0)>,'
                              ' posWidth = 8,'
                              ' crdWidth = 32'
                              '}>')
-    # CHECK: #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, posWidth = 8, crdWidth = 32 }>
+    # CHECK: #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, posWidth = 8, crdWidth = 32 }>
     print(parsed)

     casted = st.EncodingAttr(parsed)
     # CHECK: equal: True
     print(f"equal: {casted == parsed}")

-    # CHECK: dim_level_types: [, ]
-    print(f"dim_level_types: {casted.dim_level_types}")
+    # CHECK: lvl_types: [, ]
+    print(f"lvl_types: {casted.lvl_types}")
     # CHECK: dim_ordering: (d0, d1) -> (d1, d0)
     print(f"dim_ordering: {casted.dim_ordering}")
     # CHECK: pos_width: 8
@@ -73,9 +73,9 @@
     # CHECK: crd_width: 32
     print(f"crd_width: {casted.crd_width}")

-    created = st.EncodingAttr.get(casted.dim_level_types, casted.dim_ordering,
+    created = st.EncodingAttr.get(casted.lvl_types, casted.dim_ordering,
                                   casted.higher_ordering, 8, 32)
-    # CHECK: #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, posWidth = 8, crdWidth = 32 }>
+    # CHECK: #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, posWidth = 8, crdWidth = 32 }>
     print(created)
     # CHECK: created_equal: True
     print(f"created_equal: {created == casted}")
@@ -87,13 +87,13 @@
   with Context() as ctx, Location.unknown():
     encoding = st.EncodingAttr(
         Attribute.parse('#sparse_tensor.encoding<{'
-                        ' dimLevelType = [ "compressed" ], '
+                        ' lvlTypes = [ "compressed" ], '
                         ' posWidth = 64,'
                         ' crdWidth = 32'
                        '}>'))
     tt = RankedTensorType.get((1024,), F32Type.get(), encoding=encoding)
-    # CHECK: tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ], posWidth = 64, crdWidth = 32 }>>
+    # CHECK: tensor<1024xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ], posWidth = 64, crdWidth = 32 }>>
     print(tt)
-    # CHECK: #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ], posWidth = 64, crdWidth = 32 }>
+    # CHECK: #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ], posWidth = 64, crdWidth = 32 }>
     print(tt.encoding)
     assert tt.encoding == encoding