diff --git a/mlir/include/mlir-c/Dialect/SparseTensor.h b/mlir/include/mlir-c/Dialect/SparseTensor.h --- a/mlir/include/mlir-c/Dialect/SparseTensor.h +++ b/mlir/include/mlir-c/Dialect/SparseTensor.h @@ -52,9 +52,8 @@ /// Creates a `sparse_tensor.encoding` attribute with the given parameters. MLIR_CAPI_EXPORTED MlirAttribute mlirSparseTensorEncodingAttrGet( MlirContext ctx, intptr_t lvlRank, - enum MlirSparseTensorDimLevelType const *lvlTypes, - MlirAffineMap dimOrdering, MlirAffineMap higherOrdering, int posWidth, - int crdWidth); + enum MlirSparseTensorDimLevelType const *lvlTypes, MlirAffineMap dimToLvl, + int posWidth, int crdWidth); /// Returns the level-rank of the `sparse_tensor.encoding` attribute. MLIR_CAPI_EXPORTED intptr_t @@ -64,13 +63,10 @@ MLIR_CAPI_EXPORTED enum MlirSparseTensorDimLevelType mlirSparseTensorEncodingAttrGetLvlType(MlirAttribute attr, intptr_t lvl); -/// Returns the dimension-ordering of the `sparse_tensor.encoding` attribute. +/// Returns the dimension-to-level mapping of the `sparse_tensor.encoding` +/// attribute. MLIR_CAPI_EXPORTED MlirAffineMap -mlirSparseTensorEncodingAttrGetDimOrdering(MlirAttribute attr); - -/// Returns the higher-ordering of the `sparse_tensor.encoding` attribute. -MLIR_CAPI_EXPORTED MlirAffineMap -mlirSparseTensorEncodingAttrGetHigherOrdering(MlirAttribute attr); +mlirSparseTensorEncodingAttrGetDimToLvl(MlirAttribute attr); /// Returns the position bitwidth of the `sparse_tensor.encoding` attribute. MLIR_CAPI_EXPORTED int diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td --- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td @@ -144,30 +144,13 @@ properties, and split up how the level-format and properties are specified rather than using this suffix mechanism. - - An optional permutation which maps (higher-ordering)-coordinates - to level-coordinates; defaulting to the identity permutation. - For example, given a 2-d tensor with the default higher-ordering, - `(i, j) -> (i, j)` specifies row-wise storage and `(i, j) -> - (j, i)` specifies column-wise storage. - - TODO: this field is called "dimOrdering" for historical reasons, - even though it actually operates on level-coordinates rather than - dimension-coordinates. - (This will be corrected in an upcoming change that completely - overhauls the syntax of this attribute.) - - - An optional higher-order mapping from dimension-coordinates to - a higher-order coordinate space; defaulting to the identity map. - This is applied before the `dimOrdering`, thus we have the composite: - dimCoords --higherOrdering--> hoCoords --dimOrdering--> lvlCoords. - The higher-order mapping is used to define block-sparse storage, - jagged-diagonal (JDS/ELL/ITPACK) storage, etc. - - For example, given a 2-d tensor, the mapping + - An optional affine map from dimension-coordinates to level-coordinates; + defaulting to the identity map. For example, given a 2-d tensor: + `(i, j) -> (i, j)` specifies row-wise storage, `(i, j) -> (j, i)` + specifies column-wise storage, and `(i, j) -> (i floordiv 2, j floordiv 3, i mod 2, j mod 3)` - imposes an higher-order partitioning into 2x3 blocks along the - matrix layout. For block-sparsity, blocks are typically stored - with compression while dense storage is used within each block + specifies 2x3 block-sparsity. 
For block-sparsity, blocks are typically + stored with compression while dense storage is used within each block (although hybrid schemes are possible as well). TODO: the following example is out-of-date and will be implemented @@ -175,7 +158,7 @@ (This will be corrected in an upcoming change that completely overhauls the syntax of this attribute.) - The higher-order mapping also provides a notion of "counting a + The dimToLvl mapping also provides a notion of "counting a dimension", where every stored element with the same coordinate is mapped to a new slice. For instance, ELL storage of a 2-d tensor can be defined with the mapping `(i, j) -> (#i, i, j)` @@ -221,7 +204,7 @@ // Doubly compressed sparse column storage with specific bitwidths. #DCSC = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], - dimOrdering = affine_map<(i, j) -> (j, i)>, + dimToLvl = affine_map<(i, j) -> (j, i)>, posWidth = 32, crdWidth = 8 }> @@ -230,16 +213,14 @@ // Block sparse row storage (2x3 blocks). #BCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense", "dense" ], - dimOrdering = affine_map<(ii, jj, i, j) -> (ii, jj, i, j)>, - higherOrdering = affine_map<(i, j) -> (i floordiv 2, j floordiv 3, i mod 2, j mod 3)> + dimToLvl = affine_map<(i, j) -> (i floordiv 2, j floordiv 3, i mod 2, j mod 3)> }> ... tensor<20x30xf32, #BCSR> ... // ELL storage (4 jagged diagonals, i.e., at most 4 nonzeros per row). #ELL = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ], - dimOrdering = affine_map<(ii, i, j) -> (ii, i, j)>, - higherOrdering = affine_map<(i, j)[c] -> (c * 4 * i, i, j)> + dimToLvl = affine_map<(i, j)[c] -> (c * 4 * i, i, j)> }> ... tensor<?x?xf64, #ELL> ... @@ -262,15 +243,16 @@ "::mlir::sparse_tensor::DimLevelType", "level-types" >: $lvlTypes, - // A permutation from (higher-ordering)-coordinates to level-coordinates. - "AffineMap":$dimOrdering, - // A mapping from dimension-coordinates to (higher-ordering)-coordinates. - "AffineMap":$higherOrdering, + // A mapping from dimension-coordinates to level-coordinates. + "AffineMap":$dimToLvl, // The required bitwidth for position storage. "unsigned":$posWidth, // The required bitwidth for coordinate storage. "unsigned":$crdWidth, // A slice attribute for each dimension of the tensor type. + // FIXME: The name used here is `dimSlices`, however the + // parser/printer uses the name `slice` instead. Therefore + // the parser/printer need to be updated to match. ArrayRefParameter< "::mlir::sparse_tensor::SparseTensorDimSliceAttr", "per dimension slice metadata" @@ -279,16 +261,11 @@ let builders = [ AttrBuilder<(ins "ArrayRef<::mlir::sparse_tensor::DimLevelType>":$lvlTypes, - "AffineMap":$dimOrdering, - "AffineMap":$higherOrdering, + "AffineMap":$dimToLvl, "unsigned":$posWidth, "unsigned":$crdWidth), [{ - return $_get($_ctxt, lvlTypes, - dimOrdering, - higherOrdering, - posWidth, - crdWidth, - ArrayRef<::mlir::sparse_tensor::SparseTensorDimSliceAttr>{}); + return $_get($_ctxt, lvlTypes, dimToLvl, posWidth, crdWidth, + ArrayRef<::mlir::sparse_tensor::SparseTensorDimSliceAttr>{}); }]> ]; @@ -297,23 +274,40 @@ // Factory methods. // - /// Constructs a new encoding with the dimOrdering and higherOrdering - /// reset to the default/identity. - SparseTensorEncodingAttr withoutOrdering() const; + /// Constructs a new encoding with the given dimToLvl mapping, + /// and all other fields inherited from `this`.
+ SparseTensorEncodingAttr withDimToLvl(AffineMap dimToLvl) const; + SparseTensorEncodingAttr withDimToLvl(SparseTensorEncodingAttr enc) const; - /// Constructs a new encoding with the pointer and index bitwidth - /// reset to the default. + /// Constructs a new encoding with dimToLvl reset to the default/identity, + /// and all other fields inherited from `this`. + SparseTensorEncodingAttr withoutDimToLvl() const; + + /// Constructs a new encoding with the given pointer and index + /// bitwidths, and all other fields inherited from `this`. + SparseTensorEncodingAttr withBitWidths(unsigned posWidth, unsigned crdWidth) const; + + /// Constructs a new encoding with the pointer and index bitwidths + /// reset to the default, and all other fields inherited from `this`. SparseTensorEncodingAttr withoutBitWidths() const; // - // lvlTypes methods. + // Rank methods. // + /// Returns the expected number of tensor dimensions. Asserts that + /// the encoding is non-null (since no fixed result is valid for every + /// dense-tensor). + ::mlir::sparse_tensor::Dimension getDimRank() const; + /// Returns the number of storage levels. Asserts that the encoding - /// is non-null (since there is no fixed result that's valid for - /// every dense-tensor). + /// is non-null (since no fixed result is valid for every dense-tensor). ::mlir::sparse_tensor::Level getLvlRank() const; + // + // lvlTypes methods. + // + /// Safely looks up the level-type for the requested level. (Returns /// `DimLevelType::Dense` for the null encoding, since dense-tensors /// are always all-dense.) @@ -335,13 +329,18 @@ bool isAllOrdered() const; // - // dimOrdering/higherOrdering methods. + // dimToLvl methods. // - /// Returns true if the encoding has an identity dimension ordering. + /// Returns true if the dimToLvl mapping is the identity. /// Also returns true for the null encoding (since dense-tensors - /// always have the identity ordering). - bool hasIdDimOrdering() const; + /// always have the identity mapping). + bool isIdentity() const; + + /// Returns true if the dimToLvl mapping is a permutation. + /// Also returns true for the null encoding (since dense-tensors + /// always have the identity mapping). + bool isPermutation() const; // // posWidth/crdWidth methods. diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h --- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h @@ -45,12 +45,12 @@ /// class SparseTensorType { public: - // We memoize `lvlRank` and `dim2lvl` to avoid repeating the + // We memoize `lvlRank` and `dimToLvl` to avoid repeating the // conditionals throughout the rest of the class. SparseTensorType(RankedTensorType rtp) : rtp(rtp), enc(getSparseTensorEncoding(rtp)), lvlRank(enc ? enc.getLvlRank() : getDimRank()), - dim2lvl(enc.hasIdDimOrdering() ? AffineMap() : enc.getDimOrdering()) { + dimToLvl(enc.isIdentity() ? AffineMap() : enc.getDimToLvl()) { assert(rtp && "got null RankedTensorType"); assert((!isIdentity() || getDimRank() == lvlRank) && "Rank mismatch"); } @@ -65,6 +65,10 @@ // So we must explicitly define the copy-ctor to silence -Wdeprecated-copy. SparseTensorType(const SparseTensorType &) = default; + // + // Factory methods. + // + /// Constructs a new `SparseTensorType` with the same dimension-shape /// and element type, but with the encoding replaced by the given encoding. 
SparseTensorType withEncoding(SparseTensorEncodingAttr newEnc) const { @@ -73,11 +77,44 @@ /// Constructs a new `SparseTensorType` with the same dimension-shape /// and element type, but with the encoding replaced by - /// `getEncoding().withoutOrdering()`. - SparseTensorType withoutOrdering() const { - return withEncoding(enc.withoutOrdering()); + /// `getEncoding().withDimToLvl(dimToLvl)`. + SparseTensorType withDimToLvl(AffineMap dimToLvl) const { + return withEncoding(enc.withDimToLvl(dimToLvl)); } + SparseTensorType withDimToLvl(SparseTensorEncodingAttr dimToLvlEnc) const { + return withEncoding(enc.withDimToLvl(dimToLvlEnc)); + } + + SparseTensorType withDimToLvl(const SparseTensorType &dimToLvlSTT) const { + return withDimToLvl(dimToLvlSTT.getEncoding()); + } + + /// Constructs a new `SparseTensorType` with the same dimension-shape + /// and element type, but with the encoding replaced by + /// `getEncoding().withoutDimToLvl()`. + SparseTensorType withoutDimToLvl() const { + return withEncoding(enc.withoutDimToLvl()); + } + + /// Constructs a new `SparseTensorType` with the same dimension-shape + /// and element type, but with the encoding replaced by + /// `getEncoding().withBitWidths(posWidth, crdWidth)`. + SparseTensorType withBitWidths(unsigned posWidth, unsigned crdWidth) const { + return withEncoding(enc.withBitWidths(posWidth, crdWidth)); + } + + /// Constructs a new `SparseTensorType` with the same dimension-shape + /// and element type, but with the encoding replaced by + /// `getEncoding().withoutBitWidths()`. + SparseTensorType withoutBitWidths() const { + return withEncoding(enc.withoutBitWidths()); + } + + // + // Other methods. + // + /// Allow implicit conversion to `RankedTensorType`, `ShapedType`, /// and `Type`. These are implicit to help alleviate the impedance /// mismatch for code that has not been converted to use `SparseTensorType` @@ -144,32 +181,36 @@ /// Returns true if the dimToLvl mapping is the identity. /// (This is always true for dense-tensors.) - bool isIdentity() const { return !dim2lvl; } + bool isIdentity() const { return !dimToLvl; } + + /// Returns true if the dimToLvl mapping is a permutation. + /// (This is always true for dense-tensors.) + bool isPermutation() const { return enc.isPermutation(); } /// Returns the dimToLvl mapping (or the null-map for the identity). /// If you intend to compare the results of this method for equality, - /// see `hasSameDimToLvlMap` instead. - AffineMap getDimToLvlMap() const { return dim2lvl; } + /// see `hasSameDimToLvl` instead. + AffineMap getDimToLvl() const { return dimToLvl; } /// Returns the dimToLvl mapping, where the identity map is expanded out /// into a full `AffineMap`. This method is provided as a convenience, - /// but for most purposes other methods (`isIdentity`, `getDimToLvlMap`, + /// but for most purposes other methods (`isIdentity`, `getDimToLvl`, /// etc) will be more helpful. - AffineMap getExpandedDimToLvlMap() const { - return dim2lvl - ? dim2lvl + AffineMap getExpandedDimToLvl() const { + return dimToLvl + ? dimToLvl : AffineMap::getMultiDimIdentityMap(getDimRank(), getContext()); } /// Returns true iff the two types have the same mapping. This method /// takes care to handle identity maps properly, so it should be preferred - /// over using `getDimToLvlMap` followed by `AffineMap::operator==`. - bool hasSameDimToLvlMap(const SparseTensorType &other) const { + /// over using `getDimToLvl` followed by `AffineMap::operator==`. 
+ bool hasSameDimToLvl(const SparseTensorType &other) const { // If the maps are the identity, then we need to check the rank // to be sure they're the same size identity. (And since identity // means dimRank==lvlRank, we use lvlRank as a minor optimization.) return isIdentity() ? (other.isIdentity() && lvlRank == other.lvlRank) - : (dim2lvl == other.dim2lvl); + : (dimToLvl == other.dimToLvl); } /// Returns the dimension-rank. @@ -255,7 +296,7 @@ const SparseTensorEncodingAttr enc; // Memoized to avoid frequent redundant conditionals. const Level lvlRank; - const AffineMap dim2lvl; + const AffineMap dimToLvl; }; /// Convenience method to abbreviate wrapping `getRankedTensorType`. diff --git a/mlir/lib/Bindings/Python/DialectSparseTensor.cpp b/mlir/lib/Bindings/Python/DialectSparseTensor.cpp --- a/mlir/lib/Bindings/Python/DialectSparseTensor.cpp +++ b/mlir/lib/Bindings/Python/DialectSparseTensor.cpp @@ -40,18 +40,16 @@ .def_classmethod( "get", [](py::object cls, std::vector<MlirSparseTensorDimLevelType> lvlTypes, - std::optional<MlirAffineMap> dimOrdering, - std::optional<MlirAffineMap> higherOrdering, int posWidth, - int crdWidth, MlirContext context) { + std::optional<MlirAffineMap> dimToLvl, int posWidth, int crdWidth, + MlirContext context) { return cls(mlirSparseTensorEncodingAttrGet( context, lvlTypes.size(), lvlTypes.data(), - dimOrdering ? *dimOrdering : MlirAffineMap{nullptr}, - higherOrdering ? *higherOrdering : MlirAffineMap{nullptr}, - posWidth, crdWidth)); + dimToLvl ? *dimToLvl : MlirAffineMap{nullptr}, posWidth, + crdWidth)); }, - py::arg("cls"), py::arg("lvl_types"), py::arg("dim_ordering"), - py::arg("higher_ordering"), py::arg("pos_width"), - py::arg("crd_width"), py::arg("context") = py::none(), + py::arg("cls"), py::arg("lvl_types"), py::arg("dim_to_lvl"), + py::arg("pos_width"), py::arg("crd_width"), + py::arg("context") = py::none(), "Gets a sparse_tensor.encoding from parameters.") .def_property_readonly( "lvl_types", [](MlirAttribute self) { @@ -64,19 +62,9 @@ return ret; }) .def_property_readonly( - "dim_ordering", + "dim_to_lvl", [](MlirAttribute self) -> std::optional<MlirAffineMap> { - MlirAffineMap ret = - mlirSparseTensorEncodingAttrGetDimOrdering(self); - if (mlirAffineMapIsNull(ret)) - return {}; - return ret; - }) - .def_property_readonly( - "higher_ordering", - [](MlirAttribute self) -> std::optional<MlirAffineMap> { - MlirAffineMap ret = - mlirSparseTensorEncodingAttrGetHigherOrdering(self); + MlirAffineMap ret = mlirSparseTensorEncodingAttrGetDimToLvl(self); if (mlirAffineMapIsNull(ret)) return {}; return ret; diff --git a/mlir/lib/CAPI/Dialect/SparseTensor.cpp b/mlir/lib/CAPI/Dialect/SparseTensor.cpp --- a/mlir/lib/CAPI/Dialect/SparseTensor.cpp +++ b/mlir/lib/CAPI/Dialect/SparseTensor.cpp @@ -45,26 +45,21 @@ return isa<SparseTensorEncodingAttr>(unwrap(attr)); } -MlirAttribute mlirSparseTensorEncodingAttrGet( - MlirContext ctx, intptr_t lvlRank, - MlirSparseTensorDimLevelType const *lvlTypes, MlirAffineMap dimOrdering, - MlirAffineMap higherOrdering, int posWidth, int crdWidth) { +MlirAttribute +mlirSparseTensorEncodingAttrGet(MlirContext ctx, intptr_t lvlRank, + MlirSparseTensorDimLevelType const *lvlTypes, + MlirAffineMap dimToLvl, int posWidth, + int crdWidth) { SmallVector<DimLevelType> cppLvlTypes; cppLvlTypes.reserve(lvlRank); for (intptr_t l = 0; l < lvlRank; ++l) cppLvlTypes.push_back(static_cast<DimLevelType>(lvlTypes[l])); return wrap(SparseTensorEncodingAttr::get( unwrap(ctx), cppLvlTypes, unwrap(dimToLvl), posWidth, crdWidth)); } -MlirAffineMap mlirSparseTensorEncodingAttrGetDimOrdering(MlirAttribute attr) { - return
wrap(cast<SparseTensorEncodingAttr>(unwrap(attr)).getDimOrdering()); -} - -MlirAffineMap -mlirSparseTensorEncodingAttrGetHigherOrdering(MlirAttribute attr) { - return wrap(cast<SparseTensorEncodingAttr>(unwrap(attr)).getHigherOrdering()); +MlirAffineMap mlirSparseTensorEncodingAttrGetDimToLvl(MlirAttribute attr) { + return wrap(cast<SparseTensorEncodingAttr>(unwrap(attr)).getDimToLvl()); } intptr_t mlirSparseTensorEncodingGetLvlRank(MlirAttribute attr) { diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp --- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp +++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp @@ -263,15 +263,32 @@ return detail::getIntegerOrIndexType(getContext(), getCrdWidth()); } -SparseTensorEncodingAttr SparseTensorEncodingAttr::withoutOrdering() const { - return SparseTensorEncodingAttr::get(getContext(), getLvlTypes(), AffineMap(), - AffineMap(), getPosWidth(), - getCrdWidth()); +SparseTensorEncodingAttr +SparseTensorEncodingAttr::withDimToLvl(AffineMap dimToLvl) const { + assert(getImpl() && "Uninitialized SparseTensorEncodingAttr"); + return SparseTensorEncodingAttr::get(getContext(), getLvlTypes(), dimToLvl, + getPosWidth(), getCrdWidth()); +} + +SparseTensorEncodingAttr +SparseTensorEncodingAttr::withDimToLvl(SparseTensorEncodingAttr enc) const { + return withDimToLvl(enc ? enc.getDimToLvl() : AffineMap()); +} + +SparseTensorEncodingAttr SparseTensorEncodingAttr::withoutDimToLvl() const { + return withDimToLvl(AffineMap()); +} + +SparseTensorEncodingAttr +SparseTensorEncodingAttr::withBitWidths(unsigned posWidth, + unsigned crdWidth) const { + assert(getImpl() && "Uninitialized SparseTensorEncodingAttr"); + return SparseTensorEncodingAttr::get(getContext(), getLvlTypes(), + getDimToLvl(), posWidth, crdWidth); } SparseTensorEncodingAttr SparseTensorEncodingAttr::withoutBitWidths() const { - return SparseTensorEncodingAttr::get( - getContext(), getLvlTypes(), getDimOrdering(), getHigherOrdering(), 0, 0); + return withBitWidths(0, 0); } bool SparseTensorEncodingAttr::isAllDense() const { @@ -282,8 +299,18 @@ return !getImpl() || llvm::all_of(getLvlTypes(), isOrderedDLT); } -bool SparseTensorEncodingAttr::hasIdDimOrdering() const { - return !getImpl() || !getDimOrdering() || getDimOrdering().isIdentity(); +bool SparseTensorEncodingAttr::isIdentity() const { + return !getImpl() || !getDimToLvl() || getDimToLvl().isIdentity(); +} + +bool SparseTensorEncodingAttr::isPermutation() const { + return !getImpl() || !getDimToLvl() || getDimToLvl().isPermutation(); +} + +Dimension SparseTensorEncodingAttr::getDimRank() const { + assert(getImpl() && "Uninitialized SparseTensorEncodingAttr"); + const auto dimToLvl = getDimToLvl(); + return dimToLvl ? dimToLvl.getNumDims() : getLvlRank(); } Level SparseTensorEncodingAttr::getLvlRank() const { @@ -382,15 +409,14 @@ // Process the data from the parsed dictionary value into struct-like data. SmallVector<DimLevelType> lvlTypes; SmallVector<SparseTensorDimSliceAttr> slices; - AffineMap dimOrd = {}; - AffineMap higherOrd = {}; + AffineMap dimToLvl = {}; unsigned posWidth = 0; unsigned crdWidth = 0; StringRef attrName; - // Exactly 6 keys. + // Exactly 5 keys.
- SmallVector<StringRef> keys = {"lvlTypes", "dimOrdering", "higherOrdering", - "posWidth", "crdWidth", "slice"}; + SmallVector<StringRef> keys = {"lvlTypes", "dimToLvl", "posWidth", + "crdWidth", "slice"}; while (succeeded(parser.parseOptionalKeyword(&attrName))) { if (!llvm::is_contained(keys, attrName)) { parser.emitError(parser.getNameLoc(), "unexpected key: ") << attrName; @@ -420,18 +446,12 @@ return {}; } } - } else if (attrName == "dimOrdering") { + } else if (attrName == "dimToLvl") { Attribute attr; RETURN_ON_FAIL(parser.parseAttribute(attr)) auto affineAttr = llvm::dyn_cast<AffineMapAttr>(attr); - ERROR_IF(!affineAttr, "expected an affine map for dimension ordering") - dimOrd = affineAttr.getValue(); - } else if (attrName == "higherOrdering") { - Attribute attr; - RETURN_ON_FAIL(parser.parseAttribute(attr)) - auto affineAttr = llvm::dyn_cast<AffineMapAttr>(attr); - ERROR_IF(!affineAttr, "expected an affine map for higher ordering") - higherOrd = affineAttr.getValue(); + ERROR_IF(!affineAttr, "expected an affine map for dimToLvl") + dimToLvl = affineAttr.getValue(); } else if (attrName == "posWidth") { Attribute attr; RETURN_ON_FAIL(parser.parseAttribute(attr)) @@ -474,8 +494,7 @@ // Construct struct-like storage for attribute. return parser.getChecked<SparseTensorEncodingAttr>( - parser.getContext(), lvlTypes, dimOrd, higherOrd, posWidth, crdWidth, - slices); + parser.getContext(), lvlTypes, dimToLvl, posWidth, crdWidth, slices); } void SparseTensorEncodingAttr::print(AsmPrinter &printer) const { @@ -486,10 +505,8 @@ }); printer << " ]"; // Print remaining members only for non-default values. - if (!hasIdDimOrdering()) - printer << ", dimOrdering = affine_map<" << getDimOrdering() << ">"; - if (getHigherOrdering()) - printer << ", higherOrdering = affine_map<" << getHigherOrdering() << ">"; + if (!isIdentity()) + printer << ", dimToLvl = affine_map<" << getDimToLvl() << ">"; if (getPosWidth()) printer << ", posWidth = " << getPosWidth(); if (getCrdWidth()) printer << ", crdWidth = " << getCrdWidth(); @@ -510,9 +527,8 @@ LogicalResult SparseTensorEncodingAttr::verify( function_ref<InFlightDiagnostic()> emitError, - ArrayRef<DimLevelType> lvlTypes, AffineMap dimOrdering, - AffineMap higherOrdering, unsigned posWidth, unsigned crdWidth, - ArrayRef<SparseTensorDimSliceAttr> dimSlices) { + ArrayRef<DimLevelType> lvlTypes, AffineMap dimToLvl, unsigned posWidth, + unsigned crdWidth, ArrayRef<SparseTensorDimSliceAttr> dimSlices) { if (!acceptBitWidth(posWidth)) return emitError() << "unexpected position bitwidth: " << posWidth; if (!acceptBitWidth(crdWidth)) return emitError() << "unexpected coordinate bitwidth: " << crdWidth; @@ -525,25 +541,41 @@ const Level lvlRank = lvlTypes.size(); if (lvlRank == 0) return emitError() << "expected a non-empty array for lvlTypes"; - if (dimOrdering) { - if (!dimOrdering.isPermutation()) + // We save `dimRank` here because we'll also need it to verify `dimSlices`. + const Dimension dimRank = dimToLvl ? dimToLvl.getNumDims() : lvlRank; + if (dimToLvl) { + if (dimToLvl.getNumResults() != lvlRank) return emitError() - << "expected a permutation affine map for dimension ordering"; - if (dimOrdering.getNumResults() != lvlRank) - return emitError() - << "level-rank mismatch between dimOrdering and lvlTypes"; + << "level-rank mismatch between dimToLvl and lvlTypes: " + << dimToLvl.getNumResults() << " != " << lvlRank; + // TODO: The following is attempting to match the old error-conditions + // from prior to merging dimOrdering and higherOrdering into dimToLvl. + // That is, we currently require `dimToLvl` to be either a permutation + // (as when higherOrdering is the identity) or expansive (as per the + // constraints on higherOrdering). However, those constraints do + // not match the intended semantics of `dimToLvl`.
As we improve the + // compiler to actually handle non-permutations, we need to update these + // checks to match what is actually supported. In particular, this is + // where we'll have to check that when `lvlToDim` is provided then it + // is indeed an inverse of `dimToLvl`, and when it isn't provided then + // it can be automatically inferred. + if (dimRank == lvlRank && !dimToLvl.isPermutation()) + return emitError() << "expected a permutation affine map for dimToLvl"; + if (dimRank > lvlRank) + return emitError() << "unexpected dimToLvl mapping from " << dimRank + << " to " << lvlRank; } - if (higherOrdering) { - if (higherOrdering.getNumDims() >= higherOrdering.getNumResults()) - return emitError() << "unexpected higher ordering mapping from " - << higherOrdering.getNumDims() << " to " - << higherOrdering.getNumResults(); - if (higherOrdering.getNumResults() != lvlRank) + if (!dimSlices.empty()) { + if (dimSlices.size() != dimRank) return emitError() - << "level-rank mismatch between higherOrdering and lvlTypes"; - } - if (!dimSlices.empty() && dimSlices.size() != lvlRank) { - return emitError() << "level-rank mismatch between dimSlices and lvlTypes"; + << "dimension-rank mismatch between dimSlices and dimToLvl: " + << dimSlices.size() << " != " << dimRank; + // Compiler support for `dimSlices` currently requires that the two + // ranks agree. (However, it does allow `dimToLvl` to be a permutation.) + if (dimRank != lvlRank) + return emitError() + << "dimSlices expected dimension-rank to match level-rank: " + << dimRank << " != " << lvlRank; } return success(); } @@ -558,24 +590,18 @@ function_ref<InFlightDiagnostic()> emitError) const { // Check structural integrity. In particular, this ensures that the // level-rank is coherent across all the fields. - RETURN_FAILURE_IF_FAILED(verify(emitError, getLvlTypes(), getDimOrdering(), - getHigherOrdering(), getPosWidth(), - getCrdWidth(), getDimSlices())) + RETURN_FAILURE_IF_FAILED(verify(emitError, getLvlTypes(), getDimToLvl(), + getPosWidth(), getCrdWidth(), getDimSlices())) // Check integrity with tensor type specifics. In particular, we // need only check that the dimension-rank of the tensor agrees with // the dimension-rank of the encoding. const Dimension dimRank = dimShape.size(); if (dimRank == 0) return emitError() << "expected non-scalar sparse tensor"; - if (const auto higherOrdering = getHigherOrdering()) { - if (higherOrdering.getNumDims() != dimRank) - return emitError() << "expected an affine map with " << dimRank - << " dimensions for higher ordering"; - // TODO: verification of higher ordering contents - } else if (dimRank != getLvlRank()) { - return emitError() << "expected an array of size " << dimRank - << " for lvlTypes"; - } + if (getDimRank() != dimRank) + return emitError() + << "dimension-rank mismatch between encoding and tensor shape: " + << getDimRank() << " != " << dimRank; return success(); } @@ -627,14 +653,14 @@ AffineMap lvlPerm, bool ordered) { const SparseTensorType src(rtt); - // The dim-rank of the source `RankedTensorType` is used as the lvl-rank - // of the result `RankedTensorType`. This follows from the fact that the - // result's encoding has the default higher-ordering (hence the result's - // lvl-rank equals its dim-rank). We don't need to assert that `lvlRank` - // agrees with the size of `lvlPerm` because that will be verified by - // `STEA::get`. - const Level lvlRank = src.getDimRank(); + // TODO: This assertion is to match the behavior from before we merged + // dimOrdering and higherOrdering into dimToLvl.
However, there's no + // in-principle reason to require this. (wrengr has a commit in the + // wings to fix this.) + assert(src.isPermutation()); + const Level lvlRank = src.getLvlRank(); SmallVector<DimLevelType> lvlTypes; + lvlTypes.reserve(lvlRank); // An unordered and non-unique compressed level at beginning. // If this is also the last level, then it is unique. @@ -655,7 +681,7 @@ unsigned posWidth = src.getPosWidth(); unsigned crdWidth = src.getCrdWidth(); auto enc = SparseTensorEncodingAttr::get(src.getContext(), lvlTypes, lvlPerm, - AffineMap(), posWidth, crdWidth); + posWidth, crdWidth); return RankedTensorType::get(src.getDimShape(), src.getElementType(), enc); } @@ -671,10 +697,9 @@ Dimension mlir::sparse_tensor::toOrigDim(SparseTensorEncodingAttr enc, Level l) { if (enc) { - auto order = enc.getDimOrdering(); - if (order) { - assert(order.isPermutation()); - return order.getDimPosition(l); + if (const auto dimToLvl = enc.getDimToLvl()) { + assert(enc.isPermutation()); + return dimToLvl.getDimPosition(l); } } return l; @@ -685,11 +710,10 @@ Level mlir::sparse_tensor::toStoredDim(SparseTensorEncodingAttr enc, Dimension d) { if (enc) { - auto order = enc.getDimOrdering(); - if (order) { - assert(order.isPermutation()); + if (const auto dimToLvl = enc.getDimToLvl()) { + assert(enc.isPermutation()); auto maybePos = - order.getResultPosition(getAffineDimExpr(d, enc.getContext())); + dimToLvl.getResultPosition(getAffineDimExpr(d, enc.getContext())); assert(maybePos.has_value()); return *maybePos; } @@ -728,8 +752,7 @@ return SparseTensorEncodingAttr::get( enc.getContext(), dlts, - AffineMap(), // dimOrdering (irrelavant to storage speicifer) - AffineMap(), // highLvlOrdering (irrelavant to storage specifer) + AffineMap(), // dimToLvl (irrelevant to storage specifier) // Always use `index` for memSize and lvlSize instead of reusing // `getPosWidth` and `getCrdWidth`. It allows us to reuse the same SSA // value for different bitwidth, it also avoids casting between index and diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp @@ -385,7 +385,7 @@ // FIXME: `toOrigDim` is deprecated. For now this relies on the // 1:1 mapping between levels and dimensions, since nowhere else - // in the code supports HigherOrdering yet either. + // in the code supports non-permutations yet either. Value lvlSz = mlir::linalg::createOrFoldDimOp(builder, loc, tensor, toOrigDim(enc, l)); // Find upper bound in current dimension. diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp @@ -481,7 +481,7 @@ nameOstream << sh << "_"; // Permutation information is also used in generating insertion.
if (!stt.isIdentity()) - nameOstream << stt.getDimToLvlMap() << "_"; + nameOstream << stt.getDimToLvl() << "_"; nameOstream << stt.getElementType() << "_"; nameOstream << stt.getCrdWidth() << "_" << stt.getPosWidth(); return nameOstream.str().str(); } @@ -1139,8 +1139,7 @@ if (!srcEnc || !dstEnc || !dstEnc.isSlice()) return failure(); assert(srcEnc.getLvlTypes() == dstEnc.getLvlTypes()); - assert(srcEnc.getDimOrdering() == dstEnc.getDimOrdering()); - assert(srcEnc.getHigherOrdering() == dstEnc.getHigherOrdering()); + assert(srcEnc.getDimToLvl() == dstEnc.getDimToLvl()); assert(srcEnc.getPosWidth() == dstEnc.getPosWidth()); assert(srcEnc.getCrdWidth() == dstEnc.getCrdWidth()); @@ -1168,7 +1167,7 @@ // FIXME: we need to distinguish level sizes and dimension size for slices // here. Maybe we should store slice level sizes in a different array // instead of reusing it. - assert(srcEnc.hasIdDimOrdering()); + assert(srcEnc.isIdentity()); desc.setSpecifierField(rewriter, loc, StorageSpecifierKind::LvlSize, dim, sizeV); desc.setSpecifierField(rewriter, loc, StorageSpecifierKind::DimStride, @@ -1423,26 +1422,26 @@ fields, nse); MutSparseTensorDescriptor desc(dstTp, fields); - // Construct the `dim2lvl` buffer for handing off to the runtime library. + // Construct the `dimToLvl` buffer for handing off to the runtime library. // FIXME: This code is (mostly) copied from the SparseTensorConversion.cpp // handling of `NewOp`, and only handles permutations. Fixing this // requires waiting for wrengr to finish redoing the CL that handles // all dim<->lvl stuff more robustly. - SmallVector<Value> dim2lvlValues(dimRank); + SmallVector<Value> dimToLvlValues(dimRank); if (!dstTp.isIdentity()) { - const auto dimOrder = dstTp.getDimToLvlMap(); - assert(dimOrder.isPermutation() && "Got non-permutation"); + const auto dimToLvl = dstTp.getDimToLvl(); + assert(dimToLvl.isPermutation() && "Got non-permutation"); for (Level l = 0; l < lvlRank; l++) { - const Dimension d = dimOrder.getDimPosition(l); - dim2lvlValues[d] = constantIndex(rewriter, loc, l); + const Dimension d = dimToLvl.getDimPosition(l); + dimToLvlValues[d] = constantIndex(rewriter, loc, l); } } else { // The `SparseTensorType` ctor already ensures `dimRank == lvlRank` // when `isIdentity`; so no need to re-assert it here. for (Dimension d = 0; d < dimRank; d++) - dim2lvlValues[d] = constantIndex(rewriter, loc, d); + dimToLvlValues[d] = constantIndex(rewriter, loc, d); } - Value dim2lvl = allocaBuffer(rewriter, loc, dim2lvlValues); + Value dimToLvl = allocaBuffer(rewriter, loc, dimToLvlValues); // Read the COO tensor data. Value xs = desc.getAOSMemRef(); @@ -1458,7 +1457,7 @@ primaryTypeFunctionSuffix(elemTp)}; Value isSorted = createFuncCall(rewriter, loc, readToBuffersFuncName, {boolTp}, - {reader, dim2lvl, xs, ys}, EmitCInterface::On) + {reader, dimToLvl, xs, ys}, EmitCInterface::On) .getResult(0); // If the destination tensor is a sorted COO, we need to sort the COO tensor diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp @@ -96,7 +96,7 @@ // `getDimPosition` checks that the expr isa `AffineDimExpr`, // which is all we care about (for supporting permutations). const Dimension dim = - stt.isIdentity() ? lvl : stt.getDimToLvlMap().getDimPosition(lvl); + stt.isIdentity() ?
lvl : stt.getDimToLvl().getDimPosition(lvl); if (const auto sz = stt.getStaticDimSize(dim)) return constantIndex(builder, loc, *sz); // If we cannot statically compute the size from the shape, then we @@ -259,9 +259,9 @@ // TODO: This is only ever used for passing into `genAddEltCall`; // is there a better way to encapsulate that pattern (both to avoid // this one-off getter, and to avoid potential mixups)? - Value getDim2LvlMap() const { - assert(isInitialized() && "Must initialize before getDim2LvlMap"); - return params[kParamDim2Lvl]; + Value getDimToLvl() const { + assert(isInitialized() && "Must initialize before getDimToLvl"); + return params[kParamDimToLvl]; } /// Generates a function call, with the current static parameters @@ -282,8 +282,8 @@ static constexpr unsigned kParamDimSizes = 0; static constexpr unsigned kParamLvlSizes = 1; static constexpr unsigned kParamLvlTypes = 2; - static constexpr unsigned kParamLvl2Dim = 3; - static constexpr unsigned kParamDim2Lvl = 4; + static constexpr unsigned kParamLvlToDim = 3; + static constexpr unsigned kParamDimToLvl = 4; static constexpr unsigned kParamPosTp = 5; static constexpr unsigned kParamCrdTp = 6; static constexpr unsigned kParamValTp = 7; @@ -311,39 +311,39 @@ "Dimension-rank mismatch"); params[kParamDimSizes] = allocaBuffer(builder, loc, dimSizes); // The level-sizes array must be passed as well, since for arbitrary - // dim2lvl mappings it cannot be trivially reconstructed at runtime. + // dimToLvl mappings it cannot be trivially reconstructed at runtime. // For now however, since we're still assuming permutations, we will - // initialize this parameter alongside the `dim2lvl` and `lvl2dim` + // initialize this parameter alongside the `dimToLvl` and `lvlToDim` // parameters below. We preinitialize `lvlSizes` for code symmetry. SmallVector<Value> lvlSizes(lvlRank); // The dimension-to-level mapping and its inverse. We must preinitialize - // `dim2lvl` so that the true branch below can perform random-access - // `operator[]` assignment. We preinitialize `lvl2dim` for code symmetry. - SmallVector<Value> dim2lvl(dimRank); - SmallVector<Value> lvl2dim(lvlRank); + // `dimToLvl` so that the true branch below can perform random-access + // `operator[]` assignment. We preinitialize `lvlToDim` for code symmetry. + SmallVector<Value> dimToLvl(dimRank); + SmallVector<Value> lvlToDim(lvlRank); if (!stt.isIdentity()) { - const auto dimOrder = stt.getDimToLvlMap(); - assert(dimOrder.isPermutation()); + const auto dimToLvlMap = stt.getDimToLvl(); + assert(dimToLvlMap.isPermutation()); for (Level l = 0; l < lvlRank; l++) { // The `d`th source variable occurs in the `l`th result position. - const Dimension d = dimOrder.getDimPosition(l); - dim2lvl[d] = constantIndex(builder, loc, l); - lvl2dim[l] = constantIndex(builder, loc, d); + const Dimension d = dimToLvlMap.getDimPosition(l); + dimToLvl[d] = constantIndex(builder, loc, l); + lvlToDim[l] = constantIndex(builder, loc, d); lvlSizes[l] = dimSizes[d]; } } else { // The `SparseTensorType` ctor already ensures `dimRank == lvlRank` // when `isIdentity`; so no need to re-assert it here. for (Level l = 0; l < lvlRank; l++) { - dim2lvl[l] = lvl2dim[l] = constantIndex(builder, loc, l); + dimToLvl[l] = lvlToDim[l] = constantIndex(builder, loc, l); lvlSizes[l] = dimSizes[l]; } } params[kParamLvlSizes] = allocaBuffer(builder, loc, lvlSizes); - params[kParamLvl2Dim] = allocaBuffer(builder, loc, lvl2dim); - params[kParamDim2Lvl] = stt.isIdentity() - ?
params[kParamLvl2Dim] - : allocaBuffer(builder, loc, dim2lvl); + params[kParamLvlToDim] = allocaBuffer(builder, loc, lvlToDim); + params[kParamDimToLvl] = stt.isIdentity() + ? params[kParamLvlToDim] + : allocaBuffer(builder, loc, dimToLvl); // Secondary and primary types encoding. setTemplateTypes(stt); // Finally, make note that initialization is complete. @@ -383,9 +383,9 @@ /// t->add(&val, [i1,..,ik], [p1,..,pk]); static void genAddEltCall(OpBuilder &builder, Location loc, Type eltType, Value lvlCOO, Value valPtr, Value dimCoords, - Value dim2lvl) { + Value dimToLvl) { SmallString<9> name{"addElt", primaryTypeFunctionSuffix(eltType)}; - SmallVector<Value> params{lvlCOO, valPtr, dimCoords, dim2lvl}; + SmallVector<Value> params{lvlCOO, valPtr, dimCoords, dimToLvl}; Type pTp = getOpaquePointerType(builder); createFuncCall(builder, loc, name, pTp, params, EmitCInterface::On); } @@ -481,7 +481,7 @@ SmallVector<Value> srcDimSizes = getDimSizes(rewriter, loc, srcTp, adaptor.getSrc()); NewCallParams params(rewriter, loc); - Value iter = params.genBuffers(srcTp.withoutOrdering(), srcDimSizes) + Value iter = params.genBuffers(srcTp.withoutDimToLvl(), srcDimSizes) .genNewCall(Action::kToIterator, adaptor.getSrc()); // Start a new COO for the destination tensor. SmallVector<Value> dstDimSizes; @@ -493,7 +493,7 @@ dstTp.getDimShape(), op.getReassociationIndices()); const Value coo = params.genBuffers(dstTp, dstDimSizes).genNewCall(Action::kEmptyCOO); - const Value dstPerm = params.getDim2LvlMap(); + const Value dstDimToLvl = params.getDimToLvl(); // Construct a while loop over the iterator. const Type iTp = rewriter.getIndexType(); const Value srcDimCoords = genAlloca(rewriter, loc, srcTp.getDimRank(), iTp); @@ -515,7 +515,7 @@ assert(dstTp.getDimRank() == dstDimSizes.size()); reshapeCoords(loc, rewriter, op.getReassociationIndices(), srcDimSizes, srcDimCoords, dstDimSizes, dstDimCoords); - genAddEltCall(rewriter, loc, elemTp, coo, elemPtr, dstDimCoords, dstPerm); + genAddEltCall(rewriter, loc, elemTp, coo, elemPtr, dstDimCoords, dstDimToLvl); rewriter.create<scf::YieldOp>(loc); // Final call to construct sparse tensor storage and free temporary resources. rewriter.setInsertionPointAfter(whileOp); @@ -544,7 +544,7 @@ const Type elemTp = stt.getElementType(); // Start an iterator over the tensor (in coordinate order). - const auto noPerm = stt.withoutOrdering(); + const auto noPerm = stt.withoutDimToLvl(); SmallVector<Value> dimSizes = getDimSizes(rewriter, loc, noPerm, t); Value iter = NewCallParams(rewriter, loc) .genBuffers(noPerm, dimSizes) @@ -714,7 +714,7 @@ SmallVector<Value> dimShapeValues = getDimShape(rewriter, loc, stt); Value dimShapeBuffer = allocaBuffer(rewriter, loc, dimShapeValues); // Allocate `SparseTensorReader` and perform all initial setup that - // does not depend on lvlSizes (nor dim2lvl, lvl2dim, etc). + // does not depend on lvlSizes (nor dimToLvl, lvlToDim, etc). Type opaqueTp = getOpaquePointerType(rewriter); Value valTp = constantPrimaryTypeEncoding(rewriter, loc, stt.getElementType()); @@ -729,7 +729,7 @@ // compile-time. If dimShape is dynamic, then we'll need to generate // code for computing lvlSizes from the `reader`'s actual dimSizes. // - // TODO: For now we're still assuming `dim2lvl` is a permutation. + // TODO: For now we're still assuming `dimToLvl` is a permutation. // But since we're computing lvlSizes here (rather than in the runtime), // we can easily generalize that simply by adjusting this code.
// @@ -744,31 +744,31 @@ .getResult(0); } Value lvlSizesBuffer; - Value lvl2dimBuffer; - Value dim2lvlBuffer; + Value lvlToDimBuffer; + Value dimToLvlBuffer; if (!stt.isIdentity()) { - const auto dimOrder = stt.getDimToLvlMap(); - assert(dimOrder.isPermutation() && "Got non-permutation"); - // We preinitialize `dim2lvlValues` since we need random-access writing. + const auto dimToLvl = stt.getDimToLvl(); + assert(dimToLvl.isPermutation() && "Got non-permutation"); + // We preinitialize `dimToLvlValues` since we need random-access writing. // And we preinitialize the others for stylistic consistency. SmallVector<Value> lvlSizeValues(lvlRank); - SmallVector<Value> lvl2dimValues(lvlRank); - SmallVector<Value> dim2lvlValues(dimRank); + SmallVector<Value> lvlToDimValues(lvlRank); + SmallVector<Value> dimToLvlValues(dimRank); for (Level l = 0; l < lvlRank; l++) { // The `d`th source variable occurs in the `l`th result position. - Dimension d = dimOrder.getDimPosition(l); + Dimension d = dimToLvl.getDimPosition(l); Value lvl = constantIndex(rewriter, loc, l); Value dim = constantIndex(rewriter, loc, d); - dim2lvlValues[d] = lvl; - lvl2dimValues[l] = dim; + dimToLvlValues[d] = lvl; + lvlToDimValues[l] = dim; lvlSizeValues[l] = stt.isDynamicDim(d) ? rewriter.create<memref::LoadOp>(loc, dimSizesBuffer, dim) : dimShapeValues[d]; } lvlSizesBuffer = allocaBuffer(rewriter, loc, lvlSizeValues); - lvl2dimBuffer = allocaBuffer(rewriter, loc, lvl2dimValues); - dim2lvlBuffer = allocaBuffer(rewriter, loc, dim2lvlValues); + lvlToDimBuffer = allocaBuffer(rewriter, loc, lvlToDimValues); + dimToLvlBuffer = allocaBuffer(rewriter, loc, dimToLvlValues); } else { // The `SparseTensorType` ctor already ensures `dimRank == lvlRank` // when `isIdentity`; so no need to re-assert it here. @@ -777,15 +777,15 @@ for (Level l = 0; l < lvlRank; l++) iotaValues.push_back(constantIndex(rewriter, loc, l)); lvlSizesBuffer = dimSizesBuffer ? dimSizesBuffer : dimShapeBuffer; - dim2lvlBuffer = lvl2dimBuffer = allocaBuffer(rewriter, loc, iotaValues); + dimToLvlBuffer = lvlToDimBuffer = allocaBuffer(rewriter, loc, iotaValues); } // Use the `reader` to parse the file. SmallVector<Value> params{ reader, lvlSizesBuffer, genLvlTypesBuffer(rewriter, loc, stt), - lvl2dimBuffer, - dim2lvlBuffer, + lvlToDimBuffer, + dimToLvlBuffer, constantPosTypeEncoding(rewriter, loc, stt.getEncoding()), constantCrdTypeEncoding(rewriter, loc, stt.getEncoding()), valTp}; @@ -895,10 +895,8 @@ // Set up encoding with right mix of src and dst so that the two // method calls can share most parameters, while still providing // the correct sparsity information to either of them. - const auto mixedEnc = SparseTensorEncodingAttr::get( - op->getContext(), dstEnc.getLvlTypes(), dstEnc.getDimOrdering(), - dstEnc.getHigherOrdering(), srcEnc.getPosWidth(), - srcEnc.getCrdWidth()); + const auto mixedEnc = + dstEnc.withBitWidths(srcEnc.getPosWidth(), srcEnc.getCrdWidth()); // TODO: This is the only place where `kToCOO` (or `kToIterator`) // is called with a non-identity permutation. Is there any clean // way to push the permutation over to the `kFromCOO` side instead?
@@ -927,7 +925,7 @@ const auto dstEnc = SparseTensorEncodingAttr::get( op->getContext(), SmallVector<DimLevelType>(dimRank, DimLevelType::Dense), AffineMap(), - AffineMap(), srcEnc.getPosWidth(), srcEnc.getCrdWidth()); + srcEnc.getPosWidth(), srcEnc.getCrdWidth()); SmallVector<Value> dimSizes = getDimSizes(rewriter, loc, srcTp, src); Value iter = NewCallParams(rewriter, loc) .genBuffers(dstTp.withEncoding(dstEnc), dimSizes) @@ -996,7 +994,7 @@ params.genBuffers(dstTp, dimSizes).genNewCall(Action::kEmptyCOO); const Type iTp = rewriter.getIndexType(); Value dimCoords = genAlloca(rewriter, loc, dimRank, iTp); - Value perm = params.getDim2LvlMap(); + Value dimToLvl = params.getDimToLvl(); Value elemPtr = genAllocaScalar(rewriter, loc, elemTp); genDenseTensorOrSparseConstantIterLoop( rewriter, loc, src, dimRank, @@ -1004,7 +1002,8 @@ assert(dcvs.size() == static_cast<size_t>(dimRank)); storeAll(builder, loc, dimCoords, dcvs); builder.create<memref::StoreOp>(loc, val, elemPtr); - genAddEltCall(builder, loc, elemTp, coo, elemPtr, dimCoords, perm); + genAddEltCall(builder, loc, elemTp, coo, elemPtr, dimCoords, + dimToLvl); }); // Final call to construct sparse tensor storage. Value dst = params.genNewCall(Action::kFromCOO, coo); @@ -1284,7 +1283,7 @@ const Dimension dimRank = dstTp.getDimRank(); Value dst; // destination tensor - Value dstPerm; // destination tensor permutation (if sparse out) + Value dstDimToLvl; // destination tensor permutation (if sparse out) // A pointer to the value being inserted (if dense => sparse) Value elemPtr; // Memory that holds the dim-coords for destination tensor (if sparse out) @@ -1318,7 +1317,7 @@ dst = reshapeValuesToLevels(rewriter, loc, dstEnc, dimSizes, dst, dstDimCoords); } else { - dstPerm = params.getDim2LvlMap(); + dstDimToLvl = params.getDimToLvl(); elemPtr = genAllocaScalar(rewriter, loc, elemTp); } } else { @@ -1350,7 +1349,7 @@ // Case: sparse => sparse, except for annotated all dense. storeAll(builder, loc, dstDimCoords, dcvs); genAddEltCall(builder, loc, elemTp, dst, elemPtr, dstDimCoords, - dstPerm); + dstDimToLvl); } else { // Case: sparse => dense, or annotated all dense. const auto lcvs = allDense ? dcvs2lcvs(dcvs) : dcvs; @@ -1368,7 +1367,7 @@ Value val = genValueForDense(builder, loc, adaptedOp, dcvs); builder.create<memref::StoreOp>(loc, val, elemPtr); genAddEltCall(builder, loc, elemTp, dst, elemPtr, dstDimCoords, - dstPerm); + dstDimToLvl); } else { // Case: dense => dense, or annotated all dense. Value val = genValueForDense(builder, loc, adaptedOp, dcvs); @@ -1420,7 +1419,7 @@ Value src = adaptor.getOperands()[0]; SmallVector<Value> dimSizes = getDimSizes(rewriter, loc, srcTp, src); Value coo = NewCallParams(rewriter, loc) - .genBuffers(srcTp.withoutOrdering(), dimSizes) + .genBuffers(srcTp.withoutDimToLvl(), dimSizes) .genNewCall(Action::kToCOO, src); // Then output the tensor to external file with coordinates in the // externally visible lexicographic coordinate order.
A sort is diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp @@ -128,13 +128,14 @@ // TODO: The dim level property of the COO type relies on input tensors, the // shape relies on the output tensor -static RankedTensorType -getUnorderedCOOFromTypeWithOrdering(RankedTensorType src, AffineMap ordering) { - return getCOOFromTypeWithOrdering(src, ordering, false); +static RankedTensorType getCOOType(const SparseTensorType &stt, bool ordered) { + return getCOOFromTypeWithOrdering(stt, stt.getDimToLvl(), ordered); } -static RankedTensorType getUnorderedCOOFromType(RankedTensorType src) { - return getCOOFromType(src, false); +static RankedTensorType getBufferType(const SparseTensorType &stt, + bool needTmpCOO) { + return needTmpCOO ? getCOOType(stt, /*ordered=*/false) + : stt.getRankedTensorType(); } /// Collects the dynamic dimension sizes for `tp` with the assumption that @@ -409,10 +410,9 @@ Value nnz = rewriter.create<NumberOfEntriesOp>(loc, srcTensor); // Only need an unordered COO buffer if input and output are not sorted // in the same way. - Type bufferTp = - srcTp.isAllOrdered() && srcTp.isIdentity() && dstTp.isIdentity() - ? dstTp.getRankedTensorType() - : getUnorderedCOOFromType(dstTp); + Type bufferTp = getBufferType( + dstTp.withoutDimToLvl(), + !srcTp.isAllOrdered() || !srcTp.isIdentity() || !dstTp.isIdentity()); SmallVector<Value> dynSizes; Value buffer = rewriter .create<AllocTensorOp>(loc, bufferTp, dynSizes, Value(), @@ -520,10 +520,9 @@ Value nnz = rewriter.create<NumberOfEntriesOp>(loc, srcTensor); // Only need an unordered COO buffer if input and output are not sorted // in the same way. - Type bufferTp = - srcTp.isAllOrdered() && srcTp.isIdentity() && dstTp.isIdentity() - ? dstTp.getRankedTensorType() - : getUnorderedCOOFromType(dstTp); + Type bufferTp = getBufferType( + dstTp.withoutDimToLvl(), + !srcTp.isAllOrdered() || !srcTp.isIdentity() || !dstTp.isIdentity()); Value buffer = rewriter @@ -646,12 +645,12 @@ Value annotatedDenseDst; if (dstTp.hasEncoding()) { bool allOrdered = false; - // When concatenating on dimension 0, and all inputs are sorted and have - // an identity dimOrdering, the concatenate will generate coords in - // lexOrder thus no need for the tmp COO buffer. + // When concatenating on dimension 0, and all inputs are sorted + // and have an identity dimToLvl, the concatenate will generate + // coords in lexOrder thus no need for the tmp COO buffer. // TODO: When conDim != 0, as long as conDim is the first dimension // in all input/output buffers, and all input/output buffers have the same - // dimOrdering, the tmp COO buffer is still unnecessary (e.g, concatenate + // dimToLvl, the tmp COO buffer is still unnecessary (e.g., concatenate // CSC matrices along column). if (!allDense && conDim == 0 && dstTp.isIdentity()) { for (auto i : op.getInputs()) { } needTmpCOO = !allDense && !allOrdered; - const RankedTensorType tp = needTmpCOO ? getUnorderedCOOFromType(dstTp) - : dstTp.getRankedTensorType(); + const RankedTensorType tp = + getBufferType(dstTp.withoutDimToLvl(), needTmpCOO); encDst = needTmpCOO ? getSparseTensorEncoding(tp) : encDst; SmallVector<Value> dynSizes; getDynamicSizes(dstTp, sizes, dynSizes); @@ -829,16 +828,20 @@ // COO tensor. // TODO: enhance foreachOp to take ordering to remove the need of a // temporary COO tensor here.
- const RankedTensorType bufferTp = dstTp.isIdentity() || fromSparseConst ? dstTp.getRankedTensorType() : getUnorderedCOOFromTypeWithOrdering( dstTp, dstTp.getDimToLvlMap()); + const RankedTensorType bufferTp = + getBufferType(dstTp, !dstTp.isIdentity() && !fromSparseConst); // Only imposes foreach order on dense constant (which will be statically // sorted by the sparse compiler), otherwise the rotated loop sequence // results to bad cache locality. - AffineMapAttr foreachOrder = nullptr; - if (encDst.getDimOrdering() && fromSparseConst) - foreachOrder = AffineMapAttr::get(encDst.getDimOrdering()); + const AffineMapAttr foreachOrder = + (!dstTp.isIdentity() && fromSparseConst) + ? AffineMapAttr::get(dstTp.getExpandedDimToLvl()) + : nullptr; + // TODO: This assertion is to match the behavior from before we merged + // dimOrdering and higherOrdering into dimToLvl. Although the above + // can construct `foreachOrder` for non-permutations, it's not clear + // that the `foreachOp` below actually supports non-permutations. + assert(!foreachOrder || dstTp.isPermutation()); auto buffer = rewriter.create<AllocTensorOp>(loc, bufferTp, dynSizes).getResult(); @@ -948,17 +951,16 @@ // 1. the src tensor is not a COO and // 2. the src tensor is not ordered in the same way as the target // tensor (e.g., src tensor is not ordered or src tensor haves a different - dimOrdering). + dimToLvl). if (const SparseTensorType srcTp(srcRTT); - !(srcTp.isAllOrdered() && srcTp.hasSameDimToLvlMap(dstTp))) { + !(srcTp.isAllOrdered() && srcTp.hasSameDimToLvl(dstTp))) { // Construct a COO tensor from the src tensor. // TODO: there may be cases for which more efficiently without // going through an intermediate COO, such as cases that only change // the overhead types. SmallVector<Value> dynSrcSizes; getDynamicSizes(srcRTT, srcSizes, dynSrcSizes); - srcRTT = - getUnorderedCOOFromTypeWithOrdering(srcRTT, dstTp.getDimToLvlMap()); + srcRTT = getCOOType(srcTp.withDimToLvl(dstTp), /*ordered=*/false); // Ensure that mutating `srcRTT` didn't invalidate `dimRank`. assert(static_cast<Dimension>(srcRTT.getRank()) == dimRank); tmpCoo = rewriter @@ -993,7 +995,7 @@ // Sort the COO tensor so that its elements are ordered via increasing // coordinates for the storage ordering of the dst tensor. Use SortCoo // if the COO tensor has the same ordering as the dst tensor. - if (dimRank > 1 && srcTp.hasSameDimToLvlMap(dstTp)) { + if (dimRank > 1 && srcTp.hasSameDimToLvl(dstTp)) { Value xs = genToCoordinatesBuffer(rewriter, loc, src); rewriter.create<SortCooOp>( loc, nnz, xs, ValueRange{y}, rewriter.getIndexAttr(dimRank), @@ -1172,8 +1174,7 @@ // Implement the NewOp as follows: // %orderedCoo = sparse_tensor.new %filename // %t = sparse_tensor.convert %orderedCoo - RankedTensorType cooTp = - getCOOFromTypeWithOrdering(dstTp, encDst.getDimOrdering(), true); + RankedTensorType cooTp = getCOOType(dstTp, /*ordered=*/true); Value cooTensor = rewriter.create<NewOp>(loc, cooTp, op.getSource()); Value convert = rewriter.replaceOpWithNewOp<ConvertOp>( op, dstTp.getRankedTensorType(), cooTensor); diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp @@ -1920,11 +1920,14 @@ // especially if it is a direct yield!
// auto srcTp = getRankedTensorType(tval); - auto dstEnc = SparseTensorEncodingAttr::get( - getContext(), srcEnc.getLvlTypes(), - permute(env, env.op().getMatchingIndexingMap(t)), // new order - srcEnc.getHigherOrdering(), srcEnc.getPosWidth(), - srcEnc.getCrdWidth()); + // TODO: This assertion is to match the behavior from prior to + // merging dimOrdering and higherOrdering into dimToLvl. However, + // since `permute` returns a permutation, we can remove this + // restriction by instead composing the result of `permute` + // with `srcEnc.getDimToLvl`. + assert(srcEnc.isPermutation()); + auto dstEnc = + srcEnc.withDimToLvl(permute(env, env.op().getMatchingIndexingMap(t))); auto dstTp = RankedTensorType::get(srcTp.getShape(), srcTp.getElementType(), dstEnc); auto convert = rewriter.create<ConvertOp>(tval.getLoc(), dstTp, tval); diff --git a/mlir/test/CAPI/sparse_tensor.c b/mlir/test/CAPI/sparse_tensor.c --- a/mlir/test/CAPI/sparse_tensor.c +++ b/mlir/test/CAPI/sparse_tensor.c @@ -26,8 +26,7 @@ const char *originalAsm = "#sparse_tensor.encoding<{ " "lvlTypes = [ \"dense\", \"compressed\", \"compressed\"], " - "dimOrdering = affine_map<(d0, d1, d2) -> (d0, d1, d2)>, " - "higherOrdering = affine_map<(d0, d1)[s0] -> (s0, d0, d1)>, " + "dimToLvl = affine_map<(d0, d1)[s0] -> (s0, d0, d1)>, " "posWidth = 32, crdWidth = 64 }>"; // clang-format on MlirAttribute originalAttr = @@ -35,14 +34,10 @@ // CHECK: isa: 1 fprintf(stderr, "isa: %d\n", mlirAttributeIsASparseTensorEncodingAttr(originalAttr)); - MlirAffineMap dimOrdering = - mlirSparseTensorEncodingAttrGetDimOrdering(originalAttr); - // CHECK: (d0, d1, d2) -> (d0, d1, d2) - mlirAffineMapDump(dimOrdering); - MlirAffineMap higherOrdering = - mlirSparseTensorEncodingAttrGetHigherOrdering(originalAttr); + MlirAffineMap dimToLvl = + mlirSparseTensorEncodingAttrGetDimToLvl(originalAttr); // CHECK: (d0, d1)[s0] -> (s0, d0, d1) - mlirAffineMapDump(higherOrdering); + mlirAffineMapDump(dimToLvl); // CHECK: level_type: 4 // CHECK: level_type: 8 // CHECK: level_type: 8 @@ -61,7 +56,7 @@ fprintf(stderr, "crdWidth: %d\n", crdWidth); MlirAttribute newAttr = mlirSparseTensorEncodingAttrGet( - ctx, lvlRank, lvlTypes, dimOrdering, higherOrdering, posWidth, crdWidth); + ctx, lvlRank, lvlTypes, dimToLvl, posWidth, crdWidth); mlirAttributeDump(newAttr); // For debugging filecheck output. // CHECK: equal: 1 fprintf(stderr, "equal: %d\n", mlirAttributeEqual(originalAttr, newAttr)); diff --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir --- a/mlir/test/Dialect/SparseTensor/codegen.mlir +++ b/mlir/test/Dialect/SparseTensor/codegen.mlir @@ -32,7 +32,7 @@ #CSC = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - dimOrdering = affine_map<(i, j) -> (j, i)> + dimToLvl = affine_map<(i, j) -> (j, i)> }> #DCSR = #sparse_tensor.encoding<{ @@ -43,7 +43,7 @@ #Dense3D = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "dense" ], - dimOrdering = affine_map<(i, j, k) -> (k, i, j)> + dimToLvl = affine_map<(i, j, k) -> (k, i, j)> }> #Coo = #sparse_tensor.encoding<{ @@ -52,7 +52,7 @@ #CooPNo = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton-no" ], - dimOrdering = affine_map<(i, j) -> (j, i)> + dimToLvl = affine_map<(i, j) -> (j, i)> }> #ccoo = #sparse_tensor.encoding<{ @@ -189,7 +189,7 @@ // // Querying for dimension 1 in the tensor type needs to be permuted // into querying for dimension 2 in the stored sparse tensor scheme, -// since the latter honors the dimOrdering.
+// since the latter honors the dimToLvl mapping. // // CHECK-LABEL: func @sparse_dense_3d_dyn( // CHECK-SAME: %[[A0:.*]]: memref, diff --git a/mlir/test/Dialect/SparseTensor/codegen_sparse_dealloc.mlir b/mlir/test/Dialect/SparseTensor/codegen_sparse_dealloc.mlir --- a/mlir/test/Dialect/SparseTensor/codegen_sparse_dealloc.mlir +++ b/mlir/test/Dialect/SparseTensor/codegen_sparse_dealloc.mlir @@ -9,7 +9,7 @@ #CSR = #sparse_tensor.encoding<{ lvlTypes = ["dense", "compressed"]}> #CSC = #sparse_tensor.encoding<{ lvlTypes = ["dense", "compressed"], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> // diff --git a/mlir/test/Dialect/SparseTensor/conversion.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir --- a/mlir/test/Dialect/SparseTensor/conversion.mlir +++ b/mlir/test/Dialect/SparseTensor/conversion.mlir @@ -22,12 +22,12 @@ #CSC = #sparse_tensor.encoding<{ lvlTypes = ["dense", "compressed"], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #SparseTensor = #sparse_tensor.encoding<{ lvlTypes = ["dense", "compressed", "compressed"], - dimOrdering = affine_map<(i,j,k) -> (k,i,j)> + dimToLvl = affine_map<(i,j,k) -> (k,i,j)> }> // CHECK-LABEL: func @sparse_nop( diff --git a/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir b/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir --- a/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir +++ b/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir @@ -12,12 +12,12 @@ #CSC = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - dimOrdering = affine_map<(i, j) -> (j, i)> + dimToLvl = affine_map<(i, j) -> (j, i)> }> #SparseTensor = #sparse_tensor.encoding<{ lvlTypes = ["dense", "compressed", "compressed"], - dimOrdering = affine_map<(i,j,k) -> (k,i,j)> + dimToLvl = affine_map<(i,j,k) -> (k,i,j)> }> // CHECK-LABEL: func @sparse_convert_1d( diff --git a/mlir/test/Dialect/SparseTensor/convert_sparse2dense.mlir b/mlir/test/Dialect/SparseTensor/convert_sparse2dense.mlir --- a/mlir/test/Dialect/SparseTensor/convert_sparse2dense.mlir +++ b/mlir/test/Dialect/SparseTensor/convert_sparse2dense.mlir @@ -13,7 +13,7 @@ #SparseTensor = #sparse_tensor.encoding<{ lvlTypes = ["dense", "compressed", "compressed"], - dimOrdering = affine_map<(i,j,k) -> (k,i,j)> + dimToLvl = affine_map<(i,j,k) -> (k,i,j)> }> // CHECK-LABEL: func @sparse_convert_1d( diff --git a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir --- a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir +++ b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir @@ -36,7 +36,7 @@ #TsssPermuted = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ], - dimOrdering = affine_map<(i,j,k) -> (k,i,j)> + dimToLvl = affine_map<(i,j,k) -> (k,i,j)> }> #COOSlice = #sparse_tensor.encoding<{ diff --git a/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir --- a/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir +++ b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir @@ -6,12 +6,14 @@ // ----- +// expected-error@+2 {{dimension-rank mismatch between encoding and tensor shape: 2 != 1}} #a = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> -func.func private @tensor_dimlevel_size_mismatch(%arg0: tensor<8xi32, #a>) -> () // expected-error {{expected an array of size 1 for lvlTypes}} +func.func private @tensor_dimlevel_size_mismatch(%arg0: 
tensor<8xi32, #a>) -> () // ----- -#a = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"], dimOrdering = affine_map<(i) -> (i)>}> // expected-error {{level-rank mismatch between dimOrdering and lvlTypes}} +// expected-error@+1 {{level-rank mismatch between dimToLvl and lvlTypes: 1 != 2}} +#a = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"], dimToLvl = affine_map<(i) -> (i)>}> func.func private @tensor_sizes_mismatch(%arg0: tensor<8xi32, #a>) -> () // ----- @@ -26,18 +28,13 @@ // ----- -#a = #sparse_tensor.encoding<{dimOrdering = "wrong"}> // expected-error {{expected an affine map for dimension ordering}} -func.func private @tensor_dimorder_mismatch(%arg0: tensor<8xi32, #a>) -> () +#a = #sparse_tensor.encoding<{dimToLvl = "wrong"}> // expected-error {{expected an affine map for dimToLvl}} +func.func private @tensor_dimtolvl_mismatch(%arg0: tensor<8xi32, #a>) -> () // ----- -#a = #sparse_tensor.encoding<{higherOrdering = "wrong"}> // expected-error {{expected an affine map for higher ordering}} -func.func private @tensor_highorder_mismatch(%arg0: tensor<8xi32, #a>) -> () - -// ----- - -// expected-error@+1 {{expected a permutation affine map for dimension ordering}} -#a = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"], dimOrdering = affine_map<(i,j) -> (i,i)>}> +// expected-error@+1 {{expected a permutation affine map for dimToLvl}} +#a = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"], dimToLvl = affine_map<(i,j) -> (i,i)>}> func.func private @tensor_no_permutation(%arg0: tensor<16x32xf32, #a>) -> () // ----- @@ -67,11 +64,6 @@ // ----- -#a = #sparse_tensor.encoding<{lvlTypes = [ "compressed", "compressed", "dense", "dense" ], dimOrdering = affine_map<(ii, jj, i, j) -> (ii, jj, i, j)>, higherOrdering = affine_map<(i, j) -> (j, i)>}> // expected-error {{unexpected higher ordering mapping from 2 to 2}} -func.func private @tensor_invalid_key(%arg0: tensor<10x60xf32, #a>) -> () - -// ----- - #CSR_SLICE = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], slice = [ (-1, ?, 1), (?, 4, 2) ] // expected-error{{expect positive value or ? 
for slice offset/size/stride}} diff --git a/mlir/test/Dialect/SparseTensor/one_shot_bufferize_tensor_copy_insertion.mlir b/mlir/test/Dialect/SparseTensor/one_shot_bufferize_tensor_copy_insertion.mlir --- a/mlir/test/Dialect/SparseTensor/one_shot_bufferize_tensor_copy_insertion.mlir +++ b/mlir/test/Dialect/SparseTensor/one_shot_bufferize_tensor_copy_insertion.mlir @@ -3,7 +3,7 @@ #DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], - dimOrdering = affine_map<(i,j) -> (i,j)> + dimToLvl = affine_map<(i,j) -> (i,j)> }> // CHECK-LABEL: func @bufferization_alloc_tensor
diff --git a/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir b/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir --- a/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir +++ b/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir @@ -7,7 +7,7 @@ #CSC = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - dimOrdering = affine_map<(i, j) -> (j, i)> + dimToLvl = affine_map<(i, j) -> (j, i)> }> #COO = #sparse_tensor.encoding<{ @@ -26,8 +26,8 @@ } // CHECK-LABEL: func.func @sparse_new_csc( -// CHECK-SAME: %[[A:.*]]: !llvm.ptr) -> tensor (d1, d0)> }>> { -// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr to tensor (d1, d0)> }>> +// CHECK-SAME: %[[A:.*]]: !llvm.ptr) -> tensor (d1, d0)> }>> { +// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr to tensor (d1, d0)> }>> // CHECK: %[[R:.*]] = sparse_tensor.convert %[[COO]] // CHECK: bufferization.dealloc_tensor %[[COO]] // CHECK: return %[[R]]
diff --git a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir --- a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir +++ b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir @@ -8,7 +8,7 @@ #CSR = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - dimOrdering = affine_map<(i,j) -> (i,j)>, + dimToLvl = affine_map<(i,j) -> (i,j)>, posWidth = 64, crdWidth = 64 }> @@ -21,26 +21,26 @@ #CSC = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)>, + dimToLvl = affine_map<(i,j) -> (j,i)>, posWidth = 0, crdWidth = 0 }> // CHECK-LABEL: func private @sparse_csc( -// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>>) +// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>>) func.func private @sparse_csc(tensor<?x?xf64, #CSC>) // ----- #DCSC = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)>, + dimToLvl = affine_map<(i,j) -> (j,i)>, posWidth = 0, crdWidth = 64 }> // CHECK-LABEL: func private @sparse_dcsc( -// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, crdWidth = 64 }>>) +// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)>, crdWidth = 64 }>>) func.func private @sparse_dcsc(tensor<?x?xf64, #DCSC>) // ----- @@ -77,12 +77,11 @@ #BCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense", "dense" ], - dimOrdering = affine_map<(ii, jj, i, j) -> (ii, jj, i, j)>, - higherOrdering = affine_map<(i, j) -> (i floordiv 2, j floordiv 3, i mod 2, j mod 3)> + dimToLvl = affine_map<(i, j) -> (i floordiv 2, j floordiv 3, i mod 2, j mod 3)> }> // CHECK-LABEL: func private @sparse_bcsr( -// CHECK-SAME: tensor<10x60xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense", "dense" ], higherOrdering = affine_map<(d0, d1) -> (d0 floordiv 2, d1 floordiv 3, d0 mod 2, d1 mod 3)> }>> +// CHECK-SAME: tensor<10x60xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense", "dense" ], dimToLvl = affine_map<(d0, d1) -> (d0 floordiv 2, d1 floordiv 3, d0 mod 2, d1 mod 3)> }>> func.func private @sparse_bcsr(tensor<10x60xf64, #BCSR>) @@ -90,12 +89,11 @@ #ELL = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ], - dimOrdering = affine_map<(ii, i, j) -> (ii, i, j)>, - higherOrdering = affine_map<(i,j)[c] -> (c*4*i, i, j)> + dimToLvl = affine_map<(i,j)[c] -> (c*4*i, i, j)> }> // CHECK-LABEL: func private @sparse_ell( -// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ], higherOrdering = affine_map<(d0, d1)[s0] -> (d0 * (s0 * 4), d0, d1)> }>> +// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ], dimToLvl = affine_map<(d0, d1)[s0] -> (d0 * (s0 * 4), d0, d1)> }>> func.func private @sparse_ell(tensor<?x?xf64, #ELL>) // -----
diff --git a/mlir/test/Dialect/SparseTensor/sparse_concat.mlir b/mlir/test/Dialect/SparseTensor/sparse_concat.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_concat.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_concat.mlir @@ -4,12 +4,12 @@ #SparseMatrix_P = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #SparseMatrix_D_P = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> // CHECK-LABEL: func.func @concat_mix_dense(
diff --git a/mlir/test/Dialect/SparseTensor/sparse_concat_codegen.mlir b/mlir/test/Dialect/SparseTensor/sparse_concat_codegen.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_concat_codegen.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_concat_codegen.mlir @@ -5,7 +5,7 @@ #DENSE = #sparse_tensor.encoding<{lvlTypes = ["dense", "dense"]}> #DENSE_P = #sparse_tensor.encoding<{ lvlTypes = ["dense", "dense"], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> // CHECK-LABEL: @concat_sparse_sparse( // CHECK-SAME: %[[TMP_arg0:.*]]: tensor<2x4xf64, #sparse_tensor @@ -417,7 +417,7 @@ // CHECK: } // CHECK: } // CHECK: %[[R:.*]] = sparse_tensor.convert %[[TMP_0]] -// CHECK: return %[[R]] : tensor<9x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> +// CHECK: return %[[R]] : tensor<9x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>> func.func @concat_sparse_sparse_annotated_dense_permute(%arg0: tensor<2x4xf64, #DCSR>, %arg1: tensor<3x4xf64, #DCSR>, %arg2: tensor<4x4xf64, #DCSR>)
diff --git a/mlir/test/Dialect/SparseTensor/sparse_expand.mlir b/mlir/test/Dialect/SparseTensor/sparse_expand.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_expand.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_expand.mlir @@ -13,12 +13,12 @@ #CSC = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #DCSC = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #SV = #sparse_tensor.encoding<{
diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir @@ -10,7 +10,7 @@ #CSC = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #trait_matvec = { @@ -24,15 +24,15 @@ } // CHECK-HIR-LABEL: func @matvec( -// CHECK-HIR-SAME: %[[VAL_0:.*]]: tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>>, +// CHECK-HIR-SAME: %[[VAL_0:.*]]: tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>>, //
CHECK-HIR-SAME: %[[VAL_1:.*]]: tensor<64xf64>, // CHECK-HIR-SAME: %[[VAL_2:.*]]: tensor<32xf64>) -> tensor<32xf64> { // CHECK-HIR-DAG: %[[VAL_3:.*]] = arith.constant 64 : index // CHECK-HIR-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-HIR-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-HIR-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xindex> -// CHECK-HIR-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xindex> -// CHECK-HIR-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xf64> +// CHECK-HIR-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xindex> +// CHECK-HIR-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xindex> +// CHECK-HIR-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xf64> // CHECK-HIR-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64> // CHECK-HIR-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64> // CHECK-HIR: scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
diff --git a/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir b/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir @@ -6,7 +6,7 @@ #CSR = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - dimOrdering = affine_map<(i,j) -> (i,j)> + dimToLvl = affine_map<(i,j) -> (i,j)> }> //
diff --git a/mlir/test/Dialect/SparseTensor/sparse_out.mlir b/mlir/test/Dialect/SparseTensor/sparse_out.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_out.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_out.mlir @@ -2,12 +2,12 @@ #CSR = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - dimOrdering = affine_map<(i,j) -> (i,j)> + dimToLvl = affine_map<(i,j) -> (i,j)> }> #DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], - dimOrdering = affine_map<(i,j) -> (i,j)> + dimToLvl = affine_map<(i,j) -> (i,j)> }> #SparseTensor = #sparse_tensor.encoding<{
diff --git a/mlir/test/Dialect/SparseTensor/sparse_perm.mlir b/mlir/test/Dialect/SparseTensor/sparse_perm.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_perm.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_perm.mlir @@ -3,7 +3,7 @@ #X = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "dense" ], - dimOrdering = affine_map<(i,j,k) -> (k,i,j)> + dimToLvl = affine_map<(i,j,k) -> (k,i,j)> }> #trait = {
diff --git a/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir b/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir @@ -5,7 +5,7 @@ #X = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "dense" ], - dimOrdering = affine_map<(i,j,k) -> (k,i,j)> + dimToLvl = affine_map<(i,j,k) -> (k,i,j)> }> #trait = {
diff --git a/mlir/test/Dialect/SparseTensor/sparse_transpose.mlir b/mlir/test/Dialect/SparseTensor/sparse_transpose.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_transpose.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_transpose.mlir @@ -20,12 +20,12 @@ // CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_3:.*]] = bufferization.alloc_tensor() : tensor<4x3xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> -// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.convert %[[VAL_0]] : tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_4]] {level = 0 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xindex> -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_4]] {level = 0 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xindex> -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_4]] {level = 1 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xindex> -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_4]] {level = 1 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xindex> -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_4]] : tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xf64> +// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.convert %[[VAL_0]] : tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>> +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_4]] {level = 0 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xindex> +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_4]] {level = 0 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xindex> +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_4]] {level = 1 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xindex> +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_4]] {level = 1 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xindex> +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_4]] : tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>> to memref<?xf64> // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_1]]] : memref<?xindex> // CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_2]]] : memref<?xindex> // CHECK: %[[VAL_12:.*]] = scf.for %[[VAL_13:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_2]] iter_args(%[[VAL_14:.*]] = %[[VAL_3]]) -> (tensor<4x3xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>) { @@ -42,7 +42,7 @@ // CHECK: scf.yield %[[VAL_25:.*]] : tensor<4x3xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> // CHECK: } // CHECK: %[[VAL_26:.*]] = sparse_tensor.load %[[VAL_27:.*]] hasInserts : tensor<4x3xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> -// CHECK: bufferization.dealloc_tensor %[[VAL_4]] : tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> +// CHECK: bufferization.dealloc_tensor %[[VAL_4]] : tensor<3x4xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>> // CHECK: return %[[VAL_26]] : tensor<4x3xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> // CHECK: } func.func @sparse_transpose_auto(%arga: tensor<3x4xf64, #DCSR>)
diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector_concat.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_concat.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_vector_concat.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_vector_concat.mlir @@ -6,12 +6,12 @@ #MAT_C_C_P = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #MAT_C_D_P = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> //
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir @@ -31,22 +31,22 @@ #MAT_C_D = #sparse_tensor.encoding<{lvlTypes = ["compressed", "dense"]}> #MAT_D_D = #sparse_tensor.encoding<{ lvlTypes = ["dense", "dense"], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #MAT_C_C_P = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #MAT_C_D_P = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #MAT_D_C_P = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> module {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir @@ -31,22 +31,22 @@ #MAT_C_D = #sparse_tensor.encoding<{lvlTypes = ["compressed", "dense"]}> #MAT_D_D = #sparse_tensor.encoding<{ lvlTypes = ["dense", "dense"], - dimOrdering = affine_map<(i,j)
-> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #MAT_C_C_P = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #MAT_C_D_P = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #MAT_D_C_P = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> module { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir @@ -21,22 +21,22 @@ #MAT_C_D = #sparse_tensor.encoding<{lvlTypes = ["compressed", "dense"]}> #MAT_D_D = #sparse_tensor.encoding<{ lvlTypes = ["dense", "dense"], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #MAT_C_C_P = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #MAT_C_D_P = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #MAT_D_C_P = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> module { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir @@ -31,22 +31,22 @@ #MAT_C_D = #sparse_tensor.encoding<{lvlTypes = ["compressed", "dense"]}> #MAT_D_D = #sparse_tensor.encoding<{ lvlTypes = ["dense", "dense"], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #MAT_C_C_P = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #MAT_C_D_P = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #MAT_D_C_P = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> module { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir @@ -32,12 +32,12 @@ #DenseMatrix = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense" ], - dimOrdering = affine_map<(i,j) -> (i,j)> + dimToLvl = affine_map<(i,j) -> (i,j)> }> #SparseMatrix = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - dimOrdering = affine_map<(i,j) -> (i,j)> + dimToLvl = affine_map<(i,j) -> (i,j)> }> #trait_assign = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir --- 
a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir @@ -36,7 +36,7 @@ #DCSC = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #SortedCOO = #sparse_tensor.encoding<{ @@ -45,12 +45,12 @@ #SortedCOOPerm = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #CCCPerm = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed"], - dimOrdering = affine_map<(d0, d1, d2) -> (d1, d2, d0)> + dimToLvl = affine_map<(d0, d1, d2) -> (d1, d2, d0)> }> module {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir @@ -32,7 +32,7 @@ #CDC = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "compressed" ] // FIXME: Still inadmissible; might need investigation - // dimOrdering = affine_map<(i,j,k) -> (j,k,i)> + // dimToLvl = affine_map<(i,j,k) -> (j,k,i)> }> // Creates and returns 3-D buffer of size (%s1, %s2, %s3) filled with the value %f
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir @@ -31,7 +31,7 @@ #CDR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "dense"]}> #CSC = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> // An example of a 2D convolution with a sparse filter.
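The integration-test updates in this patch are mechanical: every encoding that spelled a permutation as dimOrdering now spells the same affine map as dimToLvl. One quick way to see that the new spelling round-trips is through the MLIR Python bindings; the sketch below is illustrative only (it is not part of the patch) and assumes an in-tree build where the mlir package and its sparse_tensor dialect bindings are importable.

    # Hypothetical usage sketch; not part of the patch under review.
    from mlir.ir import Context, Attribute
    from mlir.dialects import sparse_tensor as st

    with Context():
        # A CSC-style encoding, written with the unified dimToLvl field
        # that replaces the old dimOrdering/higherOrdering pair.
        csc = Attribute.parse('#sparse_tensor.encoding<{'
                              ' lvlTypes = [ "dense", "compressed" ],'
                              ' dimToLvl = affine_map<(i,j) -> (j,i)>'
                              '}>')
        # dim_to_lvl is the renamed Python accessor (formerly dim_ordering).
        print(st.EncodingAttr(csc).dim_to_lvl)  # (d0, d1) -> (d1, d0)
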
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir @@ -28,17 +28,17 @@ #Tensor1 = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ], - dimOrdering = affine_map<(i,j,k) -> (i,j,k)> + dimToLvl = affine_map<(i,j,k) -> (i,j,k)> }> #Tensor2 = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ], - dimOrdering = affine_map<(i,j,k) -> (j,k,i)> + dimToLvl = affine_map<(i,j,k) -> (j,k,i)> }> #Tensor3 = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ], - dimOrdering = affine_map<(i,j,k) -> (k,i,j)> + dimToLvl = affine_map<(i,j,k) -> (k,i,j)> }> // diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir @@ -32,7 +32,7 @@ #DCSC = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> // diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir @@ -32,7 +32,7 @@ #Tensor3 = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ], - dimOrdering = affine_map<(i,j,k) -> (i,k,j)> + dimToLvl = affine_map<(i,j,k) -> (i,k,j)> }> module { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir @@ -34,14 +34,14 @@ #DCSC = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)>, + dimToLvl = affine_map<(i,j) -> (j,i)>, posWidth = 64, crdWidth = 64 }> #CSC = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)>, + dimToLvl = affine_map<(i,j) -> (j,i)>, posWidth = 16, crdWidth = 32 }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir @@ -28,32 +28,32 @@ #Tensor1 = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ], - dimOrdering = affine_map<(i,j,k) -> (i,j,k)> + dimToLvl = affine_map<(i,j,k) -> (i,j,k)> }> #Tensor2 = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ], - dimOrdering = affine_map<(i,j,k) -> (j,k,i)> + dimToLvl = affine_map<(i,j,k) -> (j,k,i)> }> #Tensor3 = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ], - dimOrdering = affine_map<(i,j,k) 
-> (k,i,j)> + dimToLvl = affine_map<(i,j,k) -> (k,i,j)> }> #Tensor4 = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "compressed" ], - dimOrdering = affine_map<(i,j,k) -> (i,j,k)> + dimToLvl = affine_map<(i,j,k) -> (i,j,k)> }> #Tensor5 = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "compressed" ], - dimOrdering = affine_map<(i,j,k) -> (j,k,i)> + dimToLvl = affine_map<(i,j,k) -> (j,k,i)> }> #Tensor6 = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed", "compressed" ], - dimOrdering = affine_map<(i,j,k) -> (k,i,j)> + dimToLvl = affine_map<(i,j,k) -> (k,i,j)> }> // diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir @@ -39,7 +39,7 @@ #Tensor3 = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ], - dimOrdering = affine_map<(i,j,k) -> (i,k,j)> + dimToLvl = affine_map<(i,j,k) -> (i,k,j)> }> #SingletonTensor1 = #sparse_tensor.encoding<{ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir @@ -27,7 +27,7 @@ #CSC = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> module { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir @@ -33,10 +33,10 @@ #SparseTensor = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed", "compressed", "compressed", "compressed", "compressed", "compressed" ], - // Note that any dimOrdering permutation should give the same results + // Note that any dimToLvl permutation should give the same results // since, even though it impacts the sparse storage scheme layout, // it should not change the semantics. 
- dimOrdering = affine_map<(i,j,k,l,m,n,o,p) -> (p,o,j,k,i,l,m,n)> + dimToLvl = affine_map<(i,j,k,l,m,n,o,p) -> (p,o,j,k,i,l,m,n)> }> #trait_flatten = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir @@ -38,12 +38,12 @@ #CSR = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - dimOrdering = affine_map<(i,j) -> (i,j)> + dimToLvl = affine_map<(i,j) -> (i,j)> }> #DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], - dimOrdering = affine_map<(i,j) -> (i,j)> + dimToLvl = affine_map<(i,j) -> (i,j)> }> module { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir @@ -32,7 +32,7 @@ #DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], - dimOrdering = affine_map<(i,j) -> (i,j)> + dimToLvl = affine_map<(i,j) -> (i,j)> }> #eltwise_mult = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir @@ -32,7 +32,7 @@ #CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> #CSC = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> // diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir @@ -23,7 +23,7 @@ #CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> #CSC = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> // diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir @@ -26,7 +26,7 @@ #CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> #CSC = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> // diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir @@ -34,7 +34,7 @@ #SortedCOOPermuted = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #SortedCOO3D = #sparse_tensor.encoding<{ @@ -43,7 +43,7 @@ 
#SortedCOO3DPermuted = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ], - dimOrdering = affine_map<(i,j,k) -> (k,i,j)> + dimToLvl = affine_map<(i,j,k) -> (k,i,j)> }> #trait_scale = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir @@ -44,12 +44,12 @@ #CSC = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #DCSC = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #BlockRow = #sparse_tensor.encoding<{ @@ -58,7 +58,7 @@ #BlockCol = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> // diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir @@ -32,7 +32,7 @@ #DCSC = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], - dimOrdering = affine_map<(i,j) -> (j,i)> + dimToLvl = affine_map<(i,j) -> (j,i)> }> #transpose_trait = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py --- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py +++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py @@ -145,8 +145,7 @@ for pwidth in [32]: for iwidth in [32]: for e in [True]: - attr = st.EncodingAttr.get(level, ordering, None, pwidth, - iwidth) + attr = st.EncodingAttr.get(level, ordering, pwidth, iwidth) opt = (f'parallelization-strategy=none') compiler = sparse_compiler.SparseCompiler( options=opt, opt_level=0, shared_libs=[support_lib]) diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py --- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py +++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py @@ -139,7 +139,7 @@ for ordering in orderings: for pwidth in bitwidths: for iwidth in bitwidths: - attr = st.EncodingAttr.get(level, ordering, None, pwidth, iwidth) + attr = st.EncodingAttr.get(level, ordering, pwidth, iwidth) build_compile_and_run_SpMM(attr, compiler) count = count + 1 # CHECK: Passed 8 tests diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py --- a/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py +++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py @@ -89,7 +89,7 @@ for level in levels: for ordering in orderings: for bwidth in bitwidths: - attr = st.EncodingAttr.get(level, ordering, None, bwidth, bwidth) + attr = st.EncodingAttr.get(level, ordering, bwidth, bwidth) build_compile_and_run_output(attr, compiler) count = count + 1 diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py --- 
a/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py +++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py @@ -214,7 +214,7 @@ for ordering in orderings: for pwidth in bitwidths: for iwidth in bitwidths: - attr = st.EncodingAttr.get(level, ordering, None, pwidth, iwidth) + attr = st.EncodingAttr.get(level, ordering, pwidth, iwidth) types.append(ir.RankedTensorType.get(shape, f64, attr)) # # For exhaustiveness we should have one or more StressTest, such
diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py --- a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py +++ b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py @@ -366,7 +366,7 @@ mlir_storage_format = [f.value for f in self.format_pack.formats] return sparse_tensor.EncodingAttr.get(mlir_storage_format, ir.AffineMap.get_permutation(order), - None, _POS_WIDTH, _CRD_WIDTH) + _POS_WIDTH, _CRD_WIDTH) def _make_format(formats: List[ModeFormat],
diff --git a/mlir/test/python/dialects/sparse_tensor/dialect.py b/mlir/test/python/dialects/sparse_tensor/dialect.py --- a/mlir/test/python/dialects/sparse_tensor/dialect.py +++ b/mlir/test/python/dialects/sparse_tensor/dialect.py @@ -27,14 +27,14 @@ # CHECK: lvl_types: [<DimLevelType.compressed: 8>] print(f"lvl_types: {casted.lvl_types}") - # CHECK: dim_ordering: None - print(f"dim_ordering: {casted.dim_ordering}") + # CHECK: dim_to_lvl: None + print(f"dim_to_lvl: {casted.dim_to_lvl}") # CHECK: pos_width: 16 print(f"pos_width: {casted.pos_width}") # CHECK: crd_width: 32 print(f"crd_width: {casted.crd_width}") - created = st.EncodingAttr.get(casted.lvl_types, None, None, 0, 0) + created = st.EncodingAttr.get(casted.lvl_types, None, 0, 0) # CHECK: #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }> print(created) # CHECK: created_equal: False @@ -53,11 +53,11 @@ with Context() as ctx: parsed = Attribute.parse('#sparse_tensor.encoding<{' ' lvlTypes = [ "dense", "compressed" ],' - ' dimOrdering = affine_map<(d0, d1) -> (d1, d0)>,' + ' dimToLvl = affine_map<(d0, d1) -> (d1, d0)>,' ' posWidth = 8,' ' crdWidth = 32' '}>') - # CHECK: #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, posWidth = 8, crdWidth = 32 }> + # CHECK: #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)>, posWidth = 8, crdWidth = 32 }> print(parsed) casted = st.EncodingAttr(parsed) @@ -66,16 +66,15 @@ # CHECK: lvl_types: [<DimLevelType.dense: 4>, <DimLevelType.compressed: 8>] print(f"lvl_types: {casted.lvl_types}") - # CHECK: dim_ordering: (d0, d1) -> (d1, d0) - print(f"dim_ordering: {casted.dim_ordering}") + # CHECK: dim_to_lvl: (d0, d1) -> (d1, d0) + print(f"dim_to_lvl: {casted.dim_to_lvl}") # CHECK: pos_width: 8 print(f"pos_width: {casted.pos_width}") # CHECK: crd_width: 32 print(f"crd_width: {casted.crd_width}") - created = st.EncodingAttr.get(casted.lvl_types, casted.dim_ordering, - casted.higher_ordering, 8, 32) - # CHECK: #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, posWidth = 8, crdWidth = 32 }> + created = st.EncodingAttr.get(casted.lvl_types, casted.dim_to_lvl, 8, 32) + # CHECK: #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)>, posWidth = 8, crdWidth = 32 }> print(created) # CHECK: created_equal: True print(f"created_equal: {created == casted}")
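For Python clients, the visible API change is that EncodingAttr.get drops the separate higher-ordering argument and takes a single dimension-to-level affine map. A minimal construction sketch, under the same assumptions as above (in-tree mlir Python bindings; the DimLevelType names are those printed by the tests in this patch):

    # Hypothetical usage sketch; not part of the patch under review.
    from mlir.ir import Context, AffineMap
    from mlir.dialects import sparse_tensor as st

    with Context():
        lvl_types = [st.DimLevelType.dense, st.DimLevelType.compressed]
        dim_to_lvl = AffineMap.get_permutation([1, 0])  # column-wise storage
        # Old: EncodingAttr.get(lvl_types, dim_ordering, higher_ordering, pos, crd)
        # New: EncodingAttr.get(lvl_types, dim_to_lvl, pos_width, crd_width)
        csc = st.EncodingAttr.get(lvl_types, dim_to_lvl, 0, 0)
        print(csc)  # prints the encoding with dimToLvl = affine_map<(d0, d1) -> (d1, d0)>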