diff --git a/mlir/include/mlir-c/Dialect/SparseTensor.h b/mlir/include/mlir-c/Dialect/SparseTensor.h --- a/mlir/include/mlir-c/Dialect/SparseTensor.h +++ b/mlir/include/mlir-c/Dialect/SparseTensor.h @@ -41,40 +41,40 @@ // SparseTensorEncodingAttr //===----------------------------------------------------------------------===// -/// Checks whether the given attribute is a sparse_tensor.encoding attribute. +/// Checks whether the given attribute is a `sparse_tensor.encoding` attribute. MLIR_CAPI_EXPORTED bool mlirAttributeIsASparseTensorEncodingAttr(MlirAttribute attr); -/// Creates a sparse_tensor.encoding attribute with the given parameters. +/// Creates a `sparse_tensor.encoding` attribute with the given parameters. MLIR_CAPI_EXPORTED MlirAttribute mlirSparseTensorEncodingAttrGet( - MlirContext ctx, intptr_t numDimLevelTypes, + MlirContext ctx, intptr_t lvlRank, enum MlirSparseTensorDimLevelType const *dimLevelTypes, - MlirAffineMap dimOrdering, MlirAffineMap higherOrdering, - int pointerBitWidth, int indexBitWidth); + MlirAffineMap dimOrdering, MlirAffineMap higherOrdering, int posWidth, + int crdWidth); -/// Returns the number of dim level types in a sparse_tensor.encoding attribute. +/// Returns the level-rank of the `sparse_tensor.encoding` attribute. MLIR_CAPI_EXPORTED intptr_t -mlirSparseTensorEncodingGetNumDimLevelTypes(MlirAttribute attr); +mlirSparseTensorEncodingGetLvlRank(MlirAttribute attr); -/// Returns a specified dim level type in a sparse_tensor.encoding attribute. +/// Returns a specified level-type of the `sparse_tensor.encoding` attribute. MLIR_CAPI_EXPORTED enum MlirSparseTensorDimLevelType -mlirSparseTensorEncodingAttrGetDimLevelType(MlirAttribute attr, intptr_t pos); +mlirSparseTensorEncodingAttrGetDimLevelType(MlirAttribute attr, intptr_t lvl); -/// Returns the dimension ordering in a sparse_tensor.encoding attribute. +/// Returns the dimension-ordering of the `sparse_tensor.encoding` attribute. MLIR_CAPI_EXPORTED MlirAffineMap mlirSparseTensorEncodingAttrGetDimOrdering(MlirAttribute attr); -/// Returns the higher ordering in a sparse_tensor.encoding attribute. +/// Returns the higher-ordering of the `sparse_tensor.encoding` attribute. MLIR_CAPI_EXPORTED MlirAffineMap mlirSparseTensorEncodingAttrGetHigherOrdering(MlirAttribute attr); -/// Returns the pointer bit width in a sparse_tensor.encoding attribute. +/// Returns the position bitwidth of the `sparse_tensor.encoding` attribute. MLIR_CAPI_EXPORTED int -mlirSparseTensorEncodingAttrGetPointerBitWidth(MlirAttribute attr); +mlirSparseTensorEncodingAttrGetPosWidth(MlirAttribute attr); -/// Returns the index bit width in a sparse_tensor.encoding attribute. +/// Returns the coordinate bitwidth of the `sparse_tensor.encoding` attribute. MLIR_CAPI_EXPORTED int -mlirSparseTensorEncodingAttrGetIndexBitWidth(MlirAttribute attr); +mlirSparseTensorEncodingAttrGetCrdWidth(MlirAttribute attr); #ifdef __cplusplus } diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h --- a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h @@ -40,12 +40,12 @@ /// This type is used in the public API at all places where MLIR expects /// values with the built-in type "index". For now, we simply assume that -/// type is 64-bit, but targets with different "index" bit widths should +/// type is 64-bit, but targets with different "index" bitwidths should /// link with an alternatively built runtime support library. 
// TODO: support such targets? using index_type = uint64_t; -/// Encoding of overhead types (both pointer overhead and indices +/// Encoding of overhead types (both position overhead and coordinate /// overhead), for "overloading" @newSparseTensor. enum class OverheadType : uint32_t { kIndex = 0, diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td --- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td @@ -19,6 +19,41 @@ list traits = []> : AttrDef; +//===----------------------------------------------------------------------===// +// Type aliases. +// +// These attributes are just like `IndexAttr` (include/mlir/IR/OpBase.td), +// except that: +// (1) the `summary` is more specific (i.e., the fourth parameter to +// `TypedAttrBase`), which helps tablegen provide better error messages. +// (2) tablegen-generated getters will have the given `returnType`, in +// lieu of the `APInt` that `IndexAttr` uses. This avoids the boilerplate +// of needing to say `get{FOO}().getZExtValue()`, as well as using +// C++ types which better document intent. +//===----------------------------------------------------------------------===// + +def DimensionAttr : + TypedAttrBase< + Index, "IntegerAttr", + And<[CPred<"$_self.isa<::mlir::IntegerAttr>()">, + CPred<"$_self.cast<::mlir::IntegerAttr>().getType()" + ".isa<::mlir::IndexType>()">]>, + "dimension attribute"> { + let returnType = [{::mlir::sparse_tensor::Dimension}]; + let convertFromStorage = [{$_self.getValue().getZExtValue()}]; +} + +def LevelAttr : + TypedAttrBase< + Index, "IntegerAttr", + And<[CPred<"$_self.isa<::mlir::IntegerAttr>()">, + CPred<"$_self.cast<::mlir::IntegerAttr>().getType()" + ".isa<::mlir::IndexType>()">]>, + "level attribute"> { + let returnType = [{::mlir::sparse_tensor::Level}]; + let convertFromStorage = [{$_self.getValue().getZExtValue()}]; +} + //===----------------------------------------------------------------------===// // Sparse Tensor Dimension Slice Attribute. //===----------------------------------------------------------------------===// @@ -92,73 +127,86 @@ The attribute consists of the following fields. - - Dimension level type for each dimension of a tensor type: - - **dense** : dimension is dense, all entries along this dimension - are stored - - **compressed** : dimension is sparse, only nonzeros along this dimensions - are stored - - **singleton** : dimension stores individual indices with no siblings - By default, each dimension level types has the property of being unique - (no duplicates at that level) and ordered (indices appear sorted at that - level). The following two suffixes can be used to make the last two - dimension level types not-unique (duplicates may appear) and not-ordered - (indices may appear unsorted). + - Level-type for each level of a tensor type: + - **dense** : all entries along this level are stored. + - **compressed** : only nonzeros along this level are stored. + - **singleton** : a variant of the compressed level-format, + for when coordinates are guaranteed to have no siblings at this level. + By default, each level-type has the property of being unique (no + duplicates at that level) and ordered (coordinates appear sorted + at that level). 
The following two suffixes can be used to specify + that the level should instead be non-unique (duplicates may appear) + and/or non-ordered (coordinates may appear unsorted). - **-nu** : not unique - **-no** : not ordered - Currently, these suffixes, is present, should appear in this order. - In the future, we may introduce many more dimension level types and - properties, and separate specifying the two completely rather than - using this suffix mechanism. - - - An optional dimension ordering on the indices of this tensor type. Unlike - dense storage, most sparse storage schemes do not provide fast random - access. This affine map specifies the order of dimensions that should be - supported by the sparse storage scheme. For example, for a 2-d tensor, - `(i, j) -> (i, j)` requests row-wise storage and `(i, j) -> (j, i)` - requests column-wise storage. By default, an identify mapping is used, - which implies that the original indices directly correspond to stored - indices. - - - An optional higher-ordering mapping from the original index space of - the tensor to a higher-order index space, used to define block-sparse - storage or ELL (jagged diagonal) storage. For example, for a 2-d tensor, - the mapping `(i, j) -> (i floordiv 2, j floordiv 3, i mod 2, j mod 3)` - imposes an higher-order partitioning into 2x3 blocks along the matrix - layout. A dimension ordering can be used to define a desired ordering - on this higher-order index space. Likewise, the dimension level types - define dense or compressed storage along this higher-order index space. - For block-sparse, blocks are typically stored with compression while - dense storage is used within each block (although hybrid schemes are - possible as well). The higher-order mapping also provides a notion of - "counting a dimension", where every stored element with the same index - is mapped to a new slice. For instance, ELL storage of a 2-d tensor can - be defined with the mapping `(i, j) -> (#i, i, j)` using the notation - of [Chou20]. Lacking the `#` symbol in MLIR's affine mapping, we use - a free symbol `c` to define such counting, together with a constant - that denotes the number of resulting slices. For example, the mapping - `(i, j)[c] -> (c * 3 * i, i, j)` with the first two higher-order indices - stored dense and the innermost compressed denotes ELL storage with - three jagged diagonals that count the dimension `i`. - - TODO: introduce a real counting symbol to MLIR's mapping, since an - expression like 3*c*i has no direct interpretation? - - - The required bit width for "pointer" storage (integral offsets into - the sparse storage scheme). A narrow width reduces the memory footprint - of overhead storage, as long as the width suffices to define the total - required range (viz. the maximum number of stored entries over all indirection - dimensions). The choices are `8`, `16`, `32`, `64`, or, the default, `0` to - indicate the native bit width. - - - The required bit width for "index" storage (elements of the coordinates of - stored entries). A narrow width reduces the memory footprint of overhead - storage, as long as the width suffices to define the total required range - (viz. the maximum value of each tensor index over all dimensions). The - choices are `8`, `16`, `32`, `64`, or, the default, `0` to indicate a - native bit width. - - - An optional array of SparseTensorDimSliceAttr, which specifies how the sparse - tensor is partitioned on each level. + Currently, these suffixes (if present) must appear in this order. 
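As a minimal sketch of the suffix mechanism described above (the `#SortedCOO` name and element type are chosen for illustration; the field syntax follows the examples later in this file), a sorted COO encoding marks the outer level as non-unique so that duplicate coordinates may appear there:

```mlir
// Sorted COO: the outer compressed level may contain duplicates ("-nu"),
// and the inner singleton level stores the remaining coordinate per entry.
#SortedCOO = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed-nu", "singleton" ]
}>
... tensor<1024x1024xf64, #SortedCOO> ...
```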
+ In the future, we may introduce additional level-types and + properties, and split up how the level-format and properties are + specified rather than using this suffix mechanism. + + TODO: This field is called "dimLevelType" for historical reasons, + even though the types are per-level rather than per-dimension. + (This will be corrected in an upcoming change that completely + overhauls the syntax of this attribute.) + + - An optional permutation which maps (higher-ordering)-coordinates + to level-coordinates; defaulting to the identity permutation. + For example, given a 2-d tensor with the default higher-ordering, + `(i, j) -> (i, j)` specifies row-wise storage and `(i, j) -> + (j, i)` specifies column-wise storage. + + TODO: this field is called "dimOrdering" for historical reasons, + even though it actually operates on level-coordinates rather than + dimension-coordinates. + (This will be corrected in an upcoming change that completely + overhauls the syntax of this attribute.) + + - An optional higher-order mapping from dimension-coordinates to + a higher-order coordinate space; defaulting to the identity map. + This is applied before the `dimOrdering`, thus we have the composite: + dimCoords --higherOrdering--> hoCoords --dimOrdering--> lvlCoords. + The higher-order mapping is used to define block-sparse storage, + jagged-diagonal (JDS/ELL/ITPACK) storage, etc. + + For example, given a 2-d tensor, the mapping + `(i, j) -> (i floordiv 2, j floordiv 3, i mod 2, j mod 3)` + imposes an higher-order partitioning into 2x3 blocks along the + matrix layout. For block-sparsity, blocks are typically stored + with compression while dense storage is used within each block + (although hybrid schemes are possible as well). + + TODO: the following example is out-of-date and will be implemented + in a different manner than described here. + (This will be corrected in an upcoming change that completely + overhauls the syntax of this attribute.) + + The higher-order mapping also provides a notion of "counting a + dimension", where every stored element with the same coordinate + is mapped to a new slice. For instance, ELL storage of a 2-d + tensor can be defined with the mapping `(i, j) -> (#i, i, j)` + using the notation of [Chou20]. Lacking the `#` symbol in MLIR's + affine mapping, we use a free symbol `c` to define such counting, + together with a constant that denotes the number of resulting + slices. For example, the mapping `(i, j)[c] -> (c * 3 * i, i, j)` + with the level-types `["dense", "dense", "compressed"]` denotes ELL + storage with three jagged diagonals that count the dimension `i`. + + - The required bitwidth for "position" storage (integral offsets + into the sparse storage scheme). A narrow width reduces the memory + footprint of overhead storage, as long as the width suffices to + define the total required range (viz. the maximum number of stored + entries over all indirection levels). The choices are `8`, `16`, + `32`, `64`, or, the default, `0` to indicate the native bitwidth. + + - The required bitwidth for "coordinate" storage (the coordinates + of stored entries). A narrow width reduces the memory footprint + of overhead storage, as long as the width suffices to define + the total required range (viz. the maximum value of each tensor + coordinate over all levels). The choices are `8`, `16`, `32`, + `64`, or, the default, `0` to indicate a native bitwidth. 
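A hedged sketch that ties together the higher-ordering map and the position/coordinate bitwidths described above (the `#BSR_2x3` name and tensor shape are illustrative only):

```mlir
// 2x3 block-sparse storage: the higher-ordering map introduces the block
// structure, the outer two levels locate a nonempty block, and the inner
// two levels store each block densely; 32-bit overhead storage is requested.
#BSR_2x3 = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed", "compressed", "dense", "dense" ],
  higherOrdering = affine_map<(i, j) -> (i floordiv 2, j floordiv 3, i mod 2, j mod 3)>,
  posWidth = 32,
  crdWidth = 32
}>
... tensor<20x30xf64, #BSR_2x3> ...
```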
+ + - An optional array of `SparseTensorDimSliceAttr`, which specifies + how the sparse tensor is partitioned on each dimension. Examples: @@ -179,8 +227,8 @@ #DCSC = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(i, j) -> (j, i)>, - pointerBitWidth = 32, - indexBitWidth = 8 + posWidth = 32, + crdWidth = 8 }> ... tensor<8x8xf64, #DCSC> ... @@ -214,20 +262,20 @@ // Data in sparse tensor encoding. let parameters = ( ins - // A dimension level type for each dimension of the tensor type. + // A level-type for each level of the sparse storage. ArrayRefParameter< "::mlir::sparse_tensor::DimLevelType", - "per dimension level type" + "level-types" >: $dimLevelType, - // A dimension order on the indices of this tensor type. + // A permutation from (higher-ordering)-coordinates to level-coordinates. "AffineMap":$dimOrdering, - // A mapping between the original and higher-ordering index space. + // A mapping from dimension-coordinates to (higher-ordering)-coordinates. "AffineMap":$higherOrdering, - // The required bit width for pointer storage. - "unsigned":$pointerBitWidth, - // The required bit width for index storage. - "unsigned":$indexBitWidth, - // A dimension level type for each dimension of the tensor type. + // The required bitwidth for position storage. + "unsigned":$posWidth, + // The required bitwidth for coordinate storage. + "unsigned":$crdWidth, + // A slice attribute for each dimension of the tensor type. ArrayRefParameter< "::mlir::sparse_tensor::SparseTensorDimSliceAttr", "per dimension slice metadata" @@ -238,23 +286,23 @@ AttrBuilder<(ins "ArrayRef<::mlir::sparse_tensor::DimLevelType>":$dimLevelType, "AffineMap":$dimOrdering, "AffineMap":$higherOrdering, - "unsigned":$pointerBitWidth, - "unsigned":$indexBitWidth), [{ + "unsigned":$posWidth, + "unsigned":$crdWidth), [{ return $_get($_ctxt, dimLevelType, dimOrdering, higherOrdering, - pointerBitWidth, - indexBitWidth, + posWidth, + crdWidth, ArrayRef<::mlir::sparse_tensor::SparseTensorDimSliceAttr>{}); }]> ]; let extraClassDeclaration = [{ - /// Returns the type for pointer storage based on pointerBitWidth - Type getPointerType() const; + /// Returns the type for position storage based on posWidth + Type getPosType() const; - /// Returns the type for index storage based on indexBitWidth - Type getIndexType() const; + /// Returns the type for coordinate storage based on crdWidth + Type getCrdType() const; /// Constructs a new encoding with the dimOrdering and higherOrdering /// reset to the default/identity. @@ -316,9 +364,9 @@ // The C++ enum for Storage Specifier kind. def SparseTensorStorageSpecifierKindEnum : I32EnumAttr<"StorageSpecifierKind", "sparse tensor storage specifier kind", [ - I32EnumAttrCase<"DimSize", 0, "dim_sz">, - I32EnumAttrCase<"PtrMemSize", 1, "ptr_mem_sz">, - I32EnumAttrCase<"IdxMemSize", 2, "idx_mem_sz">, + I32EnumAttrCase<"LvlSize", 0, "lvl_sz">, + I32EnumAttrCase<"PosMemSize", 1, "pos_mem_sz">, + I32EnumAttrCase<"CrdMemSize", 2, "crd_mem_sz">, I32EnumAttrCase<"ValMemSize", 3, "val_mem_sz">, ]> { let genSpecializedAttr = 0; diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorBase.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorBase.td --- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorBase.td +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorBase.td @@ -20,7 +20,7 @@ types first class citizens within the MLIR compiler infrastructure. 
The dialect forms a bridge between high-level operations on sparse tensors types and lower-level operations on the actual sparse storage - schemes consisting of pointers, indices, and values. Lower-level + schemes consisting of positions, coordinates, and values. Lower-level support may consist of fully generated code or may be provided by means of a small sparse runtime support library. @@ -31,27 +31,29 @@ to tensor algebra by [Kjolstad17,Kjolstad20] in the Sparse Tensor Algebra Compiler (TACO) project (see http://tensor-compiler.org). - The MLIR implementation [Biketal22] closely follows the "sparse iteration - theory" that forms the foundation of TACO. A rewriting rule is applied to - each tensor expression in the Linalg dialect (MLIR's tensor index notation) - where the sparsity of tensors is indicated using the per-dimension level - types dense/compressed together with a specification of the order on the - dimensions (see [Chou18] for an in-depth discussions and possible - extensions to these level types). Subsequently, a topologically sorted - iteration graph, reflecting the required order on indices with respect - to the dimensions of each tensor, is constructed to ensure that all tensors - are visited in natural index order. Next, iteration lattices are - constructed for the tensor expression for every index in topological - order. Each iteration lattice point consists of a conjunction of tensor - indices together with a tensor (sub)expression that needs to be evaluated - for that conjunction. Within the lattice, iteration points are ordered - according to the way indices are exhausted. As such these iteration - lattices drive actual sparse code generation, which consists of a - relatively straightforward one-to-one mapping from iteration lattices - to combinations of for-loops, while-loops, and if-statements. Sparse - tensor outputs that materialize uninitialized are handled with direct - insertions if all parallel loops are outermost or insertions that - indirectly go through a 1-dimensional access pattern expansion + The MLIR implementation [Biketal22] closely follows the "sparse + iteration theory" that forms the foundation of TACO. A rewriting + rule is applied to each tensor expression in the Linalg dialect + (MLIR's tensor index notation) where the sparsity of tensors is + indicated using the per-level level-types (e.g., dense, compressed, + singleton) together with a specification of the order on the levels + (see [Chou18] for an in-depth discussions and possible extensions + to these level-types). Subsequently, a topologically sorted + iteration graph, reflecting the required order on coordinates with + respect to the levels of each tensor, is constructed to ensure + that all tensors are visited in natural level-coordinate order. + Next, iteration lattices are constructed for the tensor expression + for every index in topological order. Each iteration lattice point + consists of a conjunction of tensor coordinates together with a tensor + (sub)expression that needs to be evaluated for that conjunction. + Within the lattice, iteration points are ordered according to + the way coordinates are exhausted. As such these iteration + lattices drive actual sparse code generation, which consists of + a relatively straightforward one-to-one mapping from iteration + lattices to combinations of for-loops, while-loops, and if-statements. 
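To make the rewriting step above concrete, a hedged sketch of a kernel as the sparsifier sees it: a plain `linalg.generic` matrix-vector product in which only the tensor type carries the sparsity annotation (the `#CSR` encoding, trait name, and shapes are illustrative):

```mlir
#CSR = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>

#matvec_trait = {
  indexing_maps = [
    affine_map<(i, j) -> (i, j)>,  // A
    affine_map<(i, j) -> (j)>,     // x
    affine_map<(i, j) -> (i)>      // y (out)
  ],
  iterator_types = ["parallel", "reduction"]
}

func.func @matvec(%A: tensor<32x64xf64, #CSR>,
                  %x: tensor<64xf64>,
                  %y: tensor<32xf64>) -> tensor<32xf64> {
  // The sparsifier turns this dense-looking kernel into loops that only
  // visit the stored entries of %A, guided by the level-types of #CSR.
  %0 = linalg.generic #matvec_trait
      ins(%A, %x : tensor<32x64xf64, #CSR>, tensor<64xf64>)
      outs(%y : tensor<32xf64>) {
    ^bb0(%a: f64, %b: f64, %c: f64):
      %m = arith.mulf %a, %b : f64
      %s = arith.addf %c, %m : f64
      linalg.yield %s : f64
  } -> tensor<32xf64>
  return %0 : tensor<32xf64>
}
```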
+ Sparse tensor outputs that materialize uninitialized are handled with + direct insertions if all parallel loops are outermost or insertions + that indirectly go through a 1-dimensional access pattern expansion (a.k.a. workspace) where feasible [Gustavson72,Bik96,Kjolstad19]. * [Bik96] Aart J.C. Bik. Compiler Support for Sparse Matrix Computations. diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td --- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td @@ -54,82 +54,94 @@ } def SparseTensor_PackOp : SparseTensor_Op<"pack", [Pure]>, - Arguments<(ins 1DTensorOf<[AnyType]>:$data, - 2DTensorOf<[AnySignlessIntegerOrIndex]>:$indices)>, + Arguments<(ins 1DTensorOf<[AnyType]>:$values, + 2DTensorOf<[AnySignlessIntegerOrIndex]>:$coordinates)>, Results<(outs AnySparseTensor: $result)> { - let summary = "Returns a sparse tensor from the given (data, indices) pair"; + let summary = "Returns a sparse tensor from the given (values, coordinates) pair"; let description = [{ - Packs the data/indices into a COO sparse tensor. The coordinates in `indices` - shall not exceed the dimension sizes of the returned sparse tensor. Note - that the returned tensor must be statically shaped because it is impossible - to infer the shape from sparse coordinates. - - `$indices`: stored via a 2-D tensor of integer elements with shape [N, ndims], - which specifies the indices of the elements in the sparse tensor that contains - non-zero values. - - `$data`: stored via a 1-D tensor with shape [N], that supplies the corresponding - values for the indices. - - The operation can be used to materialize a sparse tensor from external sources. - E.g., when passing from Python as two numpy arrays for data and indices. + Packs the values/coordinates into a COO sparse tensor. The length + of `values` must match the outer-length of `coordinates`, since these + two tensors are "zipped" together. The `coordinates` argument provides + level-coords for each value, therefore, the inner-length of `coordinates` + must match the level-rank of the returned tensor, and each level-coords + must be valid for the level-sizes of the returned tensor. Note that + the returned tensor must be statically shaped because it is impossible + to infer the dimension-shape from level-coordinates alone. + + TODO: The returned tensor is allowed (in principle) to have non-identity + dimOrdering/higherOrdering mappings. However, the current implementation + does not yet support them. + + - `coordinates : tensor` + supplies the level-coords for each element in `values`. + - `values : tensor` + supplies the corresponding values for each entry in `coordinates`. + + This operation can be used to materialize a sparse tensor from external + sources; e.g., when passing two numpy arrays from Python. 
Example: - ```mlir - %data = arith.constant dense<[ 1.1, 2.2, 3.3 ]> : tensor<3xf64> - %indices = arith.constant dense<[[0,0], [1,2], [1,3]]> : tensor<3x2xindex> - %st = sparse_tensor.pack %data, %indices : tensor<3xf64>, tensor<3x2xindex - to tensor<3x4xf64, #COO> + ```mlir + %values = arith.constant dense<[ 1.1, 2.2, 3.3 ]> : tensor<3xf64> + %coordinates = arith.constant dense<[[0,0], [1,2], [1,3]]> : tensor<3x2xindex> + %st = sparse_tensor.pack %values, %coordinates + : tensor<3xf64>, tensor<3x2xindex> to tensor<3x4xf64, #COO> // yields COO format |1.1, 0.0, 0.0, 0.0| // of 3x4 matrix |0.0, 0.0, 2.2, 3.3| // |0.0, 0.0, 0.0, 0.0| ``` }]; - let assemblyFormat = "$data `,` $indices attr-dict `:` type($data) `,` type($indices)" - "`to` type($result)"; + let assemblyFormat = + "$values `,` $coordinates attr-dict" + "`:` type($values) `,` type($coordinates) `to` type($result)"; let hasVerifier = 1; } def SparseTensor_UnpackOp : SparseTensor_Op<"unpack", [Pure]>, Arguments<(ins AnySparseTensor:$tensor)>, - Results<(outs 1DTensorOf<[AnyType]>:$data, - 2DTensorOf<[AnySignlessIntegerOrIndex]>:$indices, - AnySignlessIntegerOrIndex:$nnz)> { - let summary = "Returns the (data, indices) pair unpacked from the input tensor"; + Results<(outs 1DTensorOf<[AnyType]>:$values, + 2DTensorOf<[AnySignlessIntegerOrIndex]>:$coordinates, + AnySignlessIntegerOrIndex:$nse)> { + let summary = "Returns the (values, coordinates) pair unpacked from the input tensor"; let description = [{ - Unpack is the inverse operation of `sparse_tensor::pack`. It returns the data/indices - extracted from a COO sparse tensor. Additionally, it also returns an integer value - indicating the number of entries in the source tensor. + The unpack operation is the inverse of `sparse_tensor::pack`. It returns + the values, level-coordinates, and number-of-stored-entries extracted + from the sparse tensor. The source tensor is allowed (in principle) + to have non-identity dimOrdering/higherOrdering mappings. Regardless + of the mappings, the returned `coordinates` are always level-coordinates, + because this is what we mean by "unpacking" as opposed to other forms + of exposing sparse tensors to external clients. This operation can be + used for returning an unpacked MLIR sparse tensor to frontend; e.g., + returning two numpy arrays to Python. - The operation can be used to return an unpacked MLIR sparse tensor to frontend. - E.g., returning two numpy arrays for data and indices. + TODO: the current implementation does not yet support non-identity mappings. - The unpack operation ends the life time of the sparse tensor, and using this - after the unpack is undefined behavior. + This operation ends the lifetime of the sparse tensor, and using + the tensor after the unpack is undefined behavior. 
Example: + ```mlir // input COO format |1.1, 0.0, 0.0, 0.0| // of 3x4 matrix |0.0, 0.0, 2.2, 3.3| // |0.0, 0.0, 0.0, 0.0| - %data, %indices, %nnz = sparse_tensor.unpack %st - : tensor<3x4xf64, #COO> - to tensor<2xf64>, tensor<2x2xindex>, index - - // %data = arith.constant dense<[ 1.1, 2.2, 3.3 ]> : tensor<3xf64> - // %indices = arith.constant dense<[[0,0], [1,2], [1,3]]> : tensor<3x2xindex> - // %nnz = 2 - + %values, %coordinates, %nse + = sparse_tensor.unpack %st + : tensor<3x4xf64, #COO> to tensor<2xf64>, tensor<2x2xindex>, index + // %values = arith.constant dense<[ 1.1, 2.2, 3.3 ]> : tensor<3xf64> + // %coordinates = arith.constant dense<[[0,0], [1,2], [1,3]]> : tensor<3x2xindex> + // %nse = 3 ``` }]; - let assemblyFormat = "$tensor attr-dict `:` type($tensor) " - "`to` type($data) `,` type($indices)`,` type($nnz)"; + let assemblyFormat = + "$tensor attr-dict `:` type($tensor)" + "`to` type($values) `,` type($coordinates) `,` type($nse)"; let hasVerifier = 1; } @@ -182,25 +194,25 @@ let hasVerifier = 1; } -def SparseTensor_ToPointersOp : SparseTensor_Op<"pointers", [Pure]>, - Arguments<(ins AnySparseTensor:$tensor, IndexAttr:$dimension)>, +def SparseTensor_ToPositionsOp : SparseTensor_Op<"positions", [Pure]>, + Arguments<(ins AnySparseTensor:$tensor, LevelAttr:$level)>, Results<(outs AnyStridedMemRefOfRank<1>:$result)> { - let summary = "Extracts pointers array at given dimension from a tensor"; + let summary = "Extracts the `level`-th positions array of the `tensor`"; let description = [{ - Returns the pointers array of the sparse storage format at the - given dimension for the given sparse tensor. This is similar to the - `bufferization.to_memref` operation in the sense that it provides a bridge - between a tensor world view and a bufferized world view. Unlike the - `bufferization.to_memref` operation, however, this sparse operation actually - lowers into code that extracts the pointers array from the sparse storage - scheme (either by calling a support library or through direct code). + Returns the positions array of the tensor's storage at the given + level. This is similar to the `bufferization.to_memref` operation + in the sense that it provides a bridge between a tensor world view + and a bufferized world view. Unlike the `bufferization.to_memref` + operation, however, this sparse operation actually lowers into code + that extracts the positions array from the sparse storage itself + (either by calling a support library or through direct code). Writing into the result of this operation is undefined behavior. Example: ```mlir - %1 = sparse_tensor.pointers %0 { dimension = 1 : index } + %1 = sparse_tensor.positions %0 { level = 1 : index } : tensor<64x64xf64, #CSR> to memref ``` }]; @@ -208,25 +220,25 @@ let hasVerifier = 1; } -def SparseTensor_ToIndicesOp : SparseTensor_Op<"indices", [Pure]>, - Arguments<(ins AnySparseTensor:$tensor, IndexAttr:$dimension)>, +def SparseTensor_ToCoordinatesOp : SparseTensor_Op<"coordinates", [Pure]>, + Arguments<(ins AnySparseTensor:$tensor, LevelAttr:$level)>, Results<(outs AnyStridedMemRefOfRank<1>:$result)> { - let summary = "Extracts indices array at given dimension from a tensor"; + let summary = "Extracts the `level`-th coordinates array of the `tensor`"; let description = [{ - Returns the indices array of the sparse storage format at the - given dimension for the given sparse tensor. This is similar to the - `bufferization.to_memref` operation in the sense that it provides a bridge - between a tensor world view and a bufferized world view. 
Unlike the - `bufferization.to_memref` operation, however, this sparse operation actually - lowers into code that extracts the indices array from the sparse storage - scheme (either by calling a support library or through direct code). + Returns the coordinates array of the tensor's storage at the given + level. This is similar to the `bufferization.to_memref` operation + in the sense that it provides a bridge between a tensor world view + and a bufferized world view. Unlike the `bufferization.to_memref` + operation, however, this sparse operation actually lowers into code + that extracts the coordinates array from the sparse storage itself + (either by calling a support library or through direct code). Writing into the result of this operation is undefined behavior. Example: ```mlir - %1 = sparse_tensor.indices %0 { dimension = 1 : index } + %1 = sparse_tensor.coordinates %0 { level = 1 : index } : tensor<64x64xf64, #CSR> to memref ``` }]; @@ -234,30 +246,31 @@ let hasVerifier = 1; } -def SparseTensor_ToIndicesBufferOp : SparseTensor_Op<"indices_buffer", [Pure]>, +def SparseTensor_ToCoordinatesBufferOp : SparseTensor_Op<"coordinates_buffer", [Pure]>, Arguments<(ins AnySparseTensor:$tensor)>, Results<(outs AnyStridedMemRefOfRank<1>:$result)> { - let summary = "Extracts the linear indices array from a tensor"; + let summary = "Extracts the linear coordinates array from a tensor"; let description = [{ - Returns the linear indices array for a sparse tensor with a trailing COO - region with at least two dimensions. It is an error if the tensor doesn't - contain such a COO region. This is similar to the `bufferization.to_memref` - operation in the sense that it provides a bridge between a tensor world view - and a bufferized world view. Unlike the `bufferization.to_memref` operation, - however, this sparse operation actually lowers into code that extracts the - linear indices array from the sparse storage scheme that stores the indices - for the COO region as an array of structures. For example, a 2D COO sparse - tensor with two non-zero elements at coordinates (1, 3) and (4, 6) are - stored in a linear buffer as (1, 4, 3, 6) instead of two buffer as (1, 4) - and (3, 6). + Returns the linear coordinates array for a sparse tensor with + a trailing COO region with at least two levels. It is an error + if the tensor doesn't contain such a COO region. This is similar + to the `bufferization.to_memref` operation in the sense that it + provides a bridge between a tensor world view and a bufferized + world view. Unlike the `bufferization.to_memref` operation, + however, this operation actually lowers into code that extracts + the linear coordinates array from the sparse storage scheme that + stores the coordinates for the COO region as an array of structures. + For example, a 2D COO sparse tensor with two non-zero elements at + coordinates (1, 3) and (4, 6) are stored in a linear buffer as + (1, 4, 3, 6) instead of two buffer as (1, 4) and (3, 6). Writing into the result of this operation is undefined behavior. Example: ```mlir - %1 = sparse_tensor.indices_buffer %0 - : tensor<64x64xf64, #COO> to memref + %1 = sparse_tensor.coordinates_buffer %0 + : tensor<64x64xf64, #COO> to memref ``` }]; let assemblyFormat = "$tensor attr-dict `:` type($tensor) `to` type($result)"; @@ -348,8 +361,9 @@ Results<(outs SparseTensorStorageSpecifier:$result)> { let summary = ""; let description = [{ - Returns an initial storage specifier value. 
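To ground the positions/coordinates terminology used throughout this patch, a hedged sketch of what the two extraction ops return for the same 3x4 matrix used in the pack/unpack examples, assuming a standard `#CSR` encoding with default (native) bitwidths:

```mlir
#CSR = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>

// For the 3x4 matrix   |1.1, 0.0, 0.0, 0.0|
//                      |0.0, 0.0, 2.2, 3.3|
//                      |0.0, 0.0, 0.0, 0.0|
// the level-1 storage is, conceptually:
//   positions[1]   = [ 0, 1, 3, 3 ]     // row i owns entries pos[i] up to pos[i+1]
//   coordinates[1] = [ 0, 2, 3 ]        // column coordinate of each stored entry
//   values         = [ 1.1, 2.2, 3.3 ]
%pos = sparse_tensor.positions %csr { level = 1 : index }
     : tensor<3x4xf64, #CSR> to memref<?xindex>
%crd = sparse_tensor.coordinates %csr { level = 1 : index }
     : tensor<3x4xf64, #CSR> to memref<?xindex>
```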
A storage specifier value holds - the sizes for tensor dimensions, pointer arrays, index arrays, and the value array. + Returns an initial storage specifier value. A storage specifier + value holds the level-sizes, position arrays, coordinate arrays, + and the value array. Example: @@ -364,22 +378,22 @@ def SparseTensor_GetStorageSpecifierOp : SparseTensor_Op<"storage_specifier.get", [Pure]>, Arguments<(ins SparseTensorStorageSpecifier:$specifier, SparseTensorStorageSpecifierKindAttr:$specifierKind, - OptionalAttr:$dim)>, + OptionalAttr:$level)>, Results<(outs Index:$result)> { let summary = ""; let description = [{ Returns the requested field of the given storage_specifier. - Example of querying the size of the index array for level 0: + Example of querying the size of the coordinates array for level 0: ```mlir - %0 = sparse_tensor.storage_specifier.get %arg0 idx_mem_sz at 0 + %0 = sparse_tensor.storage_specifier.get %arg0 crd_mem_sz at 0 : !sparse_tensor.storage_specifier<#COO> ``` }]; - let assemblyFormat = "$specifier $specifierKind (`at` $dim^)? attr-dict `:` " - "qualified(type($specifier))"; + let assemblyFormat = "$specifier $specifierKind (`at` $level^)? attr-dict" + "`:` qualified(type($specifier))"; let hasVerifier = 1; let hasFolder = 1; } @@ -388,7 +402,7 @@ [Pure, AllTypesMatch<["result", "specifier"]>]>, Arguments<(ins SparseTensorStorageSpecifier:$specifier, SparseTensorStorageSpecifierKindAttr:$specifierKind, - OptionalAttr:$dim, + OptionalAttr:$level, Index:$value)>, Results<(outs SparseTensorStorageSpecifier:$result)> { let summary = ""; @@ -396,16 +410,15 @@ Set the field of the storage specifier to the given input value. Returns the updated storage_specifier as a new SSA value. - Example of updating the sizes of the index array for level 0: + Example of updating the sizes of the coordinates array for level 0: ```mlir - %0 = sparse_tensor.storage_specifier.set %arg0 idx_mem_sz at 0 with %new_sz + %0 = sparse_tensor.storage_specifier.set %arg0 crd_mem_sz at 0 with %new_sz : !sparse_tensor.storage_specifier<#COO> - ``` }]; - let assemblyFormat = "$specifier $specifierKind (`at` $dim^)? `with` $value attr-dict `:` " - "qualified(type($result))"; + let assemblyFormat = "$specifier $specifierKind (`at` $level^)? `with` $value" + " attr-dict `:` qualified(type($result))"; let hasVerifier = 1; } @@ -429,16 +442,16 @@ } def SparseTensor_ConcatenateOp : SparseTensor_Op<"concatenate", [Pure]>, - Arguments<(ins Variadic:$inputs, IndexAttr:$dimension)>, + Arguments<(ins Variadic:$inputs, DimensionAttr:$dimension)>, Results<(outs AnyRankedTensor:$result)> { let summary = "Concatenates a list of tensors into a single tensor."; let description = [{ - Concatenates a list input tensors and the output tensor with the same rank. - The concatenation happens on the specified `dimension` (0<= dimension < rank). - The resulting `dimension` size is the sum of all the input dimension sizes, - while all the other dimensions should have the same size in the input and - output tensors. + Concatenates a list input tensors and the output tensor with the same + dimension-rank. The concatenation happens on the specified `dimension` + (0 <= dimension < dimRank). The resulting `dimension` size is the + sum of all the input sizes for that dimension, while all the other + dimensions should have the same size in the input and output tensors. Only statically-sized input tensors are accepted, while the output tensor can be dynamically-sized. 
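A hedged sketch of the concatenation just described (the `#CSR` encoding and shapes are illustrative): the sizes along the chosen `dimension` add up, while all other dimension sizes must agree.

```mlir
// Stack two statically-sized 4x4 CSR matrices along dimension 0.
%0 = sparse_tensor.concatenate %a, %b { dimension = 0 : index }
   : tensor<4x4xf64, #CSR>, tensor<4x4xf64, #CSR> to tensor<8x4xf64, #CSR>
```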
@@ -470,24 +483,23 @@ AllTypesMatch<["tensor", "result"]>]>, Arguments<(ins AnyType:$value, AnySparseTensor:$tensor, - Variadic:$indices)>, + Variadic:$lvlCoords)>, Results<(outs AnySparseTensor:$result)> { - string summary = "Inserts a value into given sparse tensor"; + string summary = "Inserts a value into the sparse tensor"; string description = [{ - Inserts the given value at given indices into the underlying - sparse storage format of the given tensor with the given indices. - The arity of indices must match the rank of the tensor. This - operation can only be applied when a tensor materializes unintialized - with a `bufferization.alloc_tensor` operation and the final tensor - is constructed with a `load` operation that has the `hasInserts` - attribute set. - - Properties in the sparse tensor type fully describe what kind - of insertion order is allowed. When all dimensions have "unique" + Inserts the value into the underlying storage of the tensor at the + given level-coordinates. The arity of `lvlCoords` must match the + level-rank of the tensor. This operation can only be applied when + the tensor materializes unintialized from a `bufferization.alloc_tensor` + operation and the final tensor is constructed with a `load` operation + which has the `hasInserts` attribute set. + + The level-properties of the sparse tensor type fully describe what + kind of insertion order is allowed. When all levels have "unique" and "ordered" properties, for example, insertions should occur in - strict lexicographical index order. Other properties define - different insertion regimens. Inserting in a way contrary to - these properties results in undefined behavior. + strict lexicographical level-coordinate order. Other properties + define different insertion regimens. Inserting in a way contrary + to these properties results in undefined behavior. Note that this operation is "impure" in the sense that even though the result is modeled through an SSA value, the insertion is eventually @@ -501,7 +513,8 @@ %result = sparse_tensor.insert %val into %tensor[%i,%j] : tensor<1024x1024xf64, #CSR> ``` }]; - let assemblyFormat = "$value `into` $tensor `[` $indices `]` attr-dict `:` type($tensor)"; + let assemblyFormat = "$value `into` $tensor `[` $lvlCoords `]` attr-dict" + "`:` type($tensor)"; let hasVerifier = 1; } @@ -580,7 +593,7 @@ Index:$count)> { string summary = "Expands an access pattern for insertion"; string description = [{ - Performs an access pattern expansion for the innermost dimensions of the + Performs an access pattern expansion for the innermost levels of the given tensor. This operation is useful to implement kernels in which a sparse tensor appears as output. This technique is known under several different names and using several alternative implementations, @@ -588,15 +601,17 @@ [Pissanetzky84], in phase scan [Duff90], access pattern expansion [Bik96], and workspaces [Kjolstad19]. - The values and filled array have sizes that suffice for a *dense* innermost - dimension (e.g. a full row for matrices). The added array and count are used - to store new indices when a false value is encountered in the filled array. - All arrays should be allocated before the loop (possibly even shared between - loops in a future optimization) so that their *dense* initialization can be - amortized over many iterations. 
Setting and resetting the dense arrays in - the loop nest itself is kept *sparse* by only iterating over set elements - through an indirection using the added array, so that the operations are - kept proportional to the number of nonzeros. + The `values` and `filled` arrays must have lengths equal to the + level-size of the innermost level (i.e., as if the innermost level + were *dense*). The `added` array and `count` are used to store new + level-coordinates when a false value is encountered in the `filled` + array. All arrays should be allocated before the loop (possibly even + shared between loops in a future optimization) so that their *dense* + initialization can be amortized over many iterations. Setting and + resetting the dense arrays in the loop nest itself is kept *sparse* + by only iterating over set elements through an indirection using + the added array, so that the operations are kept proportional to + the number of nonzeros. Note that this operation is "impure" in the sense that even though the results are modeled through SSA values, the operation relies on a proper @@ -620,18 +635,19 @@ StridedMemRefRankOf<[Index],[1]>:$added, Index:$count, AnySparseTensor:$tensor, - Variadic:$indices)>, + Variadic:$lvlCoords)>, Results<(outs AnySparseTensor:$result)> { string summary = "Compressed an access pattern for insertion"; string description = [{ Finishes a single access pattern expansion by moving inserted elements into the sparse storage scheme of the given tensor with the given - indices. The arity of indices is one less than the rank of the tensor, - with the remainder innermost indices defined through the added array. - The values and filled array are reset in a *sparse* fashion by only - iterating over set elements through an indirection using the added - array, so that the operations are kept proportional to the number of - nonzeros. See the `sparse_tensor.expand` operation for more details. + level-coordinates. The arity of `lvlCoords` is one less than the + level-rank of the tensor, with the coordinate of the innermost + level defined through the `added` array. The `values` and `filled` + arrays are reset in a *sparse* fashion by only iterating over set + elements through an indirection using the `added` array, so that + the operations are kept proportional to the number of nonzeros. + See the `sparse_tensor.expand` operation for more details. Note that this operation is "impure" in the sense that even though the result is modeled through an SSA value, the insertion is eventually @@ -645,7 +661,7 @@ ``` }]; let assemblyFormat = "$values `,` $filled `,` $added `,` $count" - " `into` $tensor `[` $indices `]` attr-dict" + " `into` $tensor `[` $lvlCoords `]` attr-dict" " `:` type($values) `,` type($filled) `,` type($added)" " `,` type($tensor)"; let hasVerifier = 1; @@ -1161,17 +1177,16 @@ the dimensions are visited (e.g., row first or column first). This is only applicable when the input tensor is a non-annotated dense tensor. - For an input tensor with rank n, the block must take n + 1 (and additional loop - carried variables as described below) arguments. The first n arguments must be - Index type, together indicating the current coordinates of the element being visited. - The last argument must have the same type as the - tensor's element type, representing the actual value loaded from the input - tensor at the given coordinates. + For an input tensor with dim-rank `n`, the block must take `n + 1` + arguments (plus additional loop-carried variables as described below). 
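Stepping back to the expand/compress pair described above, a hedged sketch of how the two ops are typically used for one row of a CSR output (loop structure elided; the assembly follows the formats given in this patch, with shapes and names chosen for illustration):

```mlir
// Expand once per output row: dense scratch arrays sized to the innermost
// level, plus a running count of entries added so far.
%values, %filled, %added, %count = sparse_tensor.expand %tensor
  : tensor<4x4xf64, #CSR> to memref<?xf64>, memref<?xi1>, memref<?xindex>
// ... scatter insertions into %values/%filled/%added, yielding %new_count ...
// Compress moves the %new_count added entries into the sparse storage of
// row %i and sparsely resets the scratch arrays.
%result = sparse_tensor.compress %values, %filled, %added, %new_count into %tensor[%i]
  : memref<?xf64>, memref<?xi1>, memref<?xindex>, tensor<4x4xf64, #CSR>
```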
+ The first `n` arguments provide the dimension-coordinates of the element + being visited, and must all have `index` type. The `(n+1)`-th argument + provides the element's value, and must have the tensor's element type. `sparse_tensor.foreach` can also operate on loop-carried variables and returns the final values after loop termination. The initial values of the variables are passed as additional SSA operands to the "sparse_tensor.foreach" following the n + 1 - SSA values mentioned above (n coordinate and 1 value). + SSA values mentioned above (n coordinates and 1 value). The region must terminate with a "sparse_tensor.yield" that passes the current values of all loop-carried variables to the next iteration, or to the @@ -1188,9 +1203,10 @@ } ``` - It is important to note that foreach generated loop iterates over the stored elements - in the storage order. However, no matter what storage order is used, the indices passed - to the block always obey the original dimension order. + It is important to note that the generated loop iterates over + elements in their storage order. However, regardless of the + storage scheme used by the tensor, the block is always given + the dimension-coordinates. For example: ```mlir diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h --- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h @@ -226,22 +226,20 @@ bool isOrderedLvl(Level l) const { return isOrderedDLT(getLvlType(l)); } bool isUniqueLvl(Level l) const { return isUniqueDLT(getLvlType(l)); } - /// Returns the index-overhead bitwidth, defaulting to zero. - unsigned getIndexBitWidth() const { return enc ? enc.getIndexBitWidth() : 0; } + /// Returns the coordinate-overhead bitwidth, defaulting to zero. + unsigned getCrdWidth() const { return enc ? enc.getCrdWidth() : 0; } - /// Returns the pointer-overhead bitwidth, defaulting to zero. - unsigned getPointerBitWidth() const { - return enc ? enc.getPointerBitWidth() : 0; - } + /// Returns the position-overhead bitwidth, defaulting to zero. + unsigned getPosWidth() const { return enc ? enc.getPosWidth() : 0; } - /// Returns the index-overhead MLIR type, defaulting to `IndexType`. - Type getIndexType() const { - return detail::getIntegerOrIndexType(getContext(), getIndexBitWidth()); + /// Returns the coordinate-overhead MLIR type, defaulting to `IndexType`. + Type getCrdType() const { + return detail::getIntegerOrIndexType(getContext(), getCrdWidth()); } - /// Returns the pointer-overhead MLIR type, defaulting to `IndexType`. - Type getPointerType() const { - return detail::getIntegerOrIndexType(getContext(), getPointerBitWidth()); + /// Returns the position-overhead MLIR type, defaulting to `IndexType`. + Type getPosType() const { + return detail::getIntegerOrIndexType(getContext(), getPosWidth()); } private: diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td --- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td @@ -37,11 +37,11 @@ encoding ::= attribute-value ``` - Values with storage_specifier types represent aggregated storage scheme metadata - for the given sparse tensor encoding. - It currently holds a set of values for sizes of sparse tensor dimension, index array, - pointer array and value array. 
- Note that the type is not yet stable and subject to change in the near future. + Values with storage_specifier types represent aggregated storage scheme + metadata for the given sparse tensor encoding. It currently holds + a set of values for level-sizes, coordinate arrays, position arrays, + and value array. Note that the type is not yet stable and subject to + change in the near future. Examples: diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/COO.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/COO.h --- a/mlir/include/mlir/ExecutionEngine/SparseTensor/COO.h +++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/COO.h @@ -27,24 +27,24 @@ namespace sparse_tensor { /// An element of a sparse tensor in coordinate-scheme representation -/// (i.e., a pair of indices and value). For example, a rank-1 vector -/// element would look like +/// (i.e., a pair of coordinates and value). For example, a rank-1 +/// vector element would look like /// ({i}, a[i]) /// and a rank-5 tensor element would look like /// ({i,j,k,l,m}, a[i,j,k,l,m]) /// -/// The indices are represented as a (non-owning) pointer into a shared -/// pool of indices, rather than being stored directly in this object. -/// This significantly improves performance because it: (1) reduces -/// the per-element memory footprint, and (2) centralizes the memory -/// management for indices. The only downside is that the indices -/// themselves cannot be retrieved without knowing the rank of the -/// tensor to which this element belongs (and that rank is not stored -/// in this object). +/// The coordinates are represented as a (non-owning) pointer into +/// a shared pool of coordinates, rather than being stored directly in +/// this object. This significantly improves performance because it: +/// (1) reduces the per-element memory footprint, and (2) centralizes +/// the memory management for coordinates. The only downside is that +/// the coordinates themselves cannot be retrieved without knowing the +/// rank of the tensor to which this element belongs (and that rank is +/// not stored in this object). template struct Element final { - Element(const uint64_t *ind, V val) : indices(ind), value(val){}; - const uint64_t *indices; // pointer into shared index pool + Element(const uint64_t *coords, V val) : coords(coords), value(val){}; + const uint64_t *coords; // pointer into shared coordinates pool V value; }; @@ -58,9 +58,9 @@ /// Precondition: the elements must both be valid for `rank`. bool operator()(const Element &e1, const Element &e2) const { for (uint64_t d = 0; d < rank; ++d) { - if (e1.indices[d] == e2.indices[d]) + if (e1.coords[d] == e2.coords[d]) continue; - return e1.indices[d] < e2.indices[d]; + return e1.coords[d] < e2.coords[d]; } return false; } @@ -132,11 +132,11 @@ assert(dimSizes[d] > 0 && "Dimension size zero has trivial storage"); if (capacity) { elements.reserve(capacity); - indices.reserve(capacity * dimRank); + coordinates.reserve(capacity * dimRank); } } - /// Gets the rank of the tensor. + /// Gets the dimension-rank of the tensor. uint64_t getRank() const { return dimSizes.size(); } /// Gets the dimension-sizes array. @@ -149,37 +149,38 @@ ElementLT getElementLT() const { return ElementLT(getRank()); } /// Adds an element to the tensor. This method does not check whether - /// `ind` is already associated with a value, it adds it regardless. + /// `dimCoords` is already associated with a value, it adds it regardless. /// Resolving such conflicts is left up to clients of the iterator /// interface. 
/// /// This method invalidates all iterators. /// /// Asserts: - /// * the `ind` is valid for `rank` - /// * the elements of `ind` are valid for `dimSizes`. - void add(const std::vector &ind, V val) { - const uint64_t *base = indices.data(); - uint64_t size = indices.size(); - uint64_t rank = getRank(); - assert(ind.size() == rank && "Element rank mismatch"); - for (uint64_t r = 0; r < rank; ++r) { - assert(ind[r] < dimSizes[r] && "Index is too large for the dimension"); - indices.push_back(ind[r]); + /// * the `dimCoords` is valid for `getRank`. + /// * the components of `dimCoords` are valid for `getDimSizes`. + void add(const std::vector &dimCoords, V val) { + const uint64_t *base = coordinates.data(); + const uint64_t size = coordinates.size(); + const uint64_t dimRank = getRank(); + assert(dimCoords.size() == dimRank && "Element rank mismatch"); + for (uint64_t d = 0; d < dimRank; ++d) { + assert(dimCoords[d] < dimSizes[d] && + "Coordinate is too large for the dimension"); + coordinates.push_back(dimCoords[d]); } - // This base only changes if indices were reallocated. In that case, we - // need to correct all previous pointers into the vector. Note that this - // only happens if we did not set the initial capacity right, and then only - // for every internal vector reallocation (which with the doubling rule - // should only incur an amortized linear overhead). - const uint64_t *newBase = indices.data(); + // This base only changes if `coordinates` was reallocated. In which + // case, we need to correct all previous pointers into the vector. + // Note that this only happens if we did not set the initial capacity + // right, and then only for every internal vector reallocation (which + // with the doubling rule should only incur an amortized linear overhead). + const uint64_t *const newBase = coordinates.data(); if (newBase != base) { for (uint64_t i = 0, n = elements.size(); i < n; ++i) - elements[i].indices = newBase + (elements[i].indices - base); + elements[i].coords = newBase + (elements[i].coords - base); base = newBase; } // Add the new element and update the sorted bit. - Element addedElem(base + size, val); + const Element addedElem(base + size, val); if (!elements.empty() && isSorted) isSorted = getElementLT()(elements.back(), addedElem); elements.push_back(addedElem); @@ -188,8 +189,9 @@ const_iterator begin() const { return elements.cbegin(); } const_iterator end() const { return elements.cend(); } - /// Sorts elements lexicographically by index. If an index is mapped to - /// multiple values, then the relative order of those values is unspecified. + /// Sorts elements lexicographically by coordinates. If a coordinate + /// is mapped to multiple values, then the relative order of those + /// values is unspecified. /// /// This method invalidates all iterators. void sort() { @@ -202,7 +204,7 @@ private: const std::vector dimSizes; // per-dimension sizes std::vector> elements; // all COO elements - std::vector indices; // shared index pool + std::vector coordinates; // shared coordinate pool bool isSorted; }; diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h --- a/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h +++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h @@ -46,7 +46,7 @@ /// then returns an arbitrary value. If `IsPattern` is false, then /// reads the value from the current line buffer beginning at `linePtr`. 
template -inline std::enable_if_t::value, V> readCOOValue(char **linePtr) { +inline std::enable_if_t::value, V> readValue(char **linePtr) { // The external formats always store these numerical values with the type // double, but we cast these values to the sparse tensor object type. // For a pattern tensor, we arbitrarily pick the value 1 for all entries. @@ -59,7 +59,7 @@ /// then returns an arbitrary value. If `IsPattern` is false, then reads /// the value from the current line buffer beginning at `linePtr`. template -inline std::enable_if_t::value, V> readCOOValue(char **linePtr) { +inline std::enable_if_t::value, V> readValue(char **linePtr) { // Read two values to make a complex. The external formats always store // numerical values with the type double, but we cast these values to the // sparse tensor object type. For a pattern tensor, we arbitrarily pick the @@ -72,14 +72,12 @@ return V(re, im); } -/// Returns an element-value. If `is_pattern` is true, then returns an -/// arbitrary value. If `is_pattern` is false, then reads the value from +/// Returns an element-value. If `isPattern` is true, then returns an +/// arbitrary value. If `isPattern` is false, then reads the value from /// the current line buffer beginning at `linePtr`. template -inline V readCOOValue(char **linePtr, bool is_pattern) { - if (is_pattern) - return readCOOValue(linePtr); - return readCOOValue(linePtr); +inline V readValue(char **linePtr, bool isPattern) { + return isPattern ? readValue(linePtr) : readValue(linePtr); } } // namespace detail @@ -171,15 +169,17 @@ return isSymmetric_; } - /// Gets the rank of the tensor. Is only valid after parsing the header. + /// Gets the dimension-rank of the tensor. Is only valid after parsing + /// the header. uint64_t getRank() const { assert(isValid() && "Attempt to getRank() before readHeader()"); return idata[0]; } - /// Gets the number of non-zeros. Is only valid after parsing the header. - uint64_t getNNZ() const { - assert(isValid() && "Attempt to getNNZ() before readHeader()"); + /// Gets the number of stored elements. Is only valid after parsing + /// the header. + uint64_t getNSE() const { + assert(isValid() && "Attempt to getNSE() before readHeader()"); return idata[1]; } @@ -201,23 +201,22 @@ /// Reads a sparse tensor element from the next line in the input file and /// returns the value of the element. Stores the coordinates of the element - /// to the `indices` array. + /// to the `dimCoords` array. template - V readCOOElement(uint64_t rank, uint64_t *indices) { - assert(rank == getRank() && "rank mismatch"); - char *linePtr = readCOOIndices(indices); - return detail::readCOOValue(&linePtr, isPattern()); + V readElement(uint64_t dimRank, uint64_t *dimCoords) { + assert(dimRank == getRank() && "rank mismatch"); + char *linePtr = readCoords(dimCoords); + return detail::readValue(&linePtr, isPattern()); } /// Allocates a new COO object for `lvlSizes`, initializes it by reading - /// all the elements from the file and applying `dim2lvl` to their indices, - /// and then closes the file. + /// all the elements from the file and applying `dim2lvl` to their + /// dim-coordinates, and then closes the file. /// /// Preconditions: /// * `lvlSizes` must be valid for `lvlRank`. /// * `dim2lvl` must be valid for `getRank()`. - /// * `dim2lvl` maps indices valid for `getDimSizes()` to indices - /// valid for `lvlSizes`. + /// * `dim2lvl` maps `getDimSizes()`-coordinates to `lvlSizes`-coordinates. /// * the file's actual value type can be read as `V`. 
/// /// Asserts: @@ -263,22 +262,22 @@ void readLine(); /// Reads the next line of the input file and parses the coordinates - /// into the `indices` argument. Returns the position in the `line` + /// into the `dimCoords` argument. Returns the position in the `line` /// buffer where the element's value should be parsed from. This method - /// has been factored out from `readCOOElement` to minimize code bloat + /// has been factored out from `readElement` to minimize code bloat /// for the generated library. /// - /// Precondition: `indices` is valid for `getRank()`. - template - char *readCOOIndices(I *indices) { + /// Precondition: `dimCoords` is valid for `getRank()`. + template + char *readCoords(C *dimCoords) { readLine(); // Local variable for tracking the parser's position in the `line` buffer. char *linePtr = line; for (uint64_t dimRank = getRank(), d = 0; d < dimRank; ++d) { - // Parse the 1-based index. - uint64_t idx = strtoul(linePtr, &linePtr, 10); - // Store the 0-based index. - indices[d] = static_cast(idx - 1); + // Parse the 1-based coordinate. + uint64_t c = strtoul(linePtr, &linePtr, 10); + // Store the 0-based coordinate. + dimCoords[d] = static_cast(c - 1); } return linePtr; } @@ -334,8 +333,8 @@ const uint64_t dimRank = getRank(); assert(lvlRank == dimRank && "Rank mismatch"); detail::PermutationRef d2l(dimRank, dim2lvl); - // Prepare a COO object with the number of nonzeros as initial capacity. - auto *lvlCOO = new SparseTensorCOO(lvlRank, lvlSizes, getNNZ()); + // Prepare a COO object with the number of stored elems as initial capacity. + auto *lvlCOO = new SparseTensorCOO(lvlRank, lvlSizes, getNSE()); // Do some manual LICM, to avoid assertions in the for-loop. const bool IsPattern = isPattern(); if (IsPattern) @@ -352,17 +351,17 @@ detail::PermutationRef dim2lvl, SparseTensorCOO *lvlCOO) { const uint64_t dimRank = getRank(); - std::vector dimInd(dimRank); - std::vector lvlInd(lvlRank); - for (uint64_t nnz = getNNZ(), k = 0; k < nnz; ++k) { - // We inline `readCOOElement` here in order to avoid redundant + std::vector dimCoords(dimRank); + std::vector lvlCoords(lvlRank); + for (uint64_t nse = getNSE(), k = 0; k < nse; ++k) { + // We inline `readElement` here in order to avoid redundant // assertions, since they're guaranteed by the call to `isValid()` - // and the construction of `dimInd` above. - char *linePtr = readCOOIndices(dimInd.data()); - const V value = detail::readCOOValue(&linePtr); - dim2lvl.pushforward(dimRank, dimInd.data(), lvlInd.data()); + // and the construction of `dimCoords` above. + char *linePtr = readCoords(dimCoords.data()); + const V value = detail::readValue(&linePtr); + dim2lvl.pushforward(dimRank, dimCoords.data(), lvlCoords.data()); // TODO: - lvlCOO->add(lvlInd, value); + lvlCOO->add(lvlCoords, value); } } @@ -371,6 +370,10 @@ const uint64_t *dim2lvl, C *lvlCoordinates, V *values) { assert(isValid() && "Attempt to readCOO() before readHeader()"); + // Construct a `PermutationRef` for the `pushforward` below. + // TODO: This specific implementation does not generalize to arbitrary + // mappings, but once we functionalize the `dim2lvl` argument we can + // simply use that function instead. 
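// [Editor's illustrative sketch, not part of this patch.] `readCoords` above
// parses the 1-based coordinates of one file line and stores them 0-based.
// A standalone analogue over an in-memory line buffer (names hypothetical):
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Parses `dimRank` 1-based coordinates from `line`, storing them 0-based in
// `dimCoords`, and returns a pointer to where the value should be parsed.
inline char *parseCoords(char *line, uint64_t dimRank, uint64_t *dimCoords) {
  char *linePtr = line;
  for (uint64_t d = 0; d < dimRank; ++d) {
    const uint64_t c = std::strtoul(linePtr, &linePtr, 10);
    dimCoords[d] = c - 1; // external formats are 1-based; storage is 0-based
  }
  return linePtr;
}

int main() {
  char line[] = "3 7 42.0";
  uint64_t coords[2];
  char *valPtr = parseCoords(line, 2, coords);
  const double value = std::strtod(valPtr, nullptr);
  std::printf("(%llu, %llu) = %g\n", (unsigned long long)coords[0],
              (unsigned long long)coords[1], value); // prints (2, 6) = 42
  return 0;
}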
const uint64_t dimRank = getRank(); assert(lvlRank == dimRank && "Rank mismatch"); detail::PermutationRef d2l(dimRank, dim2lvl); @@ -391,19 +394,19 @@ detail::PermutationRef dim2lvl, C *lvlCoordinates, V *values) { const uint64_t dimRank = getRank(); - const uint64_t nse = getNNZ(); + const uint64_t nse = getNSE(); std::vector dimCoords(dimRank); // Read the first element with isSorted=false as a way to avoid accessing its // previous element. bool isSorted = false; char *linePtr; - // We inline `readCOOElement` here in order to avoid redundant assertions, + // We inline `readElement` here in order to avoid redundant assertions, // since they're guaranteed by the call to `isValid()` and the construction // of `dimCoords` above. - auto readElement = [&]() { - linePtr = readCOOIndices(dimCoords.data()); + const auto readNextElement = [&]() { + linePtr = readCoords(dimCoords.data()); dim2lvl.pushforward(dimRank, dimCoords.data(), lvlCoordinates); - *values = detail::readCOOValue(&linePtr); + *values = detail::readValue(&linePtr); if (isSorted) { // Note that isSorted was set to false while reading the first element, // to guarantee the safeness of using prevLvlCoords. @@ -421,10 +424,10 @@ lvlCoordinates += lvlRank; ++values; }; - readElement(); + readNextElement(); isSorted = true; for (uint64_t n = 1; n < nse; ++n) - readElement(); + readNextElement(); return isSorted; } @@ -434,21 +437,21 @@ inline void writeExtFROSTT(const SparseTensorCOO &coo, const char *filename) { assert(filename && "Got nullptr for filename"); - auto &dimSizes = coo.getDimSizes(); - auto &elements = coo.getElements(); - const uint64_t rank = coo.getRank(); - const uint64_t nnz = elements.size(); + const auto &dimSizes = coo.getDimSizes(); + const auto &elements = coo.getElements(); + const uint64_t dimRank = coo.getRank(); + const uint64_t nse = elements.size(); std::fstream file; file.open(filename, std::ios_base::out | std::ios_base::trunc); assert(file.is_open()); - file << "; extended FROSTT format\n" << rank << " " << nnz << std::endl; - for (uint64_t r = 0; r < rank - 1; ++r) - file << dimSizes[r] << " "; - file << dimSizes[rank - 1] << std::endl; - for (uint64_t i = 0; i < nnz; ++i) { - auto &idx = elements[i].indices; - for (uint64_t r = 0; r < rank; ++r) - file << (idx[r] + 1) << " "; + file << "; extended FROSTT format\n" << dimRank << " " << nse << std::endl; + for (uint64_t d = 0; d < dimRank - 1; ++d) + file << dimSizes[d] << " "; + file << dimSizes[dimRank - 1] << std::endl; + for (uint64_t i = 0; i < nse; ++i) { + const auto &coords = elements[i].coords; + for (uint64_t d = 0; d < dimRank; ++d) + file << (coords[d] + 1) << " "; file << elements[i].value << std::endl; } file.flush(); diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h --- a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h +++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h @@ -15,9 +15,9 @@ // This file contains definitions for the following classes: // // * `SparseTensorStorageBase` -// * `SparseTensorStorage` +// * `SparseTensorStorage` // * `SparseTensorEnumeratorBase` -// * `SparseTensorEnumerator` +// * `SparseTensorEnumerator` // * `SparseTensorNNZ` // // Ideally we would split the storage classes and enumerator classes @@ -52,9 +52,9 @@ // These macros ensure consistent error messages, without risk of incuring // an additional method call to do so. 
#define ASSERT_VALID_DIM(d) \ - assert(d < getDimRank() && "Dimension index is out of bounds"); + assert(d < getDimRank() && "Dimension is out of bounds"); #define ASSERT_VALID_LVL(l) \ - assert(l < getLvlRank() && "Level index is out of bounds"); + assert(l < getLvlRank() && "Level is out of bounds"); #define ASSERT_COMPRESSED_LVL(l) \ assert(isCompressedLvl(l) && "Level is not compressed"); #define ASSERT_COMPRESSED_OR_SINGLETON_LVL(l) \ @@ -70,8 +70,8 @@ // retained for the sake of future-proofing. #define ASSERT_DENSE_DLT(dlt) assert(isDenseDLT(dlt) && "Level is not dense"); -/// Abstract base class for `SparseTensorStorage`. This class -/// takes responsibility for all the ``-independent aspects +/// Abstract base class for `SparseTensorStorage`. This class +/// takes responsibility for all the ``-independent aspects /// of the tensor (e.g., shape, sparsity, permutation). In addition, /// we use function overloading to implement "partial" method /// specialization, which the C-API relies on to catch type errors @@ -89,7 +89,7 @@ /// and prone to introduce confusion whenever the qualifiers are dropped. /// Where necessary, we use "axis" as the generic term. /// -/// The *size* of an axis is the cardinality of possible coordinate/index +/// The *size* of an axis is the cardinality of possible coordinate /// values along that axis (regardless of which coordinates have stored /// element values). As such, each size must be non-zero since if any /// axis has size-zero then the whole tensor would have trivial storage @@ -129,8 +129,7 @@ /// * `dimSizes`, `lvlSizes`, `lvlTypes`, and `lvl2dim` must be nonnull. /// * `dimSizes` must be valid for `dimRank`. /// * `lvlSizes`, `lvlTypes`, and `lvl2dim` must be valid for `lvlRank`. - /// * `lvl2dim` must map indices valid for `lvlSizes` to indices valid - /// for `dimSizes`. + /// * `lvl2dim` must map `lvlSizes`-coordinates to `dimSizes`-coordinates. /// /// Asserts: /// * `dimRank` and `lvlRank` are nonzero. @@ -213,28 +212,29 @@ MLIR_SPARSETENSOR_FOREVERY_V(DECL_NEWENUMERATOR) #undef DECL_NEWENUMERATOR - /// Gets pointers-overhead storage. -#define DECL_GETPOINTERS(PNAME, P) \ - virtual void getPointers(std::vector

<P> **, uint64_t); - MLIR_SPARSETENSOR_FOREVERY_FIXED_O(DECL_GETPOINTERS) -#undef DECL_GETPOINTERS + /// Gets positions-overhead storage for the given level. +#define DECL_GETPOSITIONS(PNAME, P) \ + virtual void getPositions(std::vector<P>

**, uint64_t); + MLIR_SPARSETENSOR_FOREVERY_FIXED_O(DECL_GETPOSITIONS) +#undef DECL_GETPOSITIONS - /// Gets indices-overhead storage. -#define DECL_GETINDICES(INAME, I) \ - virtual void getIndices(std::vector **, uint64_t); - MLIR_SPARSETENSOR_FOREVERY_FIXED_O(DECL_GETINDICES) -#undef DECL_GETINDICES - virtual uint64_t getIndex(uint64_t l, uint64_t pos) const = 0; + /// Gets coordinates-overhead storage for the given level. +#define DECL_GETCOORDINATES(INAME, C) \ + virtual void getCoordinates(std::vector **, uint64_t); + MLIR_SPARSETENSOR_FOREVERY_FIXED_O(DECL_GETCOORDINATES) +#undef DECL_GETCOORDINATES + /// Gets the coordinate-value stored at the given level and position. + virtual uint64_t getCrd(uint64_t lvl, uint64_t pos) const = 0; /// Gets primary storage. #define DECL_GETVALUES(VNAME, V) virtual void getValues(std::vector **); MLIR_SPARSETENSOR_FOREVERY_V(DECL_GETVALUES) #undef DECL_GETVALUES - /// Element-wise insertion in lexicographic index order. The first - /// argument is the level-indices for the value being inserted. + /// Element-wise insertion in lexicographic coordinate order. The first + /// argument is the level-coordinates for the value being inserted. // TODO: For better safety, this should take a parameter for the - // length of `lvlInd` and check that against `getLvlRank()`. + // length of `lvlCoords` and check that against `getLvlRank()`. #define DECL_LEXINSERT(VNAME, V) virtual void lexInsert(const uint64_t *, V); MLIR_SPARSETENSOR_FOREVERY_V(DECL_LEXINSERT) #undef DECL_LEXINSERT @@ -244,7 +244,7 @@ /// iterating over the nonzero elements. /// /// Arguments: - /// * `lvlInd` the level-indices shared by the values being inserted. + /// * `lvlCoords` the level-coordinates shared by the values being inserted. /// * `values` a map from last-level coordinates to their associated value. /// * `filled` a map from last-level coordinates to bool, indicating /// whether `values` contains a valid value to be inserted. @@ -269,22 +269,22 @@ //===----------------------------------------------------------------------===// // This forward decl is necessary for defining `SparseTensorStorage`, // but isn't sufficient for splitting it off. -template +template class SparseTensorEnumerator; /// A memory-resident sparse tensor using a storage scheme based on -/// per-dimension sparse/dense annotations. This data structure provides +/// per-level sparse/dense annotations. This data structure provides /// a bufferized form of a sparse tensor type. In contrast to generating /// setup methods for each differently annotated sparse tensor, this /// method provides a convenient "one-size-fits-all" solution that simply /// takes an input tensor and annotations to implement all required setup /// in a general manner. -template +template class SparseTensorStorage final : public SparseTensorStorageBase { /// Private constructor to share code between the other constructors. /// Beware that the object is not necessarily guaranteed to be in a /// valid state after this constructor alone; e.g., `isCompressedLvl(l)` - /// doesn't entail `!(pointers[l].empty())`. + /// doesn't entail `!(positions[l].empty())`. /// /// Preconditions/assertions are as per the `SparseTensorStorageBase` ctor. 
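// [Editor's illustrative sketch, not part of this patch.] For a 2-D tensor
// stored with a dense outer level and a compressed inner level (i.e., CSR),
// the renamed positions/coordinates arrays exposed through getPositions and
// getCoordinates above look as follows for
//   | 1 0 2 |
//   | 0 0 0 |
//   | 3 4 0 |
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // One entry per outer coordinate plus one: row r's stored entries live in
  // the half-open position range [positions[r], positions[r + 1]).
  const std::vector<uint64_t> positions = {0, 2, 2, 4};
  // The inner (column) coordinate of each stored entry.
  const std::vector<uint64_t> coordinates = {0, 2, 0, 1};
  const std::vector<double> values = {1.0, 2.0, 3.0, 4.0};
  for (uint64_t r = 0; r + 1 < positions.size(); ++r)
    for (uint64_t p = positions[r]; p < positions[r + 1]; ++p)
      std::printf("(%llu, %llu) = %g\n", (unsigned long long)r,
                  (unsigned long long)coordinates[p], values[p]);
  return 0;
}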
SparseTensorStorage(uint64_t dimRank, const uint64_t *dimSizes, @@ -292,7 +292,7 @@ const DimLevelType *lvlTypes, const uint64_t *lvl2dim) : SparseTensorStorageBase(dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes, lvl2dim), - pointers(lvlRank), indices(lvlRank), lvlCursor(lvlRank) {} + positions(lvlRank), coordinates(lvlRank), lvlCursor(lvlRank) {} public: /// Constructs a sparse tensor with the given encoding, and allocates @@ -339,11 +339,11 @@ /// are as per the `SparseTensorStorageBase` ctor; which is to say, /// the `dimSizes` and `lvlSizes` must both be "sizes" not "shapes", /// since there's nowhere to reconstruct dynamic sizes from. - static SparseTensorStorage * + static SparseTensorStorage * newEmpty(uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank, const uint64_t *lvlSizes, const DimLevelType *lvlTypes, const uint64_t *lvl2dim) { - return new SparseTensorStorage(dimRank, dimSizes, lvlRank, + return new SparseTensorStorage(dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes, lvl2dim, true); } @@ -365,7 +365,7 @@ // that there's a feasible way to do so from within the library itself. // Therefore, when we functionalize the `lvl2dim` mapping we'll have // to update the type/preconditions of this factory too. - static SparseTensorStorage * + static SparseTensorStorage * newFromCOO(uint64_t dimRank, const uint64_t *dimShape, uint64_t lvlRank, const DimLevelType *lvlTypes, const uint64_t *lvl2dim, SparseTensorCOO &lvlCOO); @@ -375,9 +375,9 @@ /// /// Preconditions: /// * as per the `SparseTensorStorageBase` ctor. - /// * `src2lvl` must be valid for `srcRank`, must map indices valid for - /// `source.getDimSizes()` to indices valid for `lvlSizes`, and therefore - /// must be the inverse of `lvl2dim`. + /// * `src2lvl` must be valid for `srcRank`, must map coordinates valid + /// for `source.getDimSizes()` to coordinates valid for `lvlSizes`, + /// and therefore must be the inverse of `lvl2dim`. /// * `source` must have the same value type `V`. /// /// Asserts: @@ -396,7 +396,7 @@ // enable this factory to be used for performing a much larger class of // transformations (which can already be handled by the `SparseTensorNNZ` // implementation). - static SparseTensorStorage * + static SparseTensorStorage * newFromSparseTensor(uint64_t dimRank, const uint64_t *dimShape, uint64_t lvlRank, const uint64_t *lvlSizes, const DimLevelType *lvlTypes, const uint64_t *lvl2dim, @@ -406,67 +406,67 @@ ~SparseTensorStorage() final = default; /// Partially specialize these getter methods based on template types. - void getPointers(std::vector

<P> **out, uint64_t l) final { + void getPositions(std::vector<P>

**out, uint64_t lvl) final { assert(out && "Received nullptr for out parameter"); - ASSERT_VALID_LVL(l); - *out = &pointers[l]; + ASSERT_VALID_LVL(lvl); + *out = &positions[lvl]; } - void getIndices(std::vector **out, uint64_t l) final { + void getCoordinates(std::vector **out, uint64_t lvl) final { assert(out && "Received nullptr for out parameter"); - ASSERT_VALID_LVL(l); - *out = &indices[l]; + ASSERT_VALID_LVL(lvl); + *out = &coordinates[lvl]; } void getValues(std::vector **out) final { assert(out && "Received nullptr for out parameter"); *out = &values; } - uint64_t getIndex(uint64_t l, uint64_t pos) const final { - ASSERT_COMPRESSED_OR_SINGLETON_LVL(l); - assert(pos < indices[l].size() && "Index position is out of bounds"); - return indices[l][pos]; // Converts the stored `I` into `uint64_t`. + uint64_t getCrd(uint64_t lvl, uint64_t pos) const final { + ASSERT_COMPRESSED_OR_SINGLETON_LVL(lvl); + assert(pos < coordinates[lvl].size() && "Position is out of bounds"); + return coordinates[lvl][pos]; // Converts the stored `C` into `uint64_t`. } /// Partially specialize lexicographical insertions based on template types. - void lexInsert(const uint64_t *lvlInd, V val) final { - assert(lvlInd && "Received nullptr for level-indices"); + void lexInsert(const uint64_t *lvlCoords, V val) final { + assert(lvlCoords && "Received nullptr for level-coordinates"); // First, wrap up pending insertion path. uint64_t diffLvl = 0; - uint64_t topIdx = 0; + uint64_t full = 0; if (!values.empty()) { - diffLvl = lexDiff(lvlInd); + diffLvl = lexDiff(lvlCoords); endPath(diffLvl + 1); - topIdx = lvlCursor[diffLvl] + 1; + full = lvlCursor[diffLvl] + 1; } // Then continue with insertion path. - insPath(lvlInd, diffLvl, topIdx, val); + insPath(lvlCoords, diffLvl, full, val); } /// Partially specialize expanded insertions based on template types. - void expInsert(uint64_t *lvlInd, V *values, bool *filled, uint64_t *added, + void expInsert(uint64_t *lvlCoords, V *values, bool *filled, uint64_t *added, uint64_t count) final { - assert((lvlInd && values && filled && added) && "Received nullptr"); + assert((lvlCoords && values && filled && added) && "Received nullptr"); if (count == 0) return; // Sort. std::sort(added, added + count); // Restore insertion path for first insert. const uint64_t lastLvl = getLvlRank() - 1; - uint64_t index = added[0]; - assert(filled[index] && "added index is not filled"); - lvlInd[lastLvl] = index; - lexInsert(lvlInd, values[index]); - values[index] = 0; - filled[index] = false; + uint64_t c = added[0]; + assert(filled[c] && "added coordinate is not filled"); + lvlCoords[lastLvl] = c; + lexInsert(lvlCoords, values[c]); + values[c] = 0; + filled[c] = false; // Subsequent insertions are quick. for (uint64_t i = 1; i < count; ++i) { - assert(index < added[i] && "non-lexicographic insertion"); - index = added[i]; - assert(filled[index] && "added index is not filled"); - lvlInd[lastLvl] = index; - insPath(lvlInd, lastLvl, added[i - 1] + 1, values[index]); - values[index] = 0; - filled[index] = false; + assert(c < added[i] && "non-lexicographic insertion"); + c = added[i]; + assert(filled[c] && "added coordinate is not filled"); + lvlCoords[lastLvl] = c; + insPath(lvlCoords, lastLvl, added[i - 1] + 1, values[c]); + values[c] = 0; + filled[c] = false; } } @@ -478,14 +478,14 @@ endPath(0); } - /// Allocates a new enumerator for this class's `` types and - /// erase the `` parts from the type. 
Callers must make sure to + /// Allocates a new enumerator for this class's `` types and + /// erase the `` parts from the type. Callers must make sure to /// delete the enumerator when they're done with it. void newEnumerator(SparseTensorEnumeratorBase **out, uint64_t trgRank, const uint64_t *trgSizes, uint64_t srcRank, const uint64_t *src2trg) const final { assert(out && "Received nullptr for out parameter"); - *out = new SparseTensorEnumerator(*this, trgRank, trgSizes, + *out = new SparseTensorEnumerator(*this, trgRank, trgSizes, srcRank, src2trg); } @@ -498,11 +498,11 @@ SparseTensorCOO *toCOO(uint64_t trgRank, const uint64_t *trgSizes, uint64_t srcRank, const uint64_t *src2trg) const { // We inline `newEnumerator` to avoid virtual dispatch and allocation. - SparseTensorEnumerator enumerator(*this, trgRank, trgSizes, + SparseTensorEnumerator enumerator(*this, trgRank, trgSizes, srcRank, src2trg); auto *coo = new SparseTensorCOO(trgRank, trgSizes, values.size()); enumerator.forallElements( - [&coo](const auto &trgInd, V val) { coo->add(trgInd, val); }); + [&coo](const auto &trgCoords, V val) { coo->add(trgCoords, val); }); // TODO: This assertion assumes there are no stored zeros, // or if there are then that we don't filter them out. // Cf., @@ -511,58 +511,58 @@ } private: - /// Appends an arbitrary new position to `pointers[l]`. This method + /// Appends an arbitrary new position to `positions[lvl]`. This method /// checks that `pos` is representable in the `P` type; however, it /// does not check that `pos` is semantically valid (i.e., larger than - /// the previous position and smaller than `indices[l].capacity()`). - void appendPointer(uint64_t l, uint64_t pos, uint64_t count = 1) { - ASSERT_COMPRESSED_LVL(l); + /// the previous position and smaller than `coordinates[lvl].capacity()`). + void appendPos(uint64_t lvl, uint64_t pos, uint64_t count = 1) { + ASSERT_COMPRESSED_LVL(lvl); // TODO: we'd like to recover the nicer error message: - // "Pointer value is too large for the P-type" - pointers[l].insert(pointers[l].end(), count, - detail::checkOverflowCast

<P>(pos)); + "Position value is too large for the P-type" + positions[lvl].insert(positions[lvl].end(), count, + detail::checkOverflowCast<P>

(pos)); } - /// Appends index `i` to level `l`, in the semantically general sense. - /// For non-dense levels, that means appending to the `indices[l]` array, - /// checking that `i` is representable in the `I` type; however, we do - /// not verify other semantic requirements (e.g., that `i` is in bounds - /// for `lvlSizes[l]`, and not previously occurring in the same segment). - /// For dense levels, this method instead appends the appropriate number - /// of zeros to the `values` array, where `full` is the number of "entries" - /// already written to `values` for this segment (aka one after the highest - /// index previously appended). - void appendIndex(uint64_t l, uint64_t full, uint64_t i) { - const auto dlt = getLvlType(l); // Avoid redundant bounds checking. + /// Appends coordinate `crd` to level `lvl`, in the semantically + /// general sense. For non-dense levels, that means appending to the + /// `coordinates[lvl]` array, checking that `crd` is representable in + /// the `C` type; however, we do not verify other semantic requirements + /// (e.g., that `crd` is in bounds for `lvlSizes[lvl]`, and not previously + /// occurring in the same segment). For dense levels, this method instead + /// appends the appropriate number of zeros to the `values` array, where + /// `full` is the number of "entries" already written to `values` for this + /// segment (aka one after the highest coordinate previously appended). + void appendCrd(uint64_t lvl, uint64_t full, uint64_t crd) { + const auto dlt = getLvlType(lvl); // Avoid redundant bounds checking. if (isCompressedDLT(dlt) || isSingletonDLT(dlt)) { // TODO: we'd like to recover the nicer error message: - // "Index value is too large for the I-type" - indices[l].push_back(detail::checkOverflowCast(i)); - } else { // Dense dimension. + // "Coordinate value is too large for the C-type" + coordinates[lvl].push_back(detail::checkOverflowCast(crd)); + } else { // Dense level. ASSERT_DENSE_DLT(dlt); - assert(i >= full && "Index was already filled"); - if (i == full) + assert(crd >= full && "Coordinate was already filled"); + if (crd == full) return; // Short-circuit, since it'll be a nop. - if (l + 1 == getLvlRank()) - values.insert(values.end(), i - full, 0); + if (lvl + 1 == getLvlRank()) + values.insert(values.end(), crd - full, 0); else - finalizeSegment(l + 1, 0, i - full); + finalizeSegment(lvl + 1, 0, crd - full); } } - /// Writes the given coordinate to `indices[l][pos]`. This method - /// checks that `i` is representable in the `I` type; however, it - /// does not check that `i` is semantically valid (i.e., in bounds - /// for `dimSizes[l]` and not elsewhere occurring in the same segment). - void writeIndex(uint64_t l, uint64_t pos, uint64_t i) { - ASSERT_COMPRESSED_OR_SINGLETON_LVL(l); + /// Writes the given coordinate to `coordinates[lvl][pos]`. This method + /// checks that `crd` is representable in the `C` type; however, it + /// does not check that `crd` is semantically valid (i.e., in bounds + /// for `dimSizes[lvl]` and not elsewhere occurring in the same segment). + void writeCrd(uint64_t lvl, uint64_t pos, uint64_t crd) { + ASSERT_COMPRESSED_OR_SINGLETON_LVL(lvl); // Subscript assignment to `std::vector` requires that the `pos`-th // entry has been initialized; thus we must be sure to check `size()` // here, instead of `capacity()` as would be ideal. 
- assert(pos < indices[l].size() && "Index position is out of bounds"); + assert(pos < coordinates[lvl].size() && "Position is out of bounds"); // TODO: we'd like to recover the nicer error message: - // "Index value is too large for the I-type" - indices[l][pos] = detail::checkOverflowCast(i); + // "Coordinate value is too large for the C-type" + coordinates[lvl][pos] = detail::checkOverflowCast(crd); } /// Computes the assembled-size associated with the `l`-th level, @@ -571,12 +571,12 @@ /// storage, as opposed to "level-sizes" which are the cardinality /// of possible coordinates for that level. /// - /// Precondition: the `pointers[l]` array must be fully initialized + /// Precondition: the `positions[l]` array must be fully initialized /// before calling this method. uint64_t assembledSize(uint64_t parentSz, uint64_t l) const { const auto dlt = getLvlType(l); // Avoid redundant bounds checking. if (isCompressedDLT(dlt)) - return pointers[l][parentSz]; + return positions[l][parentSz]; if (isSingletonDLT(dlt)) return parentSz; // New size is same as the parent. if (isDenseDLT(dlt)) @@ -586,18 +586,18 @@ } /// Initializes sparse tensor storage scheme from a memory-resident sparse - /// tensor in coordinate scheme. This method prepares the pointers and - /// indices arrays under the given per-dimension dense/sparse annotations. + /// tensor in coordinate scheme. This method prepares the positions and + /// coordinates arrays under the given per-level dense/sparse annotations. /// /// Preconditions: /// * the `lvlElements` must be lexicographically sorted. - /// * the indices of every element are valid for `getLvlSizes()` + /// * the coordinates of every element are valid for `getLvlSizes()` /// (i.e., equal rank and pointwise less-than). void fromCOO(const std::vector> &lvlElements, uint64_t lo, uint64_t hi, uint64_t l) { const uint64_t lvlRank = getLvlRank(); assert(l <= lvlRank && hi <= lvlElements.size()); - // Once dimensions are exhausted, insert the numerical values. + // Once levels are exhausted, insert the numerical values. if (l == lvlRank) { assert(lo < hi); values.push_back(lvlElements[lo].value); @@ -606,30 +606,30 @@ // Visit all elements in this interval. uint64_t full = 0; while (lo < hi) { // If `hi` is unchanged, then `lo < lvlElements.size()`. - // Find segment in interval with same index elements in this level. - const uint64_t i = lvlElements[lo].indices[l]; + // Find segment in interval with same coordinate at this level. + const uint64_t c = lvlElements[lo].coords[l]; uint64_t seg = lo + 1; if (isUniqueLvl(l)) - while (seg < hi && lvlElements[seg].indices[l] == i) + while (seg < hi && lvlElements[seg].coords[l] == c) ++seg; // Handle segment in interval for sparse or dense level. - appendIndex(l, full, i); - full = i + 1; + appendCrd(l, full, c); + full = c + 1; fromCOO(lvlElements, lo, seg, l + 1); // And move on to next segment in interval. lo = seg; } - // Finalize the sparse pointer structure at this level. + // Finalize the sparse position structure at this level. finalizeSegment(l, full); } - /// Finalizes the sparse pointer structure at this level. + /// Finalizes the sparse position structure at this level. void finalizeSegment(uint64_t l, uint64_t full = 0, uint64_t count = 1) { if (count == 0) return; // Short-circuit, since it'll be a nop. const auto dlt = getLvlType(l); // Avoid redundant bounds checking. 
if (isCompressedDLT(dlt)) { - appendPointer(l, indices[l].size(), count); + appendPos(l, coordinates[l].size(), count); } else if (isSingletonDLT(dlt)) { return; // Nothing to finalize. } else { // Dense dimension. @@ -661,29 +661,29 @@ } /// Continues a single insertion path, outer to inner. The first - /// argument is the storage-level indices for the value being inserted. - void insPath(const uint64_t *lvlInd, uint64_t diffLvl, uint64_t topIdx, + /// argument is the level-coordinates for the value being inserted. + void insPath(const uint64_t *lvlCoords, uint64_t diffLvl, uint64_t full, V val) { const uint64_t lvlRank = getLvlRank(); assert(diffLvl <= lvlRank && "Level-diff is out of bounds"); for (uint64_t l = diffLvl; l < lvlRank; ++l) { - const uint64_t i = lvlInd[l]; - appendIndex(l, topIdx, i); - topIdx = 0; - lvlCursor[l] = i; + const uint64_t c = lvlCoords[l]; + appendCrd(l, full, c); + full = 0; + lvlCursor[l] = c; } values.push_back(val); } - /// Finds the lexicographically first level where the level-indices + /// Finds the lexicographically first level where the level-coordinates /// in the argument differ from those in the current cursor. - uint64_t lexDiff(const uint64_t *lvlInd) const { + uint64_t lexDiff(const uint64_t *lvlCoords) const { const uint64_t lvlRank = getLvlRank(); for (uint64_t l = 0; l < lvlRank; ++l) - if (lvlInd[l] > lvlCursor[l]) + if (lvlCoords[l] > lvlCursor[l]) return l; else - assert(lvlInd[l] == lvlCursor[l] && "non-lexicographic insertion"); + assert(lvlCoords[l] == lvlCursor[l] && "non-lexicographic insertion"); assert(0 && "duplicate insertion"); return -1u; } @@ -691,10 +691,10 @@ // Allow `SparseTensorEnumerator` to access the data-members (to avoid // the cost of virtual-function dispatch in inner loops), without // making them public to other client code. - friend class SparseTensorEnumerator; + friend class SparseTensorEnumerator; - std::vector> pointers; - std::vector> indices; + std::vector> positions; + std::vector> coordinates; std::vector values; std::vector lvlCursor; // cursor for lexicographic insertion. }; @@ -709,7 +709,7 @@ /// `SparseTensorStorage` under a permutation. That is, the `forallElements` /// method encapsulates the loop-nest for enumerating the elements of /// the source tensor (in whatever order is best for the source tensor), -/// and applies a permutation to the coordinates/indices before handing +/// and applies a permutation to the coordinates before handing /// each element to the callback. A single enumerator object can be /// freely reused for several calls to `forallElements`, just so long /// as each call is sequential with respect to one another. @@ -719,10 +719,10 @@ /// outlive the sparse tensor they depend on. /// /// Design Note: The reason we define this class instead of simply using -/// `SparseTensorEnumerator` is because we need to hide/generalize -/// the `` template parameters from MLIR client code (to simplify the +/// `SparseTensorEnumerator` is because we need to hide/generalize +/// the `` template parameters from MLIR client code (to simplify the /// type parameters used for direct sparse-to-sparse conversion). And the -/// reason we define the `SparseTensorEnumerator` subclasses rather +/// reason we define the `SparseTensorEnumerator` subclasses rather /// than simply using this class, is to avoid the cost of virtual-method /// dispatch within the loop-nest. template @@ -735,8 +735,8 @@ /// Preconditions: /// * the `src` must have the same `V` value type. 
/// * `trgSizes` must be valid for `trgRank`. - /// * `src2trg` must be valid for `srcRank`, and must map indices - /// valid for `src.getDimSizes()` to indices valid for `trgSizes`. + /// * `src2trg` must be valid for `srcRank`, and must map coordinates + /// valid for `src.getDimSizes()` to coordinates valid for `trgSizes`. /// /// Asserts: /// * `trgSizes` must be nonnull and must contain only nonzero sizes. @@ -779,7 +779,7 @@ const std::vector &getTrgSizes() const { return trgSizes; } /// Enumerates all elements of the source tensor, permutes their - /// indices, and passes the permuted element to the callback. + /// coordinates, and passes the permuted element to the callback. /// The callback must not store the cursor reference directly, /// since this function reuses the storage. Instead, the callback /// must copy it if they want to keep it. @@ -793,11 +793,11 @@ }; //===----------------------------------------------------------------------===// -template +template class MLIR_SPARSETENSOR_GSL_POINTER [[nodiscard]] SparseTensorEnumerator final : public SparseTensorEnumeratorBase { using Base = SparseTensorEnumeratorBase; - using StorageImpl = SparseTensorStorage; + using StorageImpl = SparseTensorStorage; public: /// Constructs an enumerator which automatically applies the given @@ -830,7 +830,7 @@ /// The recursive component of the public `forallElements`. void forallElements(ElementConsumer yield, uint64_t parentPos, uint64_t l) { - // Recover the `` type parameters of `src`. + // Recover the `` type parameters of `src`. const auto &src = static_cast(this->src); if (l == src.getLvlRank()) { assert(parentPos < src.values.size() && @@ -844,28 +844,28 @@ if (isCompressedDLT(dlt)) { // Look up the bounds of the `l`-level segment determined by the // `(l - 1)`-level position `parentPos`. - const std::vector

<P> &pointersL = src.pointers[l]; - assert(parentPos + 1 < pointersL.size() && - "Parent pointer position is out of bounds"); - const uint64_t pstart = static_cast<uint64_t>(pointersL[parentPos]); - const uint64_t pstop = static_cast<uint64_t>(pointersL[parentPos + 1]); - // Loop-invariant code for looking up the `l`-level coordinates/indices. - const std::vector<I> &indicesL = src.indices[l]; - assert(pstop <= indicesL.size() && "Index position is out of bounds"); + const std::vector<P>

&positionsL = src.positions[l]; + assert(parentPos + 1 < positionsL.size() && + "Parent position is out of bounds"); + const uint64_t pstart = static_cast(positionsL[parentPos]); + const uint64_t pstop = static_cast(positionsL[parentPos + 1]); + // Loop-invariant code for looking up the `l`-level coordinates. + const std::vector &coordinatesL = src.coordinates[l]; + assert(pstop <= coordinatesL.size() && "Stop position is out of bounds"); for (uint64_t pos = pstart; pos < pstop; ++pos) { - cursorL = static_cast(indicesL[pos]); + cursorL = static_cast(coordinatesL[pos]); forallElements(yield, pos, l + 1); } } else if (isSingletonDLT(dlt)) { - cursorL = src.getIndex(l, parentPos); + cursorL = src.getCrd(l, parentPos); forallElements(yield, parentPos, l + 1); - } else { // Dense dimension. + } else { // Dense level. ASSERT_DENSE_DLT(dlt); const uint64_t sz = src.getLvlSizes()[l]; const uint64_t pstart = parentPos * sz; - for (uint64_t i = 0; i < sz; ++i) { - cursorL = i; - forallElements(yield, pstart + i, l + 1); + for (uint64_t c = 0; c < sz; ++c) { + cursorL = c; + forallElements(yield, pstart + c, l + 1); } } } @@ -913,29 +913,29 @@ assert(enumerator.getTrgRank() == getLvlRank() && "Tensor rank mismatch"); assert(enumerator.getTrgSizes() == lvlSizes && "Tensor size mismatch"); enumerator.forallElements( - [this](const std::vector &ind, V) { add(ind); }); + [this](const std::vector &lvlCoords, V) { add(lvlCoords); }); } /// The type of callback functions which receive an nnz-statistic. using NNZConsumer = const std::function &; - /// Lexicographically enumerates all indicies for levels strictly + /// Lexicographically enumerates all coordinates for levels strictly /// less than `stopLvl`, and passes their nnz statistic to the callback. /// Since our use-case only requires the statistic not the coordinates /// themselves, we do not bother to construct those coordinates. - void forallIndices(uint64_t stopLvl, NNZConsumer yield) const; + void forallCoords(uint64_t stopLvl, NNZConsumer yield) const; private: /// Adds a new element (i.e., increment its statistics). We use /// a method rather than inlining into the lambda in `initialize`, /// to avoid spurious templating over `V`. And this method is private - /// to avoid needing to re-assert validity of `lvlInd` (which is + /// to avoid needing to re-assert validity of `lvlCoords` (which is /// guaranteed by `forallElements`). - void add(const std::vector &lvlInd); + void add(const std::vector &lvlCoords); - /// Recursive component of the public `forallIndices`. - void forallIndices(NNZConsumer yield, uint64_t stopLvl, uint64_t parentPos, - uint64_t l) const; + /// Recursive component of the public `forallCoords`. + void forallCoords(NNZConsumer yield, uint64_t stopLvl, uint64_t parentPos, + uint64_t l) const; // All of these are in the target storage-order. const std::vector &lvlSizes; @@ -944,10 +944,10 @@ }; //===----------------------------------------------------------------------===// -// Definitions of the ctors and factories of `SparseTensorStorage`. +// Definitions of the ctors and factories of `SparseTensorStorage`. 
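// [Editor's illustrative sketch, not part of this patch.] The factories below
// take raw `dim2lvl`/`lvl2dim` permutations; the "pushforward" used when
// reading COO data simply scatters dim-coordinates into lvl-coordinates
// through such a permutation. A standalone analogue (names hypothetical):
#include <cstdint>
#include <cstdio>
#include <vector>

// dim2lvl[d] is the level that dimension d maps to (a permutation of 0..n-1).
inline void pushforward(const std::vector<uint64_t> &dim2lvl,
                        const std::vector<uint64_t> &dimCoords,
                        std::vector<uint64_t> &lvlCoords) {
  for (uint64_t d = 0, rank = dim2lvl.size(); d < rank; ++d)
    lvlCoords[dim2lvl[d]] = dimCoords[d];
}

int main() {
  const std::vector<uint64_t> dim2lvl = {1, 0}; // store the tensor transposed
  const std::vector<uint64_t> dimCoords = {2, 5};
  std::vector<uint64_t> lvlCoords(2);
  pushforward(dim2lvl, dimCoords, lvlCoords);
  std::printf("lvl coords = (%llu, %llu)\n", (unsigned long long)lvlCoords[0],
              (unsigned long long)lvlCoords[1]); // prints (5, 2)
  return 0;
}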
-template -SparseTensorStorage *SparseTensorStorage::newFromCOO( +template +SparseTensorStorage *SparseTensorStorage::newFromCOO( uint64_t dimRank, const uint64_t *dimShape, uint64_t lvlRank, const DimLevelType *lvlTypes, const uint64_t *lvl2dim, SparseTensorCOO &lvlCOO) { @@ -966,12 +966,12 @@ "Dimension sizes do not match expected shape"); dimSizes[d] = lvlSizes[l]; } - return new SparseTensorStorage(dimRank, dimSizes.data(), lvlRank, + return new SparseTensorStorage(dimRank, dimSizes.data(), lvlRank, lvlTypes, lvl2dim, lvlCOO); } -template -SparseTensorStorage *SparseTensorStorage::newFromSparseTensor( +template +SparseTensorStorage *SparseTensorStorage::newFromSparseTensor( uint64_t dimRank, const uint64_t *dimShape, uint64_t lvlRank, const uint64_t *lvlSizes, const DimLevelType *lvlTypes, const uint64_t *lvl2dim, uint64_t srcRank, const uint64_t *src2lvl, @@ -989,24 +989,24 @@ #endif SparseTensorEnumeratorBase *lvlEnumerator; source.newEnumerator(&lvlEnumerator, lvlRank, lvlSizes, srcRank, src2lvl); - auto *tensor = new SparseTensorStorage( + auto *tensor = new SparseTensorStorage( dimRank, dimSizes.data(), lvlRank, lvlTypes, lvl2dim, *lvlEnumerator); delete lvlEnumerator; return tensor; } -template -SparseTensorStorage::SparseTensorStorage( +template +SparseTensorStorage::SparseTensorStorage( uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank, const uint64_t *lvlSizes, const DimLevelType *lvlTypes, const uint64_t *lvl2dim, bool initializeValuesIfAllDense) : SparseTensorStorage(dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes, lvl2dim) { - // Provide hints on capacity of pointers and indices. + // Provide hints on capacity of positions and coordinates. // TODO: needs much fine-tuning based on actual sparsity; currently - // we reserve pointer/index space based on all previous dense - // dimensions, which works well up to first sparse dim; but - // we should really use nnz and dense/sparse distribution. + // we reserve position/coordinate space based on all previous dense + // levels, which works well up to first sparse level; but we should + // really use nnz and dense/sparse distribution. bool allDense = true; uint64_t sz = 1; for (uint64_t l = 0; l < lvlRank; ++l) { @@ -1014,16 +1014,16 @@ if (isCompressedDLT(dlt)) { // TODO: Take a parameter between 1 and `lvlSizes[l]`, and multiply // `sz` by that before reserving. (For now we just use 1.) - pointers[l].reserve(sz + 1); - pointers[l].push_back(0); - indices[l].reserve(sz); + positions[l].reserve(sz + 1); + positions[l].push_back(0); + coordinates[l].reserve(sz); sz = 1; allDense = false; } else if (isSingletonDLT(dlt)) { - indices[l].reserve(sz); + coordinates[l].reserve(sz); sz = 1; allDense = false; - } else { // Dense dimension. + } else { // Dense level. ASSERT_DENSE_DLT(dlt); sz = detail::checkedMul(sz, lvlSizes[l]); } @@ -1032,8 +1032,8 @@ values.resize(sz, 0); } -template -SparseTensorStorage::SparseTensorStorage( // NOLINT +template +SparseTensorStorage::SparseTensorStorage( // NOLINT uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank, const DimLevelType *lvlTypes, const uint64_t *lvl2dim, SparseTensorCOO &lvlCOO) @@ -1046,13 +1046,13 @@ lvlCOO.sort(); // Now actually insert the `elements`. 
const auto &elements = lvlCOO.getElements(); - uint64_t nnz = elements.size(); - values.reserve(nnz); - fromCOO(elements, 0, nnz, 0); + const uint64_t nse = elements.size(); + values.reserve(nse); + fromCOO(elements, 0, nse, 0); } -template <typename P, typename I, typename V> -SparseTensorStorage<P, I, V>::SparseTensorStorage( +template <typename P, typename C, typename V> +SparseTensorStorage<P, C, V>::SparseTensorStorage( uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank, const DimLevelType *lvlTypes, const uint64_t *lvl2dim, SparseTensorEnumeratorBase<V> &lvlEnumerator) @@ -1064,64 +1064,64 @@ // Initialize the statistics structure. SparseTensorNNZ nnz(getLvlSizes(), getLvlTypes()); nnz.initialize(lvlEnumerator); - // Initialize "pointers" overhead (and allocate "indices", "values"). + // Initialize "positions" overhead (and allocate "coordinates", "values"). uint64_t parentSz = 1; // assembled-size of the `(l - 1)`-level. for (uint64_t l = 0; l < lvlRank; ++l) { const auto dlt = lvlTypes[l]; // Avoid redundant bounds checking. if (isCompressedDLT(dlt)) { - pointers[l].reserve(parentSz + 1); - pointers[l].push_back(0); + positions[l].reserve(parentSz + 1); + positions[l].push_back(0); uint64_t currentPos = 0; - nnz.forallIndices(l, [this, &currentPos, l](uint64_t n) { + nnz.forallCoords(l, [this, &currentPos, l](uint64_t n) { currentPos += n; - appendPointer(l, currentPos); + appendPos(l, currentPos); }); - assert(pointers[l].size() == parentSz + 1 && - "Final pointers size doesn't match allocated size"); + assert(positions[l].size() == parentSz + 1 && + "Final positions size doesn't match allocated size"); // That assertion entails `assembledSize(parentSz, l)` - // is now in a valid state. That is, `pointers[l][parentSz]` + // is now in a valid state. That is, `positions[l][parentSz]` // equals the present value of `currentPos`, which is the - // correct assembled-size for `indices[l]`. + // correct assembled-size for `coordinates[l]`. } // Update assembled-size for the next iteration. parentSz = assembledSize(parentSz, l); - // Ideally we need only `indices[l].reserve(parentSz)`, however + // Ideally we need only `coordinates[l].reserve(parentSz)`, however // the `std::vector` implementation forces us to initialize it too. // That is, in the yieldPos loop we need random-access assignment - // to `indices[l]`; however, `std::vector`'s subscript-assignment + // to `coordinates[l]`; however, `std::vector`'s subscript-assignment // only allows assigning to already-initialized positions. if (isCompressedDLT(dlt) || isSingletonDLT(dlt)) - indices[l].resize(parentSz, 0); + coordinates[l].resize(parentSz, 0); else ASSERT_DENSE_DLT(dlt); // Future-proofing. } values.resize(parentSz, 0); // Both allocate and zero-initialize. } // The yieldPos loop - lvlEnumerator.forallElements([this](const auto &lvlInd, V val) { + lvlEnumerator.forallElements([this](const auto &lvlCoords, V val) { uint64_t parentSz = 1, parentPos = 0; for (uint64_t lvlRank = getLvlRank(), l = 0; l < lvlRank; ++l) { const auto dlt = getLvlTypes()[l]; // Avoid redundant bounds checking. if (isCompressedDLT(dlt)) { // If `parentPos == parentSz` then it's valid as an array-lookup; // however, it's semantically invalid here since that entry - // does not represent a segment of `indices[l]`. Moreover, that + // does not represent a segment of `coordinates[l]`. Moreover, that // entry must be immutable for `assembledSize` to remain valid.
- assert(parentPos < parentSz && "Pointers position is out of bounds"); - const uint64_t currentPos = pointers[l][parentPos]; + assert(parentPos < parentSz && "Parent position is out of bounds"); + const uint64_t currentPos = positions[l][parentPos]; // This increment won't overflow the `P` type, since it can't - // exceed the original value of `pointers[l][parentPos+1]` + // exceed the original value of `positions[l][parentPos+1]` // which was already verified to be within bounds for `P` // when it was written to the array. - pointers[l][parentPos]++; - writeIndex(l, currentPos, lvlInd[l]); + positions[l][parentPos]++; + writeCrd(l, currentPos, lvlCoords[l]); parentPos = currentPos; } else if (isSingletonDLT(dlt)) { - writeIndex(l, parentPos, lvlInd[l]); + writeCrd(l, parentPos, lvlCoords[l]); // the new parentPos equals the old parentPos. - } else { // Dense dimension. + } else { // Dense level. ASSERT_DENSE_DLT(dlt); - parentPos = parentPos * getLvlSizes()[l] + lvlInd[l]; + parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l]; } parentSz = assembledSize(parentSz, l); } @@ -1132,17 +1132,17 @@ for (uint64_t parentSz = 1, l = 0; l < lvlRank; ++l) { const auto dlt = lvlTypes[l]; // Avoid redundant bounds checking. if (isCompressedDLT(dlt)) { - assert(parentSz == pointers[l].size() - 1 && - "Actual pointers size doesn't match the expected size"); + assert(parentSz == positions[l].size() - 1 && + "Actual positions size doesn't match the expected size"); // Can't check all of them, but at least we can check the last one. - assert(pointers[l][parentSz - 1] == pointers[l][parentSz] && - "Pointers got corrupted"); + assert(positions[l][parentSz - 1] == positions[l][parentSz] && + "Positions got corrupted"); // TODO: optimize this by using `memmove` or similar. for (uint64_t n = 0; n < parentSz; ++n) { const uint64_t parentPos = parentSz - n; - pointers[l][parentPos] = pointers[l][parentPos - 1]; + positions[l][parentPos] = positions[l][parentPos - 1]; } - pointers[l][0] = 0; + positions[l][0] = 0; } else { // Both dense and singleton are no-ops for the finalizeYieldPos loop. // This assertion is for future-proofing. diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h --- a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h +++ b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h @@ -61,8 +61,8 @@ StridedMemRefType *lvlSizesRef, StridedMemRefType *lvlTypesRef, StridedMemRefType *lvl2dimRef, - StridedMemRefType *dim2lvlRef, OverheadType ptrTp, - OverheadType indTp, PrimaryType valTp, Action action, void *ptr); + StridedMemRefType *dim2lvlRef, OverheadType posTp, + OverheadType crdTp, PrimaryType valTp, Action action, void *ptr); // TODO: document what all the arguments are/mean for the functions below, // especially with regards to "dim"-vs-"lvl" and mappings/permutations. @@ -74,43 +74,46 @@ MLIR_SPARSETENSOR_FOREVERY_V(DECL_SPARSEVALUES) #undef DECL_SPARSEVALUES -/// Tensor-storage method to obtain direct access to the pointers array -/// for the given dimension. -#define DECL_SPARSEPOINTERS(PNAME, P) \ - MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_sparsePointers##PNAME( \ - StridedMemRefType *out, void *tensor, index_type d); -MLIR_SPARSETENSOR_FOREVERY_O(DECL_SPARSEPOINTERS) -#undef DECL_SPARSEPOINTERS - -/// Tensor-storage method to obtain direct access to the indices array -/// for the given dimension. 
-#define DECL_SPARSEINDICES(INAME, I) \ - MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_sparseIndices##INAME( \ - StridedMemRefType *out, void *tensor, index_type d); -MLIR_SPARSETENSOR_FOREVERY_O(DECL_SPARSEINDICES) -#undef DECL_SPARSEINDICES +/// Tensor-storage method to obtain direct access to the positions array +/// for the given level. +#define DECL_SPARSEPOSITIONS(PNAME, P) \ + MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_sparsePositions##PNAME( \ + StridedMemRefType *out, void *tensor, index_type lvl); +MLIR_SPARSETENSOR_FOREVERY_O(DECL_SPARSEPOSITIONS) +#undef DECL_SPARSEPOSITIONS + +/// Tensor-storage method to obtain direct access to the coordinates array +/// for the given level. +#define DECL_SPARSECOORDINATES(CNAME, C) \ + MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_sparseCoordinates##CNAME( \ + StridedMemRefType *out, void *tensor, index_type lvl); +MLIR_SPARSETENSOR_FOREVERY_O(DECL_SPARSECOORDINATES) +#undef DECL_SPARSECOORDINATES /// Coordinate-scheme method for adding a new element. #define DECL_ADDELT(VNAME, V) \ MLIR_CRUNNERUTILS_EXPORT void *_mlir_ciface_addElt##VNAME( \ - void *coo, StridedMemRefType *vref, \ - StridedMemRefType *iref, \ - StridedMemRefType *pref); + void *lvlCOO, StridedMemRefType *vref, \ + StridedMemRefType *dimCoordsRef, \ + StridedMemRefType *dim2lvlRef); MLIR_SPARSETENSOR_FOREVERY_V(DECL_ADDELT) #undef DECL_ADDELT /// Coordinate-scheme method for getting the next element while iterating. +/// The `cref` argument uses the same coordinate-space as the `iter` (which +/// can be either dim- or lvl-coords, depending on context). #define DECL_GETNEXT(VNAME, V) \ MLIR_CRUNNERUTILS_EXPORT bool _mlir_ciface_getNext##VNAME( \ - void *coo, StridedMemRefType *iref, \ + void *iter, StridedMemRefType *cref, \ StridedMemRefType *vref); MLIR_SPARSETENSOR_FOREVERY_V(DECL_GETNEXT) #undef DECL_GETNEXT -/// Tensor-storage method to insert elements in lexicographical index order. +/// Tensor-storage method to insert elements in lexicographical +/// level-coordinate order. #define DECL_LEXINSERT(VNAME, V) \ MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_lexInsert##VNAME( \ - void *tensor, StridedMemRefType *cref, \ + void *tensor, StridedMemRefType *lvlCoordsRef, \ StridedMemRefType *vref); MLIR_SPARSETENSOR_FOREVERY_V(DECL_LEXINSERT) #undef DECL_LEXINSERT @@ -118,7 +121,7 @@ /// Tensor-storage method to insert using expansion. 
#define DECL_EXPINSERT(VNAME, V) \ MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_expInsert##VNAME( \ - void *tensor, StridedMemRefType *cref, \ + void *tensor, StridedMemRefType *lvlCoordsRef, \ StridedMemRefType *vref, StridedMemRefType *fref, \ StridedMemRefType *aref, index_type count); MLIR_SPARSETENSOR_FOREVERY_V(DECL_EXPINSERT) @@ -181,9 +184,10 @@ /// nse: number of specified elements (usually the nonzeros) /// shape: array with dimension size for each rank /// values: a "nse" array with values for all specified elements -/// indices: a flat "nse * rank" array with indices for all specified elements -/// perm: the permutation of the dimensions in the storage -/// sparse: the sparsity for the dimensions +/// coordinates: a flat "nse * rank" array with coordinates for all +/// specified elements +/// perm: the permutation of the levels in the storage +/// sparse: the sparsity for the levels /// /// For example, the sparse matrix /// | 1.0 0.0 0.0 | @@ -193,11 +197,11 @@ /// nse = 3 /// shape = [2, 3] /// values = [1.0, 5.0, 3.0] -/// indices = [ 0, 0, 1, 1, 1, 2] +/// coordinates = [ 0, 0, 1, 1, 1, 2] #define DECL_CONVERTTOMLIRSPARSETENSOR(VNAME, V) \ MLIR_CRUNNERUTILS_EXPORT void *convertToMLIRSparseTensor##VNAME( \ - uint64_t rank, uint64_t nse, uint64_t *shape, V *values, \ - uint64_t *indices, uint64_t *perm, uint8_t *sparse); + uint64_t rank, uint64_t nse, uint64_t *dimSizes, V *values, \ + uint64_t *dimCoordinates, uint64_t *dim2lvl, uint8_t *lvlTypes); MLIR_SPARSETENSOR_FOREVERY_V(DECL_CONVERTTOMLIRSPARSETENSOR) #undef DECL_CONVERTTOMLIRSPARSETENSOR @@ -207,16 +211,17 @@ /// /// rank: rank of tensor /// nse: number of specified elements (usually the nonzeros) -/// shape: array with dimension size for each rank +/// shape: array with size for each dimension /// values: a "nse" array with values for all specified elements -/// indices: a flat "nse * rank" array with indices for all specified elements +/// coordinates: a flat "nse * rank" array with coordinates for all +/// specified elements /// -/// The input is a pointer to `SparseTensorStorage`, typically +/// The input is a pointer to `SparseTensorStorage`, typically /// returned from `convertToMLIRSparseTensor`. #define DECL_CONVERTFROMMLIRSPARSETENSOR(VNAME, V) \ MLIR_CRUNNERUTILS_EXPORT void convertFromMLIRSparseTensor##VNAME( \ void *tensor, uint64_t *pRank, uint64_t *pNse, uint64_t **pShape, \ - V **pValues, uint64_t **pIndices); + V **pValues, uint64_t **pCoordinates); MLIR_SPARSETENSOR_FOREVERY_V(DECL_CONVERTFROMMLIRSPARSETENSOR) #undef DECL_CONVERTFROMMLIRSPARSETENSOR @@ -242,8 +247,8 @@ void *p, StridedMemRefType *lvlSizesRef, StridedMemRefType *lvlTypesRef, StridedMemRefType *lvl2dimRef, - StridedMemRefType *dim2lvlRef, OverheadType ptrTp, - OverheadType indTp, PrimaryType valTp); + StridedMemRefType *dim2lvlRef, OverheadType posTp, + OverheadType crdTp, PrimaryType valTp); /// Returns the rank of the sparse tensor being read. MLIR_CRUNNERUTILS_EXPORT index_type getSparseTensorReaderRank(void *p); @@ -251,8 +256,8 @@ /// Returns the is_symmetric bit for the sparse tensor being read. MLIR_CRUNNERUTILS_EXPORT bool getSparseTensorReaderIsSymmetric(void *p); -/// Returns the number of non-zero values for the sparse tensor being read. -MLIR_CRUNNERUTILS_EXPORT index_type getSparseTensorReaderNNZ(void *p); +/// Returns the number of stored elements for the sparse tensor being read. +MLIR_CRUNNERUTILS_EXPORT index_type getSparseTensorReaderNSE(void *p); /// Returns the size of a dimension for the sparse tensor being read. 
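// [Editor's illustrative sketch, not part of this patch.] The
// convert{To,From}MLIRSparseTensor interface above packs coordinates into a
// flat "nse * rank" array; this decodes the documented example matrix
//   | 1.0 0.0 0.0 |
//   | 0.0 5.0 3.0 |
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t rank = 2, nse = 3;
  const double values[] = {1.0, 5.0, 3.0};
  const uint64_t coordinates[] = {0, 0, 1, 1, 1, 2}; // element i at i * rank
  for (uint64_t i = 0; i < nse; ++i)
    std::printf("(%llu, %llu) = %g\n",
                (unsigned long long)coordinates[i * rank + 0],
                (unsigned long long)coordinates[i * rank + 1], values[i]);
  return 0;
}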
MLIR_CRUNNERUTILS_EXPORT index_type getSparseTensorReaderDimSize(void *p, @@ -278,7 +283,7 @@ /// Returns the next element for the sparse tensor being read. #define DECL_GETNEXT(VNAME, V) \ MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_getSparseTensorReaderNext##VNAME( \ - void *p, StridedMemRefType *iref, \ + void *p, StridedMemRefType *dimCoordsRef, \ StridedMemRefType *vref); MLIR_SPARSETENSOR_FOREVERY_V(DECL_GETNEXT) #undef DECL_GETNEXT @@ -306,15 +311,16 @@ /// SparseTensorWriter. MLIR_CRUNNERUTILS_EXPORT void delSparseTensorWriter(void *p); -/// Outputs the sparse tensor rank, nnz and shape. +/// Outputs the sparse tensor dim-rank, nse, and dim-shape. MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_outSparseTensorWriterMetaData( - void *p, index_type rank, index_type nnz, - StridedMemRefType *dref); + void *p, index_type dimRank, index_type nse, + StridedMemRefType *dimSizesRef); /// Outputs an element for the sparse tensor. #define DECL_OUTNEXT(VNAME, V) \ MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_outSparseTensorWriterNext##VNAME( \ - void *p, index_type rank, StridedMemRefType *iref, \ + void *p, index_type dimRank, \ + StridedMemRefType *dimCoordsRef, \ StridedMemRefType *vref); MLIR_SPARSETENSOR_FOREVERY_V(DECL_OUTNEXT) #undef DECL_OUTNEXT diff --git a/mlir/lib/Bindings/Python/DialectSparseTensor.cpp b/mlir/lib/Bindings/Python/DialectSparseTensor.cpp --- a/mlir/lib/Bindings/Python/DialectSparseTensor.cpp +++ b/mlir/lib/Bindings/Python/DialectSparseTensor.cpp @@ -35,27 +35,27 @@ [](py::object cls, std::vector dimLevelTypes, std::optional dimOrdering, - std::optional higherOrdering, int pointerBitWidth, - int indexBitWidth, MlirContext context) { + std::optional higherOrdering, int posWidth, + int crdWidth, MlirContext context) { return cls(mlirSparseTensorEncodingAttrGet( context, dimLevelTypes.size(), dimLevelTypes.data(), dimOrdering ? *dimOrdering : MlirAffineMap{nullptr}, higherOrdering ? 
*higherOrdering : MlirAffineMap{nullptr}, - pointerBitWidth, indexBitWidth)); + posWidth, crdWidth)); }, py::arg("cls"), py::arg("dim_level_types"), py::arg("dim_ordering"), - py::arg("higher_ordering"), py::arg("pointer_bit_width"), - py::arg("index_bit_width"), py::arg("context") = py::none(), + py::arg("higher_ordering"), py::arg("pos_width"), + py::arg("crd_width"), py::arg("context") = py::none(), "Gets a sparse_tensor.encoding from parameters.") .def_property_readonly( "dim_level_types", [](MlirAttribute self) { + const int lvlRank = mlirSparseTensorEncodingGetLvlRank(self); std::vector ret; - for (int i = 0, - e = mlirSparseTensorEncodingGetNumDimLevelTypes(self); - i < e; ++i) + ret.reserve(lvlRank); + for (int l = 0; l < lvlRank; ++l) ret.push_back( - mlirSparseTensorEncodingAttrGetDimLevelType(self, i)); + mlirSparseTensorEncodingAttrGetDimLevelType(self, l)); return ret; }) .def_property_readonly( @@ -76,14 +76,10 @@ return {}; return ret; }) - .def_property_readonly( - "pointer_bit_width", - [](MlirAttribute self) { - return mlirSparseTensorEncodingAttrGetPointerBitWidth(self); - }) - .def_property_readonly("index_bit_width", [](MlirAttribute self) { - return mlirSparseTensorEncodingAttrGetIndexBitWidth(self); - }); + .def_property_readonly("pos_width", + mlirSparseTensorEncodingAttrGetPosWidth) + .def_property_readonly("crd_width", + mlirSparseTensorEncodingAttrGetCrdWidth); } PYBIND11_MODULE(_mlirDialectsSparseTensor, m) { diff --git a/mlir/lib/CAPI/Dialect/SparseTensor.cpp b/mlir/lib/CAPI/Dialect/SparseTensor.cpp --- a/mlir/lib/CAPI/Dialect/SparseTensor.cpp +++ b/mlir/lib/CAPI/Dialect/SparseTensor.cpp @@ -46,17 +46,17 @@ } MlirAttribute mlirSparseTensorEncodingAttrGet( - MlirContext ctx, intptr_t numDimLevelTypes, + MlirContext ctx, intptr_t lvlRank, MlirSparseTensorDimLevelType const *dimLevelTypes, - MlirAffineMap dimOrdering, MlirAffineMap higherOrdering, - int pointerBitWidth, int indexBitWidth) { + MlirAffineMap dimOrdering, MlirAffineMap higherOrdering, int posWidth, + int crdWidth) { SmallVector cppDimLevelTypes; - cppDimLevelTypes.resize(numDimLevelTypes); - for (intptr_t i = 0; i < numDimLevelTypes; ++i) - cppDimLevelTypes[i] = static_cast(dimLevelTypes[i]); + cppDimLevelTypes.reserve(lvlRank); + for (intptr_t l = 0; l < lvlRank; ++l) + cppDimLevelTypes.push_back(static_cast(dimLevelTypes[l])); return wrap(SparseTensorEncodingAttr::get( unwrap(ctx), cppDimLevelTypes, unwrap(dimOrdering), - unwrap(higherOrdering), pointerBitWidth, indexBitWidth)); + unwrap(higherOrdering), posWidth, crdWidth)); } MlirAffineMap mlirSparseTensorEncodingAttrGetDimOrdering(MlirAttribute attr) { @@ -69,7 +69,7 @@ unwrap(attr).cast().getHigherOrdering()); } -intptr_t mlirSparseTensorEncodingGetNumDimLevelTypes(MlirAttribute attr) { +intptr_t mlirSparseTensorEncodingGetLvlRank(MlirAttribute attr) { return unwrap(attr).cast().getLvlRank(); } @@ -79,10 +79,10 @@ unwrap(attr).cast().getLvlType(lvl)); } -int mlirSparseTensorEncodingAttrGetPointerBitWidth(MlirAttribute attr) { - return unwrap(attr).cast().getPointerBitWidth(); +int mlirSparseTensorEncodingAttrGetPosWidth(MlirAttribute attr) { + return unwrap(attr).cast().getPosWidth(); } -int mlirSparseTensorEncodingAttrGetIndexBitWidth(MlirAttribute attr) { - return unwrap(attr).cast().getIndexBitWidth(); +int mlirSparseTensorEncodingAttrGetCrdWidth(MlirAttribute attr) { + return unwrap(attr).cast().getCrdWidth(); } diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp 
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp +++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp @@ -121,18 +121,18 @@ return IndexType::get(ctx); } -Type SparseTensorEncodingAttr::getPointerType() const { - return detail::getIntegerOrIndexType(getContext(), getPointerBitWidth()); +Type SparseTensorEncodingAttr::getPosType() const { + return detail::getIntegerOrIndexType(getContext(), getPosWidth()); } -Type SparseTensorEncodingAttr::getIndexType() const { - return detail::getIntegerOrIndexType(getContext(), getIndexBitWidth()); +Type SparseTensorEncodingAttr::getCrdType() const { + return detail::getIntegerOrIndexType(getContext(), getCrdWidth()); } SparseTensorEncodingAttr SparseTensorEncodingAttr::withoutOrdering() const { - return SparseTensorEncodingAttr::get( - getContext(), getDimLevelType(), AffineMap(), AffineMap(), - getPointerBitWidth(), getIndexBitWidth()); + return SparseTensorEncodingAttr::get(getContext(), getDimLevelType(), + AffineMap(), AffineMap(), getPosWidth(), + getCrdWidth()); } SparseTensorEncodingAttr SparseTensorEncodingAttr::withoutBitWidths() const { @@ -227,18 +227,18 @@ RETURN_ON_FAIL(parser.parseLBrace()) // Process the data from the parsed dictionary value into struct-like data. - SmallVector dlt; + SmallVector lvlTypes; SmallVector slices; AffineMap dimOrd = {}; AffineMap higherOrd = {}; - unsigned ptr = 0; - unsigned ind = 0; + unsigned posWidth = 0; + unsigned crdWidth = 0; StringRef attrName; // Exactly 6 keys. SmallVector keys = {"dimLevelType", "dimOrdering", - "higherOrdering", "pointerBitWidth", - "indexBitWidth", "slice"}; + "higherOrdering", "posWidth", + "crdWidth", "slice"}; while (succeeded(parser.parseOptionalKeyword(&attrName))) { if (!llvm::is_contained(keys, attrName)) { parser.emitError(parser.getNameLoc(), "unexpected key: ") << attrName; @@ -247,6 +247,10 @@ // Consume the `=` after keys RETURN_ON_FAIL(parser.parseEqual()) + // FIXME: using `operator==` below duplicates the string comparison + // cost of the `is_contained` check above. Should instead use some + // "find" function that returns the index into `keys` so that we can + // dispatch on that instead. 
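// Illustrative note, assuming the keys listed above: a typical CSR encoding
// now round-trips through this parser/printer as
//   #sparse_tensor.encoding<{
//     dimLevelType = [ "dense", "compressed" ],
//     posWidth = 32, crdWidth = 32
//   }>
// where `posWidth` sizes the positions array (formerly "pointers") and
// `crdWidth` sizes the coordinates array (formerly "indices") of each
// compressed or singleton level.  One possible shape for the dispatch
// suggested by the FIXME above (sketch only, using the locals of this
// function):
//   const auto *it = llvm::find(keys, attrName);
//   switch (std::distance(keys.begin(), it)) { case 3: /* posWidth */ ... }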
if (attrName == "dimLevelType") { Attribute attr; RETURN_ON_FAIL(parser.parseAttribute(attr)); @@ -257,7 +261,7 @@ ERROR_IF(!strAttr, "expected a string value in dimension level types") auto strVal = strAttr.getValue(); if (auto optDLT = parseDLT(strVal)) { - dlt.push_back(optDLT.value()); + lvlTypes.push_back(optDLT.value()); } else { parser.emitError(parser.getNameLoc(), "unexpected dimension level type: ") @@ -277,18 +281,18 @@ auto affineAttr = attr.dyn_cast(); ERROR_IF(!affineAttr, "expected an affine map for higher ordering") higherOrd = affineAttr.getValue(); - } else if (attrName == "pointerBitWidth") { + } else if (attrName == "posWidth") { Attribute attr; RETURN_ON_FAIL(parser.parseAttribute(attr)) auto intAttr = attr.dyn_cast(); - ERROR_IF(!intAttr, "expected an integral pointer bitwidth") - ptr = intAttr.getInt(); - } else if (attrName == "indexBitWidth") { + ERROR_IF(!intAttr, "expected an integral position bitwidth") + posWidth = intAttr.getInt(); + } else if (attrName == "crdWidth") { Attribute attr; RETURN_ON_FAIL(parser.parseAttribute(attr)) auto intAttr = attr.dyn_cast(); ERROR_IF(!intAttr, "expected an integral index bitwidth") - ind = intAttr.getInt(); + crdWidth = intAttr.getInt(); } else if (attrName == "slice") { RETURN_ON_FAIL(parser.parseLSquare()) // Dispatches to DimSliceAttr to skip mnemonic @@ -319,7 +323,8 @@ // Construct struct-like storage for attribute. return parser.getChecked( - parser.getContext(), dlt, dimOrd, higherOrd, ptr, ind, slices); + parser.getContext(), lvlTypes, dimOrd, higherOrd, posWidth, crdWidth, + slices); } void SparseTensorEncodingAttr::print(AsmPrinter &printer) const { @@ -334,10 +339,10 @@ printer << ", dimOrdering = affine_map<" << getDimOrdering() << ">"; if (getHigherOrdering()) printer << ", higherOrdering = affine_map<" << getHigherOrdering() << ">"; - if (getPointerBitWidth()) - printer << ", pointerBitWidth = " << getPointerBitWidth(); - if (getIndexBitWidth()) - printer << ", indexBitWidth = " << getIndexBitWidth(); + if (getPosWidth()) + printer << ", posWidth = " << getPosWidth(); + if (getCrdWidth()) + printer << ", crdWidth = " << getCrdWidth(); if (!getDimSlices().empty()) { printer << ", slice = [ "; llvm::interleaveComma(getDimSlices(), printer, @@ -355,12 +360,12 @@ LogicalResult SparseTensorEncodingAttr::verify( function_ref emitError, ArrayRef dimLevelType, AffineMap dimOrdering, - AffineMap higherOrdering, unsigned pointerBitWidth, unsigned indexBitWidth, + AffineMap higherOrdering, unsigned posWidth, unsigned crdWidth, ArrayRef dimSlices) { - if (!acceptBitWidth(pointerBitWidth)) - return emitError() << "unexpected pointer bitwidth: " << pointerBitWidth; - if (!acceptBitWidth(indexBitWidth)) - return emitError() << "unexpected index bitwidth: " << indexBitWidth; + if (!acceptBitWidth(posWidth)) + return emitError() << "unexpected position bitwidth: " << posWidth; + if (!acceptBitWidth(crdWidth)) + return emitError() << "unexpected coordinate bitwidth: " << crdWidth; // Before we can check that the level-rank is consistent/coherent // across all fields, we need to define it. The source-of-truth for // the `getLvlRank` method is the length of the level-types array, @@ -403,9 +408,9 @@ function_ref emitError) const { // Check structural integrity. In particular, this ensures that the // level-rank is coherent across all the fields. 
- RETURN_FAILURE_IF_FAILED(verify( - emitError, getDimLevelType(), getDimOrdering(), getHigherOrdering(), - getPointerBitWidth(), getIndexBitWidth(), getDimSlices())) + RETURN_FAILURE_IF_FAILED(verify(emitError, getDimLevelType(), + getDimOrdering(), getHigherOrdering(), + getPosWidth(), getCrdWidth(), getDimSlices())) // Check integrity with tensor type specifics. In particular, we // need only check that the dimension-rank of the tensor agrees with // the dimension-rank of the encoding. @@ -498,11 +503,10 @@ // TODO: Maybe pick the bitwidth based on input/output tensors (probably the // largest one among them) in the original operation instead of using the // default value. - unsigned pointerBitWidth = src.getPointerBitWidth(); - unsigned indexBitWidth = src.getIndexBitWidth(); + unsigned posWidth = src.getPosWidth(); + unsigned crdWidth = src.getCrdWidth(); auto enc = SparseTensorEncodingAttr::get(src.getContext(), lvlTypes, lvlPerm, - AffineMap(), pointerBitWidth, - indexBitWidth); + AffineMap(), posWidth, crdWidth); return RankedTensorType::get(src.getDimShape(), src.getElementType(), enc); } @@ -577,8 +581,8 @@ enc.getContext(), dlts, AffineMap(), // dimOrdering (irrelavant to storage speicifer) AffineMap(), // highLvlOrdering (irrelavant to storage specifer) - // Always use index for memSize, dimSize instead of reusing - // getBitwidth from pointers/indices. + // Always use `index` for memSize and lvlSize instead of reusing + // `getPosWidth`/`getCrdWidth`. // It allows us to reuse the same SSA value for different bitwidth, // It also avoids casting between index/integer (returned by DimOp) 0, 0, @@ -596,17 +600,17 @@ // SparseTensorDialect Operations. //===----------------------------------------------------------------------===// -static LogicalResult dimIsInBounds(Dimension dim, Value tensor) { - return success(dim < getDimRank(tensor)); +static LogicalResult lvlIsInBounds(Level lvl, Value tensor) { + return success(lvl < getSparseTensorType(tensor).getLvlRank()); } -static LogicalResult isMatchingWidth(Value result, unsigned width) { - const Type etp = getMemRefType(result).getElementType(); +static LogicalResult isMatchingWidth(Value mem, unsigned width) { + const Type etp = getMemRefType(mem).getElementType(); return success(width == 0 ? 
etp.isIndex() : etp.isInteger(width)); } static LogicalResult verifySparsifierGetterSetter( - StorageSpecifierKind mdKind, std::optional lvl, + StorageSpecifierKind mdKind, std::optional lvl, TypedValue md, Operation *op) { if (mdKind == StorageSpecifierKind::ValMemSize && lvl) { return op->emitError( @@ -620,58 +624,72 @@ if (!lvl) return op->emitError("missing level argument"); - const Level l = lvl.value().getZExtValue(); + const Level l = lvl.value(); if (l >= lvlRank) - return op->emitError("requested level out of bound"); + return op->emitError("requested level is out of bounds"); - if (mdKind == StorageSpecifierKind::PtrMemSize && enc.isSingletonLvl(l)) + if (mdKind == StorageSpecifierKind::PosMemSize && enc.isSingletonLvl(l)) return op->emitError( - "requested pointer memory size on a singleton level"); + "requested position memory size on a singleton level"); } return success(); } -static LogicalResult verifyPackUnPack(Operation *op, TensorType cooTp, - TensorType dataTp, TensorType idxTp) { - if (!isUniqueCOOType(cooTp)) - return op->emitError("must operate on a COO tensor"); - - auto enc = getSparseTensorEncoding(cooTp); - if (idxTp.getElementType() != enc.getIndexType() || - dataTp.getElementType() != cooTp.getElementType()) - return op->emitError("unmatched type between input and output"); - - auto dNOE = dataTp.getShape()[0]; - auto iNOE = idxTp.getShape()[0]; - if (!ShapedType::isDynamic(dNOE) && !ShapedType::isDynamic(iNOE) && - dNOE != iNOE) - return op->emitError("unmatched number of elements in data and indices"); - - // A tensor for indices means the input COO is rank N - auto inRank = idxTp.getShape()[1]; - auto ouRank = cooTp.getRank(); - if (!ShapedType::isDynamic(inRank) && inRank != ouRank) - return op->emitError("unmatched rank between input and output"); +static LogicalResult verifyPackUnPack(Operation *op, bool requiresStaticShape, + SparseTensorType tensorTp, + RankedTensorType valuesTp, + RankedTensorType coordinatesTp) { + if (requiresStaticShape && !tensorTp.hasStaticDimShape()) + return op->emitError("the sparse-tensor must have static shape"); + if (!tensorTp.hasEncoding()) + return op->emitError("the sparse-tensor must have an encoding attribute"); + if (!tensorTp.isIdentity()) + return op->emitError("the sparse-tensor must have the identity mapping"); + if (!isUniqueCOOType(tensorTp)) + return op->emitError("the sparse-tensor must have a COO type"); + + if (coordinatesTp.getRank() != 2) + return op->emitError("coordinates must have rank 2"); + if (requiresStaticShape && !coordinatesTp.hasStaticShape()) + return op->emitError("coordinates must have static shape"); + if (coordinatesTp.getElementType() != tensorTp.getCrdType()) + return op->emitError("input/output coordinate-types don't match"); + + if (valuesTp.getRank() != 1) + return op->emitError("values must have rank 1"); + if (requiresStaticShape && !valuesTp.hasStaticShape()) + return op->emitError("values must have static shape"); + if (valuesTp.getElementType() != tensorTp.getElementType()) + return op->emitError("input/output element-types don't match"); + + const auto valuesNSE = valuesTp.getShape()[0]; + const auto coordsNSE = coordinatesTp.getShape()[0]; + if (!ShapedType::isDynamic(valuesNSE) && !ShapedType::isDynamic(coordsNSE) && + valuesNSE != coordsNSE) + return op->emitError("values/coordinates number-of-elements don't match"); + + // NOTE: We use `getLvlRank` because the `coordinatesTp` is for + // level-coordinates (cf., the op documentation). 
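// Illustrative sketch of the shape bookkeeping enforced by this verifier,
// restated on plain integers (a dynamic extent is modeled as -1 and skips
// its check, mirroring ShapedType::isDynamic); the helper name is
// hypothetical.
static bool packShapesConsistent(long valuesNSE, long coordsNSE,
                                 long coordsRank, long lvlRank) {
  // values and coordinates must agree on the number of stored entries.
  const bool nseOk = valuesNSE < 0 || coordsNSE < 0 || valuesNSE == coordsNSE;
  // the trailing extent of the coordinates tensor is the level-rank.
  const bool rankOk = coordsRank < 0 || coordsRank == lvlRank;
  return nseOk && rankOk;
}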
+ const auto coordsRank = coordinatesTp.getShape()[1]; + const auto tensorRank = tensorTp.getLvlRank(); + if (!ShapedType::isDynamic(coordsRank) && coordsRank != tensorRank) + return op->emitError("input/output level-ranks don't match"); return success(); } LogicalResult PackOp::verify() { - TensorType dataTp = getData().getType(), idxTp = getIndices().getType(); - TensorType retTp = getResult().getType(); - - if (!retTp.hasStaticShape() || !dataTp.hasStaticShape() || - !idxTp.hasStaticShape()) - return emitError("all input types must be statically shaped"); - - return verifyPackUnPack(*this, retTp, dataTp, idxTp); + const auto valuesTp = getRankedTensorType(getValues()); + const auto coordinatesTp = getRankedTensorType(getCoordinates()); + const auto resTp = getSparseTensorType(getResult()); + return verifyPackUnPack(*this, true, resTp, valuesTp, coordinatesTp); } LogicalResult UnpackOp::verify() { - TensorType dataTp = getData().getType(), idxTp = getIndices().getType(); - TensorType srcTp = getTensor().getType(); - - return verifyPackUnPack(*this, srcTp, dataTp, idxTp); + const auto valuesTp = getRankedTensorType(getValues()); + const auto coordinatesTp = getRankedTensorType(getCoordinates()); + const auto srcTp = getSparseTensorType(getTensor()); + return verifyPackUnPack(*this, false, srcTp, valuesTp, coordinatesTp); } LogicalResult ConvertOp::verify() { @@ -704,27 +722,25 @@ return {}; } -LogicalResult ToPointersOp::verify() { +LogicalResult ToPositionsOp::verify() { auto e = getSparseTensorEncoding(getTensor().getType()); - // FIXME: there seems to be some dim/lvl confusion here. - if (failed(dimIsInBounds(getDimension().getZExtValue(), getTensor()))) - return emitError("requested pointers dimension out of bounds"); - if (failed(isMatchingWidth(getResult(), e.getPointerBitWidth()))) - return emitError("unexpected type for pointers"); + if (failed(lvlIsInBounds(getLevel(), getTensor()))) + return emitError("requested level is out of bounds"); + if (failed(isMatchingWidth(getResult(), e.getPosWidth()))) + return emitError("unexpected type for positions"); return success(); } -LogicalResult ToIndicesOp::verify() { +LogicalResult ToCoordinatesOp::verify() { auto e = getSparseTensorEncoding(getTensor().getType()); - // FIXME: there seems to be some dim/lvl confusion here. 
- if (failed(dimIsInBounds(getDimension().getZExtValue(), getTensor()))) - return emitError("requested indices dimension out of bounds"); - if (failed(isMatchingWidth(getResult(), e.getIndexBitWidth()))) - return emitError("unexpected type for indices"); + if (failed(lvlIsInBounds(getLevel(), getTensor()))) + return emitError("requested level is out of bounds"); + if (failed(isMatchingWidth(getResult(), e.getCrdWidth()))) + return emitError("unexpected type for coordinates"); return success(); } -LogicalResult ToIndicesBufferOp::verify() { +LogicalResult ToCoordinatesBufferOp::verify() { auto e = getSparseTensorEncoding(getTensor().getType()); if (getCOOStart(e) >= e.getLvlRank()) return emitError("expected sparse tensor with a COO region"); @@ -755,7 +771,7 @@ LogicalResult GetStorageSpecifierOp::verify() { RETURN_FAILURE_IF_FAILED(verifySparsifierGetterSetter( - getSpecifierKind(), getDim(), getSpecifier(), getOperation())) + getSpecifierKind(), getLevel(), getSpecifier(), getOperation())) return success(); } @@ -765,17 +781,17 @@ } OpFoldResult GetStorageSpecifierOp::fold(FoldAdaptor adaptor) { - StorageSpecifierKind kind = getSpecifierKind(); - std::optional dim = getDim(); + const StorageSpecifierKind kind = getSpecifierKind(); + const auto lvl = getLevel(); for (auto op = getSpecifierSetDef(*this); op; op = getSpecifierSetDef(op)) - if (kind == op.getSpecifierKind() && dim == op.getDim()) + if (kind == op.getSpecifierKind() && lvl == op.getLevel()) return op.getValue(); return {}; } LogicalResult SetStorageSpecifierOp::verify() { RETURN_FAILURE_IF_FAILED(verifySparsifierGetterSetter( - getSpecifierKind(), getDim(), getSpecifier(), getOperation())) + getSpecifierKind(), getLevel(), getSpecifier(), getOperation())) return success(); } @@ -865,7 +881,7 @@ LogicalResult ConcatenateOp::verify() { const auto dstTp = getSparseTensorType(*this); - const Dimension concatDim = getDimension().getZExtValue(); + const Dimension concatDim = getDimension(); const Dimension dimRank = dstTp.getDimRank(); if (getInputs().size() <= 1) @@ -922,8 +938,9 @@ } LogicalResult InsertOp::verify() { - if (getDimRank(getTensor()) != static_cast(getIndices().size())) - return emitOpError("incorrect number of indices"); + const auto stt = getSparseTensorType(getTensor()); + if (stt.getLvlRank() != static_cast(getLvlCoords().size())) + return emitOpError("incorrect number of coordinates"); return success(); } @@ -942,9 +959,9 @@ } LogicalResult CompressOp::verify() { - if (getDimRank(getTensor()) != - 1 + static_cast(getIndices().size())) - return emitOpError("incorrect number of indices"); + const auto stt = getSparseTensorType(getTensor()); + if (stt.getLvlRank() != 1 + static_cast(getLvlCoords().size())) + return emitOpError("incorrect number of coordinates"); return success(); } @@ -960,7 +977,7 @@ const auto stt = getSparseTensorType(tensor); const Dimension dimRank = stt.getDimRank(); - // Starts with `dimRank`-many indices. + // Starts with `dimRank`-many coordinates. SmallVector blockArgTypes(dimRank, builder.getIndexType()); // Followed by one value. 
blockArgTypes.push_back(stt.getElementType()); diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp @@ -139,8 +139,8 @@ const AnalysisState &state) const { assert(op->getNumResults() == 1); assert(isUniqueCOOType(op->getResultTypes()[0].cast())); - // PackOp reuses the input tensors as data/indices instead of creating new - // ones when packing into a COO format. + // PackOp reuses the input tensors as values/coordinates instead of + // creating new ones when packing into a COO format. return {{op->getOpResult(0), BufferRelation::Equivalent}}; } @@ -171,7 +171,7 @@ AliasingOpResultList getAliasingOpResults(Operation *op, OpOperand &opOperand, const AnalysisState &state) const { - // Conceptually, UnpackOp equals to a list of toIndices/toValueOp + // Conceptually, UnpackOp equals to a list of toCoordinates/toValueOp return {}; } }; @@ -217,9 +217,10 @@ } }; -struct ToIndicesBufferOpInterface +struct ToCoordinatesBufferOpInterface : public BufferizableOpInterface::ExternalModel< - ToIndicesBufferOpInterface, sparse_tensor::ToIndicesBufferOp> { + ToCoordinatesBufferOpInterface, + sparse_tensor::ToCoordinatesBufferOp> { bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand, const AnalysisState &state) const { return true; @@ -227,8 +228,8 @@ bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand, const AnalysisState &state) const { - // Potential writes into memory through the result of sparse_tensor.indices - // are not considered. + // Potential writes into memory through the result of + // `sparse_tensor.coordinates` are not considered. return false; } @@ -238,9 +239,9 @@ } }; -struct ToIndicesOpInterface +struct ToCoordinatesOpInterface : public BufferizableOpInterface::ExternalModel< - ToIndicesOpInterface, sparse_tensor::ToIndicesOp> { + ToCoordinatesOpInterface, sparse_tensor::ToCoordinatesOp> { bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand, const AnalysisState &state) const { return true; @@ -248,8 +249,8 @@ bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand, const AnalysisState &state) const { - // Potential writes into memory through the result of sparse_tensor.indices - // are not considered. + // Potential writes into memory through the result of + // `sparse_tensor.coordinates` are not considered. return false; } @@ -259,9 +260,9 @@ } }; -struct ToPointersOpInterface +struct ToPositionsOpInterface : public BufferizableOpInterface::ExternalModel< - ToPointersOpInterface, sparse_tensor::ToPointersOp> { + ToPositionsOpInterface, sparse_tensor::ToPositionsOp> { bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand, const AnalysisState &state) const { return true; @@ -269,8 +270,8 @@ bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand, const AnalysisState &state) const { - // Potential writes into memory through the result of sparse_tensor.pointers - // are not considered. + // Potential writes into memory through the result of + // `sparse_tensor.positions` are not considered. 
return false; } @@ -318,10 +319,11 @@ NumberOfEntriesOpInterface>(*ctx); sparse_tensor::PackOp::attachInterface(*ctx); sparse_tensor::UnpackOp::attachInterface(*ctx); - sparse_tensor::ToIndicesBufferOp::attachInterface< - ToIndicesBufferOpInterface>(*ctx); - sparse_tensor::ToIndicesOp::attachInterface(*ctx); - sparse_tensor::ToPointersOp::attachInterface(*ctx); + sparse_tensor::ToCoordinatesBufferOp::attachInterface< + ToCoordinatesBufferOpInterface>(*ctx); + sparse_tensor::ToCoordinatesOp::attachInterface( + *ctx); + sparse_tensor::ToPositionsOp::attachInterface(*ctx); sparse_tensor::ToValuesOp::attachInterface(*ctx); }); } diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h --- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h +++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h @@ -47,17 +47,11 @@ /// Converts the internal type-encoding for overhead storage to an mlir::Type. Type getOverheadType(Builder &builder, OverheadType ot); -/// Returns the OverheadType for pointer overhead storage. -OverheadType pointerOverheadTypeEncoding(SparseTensorEncodingAttr enc); +/// Returns the OverheadType for position overhead storage. +OverheadType posTypeEncoding(SparseTensorEncodingAttr enc); -/// Returns the OverheadType for index overhead storage. -OverheadType indexOverheadTypeEncoding(SparseTensorEncodingAttr enc); - -/// Returns the mlir::Type for pointer overhead storage. -Type getPointerOverheadType(Builder &builder, SparseTensorEncodingAttr enc); - -/// Returns the mlir::Type for index overhead storage. -Type getIndexOverheadType(Builder &builder, SparseTensorEncodingAttr enc); +/// Returns the OverheadType for coordinate overhead storage. +OverheadType crdTypeEncoding(SparseTensorEncodingAttr enc); /// Convert OverheadType to its function-name suffix. StringRef overheadTypeFunctionSuffix(OverheadType ot); @@ -98,15 +92,14 @@ void genReshapeDstShape(Location loc, PatternRewriter &rewriter, SmallVectorImpl &dstShape, ArrayRef srcShape, - ArrayRef staticDstShape, + ArrayRef staticDstShape, ArrayRef reassociation); -/// Translate indices during a reshaping operation. -void translateIndicesArray(OpBuilder &builder, Location loc, - ArrayRef reassociation, - ValueRange srcIndices, ArrayRef srcShape, - ArrayRef dstShape, - SmallVectorImpl &dstIndices); +/// Reshape coordinates during a reshaping operation. +void reshapeCvs(OpBuilder &builder, Location loc, + ArrayRef reassociation, + ValueRange srcSizes, ValueRange srcCvs, // NOLINT + ValueRange dstSizes, SmallVectorImpl &dstCvs); /// Returns a function reference (first hit also inserts into module). Sets /// the "_emit_c_interface" on the function declaration when requested, @@ -155,9 +148,9 @@ /// Generates code to deallocate a dense buffer. void deallocDenseTensor(OpBuilder &builder, Location loc, Value buffer); -/// Generates the code to read the value from tensor[ivs]. The generated code +/// Generates code to read the value from `tensor[ivs]`. The generated code /// looks like the following and the insertion point after this routine is -/// inside the if-then branch behind the assignment to ind. +/// inside the then-branch. 
/// if (tensor[ivs] != 0) /// insert_point Value genValueForDense(OpBuilder &builder, Location loc, Value tensor, @@ -177,7 +170,7 @@ // The loop to iterate a sparse tensor constant: // for i in range(NNZ) // val = values[i] -// [i1,..,ik] = indices[i] +// [i1,..,ik] = coordinates[i] // loop-body void genDenseTensorOrSparseConstantIterLoop( OpBuilder &builder, Location loc, Value src, unsigned rank, @@ -201,8 +194,8 @@ /// Scans to top of generated loop. Operation *getTop(Operation *op); -/// Iterate over a sparse constant, generates constantOp for value and indices. -/// E.g., +/// Iterate over a sparse constant, generates constantOp for value +/// and coordinates. E.g., /// sparse<[ [0], [28], [31] ], /// [ (-5.13, 2.0), (3.0, 4.0), (5.0, 6.0) ] > /// => @@ -221,18 +214,27 @@ Location loc, RewriterBase &rewriter, SparseElementsAttr attr, AffineMap order, function_ref, Value)> callback); -/// Converts the vector indices and store it into the memory pointed by -/// `ind`, apply (optional) `offset` on `offsetDim`. -void storeIndices(OpBuilder &builder, Location loc, unsigned rank, Value ind, - ValueRange ivs, unsigned offsetDim = 0, - Value offset = Value()); +/// Loads `size`-many values from the memref, which must have rank-1 and +/// size greater-or-equal to `size`. If the optional `(offsetIdx,offsetVal)` +/// arguments are provided, then the `offsetVal` will be added to the +/// `offsetIdx`-th value after loading. +SmallVector loadAll(OpBuilder &builder, Location loc, size_t size, + Value mem, size_t offsetIdx = 0, + Value offsetVal = Value()); + +/// Stores all the values of `vs` into the memref `mem`, which must have +/// rank-1 and size greater-or-equal to `vs.size()`. If the optional +/// `(offsetIdx,offsetVal)` arguments are provided, then the `offsetVal` +/// will be added to the `offsetIdx`-th value before storing. +void storeAll(OpBuilder &builder, Location loc, Value mem, ValueRange vs, + size_t offsetIdx = 0, Value offsetVal = Value()); /// Reshapes the linear values buffer for an annotated all dense sparse tensor /// to match the shape of the corresponding dense tensor to support direct -/// access of the buffer through indices. +/// access of the buffer through `lvlCoords`. Value reshapeValuesToLevels(OpBuilder &builder, Location loc, SparseTensorEncodingAttr enc, ValueRange dimSizes, - Value valuesBuffer, Value idxBuffer); + Value valuesBuffer, Value lvlCoords); //===----------------------------------------------------------------------===// // Inlined constant generators. @@ -248,7 +250,7 @@ //===----------------------------------------------------------------------===// /// Generates a 0-valued constant of the given type. In addition to -/// the scalar types (`ComplexType`, ``FloatType`, `IndexType`, +/// the scalar types (`ComplexType`, `FloatType`, `IndexType`, /// `IntegerType`), this also works for `RankedTensorType` and `VectorType` /// (for which it generates a constant `DenseElementsAttr` of zeros). inline Value constantZero(OpBuilder &builder, Location loc, Type tp) { @@ -314,18 +316,18 @@ static_cast(overheadTypeEncoding(width))); } -/// Generates a constant of the internal type-encoding for pointer +/// Generates a constant of the internal type-encoding for position /// overhead storage. 
-inline Value constantPointerTypeEncoding(OpBuilder &builder, Location loc, - SparseTensorEncodingAttr enc) { - return constantOverheadTypeEncoding(builder, loc, enc.getPointerBitWidth()); +inline Value constantPosTypeEncoding(OpBuilder &builder, Location loc, + SparseTensorEncodingAttr enc) { + return constantOverheadTypeEncoding(builder, loc, enc.getPosWidth()); } -/// Generates a constant of the internal type-encoding for index overhead -/// storage. -inline Value constantIndexTypeEncoding(OpBuilder &builder, Location loc, - SparseTensorEncodingAttr enc) { - return constantOverheadTypeEncoding(builder, loc, enc.getIndexBitWidth()); +/// Generates a constant of the internal type-encoding for coordinate +/// overhead storage. +inline Value constantCrdTypeEncoding(OpBuilder &builder, Location loc, + SparseTensorEncodingAttr enc) { + return constantOverheadTypeEncoding(builder, loc, enc.getCrdWidth()); } /// Generates a constant of the internal type-encoding for primary storage. @@ -346,16 +348,17 @@ return !rtp || rtp.getRank() == 0; } -/// Infers the result type and generates ToPointersOp. -Value genToPointers(OpBuilder &builder, Location loc, Value tensor, Level lvl); +/// Infers the result type and generates `ToPositionsOp`. +Value genToPositions(OpBuilder &builder, Location loc, Value tensor, Level lvl); -/// Infers the result type and generates ToIndicesOp. If the lvl is within a COO -/// region, the result type is a memref with unknown stride and offset. -/// Otherwise, the result type is a memref without any specified layout. -Value genToIndices(OpBuilder &builder, Location loc, Value tensor, Level lvl, - Level cooStart); +/// Infers the result type and generates `ToCoordinatesOp`. If the +/// level is within a COO region, the result type is a memref with unknown +/// stride and offset. Otherwise, the result type is a memref without +/// any specified layout. +Value genToCoordinates(OpBuilder &builder, Location loc, Value tensor, + Level lvl, Level cooStart); -/// Infers the result type and generates ToValuesOp. +/// Infers the result type and generates `ToValuesOp`. Value genToValues(OpBuilder &builder, Location loc, Value tensor); /// Generates code to retrieve the values size for the sparse tensor. diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp @@ -24,52 +24,50 @@ using namespace mlir::sparse_tensor; /// If the tensor is a sparse constant, generates and returns the pair of -/// the constants for the indices and the values. +/// the constants for the coordinates and the values. static std::optional> genSplitSparseConstant(OpBuilder &builder, Location loc, Value tensor) { if (auto constOp = tensor.getDefiningOp()) { - if (auto attr = constOp.getValue().dyn_cast()) { - DenseElementsAttr indicesAttr = attr.getIndices(); - Value indices = builder.create(loc, indicesAttr); - DenseElementsAttr valuesAttr = attr.getValues(); - Value values = builder.create(loc, valuesAttr); - return std::make_pair(indices, values); + if (auto a = constOp.getValue().dyn_cast()) { + auto coordinates = builder.create(loc, a.getIndices()); + auto values = builder.create(loc, a.getValues()); + return std::make_pair(coordinates, values); } } return {}; } -/// Generates the code to copy the index at indices[ivs] to ind, and return -/// the value at value[ivs]. 
-static Value genIndexAndValueForSparse(OpBuilder &builder, Location loc, - Value indices, Value values, - SmallVectorImpl &indicesArray, - ValueRange ivs, unsigned rank) { - for (unsigned i = 0; i < rank; i++) { - Value idx = constantIndex(builder, loc, i); - Value val = builder.create(loc, indices, - ValueRange{ivs[0], idx}); - val = builder.create(loc, builder.getIndexType(), val); - // builder.create(loc, val, ind, idx); - indicesArray.push_back(val); +/// Reads `coordinates[k][0..rank-1]` and `value[k]`, appending the +/// former onto `cvs` and returning the latter. +// FIXME: Change the `rank` argument to `Dimension dimRank` or `Level lvlRank`, +// to clarify its intended meaning. +static Value genCoordsAndValueForSparse(OpBuilder &builder, Location loc, + Value coordinates, Value values, + SmallVectorImpl &cvs, Value k, + unsigned rank) { + for (unsigned d = 0; d < rank; d++) { + Value dim = constantIndex(builder, loc, d); + Value crd = + builder.create(loc, coordinates, ValueRange{k, dim}); + crd = builder.create(loc, builder.getIndexType(), crd); + // builder.create(loc, crd, cvs, dim); + cvs.push_back(crd); } - return builder.create(loc, values, ivs[0]); + return builder.create(loc, values, k); } -/// Generates the code to read the value from tensor[ivs], and conditionally -/// stores the indices ivs to the memory in ind. The generated code looks like -/// the following and the insertion point after this routine is inside the -/// if-then branch behind the assignment to ind. This is to ensure that the -/// code that uses the ind, such as an addEltX call generated after, is inside -/// the if-then branch. +/// Generates code to read the value from `tensor[ivs]`, and open +/// a conditional for whether the value is non-zero. The generated code +/// looks like the following and the insertion point after this routine +/// is inside the then-branch. /// if (tensor[ivs] != 0) -/// ind = ivs -static Value genIndexAndValueForDense(OpBuilder &builder, Location loc, - Value tensor, - SmallVectorImpl &indicesArray, - ValueRange ivs) { +/// insert_point +static Value genCoordsAndValueForDense(OpBuilder &builder, Location loc, + Value tensor, + SmallVectorImpl &cvs, + ValueRange ivs) { Value val = genValueForDense(builder, loc, tensor, ivs); - indicesArray.append(ivs.begin(), ivs.end()); + cvs.append(ivs.begin(), ivs.end()); return val; } @@ -101,6 +99,8 @@ llvm_unreachable("Unknown overhead type"); } +// TODO: should offer an overload of this that takes a `MLIRContext*` +// instead of the builder, similar to `detail::getIntegerOrIndexType`. 
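// Illustrative sketch of the width-to-type mapping these overhead helpers
// implement: a posWidth/crdWidth of 0 selects the platform `index` type,
// any other accepted width the matching fixed-width integer.  The helper
// name is hypothetical.
static const char *overheadTypeName(unsigned width) {
  switch (width) {
  case 0:
    return "index";
  case 8:
    return "i8";
  case 16:
    return "i16";
  case 32:
    return "i32";
  case 64:
    return "i64";
  default:
    return "<unsupported overhead bitwidth>";
  }
}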
Type mlir::sparse_tensor::getOverheadType(Builder &builder, OverheadType ot) { switch (ot) { case OverheadType::kIndex: @@ -118,24 +118,18 @@ } OverheadType -mlir::sparse_tensor::pointerOverheadTypeEncoding(SparseTensorEncodingAttr enc) { - return overheadTypeEncoding(enc.getPointerBitWidth()); +mlir::sparse_tensor::posTypeEncoding(SparseTensorEncodingAttr enc) { + return overheadTypeEncoding(enc.getPosWidth()); } OverheadType -mlir::sparse_tensor::indexOverheadTypeEncoding(SparseTensorEncodingAttr enc) { - return overheadTypeEncoding(enc.getIndexBitWidth()); -} - -Type mlir::sparse_tensor::getPointerOverheadType(Builder &builder, - SparseTensorEncodingAttr enc) { - return getOverheadType(builder, pointerOverheadTypeEncoding(enc)); +mlir::sparse_tensor::crdTypeEncoding(SparseTensorEncodingAttr enc) { + return overheadTypeEncoding(enc.getCrdWidth()); } -Type mlir::sparse_tensor::getIndexOverheadType(Builder &builder, - SparseTensorEncodingAttr enc) { - return getOverheadType(builder, indexOverheadTypeEncoding(enc)); -} +// TODO: we ought to add some `static_assert` tests to ensure that the +// `STEA::get{Pos,Crd}Type` methods agree with `getOverheadType(builder, +// {pos,crd}OverheadTypeEncoding(enc))` // TODO: Adjust the naming convention for the constructors of // `OverheadType` so we can use the `MLIR_SPARSETENSOR_FOREVERY_O` x-macro @@ -323,53 +317,52 @@ assert(start == staticDstShape.size()); } -void mlir::sparse_tensor::translateIndicesArray( +void mlir::sparse_tensor::reshapeCvs( OpBuilder &builder, Location loc, - ArrayRef reassociation, ValueRange srcIndices, - ArrayRef srcShape, ArrayRef dstShape, - SmallVectorImpl &dstIndices) { + ArrayRef reassociation, // NOLINT + ValueRange srcSizes, ValueRange srcCvs, // NOLINT + ValueRange dstSizes, SmallVectorImpl &dstCvs) { + const unsigned srcRank = srcSizes.size(); + const unsigned dstRank = dstSizes.size(); + assert(srcRank == srcCvs.size() && "Source rank mismatch"); + const bool isCollapse = srcRank > dstRank; + const ValueRange sizes = isCollapse ? srcSizes : dstSizes; + // Iterate over reassociation map. unsigned i = 0; unsigned start = 0; - unsigned dstRank = dstShape.size(); - unsigned srcRank = srcShape.size(); - assert(srcRank == srcIndices.size()); - bool isCollapse = srcRank > dstRank; - ArrayRef shape = isCollapse ? srcShape : dstShape; - // Iterate over reassociation map. for (const auto &map : llvm::enumerate(reassociation)) { // Prepare strides information in dimension slice. Value linear = constantIndex(builder, loc, 1); for (unsigned j = start, end = start + map.value().size(); j < end; j++) { - linear = builder.create(loc, linear, shape[j]); + linear = builder.create(loc, linear, sizes[j]); } // Start expansion. Value val; if (!isCollapse) - val = srcIndices[i]; + val = srcCvs[i]; // Iterate over dimension slice. for (unsigned j = start, end = start + map.value().size(); j < end; j++) { - linear = builder.create(loc, linear, shape[j]); + linear = builder.create(loc, linear, sizes[j]); if (isCollapse) { - Value old = srcIndices[j]; - Value mul = builder.create(loc, old, linear); + const Value mul = builder.create(loc, srcCvs[j], linear); val = val ? builder.create(loc, val, mul) : mul; } else { - Value old = val; + const Value old = val; val = builder.create(loc, val, linear); - assert(dstIndices.size() == j); - dstIndices.push_back(val); + assert(dstCvs.size() == j); + dstCvs.push_back(val); val = builder.create(loc, old, linear); } } // Finalize collapse. 
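// (i.e., push the fully linearized coordinate of the group computed above.)
// Illustrative sketch of the coordinate arithmetic reshapeCvs emits for a
// two-level reassociation group with sizes (s0, s1), on plain integers;
// the helper names are hypothetical.
static long collapseCoords2(long c0, long c1, long s1) {
  return c0 * s1 + c1; // row-major linearization of the group's coordinates
}
static void expandCoord2(long c, long s1, long &c0, long &c1) {
  c0 = c / s1; // inverse of the above: peel off one destination coordinate
  c1 = c % s1;
}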
if (isCollapse) { - assert(dstIndices.size() == i); - dstIndices.push_back(val); + assert(dstCvs.size() == i); + dstCvs.push_back(val); } start += map.value().size(); i++; } - assert(dstIndices.size() == dstRank); + assert(dstCvs.size() == dstRank); } FlatSymbolRefAttr mlir::sparse_tensor::getFunc(ModuleOp module, StringRef name, @@ -471,46 +464,51 @@ // FIXME: // 1. Dense tensors loop should be generated by loop emitter. // 2. Support reduction variables to propagate SSA chains properly. +// 3. Change the `rank` argument to `Dimension dimRank` or `Level lvlRank`, +// to clarify its meaning. void mlir::sparse_tensor::genDenseTensorOrSparseConstantIterLoop( OpBuilder &builder, Location loc, Value src, unsigned rank, function_ref bodyBuilder) { - SmallVector indicesArray; + // `cvs` is actually the flattened coordinates array for all elements, + // not just for one element (since we do not `SmallVector::clear` after + // each iteration of the body of the loopnest. + SmallVector cvs; SmallVector lo; SmallVector hi; SmallVector st; - Value zero = constantIndex(builder, loc, 0); - Value one = constantIndex(builder, loc, 1); - auto indicesValues = genSplitSparseConstant(builder, loc, src); - bool isCOOConstant = indicesValues.has_value(); - Value indices; - Value values; - if (isCOOConstant) { - indices = indicesValues->first; - values = indicesValues->second; + const Value zero = constantIndex(builder, loc, 0); + const Value one = constantIndex(builder, loc, 1); + const auto splitSrc = genSplitSparseConstant(builder, loc, src); + if (splitSrc.has_value()) { + const Value srcCoordinates = splitSrc->first; + const Value srcValues = splitSrc->second; lo.push_back(zero); - hi.push_back(linalg::createOrFoldDimOp(builder, loc, values, 0)); + hi.push_back(linalg::createOrFoldDimOp(builder, loc, srcValues, 0)); st.push_back(one); + scf::buildLoopNest(builder, loc, lo, hi, st, {}, + [&](OpBuilder &builder, Location loc, ValueRange ivs, + ValueRange /*args*/) -> scf::ValueVector { + Value val = genCoordsAndValueForSparse( + builder, loc, srcCoordinates, srcValues, cvs, + ivs[0], rank); + bodyBuilder(builder, loc, val, cvs); + return {}; + }); } else { for (unsigned i = 0; i < rank; i++) { lo.push_back(zero); hi.push_back(linalg::createOrFoldDimOp(builder, loc, src, i)); st.push_back(one); } + scf::buildLoopNest(builder, loc, lo, hi, st, {}, + [&](OpBuilder &builder, Location loc, ValueRange ivs, + ValueRange /*args*/) -> scf::ValueVector { + Value val = genCoordsAndValueForDense(builder, loc, + src, cvs, ivs); + bodyBuilder(builder, loc, val, cvs); + return {}; + }); } - - scf::buildLoopNest( - builder, loc, lo, hi, st, {}, - [&](OpBuilder &builder, Location loc, ValueRange ivs, - ValueRange args) -> scf::ValueVector { - Value val; - if (isCOOConstant) - val = genIndexAndValueForSparse(builder, loc, indices, values, - indicesArray, ivs, rank); - else - val = genIndexAndValueForDense(builder, loc, src, indicesArray, ivs); - bodyBuilder(builder, loc, val, indicesArray); - return {}; - }); } void mlir::sparse_tensor::sizesFromSrc(OpBuilder &builder, @@ -534,85 +532,108 @@ void sparse_tensor::foreachInSparseConstant( Location loc, RewriterBase &rewriter, SparseElementsAttr attr, AffineMap order, function_ref, Value)> callback) { - Dimension dimRank = getSparseTensorType(attr).getDimRank(); - // Foreach on constant. 
- DenseElementsAttr indicesAttr = attr.getIndices(); - DenseElementsAttr valuesAttr = attr.getValues(); - - using CooValuePair = std::pair, Attribute>; - SmallVector cooV; - for (size_t i = 0, nse = valuesAttr.size(); i < nse; i++) { - cooV.emplace_back(); - for (Dimension j = 0; j < dimRank; j++) { - auto coordAttr = indicesAttr.getValues()[i * dimRank + j]; - cooV.back().first.push_back(coordAttr); - } - auto valAttr = valuesAttr.getValues()[i]; - cooV.back().second = valAttr; + const Dimension dimRank = getSparseTensorType(attr).getDimRank(); + const auto coordinates = attr.getIndices().getValues(); + const auto values = attr.getValues().getValues(); + + // This is like the `Element` class in the runtime library, but for + // MLIR attributes. In the future we may want to move this out into + // a proper class definition to help improve code legibility (e.g., + // `first` -> `coords`, `second` -> `value`) as well as being able + // to factor out analogues of `ElementLT` for the sort below, etc. + using ElementAttr = std::pair, Attribute>; + + // Construct the COO from the SparseElementsAttr. + SmallVector elems; + for (size_t i = 0, nse = values.size(); i < nse; i++) { + elems.emplace_back(); + elems.back().second = values[i]; + auto &coords = elems.back().first; + coords.reserve(dimRank); + for (Dimension d = 0; d < dimRank; d++) + coords.push_back(coordinates[i * dimRank + d]); } // Sorts the sparse element attribute based on coordinates. - std::sort(cooV.begin(), cooV.end(), - [order](const CooValuePair &lhs, const CooValuePair &rhs) { - const SmallVectorImpl &lc = lhs.first; - const SmallVectorImpl &rc = rhs.first; - for (size_t i = 0, e = lc.size(); i < e; i++) { - auto l = - order - ? order.getResult(i).cast().getPosition() - : i; - if (lc[l].getInt() == rc[l].getInt()) + std::sort(elems.begin(), elems.end(), + [order, dimRank](const ElementAttr &lhs, const ElementAttr &rhs) { + const auto &lhsCoords = lhs.first; + const auto &rhsCoords = rhs.first; + for (Dimension d = 0; d < dimRank; d++) { + // FIXME: This only makes sense for permutations. + // And since we don't check that `order` is a permutation, + // it can also cause OOB errors when we use `l`. + const Level l = order ? order.getDimPosition(d) : d; + if (lhsCoords[l].getInt() == rhsCoords[l].getInt()) continue; - return lc[l].getInt() < rc[l].getInt(); + return lhsCoords[l].getInt() < rhsCoords[l].getInt(); } llvm_unreachable("no equal coordinate in sparse element attr"); }); - SmallVector coords; - for (size_t i = 0, nse = valuesAttr.size(); i < nse; i++) { - coords.clear(); - for (Dimension j = 0; j < dimRank; j++) { - auto coordAttr = cooV[i].first[j]; - auto coord = - rewriter.create(loc, coordAttr.getInt()); - // Remaps coordinates. - coords.push_back(coord); + SmallVector cvs; + cvs.reserve(dimRank); + for (size_t i = 0, nse = values.size(); i < nse; i++) { + // Remap coordinates. + cvs.clear(); + for (Dimension d = 0; d < dimRank; d++) { + auto crd = elems[i].first[d].getInt(); + cvs.push_back(rewriter.create(loc, crd)); } + // Remap value. Value val; if (attr.getElementType().isa()) { - auto valAttr = cooV[i].second.cast(); + auto valAttr = elems[i].second.cast(); val = rewriter.create(loc, attr.getElementType(), valAttr); } else { - auto valAttr = cooV[i].second.cast(); - // Remaps value. 
+ auto valAttr = elems[i].second.cast(); val = rewriter.create(loc, valAttr); } assert(val); - callback(coords, val); + callback(cvs, val); } } -void sparse_tensor::storeIndices(OpBuilder &builder, Location loc, - unsigned size, Value ind, ValueRange ivs, - unsigned offsetDim, Value offset) { +SmallVector sparse_tensor::loadAll(OpBuilder &builder, Location loc, + size_t size, Value mem, + size_t offsetIdx, Value offsetVal) { #ifndef NDEBUG - const auto memTp = ind.getType().cast(); - (void)memTp; + const auto memTp = mem.getType().cast(); assert(memTp.getRank() == 1); const DynSize memSh = memTp.getDimSize(0); - (void)memSh; - assert(ShapedType::isDynamic(memSh) || memSh == static_cast(size)); - assert(ivs.size() == static_cast(size)); - assert(offsetDim < size); + assert(ShapedType::isDynamic(memSh) || memSh >= static_cast(size)); + assert(offsetIdx == 0 || offsetIdx < size); #endif // NDEBUG - + SmallVector vs; + vs.reserve(size); for (unsigned i = 0; i < size; i++) { - Value idx = ivs[i]; - if (offsetDim == i && offset) - idx = builder.create(loc, idx, offset); - builder.create(loc, idx, ind, - constantIndex(builder, loc, i)); + Value v = builder.create(loc, mem, + constantIndex(builder, loc, i)); + if (i == offsetIdx && offsetVal) + v = builder.create(loc, v, offsetVal); + vs.push_back(v); + } + return vs; +} + +void sparse_tensor::storeAll(OpBuilder &builder, Location loc, Value mem, + ValueRange vs, size_t offsetIdx, Value offsetVal) { +#ifndef NDEBUG + const size_t vsize = vs.size(); + const auto memTp = mem.getType().cast(); + assert(memTp.getRank() == 1); + const DynSize memSh = memTp.getDimSize(0); + assert(ShapedType::isDynamic(memSh) || memSh >= static_cast(vsize)); + assert(offsetIdx == 0 || offsetIdx < vsize); +#endif // NDEBUG + for (const auto &v : llvm::enumerate(vs)) { + const Value w = + (offsetIdx == v.index() && offsetVal) + ? builder.create(loc, v.value(), offsetVal) + : v.value(); + builder.create(loc, w, mem, + constantIndex(builder, loc, v.index())); } } @@ -620,43 +641,44 @@ SparseTensorEncodingAttr enc, ValueRange dimSizes, Value valuesBuffer, - Value idxBuffer) { - // Use the `idxBuffer` to store the level sizes. + Value lvlCoords) { + // Reuse the `lvlCoords` buffer to store the level-sizes. const Level lvlRank = enc.getLvlRank(); SmallVector lvlSizes; lvlSizes.reserve(lvlRank); for (Level l = 0; l < lvlRank; l++) // FIXME: `toOrigDim` is deprecated. lvlSizes.push_back(dimSizes[toOrigDim(enc, l)]); - storeIndices(builder, loc, lvlRank, idxBuffer, lvlSizes); + storeAll(builder, loc, lvlCoords, lvlSizes); // The memref ReshapeOp requires the sizes buffer to have a static // shape. const auto iTp = builder.getIndexType(); - const SmallVector idxBufferShape{static_cast(lvlRank)}; - const auto idxBufferTp = MemRefType::get(idxBufferShape, iTp); - idxBuffer = builder.create(loc, idxBufferTp, idxBuffer); + const SmallVector lvlSizesShape{static_cast(lvlRank)}; + const auto lvlSizesTp = MemRefType::get(lvlSizesShape, iTp); + lvlCoords = builder.create(loc, lvlSizesTp, lvlCoords); + // Finally, create the ReshapeOp. 
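  // (The reshape below yields a `lvlRank`-dimensional, dynamically sized
  // view over the linear values buffer, so later code can address it
  // directly with level-coordinates.)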
const SmallVector resShape(lvlRank, ShapedType::kDynamic); const Type elemTp = getMemRefType(valuesBuffer).getElementType(); const auto resTp = MemRefType::get(resShape, elemTp); - return builder.create(loc, resTp, valuesBuffer, idxBuffer); + return builder.create(loc, resTp, valuesBuffer, lvlCoords); } -Value sparse_tensor::genToPointers(OpBuilder &builder, Location loc, - Value tensor, Level lvl) { +Value sparse_tensor::genToPositions(OpBuilder &builder, Location loc, + Value tensor, Level lvl) { const auto srcTp = getSparseTensorType(tensor); - const Type ptrTp = getPointerOverheadType(builder, srcTp.getEncoding()); - const Type memTp = get1DMemRefType(ptrTp, /*withLayout=*/false); - return builder.create(loc, memTp, tensor, - builder.getIndexAttr(lvl)); + const Type posTp = srcTp.getEncoding().getPosType(); + const Type memTp = get1DMemRefType(posTp, /*withLayout=*/false); + return builder.create(loc, memTp, tensor, + builder.getIndexAttr(lvl)); } -Value sparse_tensor::genToIndices(OpBuilder &builder, Location loc, - Value tensor, Level lvl, Level cooStart) { +Value sparse_tensor::genToCoordinates(OpBuilder &builder, Location loc, + Value tensor, Level lvl, Level cooStart) { const auto srcTp = getSparseTensorType(tensor); - const Type idxTp = getIndexOverheadType(builder, srcTp.getEncoding()); - const Type memTp = get1DMemRefType(idxTp, /*withLayout=*/lvl >= cooStart); - return builder.create(loc, memTp, tensor, - builder.getIndexAttr(lvl)); + const Type crdTp = srcTp.getEncoding().getCrdType(); + const Type memTp = get1DMemRefType(crdTp, /*withLayout=*/lvl >= cooStart); + return builder.create(loc, memTp, tensor, + builder.getIndexAttr(lvl)); } Value sparse_tensor::genToValues(OpBuilder &builder, Location loc, diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h --- a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h +++ b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h @@ -155,11 +155,11 @@ const std::vector> &getPidxs() const { return pidxs; }; const std::vector> &getCoord() const { return coord; }; const std::vector> &getHighs() const { return highs; }; - const std::vector> &getPtrBuffer() const { - return ptrBuffer; + const std::vector> &getPosBuffer() const { + return posBuffer; }; - const std::vector> &getIdxBuffer() const { - return idxBuffer; + const std::vector> &getCrdBuffer() const { + return crdBuffer; }; const std::vector &getValBuffer() const { return valBuffer; }; @@ -190,9 +190,12 @@ Value genAddress(OpBuilder &builder, Location loc, size_t tid, size_t dim, Value iv); - /// Generates instructions to compute the coordinate of tesnors[tid] on `l` - /// under the current loop context. - Value genSparseCoord(OpBuilder &builder, Location loc, size_t tid, size_t l); + /// Generates instructions to compute the coordinate of tensors[tid][lvl] + /// under the current loop context. The final argument is the + /// collapsed-output level, whereas this function handles converting + /// that to the uncollapsed-input level + Value genSparseCrd(OpBuilder &builder, Location loc, size_t tid, + size_t dstLvl); bool isOutputTensor(size_t tid) { return hasOutput && tid == tensors.size() - 1; @@ -257,12 +260,13 @@ std::vector> dimTypes; /// Sparse iteration information (by tensor and dim). These arrays /// are updated to remain current within the current loop. + // TODO: we may want to rename "pidx(s)" to `posCursor(s)` or similar. 
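  // Illustrative sketch (plain arrays, hypothetical names) of how these
  // per-(tensor, level) buffers cooperate on one compressed level:
  //   pLo = pos[parentPos];       // becomes pidxs[t][l]
  //   pHi = pos[parentPos + 1];   // becomes highs[t][l]
  //   for (p = pLo; p < pHi; ++p)
  //     visit(crd[p], p);         // coord[t][l] and the cursor for level l+1
  // where `pos`/`crd` stand in for posBuffer[t][l]/crdBuffer[t][l].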
std::vector> pidxs; std::vector> coord; std::vector> highs; std::vector> lvlSizes; - std::vector> ptrBuffer; // to_pointers - std::vector> idxBuffer; // to_indices + std::vector> posBuffer; // to_positions + std::vector> crdBuffer; // to_coordinates std::vector valBuffer; // to_value /// Whether the sparse input is a slice. diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp @@ -24,16 +24,16 @@ // File local helper functions. //===----------------------------------------------------------------------===// -/// Generates a pointer/index load from the sparse storage scheme. Narrower -/// data types need to be zero extended before casting the value into the -/// index type used for looping and indexing. -static Value genIndexLoad(OpBuilder &builder, Location loc, Value ptr, +/// Generates a position/coordinate load from the sparse storage scheme. +/// Narrower data types need to be zero extended before casting the +/// value into the `Index` type used for looping and indexing. +static Value genIndexLoad(OpBuilder &builder, Location loc, Value mem, Value s) { // For the scalar case, we simply zero extend narrower indices into 64-bit // values before casting to index without a performance penalty. Here too, // however, indices that already are 64-bit, in theory, cannot express the // full range as explained above. - Value load = builder.create(loc, ptr, s); + Value load = builder.create(loc, mem, s); if (!load.getType().isa()) { if (load.getType().getIntOrFloatBitWidth() < 64) load = builder.create(loc, builder.getI64Type(), load); @@ -74,10 +74,10 @@ // Converts a coordinate relative to the underlying tensor to the coordinate // relative to the slice, returns a extra reminder value -static std::pair fromSliceCoord(OpBuilder &builder, Location loc, - Value v, - SparseTensorEncodingAttr enc, - unsigned lvl) { +static std::pair fromSliceCrd(OpBuilder &builder, Location loc, + Value v, + SparseTensorEncodingAttr enc, + unsigned lvl) { Value stride = getSliceStride(builder, loc, enc, lvl); Value offset = getSliceOffset(builder, loc, enc, lvl); // iv = (iv - offset) / stride @@ -88,13 +88,13 @@ } static std::pair -genSliceLegitPredicate(OpBuilder &builder, Location loc, Value coord, +genSliceLegitPredicate(OpBuilder &builder, Location loc, Value crd, SparseTensorEncodingAttr enc, unsigned lvl) { - std::pair trans = fromSliceCoord(builder, loc, coord, enc, lvl); - // First, coord >= offset (TODO: seems unsigned >= 0 won't be folded, skip + std::pair trans = fromSliceCrd(builder, loc, crd, enc, lvl); + // First, crd >= offset (TODO: seems unsigned >= 0 won't be folded, skip // the check if the offset is zero). auto geOffset = - builder.create(loc, arith::CmpIPredicate::uge, coord, + builder.create(loc, arith::CmpIPredicate::uge, crd, getSliceOffset(builder, loc, enc, lvl)); // Second, coord_in_slice < length auto ltLength = @@ -127,23 +127,24 @@ return add; } -Value LoopEmitter::genSparseCoord(OpBuilder &builder, Location loc, size_t tid, - size_t l) { - Value c = constantIndex(builder, loc, 0); - auto reass = getCollapseReassociation(tid, l); - for (unsigned i = 0; i < reass.size(); i++) { - auto lvl = reass[i]; - // A load on the indices array yields the coordinate. 
- Value ptr = idxBuffer[tid][lvl]; - Value off = genIndexLoad(builder, loc, ptr, pidxs[tid][l]); +Value LoopEmitter::genSparseCrd(OpBuilder &builder, Location loc, size_t tid, + size_t dstLvl) { + Value crd = constantIndex(builder, loc, 0); + const auto reassoc = getCollapseReassociation(tid, dstLvl); + for (unsigned i = 0; i < reassoc.size(); i++) { + const auto srcLvl = reassoc[i]; + // A load on the coordinates array yields the coordinate. + const Value mem = crdBuffer[tid][srcLvl]; + const Value pos = pidxs[tid][dstLvl]; + const Value off = genIndexLoad(builder, loc, mem, pos); // Linearized the coordinates within the same collapse reassociation. - c = builder.create(loc, c, off); - if (i != reass.size() - 1) { - c = builder.create(loc, c, - this->lvlSizes[tid][reass[i + 1]]); + crd = builder.create(loc, crd, off); + if (i != reassoc.size() - 1) { + crd = builder.create(loc, crd, + this->lvlSizes[tid][reassoc[i + 1]]); } } - return c; + return crd; } LoopEmitter::LoopEmitter(ValueRange tensors, StringAttr loopTag, bool hasOutput, @@ -164,8 +165,8 @@ this->coord.assign(tensors.size(), std::vector()); this->highs.assign(tensors.size(), std::vector()); this->lvlSizes.assign(tensors.size(), std::vector()); - this->ptrBuffer.assign(tensors.size(), std::vector()); - this->idxBuffer.assign(tensors.size(), std::vector()); + this->posBuffer.assign(tensors.size(), std::vector()); + this->crdBuffer.assign(tensors.size(), std::vector()); this->valBuffer.assign(tensors.size(), nullptr); this->loopStack.reserve(topSort.size()); this->sparsiferLoopLvlMap.assign(topSort.size(), 0); @@ -204,8 +205,8 @@ coord[tid].assign(rank, Value()); highs[tid].assign(rank, Value()); lvlSizes[tid].assign(rank, Value()); - ptrBuffer[tid].assign(rank, Value()); - idxBuffer[tid].assign(rank, Value()); + posBuffer[tid].assign(rank, Value()); + crdBuffer[tid].assign(rank, Value()); } // FIXME: This map should be maintained outside loop emitter. @@ -238,18 +239,18 @@ // Scan all levels of current tensor. for (Level l = 0; l < lvlRank; l++) { // This should be called only once at beginning. - assert(!ptrBuffer[t][l] && !idxBuffer[t][l] && !highs[t][l]); + assert(!posBuffer[t][l] && !crdBuffer[t][l] && !highs[t][l]); const auto dlt = dimTypes[t][l]; // Handle sparse storage schemes. if (isCompressedDLT(dlt)) { - // Generate sparse primitives to obtains pointer and indices. - ptrBuffer[t][l] = genToPointers(builder, loc, tensor, l); - idxBuffer[t][l] = genToIndices(builder, loc, tensor, l, cooStart); + // Generate sparse primitives to obtains positions and coordinates. + posBuffer[t][l] = genToPositions(builder, loc, tensor, l); + crdBuffer[t][l] = genToCoordinates(builder, loc, tensor, l, cooStart); } else if (isSingletonDLT(dlt)) { - // Singleton dimension, fetch indices. - idxBuffer[t][l] = genToIndices(builder, loc, tensor, l, cooStart); + // Singleton level, fetch coordinates. + crdBuffer[t][l] = genToCoordinates(builder, loc, tensor, l, cooStart); } else { - // Dense dimension, nothing to fetch. + // Dense level, nothing to fetch. assert(isDenseDLT(dlt)); } @@ -358,8 +359,8 @@ } auto enc = getSparseTensorEncoding(tensors[tid].getType()); - auto reass = getCollapseReassociation(tid, dim); - dim = reass.front(); + const auto reassoc = getCollapseReassociation(tid, dim); + dim = reassoc.front(); // TODO: support dynamic slices. Value step = constantIndex(builder, loc, 1); Value lo = isSparseInput ? 
pidxs[tid][dim] // current offset @@ -399,15 +400,16 @@ } assert(loop && iv); - Value c; + Value crd; if (isSparseInput) { - assert(reass.size() == 1 || isUniqueCOOType(tensors[tid].getType())); + assert(reassoc.size() == 1 || isUniqueCOOType(tensors[tid].getType())); // For COO, the position is the same across consecutive levels. - llvm::for_each(reass, [this, tid, iv](int lvl) { pidxs[tid][lvl] = iv; }); - c = genSparseCoord(builder, loc, tid, dim); + llvm::for_each(reassoc, + [this, tid, iv](Level lvl) { pidxs[tid][lvl] = iv; }); + crd = genSparseCrd(builder, loc, tid, dim); } else { - // Dense tensor, the coordinates is the inducation variable. - c = iv; + // Dense tensor, the coordinate is the induction variable. + crd = iv; } if (isSparseSlices[tid] && isSparseInput) { @@ -417,7 +419,7 @@ for (Value red : reduc) types.push_back(red.getType()); - auto [trans, pred] = genSliceLegitPredicate(builder, loc, c, enc, dim); + auto [trans, pred] = genSliceLegitPredicate(builder, loc, crd, enc, dim); bool hasReduc = !types.empty(); scf::IfOp ifOp = builder.create(loc, types, pred, /*else*/ hasReduc); @@ -435,11 +437,11 @@ } // Set the insertion point to matched branch. builder.setInsertionPointToStart(&ifOp.getThenRegion().front()); - c = trans; + crd = trans; } - assert(c); - coord[tid][dim] = c; + assert(crd); + coord[tid][dim] = crd; // NOTE: we can also prepare for next dim here in advance // Push the loop into stack loopStack.emplace_back(ArrayRef(tid), ArrayRef(dim), loop, @@ -466,7 +468,7 @@ // TODO: We should instead use a whileOp for filter loop to allow early // break when exceeding (for ordered dimensions). // TODO: There are many other potiential opportunities that we might apply in - the future. E.g., we could use binary search to located the pointer index. + the future. E.g., we could use binary search to locate the position index. scf::ForOp forOp = builder.create(loc, lo, hi, step, reduc); // In-place update on the reduction variable vector. @@ -478,12 +480,12 @@ Value iv = forOp.getInductionVar(); pidxs[tid][dim] = iv; - // Generating a load on the indices array yields the coordinate. - Value ptr = idxBuffer[tid][dim]; - coord[tid][dim] = genIndexLoad(builder, loc, ptr, iv); + // Generating a load on the coordinates array yields the coordinate. + Value mem = crdBuffer[tid][dim]; + coord[tid][dim] = genIndexLoad(builder, loc, mem, iv); - // Generate an if condition to filter out indices that is not equal to the - // result of the affine expression. + // Generate an if-condition to filter out coordinates that are not + // equal to the result of the affine expression. Value expected = genAffine(builder, affine, loc); auto pred = builder.create(loc, arith::CmpIPredicate::eq, coord[tid][dim], expected); @@ -531,7 +533,7 @@ assert(tids.size() == dims.size()); SmallVector types; SmallVector operands; - // Construct the while-loop with a parameter for each index. + // Construct the while-loop with a parameter for each coordinate. Type indexType = builder.getIndexType(); for (auto [tid, dim] : llvm::zip(tids, dims)) { if (isCompressedDLT(dimTypes[tid][dim]) ||
builder.create(loc, cond, opc) : opc; // Update positions Value pos = after->getArgument(o++); - auto reass = getCollapseReassociation(tid, lvl); - assert(reass.size() == 1 || isUniqueCOOType(tensors[tid].getType())); + const auto reassoc = getCollapseReassociation(tid, lvl); + assert(reassoc.size() == 1 || isUniqueCOOType(tensors[tid].getType())); // For COO, the position is the same across consecutive levels. - llvm::for_each(reass, - [this, tid, pos](int lvl) { pidxs[tid][lvl] = pos; }); + llvm::for_each(reassoc, + [this, tid, pos](Level lvl) { pidxs[tid][lvl] = pos; }); } } builder.create(loc, cond, before->getArguments()); @@ -592,10 +594,10 @@ // Prepares for next level. if (isCompressedDLT(dimTypes[tid][dim]) || isSingletonDLT(dimTypes[tid][dim])) { - coord[tid][dim] = genSparseCoord(builder, loc, tid, dim); + coord[tid][dim] = genSparseCrd(builder, loc, tid, dim); if (isSparseSlices[tid]) { Value load = - genIndexLoad(builder, loc, idxBuffer[tid][dim], pidxs[tid][dim]); + genIndexLoad(builder, loc, crdBuffer[tid][dim], pidxs[tid][dim]); auto enc = getSparseTensorEncoding(tensors[tid].getType()); auto [trans, pred] = genSliceLegitPredicate(builder, loc, load, enc, dim); @@ -684,20 +686,19 @@ if (isDenseDLT(dimType)) return; - auto reassoc = getCollapseReassociation(tid, dim); - for (auto lvl : reassoc) { - // Either the first dimension, or the previous dimension has been set. + for (auto lvl : getCollapseReassociation(tid, dim)) { + // Either the first level, or the previous level has been set. assert(lvl == 0 || pidxs[tid][lvl - 1]); Value c0 = constantIndex(builder, loc, 0); Value c1 = constantIndex(builder, loc, 1); if (isCompressedDLT(dimType)) { - Value ptr = ptrBuffer[tid][lvl]; + Value mem = posBuffer[tid][lvl]; Value pLo = lvl == 0 ? c0 : pidxs[tid][lvl - 1]; - pidxs[tid][lvl] = genIndexLoad(builder, loc, ptr, pLo); + pidxs[tid][lvl] = genIndexLoad(builder, loc, mem, pLo); Value pHi = builder.create(loc, pLo, c1); - highs[tid][lvl] = genIndexLoad(builder, loc, ptr, pHi); + highs[tid][lvl] = genIndexLoad(builder, loc, mem, pHi); return; } if (isSingletonDLT(dimType)) { @@ -717,7 +718,7 @@ Location loc, ArrayRef tids, ArrayRef dims) { - // Initialize dense positions. Note that we generate dense indices of the + // Initialize dense positions. Note that we generate dense coordinates of the // output tensor unconditionally, since they may not appear in the lattice, // but may be needed for linearized codegen. for (auto [tid, dim] : llvm::zip(tids, dims)) { diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseStorageSpecifierToLLVM.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseStorageSpecifierToLLVM.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseStorageSpecifierToLLVM.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseStorageSpecifierToLLVM.cpp @@ -29,11 +29,11 @@ SmallVector result; // TODO: how can we get the lowering type for index type in the later pipeline // to be consistent? LLVM::StructureType does not allow index fields. 
- auto indexType = IntegerType::get(tp.getContext(), 64); - auto dimSizes = LLVM::LLVMArrayType::get(ctx, indexType, lvlRank); - auto memSizes = LLVM::LLVMArrayType::get(ctx, indexType, + auto sizeType = IntegerType::get(tp.getContext(), 64); + auto lvlSizes = LLVM::LLVMArrayType::get(ctx, sizeType, lvlRank); + auto memSizes = LLVM::LLVMArrayType::get(ctx, sizeType, getNumDataFieldsFromEncoding(enc)); - result.push_back(dimSizes); + result.push_back(lvlSizes); result.push_back(memSizes); return result; } @@ -47,7 +47,7 @@ // Specifier struct builder. //===----------------------------------------------------------------------===// -constexpr uint64_t kDimSizePosInSpecifier = 0; +constexpr uint64_t kLvlSizePosInSpecifier = 0; constexpr uint64_t kMemSizePosInSpecifier = 1; class SpecifierStructBuilder : public StructBuilder { @@ -71,14 +71,15 @@ assert(value); } - // Undef value for dimension sizes, all zero value for memory sizes. + // Undef value for level-sizes, all zero values for memory-sizes. static Value getInitValue(OpBuilder &builder, Location loc, Type structType); - Value dimSize(OpBuilder &builder, Location loc, unsigned dim); - void setDimSize(OpBuilder &builder, Location loc, unsigned dim, Value size); + Value lvlSize(OpBuilder &builder, Location loc, Level lvl); + void setLvlSize(OpBuilder &builder, Location loc, Level lvl, Value size); - Value memSize(OpBuilder &builder, Location loc, unsigned pos); - void setMemSize(OpBuilder &builder, Location loc, unsigned pos, Value size); + Value memSize(OpBuilder &builder, Location loc, FieldIndex fidx); + void setMemSize(OpBuilder &builder, Location loc, FieldIndex fidx, + Value size); }; Value SpecifierStructBuilder::getInitValue(OpBuilder &builder, Location loc, @@ -97,32 +98,38 @@ return md; } -/// Builds IR inserting the pos-th size into the descriptor. -Value SpecifierStructBuilder::dimSize(OpBuilder &builder, Location loc, - unsigned dim) { - return extractField(builder, loc, - ArrayRef{kDimSizePosInSpecifier, dim}); +/// Builds IR extracting the `lvl`-th level-size from the descriptor. +Value SpecifierStructBuilder::lvlSize(OpBuilder &builder, Location loc, + Level lvl) { + // This static_cast makes the narrowing of `lvl` explicit, as required + // by the braces notation for the ctor. + return extractField( + builder, loc, + ArrayRef{kLvlSizePosInSpecifier, static_cast(lvl)}); } -/// Builds IR inserting the pos-th size into the descriptor. -void SpecifierStructBuilder::setDimSize(OpBuilder &builder, Location loc, - unsigned dim, Value size) { - - insertField(builder, loc, ArrayRef{kDimSizePosInSpecifier, dim}, - size); +/// Builds IR inserting the `lvl`-th level-size into the descriptor. +void SpecifierStructBuilder::setLvlSize(OpBuilder &builder, Location loc, + Level lvl, Value size) { + // This static_cast makes the narrowing of `lvl` explicit, as required + // by the braces notation for the ctor. + insertField( + builder, loc, + ArrayRef{kLvlSizePosInSpecifier, static_cast(lvl)}, + size); } -/// Builds IR extracting the pos-th memory size into the descriptor. +/// Builds IR extracting the `fidx`-th memory-size from the descriptor. Value SpecifierStructBuilder::memSize(OpBuilder &builder, Location loc, - unsigned pos) { + FieldIndex fidx) { return extractField(builder, loc, - ArrayRef{kMemSizePosInSpecifier, pos}); + ArrayRef{kMemSizePosInSpecifier, fidx}); } -/// Builds IR inserting the pos-th memory size into the descriptor. +/// Builds IR inserting the `fidx`-th memory-size into the descriptor. 
void SpecifierStructBuilder::setMemSize(OpBuilder &builder, Location loc, - unsigned pos, Value size) { - insertField(builder, loc, ArrayRef{kMemSizePosInSpecifier, pos}, + FieldIndex fidx, Value size) { + insertField(builder, loc, ArrayRef{kMemSizePosInSpecifier, fidx}, size); } @@ -134,7 +141,7 @@ StorageSpecifierToLLVMTypeConverter::StorageSpecifierToLLVMTypeConverter() { addConversion([](Type type) { return type; }); - addConversion([](StorageSpecifierType tp) { return convertSpecifier(tp); }); + addConversion(convertSpecifier); } //===----------------------------------------------------------------------===// @@ -152,17 +159,15 @@ ConversionPatternRewriter &rewriter) const override { SpecifierStructBuilder spec(adaptor.getSpecifier()); Value v; - if (op.getSpecifierKind() == StorageSpecifierKind::DimSize) { - v = Base::onDimSize(rewriter, op, spec, - op.getDim().value().getZExtValue()); + if (op.getSpecifierKind() == StorageSpecifierKind::LvlSize) { + assert(op.getLevel().has_value()); + v = Base::onLvlSize(rewriter, op, spec, op.getLevel().value()); } else { auto enc = op.getSpecifier().getType().getEncoding(); StorageLayout layout(enc); - std::optional dim; - if (op.getDim()) - dim = op.getDim().value().getZExtValue(); - unsigned idx = layout.getMemRefFieldIndex(op.getSpecifierKind(), dim); - v = Base::onMemSize(rewriter, op, spec, idx); + FieldIndex fidx = + layout.getMemRefFieldIndex(op.getSpecifierKind(), op.getLevel()); + v = Base::onMemSize(rewriter, op, spec, fidx); } rewriter.replaceOp(op, v); @@ -174,15 +179,15 @@ : public SpecifierGetterSetterOpConverter { using SpecifierGetterSetterOpConverter::SpecifierGetterSetterOpConverter; - static Value onDimSize(OpBuilder &builder, SetStorageSpecifierOp op, - SpecifierStructBuilder &spec, unsigned d) { - spec.setDimSize(builder, op.getLoc(), d, op.getValue()); + static Value onLvlSize(OpBuilder &builder, SetStorageSpecifierOp op, + SpecifierStructBuilder &spec, Level lvl) { + spec.setLvlSize(builder, op.getLoc(), lvl, op.getValue()); return spec; } static Value onMemSize(OpBuilder &builder, SetStorageSpecifierOp op, - SpecifierStructBuilder &spec, unsigned i) { - spec.setMemSize(builder, op.getLoc(), i, op.getValue()); + SpecifierStructBuilder &spec, FieldIndex fidx) { + spec.setMemSize(builder, op.getLoc(), fidx, op.getValue()); return spec; } }; @@ -191,13 +196,13 @@ : public SpecifierGetterSetterOpConverter { using SpecifierGetterSetterOpConverter::SpecifierGetterSetterOpConverter; - static Value onDimSize(OpBuilder &builder, GetStorageSpecifierOp op, - SpecifierStructBuilder &spec, unsigned d) { - return spec.dimSize(builder, op.getLoc(), d); + static Value onLvlSize(OpBuilder &builder, GetStorageSpecifierOp op, + SpecifierStructBuilder &spec, Level lvl) { + return spec.lvlSize(builder, op.getLoc(), lvl); } static Value onMemSize(OpBuilder &builder, GetStorageSpecifierOp op, - SpecifierStructBuilder &spec, unsigned i) { - return spec.memSize(builder, op.getLoc(), i); + SpecifierStructBuilder &spec, FieldIndex fidx) { + return spec.memSize(builder, op.getLoc(), fidx); } }; diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp @@ -67,13 +67,13 @@ } } -/// Generates a load with proper index typing. +/// Generates a load with proper `index` typing. 
static Value genLoad(OpBuilder &builder, Location loc, Value mem, Value idx) { idx = genCast(builder, loc, idx, builder.getIndexType()); return builder.create(loc, mem, idx); } -/// Generates a store with proper index typing and (for indices) proper value. +/// Generates a store with proper `index` typing and proper value. static void genStore(OpBuilder &builder, Location loc, Value val, Value mem, Value idx) { idx = genCast(builder, loc, idx, builder.getIndexType()); @@ -111,8 +111,7 @@ // accounting for the reordering applied to the sparse storage. // FIXME: `toStoredDim` is deprecated. const Level lvl = toStoredDim(stt, dim); - // FIXME: this method seems to get *level* sizes, but the name is confusing - return desc.getDimSize(builder, loc, lvl); + return desc.getLvlSize(builder, loc, lvl); } // Gets the dimension size at the given stored level 'lvl', either as a @@ -150,13 +149,12 @@ for (Level l = startLvl; l < lvlRank; l++) { const auto dlt = stt.getLvlType(l); if (isCompressedDLT(dlt)) { - // Append linear x pointers, initialized to zero. Since each compressed + // Append linear x positions, initialized to zero. Since each compressed // dimension initially already has a single zero entry, this maintains // the desired "linear + 1" length property at all times. - Type ptrType = stt.getPointerType(); - Value ptrZero = constantZero(builder, loc, ptrType); - createPushback(builder, loc, desc, SparseTensorFieldKind::PtrMemRef, l, - ptrZero, linear); + Value posZero = constantZero(builder, loc, stt.getPosType()); + createPushback(builder, loc, desc, SparseTensorFieldKind::PosMemRef, l, + posZero, linear); return; } if (isSingletonDLT(dlt)) { @@ -215,32 +213,32 @@ // size based on available information. Otherwise we just // initialize a few elements to start the reallocation chain. 
// TODO: refine this - Value ptrHeuristic, idxHeuristic, valHeuristic; + Value posHeuristic, crdHeuristic, valHeuristic; if (stt.isAllDense()) { valHeuristic = dimSizes[0]; for (const Value sz : ArrayRef{dimSizes}.drop_front()) valHeuristic = builder.create(loc, valHeuristic, sz); } else if (sizeHint) { if (getCOOStart(stt.getEncoding()) == 0) { - ptrHeuristic = constantIndex(builder, loc, 2); - idxHeuristic = builder.create( + posHeuristic = constantIndex(builder, loc, 2); + crdHeuristic = builder.create( loc, constantIndex(builder, loc, dimRank), sizeHint); // AOS } else if (dimRank == 2 && stt.isDenseLvl(0) && stt.isCompressedLvl(1)) { - ptrHeuristic = builder.create( + posHeuristic = builder.create( loc, sizeHint, constantIndex(builder, loc, 1)); - idxHeuristic = sizeHint; + crdHeuristic = sizeHint; } else { - ptrHeuristic = idxHeuristic = constantIndex(builder, loc, 16); + posHeuristic = crdHeuristic = constantIndex(builder, loc, 16); } valHeuristic = sizeHint; } else { - ptrHeuristic = idxHeuristic = valHeuristic = + posHeuristic = crdHeuristic = valHeuristic = constantIndex(builder, loc, 16); } foreachFieldAndTypeInSparseTensor( stt, - [&builder, &fields, stt, loc, ptrHeuristic, idxHeuristic, valHeuristic, + [&builder, &fields, stt, loc, posHeuristic, crdHeuristic, valHeuristic, enableInit](Type fType, FieldIndex fIdx, SparseTensorFieldKind fKind, Level /*lvl*/, DimLevelType /*dlt*/) -> bool { assert(fields.size() == fIdx); @@ -249,13 +247,13 @@ case SparseTensorFieldKind::StorageSpec: field = SparseTensorSpecifier::getInitValue(builder, loc, stt); break; - case SparseTensorFieldKind::PtrMemRef: - case SparseTensorFieldKind::IdxMemRef: + case SparseTensorFieldKind::PosMemRef: + case SparseTensorFieldKind::CrdMemRef: case SparseTensorFieldKind::ValMemRef: field = createAllocation( builder, loc, fType.cast(), - (fKind == SparseTensorFieldKind::PtrMemRef) ? ptrHeuristic - : (fKind == SparseTensorFieldKind::IdxMemRef) ? idxHeuristic + (fKind == SparseTensorFieldKind::PosMemRef) ? posHeuristic + : (fKind == SparseTensorFieldKind::CrdMemRef) ? crdHeuristic : valHeuristic, enableInit); break; @@ -269,87 +267,89 @@ MutSparseTensorDescriptor desc(stt, fields); // Initialize the storage scheme to an empty tensor. Initialized memSizes - // to all zeros, sets the dimSizes to known values and gives all pointer + // to all zeros, sets the dimSizes to known values and gives all position // fields an initial zero entry, so that it is easier to maintain the // "linear + 1" length property. - Value ptrZero = constantZero(builder, loc, stt.getPointerType()); + Value posZero = constantZero(builder, loc, stt.getPosType()); for (Level lvlRank = stt.getLvlRank(), l = 0; l < lvlRank; l++) { // Fills dim sizes array. - // FIXME: this method seems to set *level* sizes, but the name is confusing // FIXME: `toOrigDim` is deprecated. - desc.setDimSize(builder, loc, l, dimSizes[toOrigDim(stt, l)]); - // Pushes a leading zero to pointers memref. + desc.setLvlSize(builder, loc, l, dimSizes[toOrigDim(stt, l)]); + // Pushes a leading zero to positions memref. 
if (stt.isCompressedLvl(l)) - createPushback(builder, loc, desc, SparseTensorFieldKind::PtrMemRef, l, - ptrZero); + createPushback(builder, loc, desc, SparseTensorFieldKind::PosMemRef, l, + posZero); } allocSchemeForRank(builder, loc, desc, /*rank=*/0); } /// Helper method that generates block specific to compressed case: /// -/// plo = pointers[l][pos[l-1]] -/// phi = pointers[l][pos[l-1]+1] -/// msz = indices[l].size() -/// if (plo < phi) { -/// present = indices[l][phi-1] == i[l] +/// // given: parentPos = posCursor[lvl-1] +/// pstart = desc.positions[lvl][parentPos] +/// pstop = desc.positions[lvl][parentPos+1] +/// plast = pstop - 1 +/// msz = desc.coordinates[lvl].size() +/// if (pstart < pstop) { +/// isPresent = (desc.coordinates[lvl][plast] == lvlCoords[lvl]) /// } else { // first insertion -/// present = false -/// pointers[l][pos[l-1]] = msz +/// isPresent = false +/// desc.positions[lvl][parentPos] = msz /// } -/// if (present) { // index already present -/// next = phi-1 +/// if (isPresent) { // coordinate is already present +/// pnext = plast /// } else { -/// indices[l].push_back(i[l]) -/// pointers[l][pos[l-1]+1] = msz+1 -/// next = msz -/// +/// desc.coordinates[lvl].push_back(lvlCoords[lvl]) +/// desc.positions[lvl][parentPos+1] = msz+1 +/// pnext = msz +/// /// } -/// pos[l] = next +/// posCursor[lvl] = pnext static Value genCompressed(OpBuilder &builder, Location loc, - MutSparseTensorDescriptor desc, ValueRange indices, - Value value, Value pos, Level lvl) { + MutSparseTensorDescriptor desc, ValueRange lvlCoords, + Value /*unused*/, Value parentPos, Level lvl) { const SparseTensorType stt(desc.getRankedTensorType()); const Level lvlRank = stt.getLvlRank(); assert(lvl < lvlRank && "Level is out of bounds"); - assert(indices.size() == static_cast(lvlRank) && + assert(lvlCoords.size() == static_cast(lvlRank) && "Level-rank mismatch"); SmallVector types; Type indexType = builder.getIndexType(); Type boolType = builder.getIntegerType(1); - unsigned idxIndex; - unsigned idxStride; - std::tie(idxIndex, idxStride) = desc.getIdxMemRefIndexAndStride(lvl); - Value one = constantIndex(builder, loc, 1); - Value pp1 = builder.create(loc, pos, one); - Value plo = genLoad(builder, loc, desc.getPtrMemRef(lvl), pos); - Value phi = genLoad(builder, loc, desc.getPtrMemRef(lvl), pp1); - Value msz = desc.getIdxMemSize(builder, loc, lvl); - Value idxStrideC; - if (idxStride > 1) { - idxStrideC = constantIndex(builder, loc, idxStride); - msz = builder.create(loc, msz, idxStrideC); - } - Value phim1 = builder.create( - loc, genCast(builder, loc, phi, indexType), one); + unsigned crdFidx; + unsigned crdStride; + std::tie(crdFidx, crdStride) = desc.getCrdMemRefIndexAndStride(lvl); + const Value one = constantIndex(builder, loc, 1); + const Value pp1 = builder.create(loc, parentPos, one); + const Value positionsAtLvl = desc.getPosMemRef(lvl); + const Value pstart = genLoad(builder, loc, positionsAtLvl, parentPos); + const Value pstop = genLoad(builder, loc, positionsAtLvl, pp1); + const Value crdMsz = desc.getCrdMemSize(builder, loc, lvl); + const Value crdStrideC = + crdStride > 1 ? constantIndex(builder, loc, crdStride) : Value(); + const Value msz = + crdStrideC ? builder.create(loc, crdMsz, crdStrideC) + : crdMsz; + const Value plast = builder.create( + loc, genCast(builder, loc, pstop, indexType), one); // Conditional expression. 
- Value lt = - builder.create(loc, arith::CmpIPredicate::ult, plo, phi); + Value lt = builder.create(loc, arith::CmpIPredicate::ult, + pstart, pstop); types.push_back(boolType); scf::IfOp ifOp1 = builder.create(loc, types, lt, /*else*/ true); types.pop_back(); builder.setInsertionPointToStart(&ifOp1.getThenRegion().front()); - Value crd = genLoad( - builder, loc, desc.getMemRefField(idxIndex), - idxStride > 1 ? builder.create(loc, phim1, idxStrideC) - : phim1); + Value crd = + genLoad(builder, loc, desc.getMemRefField(crdFidx), + crdStrideC ? builder.create(loc, plast, crdStrideC) + : plast); Value eq = builder.create( loc, arith::CmpIPredicate::eq, genCast(builder, loc, crd, indexType), - indices[lvl]); + lvlCoords[lvl]); builder.create(loc, eq); builder.setInsertionPointToStart(&ifOp1.getElseRegion().front()); if (lvl > 0) - genStore(builder, loc, msz, desc.getPtrMemRef(lvl), pos); + genStore(builder, loc, msz, positionsAtLvl, parentPos); builder.create(loc, constantI1(builder, loc, false)); builder.setInsertionPointAfter(ifOp1); // If present construct. Note that for a non-unique dimension level, we @@ -363,22 +363,22 @@ const Value p = stt.isUniqueLvl(lvl) ? ifOp1.getResult(0) : constantI1(builder, loc, false); scf::IfOp ifOp2 = builder.create(loc, types, p, /*else*/ true); - // If present (fields unaffected, update next to phim1). + // If present (fields unaffected, update pnext to plast). builder.setInsertionPointToStart(&ifOp2.getThenRegion().front()); // FIXME: This does not looks like a clean way, but probably the most // efficient way. - desc.getFields().push_back(phim1); + desc.getFields().push_back(plast); builder.create(loc, desc.getFields()); desc.getFields().pop_back(); - // If !present (changes fields, update next). + // If !present (changes fields, update pnext). builder.setInsertionPointToStart(&ifOp2.getElseRegion().front()); Value mszp1 = builder.create(loc, msz, one); - genStore(builder, loc, mszp1, desc.getPtrMemRef(lvl), pp1); - createPushback(builder, loc, desc, SparseTensorFieldKind::IdxMemRef, lvl, - indices[lvl]); - // Prepare the next dimension "as needed". + genStore(builder, loc, mszp1, positionsAtLvl, pp1); + createPushback(builder, loc, desc, SparseTensorFieldKind::CrdMemRef, lvl, + lvlCoords[lvl]); + // Prepare the next level "as needed". if ((lvl + 1) < lvlRank) allocSchemeForRank(builder, loc, desc, lvl + 1); @@ -415,40 +415,41 @@ const SparseTensorType stt(rtp); const Level lvlRank = stt.getLvlRank(); - // Construct fields and indices arrays from parameters. + // Extract fields and coordinates from args. SmallVector fields = llvm::to_vector(args.drop_back(lvlRank + 1)); MutSparseTensorDescriptor desc(rtp, fields); - const SmallVector indices = + const SmallVector coordinates = llvm::to_vector(args.take_back(lvlRank + 1).drop_back()); Value value = args.back(); - Value pos = constantZero(builder, loc, builder.getIndexType()); + Value parentPos = constantZero(builder, loc, builder.getIndexType()); // Generate code for every level. 
for (Level l = 0; l < lvlRank; l++) { const auto dlt = stt.getLvlType(l); if (isCompressedDLT(dlt)) { // Create: // if (!present) { - // indices[l].push_back(i[l]) - // + // coordinates[l].push_back(coords[l]) + // // } - // pos[l] = indices.size() - 1 - // - pos = genCompressed(builder, loc, desc, indices, value, pos, l); + // positions[l] = coordinates.size() - 1 + // + parentPos = + genCompressed(builder, loc, desc, coordinates, value, parentPos, l); } else if (isSingletonDLT(dlt)) { // Create: - // indices[l].push_back(i[l]) - // pos[l] = pos[l-1] - // - createPushback(builder, loc, desc, SparseTensorFieldKind::IdxMemRef, l, - indices[l]); + // coordinates[l].push_back(coords[l]) + // positions[l] = positions[l-1] + // + createPushback(builder, loc, desc, SparseTensorFieldKind::CrdMemRef, l, + coordinates[l]); } else { assert(isDenseDLT(dlt)); // Construct the new position as: - // pos[l] = size * pos[l-1] + i[l] - // + // positions[l] = size * positions[l-1] + coords[l] + // Value size = sizeFromTensorAtLvl(builder, loc, desc, l); - Value mult = builder.create(loc, size, pos); - pos = builder.create(loc, mult, indices[l]); + Value mult = builder.create(loc, size, parentPos); + parentPos = builder.create(loc, mult, coordinates[l]); } } // Reached the actual value append/insert. @@ -456,7 +457,7 @@ createPushback(builder, loc, desc, SparseTensorFieldKind::ValMemRef, std::nullopt, value); else - genStore(builder, loc, value, desc.getValMemRef(), pos); + genStore(builder, loc, value, desc.getValMemRef(), parentPos); builder.create(loc, fields); } @@ -464,19 +465,18 @@ /// function doesn't exist yet, call `createFunc` to generate the function. static void genInsertionCallHelper(OpBuilder &builder, MutSparseTensorDescriptor desc, - SmallVectorImpl &indices, Value value, + SmallVectorImpl &lcvs, Value value, func::FuncOp insertPoint, StringRef namePrefix, FuncGeneratorType createFunc) { // The mangled name of the function has this format: - // ____ - // __ + // ______ const SparseTensorType stt(desc.getRankedTensorType()); SmallString<32> nameBuffer; llvm::raw_svector_ostream nameOstream(nameBuffer); nameOstream << namePrefix; - assert(static_cast(stt.getLvlRank()) == indices.size()); const Level lvlRank = stt.getLvlRank(); + assert(lcvs.size() == static_cast(lvlRank)); for (Level l = 0; l < lvlRank; l++) nameOstream << toMLIRString(stt.getLvlType(l)) << "_"; // Static dim sizes are used in the generated code while dynamic sizes are @@ -488,7 +488,7 @@ if (!stt.isIdentity()) nameOstream << stt.getDimToLvlMap() << "_"; nameOstream << stt.getElementType() << "_"; - nameOstream << stt.getIndexBitWidth() << "_" << stt.getPointerBitWidth(); + nameOstream << stt.getCrdWidth() << "_" << stt.getPosWidth(); // Look up the function. ModuleOp module = insertPoint->getParentOfType(); @@ -496,9 +496,9 @@ auto result = SymbolRefAttr::get(context, nameOstream.str()); auto func = module.lookupSymbol(result.getAttr()); - // Construct parameters for fields and indices. + // Construct operands: fields, coords, and value. SmallVector operands = llvm::to_vector(desc.getFields()); - operands.append(indices); + operands.append(lcvs); operands.push_back(value); Location loc = insertPoint.getLoc(); @@ -531,31 +531,31 @@ for (Level l = 0; l < lvlRank; l++) { const auto dlt = stt.getLvlType(l); if (isCompressedDLT(dlt)) { - // Compressed dimensions need a pointer cleanup for all entries + // Compressed dimensions need a position cleanup for all entries // that were not visited during the insertion pass. 
// // TODO: avoid cleanup and keep compressed scheme consistent at all // times? // if (l > 0) { - Type ptrType = stt.getPointerType(); - Value ptrMemRef = desc.getPtrMemRef(l); - Value hi = desc.getPtrMemSize(builder, loc, l); + Type posType = stt.getPosType(); + Value posMemRef = desc.getPosMemRef(l); + Value hi = desc.getPosMemSize(builder, loc, l); Value zero = constantIndex(builder, loc, 0); Value one = constantIndex(builder, loc, 1); // Vector of only one, but needed by createFor's prototype. - SmallVector inits{genLoad(builder, loc, ptrMemRef, zero)}; + SmallVector inits{genLoad(builder, loc, posMemRef, zero)}; scf::ForOp loop = createFor(builder, loc, hi, inits, one); Value i = loop.getInductionVar(); Value oldv = loop.getRegionIterArg(0); - Value newv = genLoad(builder, loc, ptrMemRef, i); - Value ptrZero = constantZero(builder, loc, ptrType); + Value newv = genLoad(builder, loc, posMemRef, i); + Value posZero = constantZero(builder, loc, posType); Value cond = builder.create( - loc, arith::CmpIPredicate::eq, newv, ptrZero); - scf::IfOp ifOp = builder.create(loc, TypeRange(ptrType), + loc, arith::CmpIPredicate::eq, newv, posZero); + scf::IfOp ifOp = builder.create(loc, TypeRange(posType), cond, /*else*/ true); builder.setInsertionPointToStart(&ifOp.getThenRegion().front()); - genStore(builder, loc, oldv, ptrMemRef, i); + genStore(builder, loc, oldv, posMemRef, i); builder.create(loc, oldv); builder.setInsertionPointToStart(&ifOp.getElseRegion().front()); builder.create(loc, newv); @@ -684,12 +684,12 @@ LogicalResult matchAndRewrite(tensor::DimOp op, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const override { - std::optional index = op.getConstantIndex(); - if (!index || !getSparseTensorEncoding(adaptor.getSource().getType())) + std::optional dim = op.getConstantIndex(); + if (!dim || !getSparseTensorEncoding(adaptor.getSource().getType())) return failure(); auto desc = getDescriptorFromTensorTuple(adaptor.getSource()); - auto sz = sizeFromTensorAtDim(rewriter, op.getLoc(), desc, *index); + auto sz = sizeFromTensorAtDim(rewriter, op.getLoc(), desc, *dim); rewriter.replaceOp(op, sz); return success(); @@ -843,7 +843,7 @@ rewriter.create( loc, ValueRange{constantZero(rewriter, loc, boolType)}, ValueRange{filled}); - // Replace expansion op with these buffers and initial index. + // Replace expansion op with these buffers and initial coordinate. assert(op.getNumResults() == 4); rewriter.replaceOp(op, {values, filled, added, zero}); return success(); @@ -866,9 +866,9 @@ Value count = adaptor.getCount(); const SparseTensorType dstType(desc.getRankedTensorType()); Type eltType = dstType.getElementType(); - // Prepare indices. - SmallVector indices(adaptor.getIndices()); - // If the innermost level is ordered, we need to sort the indices + // Prepare level-coords. + SmallVector lcvs(adaptor.getLvlCoords()); + // If the innermost level is ordered, we need to sort the coordinates // in the "added" array prior to applying the compression. 
if (dstType.isOrderedLvl(dstType.getLvlRank() - 1)) rewriter.create(loc, count, ValueRange{added}, ValueRange{}, @@ -880,26 +880,25 @@ // // Generate // out_memrefs = for (i = 0; i < count; i++)(in_memrefs) { - // index = added[i]; - // value = values[index]; - // insert({prev_indices, index}, value); - // new_memrefs = insert(in_memrefs, {prev_indices, index}, value); - // values[index] = 0; - // filled[index] = false; + // crd = added[i]; + // value = values[crd]; + // insert({lvlCoords, crd}, value); + // new_memrefs = insert(in_memrefs, {lvlCoords, crd}, value); + // values[crd] = 0; + // filled[crd] = false; // yield new_memrefs // } scf::ForOp loop = createFor(rewriter, loc, count, desc.getFields()); Value i = loop.getInductionVar(); - Value index = genLoad(rewriter, loc, added, i); - Value value = genLoad(rewriter, loc, values, index); - indices.push_back(index); + Value crd = genLoad(rewriter, loc, added, i); + Value value = genLoad(rewriter, loc, values, crd); + lcvs.push_back(crd); // TODO: faster for subsequent insertions? auto insertPoint = op->template getParentOfType(); - genInsertionCallHelper(rewriter, desc, indices, value, insertPoint, + genInsertionCallHelper(rewriter, desc, lcvs, value, insertPoint, kInsertFuncNamePrefix, genInsertBody); - genStore(rewriter, loc, constantZero(rewriter, loc, eltType), values, - index); - genStore(rewriter, loc, constantI1(rewriter, loc, false), filled, index); + genStore(rewriter, loc, constantZero(rewriter, loc, eltType), values, crd); + genStore(rewriter, loc, constantI1(rewriter, loc, false), filled, crd); rewriter.create(loc, desc.getFields()); rewriter.setInsertionPointAfter(loop); Value result = genTuple(rewriter, loc, dstType, loop->getResults()); @@ -924,12 +923,11 @@ ConversionPatternRewriter &rewriter) const override { SmallVector fields; auto desc = getMutDescriptorFromTensorTuple(adaptor.getTensor(), fields); - // Prepare and indices. - SmallVector indices(adaptor.getIndices()); + SmallVector lcvs(adaptor.getLvlCoords()); // Generate insertion. Value value = adaptor.getValue(); auto insertPoint = op->template getParentOfType(); - genInsertionCallHelper(rewriter, desc, indices, value, insertPoint, + genInsertionCallHelper(rewriter, desc, lcvs, value, insertPoint, kInsertFuncNamePrefix, genInsertBody); // Replace operation with resulting memrefs. @@ -938,39 +936,38 @@ } }; -/// Sparse codegen rule for pointer accesses. -class SparseToPointersConverter : public OpConversionPattern { +/// Sparse codegen rule for position accesses. +class SparseToPositionsConverter : public OpConversionPattern { public: - using OpAdaptor = typename ToPointersOp::Adaptor; - using OpConversionPattern::OpConversionPattern; + using OpAdaptor = typename ToPositionsOp::Adaptor; + using OpConversionPattern::OpConversionPattern; LogicalResult - matchAndRewrite(ToPointersOp op, OpAdaptor adaptor, + matchAndRewrite(ToPositionsOp op, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const override { - // Replace the requested pointer access with corresponding field. + // Replace the requested position access with corresponding field. // The cast_op is inserted by type converter to intermix 1:N type // conversion. auto desc = getDescriptorFromTensorTuple(adaptor.getTensor()); - uint64_t dim = op.getDimension().getZExtValue(); - rewriter.replaceOp(op, desc.getPtrMemRef(dim)); + rewriter.replaceOp(op, desc.getPosMemRef(op.getLevel())); return success(); } }; -/// Sparse codegen rule for index accesses. 
-class SparseToIndicesConverter : public OpConversionPattern { +/// Sparse codegen rule for accessing the coordinates arrays. +class SparseToCoordinatesConverter + : public OpConversionPattern { public: - using OpAdaptor = typename ToIndicesOp::Adaptor; - using OpConversionPattern::OpConversionPattern; + using OpAdaptor = typename ToCoordinatesOp::Adaptor; + using OpConversionPattern::OpConversionPattern; LogicalResult - matchAndRewrite(ToIndicesOp op, OpAdaptor adaptor, + matchAndRewrite(ToCoordinatesOp op, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const override { - // Replace the requested pointer access with corresponding field. + // Replace the requested coordinates access with corresponding field. // The cast_op is inserted by type converter to intermix 1:N type // conversion. Location loc = op.getLoc(); auto desc = getDescriptorFromTensorTuple(adaptor.getTensor()); - uint64_t dim = op.getDimension().getZExtValue(); - Value field = desc.getIdxMemRefOrView(rewriter, loc, dim); + Value field = desc.getCrdMemRefOrView(rewriter, loc, op.getLevel()); // Insert a cast to bridge the actual type to the user expected type. If the // actual type and the user expected type aren't compatible, the compiler or @@ -984,16 +981,16 @@ } }; -/// Sparse codegen rule for accessing the linear indices buffer. -class SparseToIndicesBufferConverter - : public OpConversionPattern { +/// Sparse codegen rule for accessing the linear coordinates buffer. +class SparseToCoordinatesBufferConverter + : public OpConversionPattern { public: - using OpAdaptor = typename ToIndicesBufferOp::Adaptor; - using OpConversionPattern::OpConversionPattern; + using OpAdaptor = typename ToCoordinatesBufferOp::Adaptor; + using OpConversionPattern::OpConversionPattern; LogicalResult - matchAndRewrite(ToIndicesBufferOp op, OpAdaptor adaptor, + matchAndRewrite(ToCoordinatesBufferOp op, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const override { - // Replace the requested pointer access with corresponding field. + // Replace the requested coordinates access with corresponding field. // The cast_op is inserted by type converter to intermix 1:N type // conversion. auto desc = getDescriptorFromTensorTuple(adaptor.getTensor()); @@ -1011,7 +1008,7 @@ LogicalResult matchAndRewrite(ToValuesOp op, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const override { - // Replace the requested pointer access with corresponding field. + // Replace the requested values access with corresponding field. // The cast_op is inserted by type converter to intermix 1:N type // conversion. auto desc = getDescriptorFromTensorTuple(adaptor.getTensor()); @@ -1118,8 +1115,8 @@ assert(srcEnc.getDimLevelType() == dstEnc.getDimLevelType()); assert(srcEnc.getDimOrdering() == dstEnc.getDimOrdering()); assert(srcEnc.getHigherOrdering() == dstEnc.getHigherOrdering()); - assert(srcEnc.getPointerBitWidth() == dstEnc.getPointerBitWidth()); - assert(srcEnc.getIndexBitWidth() == dstEnc.getIndexBitWidth()); + assert(srcEnc.getPosWidth() == dstEnc.getPosWidth()); + assert(srcEnc.getCrdWidth() == dstEnc.getCrdWidth()); // TODO: support dynamic slices. 
for (int i = 0, e = op.getSourceType().getRank(); i < e; i++) { @@ -1174,11 +1171,12 @@ case SparseTensorFieldKind::StorageSpec: field = SparseTensorSpecifier::getInitValue(rewriter, loc, rtp); break; - case SparseTensorFieldKind::PtrMemRef: { - // TACO-style COO starts with a PtrBuffer + case SparseTensorFieldKind::PosMemRef: { + // TACO-style COO starts with a PosBuffer // By creating a constant value for it, we avoid the complexity of // memory management. - auto tensorType = RankedTensorType::get({2}, enc.getPointerType()); + const auto posTp = enc.getPosType(); + auto tensorType = RankedTensorType::get({2}, posTp); auto memrefType = MemRefType::get(tensorType.getShape(), tensorType.getElementType()); auto cstPtr = rewriter.create( @@ -1186,35 +1184,34 @@ DenseElementsAttr::get( tensorType, ArrayRef{ - IntegerAttr::get(enc.getPointerType(), 0), + IntegerAttr::get(posTp, 0), IntegerAttr::get( - enc.getPointerType(), - op.getData().getType().getShape()[0])})); + posTp, op.getValues().getType().getShape()[0])})); field = rewriter.create(loc, memrefType, cstPtr); break; } - case SparseTensorFieldKind::IdxMemRef: { - auto tensorType = op.getIndices().getType(); + case SparseTensorFieldKind::CrdMemRef: { + auto tensorType = op.getCoordinates().getType(); auto memrefType = MemRefType::get(tensorType.getShape(), tensorType.getElementType()); - auto idxMemRef = rewriter.create( - op->getLoc(), memrefType, op.getIndices()); + auto crdMemRef = rewriter.create( + op->getLoc(), memrefType, op.getCoordinates()); ReassociationIndices reassociation; for (int i = 0, e = tensorType.getRank(); i < e; i++) reassociation.push_back(i); // Flattened the indices buffer to rank 1. field = rewriter.create( - loc, idxMemRef, ArrayRef(reassociation)); + loc, crdMemRef, ArrayRef(reassociation)); break; } case SparseTensorFieldKind::ValMemRef: { - auto tensorType = op.getData().getType(); + auto tensorType = op.getValues().getType(); auto memrefType = MemRefType::get(tensorType.getShape(), tensorType.getElementType()); field = rewriter.create( - op->getLoc(), memrefType, op.getData()); + op->getLoc(), memrefType, op.getValues()); break; } } @@ -1228,15 +1225,18 @@ }); MutSparseTensorDescriptor desc(rtp, fields); - auto noe = linalg::createOrFoldDimOp(rewriter, loc, op.getData(), 0); - for (unsigned i = 0, e = rtp.getRank(); i < e; i++) { - int dim = rtp.getShape()[i]; - assert(!ShapedType::isDynamic(dim)); - desc.setDimSize(rewriter, loc, i, constantIndex(rewriter, loc, dim)); - if (i == 0) - desc.setPtrMemSize(rewriter, loc, i, constantIndex(rewriter, loc, 2)); - - desc.setIdxMemSize(rewriter, loc, i, noe); + auto noe = linalg::createOrFoldDimOp(rewriter, loc, op.getValues(), 0); + // FIXME: should use `SparseTensorType::getLvlRank` in lieu of + // `RankedTensorType::getRank`, because the latter introduces dim/lvl + // ambiguity. 
+ for (Level lvl = 0, lvlRank = rtp.getRank(); lvl < lvlRank; lvl++) { + const auto sh = rtp.getShape()[lvl]; + assert(!ShapedType::isDynamic(sh)); + desc.setLvlSize(rewriter, loc, lvl, constantIndex(rewriter, loc, sh)); + if (lvl == 0) + desc.setPosMemSize(rewriter, loc, lvl, constantIndex(rewriter, loc, 2)); + + desc.setCrdMemSize(rewriter, loc, lvl, noe); } desc.setValMemSize(rewriter, loc, noe); @@ -1252,39 +1252,43 @@ ConversionPatternRewriter &rewriter) const override { auto desc = getDescriptorFromTensorTuple(adaptor.getTensor()); Location loc = op.getLoc(); - int64_t rank = op.getTensor().getType().getRank(); + const auto srcTp = getSparseTensorType(op.getTensor()); + const Level lvlRank = srcTp.getLvlRank(); - assert(isUniqueCOOType(op.getTensor().getType()) && - desc.getFields().size() == 4); + assert(isUniqueCOOType(srcTp) && desc.getFields().size() == 4); - Value flatBuf = rank == 1 ? desc.getIdxMemRefOrView(rewriter, loc, 0) - : desc.getAOSMemRef(); - Value dataBuf = desc.getValMemRef(); + Value flatBuf = lvlRank == 1 ? desc.getCrdMemRefOrView(rewriter, loc, 0) + : desc.getAOSMemRef(); + Value valuesBuf = desc.getValMemRef(); - // If frontend requests a static buffer, we reallocate the data/indices - // to ensure that we meet their need. - TensorType dataTp = op.getData().getType(); - if (dataTp.hasStaticShape()) { - dataBuf = reallocOrSubView(rewriter, loc, dataTp.getShape()[0], dataBuf); + // If frontend requests a static buffer, we reallocate the + // values/coordinates to ensure that we meet their need. + const auto valuesTp = getRankedTensorType(op.getValues()); + if (valuesTp.hasStaticShape()) { + valuesBuf = + reallocOrSubView(rewriter, loc, valuesTp.getShape()[0], valuesBuf); } - TensorType indicesTp = op.getIndices().getType(); - if (indicesTp.hasStaticShape()) { - auto len = indicesTp.getShape()[0] * indicesTp.getShape()[1]; + const auto coordinatesTp = getRankedTensorType(op.getCoordinates()); + if (coordinatesTp.hasStaticShape()) { + auto len = coordinatesTp.getShape()[0] * coordinatesTp.getShape()[1]; flatBuf = reallocOrSubView(rewriter, loc, len, flatBuf); } - Value idxBuf = rewriter.create( - loc, MemRefType::get(indicesTp.getShape(), indicesTp.getElementType()), + Value coordinatesBuf = rewriter.create( + loc, + MemRefType::get(coordinatesTp.getShape(), + coordinatesTp.getElementType()), flatBuf, ArrayRef{ReassociationIndices{0, 1}}); // Converts MemRefs back to Tensors. - Value data = rewriter.create(loc, dataBuf); - Value indices = rewriter.create(loc, idxBuf); - Value nnz = genCast(rewriter, loc, desc.getValMemSize(rewriter, loc), - op.getNnz().getType()); + Value values = rewriter.create(loc, valuesBuf); + Value coordinates = + rewriter.create(loc, coordinatesBuf); + Value nse = genCast(rewriter, loc, desc.getValMemSize(rewriter, loc), + op.getNse().getType()); - rewriter.replaceOp(op, {data, indices, nnz}); + rewriter.replaceOp(op, {values, coordinates, nse}); return success(); } }; @@ -1296,52 +1300,54 @@ ConversionPatternRewriter &rewriter) const override { Location loc = op.getLoc(); const auto dstTp = getSparseTensorType(op.getResult()); - const auto encDst = dstTp.getEncoding(); // Creating COO with NewOp is handled by direct IR codegen. All other cases // are handled by rewriting. 
- if (!dstTp.hasEncoding() || getCOOStart(encDst) != 0) + if (!dstTp.hasEncoding() || getCOOStart(dstTp.getEncoding()) != 0) return failure(); // Implement the NewOp(filename) as follows: - // reader = getSparseTensorReader(filename) - // nse = getSparseTensorNNZ() - // tmp = bufferization.alloc_tensor an ordered COO with - // dst dim ordering, size_hint = nse - // indices = to_indices_buffer(tmp) - // values = to_values(tmp) - // isSorted = getSparseTensorReaderRead(indices, values, dimOrdering) - // if (!isSorted) sort_coo(nse, indices, values) + // %reader = @getSparseTensorReader(%filename) + // %nse = @getSparseTensorNSE(%reader) + // %coo = bufferization.alloc_tensor an ordered COO with + // dst dim ordering, size_hint = %nse + // %coordinates = sparse_tensor.coordinates_buffer(%coo) + // %values = sparse_tensor.values(%coo) + // %isSorted = @sparseTensorReaderReadToBuffers(%coordinates, %values) + // if (! %isSorted) sparse_tensor.sort_coo(%nse, %coordinates, %values) // update storage specifier - // dst = sparse_tensor.ConvertOp tmp + // @delSparseTensorReader(%reader) // Create a sparse tensor reader. - Value fileName = op.getSource(); - Type opaqueTp = getOpaquePointerType(rewriter); + const Value fileName = op.getSource(); + const Type opaqueTp = getOpaquePointerType(rewriter); + // FIXME: use `createCheckedSparseTensorReader` instead, because + // `createSparseTensorReader` is unsafe. Value reader = createFuncCall(rewriter, loc, "createSparseTensorReader", {opaqueTp}, {fileName}, EmitCInterface::Off) .getResult(0); - Type indexTp = rewriter.getIndexType(); + const Type indexTp = rewriter.getIndexType(); const Dimension dimRank = dstTp.getDimRank(); + const Level lvlRank = dstTp.getLvlRank(); // If the result tensor has dynamic dimensions, get the dynamic sizes from // the sparse tensor reader. SmallVector dynSizes; if (dstTp.hasDynamicDimShape()) { + // FIXME: call `getSparseTensorReaderDimSizes` instead, because + // `copySparseTensorReaderDimSizes` copies the memref over, + // instead of just accessing the reader's memory directly. Value dimSizes = genAlloca(rewriter, loc, dimRank, indexTp); createFuncCall(rewriter, loc, "copySparseTensorReaderDimSizes", {}, {reader, dimSizes}, EmitCInterface::On) .getResult(0); - ArrayRef dstShape = dstTp.getRankedTensorType().getShape(); - for (auto &d : llvm::enumerate(dstShape)) { - if (d.value() == ShapedType::kDynamic) { + for (const auto &d : llvm::enumerate(dstTp.getDimShape())) + if (ShapedType::isDynamic(d.value())) dynSizes.push_back(rewriter.create( loc, dimSizes, constantIndex(rewriter, loc, d.index()))); - } - } } - Value nse = createFuncCall(rewriter, loc, "getSparseTensorReaderNNZ", + Value nse = createFuncCall(rewriter, loc, "getSparseTensorReaderNSE", {indexTp}, {reader}, EmitCInterface::Off) .getResult(0); // Construct allocation for each field. @@ -1350,63 +1356,71 @@ fields, nse); MutSparseTensorDescriptor desc(dstTp, fields); - // Read the COO tensor data. - Type eltTp = dstTp.getElementType(); - Type indBufEleTp = getIndexOverheadType(rewriter, encDst); - SmallString<32> getReadFuncName{"getSparseTensorReaderRead", - overheadTypeFunctionSuffix(indBufEleTp), - primaryTypeFunctionSuffix(eltTp)}; - - Value xs = desc.getAOSMemRef(); - Value ys = desc.getValMemRef(); - SmallVector dim2lvlValues(dimRank, Value()); - if (auto dimOrder = encDst.getDimOrdering()) { + // Construct the `dim2lvl` buffer for handing off to the runtime library. 
+ // FIXME: This code is (mostly) copied from the SparseTensorConversion.cpp + // handling of `NewOp`, and only handles permutations. Fixing this + // requires waiting for wrengr to finish redoing the CL that handles + // all dim<->lvl stuff more robustly. + SmallVector dim2lvlValues(dimRank); + if (!dstTp.isIdentity()) { + const auto dimOrder = dstTp.getDimToLvlMap(); assert(dimOrder.isPermutation() && "Got non-permutation"); - for (uint64_t l = 0; l < dimRank; l++) { - uint64_t d = dimOrder.getDimPosition(l); + for (Level l = 0; l < lvlRank; l++) { + const Dimension d = dimOrder.getDimPosition(l); dim2lvlValues[d] = constantIndex(rewriter, loc, l); } } else { - for (uint64_t l = 0; l < dimRank; l++) - dim2lvlValues[l] = constantIndex(rewriter, loc, l); + // The `SparseTensorType` ctor already ensures `dimRank == lvlRank` + // when `isIdentity`; so no need to re-assert it here. + for (Dimension d = 0; d < dimRank; d++) + dim2lvlValues[d] = constantIndex(rewriter, loc, d); } Value dim2lvl = allocaBuffer(rewriter, loc, dim2lvlValues); - Value f = constantI1(rewriter, loc, false); + // Read the COO tensor data. + Value xs = desc.getAOSMemRef(); + Value ys = desc.getValMemRef(); + + const Type boolTp = rewriter.getIntegerType(1); + const Type elemTp = dstTp.getElementType(); + const Type crdTp = dstTp.getCrdType(); + // FIXME: This function name is weird; should rename to + // "sparseTensorReaderReadToBuffers". + SmallString<32> readToBuffersFuncName{"getSparseTensorReaderRead", + overheadTypeFunctionSuffix(crdTp), + primaryTypeFunctionSuffix(elemTp)}; Value isSorted = - createFuncCall(rewriter, loc, getReadFuncName, {f.getType()}, + createFuncCall(rewriter, loc, readToBuffersFuncName, {boolTp}, {reader, dim2lvl, xs, ys}, EmitCInterface::On) .getResult(0); // If the destination tensor is a sorted COO, we need to sort the COO tensor // data if the input elements aren't sorted yet. - if (encDst.isOrderedLvl(dimRank - 1)) { + if (dstTp.isOrderedLvl(lvlRank - 1)) { + Value kFalse = constantI1(rewriter, loc, false); Value notSorted = rewriter.create( - loc, arith::CmpIPredicate::eq, isSorted, f); + loc, arith::CmpIPredicate::eq, isSorted, kFalse); scf::IfOp ifOp = rewriter.create(loc, notSorted, /*else*/ false); rewriter.setInsertionPointToStart(&ifOp.getThenRegion().front()); rewriter.create( - loc, nse, xs, ValueRange{ys}, rewriter.getIndexAttr(dimRank), + loc, nse, xs, ValueRange{ys}, rewriter.getIndexAttr(lvlRank), rewriter.getIndexAttr(0), SparseTensorSortKind::HybridQuickSort); rewriter.setInsertionPointAfter(ifOp); } - // Set PtrMemRef0[1] = nse. - Value c1 = constantIndex(rewriter, loc, 1); - Value ptrMemref0 = desc.getPtrMemRef(0); - Type ptrEleTy = getMemRefType(ptrMemref0).getElementType(); - Value ptrNse = - ptrEleTy == nse.getType() - ? nse - : rewriter.create(loc, ptrEleTy, nse); - rewriter.create(loc, ptrNse, ptrMemref0, c1); + // Set PosMemRef0[1] = nse. + const Value c1 = constantIndex(rewriter, loc, 1); + const Value posMemref0 = desc.getPosMemRef(0); + const Type posTp = dstTp.getPosType(); + const Value posNse = genCast(rewriter, loc, nse, posTp); + rewriter.create(loc, posNse, posMemref0, c1); // Update storage specifier. 
- Value idxSize = rewriter.create( - loc, nse, constantIndex(rewriter, loc, dimRank)); - desc.setSpecifierField(rewriter, loc, StorageSpecifierKind::IdxMemSize, 0, - idxSize); + Value coordinatesSize = rewriter.create( + loc, nse, constantIndex(rewriter, loc, lvlRank)); + desc.setSpecifierField(rewriter, loc, StorageSpecifierKind::CrdMemSize, 0, + coordinatesSize); desc.setSpecifierField(rewriter, loc, StorageSpecifierKind::ValMemSize, std::nullopt, nse); @@ -1436,8 +1450,8 @@ SparseCastConverter, SparseTensorDeallocConverter, SparseExtractSliceCoverter, SparseTensorLoadConverter, SparseExpandConverter, SparseCompressConverter, - SparseInsertConverter, SparseToPointersConverter, - SparseToIndicesConverter, SparseToIndicesBufferConverter, + SparseInsertConverter, SparseToPositionsConverter, + SparseToCoordinatesConverter, SparseToCoordinatesBufferConverter, SparseToValuesConverter, SparseConvertConverter, SparseNewOpConverter, SparseNumberOfEntriesConverter>( typeConverter, patterns.getContext()); diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp @@ -239,8 +239,8 @@ // is there a better way to handle that than this one-off setter method? NewCallParams &setTemplateTypes(SparseTensorType stt) { const auto enc = stt.getEncoding(); - params[kParamPtrTp] = constantPointerTypeEncoding(builder, loc, enc); - params[kParamIndTp] = constantIndexTypeEncoding(builder, loc, enc); + params[kParamPosTp] = constantPosTypeEncoding(builder, loc, enc); + params[kParamCrdTp] = constantCrdTypeEncoding(builder, loc, enc); params[kParamValTp] = constantPrimaryTypeEncoding(builder, loc, stt.getElementType()); return *this; @@ -284,8 +284,8 @@ static constexpr unsigned kParamLvlTypes = 2; static constexpr unsigned kParamLvl2Dim = 3; static constexpr unsigned kParamDim2Lvl = 4; - static constexpr unsigned kParamPtrTp = 5; - static constexpr unsigned kParamIndTp = 6; + static constexpr unsigned kParamPosTp = 5; + static constexpr unsigned kParamCrdTp = 6; static constexpr unsigned kParamValTp = 7; static constexpr unsigned kParamAction = 8; static constexpr unsigned kParamPtr = 9; @@ -382,53 +382,38 @@ /// if val != 0 /// t->add(&val, [i1,..,ik], [p1,..,pk]); static void genAddEltCall(OpBuilder &builder, Location loc, Type eltType, - Value lvlCOO, Value valPtr, Value dimInd, + Value lvlCOO, Value valPtr, Value dimCoords, Value dim2lvl) { SmallString<9> name{"addElt", primaryTypeFunctionSuffix(eltType)}; - SmallVector params{lvlCOO, valPtr, dimInd, dim2lvl}; + SmallVector params{lvlCOO, valPtr, dimCoords, dim2lvl}; Type pTp = getOpaquePointerType(builder); createFuncCall(builder, loc, name, pTp, params, EmitCInterface::On); } /// Generates a call to `iter->getNext()`. If there is a next element, -/// then it is copied into the out-parameters `ind` and `elemPtr`, +/// then it is copied into the out-parameters `coords` and `elemPtr`, /// and the return value is true. If there isn't a next element, then /// the return value is false. +/// +/// The `coords` argument uses the same coordinate-space as the `iter` +/// (which can be either dim- or lvl-coords, depending on context). 
static Value genGetNextCall(OpBuilder &builder, Location loc, Value iter, - Value ind, Value elemPtr) { + Value coords, Value elemPtr) { Type elemTp = elemPtr.getType().cast().getElementType(); SmallString<10> name{"getNext", primaryTypeFunctionSuffix(elemTp)}; - SmallVector params{iter, ind, elemPtr}; + SmallVector params{iter, coords, elemPtr}; Type i1 = builder.getI1Type(); return createFuncCall(builder, loc, name, i1, params, EmitCInterface::On) .getResult(0); } -/// Converts a pointer to COO (from calls to iter->next()) into a vector of -/// indices, apply (optional) `offset` on `offsetDim`. -static SmallVector loadIndices(OpBuilder &builder, Location loc, - unsigned rank, Value ind, - unsigned offsetDim = 0, - Value offset = Value()) { - SmallVector ivs; - ivs.reserve(rank); - for (unsigned i = 0; i < rank; i++) { - Value idx = constantIndex(builder, loc, i); - idx = builder.create(loc, ind, idx); - if (offsetDim == i && offset) - idx = builder.create(loc, idx, offset); - ivs.push_back(idx); - } - return ivs; -} - -/// Inserts a value stored in `elemPtr` into a dense tensor created by -/// allocDenseTensor(). +/// Loads the value stored in `elemPtr`, and stores it at the coordinates +/// `cvs` into a dense tensor created by `allocDenseTensor`. static void insertScalarIntoDenseTensor(OpBuilder &builder, Location loc, Value elemPtr, Value tensor, - ValueRange ivs) { + ValueRange cvs) { Value elemV = builder.create(loc, elemPtr); - builder.create(loc, elemV, tensor, ivs); + builder.create(loc, elemV, tensor, cvs); } /// Determine if the runtime library supports direct conversion to the @@ -453,31 +438,17 @@ return true; } -/// Helper method to translate indices during a reshaping operation. +/// Helper method to translate coordinates during a reshaping operation. /// TODO: provide as general utility to MLIR at large? -static void translateIndices(Location loc, ConversionPatternRewriter &rewriter, - ArrayRef reassociation, - TensorType dstTp, TensorType srcTp, Value dstIdx, - Value srcIdx, ArrayRef dstShape, - ArrayRef srcShape) { - const Dimension dstRank = dstTp.getRank(); - const Dimension srcRank = srcTp.getRank(); - - SmallVector srcIndices; - srcIndices.reserve(srcRank); - for (Dimension d = 0; d < srcRank; d++) { - Value idx = rewriter.create( - loc, srcIdx, constantIndex(rewriter, loc, d)); - srcIndices.push_back(idx); - } - - SmallVector dstIndices; - translateIndicesArray(rewriter, loc, reassociation, srcIndices, srcShape, - dstShape, dstIndices); - - for (Dimension d = 0; d < dstRank; d++) - rewriter.create(loc, dstIndices[d], dstIdx, - constantIndex(rewriter, loc, d)); +static void reshapeCoords(Location loc, OpBuilder &builder, + ArrayRef reassociation, + ValueRange srcSizes, Value srcCoords, + ValueRange dstSizes, Value dstCoords) { + const auto srcCvs = loadAll(builder, loc, srcSizes.size(), srcCoords); + SmallVector dstCvs; + reshapeCvs(builder, loc, reassociation, srcSizes, srcCvs, dstSizes, dstCvs); + assert(dstCvs.size() == dstSizes.size()); + storeAll(builder, loc, dstCoords, dstCvs); } /// Generate code for a general sparse to sparse reshaping operation. 
@@ -491,7 +462,7 @@ /// iter = src->toCOO(); /// coo = newSparseCOO() /// while (elem = iter->getNext()) { -/// coo->add(reshape(elem.indices), elem.value) +/// coo->add(reshape(elem.coords), elem.value) /// } /// s = newSparseTensor(coo) template @@ -506,7 +477,7 @@ Type elemTp = srcTp.getElementType(); assert(elemTp == dstTp.getElementType() && "reshape should not change element type"); - // Start an iterator over the source tensor (in original index order). + // Start an iterator over the source tensor (in coordinate order). SmallVector srcDimSizes = getDimSizes(rewriter, loc, srcTp, adaptor.getSrc()); NewCallParams params(rewriter, loc); @@ -520,28 +491,31 @@ else genReshapeDstShape(loc, rewriter, dstDimSizes, srcDimSizes, dstTp.getDimShape(), op.getReassociationIndices()); - Value coo = + const Value coo = params.genBuffers(dstTp, dstDimSizes).genNewCall(Action::kEmptyCOO); - Value dstPerm = params.getDim2LvlMap(); + const Value dstPerm = params.getDim2LvlMap(); // Construct a while loop over the iterator. - Type iTp = rewriter.getIndexType(); - Value srcIdx = genAlloca(rewriter, loc, srcTp.getDimRank(), iTp); - Value dstIdx = genAlloca(rewriter, loc, dstTp.getDimRank(), iTp); - Value elemPtr = genAllocaScalar(rewriter, loc, elemTp); - SmallVector noArgs; - SmallVector noTypes; + const Type iTp = rewriter.getIndexType(); + const Value srcDimCoords = genAlloca(rewriter, loc, srcTp.getDimRank(), iTp); + const Value dstDimCoords = genAlloca(rewriter, loc, dstTp.getDimRank(), iTp); + const Value elemPtr = genAllocaScalar(rewriter, loc, elemTp); + const SmallVector noArgs; + const SmallVector noTypes; auto whileOp = rewriter.create(loc, noTypes, noArgs); Block *before = rewriter.createBlock(&whileOp.getBefore(), {}, noTypes); rewriter.setInsertionPointToEnd(before); - Value cond = genGetNextCall(rewriter, loc, iter, srcIdx, elemPtr); + Value cond = genGetNextCall(rewriter, loc, iter, srcDimCoords, elemPtr); rewriter.create(loc, cond, before->getArguments()); - // Translate indices from source to target and insert. Note that we do + // Translate coordinates from source to target and insert. Note that we do // not need to store the value in elemPtr, as the value is still there. Block *after = rewriter.createBlock(&whileOp.getAfter(), {}, noTypes); rewriter.setInsertionPointToStart(after); - translateIndices(loc, rewriter, op.getReassociationIndices(), dstTp, srcTp, - dstIdx, srcIdx, dstDimSizes, srcDimSizes); - genAddEltCall(rewriter, loc, elemTp, coo, elemPtr, dstIdx, dstPerm); + // We probably don't need these assertions, but better safe than sorry. + assert(srcTp.getDimRank() == srcDimSizes.size()); + assert(dstTp.getDimRank() == dstDimSizes.size()); + reshapeCoords(loc, rewriter, op.getReassociationIndices(), srcDimSizes, + srcDimCoords, dstDimSizes, dstDimCoords); + genAddEltCall(rewriter, loc, elemTp, coo, elemPtr, dstDimCoords, dstPerm); rewriter.create(loc); // Final call to construct sparse tensor storage and free temporary resources. rewriter.setInsertionPointAfter(whileOp); @@ -569,7 +543,7 @@ const Dimension dimRank = stt.getDimRank(); const Type elemTp = stt.getElementType(); - // Start an iterator over the tensor (in original index order). + // Start an iterator over the tensor (in coordinate order). const auto noPerm = stt.withoutOrdering(); SmallVector dimSizes = getDimSizes(rewriter, loc, noPerm, t); Value iter = NewCallParams(rewriter, loc) @@ -577,14 +551,15 @@ .genNewCall(Action::kToIterator, t); // Construct a while loop over the iterator. 
- Value srcIdx = genAlloca(rewriter, loc, dimRank, rewriter.getIndexType()); + const Type iTp = rewriter.getIndexType(); + Value srcDimCoords = genAlloca(rewriter, loc, dimRank, iTp); Value elemPtr = genAllocaScalar(rewriter, loc, elemTp); - SmallVector noArgs; - SmallVector noTypes; + const SmallVector noArgs; + const SmallVector noTypes; auto whileOp = rewriter.create(loc, noTypes, noArgs); Block *before = rewriter.createBlock(&whileOp.getBefore(), {}, noTypes); rewriter.setInsertionPointToEnd(before); - Value cond = genGetNextCall(rewriter, loc, iter, srcIdx, elemPtr); + Value cond = genGetNextCall(rewriter, loc, iter, srcDimCoords, elemPtr); rewriter.create(loc, cond, before->getArguments()); Block *after = rewriter.createBlock(&whileOp.getAfter(), {}, noTypes); rewriter.setInsertionPointToStart(after); @@ -598,7 +573,7 @@ rewriter.setInsertionPointToStart(&ifOp.getThenRegion().front()); } // Callback here to build loop body. - bodyBuilder(rewriter, loc, srcIdx, elemPtr); + bodyBuilder(rewriter, loc, srcDimCoords, elemPtr); // Exit the scope from the IfOp. if (hasDenseDim) @@ -811,8 +786,8 @@ genLvlTypesBuffer(rewriter, loc, stt), lvl2dimBuffer, dim2lvlBuffer, - constantPointerTypeEncoding(rewriter, loc, stt.getEncoding()), - constantIndexTypeEncoding(rewriter, loc, stt.getEncoding()), + constantPosTypeEncoding(rewriter, loc, stt.getEncoding()), + constantCrdTypeEncoding(rewriter, loc, stt.getEncoding()), valTp}; Value tensor = createFuncCall(rewriter, loc, "newSparseTensorFromReader", opaqueTp, params, EmitCInterface::On) @@ -922,8 +897,8 @@ // the correct sparsity information to either of them. const auto mixedEnc = SparseTensorEncodingAttr::get( op->getContext(), dstEnc.getDimLevelType(), dstEnc.getDimOrdering(), - dstEnc.getHigherOrdering(), srcEnc.getPointerBitWidth(), - srcEnc.getIndexBitWidth()); + dstEnc.getHigherOrdering(), srcEnc.getPosWidth(), + srcEnc.getCrdWidth()); // TODO: This is the only place where `kToCOO` (or `kToIterator`) // is called with a non-identity permutation. Is there any clean // way to push the permutation over to the `kFromCOO` side instead? @@ -942,38 +917,39 @@ // dst = new Tensor(0); // iter = new SparseTensorIterator(src); // while (elem = iter->getNext()) { - // dst[elem.indices] = elem.value; + // dst[elem.coords] = elem.value; // } // delete iter; // // Fabricate a no-permutation encoding for NewCallParams - // The pointer/index types must be those of `src`. + // The position/coordinate types must be those of `src`. // The dimLevelTypes aren't actually used by Action::kToIterator. const auto dstEnc = SparseTensorEncodingAttr::get( op->getContext(), SmallVector(dimRank, DimLevelType::Dense), AffineMap(), - AffineMap(), srcEnc.getPointerBitWidth(), srcEnc.getIndexBitWidth()); + AffineMap(), srcEnc.getPosWidth(), srcEnc.getCrdWidth()); SmallVector dimSizes = getDimSizes(rewriter, loc, srcTp, src); Value iter = NewCallParams(rewriter, loc) .genBuffers(dstTp.withEncoding(dstEnc), dimSizes) .genNewCall(Action::kToIterator, src); - Value ind = genAlloca(rewriter, loc, dimRank, rewriter.getIndexType()); + const Type iTp = rewriter.getIndexType(); + Value dimCoords = genAlloca(rewriter, loc, dimRank, iTp); Value elemPtr = genAllocaScalar(rewriter, loc, elemTp); Block *insertionBlock = rewriter.getInsertionBlock(); // TODO: Dense buffers should be allocated/deallocated via the callback // in BufferizationOptions. 
Value dst = allocDenseTensor(rewriter, loc, dstTp, dimSizes); - SmallVector noArgs; - SmallVector noTypes; + const SmallVector noArgs; + const SmallVector noTypes; auto whileOp = rewriter.create(loc, noTypes, noArgs); Block *before = rewriter.createBlock(&whileOp.getBefore(), {}, noTypes); rewriter.setInsertionPointToEnd(before); - Value cond = genGetNextCall(rewriter, loc, iter, ind, elemPtr); + Value cond = genGetNextCall(rewriter, loc, iter, dimCoords, elemPtr); rewriter.create(loc, cond, before->getArguments()); Block *after = rewriter.createBlock(&whileOp.getAfter(), {}, noTypes); rewriter.setInsertionPointToStart(after); - SmallVector ivs = loadIndices(rewriter, loc, dimRank, ind); - insertScalarIntoDenseTensor(rewriter, loc, elemPtr, dst, ivs); + const auto dcvs = loadAll(rewriter, loc, dimRank, dimCoords); + insertScalarIntoDenseTensor(rewriter, loc, elemPtr, dst, dcvs); rewriter.create(loc); rewriter.setInsertionPointAfter(whileOp); genDelIteratorCall(rewriter, loc, elemTp, iter); @@ -1004,7 +980,7 @@ // To fill the COO tensor from a sparse constant in COO format: // for i in range(NNZ) // val = values[i] - // [i1,..,ik] = indices[i] + // [i1,..,ik] = coordinates[i] // t->add(val, [i1,..,ik], [p1,..,pk]) // // Note that the dense tensor traversal code is actually implemented @@ -1018,18 +994,17 @@ NewCallParams params(rewriter, loc); Value coo = params.genBuffers(dstTp, dimSizes).genNewCall(Action::kEmptyCOO); - Value ind = genAlloca(rewriter, loc, dimRank, rewriter.getIndexType()); + const Type iTp = rewriter.getIndexType(); + Value dimCoords = genAlloca(rewriter, loc, dimRank, iTp); Value perm = params.getDim2LvlMap(); Value elemPtr = genAllocaScalar(rewriter, loc, elemTp); genDenseTensorOrSparseConstantIterLoop( rewriter, loc, src, dimRank, - [&](OpBuilder &builder, Location loc, Value val, ValueRange ivs) { - for (Dimension d = 0; d < dimRank; d++) { - Value dim = constantIndex(builder, loc, d); - builder.create(loc, ivs[d], ind, dim); - } + [&](OpBuilder &builder, Location loc, Value val, ValueRange dcvs) { + assert(dcvs.size() == static_cast(dimRank)); + storeAll(builder, loc, dimCoords, dcvs); builder.create(loc, val, elemPtr); - genAddEltCall(builder, loc, elemTp, coo, elemPtr, ind, perm); + genAddEltCall(builder, loc, elemTp, coo, elemPtr, dimCoords, perm); }); // Final call to construct sparse tensor storage. Value dst = params.genNewCall(Action::kFromCOO, coo); @@ -1061,41 +1036,43 @@ } }; -/// Sparse conversion rule for pointer accesses. -class SparseTensorToPointersConverter - : public OpConversionPattern { +/// Sparse conversion rule for position accesses. 
+class SparseTensorToPositionsConverter + : public OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; LogicalResult - matchAndRewrite(ToPointersOp op, OpAdaptor adaptor, + matchAndRewrite(ToPositionsOp op, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const override { - Type resType = op.getType(); - Type ptrType = resType.cast().getElementType(); - SmallString<16> name{"sparsePointers", overheadTypeFunctionSuffix(ptrType)}; - Value dim = - constantIndex(rewriter, op->getLoc(), op.getDimension().getZExtValue()); - replaceOpWithFuncCall(rewriter, op, name, resType, - {adaptor.getTensor(), dim}, EmitCInterface::On); + Type resTp = op.getType(); + Type posTp = resTp.cast().getElementType(); + SmallString<17> name{"sparsePositions", overheadTypeFunctionSuffix(posTp)}; + Value lvl = constantIndex(rewriter, op->getLoc(), op.getLevel()); + replaceOpWithFuncCall(rewriter, op, name, resTp, {adaptor.getTensor(), lvl}, + EmitCInterface::On); return success(); } }; -/// Sparse conversion rule for index accesses. -class SparseTensorToIndicesConverter : public OpConversionPattern { +/// Sparse conversion rule for coordinate accesses. +class SparseTensorToCoordinatesConverter + : public OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; LogicalResult - matchAndRewrite(ToIndicesOp op, OpAdaptor adaptor, + matchAndRewrite(ToCoordinatesOp op, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const override { + // TODO: use `SparseTensorType::getCrdType` instead. Type resType = op.getType(); - Type indType = resType.cast().getElementType(); - SmallString<15> name{"sparseIndices", overheadTypeFunctionSuffix(indType)}; + const Type crdTp = resType.cast().getElementType(); + SmallString<19> name{"sparseCoordinates", + overheadTypeFunctionSuffix(crdTp)}; Location loc = op->getLoc(); - Value dim = constantIndex(rewriter, loc, op.getDimension().getZExtValue()); + Value lvl = constantIndex(rewriter, loc, op.getLevel()); // The function returns a MemRef without a layout. - MemRefType callRetType = get1DMemRefType(indType, false); - SmallVector operands{adaptor.getTensor(), dim}; + MemRefType callRetType = get1DMemRefType(crdTp, false); + SmallVector operands{adaptor.getTensor(), lvl}; auto fn = getFunc(op->getParentOfType(), name, callRetType, operands, EmitCInterface::On); Value callRet = @@ -1171,21 +1148,19 @@ matchAndRewrite(InsertOp op, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const override { // Note that the current regime only allows for strict lexicographic - // index order. All values are passed by reference through stack + // coordinate order. All values are passed by reference through stack // allocated memrefs. 
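// [Editorial sketch -- illustration only, not part of the patch.] "Strict
// lexicographic coordinate order" means each inserted coordinate tuple must
// compare lexicographically greater than the previously inserted one. A
// self-contained sketch of that ordering check (names are assumptions made
// for this illustration, and both tuples are assumed to have the same rank):
#include <cstdint>
#include <vector>

static bool lexicographicallyLess(const std::vector<uint64_t> &a,
                                  const std::vector<uint64_t> &b) {
  for (uint64_t l = 0, rank = a.size(); l < rank; ++l) {
    if (a[l] < b[l])
      return true;
    if (a[l] > b[l])
      return false;
  }
  return false; // equal tuples are not strictly ordered
}
// A sequence of lexInsert calls is valid iff every tuple satisfies
// lexicographicallyLess(previous, current), e.g. (0,1) < (0,3) < (2,0).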
Location loc = op->getLoc(); const auto stt = getSparseTensorType(op.getTensor()); const auto elemTp = stt.getElementType(); - const Dimension dimRank = stt.getDimRank(); - auto mref = genAlloca(rewriter, loc, dimRank, rewriter.getIndexType()); + const Level lvlRank = stt.getLvlRank(); + auto lvlCoords = genAlloca(rewriter, loc, lvlRank, rewriter.getIndexType()); auto vref = genAllocaScalar(rewriter, loc, elemTp); - for (Dimension d = 0; d < dimRank; d++) - rewriter.create(loc, adaptor.getIndices()[d], mref, - constantIndex(rewriter, loc, d)); + storeAll(rewriter, loc, lvlCoords, adaptor.getLvlCoords()); rewriter.create(loc, adaptor.getValue(), vref); SmallString<12> name{"lexInsert", primaryTypeFunctionSuffix(elemTp)}; - createFuncCall(rewriter, loc, name, {}, {adaptor.getTensor(), mref, vref}, - EmitCInterface::On); + createFuncCall(rewriter, loc, name, {}, + {adaptor.getTensor(), lvlCoords, vref}, EmitCInterface::On); rewriter.replaceOp(op, adaptor.getTensor()); return success(); } @@ -1208,12 +1183,12 @@ // Get the cardinality of valid coordinates for the innermost level. Value sz = createOrFoldLvlCall(rewriter, loc, srcTp, adaptor.getTensor(), srcTp.getLvlRank() - 1); - // Allocate temporary buffers for values, filled-switch, and indices. + // Allocate temporary buffers for values, filled-switch, and coordinates. // We do not use stack buffers for this, since the expanded size may // be rather large (as it envelops a single expanded dense dimension). Value values = genAlloc(rewriter, loc, sz, eltType); Value filled = genAlloc(rewriter, loc, sz, boolType); - Value indices = genAlloc(rewriter, loc, sz, idxType); + Value lastLvlCoordinates = genAlloc(rewriter, loc, sz, idxType); Value zero = constantZero(rewriter, loc, idxType); // Reset the values/filled-switch to all-zero/false. Note that this // introduces an O(N) operation into the computation, but this reset @@ -1226,9 +1201,9 @@ rewriter.create( loc, ValueRange{constantZero(rewriter, loc, boolType)}, ValueRange{filled}); - // Replace expansion op with these buffers and initial index. + // Replace expansion op with these buffers and initial coordinate. assert(op.getNumResults() == 4); - rewriter.replaceOp(op, {values, filled, indices, zero}); + rewriter.replaceOp(op, {values, filled, lastLvlCoordinates, zero}); return success(); } }; @@ -1252,14 +1227,12 @@ Value tensor = adaptor.getTensor(); const auto stt = getSparseTensorType(op.getTensor()); const Type elemTp = stt.getElementType(); - const Dimension dimRank = stt.getDimRank(); - auto mref = genAlloca(rewriter, loc, dimRank, rewriter.getIndexType()); - for (Dimension d = 0; d < dimRank - 1; d++) - rewriter.create(loc, adaptor.getIndices()[d], mref, - constantIndex(rewriter, loc, d)); + const Level lvlRank = stt.getLvlRank(); + auto lvlCoords = genAlloca(rewriter, loc, lvlRank, rewriter.getIndexType()); + storeAll(rewriter, loc, lvlCoords, adaptor.getLvlCoords()); SmallString<12> name{"expInsert", primaryTypeFunctionSuffix(elemTp)}; createFuncCall(rewriter, loc, name, {}, - {tensor, mref, values, filled, added, count}, + {tensor, lvlCoords, values, filled, added, count}, EmitCInterface::On); rewriter.replaceOp(op, adaptor.getTensor()); // Deallocate the buffers on exit of the loop nest. @@ -1288,7 +1261,7 @@ // coo->add(adjustForOffset(i,j,k), b[i,j,k]) // // for elem in sparse_input - // coo->add(adjustForOffset(elem.indices), elem.value) + // coo->add(adjustForOffset(elem.coords), elem.value) // ... 
// a = newSparseTensor(coo_for_a) // return a @@ -1301,22 +1274,22 @@ // a[ adjustForOffset(i,j,k) ] = b[i,j,k] // // for elem in sparse_input - // a[ adjustForOffset(elem.indices) ] = elem.value + // a[ adjustForOffset(elem.coords) ] = elem.value // return a Location loc = op.getLoc(); const auto dstTp = getSparseTensorType(op); const auto dstEnc = dstTp.getEncoding(); const Type elemTp = dstTp.getElementType(); - const Dimension concatDim = op.getDimension().getZExtValue(); + const Dimension concatDim = op.getDimension(); const Dimension dimRank = dstTp.getDimRank(); Value dst; // destination tensor Value dstPerm; // destination tensor permutation (if sparse out) // A pointer to the value being inserted (if dense => sparse) Value elemPtr; - // Memory that holds the dim-indices for destination tensor (if sparse out) - Value dstInd; - // The offset applied to the dimenstion to be concated (starting from 0) + // Memory that holds the dim-coords for destination tensor (if sparse out) + Value dstDimCoords; + // The offset applied to the dimension to be concated (starting from 0) Value offset = constantIndex(rewriter, loc, 0); SmallVector dimSizes; @@ -1330,7 +1303,7 @@ // Start a new COO or an initialized annotated all dense sparse tensor. dst = params.genBuffers(dstTp, dimSizes) .genNewCall(allDense ? Action::kEmpty : Action::kEmptyCOO); - dstInd = genAlloca(rewriter, loc, dimRank, rewriter.getIndexType()); + dstDimCoords = genAlloca(rewriter, loc, dimRank, rewriter.getIndexType()); if (allDense) { dstTensor = dst; // Get the values buffer for the sparse tensor and reshape it to the @@ -1338,9 +1311,12 @@ dst = genValuesCall(rewriter, loc, MemRefType::get({ShapedType::kDynamic}, elemTp), {dst}); - // Use the dstInd to store the level sizes. - dst = - reshapeValuesToLevels(rewriter, loc, dstEnc, dimSizes, dst, dstInd); + // Pass the `dstDimCoords` buffer for `reshapeValuesToLevels` + // to reuse for storing level-sizes (yes, "level-sizes"). + // This is safe to do because `dstTp` is a dense-tensor type, + // and therefore lvlRank == dimRank. + dst = reshapeValuesToLevels(rewriter, loc, dstEnc, dimSizes, dst, + dstDimCoords); } else { dstPerm = params.getDim2LvlMap(); elemPtr = genAllocaScalar(rewriter, loc, elemTp); @@ -1351,13 +1327,13 @@ dst = allocDenseTensor(rewriter, loc, dstTp, dimSizes); } const Level lvlRank = dstTp.getLvlRank(); - const auto dimIvs2LvlIvs = [&](ValueRange dimIvs) -> SmallVector { - SmallVector lvlIvs; - lvlIvs.reserve(lvlRank); + const auto dcvs2lcvs = [&](ValueRange dcvs) -> SmallVector { + SmallVector lcvs; + lcvs.reserve(lvlRank); for (Level l = 0; l < lvlRank; l++) // FIXME: `toOrigDim` is deprecated - lvlIvs.push_back(dimIvs[toOrigDim(dstEnc, l)]); - return lvlIvs; + lcvs.push_back(dcvs[toOrigDim(dstEnc, l)]); + return lcvs; }; for (const auto &it : llvm::zip(op.getInputs(), adaptor.getInputs())) { Value orignalOp = std::get<0>(it); // Input (with encoding) from Op @@ -1366,45 +1342,45 @@ if (srcTp.hasEncoding()) { genSparseCOOIterationLoop( rewriter, loc, adaptedOp, srcTp, - [&](OpBuilder &builder, Location loc, Value idx, + [&](OpBuilder &builder, Location loc, Value dimCoords, Value elemPtr) -> void { - SmallVector dimIvs = - loadIndices(builder, loc, dimRank, idx, concatDim, offset); + const auto dcvs = + loadAll(builder, loc, dimRank, dimCoords, concatDim, offset); if (dstTp.hasEncoding() && !allDense) { // Case: sparse => sparse, except for annotated all dense. 
- storeIndices(builder, loc, dimRank, dstInd, dimIvs); - genAddEltCall(builder, loc, elemTp, dst, elemPtr, dstInd, + storeAll(builder, loc, dstDimCoords, dcvs); + genAddEltCall(builder, loc, elemTp, dst, elemPtr, dstDimCoords, dstPerm); } else { // Case: sparse => dense, or annotated all dense. - const auto lvlIvs = allDense ? dimIvs2LvlIvs(dimIvs) : dimIvs; - insertScalarIntoDenseTensor(builder, loc, elemPtr, dst, lvlIvs); + const auto lcvs = allDense ? dcvs2lcvs(dcvs) : dcvs; + insertScalarIntoDenseTensor(builder, loc, elemPtr, dst, lcvs); } }); } else { genDenseTensorIterationLoop( rewriter, loc, adaptedOp, srcTp, - [&](OpBuilder &builder, Location loc, ValueRange dimIvs) -> void { + [&](OpBuilder &builder, Location loc, ValueRange dcvs) -> void { if (dstTp.hasEncoding() && !allDense) { // Case: dense => sparse, except for annotated all dense. - storeIndices(builder, loc, dimRank, dstInd, dimIvs, concatDim, - offset); - Value val = genValueForDense(builder, loc, adaptedOp, dimIvs); + assert(dcvs.size() == static_cast(dimRank)); + storeAll(builder, loc, dstDimCoords, dcvs, concatDim, offset); + Value val = genValueForDense(builder, loc, adaptedOp, dcvs); builder.create(loc, val, elemPtr); - genAddEltCall(builder, loc, elemTp, dst, elemPtr, dstInd, + genAddEltCall(builder, loc, elemTp, dst, elemPtr, dstDimCoords, dstPerm); } else { // Case: dense => dense, or annotated all dense. - Value val = genValueForDense(builder, loc, adaptedOp, dimIvs); - // Despite the name, this isn't actually level-ivs until - // after the `dimIvs2LvlIvs` call. - SmallVector lvlIvs(dimIvs); + Value val = genValueForDense(builder, loc, adaptedOp, dcvs); + // Despite the name, this isn't actually level-cvs until + // after the `dcvs2lcvs` call. + SmallVector lcvs(dcvs); // Apply offset. - lvlIvs[concatDim] = builder.create( - loc, lvlIvs[concatDim], offset); + lcvs[concatDim] = + builder.create(loc, lcvs[concatDim], offset); if (allDense) - lvlIvs = dimIvs2LvlIvs(lvlIvs); - builder.create(loc, val, dst, lvlIvs); + lcvs = dcvs2lcvs(lcvs); + builder.create(loc, val, dst, lcvs); } }); } @@ -1446,11 +1422,11 @@ Value coo = NewCallParams(rewriter, loc) .genBuffers(srcTp.withoutOrdering(), dimSizes) .genNewCall(Action::kToCOO, src); - // Then output the tensor to external file with indices in the externally - // visible lexicographic index order. A sort is required if the source was - // not in that order yet (note that the sort can be dropped altogether if - // external format does not care about the order at all, but here we assume - // it does). + // Then output the tensor to external file with coordinates in the + // externally visible lexicographic coordinate order. A sort is + // required if the source was not in that order yet (note that the + // sort can be dropped altogether if external format does not care + // about the order at all, but here we assume it does). 
const Value sort = constantI1(rewriter, loc, !srcTp.isIdentity()); SmallVector outParams{coo, adaptor.getOperands()[1], sort}; const Type elemTp = srcTp.getElementType(); @@ -1482,17 +1458,18 @@ void mlir::populateSparseTensorConversionPatterns( TypeConverter &typeConverter, RewritePatternSet &patterns, const SparseTensorConversionOptions &options) { - patterns.add, - SparseReshapeConverter, - SparseTensorConcatConverter, SparseTensorAllocConverter, - SparseTensorDeallocConverter, SparseTensorToPointersConverter, - SparseTensorToIndicesConverter, SparseTensorToValuesConverter, - SparseNumberOfEntriesConverter, SparseTensorLoadConverter, - SparseTensorInsertConverter, SparseTensorExpandConverter, - SparseTensorCompressConverter, SparseTensorOutConverter>( - typeConverter, patterns.getContext()); + patterns + .add, + SparseReshapeConverter, + SparseTensorConcatConverter, SparseTensorAllocConverter, + SparseTensorDeallocConverter, SparseTensorToPositionsConverter, + SparseTensorToCoordinatesConverter, SparseTensorToValuesConverter, + SparseNumberOfEntriesConverter, SparseTensorLoadConverter, + SparseTensorInsertConverter, SparseTensorExpandConverter, + SparseTensorCompressConverter, SparseTensorOutConverter>( + typeConverter, patterns.getContext()); patterns.add(typeConverter, patterns.getContext(), options); diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp @@ -159,9 +159,9 @@ // Foreach on constant. foreachInSparseConstant( loc, rewriter, attr, op.getOrder().value_or(AffineMap()), - [&reduc, &rewriter, op](ArrayRef coords, Value v) mutable { + [&reduc, &rewriter, op](ArrayRef cvs, Value v) mutable { SmallVector args; - args.append(coords.begin(), coords.end()); + args.append(cvs.begin(), cvs.end()); args.push_back(v); args.append(reduc); // Clones the foreach op to get a copy of the loop body. 
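// [Editorial sketch -- illustration only, not part of the patch.] The
// position/coordinate converters registered above (and the storage-layout
// comments later in this patch) expose the per-level overhead arrays that
// this change renames from pointers/indices to positions/coordinates. For a
// CSR matrix (dense level 0, compressed level 1) those arrays look as
// follows; the concrete matrix is an assumption made for this illustration:
//
//   | 1 0 2 |   positions-1   = { 0, 2, 3 }  // row i's stored entries occupy
//   | 0 3 0 |   coordinates-1 = { 0, 2, 1 }  // [positions[i], positions[i+1])
//               values        = { 1, 2, 3 }
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const std::vector<uint64_t> positions = {0, 2, 3};
  const std::vector<uint64_t> coordinates = {0, 2, 1};
  const std::vector<double> values = {1.0, 2.0, 3.0};
  for (uint64_t i = 0; i + 1 < positions.size(); ++i)          // each row i
    for (uint64_t p = positions[i]; p < positions[i + 1]; ++p) // stored entry
      std::printf("a[%llu][%llu] = %g\n", (unsigned long long)i,
                  (unsigned long long)coordinates[p], values[p]);
  return 0;
}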
@@ -386,7 +386,7 @@ // Implement the sparse2sparse reshape as follows: // %tmp = bufferization.alloc_tensor : unordered COO // foreach srcCoords %srcTensor - // insert translateIndicesArray(srcCoords), %tmp + // insert reshapeCvs(srcCoords), %tmp // %t = sparse_tensor.cast %tmp Value nnz = rewriter.create(loc, srcTensor); RankedTensorType cooTp = getUnorderedCOOFromType(dstTp); @@ -398,18 +398,20 @@ ForeachOp foreachOp = rewriter.create( loc, srcTensor, cooBuffer, - [&](OpBuilder &builder, Location loc, ValueRange args, Value v, + [&](OpBuilder &builder, Location loc, ValueRange srcLcvs, Value v, ValueRange reduc) { - SmallVector srcIndices; - SmallVector dstIndices; - for (Dimension d = 0, dimRank = srcTp.getRank(); d < dimRank; d++) { + const Dimension dimRank = srcTp.getRank(); + SmallVector srcDcvs; + srcDcvs.reserve(dimRank); + for (Dimension d = 0; d < dimRank; d++) { // FIXME: `toStoredDim` is deprecated Level lvl = toStoredDim(encSrc, d); - srcIndices.push_back(args[lvl]); + srcDcvs.push_back(srcLcvs[lvl]); } - translateIndicesArray(builder, loc, op.getReassociationIndices(), - srcIndices, srcSizes, dstSizes, dstIndices); - auto t = builder.create(loc, v, reduc.front(), dstIndices); + SmallVector dstDcvs; + reshapeCvs(builder, loc, op.getReassociationIndices(), srcSizes, + srcDcvs, dstSizes, dstDcvs); + auto t = builder.create(loc, v, reduc.front(), dstDcvs); builder.create(loc, t); }); auto t = rewriter.create(loc, foreachOp.getResult(0), true); @@ -468,7 +470,7 @@ const Location loc = op.getLoc(); const auto dstTp = getSparseTensorType(op); const Dimension dimRank = dstTp.getDimRank(); - const Dimension conDim = op.getDimension().getZExtValue(); + const Dimension conDim = op.getDimension(); SmallVector sizes; concatSizesFromInputs(rewriter, sizes, loc, dstTp, op.getInputs(), conDim); @@ -523,12 +525,12 @@ // Create a view of the values buffer to match the unannotated dense // tensor. Value valuesBuffer = genToValues(rewriter, loc, dst); - Value idxBuffer = + Value dimCoords = genAlloca(rewriter, loc, dimRank, rewriter.getIndexType(), /*staticShape=*/true); annotatedDenseDst = dst; dst = reshapeValuesToLevels(rewriter, loc, encDst, sizes, valuesBuffer, - idxBuffer); + dimCoords); } } else { // TODO: Dense buffers should be allocated/deallocated via the callback @@ -546,16 +548,16 @@ // output tensor. foreachOp = rewriter.create( loc, input, initArgs, - [&](OpBuilder &builder, Location loc, ValueRange args, Value v, + [&](OpBuilder &builder, Location loc, ValueRange dcvs, Value v, ValueRange reduc) { - SmallVector indices(dstTp.getLvlRank()); + SmallVector dstLcvs(dstTp.getLvlRank()); for (Dimension d = 0; d < dimRank; d++) { - Value idx = args[d]; + Value crd = dcvs[d]; if (d == conDim) // Transform coordinates for the concatenating dim. 
- idx = builder.create(loc, idx, offset); + crd = builder.create(loc, crd, offset); // FIXME: `toStoredDim` is deprecated - indices[toStoredDim(encDst, d)] = idx; + dstLcvs[toStoredDim(encDst, d)] = crd; } if (encDst && !allDense) { Value cond = genIsNonzero(rewriter, loc, v); @@ -563,14 +565,14 @@ loc, TypeRange(reduc.front().getType()), cond, /*else*/ true); builder.setInsertionPointToStart(&ifOp.getThenRegion().front()); Value t = - builder.create(loc, v, reduc.front(), indices); + builder.create(loc, v, reduc.front(), dstLcvs); rewriter.create(loc, t); rewriter.setInsertionPointToStart(&ifOp.getElseRegion().front()); rewriter.create(loc, reduc.front()); rewriter.setInsertionPointAfter(ifOp); rewriter.create(loc, ifOp.getResult(0)); } else { - builder.create(loc, v, dst, indices); + builder.create(loc, v, dst, dstLcvs); builder.create(loc); } }); @@ -654,7 +656,7 @@ // To fill the COO tensor from a sparse constant in COO format: // for i in range(NNZ) // val = values[i] - // [i1,..,ik] = indices[i] + // [i1,..,ik] = coordinates[i] // t->add(val, [i1,..,ik], [p1,..,pk]) LogicalResult dense2SparseRewrite(ConvertOp op, PatternRewriter &rewriter) const { @@ -687,23 +689,23 @@ rewriter.create(loc, bufferTp, dynSizes).getResult(); auto foreachOp = rewriter.create( loc, src, buffer, - [&](OpBuilder &builder, Location loc, ValueRange indices, Value v, + [&](OpBuilder &builder, Location loc, ValueRange dcvs, Value v, ValueRange reduc) { Value input = reduc.front(); const Dimension dimRank = dstTp.getDimRank(); - SmallVector indicesArray(dimRank); + const Level lvlRank = dstTp.getLvlRank(); + SmallVector lcvs(lvlRank); for (Dimension d = 0; d < dimRank; d++) // FIXME: `toStoredDim` is deprecated - indicesArray[toStoredDim(encDst, d)] = indices[d]; + lcvs[toStoredDim(encDst, d)] = dcvs[d]; if (fromSparseConst) { - input = builder.create(loc, v, input, indicesArray); + input = builder.create(loc, v, input, lcvs); } else { Value cond = genIsNonzero(builder, loc, v); auto ifOp = builder.create( loc, TypeRange(input.getType()), cond, /*else*/ true); builder.setInsertionPointToStart(&ifOp.getThenRegion().front()); - Value insert = - builder.create(loc, v, input, indicesArray); + Value insert = builder.create(loc, v, input, lcvs); builder.create(loc, insert); builder.setInsertionPointToStart(&ifOp.getElseRegion().front()); builder.create(loc, input); @@ -728,7 +730,7 @@ // Handles sparse tensor to dense tensor conversion as follows: // dst = new dense tensor; // foreach elemment in src - // dst[elemment.indices] = element.value + // dst[element.coords] = element.value LogicalResult sparse2DenseRewrite(ConvertOp op, PatternRewriter &rewriter) const { Location loc = op->getLoc(); @@ -811,16 +813,15 @@ .getResult(); auto foreachOp = rewriter.create( loc, src, tmpCoo, - [&](OpBuilder &builder, Location loc, ValueRange args, Value v, + [&](OpBuilder &builder, Location loc, ValueRange dcvs, Value v, ValueRange reduc) { - SmallVector dstIndices(dstLvlRank); + SmallVector dstLcvs(dstLvlRank); for (Dimension d = 0; d < dimRank; d++) { // FIXME: `toStoredDim` is deprecated Level l = toStoredDim(encDst, d); - dstIndices[l] = args[d]; + dstLcvs[l] = dcvs[d]; } - auto t = - builder.create(loc, v, reduc.front(), dstIndices); + auto t = builder.create(loc, v, reduc.front(), dstLcvs); builder.create(loc, t); }); src = rewriter.create(loc, foreachOp.getResult(0), true); @@ -836,18 +837,17 @@ Value y = genToValues(rewriter, loc, src); const auto encSrc = srcTp.getEncoding(); // Sort the COO tensor so that its elements 
are ordered via increasing - // indices for the storage ordering of the dst tensor. Use SortCoo if the - // COO tensor has the same dim ordering as the dst tensor. + // coordinates for the storage ordering of the dst tensor. Use SortCoo + // if the COO tensor has the same ordering as the dst tensor. if (dimRank > 1 && srcTp.hasSameDimToLvlMap(dstTp)) { - MemRefType indTp = - get1DMemRefType(getIndexOverheadType(rewriter, encSrc), - /*withLayout=*/false); - Value xs = rewriter.create(loc, indTp, src); + MemRefType coordsTp = + get1DMemRefType(encSrc.getCrdType(), /*withLayout=*/false); + Value xs = rewriter.create(loc, coordsTp, src); rewriter.create( loc, nnz, xs, ValueRange{y}, rewriter.getIndexAttr(dimRank), rewriter.getIndexAttr(0), SparseTensorSortKind::HybridQuickSort); } else { - // Gather the indices-arrays in the dst tensor storage order. + // Gather the coordinates-arrays in the dst tensor storage order. SmallVector xs(dstLvlRank); const Level srcLvlRank = srcTp.getLvlRank(); for (Level srcLvl = 0; srcLvl < srcLvlRank; srcLvl++) { @@ -855,7 +855,8 @@ Dimension dim = toOrigDim(encSrc, srcLvl); // FIXME: `toStoredDim` is deprecated Level dstLvl = toStoredDim(encDst, dim); - xs[dstLvl] = genToIndices(rewriter, loc, src, srcLvl, /*cooStart=*/0); + xs[dstLvl] = + genToCoordinates(rewriter, loc, src, srcLvl, /*cooStart=*/0); } rewriter.create(loc, nnz, xs, ValueRange{y}, SparseTensorSortKind::HybridQuickSort); @@ -870,17 +871,17 @@ dynDstSizes, Value(), /*sizeHint=*/nnz, Attribute()) .getResult(); - SmallVector indices(dstLvlRank); + SmallVector dstLcvs(dstLvlRank); auto foreachOp = rewriter.create( loc, src, dst, - [&](OpBuilder &builder, Location loc, ValueRange args, Value v, + [&](OpBuilder &builder, Location loc, ValueRange dcvs, Value v, ValueRange reduc) { for (Dimension d = 0; d < dimRank; d++) { // FIXME: `toStoredDim` is deprecated Level l = toStoredDim(encDst, d); - indices[l] = args[d]; + dstLcvs[l] = dcvs[d]; } - auto t = builder.create(loc, v, reduc.front(), indices); + auto t = builder.create(loc, v, reduc.front(), dstLcvs); builder.create(loc, t); }); @@ -913,6 +914,7 @@ SmallVector reduc = op.getInitArgs(); const auto stt = getSparseTensorType(input); const Dimension dimRank = stt.getDimRank(); + const Level lvlRank = stt.getLvlRank(); // Special-case: for each over a sparse constant uses its own rewriting // rule. @@ -933,34 +935,32 @@ for (Dimension d = 0; d < dimRank; d++) { // TODO: provide utility function for loop sequences that only contains // one for loop? - Dimension ld = - op.getOrder() - ? op.getOrder()->getResult(d).cast().getPosition() - : d; - loopEmitter.enterNewLoopSeq(rewriter, loc, 0, static_cast(ld)); + const Level l = op.getOrder() ? op.getOrder()->getDimPosition(d) : d; + loopEmitter.enterNewLoopSeq(rewriter, loc, 0, static_cast(l)); // Note that reduc will be taken care of by loop emitter and get updated // in place. 
- loopEmitter.enterLoopOverTensorAtDim(rewriter, loc, 0, ld, reduc); + loopEmitter.enterLoopOverTensorAtDim(rewriter, loc, 0, l, reduc); } - SmallVector coords; - coords.reserve(dimRank); - loopEmitter.getCoordinateArray(coords); + SmallVector lcvs; + lcvs.reserve(lvlRank); + loopEmitter.getCoordinateArray(lcvs); if (op.getOrder()) { - SmallVector tmp = coords; // keep a copy + // FIXME: There is some dim/lvl confusion here since `dimRank != lvlRank` + SmallVector dcvs = lcvs; // keep a copy for (Dimension d = 0; d < dimRank; d++) { auto l = op.getOrder()->getDimPosition(d); - coords[l] = tmp[d]; + lcvs[l] = dcvs[d]; } } Value vals = loopEmitter.getValBuffer()[0]; Value pidx = loopEmitter.getPidxs()[0].back(); - // Loads the value from sparse tensor using pointer index; - // loads the value from dense tensor using coordinate array. + // Loads the value from sparse tensor using position-index; + // loads the value from dense tensor using coords. Value val = enc ? rewriter.create(loc, vals, pidx) - : rewriter.create(loc, vals, coords); + : rewriter.create(loc, vals, lcvs); // 2. Inline the block in the foreach operator. Block *srcBlock = op.getBody(); @@ -969,8 +969,8 @@ SmallVector args; for (Dimension d = 0; d < dimRank; d++) { // FIXME: `toStoredDim` is deprecated - Value actual = coords[toStoredDim(enc, d)]; - args.push_back(actual); + Value dimCrd = lcvs[toStoredDim(enc, d)]; + args.push_back(dimCrd); } // Remap value. args.push_back(val); @@ -1019,7 +1019,7 @@ // Implement the NewOp as follows: // %orderedCoo = sparse_tensor.new %filename - // %t = sparse_tensor.ConvertOp %orderedCoo + // %t = sparse_tensor.convert %orderedCoo RankedTensorType cooTp = getCOOFromTypeWithOrdering(dstTp, encDst.getDimOrdering(), true); Value cooTensor = rewriter.create(loc, cooTp, op.getSource()); @@ -1043,7 +1043,7 @@ Value src = op.getTensor(); Value nnz = rewriter.create(loc, src); - // Allocate a temporary buffer for storing dimension sizes and indices. + // Allocate a temporary buffer for storing dimension-sizes/coordinates. const auto srcTp = getSparseTensorType(src); const Dimension dimRank = srcTp.getDimRank(); Type indexTp = rewriter.getIndexType(); @@ -1068,7 +1068,7 @@ createFuncCall(rewriter, loc, "outSparseTensorWriterMetaData", {}, {writer, rankValue, nnz, dimSizes}, EmitCInterface::On); - Value indices = dimSizes; // Reuse the dimSizes buffer for indices. + Value dimCoords = dimSizes; // Reuse the dimSizes buffer for dimCoords. Type eltTp = srcTp.getElementType(); SmallString<29> outNextFuncName{"outSparseTensorWriterNext", primaryTypeFunctionSuffix(eltTp)}; @@ -1077,14 +1077,14 @@ // For each element in the source tensor, output the element. 
rewriter.create( loc, src, std::nullopt, - [&](OpBuilder &builder, Location loc, ValueRange args, Value v, + [&](OpBuilder &builder, Location loc, ValueRange dcvs, Value v, ValueRange reduc) { for (Dimension d = 0; d < dimRank; d++) { - rewriter.create(loc, args[d], indices, + rewriter.create(loc, dcvs[d], dimCoords, constantIndex(builder, loc, d)); } rewriter.create(loc, v, value); - SmallVector operands{writer, rankValue, indices, value}; + SmallVector operands{writer, rankValue, dimCoords, value}; FlatSymbolRefAttr fn = getFunc(module, outNextFuncName, {}, operands, EmitCInterface::On); builder.create(loc, TypeRange(), fn, operands); diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.h b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.h --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.h +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.h @@ -38,38 +38,38 @@ // ; if dense: // // ; if compresed: -// memref pointers-l ; pointers for sparse level l -// memref indices-l ; indices for sparse level l +// memref positions-l ; positions for sparse level l +// memref coordinates-l ; coordinates for sparse level l // ; if singleton: -// memref indices-l ; indices for singleton level l +// memref coordinates-l ; coordinates for singleton level l // // memref values ; values // // struct sparse_tensor.storage_specifier { -// array dimSizes ; sizes for each dimension -// array memSizes; ; sizes for each data memref +// array lvlSizes ; sizes/cardinalities for each level +// array memSizes; ; sizes/lengths for each data memref // } // }; // // In addition, for a "trailing COO region", defined as a compressed level -// followed by one ore more singleton levels, the default SOA storage that +// followed by one or more singleton levels, the default SOA storage that // is inherent to the TACO format is optimized into an AOS storage where -// all indices of a stored element appear consecutively. In such cases, -// a special operation (sparse_tensor.indices_buffer) must be used to -// access the AOS index array. In the code below, the method `getCOOStart` +// all coordinates of a stored element appear consecutively. In such cases, +// a special operation (sparse_tensor.coordinates_buffer) must be used to +// access the AOS coordinates array. In the code below, the method `getCOOStart` // is used to find the start of the "trailing COO region". // // Examples. 
// // #CSR storage of 2-dim matrix yields -// memref ; pointers-1 -// memref ; indices-1 +// memref ; positions-1 +// memref ; coordinates-1 // memref ; values // struct<(array<2 x i64>, array<3 x i64>)>) ; lvl0, lvl1, 3xsizes // // #COO storage of 2-dim matrix yields -// memref, ; pointers-0, essentially [0,sz] -// memref ; AOS index storage +// memref, ; positions-0, essentially [0,sz] +// memref ; AOS coordinates storage // memref ; values // struct<(array<2 x i64>, array<3 x i64>)>) ; lvl0, lvl1, 3xsizes // @@ -77,15 +77,15 @@ enum class SparseTensorFieldKind : uint32_t { StorageSpec = 0, - PtrMemRef = 1, - IdxMemRef = 2, + PosMemRef = 1, + CrdMemRef = 2, ValMemRef = 3 }; -static_assert(static_cast(SparseTensorFieldKind::PtrMemRef) == - static_cast(StorageSpecifierKind::PtrMemSize)); -static_assert(static_cast(SparseTensorFieldKind::IdxMemRef) == - static_cast(StorageSpecifierKind::IdxMemSize)); +static_assert(static_cast(SparseTensorFieldKind::PosMemRef) == + static_cast(StorageSpecifierKind::PosMemSize)); +static_assert(static_cast(SparseTensorFieldKind::CrdMemRef) == + static_cast(StorageSpecifierKind::CrdMemSize)); static_assert(static_cast(SparseTensorFieldKind::ValMemRef) == static_cast(StorageSpecifierKind::ValMemSize)); @@ -98,13 +98,13 @@ // `FieldIndex` for their return type, via the same reasoning for why // `Dimension`/`Level` are used both for identifiers and ranks. -/// For each field that will be allocated for the given sparse tensor encoding, -/// calls the callback with the corresponding field index, field kind, dimension -/// (for sparse tensor level memrefs) and dimlevelType. -/// The field index always starts with zero and increments by one between two -/// callback invocations. -/// Ideally, all other methods should rely on this function to query a sparse -/// tensor fields instead of relying on ad-hoc index computation. +/// For each field that will be allocated for the given sparse tensor +/// encoding, calls the callback with the corresponding field index, +/// field kind, level, and level-type (the last two are only for level +/// memrefs). The field index always starts with zero and increments +/// by one between each callback invocation. Ideally, all other methods +/// should rely on this function to query a sparse tensor fields instead +/// of relying on ad-hoc index computation. void foreachFieldInSparseTensor( SparseTensorEncodingAttr, llvm::function_ref(kind); } @@ -177,7 +177,7 @@ std::optional lvl) const { FieldIndex fieldIdx = -1u; unsigned stride = 1; - if (kind == SparseTensorFieldKind::IdxMemRef) { + if (kind == SparseTensorFieldKind::CrdMemRef) { assert(lvl.has_value()); const Level cooStart = getCOOStart(enc); const Level lvlRank = enc.getLvlRank(); @@ -206,40 +206,22 @@ SparseTensorEncodingAttr enc; }; -// FIXME: Functions/methods marked with [CLARIFY_DIM_LVL] require -// clarification on whether their "dim" argument should actually -// be `Level` or `Dimension`. In particular, it's unclear whether -// `StorageSpecifierKind::DimSize` actually means to refer to dimension-sizes -// vs level-sizes. If it's the latter (which seems unlikely), then all the -// noted functions should use the `Level` type alias. 
If it's the former, -// then the functions which specifically use `DimSize` should be changed -// to use the `Dimension` type alias; however, the functions which take -// an unknown `StorageSpecifierKind` must be adjusted to ensure that they -// correctly interpret the "dim" argument since the interpretation depends -// on the `StorageSpecifierKind` value. Since wrengr couldn't figure this -// out from context, Peiming or Bixia should review these functions and -// update them as appropriate. - class SparseTensorSpecifier { public: explicit SparseTensorSpecifier(Value specifier) : specifier(cast>(specifier)) {} - // Undef value for dimension sizes, all zero value for memory sizes. + // Undef value for level-sizes, all zero values for memory-sizes. static Value getInitValue(OpBuilder &builder, Location loc, SparseTensorType stt); /*implicit*/ operator Value() { return specifier; } - // FIXME: see note [CLARIFY_DIM_LVL]. Value getSpecifierField(OpBuilder &builder, Location loc, - StorageSpecifierKind kind, - std::optional dim); + StorageSpecifierKind kind, std::optional lvl); - // FIXME: see note [CLARIFY_DIM_LVL]. void setSpecifierField(OpBuilder &builder, Location loc, Value v, - StorageSpecifierKind kind, - std::optional dim); + StorageSpecifierKind kind, std::optional lvl); private: TypedValue specifier; @@ -280,21 +262,19 @@ Value getSpecifier() const { return fields.back(); } - // FIXME: see note [CLARIFY_DIM_LVL]. Value getSpecifierField(OpBuilder &builder, Location loc, StorageSpecifierKind kind, - std::optional dim) const { + std::optional lvl) const { SparseTensorSpecifier md(fields.back()); - return md.getSpecifierField(builder, loc, kind, dim); + return md.getSpecifierField(builder, loc, kind, lvl); } - // FIXME: see note [CLARIFY_DIM_LVL]. 
- Value getDimSize(OpBuilder &builder, Location loc, unsigned dim) const { - return getSpecifierField(builder, loc, StorageSpecifierKind::DimSize, dim); + Value getLvlSize(OpBuilder &builder, Location loc, Level lvl) const { + return getSpecifierField(builder, loc, StorageSpecifierKind::LvlSize, lvl); } - Value getPtrMemRef(Level lvl) const { - return getMemRefField(SparseTensorFieldKind::PtrMemRef, lvl); + Value getPosMemRef(Level lvl) const { + return getMemRefField(SparseTensorFieldKind::PosMemRef, lvl); } Value getValMemRef() const { @@ -311,13 +291,13 @@ return getField(fidx); } - Value getPtrMemSize(OpBuilder &builder, Location loc, Level lvl) const { - return getSpecifierField(builder, loc, StorageSpecifierKind::PtrMemSize, + Value getPosMemSize(OpBuilder &builder, Location loc, Level lvl) const { + return getSpecifierField(builder, loc, StorageSpecifierKind::PosMemSize, lvl); } - Value getIdxMemSize(OpBuilder &builder, Location loc, Level lvl) const { - return getSpecifierField(builder, loc, StorageSpecifierKind::IdxMemSize, + Value getCrdMemSize(OpBuilder &builder, Location loc, Level lvl) const { + return getSpecifierField(builder, loc, StorageSpecifierKind::CrdMemSize, lvl); } @@ -341,15 +321,15 @@ return fields.drop_back(); } - std::pair getIdxMemRefIndexAndStride(Level lvl) const { + std::pair getCrdMemRefIndexAndStride(Level lvl) const { StorageLayout layout(rType.getEncoding()); - return layout.getFieldIndexAndStride(SparseTensorFieldKind::IdxMemRef, lvl); + return layout.getFieldIndexAndStride(SparseTensorFieldKind::CrdMemRef, lvl); } Value getAOSMemRef() const { const Level cooStart = getCOOStart(rType.getEncoding()); assert(cooStart < rType.getLvlRank()); - return getMemRefField(SparseTensorFieldKind::IdxMemRef, cooStart); + return getMemRefField(SparseTensorFieldKind::CrdMemRef, cooStart); } RankedTensorType getRankedTensorType() const { return rType; } @@ -366,7 +346,7 @@ SparseTensorDescriptor(SparseTensorType stt, ValueRange buffers) : SparseTensorDescriptorImpl(stt, buffers) {} - Value getIdxMemRefOrView(OpBuilder &builder, Location loc, Level lvl) const; + Value getCrdMemRefOrView(OpBuilder &builder, Location loc, Level lvl) const; }; /// Uses SmallVectorImpl & for mutable descriptors. @@ -409,12 +389,11 @@ fields[fidx] = v; } - // FIXME: see note [CLARIFY_DIM_LVL]. void setSpecifierField(OpBuilder &builder, Location loc, - StorageSpecifierKind kind, std::optional dim, + StorageSpecifierKind kind, std::optional lvl, Value v) { SparseTensorSpecifier md(fields.back()); - md.setSpecifierField(builder, loc, v, kind, dim); + md.setSpecifierField(builder, loc, v, kind, lvl); fields.back() = md; } @@ -423,17 +402,16 @@ std::nullopt, v); } - void setIdxMemSize(OpBuilder &builder, Location loc, Level lvl, Value v) { - setSpecifierField(builder, loc, StorageSpecifierKind::IdxMemSize, lvl, v); + void setCrdMemSize(OpBuilder &builder, Location loc, Level lvl, Value v) { + setSpecifierField(builder, loc, StorageSpecifierKind::CrdMemSize, lvl, v); } - void setPtrMemSize(OpBuilder &builder, Location loc, Level lvl, Value v) { - setSpecifierField(builder, loc, StorageSpecifierKind::PtrMemSize, lvl, v); + void setPosMemSize(OpBuilder &builder, Location loc, Level lvl, Value v) { + setSpecifierField(builder, loc, StorageSpecifierKind::PosMemSize, lvl, v); } - // FIXME: see note [CLARIFY_DIM_LVL]. 
- void setDimSize(OpBuilder &builder, Location loc, unsigned dim, Value v) { - setSpecifierField(builder, loc, StorageSpecifierKind::DimSize, dim, v); + void setLvlSize(OpBuilder &builder, Location loc, Level lvl, Value v) { + setSpecifierField(builder, loc, StorageSpecifierKind::LvlSize, lvl, v); } }; diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorStorageLayout.cpp @@ -22,11 +22,11 @@ // Private helper methods. //===----------------------------------------------------------------------===// -static IntegerAttr fromOptionalInt(MLIRContext *ctx, - std::optional dim) { - if (!dim) - return nullptr; - return IntegerAttr::get(IndexType::get(ctx), dim.value()); +/// Constructs a nullable `LevelAttr` from the `std::optional`. +static IntegerAttr optionalLevelAttr(MLIRContext *ctx, + std::optional lvl) { + return lvl ? IntegerAttr::get(IndexType::get(ctx), lvl.value()) + : IntegerAttr(); } // This is only ever called from `SparseTensorTypeToBufferConverter`, @@ -82,36 +82,37 @@ Value SparseTensorSpecifier::getSpecifierField(OpBuilder &builder, Location loc, StorageSpecifierKind kind, - std::optional dim) { + std::optional lvl) { return builder.create( - loc, specifier, kind, fromOptionalInt(specifier.getContext(), dim)); + loc, specifier, kind, optionalLevelAttr(specifier.getContext(), lvl)); } void SparseTensorSpecifier::setSpecifierField(OpBuilder &builder, Location loc, Value v, StorageSpecifierKind kind, - std::optional dim) { + std::optional lvl) { + // TODO: make `v` have type `TypedValue` instead. assert(v.getType().isIndex()); specifier = builder.create( - loc, specifier, kind, fromOptionalInt(specifier.getContext(), dim), v); + loc, specifier, kind, optionalLevelAttr(specifier.getContext(), lvl), v); } //===----------------------------------------------------------------------===// // SparseTensorDescriptor methods. //===----------------------------------------------------------------------===// -Value sparse_tensor::SparseTensorDescriptor::getIdxMemRefOrView( - OpBuilder &builder, Location loc, Level idxLvl) const { +Value sparse_tensor::SparseTensorDescriptor::getCrdMemRefOrView( + OpBuilder &builder, Location loc, Level lvl) const { const Level cooStart = getCOOStart(rType.getEncoding()); - if (idxLvl < cooStart) - return getMemRefField(SparseTensorFieldKind::IdxMemRef, idxLvl); + if (lvl < cooStart) + return getMemRefField(SparseTensorFieldKind::CrdMemRef, lvl); Value stride = constantIndex(builder, loc, rType.getLvlRank() - cooStart); - Value size = getIdxMemSize(builder, loc, cooStart); + Value size = getCrdMemSize(builder, loc, cooStart); size = builder.create(loc, size, stride); return builder.create( - loc, getMemRefField(SparseTensorFieldKind::IdxMemRef, cooStart), - /*offset=*/ValueRange{constantIndex(builder, loc, idxLvl - cooStart)}, + loc, getMemRefField(SparseTensorFieldKind::CrdMemRef, cooStart), + /*offset=*/ValueRange{constantIndex(builder, loc, lvl - cooStart)}, /*size=*/ValueRange{size}, /*step=*/ValueRange{stride}); } @@ -129,8 +130,8 @@ callback) { assert(enc); -#define RETURN_ON_FALSE(idx, kind, dim, dlt) \ - if (!(callback(idx, kind, dim, dlt))) \ +#define RETURN_ON_FALSE(fidx, kind, dim, dlt) \ + if (!(callback(fidx, kind, dim, dlt))) \ return; const auto lvlTypes = enc.getDimLevelType(); @@ -145,10 +146,10 @@ // order. 
const auto dlt = lvlTypes[l]; if (isCompressedDLT(dlt)) { - RETURN_ON_FALSE(fieldIdx++, SparseTensorFieldKind::PtrMemRef, l, dlt); - RETURN_ON_FALSE(fieldIdx++, SparseTensorFieldKind::IdxMemRef, l, dlt); + RETURN_ON_FALSE(fieldIdx++, SparseTensorFieldKind::PosMemRef, l, dlt); + RETURN_ON_FALSE(fieldIdx++, SparseTensorFieldKind::CrdMemRef, l, dlt); } else if (isSingletonDLT(dlt)) { - RETURN_ON_FALSE(fieldIdx++, SparseTensorFieldKind::IdxMemRef, l, dlt); + RETURN_ON_FALSE(fieldIdx++, SparseTensorFieldKind::CrdMemRef, l, dlt); } else { assert(isDenseDLT(dlt)); // no fields } @@ -171,30 +172,30 @@ callback) { assert(stt.hasEncoding()); // Construct the basic types. - Type idxType = stt.getIndexType(); - Type ptrType = stt.getPointerType(); - Type eltType = stt.getElementType(); - - Type metaDataType = StorageSpecifierType::get(stt.getEncoding()); - // memref pointers - Type ptrMemType = MemRefType::get({ShapedType::kDynamic}, ptrType); - // memref indices - Type idxMemType = MemRefType::get({ShapedType::kDynamic}, idxType); + const Type crdType = stt.getCrdType(); + const Type posType = stt.getPosType(); + const Type eltType = stt.getElementType(); + + const Type metaDataType = StorageSpecifierType::get(stt.getEncoding()); + // memref positions + const Type posMemType = MemRefType::get({ShapedType::kDynamic}, posType); + // memref coordinates + const Type crdMemType = MemRefType::get({ShapedType::kDynamic}, crdType); // memref values - Type valMemType = MemRefType::get({ShapedType::kDynamic}, eltType); + const Type valMemType = MemRefType::get({ShapedType::kDynamic}, eltType); foreachFieldInSparseTensor( stt.getEncoding(), - [metaDataType, ptrMemType, idxMemType, valMemType, + [metaDataType, posMemType, crdMemType, valMemType, callback](FieldIndex fieldIdx, SparseTensorFieldKind fieldKind, Level lvl, DimLevelType dlt) -> bool { switch (fieldKind) { case SparseTensorFieldKind::StorageSpec: return callback(metaDataType, fieldIdx, fieldKind, lvl, dlt); - case SparseTensorFieldKind::PtrMemRef: - return callback(ptrMemType, fieldIdx, fieldKind, lvl, dlt); - case SparseTensorFieldKind::IdxMemRef: - return callback(idxMemType, fieldIdx, fieldKind, lvl, dlt); + case SparseTensorFieldKind::PosMemRef: + return callback(posMemType, fieldIdx, fieldKind, lvl, dlt); + case SparseTensorFieldKind::CrdMemRef: + return callback(crdMemType, fieldIdx, fieldKind, lvl, dlt); case SparseTensorFieldKind::ValMemRef: return callback(valMemType, fieldIdx, fieldKind, lvl, dlt); }; diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp @@ -44,13 +44,6 @@ bool enableSIMDIndex32; }; -/// Helper to test for given index value. -static bool isIntValue(Value val, int64_t idx) { - if (auto ival = getConstantIntValue(val)) - return *ival == idx; - return false; -} - /// Helper test for invariant value (defined outside given block). static bool isInvariantValue(Value val, Block *block) { return val.getDefiningOp() && val.getDefiningOp()->getBlock() != block; @@ -67,9 +60,9 @@ return VectorType::get(vl.vectorLength, etp, numScalableDims); } -/// Constructs vector type from pointer. -static VectorType vectorType(VL vl, Value ptr) { - return vectorType(vl, getMemRefType(ptr).getElementType()); +/// Constructs vector type from a memref value. 
+static VectorType vectorType(VL vl, Value mem) { + return vectorType(vl, getMemRefType(mem).getElementType()); } /// Constructs vector iteration mask. @@ -116,17 +109,17 @@ /// that the sparse compiler can only generate indirect loads in /// the last index, i.e. back(). static Value genVectorLoad(PatternRewriter &rewriter, Location loc, VL vl, - Value ptr, ArrayRef idxs, Value vmask) { - VectorType vtp = vectorType(vl, ptr); + Value mem, ArrayRef idxs, Value vmask) { + VectorType vtp = vectorType(vl, mem); Value pass = constantZero(rewriter, loc, vtp); if (idxs.back().getType().isa()) { SmallVector scalarArgs(idxs.begin(), idxs.end()); Value indexVec = idxs.back(); scalarArgs.back() = constantIndex(rewriter, loc, 0); - return rewriter.create(loc, vtp, ptr, scalarArgs, + return rewriter.create(loc, vtp, mem, scalarArgs, indexVec, vmask, pass); } - return rewriter.create(loc, vtp, ptr, idxs, vmask, + return rewriter.create(loc, vtp, mem, idxs, vmask, pass); } @@ -134,17 +127,17 @@ /// where 'lo' denotes the current index and 'hi = lo + vl - 1'. Note /// that the sparse compiler can only generate indirect stores in /// the last index, i.e. back(). -static void genVectorStore(PatternRewriter &rewriter, Location loc, Value ptr, +static void genVectorStore(PatternRewriter &rewriter, Location loc, Value mem, ArrayRef idxs, Value vmask, Value rhs) { if (idxs.back().getType().isa()) { SmallVector scalarArgs(idxs.begin(), idxs.end()); Value indexVec = idxs.back(); scalarArgs.back() = constantIndex(rewriter, loc, 0); - rewriter.create(loc, ptr, scalarArgs, indexVec, vmask, + rewriter.create(loc, mem, scalarArgs, indexVec, vmask, rhs); return; } - rewriter.create(loc, ptr, idxs, vmask, rhs); + rewriter.create(loc, mem, idxs, vmask, rhs); } /// Detects a vectorizable reduction operations and returns the @@ -233,9 +226,9 @@ /// See https://llvm.org/docs/GetElementPtr.html for some background on /// the complications described below. /// -/// We need to generate a pointer/index load from the sparse storage scheme. -/// Narrower data types need to be zero extended before casting the value -/// into the index type used for looping and indexing. +/// We need to generate a position/coordinate load from the sparse storage +/// scheme. Narrower data types need to be zero extended before casting +/// the value into the `index` type used for looping and indexing. /// /// For the scalar case, subscripts simply zero extend narrower indices /// into 64-bit values before casting to an index type without a performance @@ -416,8 +409,8 @@ } // Proper load operations. These are either values involved in the // actual computation, such as a[i] = b[i] becomes a[lo:hi] = b[lo:hi], - // or index values inside the computation that are now fetched from - // the sparse storage index arrays, such as a[i] = i becomes + // or coordinate values inside the computation that are now fetched from + // the sparse storage coordinates arrays, such as a[i] = i becomes // a[lo:hi] = ind[lo:hi], where 'lo' denotes the current index // and 'hi = lo + vl - 1'. if (auto load = dyn_cast(def)) { @@ -619,7 +612,7 @@ // Check for single block, unit-stride for-loop that is generated by // sparse compiler, which means no data dependence analysis is required, // and its loop-body is very restricted in form. 
- if (!op.getRegion().hasOneBlock() || !isIntValue(op.getStep(), 1) || + if (!op.getRegion().hasOneBlock() || !isConstantIntValue(op.getStep(), 1) || !op->hasAttr(LoopEmitter::getLoopEmitterLoopAttrName())) return failure(); // Analyze (!codegen) and rewrite (codegen) loop-body. diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp @@ -450,12 +450,12 @@ } } -/// Computes a topologically sorted iteration graph for the linalg -/// operation. Ensures all tensors are visited in natural index order. This -/// is essential for sparse storage formats since these only support access -/// along fixed dimensions. Even for dense storage formats, however, the -/// natural index order yields innermost unit-stride access with better -/// spatial locality. +/// Computes a topologically sorted iteration graph for the linalg operation. +/// Ensures all tensors are visited in natural coordinate order. This is +/// essential for sparse storage formats since these only support access +/// along fixed levels. Even for dense storage formats, however, the natural +/// coordinate order yields innermost unit-stride access with better spatial +/// locality. static bool computeIterationGraph(CodegenEnv &env, unsigned mask, OpOperand *skip = nullptr) { // Set up an n x n from/to adjacency matrix of the iteration graph @@ -605,6 +605,9 @@ } /// Generates index for load/store on sparse tensor. +// FIXME: It's not entirely clear what "index" means here (i.e., is it +// a "coordinate", or "Ldx", or what). So the function should be renamed +// and/or the documentation expanded in order to clarify. static Value genIndex(CodegenEnv &env, OpOperand *t) { auto map = env.op().getMatchingIndexingMap(t); const auto stt = getSparseTensorType(t->get()); @@ -644,7 +647,7 @@ OpOperand *t) { linalg::GenericOp op = env.op(); Location loc = op.getLoc(); - // Direct lexicographic index order, tensor loads as zero. + // Direct lexicographic coordinate order, tensor loads as zero. if (!env.isExpand()) { Type tp = getElementTypeOrSelf(t->get().getType()); return constantZero(builder, loc, tp); @@ -660,7 +663,7 @@ linalg::GenericOp op = env.op(); Location loc = op.getLoc(); Value identity = env.getCustomRedId(); - // Direct lexicographic index order, tensor loads as identity. + // Direct lexicographic coordinate order, tensor loads as identity. if (!env.isExpand()) return identity; // Load from expanded access pattern if filled, identity otherwise. @@ -677,9 +680,12 @@ Value rhs) { linalg::GenericOp op = env.op(); Location loc = op.getLoc(); - // Direct insertion in lexicographic index order. + // Direct insertion in lexicographic coordinate order. if (!env.isExpand()) { unsigned rank = op.getRank(t); + // FIXME: It's not entirely clear what "indices" means here (i.e., + // are they "coordinates"? and if so, then are they level-coords or + // dim-coords?) SmallVector indices; for (unsigned i = 0; i < rank; i++) { assert(env.emitter().getLoopIV(i)); @@ -822,11 +828,6 @@ return env.exp(exp).val; } -/// Generates an index value. -inline static Value genIndexValue(CodegenEnv &env, unsigned idx) { - return env.getLoopIdxValue(idx); -} - /// Semi-ring branches are simply inlined by the sparse compiler. 
Prior /// analysis has verified that all computations are "local" to the inlined /// branch or otherwise invariantly defined outside the loop nest, with the @@ -836,7 +837,7 @@ Value e, unsigned ldx) { if (Operation *def = e.getDefiningOp()) { if (auto indexOp = dyn_cast(def)) - return genIndexValue(env, indexOp.getDim()); + return env.getLoopIdxValue(indexOp.getDim()); if (def->getBlock() == block) { for (unsigned i = 0, n = def->getNumOperands(); i < n; i++) { rewriter.updateRootInPlace(def, [&]() { @@ -862,7 +863,7 @@ if (env.exp(exp).kind == Kind::kInvariant) return genInvariantValue(env, exp); if (env.exp(exp).kind == Kind::kIndex) - return genIndexValue(env, env.exp(exp).index); + return env.getLoopIdxValue(env.exp(exp).index); if (env.exp(exp).kind == Kind::kReduce) env.startCustomReduc(exp); // enter custom @@ -1613,8 +1614,8 @@ auto dstEnc = SparseTensorEncodingAttr::get( getContext(), srcEnc.getDimLevelType(), permute(env, env.op().getMatchingIndexingMap(t)), // new order - srcEnc.getHigherOrdering(), srcEnc.getPointerBitWidth(), - srcEnc.getIndexBitWidth()); + srcEnc.getHigherOrdering(), srcEnc.getPosWidth(), + srcEnc.getCrdWidth()); auto dstTp = RankedTensorType::get(srcTp.getShape(), srcTp.getElementType(), dstEnc); auto convert = rewriter.create(tval.getLoc(), dstTp, tval); diff --git a/mlir/lib/ExecutionEngine/SparseTensor/NNZ.cpp b/mlir/lib/ExecutionEngine/SparseTensor/NNZ.cpp --- a/mlir/lib/ExecutionEngine/SparseTensor/NNZ.cpp +++ b/mlir/lib/ExecutionEngine/SparseTensor/NNZ.cpp @@ -53,26 +53,26 @@ } } -void SparseTensorNNZ::forallIndices(uint64_t stopLvl, - SparseTensorNNZ::NNZConsumer yield) const { +void SparseTensorNNZ::forallCoords(uint64_t stopLvl, + SparseTensorNNZ::NNZConsumer yield) const { assert(stopLvl < getLvlRank() && "Level out of bounds"); assert(isCompressedDLT(lvlTypes[stopLvl]) && "Cannot look up non-compressed levels"); - forallIndices(yield, stopLvl, 0, 0); + forallCoords(yield, stopLvl, 0, 0); } -void SparseTensorNNZ::add(const std::vector &lvlInd) { +void SparseTensorNNZ::add(const std::vector &lvlCoords) { uint64_t parentPos = 0; for (uint64_t l = 0, lvlrank = getLvlRank(); l < lvlrank; ++l) { if (isCompressedDLT(lvlTypes[l])) nnz[l][parentPos]++; - parentPos = parentPos * lvlSizes[l] + lvlInd[l]; + parentPos = parentPos * lvlSizes[l] + lvlCoords[l]; } } -void SparseTensorNNZ::forallIndices(SparseTensorNNZ::NNZConsumer yield, - uint64_t stopLvl, uint64_t parentPos, - uint64_t l) const { +void SparseTensorNNZ::forallCoords(SparseTensorNNZ::NNZConsumer yield, + uint64_t stopLvl, uint64_t parentPos, + uint64_t l) const { assert(l <= stopLvl); if (l == stopLvl) { assert(parentPos < nnz[l].size() && "Cursor is out of range"); @@ -81,6 +81,6 @@ const uint64_t sz = lvlSizes[l]; const uint64_t pstart = parentPos * sz; for (uint64_t i = 0; i < sz; ++i) - forallIndices(yield, stopLvl, pstart + i, l + 1); + forallCoords(yield, stopLvl, pstart + i, l + 1); } } diff --git a/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp b/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp --- a/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp +++ b/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp @@ -74,19 +74,19 @@ MLIR_SPARSETENSOR_FOREVERY_V(IMPL_NEWENUMERATOR) #undef IMPL_NEWENUMERATOR -#define IMPL_GETPOINTERS(PNAME, P) \ - void SparseTensorStorageBase::getPointers(std::vector
<P>
**, uint64_t) { \ - FATAL_PIV("getPointers" #PNAME); \ +#define IMPL_GETPOSITIONS(PNAME, P) \ + void SparseTensorStorageBase::getPositions(std::vector
<P>
**, uint64_t) { \ + FATAL_PIV("getPositions" #PNAME); \ } -MLIR_SPARSETENSOR_FOREVERY_FIXED_O(IMPL_GETPOINTERS) -#undef IMPL_GETPOINTERS +MLIR_SPARSETENSOR_FOREVERY_FIXED_O(IMPL_GETPOSITIONS) +#undef IMPL_GETPOSITIONS -#define IMPL_GETINDICES(INAME, I) \ - void SparseTensorStorageBase::getIndices(std::vector **, uint64_t) { \ - FATAL_PIV("getIndices" #INAME); \ +#define IMPL_GETCOORDINATES(CNAME, C) \ + void SparseTensorStorageBase::getCoordinates(std::vector **, uint64_t) { \ + FATAL_PIV("getCoordinates" #CNAME); \ } -MLIR_SPARSETENSOR_FOREVERY_FIXED_O(IMPL_GETINDICES) -#undef IMPL_GETINDICES +MLIR_SPARSETENSOR_FOREVERY_FIXED_O(IMPL_GETCOORDINATES) +#undef IMPL_GETCOORDINATES #define IMPL_GETVALUES(VNAME, V) \ void SparseTensorStorageBase::getValues(std::vector **) { \ diff --git a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp --- a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp +++ b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp @@ -17,7 +17,7 @@ // The following memory-resident sparse storage schemes are supported: // // (a) A coordinate scheme for temporarily storing and lexicographically -// sorting a sparse tensor by index (SparseTensorCOO). +// sorting a sparse tensor by coordinate (SparseTensorCOO). // // (b) A "one-size-fits-all" sparse tensor storage scheme defined by // per-dimension sparse/dense annnotations together with a dimension @@ -126,11 +126,11 @@ /// `dim2lvl` argument must be a permutation. /// Used by `IMPL_CONVERTTOMLIRSPARSETENSOR`. // -// TODO: generalize beyond 64-bit indices. +// TODO: generalize beyond 64-bit overhead types. template static SparseTensorStorage * toMLIRSparseTensor(uint64_t rank, uint64_t nse, const uint64_t *dimSizes, - const V *values, const uint64_t *dimIndices, + const V *values, const uint64_t *dimCoordinates, const uint64_t *dim2lvl, const DimLevelType *lvlTypes) { #ifndef NDEBUG // Verify that the sparsity values are supported. @@ -143,22 +143,22 @@ #endif // Verify that `dim2lvl` is a permutation of `[0..(rank-1)]`. // NOTE: The construction of `lvlSizes` and `lvl2dim` don't generalize - // to arbitrary `dim2lvl` mappings. Whereas constructing `lvlInd` from - // `dimInd` does (though the details would have to be updated, just + // to arbitrary `dim2lvl` mappings. Whereas constructing `lvlCoords` from + // `dimCoords` does (though the details would have to be updated, just // like for `IMPL_ADDELT`). - detail::PermutationRef d2l(rank, dim2lvl); + const detail::PermutationRef d2l(rank, dim2lvl); // Convert external format to internal COO. - auto lvlSizes = d2l.pushforward(rank, dimSizes); + const auto lvlSizes = d2l.pushforward(rank, dimSizes); auto *lvlCOO = new SparseTensorCOO(lvlSizes, nse); - std::vector lvlInd(rank); - const uint64_t *dimInd = dimIndices; + std::vector lvlCoords(rank); + const uint64_t *dimCoords = dimCoordinates; for (uint64_t i = 0; i < nse; ++i) { - d2l.pushforward(rank, dimInd, lvlInd.data()); - lvlCOO->add(lvlInd, values[i]); - dimInd += rank; + d2l.pushforward(rank, dimCoords, lvlCoords.data()); + lvlCOO->add(lvlCoords, values[i]); + dimCoords += rank; } // Return sparse tensor storage format as opaque pointer. - auto lvl2dim = d2l.inverse(); + const auto lvl2dim = d2l.inverse(); auto *tensor = SparseTensorStorage::newFromCOO( rank, dimSizes, rank, lvlTypes, lvl2dim.data(), *lvlCOO); delete lvlCOO; @@ -172,15 +172,15 @@ // SparseTensorCOO, then to the output. We may want to reduce the number // of copies. 
// -// TODO: generalize beyond 64-bit indices, no dim ordering, all dimensions -// compressed +// TODO: generalize beyond 64-bit overhead types, no dim ordering, +// all dimensions compressed template static void fromMLIRSparseTensor(const SparseTensorStorage *tensor, uint64_t *pRank, uint64_t *pNse, uint64_t **pShape, - V **pValues, uint64_t **pIndices) { + V **pValues, uint64_t **pCoordinates) { assert(tensor && "Received nullptr for tensor"); - uint64_t dimRank = tensor->getDimRank(); + const uint64_t dimRank = tensor->getDimRank(); const auto &dimSizes = tensor->getDimSizes(); std::vector identityPerm(dimRank); std::iota(identityPerm.begin(), identityPerm.end(), 0); @@ -188,30 +188,31 @@ tensor->toCOO(dimRank, dimSizes.data(), dimRank, identityPerm.data()); const std::vector> &elements = coo->getElements(); - uint64_t nse = elements.size(); + const uint64_t nse = elements.size(); const auto &cooSizes = coo->getDimSizes(); assert(cooSizes.size() == dimRank && "Rank mismatch"); - uint64_t *shape = new uint64_t[dimRank]; - std::memcpy((void *)shape, (const void *)cooSizes.data(), + uint64_t *dimShape = new uint64_t[dimRank]; + std::memcpy(static_cast(dimShape), + static_cast(cooSizes.data()), sizeof(uint64_t) * dimRank); V *values = new V[nse]; - uint64_t *indices = new uint64_t[dimRank * nse]; + uint64_t *coordinates = new uint64_t[dimRank * nse]; for (uint64_t i = 0, base = 0; i < nse; ++i) { values[i] = elements[i].value; for (uint64_t d = 0; d < dimRank; ++d) - indices[base + d] = elements[i].indices[d]; + coordinates[base + d] = elements[i].coords[d]; base += dimRank; } delete coo; *pRank = dimRank; *pNse = nse; - *pShape = shape; + *pShape = dimShape; *pValues = values; - *pIndices = indices; + *pCoordinates = coordinates; } //===----------------------------------------------------------------------===// @@ -280,22 +281,22 @@ // //===----------------------------------------------------------------------===// -#define CASE(p, i, v, P, I, V) \ - if (ptrTp == (p) && indTp == (i) && valTp == (v)) { \ +#define CASE(p, c, v, P, C, V) \ + if (posTp == (p) && crdTp == (c) && valTp == (v)) { \ switch (action) { \ case Action::kEmpty: \ - return SparseTensorStorage::newEmpty( \ + return SparseTensorStorage::newEmpty( \ dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes, lvl2dim); \ case Action::kFromCOO: { \ assert(ptr && "Received nullptr for SparseTensorCOO object"); \ auto &coo = *static_cast *>(ptr); \ - return SparseTensorStorage::newFromCOO( \ + return SparseTensorStorage::newFromCOO( \ dimRank, dimSizes, lvlRank, lvlTypes, lvl2dim, coo); \ } \ case Action::kSparseToSparse: { \ assert(ptr && "Received nullptr for SparseTensorStorage object"); \ auto &tensor = *static_cast(ptr); \ - return SparseTensorStorage::newFromSparseTensor( \ + return SparseTensorStorage::newFromSparseTensor( \ dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes, lvl2dim, dimRank, \ dim2lvl, tensor); \ } \ @@ -303,12 +304,12 @@ return new SparseTensorCOO(lvlRank, lvlSizes); \ case Action::kToCOO: { \ assert(ptr && "Received nullptr for SparseTensorStorage object"); \ - auto &tensor = *static_cast *>(ptr); \ + auto &tensor = *static_cast *>(ptr); \ return tensor.toCOO(lvlRank, lvlSizes, dimRank, dim2lvl); \ } \ case Action::kToIterator: { \ assert(ptr && "Received nullptr for SparseTensorStorage object"); \ - auto &tensor = *static_cast *>(ptr); \ + auto &tensor = *static_cast *>(ptr); \ auto *coo = tensor.toCOO(lvlRank, lvlSizes, dimRank, dim2lvl); \ return new SparseTensorIterator(coo); \ } \ @@ -335,8 +336,8 @@ 
StridedMemRefType *lvlSizesRef, StridedMemRefType *lvlTypesRef, StridedMemRefType *lvl2dimRef, - StridedMemRefType *dim2lvlRef, OverheadType ptrTp, - OverheadType indTp, PrimaryType valTp, Action action, void *ptr) { + StridedMemRefType *dim2lvlRef, OverheadType posTp, + OverheadType crdTp, PrimaryType valTp, Action action, void *ptr) { ASSERT_NO_STRIDE(dimSizesRef); ASSERT_NO_STRIDE(lvlSizesRef); ASSERT_NO_STRIDE(lvlTypesRef); @@ -355,10 +356,10 @@ // Rewrite kIndex to kU64, to avoid introducing a bunch of new cases. // This is safe because of the static_assert above. - if (ptrTp == OverheadType::kIndex) - ptrTp = OverheadType::kU64; - if (indTp == OverheadType::kIndex) - indTp = OverheadType::kU64; + if (posTp == OverheadType::kIndex) + posTp = OverheadType::kU64; + if (crdTp == OverheadType::kIndex) + crdTp = OverheadType::kU64; // Double matrices with all combinations of overhead storage. CASE(OverheadType::kU64, OverheadType::kU64, PrimaryType::kF64, uint64_t, @@ -463,8 +464,8 @@ // Unsupported case (add above if needed). // TODO: better pretty-printing of enum values! MLIR_SPARSETENSOR_FATAL( - "unsupported combination of types: \n", - static_cast(ptrTp), static_cast(indTp), + "unsupported combination of types: \n", + static_cast(posTp), static_cast(crdTp), static_cast(valTp)); } #undef CASE @@ -484,64 +485,67 @@ #define IMPL_GETOVERHEAD(NAME, TYPE, LIB) \ void _mlir_ciface_##NAME(StridedMemRefType *ref, void *tensor, \ - index_type d) { \ + index_type lvl) { \ assert(ref &&tensor); \ std::vector *v; \ - static_cast(tensor)->LIB(&v, d); \ + static_cast(tensor)->LIB(&v, lvl); \ assert(v); \ aliasIntoMemref(v->size(), v->data(), *ref); \ } -#define IMPL_SPARSEPOINTERS(PNAME, P) \ - IMPL_GETOVERHEAD(sparsePointers##PNAME, P, getPointers) -MLIR_SPARSETENSOR_FOREVERY_O(IMPL_SPARSEPOINTERS) -#undef IMPL_SPARSEPOINTERS - -#define IMPL_SPARSEINDICES(INAME, I) \ - IMPL_GETOVERHEAD(sparseIndices##INAME, I, getIndices) -MLIR_SPARSETENSOR_FOREVERY_O(IMPL_SPARSEINDICES) -#undef IMPL_SPARSEINDICES +#define IMPL_SPARSEPOSITIONS(PNAME, P) \ + IMPL_GETOVERHEAD(sparsePositions##PNAME, P, getPositions) +MLIR_SPARSETENSOR_FOREVERY_O(IMPL_SPARSEPOSITIONS) +#undef IMPL_SPARSEPOSITIONS + +#define IMPL_SPARSECOORDINATES(CNAME, C) \ + IMPL_GETOVERHEAD(sparseCoordinates##CNAME, C, getCoordinates) +MLIR_SPARSETENSOR_FOREVERY_O(IMPL_SPARSECOORDINATES) +#undef IMPL_SPARSECOORDINATES #undef IMPL_GETOVERHEAD // TODO: while this API design will work for arbitrary dim2lvl mappings, -// we should probably move the `dimInd`-to-`lvlInd` computation into codegen -// (since that could enable optimizations to remove the intermediate memref). +// we should probably move the `dimCoords`-to-`lvlCoords` computation into +// codegen (since that could enable optimizations to remove the intermediate +// memref). 
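Because the `dimCoords`-to-`lvlCoords` remapping is the crux of the `addElt` entry point defined next, here is a small self-contained sketch of that scatter step in isolation, assuming (as the runtime currently does) that `dim2lvl` is a permutation. The helper name is illustrative and not part of the patch; the macro definition itself continues right below.

```cpp
#include <cstdint>
#include <vector>

// Illustrative helper (not in the patch): scatters dimension-coordinates
// into level order, mirroring the loop inside IMPL_ADDELT below.
static std::vector<uint64_t>
dimToLvlCoords(const std::vector<uint64_t> &dimCoords,
               const std::vector<uint64_t> &dim2lvl) {
  std::vector<uint64_t> lvlCoords(dimCoords.size());
  for (uint64_t d = 0, rank = dimCoords.size(); d < rank; ++d)
    lvlCoords[dim2lvl[d]] = dimCoords[d]; // level position of dimension d
  return lvlCoords;
}

// E.g. a transposed level order dim2lvl = {1, 0} maps dimCoords = {i, j}
// to lvlCoords = {j, i}.
```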
#define IMPL_ADDELT(VNAME, V) \ void *_mlir_ciface_addElt##VNAME( \ void *lvlCOO, StridedMemRefType *vref, \ - StridedMemRefType *dimIndRef, \ + StridedMemRefType *dimCoordsRef, \ StridedMemRefType *dim2lvlRef) { \ assert(lvlCOO &&vref); \ - ASSERT_NO_STRIDE(dimIndRef); \ + ASSERT_NO_STRIDE(dimCoordsRef); \ ASSERT_NO_STRIDE(dim2lvlRef); \ - const uint64_t rank = MEMREF_GET_USIZE(dimIndRef); \ + const uint64_t rank = MEMREF_GET_USIZE(dimCoordsRef); \ ASSERT_USIZE_EQ(dim2lvlRef, rank); \ - const index_type *dimInd = MEMREF_GET_PAYLOAD(dimIndRef); \ + const index_type *dimCoords = MEMREF_GET_PAYLOAD(dimCoordsRef); \ const index_type *dim2lvl = MEMREF_GET_PAYLOAD(dim2lvlRef); \ - std::vector lvlInd(rank); \ + std::vector lvlCoords(rank); \ for (uint64_t d = 0; d < rank; ++d) \ - lvlInd[dim2lvl[d]] = dimInd[d]; \ + lvlCoords[dim2lvl[d]] = dimCoords[d]; \ V *value = MEMREF_GET_PAYLOAD(vref); \ - static_cast *>(lvlCOO)->add(lvlInd, *value); \ + static_cast *>(lvlCOO)->add(lvlCoords, *value); \ return lvlCOO; \ } MLIR_SPARSETENSOR_FOREVERY_V(IMPL_ADDELT) #undef IMPL_ADDELT +// NOTE: the `cref` argument uses the same coordinate-space as the `iter` +// (which can be either dim- or lvl-coords, depending on context). #define IMPL_GETNEXT(VNAME, V) \ bool _mlir_ciface_getNext##VNAME(void *iter, \ - StridedMemRefType *iref, \ + StridedMemRefType *cref, \ StridedMemRefType *vref) { \ assert(iter &&vref); \ - ASSERT_NO_STRIDE(iref); \ - index_type *indx = MEMREF_GET_PAYLOAD(iref); \ + ASSERT_NO_STRIDE(cref); \ + index_type *coords = MEMREF_GET_PAYLOAD(cref); \ V *value = MEMREF_GET_PAYLOAD(vref); \ - const uint64_t isize = MEMREF_GET_USIZE(iref); \ + const uint64_t rank = MEMREF_GET_USIZE(cref); \ const Element *elem = \ static_cast *>(iter)->getNext(); \ if (elem == nullptr) \ return false; \ - for (uint64_t r = 0; r < isize; r++) \ - indx[r] = elem->indices[r]; \ + for (uint64_t d = 0; d < rank; d++) \ + coords[d] = elem->coords[d]; \ *value = elem->value; \ return true; \ } @@ -549,36 +553,37 @@ #undef IMPL_GETNEXT #define IMPL_LEXINSERT(VNAME, V) \ - void _mlir_ciface_lexInsert##VNAME(void *tensor, \ - StridedMemRefType *cref, \ - StridedMemRefType *vref) { \ - assert(tensor &&vref); \ - ASSERT_NO_STRIDE(cref); \ - index_type *cursor = MEMREF_GET_PAYLOAD(cref); \ - assert(cursor); \ + void _mlir_ciface_lexInsert##VNAME( \ + void *t, StridedMemRefType *lvlCoordsRef, \ + StridedMemRefType *vref) { \ + assert(t &&vref); \ + auto &tensor = *static_cast(t); \ + ASSERT_NO_STRIDE(lvlCoordsRef); \ + index_type *lvlCoords = MEMREF_GET_PAYLOAD(lvlCoordsRef); \ + assert(lvlCoords); \ V *value = MEMREF_GET_PAYLOAD(vref); \ - static_cast(tensor)->lexInsert(cursor, *value); \ + tensor.lexInsert(lvlCoords, *value); \ } MLIR_SPARSETENSOR_FOREVERY_V(IMPL_LEXINSERT) #undef IMPL_LEXINSERT #define IMPL_EXPINSERT(VNAME, V) \ void _mlir_ciface_expInsert##VNAME( \ - void *tensor, StridedMemRefType *cref, \ + void *t, StridedMemRefType *lvlCoordsRef, \ StridedMemRefType *vref, StridedMemRefType *fref, \ StridedMemRefType *aref, index_type count) { \ - assert(tensor); \ - ASSERT_NO_STRIDE(cref); \ + assert(t); \ + auto &tensor = *static_cast(t); \ + ASSERT_NO_STRIDE(lvlCoordsRef); \ ASSERT_NO_STRIDE(vref); \ ASSERT_NO_STRIDE(fref); \ ASSERT_NO_STRIDE(aref); \ ASSERT_USIZE_EQ(vref, MEMREF_GET_USIZE(fref)); \ - index_type *cursor = MEMREF_GET_PAYLOAD(cref); \ + index_type *lvlCoords = MEMREF_GET_PAYLOAD(lvlCoordsRef); \ V *values = MEMREF_GET_PAYLOAD(vref); \ bool *filled = MEMREF_GET_PAYLOAD(fref); \ index_type *added = 
MEMREF_GET_PAYLOAD(aref); \ - static_cast(tensor)->expInsert( \ - cursor, values, filled, added, count); \ + tensor.expInsert(lvlCoords, values, filled, added, count); \ } MLIR_SPARSETENSOR_FOREVERY_V(IMPL_EXPINSERT) #undef IMPL_EXPINSERT @@ -596,13 +601,13 @@ // FIXME: update `SparseTensorCodegenPass` to use // `_mlir_ciface_getSparseTensorReaderDimSizes` instead. void _mlir_ciface_copySparseTensorReaderDimSizes( - void *p, StridedMemRefType *dref) { + void *p, StridedMemRefType *dimSizesRef) { assert(p); SparseTensorReader &reader = *static_cast(p); - ASSERT_NO_STRIDE(dref); - const uint64_t dimRank = MEMREF_GET_USIZE(dref); - ASSERT_USIZE_EQ(dref, reader.getRank()); - index_type *dimSizes = MEMREF_GET_PAYLOAD(dref); + ASSERT_NO_STRIDE(dimSizesRef); + const uint64_t dimRank = MEMREF_GET_USIZE(dimSizesRef); + ASSERT_USIZE_EQ(dimSizesRef, reader.getRank()); + index_type *dimSizes = MEMREF_GET_PAYLOAD(dimSizesRef); const index_type *fileSizes = reader.getDimSizes(); for (uint64_t d = 0; d < dimRank; ++d) dimSizes[d] = fileSizes[d]; @@ -618,19 +623,21 @@ #define IMPL_GETNEXT(VNAME, V) \ void _mlir_ciface_getSparseTensorReaderNext##VNAME( \ - void *p, StridedMemRefType *iref, \ + void *p, StridedMemRefType *dimCoordsRef, \ StridedMemRefType *vref) { \ assert(p &&vref); \ auto &reader = *static_cast(p); \ - ASSERT_NO_STRIDE(iref); \ - const uint64_t rank = MEMREF_GET_USIZE(iref); \ - index_type *indices = MEMREF_GET_PAYLOAD(iref); \ + ASSERT_NO_STRIDE(dimCoordsRef); \ + const uint64_t dimRank = MEMREF_GET_USIZE(dimCoordsRef); \ + index_type *dimCoords = MEMREF_GET_PAYLOAD(dimCoordsRef); \ V *value = MEMREF_GET_PAYLOAD(vref); \ - *value = reader.readCOOElement(rank, indices); \ + *value = reader.readElement(dimRank, dimCoords); \ } MLIR_SPARSETENSOR_FOREVERY_V(IMPL_GETNEXT) #undef IMPL_GETNEXT +// FIXME: This function name is weird; should rename to +// "sparseTensorReaderReadToBuffers". #define IMPL_GETNEXT(VNAME, V, CNAME, C) \ bool _mlir_ciface_getSparseTensorReaderRead##CNAME##VNAME( \ void *p, StridedMemRefType *dim2lvlRef, \ @@ -644,7 +651,7 @@ const uint64_t vSize = MEMREF_GET_USIZE(vref); \ const uint64_t lvlRank = reader.getRank(); \ assert(vSize *lvlRank <= cSize); \ - assert(vSize >= reader.getNNZ() && "Not enough space in buffers"); \ + assert(vSize >= reader.getNSE() && "Not enough space in buffers"); \ ASSERT_USIZE_EQ(dim2lvlRef, lvlRank); \ (void)cSize; \ (void)vSize; \ @@ -662,8 +669,8 @@ void *p, StridedMemRefType *lvlSizesRef, StridedMemRefType *lvlTypesRef, StridedMemRefType *lvl2dimRef, - StridedMemRefType *dim2lvlRef, OverheadType ptrTp, - OverheadType indTp, PrimaryType valTp) { + StridedMemRefType *dim2lvlRef, OverheadType posTp, + OverheadType crdTp, PrimaryType valTp) { assert(p); SparseTensorReader &reader = *static_cast(p); ASSERT_NO_STRIDE(lvlSizesRef); @@ -683,18 +690,18 @@ // // FIXME(wrengr): Really need to define a separate x-macro for handling // all this. (Or ideally some better, entirely-different approach) -#define CASE(p, i, v, P, I, V) \ - if (ptrTp == OverheadType::p && indTp == OverheadType::i && \ +#define CASE(p, c, v, P, C, V) \ + if (posTp == OverheadType::p && crdTp == OverheadType::c && \ valTp == PrimaryType::v) \ - return static_cast(reader.readSparseTensor( \ + return static_cast(reader.readSparseTensor( \ lvlRank, lvlSizes, lvlTypes, lvl2dim, dim2lvl)); #define CASE_SECSAME(p, v, P, V) CASE(p, p, v, P, P, V) // Rewrite kIndex to kU64, to avoid introducing a bunch of new cases. // This is safe because of the static_assert above. 
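The kIndex-to-kU64 rewrite performed immediately below is worth spelling out once, since it is what keeps the CASE table from growing an extra row per value type. A minimal self-contained sketch follows; only `kIndex = 0` is confirmed by the Enums.h hunk earlier in this patch, and the remaining enumerators are listed purely so the sketch compiles. The actual canonicalization and CASE expansions continue right below.

```cpp
#include <cstdint>
#include <type_traits>

using index_type = uint64_t; // as pinned by Enums.h for this runtime

// Abridged overhead-type encoding; kIndex = 0 matches Enums.h, the rest are
// listed here only for illustration.
enum class OverheadType : uint32_t { kIndex = 0, kU64, kU32, kU16, kU8 };

static_assert(std::is_same_v<index_type, uint64_t>,
              "kIndex may only be routed through the kU64 cases when "
              "index_type is 64-bit");

// Folding kIndex into kU64 avoids adding a separate set of CASE expansions
// for the position/coordinate overhead types.
inline OverheadType canonicalizeOverhead(OverheadType t) {
  return t == OverheadType::kIndex ? OverheadType::kU64 : t;
}
```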
- if (ptrTp == OverheadType::kIndex) - ptrTp = OverheadType::kU64; - if (indTp == OverheadType::kIndex) - indTp = OverheadType::kU64; + if (posTp == OverheadType::kIndex) + posTp = OverheadType::kU64; + if (crdTp == OverheadType::kIndex) + crdTp = OverheadType::kU64; // Double matrices with all combinations of overhead storage. CASE(kU64, kU64, kF64, uint64_t, uint64_t, double); CASE(kU64, kU32, kF64, uint64_t, uint32_t, double); @@ -762,37 +769,38 @@ // Unsupported case (add above if needed). // TODO: better pretty-printing of enum values! MLIR_SPARSETENSOR_FATAL( - "unsupported combination of types: \n", - static_cast(ptrTp), static_cast(indTp), + "unsupported combination of types: \n", + static_cast(posTp), static_cast(crdTp), static_cast(valTp)); #undef CASE_SECSAME #undef CASE } void _mlir_ciface_outSparseTensorWriterMetaData( - void *p, index_type rank, index_type nnz, - StridedMemRefType *dref) { + void *p, index_type dimRank, index_type nse, + StridedMemRefType *dimSizesRef) { assert(p); - ASSERT_NO_STRIDE(dref); - assert(rank != 0); - index_type *dimSizes = MEMREF_GET_PAYLOAD(dref); + ASSERT_NO_STRIDE(dimSizesRef); + assert(dimRank != 0); + index_type *dimSizes = MEMREF_GET_PAYLOAD(dimSizesRef); SparseTensorWriter &file = *static_cast(p); - file << rank << " " << nnz << std::endl; - for (index_type r = 0; r < rank - 1; ++r) - file << dimSizes[r] << " "; - file << dimSizes[rank - 1] << std::endl; + file << dimRank << " " << nse << std::endl; + for (index_type d = 0; d < dimRank - 1; ++d) + file << dimSizes[d] << " "; + file << dimSizes[dimRank - 1] << std::endl; } #define IMPL_OUTNEXT(VNAME, V) \ void _mlir_ciface_outSparseTensorWriterNext##VNAME( \ - void *p, index_type rank, StridedMemRefType *iref, \ + void *p, index_type dimRank, \ + StridedMemRefType *dimCoordsRef, \ StridedMemRefType *vref) { \ assert(p &&vref); \ - ASSERT_NO_STRIDE(iref); \ - index_type *indices = MEMREF_GET_PAYLOAD(iref); \ + ASSERT_NO_STRIDE(dimCoordsRef); \ + const index_type *dimCoords = MEMREF_GET_PAYLOAD(dimCoordsRef); \ SparseTensorWriter &file = *static_cast(p); \ - for (index_type r = 0; r < rank; ++r) \ - file << (indices[r] + 1) << " "; \ + for (index_type d = 0; d < dimRank; ++d) \ + file << (dimCoords[d] + 1) << " "; \ V *value = MEMREF_GET_PAYLOAD(vref); \ file << *value << std::endl; \ } @@ -872,21 +880,22 @@ // We can't use `static_cast` here because `DimLevelType` is an enum-class. 
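To make the enum-class remark above concrete before the macro that follows: a single `uint8_t` value can be `static_cast` to a scoped enum, but a pointer to a caller-provided `uint8_t` buffer cannot, which is why the macro reinterpret-casts the `lvlTypes` buffer. The sketch below uses a hypothetical `DemoLevelType` stand-in; the real `DimLevelType` is defined in Enums.h, and its dense/compressed encodings surface as 4 and 8 in the CAPI test further down. The conversion macros continue right below.

```cpp
#include <cstdint>

// Hypothetical stand-in for DimLevelType (values chosen to match the 4/8
// printed by the CAPI test for dense/compressed).
enum class DemoLevelType : uint8_t { Dense = 4, Compressed = 8 };

static void consume(const DemoLevelType *lvlTypes, uint64_t lvlRank) {
  (void)lvlTypes;
  (void)lvlRank;
}

static void forward(const uint8_t *raw, uint64_t lvlRank) {
  // static_cast<const DemoLevelType *>(raw) is ill-formed: static_cast does
  // not convert between unrelated pointer types, even though a single
  // uint8_t value could be static_cast to the scoped enum.
  consume(reinterpret_cast<const DemoLevelType *>(raw), lvlRank);
}
```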
#define IMPL_CONVERTTOMLIRSPARSETENSOR(VNAME, V) \ void *convertToMLIRSparseTensor##VNAME( \ - uint64_t rank, uint64_t nse, uint64_t *shape, V *values, \ - uint64_t *indices, uint64_t *perm, uint8_t *sparse) { \ - return toMLIRSparseTensor(rank, nse, shape, values, indices, perm, \ - reinterpret_cast(sparse)); \ + uint64_t rank, uint64_t nse, uint64_t *dimSizes, V *values, \ + uint64_t *dimCoordinates, uint64_t *dim2lvl, uint8_t *lvlTypes) { \ + return toMLIRSparseTensor(rank, nse, dimSizes, values, dimCoordinates, \ + dim2lvl, \ + reinterpret_cast(lvlTypes)); \ } MLIR_SPARSETENSOR_FOREVERY_V(IMPL_CONVERTTOMLIRSPARSETENSOR) #undef IMPL_CONVERTTOMLIRSPARSETENSOR #define IMPL_CONVERTFROMMLIRSPARSETENSOR(VNAME, V) \ - void convertFromMLIRSparseTensor##VNAME(void *tensor, uint64_t *pRank, \ - uint64_t *pNse, uint64_t **pShape, \ - V **pValues, uint64_t **pIndices) { \ + void convertFromMLIRSparseTensor##VNAME( \ + void *tensor, uint64_t *pRank, uint64_t *pNse, uint64_t **pShape, \ + V **pValues, uint64_t **pCoordinates) { \ fromMLIRSparseTensor( \ static_cast *>(tensor), \ - pRank, pNse, pShape, pValues, pIndices); \ + pRank, pNse, pShape, pValues, pCoordinates); \ } MLIR_SPARSETENSOR_FOREVERY_V(IMPL_CONVERTFROMMLIRSPARSETENSOR) #undef IMPL_CONVERTFROMMLIRSPARSETENSOR @@ -908,8 +917,8 @@ return static_cast(p)->isSymmetric(); } -index_type getSparseTensorReaderNNZ(void *p) { - return static_cast(p)->getNNZ(); +index_type getSparseTensorReaderNSE(void *p) { + return static_cast(p)->getNSE(); } index_type getSparseTensorReaderDimSize(void *p, index_type d) { diff --git a/mlir/test/CAPI/sparse_tensor.c b/mlir/test/CAPI/sparse_tensor.c --- a/mlir/test/CAPI/sparse_tensor.c +++ b/mlir/test/CAPI/sparse_tensor.c @@ -28,7 +28,7 @@ "dimLevelType = [ \"dense\", \"compressed\", \"compressed\"], " "dimOrdering = affine_map<(d0, d1, d2) -> (d0, d1, d2)>, " "higherOrdering = affine_map<(d0, d1)[s0] -> (s0, d0, d1)>, " - "pointerBitWidth = 32, indexBitWidth = 64 }>"; + "posWidth = 32, crdWidth = 64 }>"; // clang-format on MlirAttribute originalAttr = mlirAttributeParseGet(ctx, mlirStringRefCreateFromCString(originalAsm)); @@ -46,26 +46,24 @@ // CHECK: level_type: 4 // CHECK: level_type: 8 // CHECK: level_type: 8 - int numLevelTypes = mlirSparseTensorEncodingGetNumDimLevelTypes(originalAttr); + int lvlRank = mlirSparseTensorEncodingGetLvlRank(originalAttr); enum MlirSparseTensorDimLevelType *levelTypes = - malloc(sizeof(enum MlirSparseTensorDimLevelType) * numLevelTypes); - for (int i = 0; i < numLevelTypes; ++i) { - levelTypes[i] = - mlirSparseTensorEncodingAttrGetDimLevelType(originalAttr, i); - fprintf(stderr, "level_type: %d\n", levelTypes[i]); + malloc(sizeof(enum MlirSparseTensorDimLevelType) * lvlRank); + for (int l = 0; l < lvlRank; ++l) { + levelTypes[l] = + mlirSparseTensorEncodingAttrGetDimLevelType(originalAttr, l); + fprintf(stderr, "level_type: %d\n", levelTypes[l]); } - // CHECK: pointer: 32 - int pointerBitWidth = - mlirSparseTensorEncodingAttrGetPointerBitWidth(originalAttr); - fprintf(stderr, "pointer: %d\n", pointerBitWidth); - // CHECK: index: 64 - int indexBitWidth = - mlirSparseTensorEncodingAttrGetIndexBitWidth(originalAttr); - fprintf(stderr, "index: %d\n", indexBitWidth); + // CHECK: posWidth: 32 + int posWidth = mlirSparseTensorEncodingAttrGetPosWidth(originalAttr); + fprintf(stderr, "posWidth: %d\n", posWidth); + // CHECK: crdWidth: 64 + int crdWidth = mlirSparseTensorEncodingAttrGetCrdWidth(originalAttr); + fprintf(stderr, "crdWidth: %d\n", crdWidth); - MlirAttribute newAttr = 
mlirSparseTensorEncodingAttrGet( - ctx, numLevelTypes, levelTypes, dimOrdering, higherOrdering, - pointerBitWidth, indexBitWidth); + MlirAttribute newAttr = + mlirSparseTensorEncodingAttrGet(ctx, lvlRank, levelTypes, dimOrdering, + higherOrdering, posWidth, crdWidth); mlirAttributeDump(newAttr); // For debugging filecheck output. // CHECK: equal: 1 fprintf(stderr, "equal: %d\n", mlirAttributeEqual(originalAttr, newAttr)); diff --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir --- a/mlir/test/Dialect/SparseTensor/codegen.mlir +++ b/mlir/test/Dialect/SparseTensor/codegen.mlir @@ -4,26 +4,26 @@ #SparseVector = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ], - indexBitWidth = 64, - pointerBitWidth = 32 + crdWidth = 64, + posWidth = 32 }> #Dense2D = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense" ], - indexBitWidth = 64, - pointerBitWidth = 32 + crdWidth = 64, + posWidth = 32 }> #Row = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ], - indexBitWidth = 64, - pointerBitWidth = 32 + crdWidth = 64, + posWidth = 32 }> #CSR = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], - indexBitWidth = 64, - pointerBitWidth = 32 + crdWidth = 64, + posWidth = 32 }> #UCSR = #sparse_tensor.encoding<{ @@ -37,8 +37,8 @@ #DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], - indexBitWidth = 64, - pointerBitWidth = 32 + crdWidth = 64, + posWidth = 32 }> #Dense3D = #sparse_tensor.encoding<{ @@ -194,7 +194,7 @@ // CHECK-LABEL: func @sparse_dense_3d_dyn( // CHECK-SAME: %[[A0:.*]]: memref, // CHECK-SAME: %[[A1:.*]]: !sparse_tensor.storage_specifier -// CHECK: %[[A2:.*]] = sparse_tensor.storage_specifier.get %[[A1]] dim_sz at 2 +// CHECK: %[[A2:.*]] = sparse_tensor.storage_specifier.get %[[A1]] lvl_sz at 2 // CHECK: return %[[A2]] : index func.func @sparse_dense_3d_dyn(%arg0: tensor) -> index { %c = arith.constant 1 : index @@ -202,7 +202,7 @@ return %0 : index } -// CHECK-LABEL: func @sparse_pointers_dcsr( +// CHECK-LABEL: func @sparse_positions_dcsr( // CHECK-SAME: %[[A0:.*0]]: memref, // CHECK-SAME: %[[A1:.*1]]: memref, // CHECK-SAME: %[[A2:.*2]]: memref, @@ -210,8 +210,8 @@ // CHECK-SAME: %[[A4:.*4]]: memref, // CHECK-SAME: %[[A5:.*5]]: !sparse_tensor.storage_specifier // CHECK: return %[[A2]] : memref -func.func @sparse_pointers_dcsr(%arg0: tensor) -> memref { - %0 = sparse_tensor.pointers %arg0 { dimension = 1 : index } : tensor to memref +func.func @sparse_positions_dcsr(%arg0: tensor) -> memref { + %0 = sparse_tensor.positions %arg0 { level = 1 : index } : tensor to memref return %0 : memref } @@ -224,7 +224,7 @@ // CHECK-SAME: %[[A5:.*5]]: !sparse_tensor.storage_specifier // CHECK: return %[[A3]] : memref func.func @sparse_indices_dcsr(%arg0: tensor) -> memref { - %0 = sparse_tensor.indices %arg0 { dimension = 1 : index } : tensor to memref + %0 = sparse_tensor.coordinates %arg0 { level = 1 : index } : tensor to memref return %0 : memref } @@ -263,13 +263,13 @@ // CHECK-SAME: %[[A4:.*4]]: memref, // CHECK-SAME: %[[A5:.*5]]: !sparse_tensor.storage_specifier // CHECK: %[[C2:.*]] = arith.constant 2 : index -// CHECK: %[[S0:.*]] = sparse_tensor.storage_specifier.get %[[A5]] idx_mem_sz at 1 +// CHECK: %[[S0:.*]] = sparse_tensor.storage_specifier.get %[[A5]] crd_mem_sz at 1 // CHECK: %[[S2:.*]] = arith.divui %[[S0]], %[[C2]] : index // CHECK: %[[R1:.*]] = memref.subview %[[A3]][0] {{\[}}%[[S2]]] [2] : memref to memref> // CHECK: %[[R2:.*]] = memref.cast %[[R1]] : memref> to memref> // 
CHECK: return %[[R2]] : memref> func.func @sparse_indices_coo(%arg0: tensor) -> memref> { - %0 = sparse_tensor.indices %arg0 { dimension = 1 : index } : tensor to memref> + %0 = sparse_tensor.coordinates %arg0 { level = 1 : index } : tensor to memref> return %0 : memref> } @@ -282,7 +282,7 @@ // CHECK-SAME: %[[A5:.*5]]: !sparse_tensor.storage_specifier // CHECK: return %[[A3]] : memref func.func @sparse_indices_buffer_coo(%arg0: tensor) -> memref { - %0 = sparse_tensor.indices_buffer %arg0 : tensor to memref + %0 = sparse_tensor.coordinates_buffer %arg0 : tensor to memref return %0 : memref } @@ -323,13 +323,13 @@ // CHECK: %[[A7:.*]] = memref.alloc() : memref<16xf64> // CHECK: %[[A8:.*]] = memref.cast %[[A7]] : memref<16xf64> to memref // CHECK: %[[A9:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier -// CHECK: %[[A11:.*]] = sparse_tensor.storage_specifier.set %[[A9]] dim_sz at 0 with %[[A0]] : !sparse_tensor.storage_specifier -// CHECK: %[[A12:.*]] = sparse_tensor.storage_specifier.set %[[A11]] dim_sz at 1 with %[[A1]] : !sparse_tensor.storage_specifier -// CHECK: %[[A14:.*]] = sparse_tensor.storage_specifier.get %[[A12]] ptr_mem_sz at 1 : !sparse_tensor.storage_specifier +// CHECK: %[[A11:.*]] = sparse_tensor.storage_specifier.set %[[A9]] lvl_sz at 0 with %[[A0]] : !sparse_tensor.storage_specifier +// CHECK: %[[A12:.*]] = sparse_tensor.storage_specifier.set %[[A11]] lvl_sz at 1 with %[[A1]] : !sparse_tensor.storage_specifier +// CHECK: %[[A14:.*]] = sparse_tensor.storage_specifier.get %[[A12]] pos_mem_sz at 1 : !sparse_tensor.storage_specifier // CHECK: %[[A15:.*]], %[[A17:.*]] = sparse_tensor.push_back %[[A14]], %[[A4]], %[[A2]] : index, memref, index -// CHECK: %[[A18:.*]] = sparse_tensor.storage_specifier.set %[[A12]] ptr_mem_sz at 1 with %[[A17]] : !sparse_tensor.storage_specifier +// CHECK: %[[A18:.*]] = sparse_tensor.storage_specifier.set %[[A12]] pos_mem_sz at 1 with %[[A17]] : !sparse_tensor.storage_specifier // CHECK: %[[A23:.*]], %[[A25:.*]] = sparse_tensor.push_back %[[A17]], %[[A15]], %[[A2]], %[[A0]] : index, memref, index, index -// CHECK: %[[A26:.*]] = sparse_tensor.storage_specifier.set %[[A18]] ptr_mem_sz at 1 with %[[A25]] : !sparse_tensor.storage_specifier +// CHECK: %[[A26:.*]] = sparse_tensor.storage_specifier.set %[[A18]] pos_mem_sz at 1 with %[[A25]] : !sparse_tensor.storage_specifier // CHECK: return %[[A23]], %[[A6]], %[[A8]], %[[A26]] : memref, memref, memref, !sparse_tensor.storage_specifier func.func @sparse_alloc_csc(%arg0: index) -> tensor<10x?xf64, #CSC> { %0 = bufferization.alloc_tensor(%arg0) : tensor<10x?xf64, #CSC> @@ -346,9 +346,9 @@ // CHECK: %[[A5:.*]] = memref.alloc() : memref<6000xf64> // CHECK: %[[A6:.*]] = memref.cast %[[A5]] : memref<6000xf64> to memref // CHECK: %[[A7:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier -// CHECK: %[[A8:.*]] = sparse_tensor.storage_specifier.set %[[A7]] dim_sz at 0 with %[[A3]] : !sparse_tensor.storage_specifier -// CHECK: %[[A9:.*]] = sparse_tensor.storage_specifier.set %[[A8]] dim_sz at 1 with %[[A2]] : !sparse_tensor.storage_specifier -// CHECK: %[[A10:.*]] = sparse_tensor.storage_specifier.set %[[A9]] dim_sz at 2 with %[[A1]] : !sparse_tensor.storage_specifier +// CHECK: %[[A8:.*]] = sparse_tensor.storage_specifier.set %[[A7]] lvl_sz at 0 with %[[A3]] : !sparse_tensor.storage_specifier +// CHECK: %[[A9:.*]] = sparse_tensor.storage_specifier.set %[[A8]] lvl_sz at 1 with %[[A2]] : !sparse_tensor.storage_specifier +// CHECK: %[[A10:.*]] = 
sparse_tensor.storage_specifier.set %[[A9]] lvl_sz at 2 with %[[A1]] : !sparse_tensor.storage_specifier // CHECK: %[[A12:.*]] = sparse_tensor.storage_specifier.get %[[A10]] val_mem_sz : !sparse_tensor.storage_specifier // CHECK: %[[A15:.*]], %[[A14:.*]] = sparse_tensor.push_back %[[A12]], %[[A6]], %[[A4]], %[[A0]] : index, memref, f64, index // CHECK: %[[A16:.*]] = sparse_tensor.storage_specifier.set %[[A10]] val_mem_sz with %[[A14]] : !sparse_tensor.storage_specifier @@ -499,7 +499,7 @@ // CHECK: memref.dealloc %[[A4]] : memref // CHECK: memref.dealloc %[[A5]] : memref // CHECK: memref.dealloc %[[A6]] : memref -// CHECK: %[[A25:.*]] = sparse_tensor.storage_specifier.get %[[A24:.*]]#3 ptr_mem_sz at 1 : !sparse_tensor.storage_specifier +// CHECK: %[[A25:.*]] = sparse_tensor.storage_specifier.get %[[A24:.*]]#3 pos_mem_sz at 1 : !sparse_tensor.storage_specifier // CHECK: %[[A26:.*]] = memref.load %[[A24]]#0{{\[}}%[[A13]]] : memref // CHECK: %[[A27:.*]] = scf.for %[[A28:.*]] = %[[A12]] to %[[A25]] step %[[A12]] iter_args(%[[A29:.*]] = %[[A26]]) -> (i32) { // CHECK: %[[A30:.*]] = memref.load %[[A24]]#0{{\[}}%[[A28]]] : memref @@ -557,7 +557,7 @@ // CHECK: memref.dealloc %[[A4]] : memref // CHECK: memref.dealloc %[[A5]] : memref // CHECK: memref.dealloc %[[A6]] : memref -// CHECK: %[[A24:.*]] = sparse_tensor.storage_specifier.get %[[A23:.*]]#3 ptr_mem_sz at 1 : !sparse_tensor.storage_specifier +// CHECK: %[[A24:.*]] = sparse_tensor.storage_specifier.get %[[A23:.*]]#3 pos_mem_sz at 1 : !sparse_tensor.storage_specifier // CHECK: %[[A25:.*]] = memref.load %[[A23]]#0{{\[}}%[[A11]]] : memref // CHECK: %[[A26:.*]] = scf.for %[[A27:.*]] = %[[A12]] to %[[A24]] step %[[A12]] iter_args(%[[A28:.*]] = %[[A25]]) -> (index) { // CHECK: %[[A29:.*]] = memref.load %[[A23]]#0{{\[}}%[[A27]]] : memref @@ -675,20 +675,20 @@ // CHECK: call @copySparseTensorReaderDimSizes(%[[A5]], %[[A7]]) : (!llvm.ptr, memref) -> () // CHECK: %[[A8:.*]] = memref.load %[[A6]]{{\[}}%[[A3]]] : memref<2xindex> // CHECK: %[[A9:.*]] = memref.load %[[A6]]{{\[}}%[[A2]]] : memref<2xindex> -// CHECK: %[[A10:.*]] = call @getSparseTensorReaderNNZ(%[[A5]]) +// CHECK: %[[A10:.*]] = call @getSparseTensorReaderNSE(%[[A5]]) // CHECK: %[[A11:.*]] = arith.muli %[[A10]], %[[A4]] : index // CHECK: %[[A12:.*]] = memref.alloc() : memref<2xindex> // CHECK: %[[A13:.*]] = memref.cast %[[A12]] : memref<2xindex> to memref // CHECK: %[[A14:.*]] = memref.alloc(%[[A11]]) : memref // CHECK: %[[A15:.*]] = memref.alloc(%[[A10]]) : memref // CHECK: %[[A16:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ dimLevelType = [ "compressed", "singleton" ] }>> -// CHECK: %[[A18:.*]] = sparse_tensor.storage_specifier.set %[[A16]] dim_sz at 0 with %[[A8]] -// CHECK: %[[A19:.*]] = sparse_tensor.storage_specifier.get %[[A18]] ptr_mem_sz at 0 +// CHECK: %[[A18:.*]] = sparse_tensor.storage_specifier.set %[[A16]] lvl_sz at 0 with %[[A8]] +// CHECK: %[[A19:.*]] = sparse_tensor.storage_specifier.get %[[A18]] pos_mem_sz at 0 // CHECK: %[[A21:.*]], %[[A22:.*]] = sparse_tensor.push_back %[[A19]], %[[A13]], %[[A3]] -// CHECK: %[[A24:.*]] = sparse_tensor.storage_specifier.set %[[A18]] ptr_mem_sz at 0 with %[[A22]] -// CHECK: %[[A26:.*]] = sparse_tensor.storage_specifier.set %[[A24]] dim_sz at 1 with %[[A9]] +// CHECK: %[[A24:.*]] = sparse_tensor.storage_specifier.set %[[A18]] pos_mem_sz at 0 with %[[A22]] +// CHECK: %[[A26:.*]] = sparse_tensor.storage_specifier.set %[[A24]] lvl_sz at 1 with %[[A9]] // CHECK: %[[A27:.*]], %[[A28:.*]] = 
sparse_tensor.push_back %[[A22]], %[[A21]], %[[A3]], %[[A2]] -// CHECK: %[[A30:.*]] = sparse_tensor.storage_specifier.set %[[A26]] ptr_mem_sz at 0 with %[[A28]] +// CHECK: %[[A30:.*]] = sparse_tensor.storage_specifier.set %[[A26]] pos_mem_sz at 0 with %[[A28]] // CHECK: %[[A31:.*]] = memref.alloca() : memref<2xindex> // CHECK: %[[A32:.*]] = memref.cast %[[A31]] : memref<2xindex> to memref // CHECK: memref.store %[[A3]], %[[A31]]{{\[}}%[[A3]]] : memref<2xindex> @@ -699,7 +699,7 @@ // CHECK: sparse_tensor.sort_coo hybrid_quick_sort %[[A10]], %[[A14]] jointly %[[A15]] {nx = 2 : index, ny = 0 : index} : memref jointly memref // CHECK: } // CHECK: memref.store %[[A10]], %[[A27]]{{\[}}%[[A2]]] : memref -// CHECK: %[[A36:.*]] = sparse_tensor.storage_specifier.set %[[A30]] idx_mem_sz at 0 with %[[A11]] +// CHECK: %[[A36:.*]] = sparse_tensor.storage_specifier.set %[[A30]] crd_mem_sz at 0 with %[[A11]] // CHECK: %[[A38:.*]] = sparse_tensor.storage_specifier.set %[[A36]] val_mem_sz with %[[A10]] // CHECK: call @delSparseTensorReader(%[[A5]]) : (!llvm.ptr) -> () // CHECK: return %[[A27]], %[[A14]], %[[A15]], %[[A38]] @@ -719,31 +719,31 @@ // CHECK: call @copySparseTensorReaderDimSizes(%[[A4]], %[[A6]]) // CHECK: %[[A7:.*]] = memref.load %[[A5]]{{\[}}%[[A2]]] : memref<2xindex> // CHECK: %[[A8:.*]] = memref.load %[[A5]]{{\[}}%[[A1]]] : memref<2xindex> -// CHECK: %[[A9:.*]] = call @getSparseTensorReaderNNZ(%[[A4]]) +// CHECK: %[[A9:.*]] = call @getSparseTensorReaderNSE(%[[A4]]) // CHECK: %[[A10:.*]] = arith.muli %[[A9]], %[[A3]] : index // CHECK: %[[A11:.*]] = memref.alloc() : memref<2xindex> // CHECK: %[[A12:.*]] = memref.cast %[[A11]] : memref<2xindex> to memref // CHECK: %[[A13:.*]] = memref.alloc(%[[A10]]) : memref // CHECK: %[[A14:.*]] = memref.alloc(%[[A9]]) : memref // CHECK: %[[A15:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier<#sparse_tensor.encoding<{ dimLevelType = [ "compressed", "singleton" ] }>> -// CHECK: %[[A17:.*]] = sparse_tensor.storage_specifier.set %[[A15]] dim_sz at 0 with %[[A8]] -// CHECK: %[[A18:.*]] = sparse_tensor.storage_specifier.get %[[A17]] ptr_mem_sz at 0 +// CHECK: %[[A17:.*]] = sparse_tensor.storage_specifier.set %[[A15]] lvl_sz at 0 with %[[A8]] +// CHECK: %[[A18:.*]] = sparse_tensor.storage_specifier.get %[[A17]] pos_mem_sz at 0 // CHECK: %[[A20:.*]], %[[A21:.*]] = sparse_tensor.push_back %[[A18]], %[[A12]], %[[A2]] -// CHECK: %[[A23:.*]] = sparse_tensor.storage_specifier.set %[[A17]] ptr_mem_sz at 0 with %[[A21]] -// CHECK: %[[A25:.*]] = sparse_tensor.storage_specifier.set %[[A23]] dim_sz at 1 with %[[A7]] +// CHECK: %[[A23:.*]] = sparse_tensor.storage_specifier.set %[[A17]] pos_mem_sz at 0 with %[[A21]] +// CHECK: %[[A25:.*]] = sparse_tensor.storage_specifier.set %[[A23]] lvl_sz at 1 with %[[A7]] // CHECK: %[[A26:.*]], %[[A27:.*]] = sparse_tensor.push_back %[[A21]], %[[A20]], %[[A2]], %[[A1]] -// CHECK: %[[A29:.*]] = sparse_tensor.storage_specifier.set %[[A25]] ptr_mem_sz at 0 with %[[A27]] +// CHECK: %[[A29:.*]] = sparse_tensor.storage_specifier.set %[[A25]] pos_mem_sz at 0 with %[[A27]] // CHECK: %[[A30:.*]] = memref.alloca() : memref<2xindex> // CHECK: %[[A31:.*]] = memref.cast %[[A30]] : memref<2xindex> to memref // CHECK: memref.store %[[A1]], %[[A30]]{{\[}}%[[A2]]] : memref<2xindex> // CHECK: memref.store %[[A2]], %[[A30]]{{\[}}%[[A1]]] : memref<2xindex> // CHECK: %[[A32:.*]] = call @getSparseTensorReaderRead0F32(%[[A4]], %[[A31]], %[[A13]], %[[A14]]) // CHECK: memref.store %[[A9]], %[[A26]]{{\[}}%[[A1]]] : memref -// CHECK: 
%[[A34:.*]] = sparse_tensor.storage_specifier.set %[[A29]] idx_mem_sz at 0 with %[[A10]] +// CHECK: %[[A34:.*]] = sparse_tensor.storage_specifier.set %[[A29]] crd_mem_sz at 0 with %[[A10]] // CHECK: %[[A36:.*]] = sparse_tensor.storage_specifier.set %[[A34]] val_mem_sz with %[[A9]] // CHECK: call @delSparseTensorReader(%[[A4]]) : (!llvm.ptr) -> () // CHECK: return %[[A26]], %[[A13]], %[[A14]], %[[A36]] func.func @sparse_new_coo_permute_no(%arg0: !llvm.ptr) -> tensor { %0 = sparse_tensor.new %arg0 : !llvm.ptr to tensor return %0 : tensor -} \ No newline at end of file +} diff --git a/mlir/test/Dialect/SparseTensor/codegen_buffer_initialization.mlir b/mlir/test/Dialect/SparseTensor/codegen_buffer_initialization.mlir --- a/mlir/test/Dialect/SparseTensor/codegen_buffer_initialization.mlir +++ b/mlir/test/Dialect/SparseTensor/codegen_buffer_initialization.mlir @@ -17,12 +17,12 @@ // CHECK: %[[VAL_9:.*]] = memref.cast %[[VAL_8]] : memref<16xf64> to memref // CHECK: linalg.fill ins(%[[VAL_2]] : f64) outs(%[[VAL_8]] : memref<16xf64>) // CHECK: %[[VAL_10:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier -// CHECK: %[[VAL_12:.*]] = sparse_tensor.storage_specifier.set %[[VAL_10]] dim_sz at 0 with %[[VAL_0]] : !sparse_tensor.storage_specifier -// CHECK: %[[VAL_14:.*]] = sparse_tensor.storage_specifier.get %[[VAL_12]] ptr_mem_sz at 0 : !sparse_tensor.storage_specifier +// CHECK: %[[VAL_12:.*]] = sparse_tensor.storage_specifier.set %[[VAL_10]] lvl_sz at 0 with %[[VAL_0]] : !sparse_tensor.storage_specifier +// CHECK: %[[VAL_14:.*]] = sparse_tensor.storage_specifier.get %[[VAL_12]] pos_mem_sz at 0 : !sparse_tensor.storage_specifier // CHECK: %[[VAL_15:.*]], %[[VAL_17:.*]] = sparse_tensor.push_back %[[VAL_14]], %[[VAL_5]], %[[VAL_3]] : index, memref, index -// CHECK: %[[VAL_18:.*]] = sparse_tensor.storage_specifier.set %[[VAL_12]] ptr_mem_sz at 0 with %[[VAL_17]] : !sparse_tensor.storage_specifier +// CHECK: %[[VAL_18:.*]] = sparse_tensor.storage_specifier.set %[[VAL_12]] pos_mem_sz at 0 with %[[VAL_17]] : !sparse_tensor.storage_specifier // CHECK: %[[VAL_19:.*]], %[[VAL_21:.*]] = sparse_tensor.push_back %[[VAL_17]], %[[VAL_15]], %[[VAL_3]], %[[VAL_1]] : index, memref, index, index -// CHECK: %[[VAL_22:.*]] = sparse_tensor.storage_specifier.set %[[VAL_18]] ptr_mem_sz at 0 with %[[VAL_21]] : !sparse_tensor.storage_specifier +// CHECK: %[[VAL_22:.*]] = sparse_tensor.storage_specifier.set %[[VAL_18]] pos_mem_sz at 0 with %[[VAL_21]] : !sparse_tensor.storage_specifier // CHECK: return %[[VAL_19]], %[[VAL_7]], %[[VAL_9]], %[[VAL_22]] : memref, memref, memref, !sparse_tensor.storage_specifier func.func @sparse_alloc_sparse_vector(%arg0: index) -> tensor { %0 = bufferization.alloc_tensor(%arg0) : tensor diff --git a/mlir/test/Dialect/SparseTensor/conversion.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir --- a/mlir/test/Dialect/SparseTensor/conversion.mlir +++ b/mlir/test/Dialect/SparseTensor/conversion.mlir @@ -6,14 +6,14 @@ #SparseVector64 = #sparse_tensor.encoding<{ dimLevelType = ["compressed"], - pointerBitWidth = 64, - indexBitWidth = 64 + posWidth = 64, + crdWidth = 64 }> #SparseVector32 = #sparse_tensor.encoding<{ dimLevelType = ["compressed"], - pointerBitWidth = 32, - indexBitWidth = 32 + posWidth = 32, + crdWidth = 32 }> #CSR = #sparse_tensor.encoding<{ @@ -174,63 +174,63 @@ return %0 : tensor } -// CHECK-LABEL: func @sparse_pointers( +// CHECK-LABEL: func @sparse_positions( // CHECK-SAME: %[[A:.*]]: !llvm.ptr) // CHECK: %[[C:.*]] = arith.constant 0 : index -// CHECK: 
%[[T:.*]] = call @sparsePointers0(%[[A]], %[[C]]) : (!llvm.ptr, index) -> memref +// CHECK: %[[T:.*]] = call @sparsePositions0(%[[A]], %[[C]]) : (!llvm.ptr, index) -> memref // CHECK: return %[[T]] : memref -func.func @sparse_pointers(%arg0: tensor<128xf64, #SparseVector>) -> memref { - %0 = sparse_tensor.pointers %arg0 { dimension = 0 : index } : tensor<128xf64, #SparseVector> to memref +func.func @sparse_positions(%arg0: tensor<128xf64, #SparseVector>) -> memref { + %0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<128xf64, #SparseVector> to memref return %0 : memref } -// CHECK-LABEL: func @sparse_pointers64( +// CHECK-LABEL: func @sparse_positions64( // CHECK-SAME: %[[A:.*]]: !llvm.ptr) // CHECK: %[[C:.*]] = arith.constant 0 : index -// CHECK: %[[T:.*]] = call @sparsePointers64(%[[A]], %[[C]]) : (!llvm.ptr, index) -> memref +// CHECK: %[[T:.*]] = call @sparsePositions64(%[[A]], %[[C]]) : (!llvm.ptr, index) -> memref // CHECK: return %[[T]] : memref -func.func @sparse_pointers64(%arg0: tensor<128xf64, #SparseVector64>) -> memref { - %0 = sparse_tensor.pointers %arg0 { dimension = 0 : index } : tensor<128xf64, #SparseVector64> to memref +func.func @sparse_positions64(%arg0: tensor<128xf64, #SparseVector64>) -> memref { + %0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<128xf64, #SparseVector64> to memref return %0 : memref } -// CHECK-LABEL: func @sparse_pointers32( +// CHECK-LABEL: func @sparse_positions32( // CHECK-SAME: %[[A:.*]]: !llvm.ptr) // CHECK: %[[C:.*]] = arith.constant 0 : index -// CHECK: %[[T:.*]] = call @sparsePointers32(%[[A]], %[[C]]) : (!llvm.ptr, index) -> memref +// CHECK: %[[T:.*]] = call @sparsePositions32(%[[A]], %[[C]]) : (!llvm.ptr, index) -> memref // CHECK: return %[[T]] : memref -func.func @sparse_pointers32(%arg0: tensor<128xf64, #SparseVector32>) -> memref { - %0 = sparse_tensor.pointers %arg0 { dimension = 0 : index } : tensor<128xf64, #SparseVector32> to memref +func.func @sparse_positions32(%arg0: tensor<128xf64, #SparseVector32>) -> memref { + %0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<128xf64, #SparseVector32> to memref return %0 : memref } // CHECK-LABEL: func @sparse_indices( // CHECK-SAME: %[[A:.*]]: !llvm.ptr) // CHECK: %[[C:.*]] = arith.constant 0 : index -// CHECK: %[[T:.*]] = call @sparseIndices0(%[[A]], %[[C]]) : (!llvm.ptr, index) -> memref +// CHECK: %[[T:.*]] = call @sparseCoordinates0(%[[A]], %[[C]]) : (!llvm.ptr, index) -> memref // CHECK: return %[[T]] : memref func.func @sparse_indices(%arg0: tensor<128xf64, #SparseVector>) -> memref { - %0 = sparse_tensor.indices %arg0 { dimension = 0 : index } : tensor<128xf64, #SparseVector> to memref + %0 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<128xf64, #SparseVector> to memref return %0 : memref } // CHECK-LABEL: func @sparse_indices64( // CHECK-SAME: %[[A:.*]]: !llvm.ptr) // CHECK: %[[C:.*]] = arith.constant 0 : index -// CHECK: %[[T:.*]] = call @sparseIndices64(%[[A]], %[[C]]) : (!llvm.ptr, index) -> memref +// CHECK: %[[T:.*]] = call @sparseCoordinates64(%[[A]], %[[C]]) : (!llvm.ptr, index) -> memref // CHECK: return %[[T]] : memref func.func @sparse_indices64(%arg0: tensor<128xf64, #SparseVector64>) -> memref { - %0 = sparse_tensor.indices %arg0 { dimension = 0 : index } : tensor<128xf64, #SparseVector64> to memref + %0 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<128xf64, #SparseVector64> to memref return %0 : memref } // CHECK-LABEL: func @sparse_indices32( // CHECK-SAME: %[[A:.*]]: !llvm.ptr) 
// CHECK: %[[C:.*]] = arith.constant 0 : index -// CHECK: %[[T:.*]] = call @sparseIndices32(%[[A]], %[[C]]) : (!llvm.ptr, index) -> memref +// CHECK: %[[T:.*]] = call @sparseCoordinates32(%[[A]], %[[C]]) : (!llvm.ptr, index) -> memref // CHECK: return %[[T]] : memref func.func @sparse_indices32(%arg0: tensor<128xf64, #SparseVector32>) -> memref { - %0 = sparse_tensor.indices %arg0 { dimension = 0 : index } : tensor<128xf64, #SparseVector32> to memref + %0 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<128xf64, #SparseVector32> to memref return %0 : memref } diff --git a/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir b/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir --- a/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir +++ b/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir @@ -192,10 +192,10 @@ // CHECK-RWT: sparse_tensor.yield %[[L0T2]] // CHECK-RWT: } // CHECK-RWT: %[[COO:.*]] = sparse_tensor.load %[[T1]] hasInserts -// CHECK-RWT: %[[NNZ:.*]] = sparse_tensor.number_of_entries %[[COO]] +// CHECK-RWT: %[[NSE:.*]] = sparse_tensor.number_of_entries %[[COO]] // CHECK-RWT: %[[V:.*]] = sparse_tensor.values %[[COO]] -// CHECK-RWT: %[[I:.*]] = sparse_tensor.indices_buffer %[[COO]] -// CHECK-RWT: sparse_tensor.sort_coo hybrid_quick_sort %[[NNZ]], %[[I]] jointly %[[V]] {nx = 2 : index, ny = 0 : index} +// CHECK-RWT: %[[I:.*]] = sparse_tensor.coordinates_buffer %[[COO]] +// CHECK-RWT: sparse_tensor.sort_coo hybrid_quick_sort %[[NSE]], %[[I]] jointly %[[V]] {nx = 2 : index, ny = 0 : index} // CHECK-RWT: %[[T3:.*]] = bufferization.alloc_tensor() // CHECK-RWT: %[[T4:.*]] = sparse_tensor.foreach in %[[COO]] init(%[[T3]]) // CHECK-RWT: ^bb0(%[[L1I0:.*]]: index, %[[L1I1:.*]]: index, %[[L1V:.*]]: f32, %[[L1T:.*]]: tensor diff --git a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir --- a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir +++ b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir @@ -11,14 +11,14 @@ #SparseVector64 = #sparse_tensor.encoding<{ dimLevelType = ["compressed"], - pointerBitWidth = 64, - indexBitWidth = 64 + posWidth = 64, + crdWidth = 64 }> #SparseVector32 = #sparse_tensor.encoding<{ dimLevelType = ["compressed"], - pointerBitWidth = 32, - indexBitWidth = 32 + posWidth = 32, + crdWidth = 32 }> #SparseVector = #sparse_tensor.encoding<{ @@ -107,14 +107,14 @@ #SparseSingleton64 = #sparse_tensor.encoding<{ dimLevelType = ["singleton"], - pointerBitWidth = 64, - indexBitWidth = 64 + posWidth = 64, + crdWidth = 64 }> #SparseSingleton32 = #sparse_tensor.encoding<{ dimLevelType = ["singleton"], - pointerBitWidth = 32, - indexBitWidth = 32 + posWidth = 32, + crdWidth = 32 }> // CHECK-COO-LABEL: func @sparse_convert_singleton( diff --git a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse_element.mlir b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse_element.mlir --- a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse_element.mlir +++ b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse_element.mlir @@ -2,14 +2,14 @@ #SparseVector64 = #sparse_tensor.encoding<{ dimLevelType = ["compressed"], - pointerBitWidth = 64, - indexBitWidth = 64 + posWidth = 64, + crdWidth = 64 }> #SparseVector32 = #sparse_tensor.encoding<{ dimLevelType = ["compressed"], - pointerBitWidth = 32, - indexBitWidth = 32 + posWidth = 32, + crdWidth = 32 }> diff --git a/mlir/test/Dialect/SparseTensor/fold.mlir b/mlir/test/Dialect/SparseTensor/fold.mlir --- 
a/mlir/test/Dialect/SparseTensor/fold.mlir +++ b/mlir/test/Dialect/SparseTensor/fold.mlir @@ -22,13 +22,13 @@ // CHECK-LABEL: func @sparse_dce_getters( // CHECK-SAME: %[[A:.*]]: tensor<64xf32, #sparse_tensor.encoding<{{{.*}}}>>) -// CHECK-NOT: sparse_tensor.pointers -// CHECK-NOT: sparse_tensor.indices +// CHECK-NOT: sparse_tensor.positions +// CHECK-NOT: sparse_tensor.coordinates // CHECK-NOT: sparse_tensor.values // CHECK: return func.func @sparse_dce_getters(%arg0: tensor<64xf32, #SparseVector>) { - %0 = sparse_tensor.pointers %arg0 { dimension = 0 : index } : tensor<64xf32, #SparseVector> to memref - %1 = sparse_tensor.indices %arg0 { dimension = 0 : index } : tensor<64xf32, #SparseVector> to memref + %0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<64xf32, #SparseVector> to memref + %1 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<64xf32, #SparseVector> to memref %2 = sparse_tensor.values %arg0 : tensor<64xf32, #SparseVector> to memref return } @@ -54,11 +54,11 @@ // CHECK-NOT: sparse_tensor.storage_specifier.get // CHECK: return %[[A1]] func.func @sparse_get_specifier_dce_fold(%arg0: !sparse_tensor.storage_specifier<#SparseVector>, %arg1: index, %arg2: index) -> index { - %0 = sparse_tensor.storage_specifier.set %arg0 dim_sz at 0 with %arg1 + %0 = sparse_tensor.storage_specifier.set %arg0 lvl_sz at 0 with %arg1 : !sparse_tensor.storage_specifier<#SparseVector> - %1 = sparse_tensor.storage_specifier.set %0 ptr_mem_sz at 0 with %arg2 + %1 = sparse_tensor.storage_specifier.set %0 pos_mem_sz at 0 with %arg2 : !sparse_tensor.storage_specifier<#SparseVector> - %2 = sparse_tensor.storage_specifier.get %1 dim_sz at 0 + %2 = sparse_tensor.storage_specifier.get %1 lvl_sz at 0 : !sparse_tensor.storage_specifier<#SparseVector> return %2 : index } diff --git a/mlir/test/Dialect/SparseTensor/invalid.mlir b/mlir/test/Dialect/SparseTensor/invalid.mlir --- a/mlir/test/Dialect/SparseTensor/invalid.mlir +++ b/mlir/test/Dialect/SparseTensor/invalid.mlir @@ -8,135 +8,135 @@ // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], indexBitWidth=32}> +#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], crdWidth=32}> -func.func @non_static_pack_ret(%data: tensor<6xf64>, %index: tensor<6x1xi32>) +func.func @non_static_pack_ret(%values: tensor<6xf64>, %coordinates: tensor<6x1xi32>) -> tensor { - // expected-error@+1 {{all input types must be statically shaped}} - %0 = sparse_tensor.pack %data, %index : tensor<6xf64>, tensor<6x1xi32> - to tensor + // expected-error@+1 {{the sparse-tensor must have static shape}} + %0 = sparse_tensor.pack %values, %coordinates + : tensor<6xf64>, tensor<6x1xi32> to tensor return %0 : tensor } // ----- -#DenseVector = #sparse_tensor.encoding<{dimLevelType = ["dense"], indexBitWidth=32}> +#DenseVector = #sparse_tensor.encoding<{dimLevelType = ["dense"], crdWidth=32}> -func.func @invalid_pack_dense(%data: tensor<6xf64>, %index: tensor<6x1xi32>) +func.func @invalid_pack_dense(%values: tensor<6xf64>, %coordinates: tensor<6x1xi32>) -> tensor<100xf64, #DenseVector> { - // expected-error@+1 {{must operate on a COO tensor}} - %0 = sparse_tensor.pack %data, %index : tensor<6xf64>, tensor<6x1xi32> - to tensor<100xf64, #DenseVector> + // expected-error@+1 {{the sparse-tensor must have a COO type}} + %0 = sparse_tensor.pack %values, %coordinates + : tensor<6xf64>, tensor<6x1xi32> to tensor<100xf64, #DenseVector> return %0 : tensor<100xf64, #DenseVector> } // ----- -#SparseVector = 
#sparse_tensor.encoding<{dimLevelType = ["compressed"], indexBitWidth=32}> +#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], crdWidth=32}> -func.func @invalid_pack_data(%data: tensor<6x1xf64>, %index: tensor<6x1xi32>) +func.func @invalid_pack_data(%values: tensor<6x1xf64>, %coordinates: tensor<6x1xi32>) -> tensor<100xf64, #SparseVector> { // expected-error@+1 {{'sparse_tensor.pack' op operand #0 must be 1D tensor of any type values}} - %0 = sparse_tensor.pack %data, %index : tensor<6x1xf64>, tensor<6x1xi32> - to tensor<100xf64, #SparseVector> + %0 = sparse_tensor.pack %values, %coordinates + : tensor<6x1xf64>, tensor<6x1xi32> to tensor<100xf64, #SparseVector> return %0 : tensor<100xf64, #SparseVector> } // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], indexBitWidth=32}> +#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], crdWidth=32}> -func.func @invalid_pack_type(%data: tensor<6xf64>, %index: tensor<6x1xi32>) +func.func @invalid_pack_type(%values: tensor<6xf64>, %coordinates: tensor<6x1xi32>) -> tensor<100xf32, #SparseVector> { - // expected-error@+1 {{unmatched type between input and output}} - %0 = sparse_tensor.pack %data, %index : tensor<6xf64>, tensor<6x1xi32> - to tensor<100xf32, #SparseVector> + // expected-error@+1 {{input/output element-types don't match}} + %0 = sparse_tensor.pack %values, %coordinates + : tensor<6xf64>, tensor<6x1xi32> to tensor<100xf32, #SparseVector> return %0 : tensor<100xf32, #SparseVector> } // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], indexBitWidth=32}> +#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], crdWidth=32}> -func.func @invalid_pack_type(%data: tensor<5xf64>, %index: tensor<6x1xi32>) +func.func @invalid_pack_type(%values: tensor<5xf64>, %coordinates: tensor<6x1xi32>) -> tensor<100xf64, #SparseVector> { - // expected-error@+1 {{unmatched number of elements in data and indices}} - %0 = sparse_tensor.pack %data, %index : tensor<5xf64>, tensor<6x1xi32> - to tensor<100xf64, #SparseVector> + // expected-error@+1 {{values/coordinates number-of-elements don't match}} + %0 = sparse_tensor.pack %values, %coordinates + : tensor<5xf64>, tensor<6x1xi32> to tensor<100xf64, #SparseVector> return %0 : tensor<100xf64, #SparseVector> } // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], indexBitWidth=32}> +#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], crdWidth=32}> -func.func @invalid_pack_type(%data: tensor<6xf64>, %index: tensor<6x2xi32>) +func.func @invalid_pack_type(%values: tensor<6xf64>, %coordinates: tensor<6x2xi32>) -> tensor<100xf64, #SparseVector> { - // expected-error@+1 {{unmatched rank between input and output}} - %0 = sparse_tensor.pack %data, %index : tensor<6xf64>, tensor<6x2xi32> - to tensor<100xf64, #SparseVector> + // expected-error@+1 {{input/output level-ranks don't match}} + %0 = sparse_tensor.pack %values, %coordinates + : tensor<6xf64>, tensor<6x2xi32> to tensor<100xf64, #SparseVector> return %0 : tensor<100xf64, #SparseVector> } // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], indexBitWidth=32}> +#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], crdWidth=32}> func.func @invalid_unpack_type(%sp: tensor<100xf32, #SparseVector>) -> (tensor<6xf64>, tensor<6x1xi32>, i32) { - // expected-error@+1 {{unmatched type between input and output}} - %d, %i, %n = sparse_tensor.unpack %sp : tensor<100xf32, 
#SparseVector> - to tensor<6xf64>, tensor<6x1xi32>, i32 - return %d, %i, %n : tensor<6xf64>, tensor<6x1xi32>, i32 + // expected-error@+1 {{input/output element-types don't match}} + %values, %coordinates, %nse = sparse_tensor.unpack %sp + : tensor<100xf32, #SparseVector> to tensor<6xf64>, tensor<6x1xi32>, i32 + return %values, %coordinates, %nse : tensor<6xf64>, tensor<6x1xi32>, i32 } // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], indexBitWidth=32}> +#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], crdWidth=32}> func.func @invalid_unpack_type(%sp: tensor<100xf32, #SparseVector>) -> (tensor<5xf32>, tensor<6x1xi32>, i32) { - // expected-error@+1 {{unmatched number of elements in data and indices}} - %d, %i, %n = sparse_tensor.unpack %sp : tensor<100xf32, #SparseVector> - to tensor<5xf32>, tensor<6x1xi32>, i32 - return %d, %i, %n : tensor<5xf32>, tensor<6x1xi32>, i32 + // expected-error@+1 {{values/coordinates number-of-elements don't match}} + %values, %coordinates, %nse = sparse_tensor.unpack %sp + : tensor<100xf32, #SparseVector> to tensor<5xf32>, tensor<6x1xi32>, i32 + return %values, %coordinates, %nse : tensor<5xf32>, tensor<6x1xi32>, i32 } // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], indexBitWidth=32}> +#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], crdWidth=32}> func.func @invalid_unpack_type(%sp: tensor<100xf32, #SparseVector>) -> (tensor<6xf32>, tensor<6x2xi32>, i32) { - // expected-error@+1 {{unmatched rank between input and output}} - %d, %i, %n = sparse_tensor.unpack %sp : tensor<100xf32, #SparseVector> - to tensor<6xf32>, tensor<6x2xi32>, i32 - return %d, %i, %n : tensor<6xf32>, tensor<6x2xi32>, i32 + // expected-error@+1 {{input/output level-ranks don't match}} + %values, %coordinates, %nse = sparse_tensor.unpack %sp + : tensor<100xf32, #SparseVector> to tensor<6xf32>, tensor<6x2xi32>, i32 + return %values, %coordinates, %nse : tensor<6xf32>, tensor<6x2xi32>, i32 } // ----- -func.func @invalid_pointers_dense(%arg0: tensor<128xf64>) -> memref { - // expected-error@+1 {{'sparse_tensor.pointers' op operand #0 must be sparse tensor of any type values, but got 'tensor<128xf64>'}} - %0 = sparse_tensor.pointers %arg0 { dimension = 0 : index } : tensor<128xf64> to memref +func.func @invalid_positions_dense(%arg0: tensor<128xf64>) -> memref { + // expected-error@+1 {{'sparse_tensor.positions' op operand #0 must be sparse tensor of any type values, but got 'tensor<128xf64>'}} + %0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<128xf64> to memref return %0 : memref } // ----- -func.func @invalid_pointers_unranked(%arg0: tensor<*xf64>) -> memref { - // expected-error@+1 {{'sparse_tensor.pointers' op operand #0 must be sparse tensor of any type values, but got 'tensor<*xf64>'}} - %0 = sparse_tensor.pointers %arg0 { dimension = 0 : index } : tensor<*xf64> to memref +func.func @invalid_positions_unranked(%arg0: tensor<*xf64>) -> memref { + // expected-error@+1 {{'sparse_tensor.positions' op operand #0 must be sparse tensor of any type values, but got 'tensor<*xf64>'}} + %0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<*xf64> to memref return %0 : memref } // ----- -#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], pointerBitWidth=32}> +#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], posWidth=32}> -func.func @mismatch_pointers_types(%arg0: tensor<128xf64, #SparseVector>) -> memref { - // 
expected-error@+1 {{unexpected type for pointers}} - %0 = sparse_tensor.pointers %arg0 { dimension = 0 : index } : tensor<128xf64, #SparseVector> to memref +func.func @mismatch_positions_types(%arg0: tensor<128xf64, #SparseVector>) -> memref { + // expected-error@+1 {{unexpected type for positions}} + %0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<128xf64, #SparseVector> to memref return %0 : memref } @@ -144,25 +144,25 @@ #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> -func.func @pointers_oob(%arg0: tensor<128xf64, #SparseVector>) -> memref { - // expected-error@+1 {{requested pointers dimension out of bounds}} - %0 = sparse_tensor.pointers %arg0 { dimension = 1 : index } : tensor<128xf64, #SparseVector> to memref +func.func @positions_oob(%arg0: tensor<128xf64, #SparseVector>) -> memref { + // expected-error@+1 {{requested level is out of bounds}} + %0 = sparse_tensor.positions %arg0 { level = 1 : index } : tensor<128xf64, #SparseVector> to memref return %0 : memref } // ----- func.func @invalid_indices_dense(%arg0: tensor<10x10xi32>) -> memref { - // expected-error@+1 {{'sparse_tensor.indices' op operand #0 must be sparse tensor of any type values, but got 'tensor<10x10xi32>'}} - %0 = sparse_tensor.indices %arg0 { dimension = 1 : index } : tensor<10x10xi32> to memref + // expected-error@+1 {{'sparse_tensor.coordinates' op operand #0 must be sparse tensor of any type values, but got 'tensor<10x10xi32>'}} + %0 = sparse_tensor.coordinates %arg0 { level = 1 : index } : tensor<10x10xi32> to memref return %0 : memref } // ----- func.func @invalid_indices_unranked(%arg0: tensor<*xf64>) -> memref { - // expected-error@+1 {{'sparse_tensor.indices' op operand #0 must be sparse tensor of any type values, but got 'tensor<*xf64>'}} - %0 = sparse_tensor.indices %arg0 { dimension = 0 : index } : tensor<*xf64> to memref + // expected-error@+1 {{'sparse_tensor.coordinates' op operand #0 must be sparse tensor of any type values, but got 'tensor<*xf64>'}} + %0 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<*xf64> to memref return %0 : memref } @@ -171,8 +171,8 @@ #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> func.func @mismatch_indices_types(%arg0: tensor) -> memref { - // expected-error@+1 {{unexpected type for indices}} - %0 = sparse_tensor.indices %arg0 { dimension = 0 : index } : tensor to memref + // expected-error@+1 {{unexpected type for coordinates}} + %0 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor to memref return %0 : memref } @@ -181,8 +181,8 @@ #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> func.func @indices_oob(%arg0: tensor<128xf64, #SparseVector>) -> memref { - // expected-error@+1 {{requested indices dimension out of bounds}} - %0 = sparse_tensor.indices %arg0 { dimension = 1 : index } : tensor<128xf64, #SparseVector> to memref + // expected-error@+1 {{requested level is out of bounds}} + %0 = sparse_tensor.coordinates %arg0 { level = 1 : index } : tensor<128xf64, #SparseVector> to memref return %0 : memref } @@ -200,7 +200,7 @@ func.func @indices_buffer_noncoo(%arg0: tensor<128xf64, #SparseVector>) -> memref { // expected-error@+1 {{expected sparse tensor with a COO region}} - %0 = sparse_tensor.indices_buffer %arg0 : tensor<128xf64, #SparseVector> to memref + %0 = sparse_tensor.coordinates_buffer %arg0 : tensor<128xf64, #SparseVector> to memref return %0 : memref } @@ -208,7 +208,7 @@ func.func @indices_buffer_dense(%arg0: tensor<1024xf32>) -> memref { 
// expected-error@+1 {{must be sparse tensor of any type values}}
- %0 = sparse_tensor.indices_buffer %arg0 : tensor<1024xf32> to memref
+ %0 = sparse_tensor.coordinates_buffer %arg0 : tensor<1024xf32> to memref
return %0 : memref
}
@@ -265,7 +265,7 @@
func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>) -> index {
// expected-error@+1 {{missing level argument}}
- %0 = sparse_tensor.storage_specifier.get %arg0 idx_mem_sz
+ %0 = sparse_tensor.storage_specifier.get %arg0 crd_mem_sz
: !sparse_tensor.storage_specifier<#SparseVector>
return %0 : index
}
@@ -275,8 +275,8 @@
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>) -> index {
- // expected-error@+1 {{requested level out of bound}}
- %0 = sparse_tensor.storage_specifier.get %arg0 dim_sz at 1
+ // expected-error@+1 {{requested level is out of bounds}}
+ %0 = sparse_tensor.storage_specifier.get %arg0 lvl_sz at 1
: !sparse_tensor.storage_specifier<#SparseVector>
return %0 : index
}
@@ -286,8 +286,8 @@
#COO = #sparse_tensor.encoding<{dimLevelType = ["compressed-nu", "singleton"]}>
func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#COO>) -> index {
- // expected-error@+1 {{requested pointer memory size on a singleton level}}
- %0 = sparse_tensor.storage_specifier.get %arg0 ptr_mem_sz at 1
+ // expected-error@+1 {{requested position memory size on a singleton level}}
+ %0 = sparse_tensor.storage_specifier.get %arg0 pos_mem_sz at 1
: !sparse_tensor.storage_specifier<#COO>
return %0 : index
}
@@ -313,7 +313,7 @@
#CSR = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}>
func.func @sparse_wrong_arity_insert(%arg0: tensor<128x64xf64, #CSR>, %arg1: index, %arg2: f64) {
- // expected-error@+1 {{'sparse_tensor.insert' op incorrect number of indices}}
+ // expected-error@+1 {{'sparse_tensor.insert' op incorrect number of coordinates}}
sparse_tensor.insert %arg2 into %arg0[%arg1] : tensor<128x64xf64, #CSR>
return
}
@@ -368,7 +368,7 @@
%arg3: index,
%arg4: tensor<8x8xf64, #CSR>,
%arg5: index) {
- // expected-error@+1 {{'sparse_tensor.compress' op incorrect number of indices}}
+ // expected-error@+1 {{'sparse_tensor.compress' op incorrect number of coordinates}}
sparse_tensor.compress %arg0, %arg1, %arg2, %arg3 into %arg4[%arg5,%arg5]
: memref, memref, memref, tensor<8x8xf64, #CSR>
return
diff --git a/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
--- a/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
@@ -42,22 +42,22 @@
// -----
-#a = #sparse_tensor.encoding<{pointerBitWidth = "x"}> // expected-error {{expected an integral pointer bitwidth}}
+#a = #sparse_tensor.encoding<{posWidth = "x"}> // expected-error {{expected an integral position bitwidth}}
func.func private @tensor_no_int_ptr(%arg0: tensor<16x32xf32, #a>) -> ()
// -----
-#a = #sparse_tensor.encoding<{pointerBitWidth = 42}> // expected-error {{unexpected pointer bitwidth: 42}}
+#a = #sparse_tensor.encoding<{posWidth = 42}> // expected-error {{unexpected position bitwidth: 42}}
func.func private @tensor_invalid_int_ptr(%arg0: tensor<16x32xf32, #a>) -> ()
// -----
-#a = #sparse_tensor.encoding<{indexBitWidth = "not really"}> // expected-error {{expected an integral index bitwidth}}
+#a = #sparse_tensor.encoding<{crdWidth = "not really"}> // expected-error {{expected an integral index bitwidth}}
func.func private @tensor_no_int_index(%arg0: tensor<16x32xf32, #a>) -> ()
// -----
-#a = #sparse_tensor.encoding<{indexBitWidth = 128}> // expected-error {{unexpected index bitwidth: 128}}
+#a = #sparse_tensor.encoding<{crdWidth = 128}> // expected-error {{unexpected coordinate bitwidth: 128}}
func.func private @tensor_invalid_int_index(%arg0: tensor<16x32xf32, #a>) -> ()
// -----
diff --git a/mlir/test/Dialect/SparseTensor/roundtrip.mlir b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
--- a/mlir/test/Dialect/SparseTensor/roundtrip.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
@@ -13,7 +13,7 @@
// -----
-#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], indexBitWidth=32}>
+#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], crdWidth=32}>
// CHECK-LABEL: func @sparse_pack(
// CHECK-SAME: %[[D:.*]]: tensor<6xf64>,
@@ -29,7 +29,7 @@
// -----
-#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], indexBitWidth=32}>
+#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], crdWidth=32}>
// CHECK-LABEL: func @sparse_unpack(
// CHECK-SAME: %[[T:.*]]: tensor<100xf64, #
@@ -85,12 +85,12 @@
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
-// CHECK-LABEL: func @sparse_pointers(
+// CHECK-LABEL: func @sparse_positions(
// CHECK-SAME: %[[A:.*]]: tensor<128xf64, #{{.*}}>)
-// CHECK: %[[T:.*]] = sparse_tensor.pointers %[[A]] {dimension = 0 : index} : tensor<128xf64, #{{.*}}> to memref
+// CHECK: %[[T:.*]] = sparse_tensor.positions %[[A]] {level = 0 : index} : tensor<128xf64, #{{.*}}> to memref
// CHECK: return %[[T]] : memref
-func.func @sparse_pointers(%arg0: tensor<128xf64, #SparseVector>) -> memref {
- %0 = sparse_tensor.pointers %arg0 {dimension = 0 : index} : tensor<128xf64, #SparseVector> to memref
+func.func @sparse_positions(%arg0: tensor<128xf64, #SparseVector>) -> memref {
+ %0 = sparse_tensor.positions %arg0 {level = 0 : index} : tensor<128xf64, #SparseVector> to memref
return %0 : memref
}
@@ -100,10 +100,10 @@
// CHECK-LABEL: func @sparse_indices_buffer(
// CHECK-SAME: %[[A:.*]]: tensor)
-// CHECK: %[[T:.*]] = sparse_tensor.indices_buffer %[[A]] : tensor to memref
+// CHECK: %[[T:.*]] = sparse_tensor.coordinates_buffer %[[A]] : tensor to memref
// CHECK: return %[[T]] : memref
func.func @sparse_indices_buffer(%arg0: tensor) -> memref {
- %0 = sparse_tensor.indices_buffer %arg0 : tensor to memref
+ %0 = sparse_tensor.coordinates_buffer %arg0 : tensor to memref
return %0 : memref
}
@@ -113,10 +113,10 @@
// CHECK-LABEL: func @sparse_indices(
// CHECK-SAME: %[[A:.*]]: tensor<128xf64, #{{.*}}>)
-// CHECK: %[[T:.*]] = sparse_tensor.indices %[[A]] {dimension = 0 : index} : tensor<128xf64, #{{.*}}> to memref
+// CHECK: %[[T:.*]] = sparse_tensor.coordinates %[[A]] {level = 0 : index} : tensor<128xf64, #{{.*}}> to memref
// CHECK: return %[[T]] : memref
func.func @sparse_indices(%arg0: tensor<128xf64, #SparseVector>) -> memref {
- %0 = sparse_tensor.indices %arg0 {dimension = 0 : index} : tensor<128xf64, #SparseVector> to memref
+ %0 = sparse_tensor.coordinates %arg0 {level = 0 : index} : tensor<128xf64, #SparseVector> to memref
return %0 : memref
}
@@ -183,10 +183,10 @@
// CHECK-LABEL: func @sparse_get_md(
// CHECK-SAME: %[[A:.*]]: !sparse_tensor.storage_specifier<#{{.*}}>
-// CHECK: %[[T:.*]] = sparse_tensor.storage_specifier.get %[[A]] dim_sz at 0
+// CHECK: %[[T:.*]] = sparse_tensor.storage_specifier.get %[[A]] lvl_sz at 0
// CHECK: return %[[T]] : index
func.func @sparse_get_md(%arg0: 
!sparse_tensor.storage_specifier<#SparseVector>) -> index { - %0 = sparse_tensor.storage_specifier.get %arg0 dim_sz at 0 + %0 = sparse_tensor.storage_specifier.get %arg0 lvl_sz at 0 : !sparse_tensor.storage_specifier<#SparseVector> return %0 : index } @@ -198,11 +198,11 @@ // CHECK-LABEL: func @sparse_set_md( // CHECK-SAME: %[[A:.*]]: !sparse_tensor.storage_specifier<#{{.*}}>, // CHECK-SAME: %[[I:.*]]: index) -// CHECK: %[[T:.*]] = sparse_tensor.storage_specifier.set %[[A]] dim_sz at 0 with %[[I]] +// CHECK: %[[T:.*]] = sparse_tensor.storage_specifier.set %[[A]] lvl_sz at 0 with %[[I]] // CHECK: return %[[T]] : !sparse_tensor.storage_specifier<#{{.*}}> func.func @sparse_set_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>, %arg1: index) -> !sparse_tensor.storage_specifier<#SparseVector> { - %0 = sparse_tensor.storage_specifier.set %arg0 dim_sz at 0 with %arg1 + %0 = sparse_tensor.storage_specifier.set %arg0 lvl_sz at 0 with %arg1 : !sparse_tensor.storage_specifier<#SparseVector> return %0 : !sparse_tensor.storage_specifier<#SparseVector> } diff --git a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir --- a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir +++ b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir @@ -9,12 +9,12 @@ #CSR = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(i,j) -> (i,j)>, - pointerBitWidth = 64, - indexBitWidth = 64 + posWidth = 64, + crdWidth = 64 }> // CHECK-LABEL: func private @sparse_csr( -// CHECK-SAME: tensor>) +// CHECK-SAME: tensor>) func.func private @sparse_csr(tensor) // ----- @@ -22,8 +22,8 @@ #CSC = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(i,j) -> (j,i)>, - pointerBitWidth = 0, - indexBitWidth = 0 + posWidth = 0, + crdWidth = 0 }> // CHECK-LABEL: func private @sparse_csc( @@ -35,12 +35,12 @@ #DCSC = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(i,j) -> (j,i)>, - pointerBitWidth = 0, - indexBitWidth = 64 + posWidth = 0, + crdWidth = 64 }> // CHECK-LABEL: func private @sparse_dcsc( -// CHECK-SAME: tensor (d1, d0)>, indexBitWidth = 64 }>>) +// CHECK-SAME: tensor (d1, d0)>, crdWidth = 64 }>>) func.func private @sparse_dcsc(tensor) // ----- diff --git a/mlir/test/Dialect/SparseTensor/sorted_coo.mlir b/mlir/test/Dialect/SparseTensor/sorted_coo.mlir --- a/mlir/test/Dialect/SparseTensor/sorted_coo.mlir +++ b/mlir/test/Dialect/SparseTensor/sorted_coo.mlir @@ -41,7 +41,7 @@ // CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 2.000000e+00 : f32 -// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor> to memref // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref // CHECK: %[[VAL_6:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_1]]] : memref // CHECK: %[[VAL_7:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref @@ -70,9 +70,9 @@ // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf64>) -> tensor<32xf64> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ 
"compressed-nu", "singleton" ] }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref // CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64> // CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64> @@ -112,13 +112,13 @@ // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0.000000e+00 : f64 // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> +// CHECK-DAG: %[[VAL_10:.*]] = 
sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref // CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x64xf64> // CHECK-DAG: linalg.fill ins(%[[VAL_3]] : f64) outs(%[[VAL_14]] : memref<32x64xf64>) diff --git a/mlir/test/Dialect/SparseTensor/sparse_1d.mlir b/mlir/test/Dialect/SparseTensor/sparse_1d.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_1d.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_1d.mlir @@ -110,8 +110,8 @@ // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant true // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref // CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref @@ -162,8 +162,8 @@ // CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] // CHECK-DAG: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref @@ -202,8 +202,8 @@ // CHECK-SAME: 
%[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_9]] : memref<32xf32>) @@ -310,8 +310,8 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant true // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : memref<32xf32> -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_12]] : memref<32xf32>) @@ -367,8 +367,8 @@ // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_5:.*]] = bufferization.to_memref %[[VAL_0]] : memref<32xf32> -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : 
f32) outs(%[[VAL_10]] : memref<32xf32>) @@ -403,8 +403,8 @@ // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant true // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf32> // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] @@ -460,8 +460,8 @@ // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf32> // CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] @@ -495,11 +495,11 @@ // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]] 
{dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_12]] : memref<32xf32>) @@ -578,11 +578,11 @@ // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_12]] : memref<32xf32>) @@ -640,11 +640,11 @@ // CHECK-SAME: %[[VAL_3:.*3]]: tensor<16xf32>) -> tensor<16xf32> { // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to 
memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_3]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_13]] : memref<16xf32>) @@ -733,11 +733,11 @@ // CHECK-SAME: %[[VAL_3:.*3]]: tensor<16xf32>) -> tensor<16xf32> { // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_3]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) 
outs(%[[VAL_13]] : memref<16xf32>) @@ -832,7 +832,7 @@ // CHECK-SAME: %[[VAL_1:.*]]: tensor) -> tensor { // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor> to memref // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref // CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_1]] : memref // CHECK-DAG: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref @@ -874,11 +874,11 @@ // CHECK-SAME: %[[VAL_2:.*2]]: tensor) -> tensor { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref // CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_11]][] : memref @@ -981,12 +981,12 @@ // CHECK-SAME: %[[VAL_3:.*3]]: tensor) -> tensor { // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ 
dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_2]] {dimension = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_2]] {dimension = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 0 : index} : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_3]] : memref // CHECK-DAG: %[[VAL_15:.*]] = memref.load %[[VAL_13]][] : memref @@ -1099,12 +1099,12 @@ // CHECK-DAG: %[[VAL_6:.*]] = arith.constant true // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref -// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.pointers %[[VAL_3]] {dimension = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.indices %[[VAL_3]] {dimension = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_3]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.coordinates %[[VAL_3]] {level = 0 : index} : tensor> to memref // CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.values %[[VAL_3]] : tensor> to memref // CHECK-DAG: %[[VAL_16:.*]] = tensor.dim %[[VAL_0]], %[[VAL_5]] : tensor // CHECK-DAG: %[[VAL_18:.*]] = bufferization.to_memref %[[VAL_4]] @@ -1274,14 +1274,14 @@ // CHECK-SAME: %[[VAL_3:.*3]]: tensor) -> tensor { // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref -// CHECK-DAG: 
%[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_2]] {dimension = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_2]] {dimension = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 0 : index} : tensor> to memref // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_2]] : tensor> to memref // CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_3]] : memref // CHECK-DAG: %[[VAL_17:.*]] = memref.load %[[VAL_15]][] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir @@ -97,8 +97,8 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant true // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32> // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32> @@ -158,8 +158,8 @@ // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 32 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, 
#sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32> // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32> @@ -199,8 +199,8 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant true // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32> // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32> @@ -265,8 +265,8 @@ // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 16 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32> // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32> @@ -307,10 +307,10 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant true // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> 
to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32> // CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32> @@ -398,10 +398,10 @@ // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf32>) -> tensor<32x16xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32> // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32> @@ -441,15 +441,15 @@ // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 
0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_16:.*]] = 
bufferization.to_memref %[[VAL_2]] : memref<32x16xf32> // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_16]] : memref<32x16xf32>) @@ -605,15 +605,15 @@ // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ 
dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32> // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_16]] : memref<32x16xf32>) @@ -704,11 +704,11 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant true // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32> // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_15]] : memref<32x16xf32>) @@ -808,11 +808,11 @@ // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 16 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_8:.*]] 
= sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32> // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_13]] : memref<32x16xf32>) @@ -864,8 +864,8 @@ // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 16 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<16x32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<16x32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<16x32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<16x32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16x32xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf32> // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<16xf32> @@ -914,7 +914,7 @@ // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 10 : index // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref @@ -959,8 +959,8 @@ // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2.000000e+00 : f64 // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// 
CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref // CHECK-DAG: %[[VAL_8:.*]] = tensor.dim %[[VAL_0]], %[[VAL_3]] : tensor> // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref @@ -1009,10 +1009,10 @@ // CHECK-SAME: %[[VAL_3:.*3]]: tensor) -> tensor { // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref // CHECK-DAG: %[[VAL_11:.*]] = tensor.dim %[[VAL_1]], %[[VAL_4]] : tensor // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : memref @@ -1081,16 +1081,16 @@ // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_8:.*]] = arith.constant true -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref -// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor> to memref // CHECK: %[[VAL_16:.*]] = sparse_tensor.values 
%[[VAL_1]] : tensor> to memref -// CHECK-DAG: %[[VAL_17:.*]] = sparse_tensor.pointers %[[VAL_2]] {dimension = 1 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_18:.*]] = sparse_tensor.indices %[[VAL_2]] {dimension = 1 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_17:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 1 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_18:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 1 : index} : tensor> to memref // CHECK-DAG: %[[VAL_19:.*]] = sparse_tensor.values %[[VAL_2]] : tensor> to memref // CHECK-DAG: %[[VAL_20:.*]] = bufferization.to_memref %[[VAL_3]] : memref // CHECK-DAG: %[[VAL_21:.*]] = bufferization.to_memref %[[VAL_4]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_3d.mlir b/mlir/test/Dialect/SparseTensor/sparse_3d.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_3d.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_3d.mlir @@ -117,8 +117,8 @@ // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_8:.*]] = arith.constant true // CHECK-DAG: %[[VAL_9:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> @@ -184,8 +184,8 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 16 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref 
%[[VAL_2]] : memref<32x16x8xf32> @@ -231,8 +231,8 @@ // CHECK-DAG: %[[VAL_6:.*]] = arith.constant true // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_8:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> @@ -302,8 +302,8 @@ // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 8 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> @@ -349,10 +349,10 @@ // CHECK-DAG: %[[VAL_7:.*]] = arith.constant true // CHECK-DAG: %[[VAL_8:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_9:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.indices 
%[[VAL_0]] {dimension = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> @@ -445,10 +445,10 @@ // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 32 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> @@ -496,8 +496,8 @@ // CHECK-DAG: 
%[[VAL_6:.*]] = arith.constant true // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_8:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> @@ -572,8 +572,8 @@ // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 8 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> @@ -620,10 +620,10 @@ // CHECK-DAG: %[[VAL_7:.*]] = arith.constant true // CHECK-DAG: %[[VAL_8:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_9:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 2 : index} : tensor<32x16x8xf32, 
#sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> @@ -721,10 +721,10 @@ // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 16 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> @@ -773,10 +773,10 @@ // CHECK-DAG: %[[VAL_6:.*]] = arith.constant true // CHECK-DAG: %[[VAL_7:.*]] 
= arith.constant 0 : index // CHECK-DAG: %[[VAL_8:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> @@ -878,10 +878,10 @@ // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 8 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions 
%[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> @@ -930,12 +930,12 @@ // CHECK-DAG: %[[VAL_7:.*]] = arith.constant true // CHECK-DAG: %[[VAL_8:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_9:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ 
"compressed", "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_19:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> @@ -1060,12 +1060,12 @@ // CHECK-DAG: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32 // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32> // CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32> @@ -1123,8 +1123,8 @@ // CHECK-SAME: %[[VAL_3:.*3]]: tensor) -> tensor { // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.pointers 
%[[VAL_1]] {dimension = 2 : index} : tensor> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 2 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 2 : index} : tensor> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 2 : index} : tensor> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-DAG: %[[VAL_10:.*]] = tensor.dim %[[VAL_1]], %[[VAL_6]] : tensor> // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref @@ -1187,9 +1187,9 @@ // CHECK-SAME: %[[VAL_1:.*]]: tensor) -> tensor { // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<10x20x30xf32, #sparse_tensor.encoding<{{{.*}}>> -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<10x20x30xf32, #sparse_tensor.encoding<{{{.*}}>> -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 2 : index} : tensor<10x20x30xf32, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<10x20x30xf32, #sparse_tensor.encoding<{{{.*}}>> +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<10x20x30xf32, #sparse_tensor.encoding<{{{.*}}>> +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<10x20x30xf32, #sparse_tensor.encoding<{{{.*}}}>> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20x30xf32, #sparse_tensor.encoding<{{{.*}}}>> // CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref // CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_10]][] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_affine.mlir b/mlir/test/Dialect/SparseTensor/sparse_affine.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_affine.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_affine.mlir @@ -23,8 +23,8 @@ // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 3 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> // CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<4xf32> // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf32> @@ -64,10 +64,10 @@ // CHECK: %[[VAL_4:.*]] = arith.constant 3 : index // CHECK: %[[VAL_5:.*]] = arith.constant 0.000000e+00 : f32 // CHECK: %[[VAL_6:.*]] = bufferization.alloc_tensor() : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> -// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref +// 
CHECK: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<4xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<4xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<4xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<4xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<4xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_3]]] : memref @@ -162,8 +162,8 @@ // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 2 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>> -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>> // CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<34xi32> // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xi32> @@ -201,11 +201,11 @@ // CHECK: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK: %[[VAL_4:.*]] = arith.constant 2 : index // CHECK: %[[VAL_5:.*]] = bufferization.alloc_tensor() : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>> -// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<34xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<34xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<34xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_10:.*]] = 
sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<34xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<34xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_3]]] : memref @@ -264,8 +264,8 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 2 : index // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 3 : index -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<34x19xf64> // CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf64> @@ -315,11 +315,11 @@ // CHECK-DAG: %[[VAL_TRUE:.*]] = arith.constant true // CHECK-DAG: %[[VAL_FALSE:.*]] = arith.constant false // CHECK: %[[VAL_8:.*]] = bufferization.alloc_tensor() : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> -// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} : tensor<34x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} : tensor<34x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<34x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<34x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<34x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_15:.*]] = scf.for %[[VAL_16:.*]] = %[[VAL_3]] to %[[VAL_2]] step %[[VAL_4]] iter_args(%[[VAL_17:.*]] = %[[VAL_8]]) -> (tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>) { // CHECK: %[[VAL_18:.*]] = arith.addi %[[VAL_16]], %[[VAL_5]] : index @@ -391,11 +391,11 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 2 : index // CHECK-DAG: %[[VAL_7:.*]] = 
arith.constant 3 : index -// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<34x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<34x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<34x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<34x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<34x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_11:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_12:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf64> // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_4]]] : memref @@ -455,11 +455,11 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 3 : index -// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<34x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<34x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<34x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<34x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<34x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_11:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_12:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf64> // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_5]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir b/mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir --- 
a/mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_broadcast.mlir @@ -17,10 +17,10 @@ // CHECK-DAG: %[[TMP_c0:.*]] = arith.constant 0 : index // CHECK-DAG: %[[TMP_c1:.*]] = arith.constant 1 : index // CHECK: %[[TMP_0:.*]] = bufferization.alloc_tensor() -// CHECK: %[[TMP_1:.*]] = sparse_tensor.pointers %[[TMP_arg0]] {dimension = 0 : index} -// CHECK: %[[TMP_2:.*]] = sparse_tensor.indices %[[TMP_arg0]] {dimension = 0 : index} -// CHECK: %[[TMP_3:.*]] = sparse_tensor.pointers %[[TMP_arg0]] {dimension = 1 : index} -// CHECK: %[[TMP_4:.*]] = sparse_tensor.indices %[[TMP_arg0]] {dimension = 1 : index} +// CHECK: %[[TMP_1:.*]] = sparse_tensor.positions %[[TMP_arg0]] {level = 0 : index} +// CHECK: %[[TMP_2:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 0 : index} +// CHECK: %[[TMP_3:.*]] = sparse_tensor.positions %[[TMP_arg0]] {level = 1 : index} +// CHECK: %[[TMP_4:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 1 : index} // CHECK: %[[TMP_5:.*]] = sparse_tensor.values %[[TMP_arg0]] // CHECK: %[[TMP_6:.*]] = memref.load %[[TMP_1]][%[[TMP_c0]]] : memref // CHECK: %[[TMP_7:.*]] = memref.load %[[TMP_1]][%[[TMP_c1]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_concat_codegen.mlir b/mlir/test/Dialect/SparseTensor/sparse_concat_codegen.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_concat_codegen.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_concat_codegen.mlir @@ -16,10 +16,10 @@ // CHECK: %[[TMP_c5:.*]] = arith.constant 5 : index // CHECK: %[[TMP_c2:.*]] = arith.constant 2 : index // CHECK: %[[TMP_0:.*]] = bufferization.alloc_tensor() : tensor<9x4xf64, #sparse_tensor -// CHECK: %[[TMP_1:.*]] = sparse_tensor.pointers %[[TMP_arg0]] {dimension = 0 : index} : tensor<2x4xf64, #sparse_tensor -// CHECK: %[[TMP_2:.*]] = sparse_tensor.indices %[[TMP_arg0]] {dimension = 0 : index} : tensor<2x4xf64, #sparse_tensor -// CHECK: %[[TMP_3:.*]] = sparse_tensor.pointers %[[TMP_arg0]] {dimension = 1 : index} : tensor<2x4xf64, #sparse_tensor -// CHECK: %[[TMP_4:.*]] = sparse_tensor.indices %[[TMP_arg0]] {dimension = 1 : index} : tensor<2x4xf64, #sparse_tensor +// CHECK: %[[TMP_1:.*]] = sparse_tensor.positions %[[TMP_arg0]] {level = 0 : index} : tensor<2x4xf64, #sparse_tensor +// CHECK: %[[TMP_2:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 0 : index} : tensor<2x4xf64, #sparse_tensor +// CHECK: %[[TMP_3:.*]] = sparse_tensor.positions %[[TMP_arg0]] {level = 1 : index} : tensor<2x4xf64, #sparse_tensor +// CHECK: %[[TMP_4:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 1 : index} : tensor<2x4xf64, #sparse_tensor // CHECK: %[[TMP_5:.*]] = sparse_tensor.values %[[TMP_arg0]] : tensor<2x4xf64, #sparse_tensor // CHECK: %[[TMP_6:.*]] = memref.load %[[TMP_1]][%[[TMP_c0]]] : memref // CHECK: %[[TMP_7:.*]] = memref.load %[[TMP_1]][%[[TMP_c1]]] : memref @@ -36,10 +36,10 @@ // CHECK: } // CHECK: scf.yield %[[RET_4]] // CHECK: } -// CHECK: %[[TMP_8:.*]] = sparse_tensor.pointers %[[TMP_arg1]] {dimension = 0 : index} : tensor<3x4xf64, #sparse_tensor -// CHECK: %[[TMP_9:.*]] = sparse_tensor.indices %[[TMP_arg1]] {dimension = 0 : index} : tensor<3x4xf64, #sparse_tensor -// CHECK: %[[TMP_10:.*]] = sparse_tensor.pointers %[[TMP_arg1]] {dimension = 1 : index} : tensor<3x4xf64, #sparse_tensor -// CHECK: %[[TMP_11:.*]] = sparse_tensor.indices %[[TMP_arg1]] {dimension = 1 : index} : tensor<3x4xf64, #sparse_tensor +// CHECK: %[[TMP_8:.*]] = sparse_tensor.positions %[[TMP_arg1]] {level = 0 : index} : tensor<3x4xf64, #sparse_tensor +// CHECK: 
%[[TMP_9:.*]] = sparse_tensor.coordinates %[[TMP_arg1]] {level = 0 : index} : tensor<3x4xf64, #sparse_tensor +// CHECK: %[[TMP_10:.*]] = sparse_tensor.positions %[[TMP_arg1]] {level = 1 : index} : tensor<3x4xf64, #sparse_tensor +// CHECK: %[[TMP_11:.*]] = sparse_tensor.coordinates %[[TMP_arg1]] {level = 1 : index} : tensor<3x4xf64, #sparse_tensor // CHECK: %[[TMP_12:.*]] = sparse_tensor.values %[[TMP_arg1]] : tensor<3x4xf64, #sparse_tensor // CHECK: %[[TMP_13:.*]] = memref.load %[[TMP_8]][%[[TMP_c0]]] : memref // CHECK: %[[TMP_14:.*]] = memref.load %[[TMP_8]][%[[TMP_c1]]] : memref @@ -57,10 +57,10 @@ // CHECK: } // CHECK: scf.yield %[[RET_5]] // CHECK: } -// CHECK: %[[TMP_15:.*]] = sparse_tensor.pointers %[[TMP_arg2]] {dimension = 0 : index} : tensor<4x4xf64, #sparse_tensor -// CHECK: %[[TMP_16:.*]] = sparse_tensor.indices %[[TMP_arg2]] {dimension = 0 : index} : tensor<4x4xf64, #sparse_tensor -// CHECK: %[[TMP_17:.*]] = sparse_tensor.pointers %[[TMP_arg2]] {dimension = 1 : index} : tensor<4x4xf64, #sparse_tensor -// CHECK: %[[TMP_18:.*]] = sparse_tensor.indices %[[TMP_arg2]] {dimension = 1 : index} : tensor<4x4xf64, #sparse_tensor +// CHECK: %[[TMP_15:.*]] = sparse_tensor.positions %[[TMP_arg2]] {level = 0 : index} : tensor<4x4xf64, #sparse_tensor +// CHECK: %[[TMP_16:.*]] = sparse_tensor.coordinates %[[TMP_arg2]] {level = 0 : index} : tensor<4x4xf64, #sparse_tensor +// CHECK: %[[TMP_17:.*]] = sparse_tensor.positions %[[TMP_arg2]] {level = 1 : index} : tensor<4x4xf64, #sparse_tensor +// CHECK: %[[TMP_18:.*]] = sparse_tensor.coordinates %[[TMP_arg2]] {level = 1 : index} : tensor<4x4xf64, #sparse_tensor // CHECK: %[[TMP_19:.*]] = sparse_tensor.values %[[TMP_arg2]] : tensor<4x4xf64, #sparse_tensor // CHECK: %[[TMP_20:.*]] = memref.load %[[TMP_15]][%[[TMP_c0]]] : memref // CHECK: %[[TMP_21:.*]] = memref.load %[[TMP_15]][%[[TMP_c1]]] : memref @@ -102,10 +102,10 @@ // CHECK-DAG: %[[TMP_c9:.*]] = arith.constant 9 : index // CHECK-DAG: %[[TMP_c4:.*]] = arith.constant 4 : index // CHECK: %[[TMP_0:.*]] = bufferization.alloc_tensor(%[[TMP_c9]], %[[TMP_c4]]) : tensor // CHECK: %[[TMP_7:.*]] = memref.load %[[TMP_1]][%[[TMP_c1]]] : memref @@ -122,10 +122,10 @@ // CHECK: } // CHECK: scf.yield %[[RET_4]] // CHECK: } -// CHECK: %[[TMP_8:.*]] = sparse_tensor.pointers %[[TMP_arg1]] {dimension = 0 : index} : tensor<3x4xf64, #sparse_tensor -// CHECK: %[[TMP_9:.*]] = sparse_tensor.indices %[[TMP_arg1]] {dimension = 0 : index} : tensor<3x4xf64, #sparse_tensor -// CHECK: %[[TMP_10:.*]] = sparse_tensor.pointers %[[TMP_arg1]] {dimension = 1 : index} : tensor<3x4xf64, #sparse_tensor -// CHECK: %[[TMP_11:.*]] = sparse_tensor.indices %[[TMP_arg1]] {dimension = 1 : index} : tensor<3x4xf64, #sparse_tensor +// CHECK: %[[TMP_8:.*]] = sparse_tensor.positions %[[TMP_arg1]] {level = 0 : index} : tensor<3x4xf64, #sparse_tensor +// CHECK: %[[TMP_9:.*]] = sparse_tensor.coordinates %[[TMP_arg1]] {level = 0 : index} : tensor<3x4xf64, #sparse_tensor +// CHECK: %[[TMP_10:.*]] = sparse_tensor.positions %[[TMP_arg1]] {level = 1 : index} : tensor<3x4xf64, #sparse_tensor +// CHECK: %[[TMP_11:.*]] = sparse_tensor.coordinates %[[TMP_arg1]] {level = 1 : index} : tensor<3x4xf64, #sparse_tensor // CHECK: %[[TMP_12:.*]] = sparse_tensor.values %[[TMP_arg1]] : tensor<3x4xf64, #sparse_tensor // CHECK: %[[TMP_13:.*]] = memref.load %[[TMP_8]][%[[TMP_c0]]] : memref // CHECK: %[[TMP_14:.*]] = memref.load %[[TMP_8]][%[[TMP_c1]]] : memref @@ -143,10 +143,10 @@ // CHECK: } // CHECK: scf.yield %[[RET_5]] // CHECK: } -// CHECK: %[[TMP_15:.*]] = 
sparse_tensor.pointers %[[TMP_arg2]] {dimension = 0 : index} : tensor<4x4xf64, #sparse_tensor -// CHECK: %[[TMP_16:.*]] = sparse_tensor.indices %[[TMP_arg2]] {dimension = 0 : index} : tensor<4x4xf64, #sparse_tensor -// CHECK: %[[TMP_17:.*]] = sparse_tensor.pointers %[[TMP_arg2]] {dimension = 1 : index} : tensor<4x4xf64, #sparse_tensor -// CHECK: %[[TMP_18:.*]] = sparse_tensor.indices %[[TMP_arg2]] {dimension = 1 : index} : tensor<4x4xf64, #sparse_tensor +// CHECK: %[[TMP_15:.*]] = sparse_tensor.positions %[[TMP_arg2]] {level = 0 : index} : tensor<4x4xf64, #sparse_tensor +// CHECK: %[[TMP_16:.*]] = sparse_tensor.coordinates %[[TMP_arg2]] {level = 0 : index} : tensor<4x4xf64, #sparse_tensor +// CHECK: %[[TMP_17:.*]] = sparse_tensor.positions %[[TMP_arg2]] {level = 1 : index} : tensor<4x4xf64, #sparse_tensor +// CHECK: %[[TMP_18:.*]] = sparse_tensor.coordinates %[[TMP_arg2]] {level = 1 : index} : tensor<4x4xf64, #sparse_tensor // CHECK: %[[TMP_19:.*]] = sparse_tensor.values %[[TMP_arg2]] : tensor<4x4xf64, #sparse_tensor // CHECK: %[[TMP_20:.*]] = memref.load %[[TMP_15]][%[[TMP_c0]]] : memref // CHECK: %[[TMP_21:.*]] = memref.load %[[TMP_15]][%[[TMP_c1]]] : memref @@ -190,10 +190,10 @@ // CHECK-DAG: %[[TMP_d0:.*]] = arith.constant 0.000000e+00 : f64 // CHECK: %[[A:.*]] = memref.alloc(%[[TMP_c9]], %[[TMP_c4]]) : memref // CHECK: linalg.fill ins(%[[TMP_d0]] : f64) outs(%[[A]] : memref) -// CHECK: %[[TMP_1:.*]] = sparse_tensor.pointers %[[TMP_arg0]] {dimension = 0 : index} : tensor<2x4xf64, #sparse_tensor -// CHECK: %[[TMP_2:.*]] = sparse_tensor.indices %[[TMP_arg0]] {dimension = 0 : index} : tensor<2x4xf64, #sparse_tensor -// CHECK: %[[TMP_3:.*]] = sparse_tensor.pointers %[[TMP_arg0]] {dimension = 1 : index} : tensor<2x4xf64, #sparse_tensor -// CHECK: %[[TMP_4:.*]] = sparse_tensor.indices %[[TMP_arg0]] {dimension = 1 : index} : tensor<2x4xf64, #sparse_tensor +// CHECK: %[[TMP_1:.*]] = sparse_tensor.positions %[[TMP_arg0]] {level = 0 : index} : tensor<2x4xf64, #sparse_tensor +// CHECK: %[[TMP_2:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 0 : index} : tensor<2x4xf64, #sparse_tensor +// CHECK: %[[TMP_3:.*]] = sparse_tensor.positions %[[TMP_arg0]] {level = 1 : index} : tensor<2x4xf64, #sparse_tensor +// CHECK: %[[TMP_4:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 1 : index} : tensor<2x4xf64, #sparse_tensor // CHECK: %[[TMP_5:.*]] = sparse_tensor.values %[[TMP_arg0]] : tensor<2x4xf64, #sparse_tensor // CHECK: %[[TMP_6:.*]] = memref.load %[[TMP_1]][%[[TMP_c0]]] : memref // CHECK: %[[TMP_7:.*]] = memref.load %[[TMP_1]][%[[TMP_c1]]] : memref @@ -208,10 +208,10 @@ // CHECK: memref.store %[[TMP_28]], %[[A]]{{\[}}%[[TMP_23]], %[[TMP_27]]] : memref // CHECK: } // CHECK: } -// CHECK: %[[TMP_8:.*]] = sparse_tensor.pointers %[[TMP_arg1]] {dimension = 0 : index} : tensor<3x4xf64, #sparse_tensor -// CHECK: %[[TMP_9:.*]] = sparse_tensor.indices %[[TMP_arg1]] {dimension = 0 : index} : tensor<3x4xf64, #sparse_tensor -// CHECK: %[[TMP_10:.*]] = sparse_tensor.pointers %[[TMP_arg1]] {dimension = 1 : index} : tensor<3x4xf64, #sparse_tensor -// CHECK: %[[TMP_11:.*]] = sparse_tensor.indices %[[TMP_arg1]] {dimension = 1 : index} : tensor<3x4xf64, #sparse_tensor +// CHECK: %[[TMP_8:.*]] = sparse_tensor.positions %[[TMP_arg1]] {level = 0 : index} : tensor<3x4xf64, #sparse_tensor +// CHECK: %[[TMP_9:.*]] = sparse_tensor.coordinates %[[TMP_arg1]] {level = 0 : index} : tensor<3x4xf64, #sparse_tensor +// CHECK: %[[TMP_10:.*]] = sparse_tensor.positions %[[TMP_arg1]] {level = 1 : index} : tensor<3x4xf64, 
#sparse_tensor +// CHECK: %[[TMP_11:.*]] = sparse_tensor.coordinates %[[TMP_arg1]] {level = 1 : index} : tensor<3x4xf64, #sparse_tensor // CHECK: %[[TMP_12:.*]] = sparse_tensor.values %[[TMP_arg1]] : tensor<3x4xf64, #sparse_tensor // CHECK: %[[TMP_13:.*]] = memref.load %[[TMP_8]][%[[TMP_c0]]] : memref // CHECK: %[[TMP_14:.*]] = memref.load %[[TMP_8]][%[[TMP_c1]]] : memref @@ -227,10 +227,10 @@ // CHECK: memref.store %[[TMP_28]], %[[A]]{{\[}}%[[TMP_29]], %[[TMP_27]]] : memref // CHECK: } // CHECK: } -// CHECK: %[[TMP_15:.*]] = sparse_tensor.pointers %[[TMP_arg2]] {dimension = 0 : index} : tensor<4x4xf64, #sparse_tensor -// CHECK: %[[TMP_16:.*]] = sparse_tensor.indices %[[TMP_arg2]] {dimension = 0 : index} : tensor<4x4xf64, #sparse_tensor -// CHECK: %[[TMP_17:.*]] = sparse_tensor.pointers %[[TMP_arg2]] {dimension = 1 : index} : tensor<4x4xf64, #sparse_tensor -// CHECK: %[[TMP_18:.*]] = sparse_tensor.indices %[[TMP_arg2]] {dimension = 1 : index} : tensor<4x4xf64, #sparse_tensor +// CHECK: %[[TMP_15:.*]] = sparse_tensor.positions %[[TMP_arg2]] {level = 0 : index} : tensor<4x4xf64, #sparse_tensor +// CHECK: %[[TMP_16:.*]] = sparse_tensor.coordinates %[[TMP_arg2]] {level = 0 : index} : tensor<4x4xf64, #sparse_tensor +// CHECK: %[[TMP_17:.*]] = sparse_tensor.positions %[[TMP_arg2]] {level = 1 : index} : tensor<4x4xf64, #sparse_tensor +// CHECK: %[[TMP_18:.*]] = sparse_tensor.coordinates %[[TMP_arg2]] {level = 1 : index} : tensor<4x4xf64, #sparse_tensor // CHECK: %[[TMP_19:.*]] = sparse_tensor.values %[[TMP_arg2]] : tensor<4x4xf64, #sparse_tensor // CHECK: %[[TMP_20:.*]] = memref.load %[[TMP_15]][%[[TMP_c0]]] : memref // CHECK: %[[TMP_21:.*]] = memref.load %[[TMP_15]][%[[TMP_c1]]] : memref @@ -275,10 +275,10 @@ // CHECK: memref.store %[[TMP_c9]], %[[DIM_0]][%[[TMP_c0]]] : memref<2xindex> // CHECK: memref.store %[[TMP_c4]], %[[DIM_0]][%[[TMP_c1]]] : memref<2xindex> // CHECK: %[[VAL_1:.*]] = memref.reshape %[[VAL_0]](%[[DIM_0]]) : (memref, memref<2xindex>) -> memref -// CHECK: %[[TMP_1:.*]] = sparse_tensor.pointers %[[TMP_arg0]] {dimension = 0 : index} : tensor<2x4xf64, #sparse_tensor -// CHECK: %[[TMP_2:.*]] = sparse_tensor.indices %[[TMP_arg0]] {dimension = 0 : index} : tensor<2x4xf64, #sparse_tensor -// CHECK: %[[TMP_3:.*]] = sparse_tensor.pointers %[[TMP_arg0]] {dimension = 1 : index} : tensor<2x4xf64, #sparse_tensor -// CHECK: %[[TMP_4:.*]] = sparse_tensor.indices %[[TMP_arg0]] {dimension = 1 : index} : tensor<2x4xf64, #sparse_tensor +// CHECK: %[[TMP_1:.*]] = sparse_tensor.positions %[[TMP_arg0]] {level = 0 : index} : tensor<2x4xf64, #sparse_tensor +// CHECK: %[[TMP_2:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 0 : index} : tensor<2x4xf64, #sparse_tensor +// CHECK: %[[TMP_3:.*]] = sparse_tensor.positions %[[TMP_arg0]] {level = 1 : index} : tensor<2x4xf64, #sparse_tensor +// CHECK: %[[TMP_4:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 1 : index} : tensor<2x4xf64, #sparse_tensor // CHECK: %[[TMP_5:.*]] = sparse_tensor.values %[[TMP_arg0]] : tensor<2x4xf64, #sparse_tensor // CHECK: %[[TMP_6:.*]] = memref.load %[[TMP_1]][%[[TMP_c0]]] : memref // CHECK: %[[TMP_7:.*]] = memref.load %[[TMP_1]][%[[TMP_c1]]] : memref @@ -293,10 +293,10 @@ // CHECK: memref.store %[[TMP_28]], %[[VAL_1]][%[[TMP_23]], %[[TMP_27]]] : memref // CHECK: } // CHECK: } -// CHECK: %[[TMP_8:.*]] = sparse_tensor.pointers %[[TMP_arg1]] {dimension = 0 : index} : tensor<3x4xf64, #sparse_tensor -// CHECK: %[[TMP_9:.*]] = sparse_tensor.indices %[[TMP_arg1]] {dimension = 0 : index} : tensor<3x4xf64, #sparse_tensor 
-// CHECK: %[[TMP_10:.*]] = sparse_tensor.pointers %[[TMP_arg1]] {dimension = 1 : index} : tensor<3x4xf64, #sparse_tensor -// CHECK: %[[TMP_11:.*]] = sparse_tensor.indices %[[TMP_arg1]] {dimension = 1 : index} : tensor<3x4xf64, #sparse_tensor +// CHECK: %[[TMP_8:.*]] = sparse_tensor.positions %[[TMP_arg1]] {level = 0 : index} : tensor<3x4xf64, #sparse_tensor +// CHECK: %[[TMP_9:.*]] = sparse_tensor.coordinates %[[TMP_arg1]] {level = 0 : index} : tensor<3x4xf64, #sparse_tensor +// CHECK: %[[TMP_10:.*]] = sparse_tensor.positions %[[TMP_arg1]] {level = 1 : index} : tensor<3x4xf64, #sparse_tensor +// CHECK: %[[TMP_11:.*]] = sparse_tensor.coordinates %[[TMP_arg1]] {level = 1 : index} : tensor<3x4xf64, #sparse_tensor // CHECK: %[[TMP_12:.*]] = sparse_tensor.values %[[TMP_arg1]] : tensor<3x4xf64, #sparse_tensor // CHECK: %[[TMP_13:.*]] = memref.load %[[TMP_8]][%[[TMP_c0]]] : memref // CHECK: %[[TMP_14:.*]] = memref.load %[[TMP_8]][%[[TMP_c1]]] : memref @@ -312,10 +312,10 @@ // CHECK: memref.store %[[TMP_28]], %[[VAL_1]][%[[TMP_29]], %[[TMP_27]]] : memref // CHECK: } // CHECK: } -// CHECK: %[[TMP_15:.*]] = sparse_tensor.pointers %[[TMP_arg2]] {dimension = 0 : index} : tensor<4x4xf64, #sparse_tensor -// CHECK: %[[TMP_16:.*]] = sparse_tensor.indices %[[TMP_arg2]] {dimension = 0 : index} : tensor<4x4xf64, #sparse_tensor -// CHECK: %[[TMP_17:.*]] = sparse_tensor.pointers %[[TMP_arg2]] {dimension = 1 : index} : tensor<4x4xf64, #sparse_tensor -// CHECK: %[[TMP_18:.*]] = sparse_tensor.indices %[[TMP_arg2]] {dimension = 1 : index} : tensor<4x4xf64, #sparse_tensor +// CHECK: %[[TMP_15:.*]] = sparse_tensor.positions %[[TMP_arg2]] {level = 0 : index} : tensor<4x4xf64, #sparse_tensor +// CHECK: %[[TMP_16:.*]] = sparse_tensor.coordinates %[[TMP_arg2]] {level = 0 : index} : tensor<4x4xf64, #sparse_tensor +// CHECK: %[[TMP_17:.*]] = sparse_tensor.positions %[[TMP_arg2]] {level = 1 : index} : tensor<4x4xf64, #sparse_tensor +// CHECK: %[[TMP_18:.*]] = sparse_tensor.coordinates %[[TMP_arg2]] {level = 1 : index} : tensor<4x4xf64, #sparse_tensor // CHECK: %[[TMP_19:.*]] = sparse_tensor.values %[[TMP_arg2]] : tensor<4x4xf64, #sparse_tensor // CHECK: %[[TMP_20:.*]] = memref.load %[[TMP_15]][%[[TMP_c0]]] : memref // CHECK: %[[TMP_21:.*]] = memref.load %[[TMP_15]][%[[TMP_c1]]] : memref @@ -360,10 +360,10 @@ // CHECK: memref.store %[[TMP_c4]], %[[DIM_0]][%[[TMP_c0]]] : memref<2xindex> // CHECK: memref.store %[[TMP_c9]], %[[DIM_0]][%[[TMP_c1]]] : memref<2xindex> // CHECK: %[[VAL_1:.*]] = memref.reshape %[[VAL_0]](%[[DIM_0]]) : (memref, memref<2xindex>) -> memref -// CHECK: %[[TMP_1:.*]] = sparse_tensor.pointers %[[TMP_arg0]] {dimension = 0 : index} : tensor<2x4xf64, #sparse_tensor -// CHECK: %[[TMP_2:.*]] = sparse_tensor.indices %[[TMP_arg0]] {dimension = 0 : index} : tensor<2x4xf64, #sparse_tensor -// CHECK: %[[TMP_3:.*]] = sparse_tensor.pointers %[[TMP_arg0]] {dimension = 1 : index} : tensor<2x4xf64, #sparse_tensor -// CHECK: %[[TMP_4:.*]] = sparse_tensor.indices %[[TMP_arg0]] {dimension = 1 : index} : tensor<2x4xf64, #sparse_tensor +// CHECK: %[[TMP_1:.*]] = sparse_tensor.positions %[[TMP_arg0]] {level = 0 : index} : tensor<2x4xf64, #sparse_tensor +// CHECK: %[[TMP_2:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 0 : index} : tensor<2x4xf64, #sparse_tensor +// CHECK: %[[TMP_3:.*]] = sparse_tensor.positions %[[TMP_arg0]] {level = 1 : index} : tensor<2x4xf64, #sparse_tensor +// CHECK: %[[TMP_4:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 1 : index} : tensor<2x4xf64, #sparse_tensor // CHECK: 
%[[TMP_5:.*]] = sparse_tensor.values %[[TMP_arg0]] : tensor<2x4xf64, #sparse_tensor // CHECK: %[[TMP_6:.*]] = memref.load %[[TMP_1]][%[[TMP_c0]]] : memref // CHECK: %[[TMP_7:.*]] = memref.load %[[TMP_1]][%[[TMP_c1]]] : memref @@ -378,10 +378,10 @@ // CHECK: memref.store %[[TMP_28]], %[[VAL_1]][%[[TMP_27]], %[[TMP_23]]] : memref // CHECK: } // CHECK: } -// CHECK: %[[TMP_8:.*]] = sparse_tensor.pointers %[[TMP_arg1]] {dimension = 0 : index} : tensor<3x4xf64, #sparse_tensor -// CHECK: %[[TMP_9:.*]] = sparse_tensor.indices %[[TMP_arg1]] {dimension = 0 : index} : tensor<3x4xf64, #sparse_tensor -// CHECK: %[[TMP_10:.*]] = sparse_tensor.pointers %[[TMP_arg1]] {dimension = 1 : index} : tensor<3x4xf64, #sparse_tensor -// CHECK: %[[TMP_11:.*]] = sparse_tensor.indices %[[TMP_arg1]] {dimension = 1 : index} : tensor<3x4xf64, #sparse_tensor +// CHECK: %[[TMP_8:.*]] = sparse_tensor.positions %[[TMP_arg1]] {level = 0 : index} : tensor<3x4xf64, #sparse_tensor +// CHECK: %[[TMP_9:.*]] = sparse_tensor.coordinates %[[TMP_arg1]] {level = 0 : index} : tensor<3x4xf64, #sparse_tensor +// CHECK: %[[TMP_10:.*]] = sparse_tensor.positions %[[TMP_arg1]] {level = 1 : index} : tensor<3x4xf64, #sparse_tensor +// CHECK: %[[TMP_11:.*]] = sparse_tensor.coordinates %[[TMP_arg1]] {level = 1 : index} : tensor<3x4xf64, #sparse_tensor // CHECK: %[[TMP_12:.*]] = sparse_tensor.values %[[TMP_arg1]] : tensor<3x4xf64, #sparse_tensor // CHECK: %[[TMP_13:.*]] = memref.load %[[TMP_8]][%[[TMP_c0]]] : memref // CHECK: %[[TMP_14:.*]] = memref.load %[[TMP_8]][%[[TMP_c1]]] : memref @@ -397,10 +397,10 @@ // CHECK: memref.store %[[TMP_28]], %[[VAL_1]][%[[TMP_27]], %[[TMP_29]]] : memref // CHECK: } // CHECK: } -// CHECK: %[[TMP_15:.*]] = sparse_tensor.pointers %[[TMP_arg2]] {dimension = 0 : index} : tensor<4x4xf64, #sparse_tensor -// CHECK: %[[TMP_16:.*]] = sparse_tensor.indices %[[TMP_arg2]] {dimension = 0 : index} : tensor<4x4xf64, #sparse_tensor -// CHECK: %[[TMP_17:.*]] = sparse_tensor.pointers %[[TMP_arg2]] {dimension = 1 : index} : tensor<4x4xf64, #sparse_tensor -// CHECK: %[[TMP_18:.*]] = sparse_tensor.indices %[[TMP_arg2]] {dimension = 1 : index} : tensor<4x4xf64, #sparse_tensor +// CHECK: %[[TMP_15:.*]] = sparse_tensor.positions %[[TMP_arg2]] {level = 0 : index} : tensor<4x4xf64, #sparse_tensor +// CHECK: %[[TMP_16:.*]] = sparse_tensor.coordinates %[[TMP_arg2]] {level = 0 : index} : tensor<4x4xf64, #sparse_tensor +// CHECK: %[[TMP_17:.*]] = sparse_tensor.positions %[[TMP_arg2]] {level = 1 : index} : tensor<4x4xf64, #sparse_tensor +// CHECK: %[[TMP_18:.*]] = sparse_tensor.coordinates %[[TMP_arg2]] {level = 1 : index} : tensor<4x4xf64, #sparse_tensor // CHECK: %[[TMP_19:.*]] = sparse_tensor.values %[[TMP_arg2]] : tensor<4x4xf64, #sparse_tensor // CHECK: %[[TMP_20:.*]] = memref.load %[[TMP_15]][%[[TMP_c0]]] : memref // CHECK: %[[TMP_21:.*]] = memref.load %[[TMP_15]][%[[TMP_c1]]] : memref @@ -427,4 +427,4 @@ tensor<3x4xf64, #DCSR>, tensor<4x4xf64, #DCSR> to tensor return %0 : tensor -} \ No newline at end of file +} diff --git a/mlir/test/Dialect/SparseTensor/sparse_fill_zero.mlir b/mlir/test/Dialect/SparseTensor/sparse_fill_zero.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_fill_zero.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_fill_zero.mlir @@ -41,15 +41,15 @@ // CHECK: %[[VAL_25:.*]] = memref.cast %[[VAL_24]] : memref<300xindex> to memref // CHECK: linalg.fill ins(%[[F0]] : f64) outs(%[[VAL_20]] : memref<300xf64>) // CHECK: linalg.fill ins(%[[False]] : i1) outs(%[[VAL_22]] : memref<300xi1>) -// CHECK: %[[VAL_26:.*]] = call 
@sparsePointers0(%[[Arg0]], %[[I0]]) : (!llvm.ptr, index) -> memref -// CHECK: %[[VAL_27:.*]] = call @sparseIndices0(%[[Arg0]], %[[I0]]) : (!llvm.ptr, index) -> memref -// CHECK: %[[VAL_28:.*]] = call @sparsePointers0(%[[Arg0]], %[[I1]]) : (!llvm.ptr, index) -> memref -// CHECK: %[[VAL_29:.*]] = call @sparseIndices0(%[[Arg0]], %[[I1]]) : (!llvm.ptr, index) -> memref +// CHECK: %[[VAL_26:.*]] = call @sparsePositions0(%[[Arg0]], %[[I0]]) : (!llvm.ptr, index) -> memref +// CHECK: %[[VAL_27:.*]] = call @sparseCoordinates0(%[[Arg0]], %[[I0]]) : (!llvm.ptr, index) -> memref +// CHECK: %[[VAL_28:.*]] = call @sparsePositions0(%[[Arg0]], %[[I1]]) : (!llvm.ptr, index) -> memref +// CHECK: %[[VAL_29:.*]] = call @sparseCoordinates0(%[[Arg0]], %[[I1]]) : (!llvm.ptr, index) -> memref // CHECK: %[[VAL_30:.*]] = call @sparseValuesF64(%[[Arg0]]) : (!llvm.ptr) -> memref -// CHECK: %[[VAL_31:.*]] = call @sparsePointers0(%[[Arg1]], %[[I0]]) : (!llvm.ptr, index) -> memref -// CHECK: %[[VAL_32:.*]] = call @sparseIndices0(%[[Arg1]], %[[I0]]) : (!llvm.ptr, index) -> memref -// CHECK: %[[VAL_33:.*]] = call @sparsePointers0(%[[Arg1]], %[[I1]]) : (!llvm.ptr, index) -> memref -// CHECK: %[[VAL_34:.*]] = call @sparseIndices0(%[[Arg1]], %[[I1]]) : (!llvm.ptr, index) -> memref +// CHECK: %[[VAL_31:.*]] = call @sparsePositions0(%[[Arg1]], %[[I0]]) : (!llvm.ptr, index) -> memref +// CHECK: %[[VAL_32:.*]] = call @sparseCoordinates0(%[[Arg1]], %[[I0]]) : (!llvm.ptr, index) -> memref +// CHECK: %[[VAL_33:.*]] = call @sparsePositions0(%[[Arg1]], %[[I1]]) : (!llvm.ptr, index) -> memref +// CHECK: %[[VAL_34:.*]] = call @sparseCoordinates0(%[[Arg1]], %[[I1]]) : (!llvm.ptr, index) -> memref // CHECK: %[[VAL_35:.*]] = call @sparseValuesF64(%[[Arg1]]) : (!llvm.ptr) -> memref // CHECK: %[[VAL_36:.*]] = memref.load %[[VAL_26]]{{\[}}%[[I0]]] : memref // CHECK: %[[VAL_37:.*]] = memref.load %[[VAL_26]]{{\[}}%[[I1]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir @@ -35,8 +35,8 @@ // CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf64>) -> tensor<32xf64> { // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_5:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64> // CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref @@ -67,8 +67,8 @@ // CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf64>) -> tensor<32xf64> { // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_5:.*]] 
= sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64> // CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref @@ -99,8 +99,8 @@ // CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf64>) -> tensor<32xf64> { // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_5:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64> // CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref @@ -131,8 +131,8 @@ // CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf64>) -> tensor<32xf64> { // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_5:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64> // CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref @@ -166,8 +166,8 @@ // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant true // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> -// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> // CHECK: %[[VAL_9:.*]] = 
sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> // CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64> // CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64> @@ -226,8 +226,8 @@ // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant true // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64> // CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64> @@ -286,8 +286,8 @@ // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf64>) -> tensor<32xf64> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> -// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> // CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> // CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64> // CHECK: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64> @@ -322,8 +322,8 @@ // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2.000000e+00 : f64 // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> -// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> // CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> // CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64> // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref @@ -355,8 +355,8 @@ // CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index // CHECK: %[[VAL_3:.*]] = bufferization.alloc_tensor() : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> 
-// CHECK: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK: %[[VAL_5:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK: %[[VAL_7:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_1]]] : memref // CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref @@ -403,8 +403,8 @@ // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index // CHECK: %[[VAL_3:.*]] = complex.constant [0.000000e+00, 1.000000e+00] : complex // CHECK: %[[VAL_4:.*]] = bufferization.alloc_tensor() : tensor<32xcomplex, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> -// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xcomplex, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xcomplex, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xcomplex, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xcomplex, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xcomplex, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref> // CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_1]]] : memref // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_2]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_index.mlir b/mlir/test/Dialect/SparseTensor/sparse_index.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_index.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_index.mlir @@ -76,10 +76,10 @@ // CHECK-DAG: %[[VAL_3:.*]] = tensor.dim %[[VAL_0]], %[[VAL_1]] : tensor // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_2]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir b/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir @@ -30,8 +30,8 @@ // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant true // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> -// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK: %[[VAL_8:.*]] = sparse_tensor.coordinates 
%[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> // CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> // CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64> // CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xi64> @@ -91,8 +91,8 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant true // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 0 : i64 -// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> -// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> // CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> // CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64> // CHECK: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xi64> @@ -151,8 +151,8 @@ // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xi64>) -> tensor<32xi64> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> -// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> // CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> // CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64> // CHECK: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xi64> @@ -187,8 +187,8 @@ // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2 : i64 // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> -// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> // CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> // CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64> // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref @@ -221,8 +221,8 @@ // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2 : i64 // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_5:.*]] = 
sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{.*}}}>> -// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{.*}}}>> +// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> // CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> // CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64> // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref @@ -255,8 +255,8 @@ // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xi64>) -> tensor<32xi64> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64> // CHECK: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xi64> @@ -293,8 +293,8 @@ // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant true // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64> // CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xi64> @@ -353,8 +353,8 @@ // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant true // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] 
{level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64> // CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xi64> @@ -411,8 +411,8 @@ // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2 : i64 // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64> // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref @@ -445,8 +445,8 @@ // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2 : i64 // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64> // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref @@ -479,8 +479,8 @@ // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2 : i64 // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref -// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse_tensor.encoding<{{{.*}}}>> to memref // CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64> // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir 
b/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir @@ -13,10 +13,10 @@ // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 30 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<20x30xf32> // CHECK: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<10x30xf32> @@ -59,10 +59,10 @@ // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<10x20xf32> -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] // CHECK: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<10x30xf32> // CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { @@ -109,15 +109,15 @@ // CHECK-DAG: %[[VAL_4:.*]] = arith.constant false // CHECK-DAG: %[[VAL_5:.*]] = arith.constant true // CHECK-DAG: %[[VAL_6:.*]] = 
bufferization.alloc_tensor() : tensor<4x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<4x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<8x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK: 
%[[VAL_17:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_3]]] : memref @@ -203,10 +203,10 @@ // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<8x8xi32> -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<3x3xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<6x6xi32> // CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { @@ -254,10 +254,10 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 2 : i64 // CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : memref<5x3xi8> -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", 
"compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<3x6xi8, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref<5x6xi64> // CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { @@ -302,11 +302,11 @@ // CHECK-SAME: %[[VAL_2:.*2]]: tensor) -> tensor { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_11]][] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_lower.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_lower.mlir @@ -27,8 +27,8 @@ // CHECK-HIR-DAG: %[[VAL_3:.*]] = arith.constant 32 : index // CHECK-HIR-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-HIR-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-HIR-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{{{.*}}}>> -// CHECK-HIR-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK-HIR-DAG: %[[VAL_6:.*]] = 
sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK-HIR-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{{{.*}}}>> // CHECK-HIR-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{{{.*}}}>> // CHECK-HIR-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64> // CHECK-HIR-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64> @@ -58,8 +58,8 @@ // CHECK-MIR-DAG: %[[VAL_3:.*]] = arith.constant 32 : index // CHECK-MIR-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-MIR-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-MIR-DAG: %[[VAL_6:.*]] = call @sparsePointers0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref -// CHECK-MIR-DAG: %[[VAL_7:.*]] = call @sparseIndices0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref +// CHECK-MIR-DAG: %[[VAL_6:.*]] = call @sparsePositions0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref +// CHECK-MIR-DAG: %[[VAL_7:.*]] = call @sparseCoordinates0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref // CHECK-MIR-DAG: %[[VAL_8:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr) -> memref // CHECK-MIR-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64> // CHECK-MIR-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64> @@ -89,8 +89,8 @@ // CHECK-LIR-DAG: %[[VAL_3:.*]] = arith.constant 32 : index // CHECK-LIR-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-LIR-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-LIR-DAG: %[[VAL_6:.*]] = call @sparsePointers0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref -// CHECK-LIR-DAG: %[[VAL_7:.*]] = call @sparseIndices0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref +// CHECK-LIR-DAG: %[[VAL_6:.*]] = call @sparsePositions0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref +// CHECK-LIR-DAG: %[[VAL_7:.*]] = call @sparseCoordinates0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref // CHECK-LIR-DAG: %[[VAL_8:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr) -> memref // CHECK-LIR: scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK-LIR-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir @@ -30,8 +30,8 @@ // CHECK-HIR-DAG: %[[VAL_3:.*]] = arith.constant 64 : index // CHECK-HIR-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-HIR-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-HIR-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref -// CHECK-HIR-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref +// CHECK-HIR-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref +// CHECK-HIR-DAG: 
%[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref // CHECK-HIR-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref // CHECK-HIR-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64> // CHECK-HIR-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64> @@ -60,8 +60,8 @@ // CHECK-MIR-DAG: %[[VAL_3:.*]] = arith.constant 64 : index // CHECK-MIR-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-MIR-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-MIR-DAG: %[[VAL_7:.*]] = call @sparsePointers0(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr, index) -> memref -// CHECK-MIR-DAG: %[[VAL_8:.*]] = call @sparseIndices0(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr, index) -> memref +// CHECK-MIR-DAG: %[[VAL_7:.*]] = call @sparsePositions0(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr, index) -> memref +// CHECK-MIR-DAG: %[[VAL_8:.*]] = call @sparseCoordinates0(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr, index) -> memref // CHECK-MIR-DAG: %[[VAL_9:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr) -> memref // CHECK-MIR-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64> // CHECK-MIR-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64> @@ -90,8 +90,8 @@ // CHECK-LIR-DAG: %[[VAL_3:.*]] = arith.constant 64 : index // CHECK-LIR-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-LIR-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-LIR-DAG: %[[VAL_7:.*]] = call @sparsePointers0(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr, index) -> memref -// CHECK-LIR-DAG: %[[VAL_8:.*]] = call @sparseIndices0(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr, index) -> memref +// CHECK-LIR-DAG: %[[VAL_7:.*]] = call @sparsePositions0(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr, index) -> memref +// CHECK-LIR-DAG: %[[VAL_8:.*]] = call @sparseCoordinates0(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr, index) -> memref // CHECK-LIR-DAG: %[[VAL_9:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr) -> memref // CHECK-LIR: scf.for %[[VAL_13:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] { // CHECK-LIR: %[[VAL_14:.*]] = memref.load %[[VAL_1]]{{\[}}%[[VAL_13]]] : memref<64xf64> diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir @@ -27,8 +27,8 @@ // CHECK-HIR-DAG: %[[VAL_3:.*]] = arith.constant 32 : index // CHECK-HIR-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-HIR-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-HIR: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{{{.*}}}>> -// CHECK-HIR: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK-HIR: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK-HIR: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{{{.*}}}>> // CHECK-HIR: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, 
#sparse_tensor.encoding<{{{.*}}}>> // CHECK-HIR: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64> // CHECK-HIR: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64> @@ -58,8 +58,8 @@ // CHECK-MIR-DAG: %[[VAL_3:.*]] = arith.constant 32 : index // CHECK-MIR-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-MIR-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-MIR: %[[VAL_6:.*]] = call @sparsePointers0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref -// CHECK-MIR: %[[VAL_7:.*]] = call @sparseIndices0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref +// CHECK-MIR: %[[VAL_6:.*]] = call @sparsePositions0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref +// CHECK-MIR: %[[VAL_7:.*]] = call @sparseCoordinates0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref // CHECK-MIR: %[[VAL_8:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr) -> memref // CHECK-MIR: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64> // CHECK-MIR: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64> @@ -89,8 +89,8 @@ // CHECK-LIR-DAG: %[[VAL_3:.*]] = arith.constant 32 : index // CHECK-LIR-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-LIR-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-LIR: %[[VAL_6:.*]] = call @sparsePointers0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref -// CHECK-LIR: %[[VAL_7:.*]] = call @sparseIndices0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref +// CHECK-LIR: %[[VAL_6:.*]] = call @sparsePositions0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref +// CHECK-LIR: %[[VAL_7:.*]] = call @sparseCoordinates0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref // CHECK-LIR: %[[VAL_8:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr) -> memref // CHECK-LIR: scf.for %[[VAL_9:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK-LIR-DAG: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_9]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir b/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir @@ -25,7 +25,7 @@ // CHECK: %[[VAL_9:.*]] = arith.addi %[[VAL_4]], %[[VAL_8]] : index // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_0]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_0]]{{\[}}%[[VAL_9]]] : memref -// CHECK: %[[VAL_13:.*]] = sparse_tensor.storage_specifier.get %[[VAL_3]] idx_mem_sz at 1 : !sparse_tensor.storage_specifier +// CHECK: %[[VAL_13:.*]] = sparse_tensor.storage_specifier.get %[[VAL_3]] crd_mem_sz at 1 : !sparse_tensor.storage_specifier // CHECK: %[[VAL_14:.*]] = arith.subi %[[VAL_11]], %[[VAL_8]] : index // CHECK: %[[VAL_15:.*]] = arith.cmpi ult, %[[VAL_10]], %[[VAL_11]] : index // CHECK: %[[VAL_16:.*]] = scf.if %[[VAL_15]] -> (i1) { @@ -42,7 +42,7 @@ // CHECK: %[[VAL_21:.*]] = arith.addi %[[VAL_13]], %[[VAL_8]] : index // CHECK: memref.store %[[VAL_21]], %[[VAL_0]]{{\[}}%[[VAL_9]]] : memref // CHECK: %[[VAL_22:.*]], %[[VAL_24:.*]] = sparse_tensor.push_back %[[VAL_13]], %[[VAL_1]], %[[VAL_5]] : index, memref, index -// CHECK: %[[VAL_25:.*]] = sparse_tensor.storage_specifier.set %[[VAL_3]] idx_mem_sz at 1 with %[[VAL_24]] : !sparse_tensor.storage_specifier +// CHECK: %[[VAL_25:.*]] = sparse_tensor.storage_specifier.set %[[VAL_3]] crd_mem_sz at 1 with %[[VAL_24]] : !sparse_tensor.storage_specifier // CHECK: scf.yield %[[VAL_22]], %[[VAL_25]] : memref, 
!sparse_tensor.storage_specifier
// CHECK: }
// CHECK: %[[VAL_28:.*]] = sparse_tensor.storage_specifier.get %[[VAL_27:.*]]#1 val_mem_sz : !sparse_tensor.storage_specifier
@@ -73,13 +73,13 @@
// CHECK: %[[VAL_18:.*]] = memref.alloc() : memref<16xf64>
// CHECK: %[[VAL_19:.*]] = memref.cast %[[VAL_18]] : memref<16xf64> to memref
// CHECK: %[[VAL_20:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier
-// CHECK: %[[VAL_21:.*]] = sparse_tensor.storage_specifier.set %[[VAL_20]] dim_sz at 0 with %[[VAL_8]] : !sparse_tensor.storage_specifier
-// CHECK: %[[VAL_22:.*]] = sparse_tensor.storage_specifier.set %[[VAL_21]] dim_sz at 1 with %[[VAL_8]] : !sparse_tensor.storage_specifier
-// CHECK: %[[VAL_23:.*]] = sparse_tensor.storage_specifier.get %[[VAL_22]] ptr_mem_sz at 1 : !sparse_tensor.storage_specifier
+// CHECK: %[[VAL_21:.*]] = sparse_tensor.storage_specifier.set %[[VAL_20]] lvl_sz at 0 with %[[VAL_8]] : !sparse_tensor.storage_specifier
+// CHECK: %[[VAL_22:.*]] = sparse_tensor.storage_specifier.set %[[VAL_21]] lvl_sz at 1 with %[[VAL_8]] : !sparse_tensor.storage_specifier
+// CHECK: %[[VAL_23:.*]] = sparse_tensor.storage_specifier.get %[[VAL_22]] pos_mem_sz at 1 : !sparse_tensor.storage_specifier
// CHECK: %[[VAL_24:.*]], %[[VAL_25:.*]] = sparse_tensor.push_back %[[VAL_23]], %[[VAL_15]], %[[VAL_10]] : index, memref, index
-// CHECK: %[[VAL_26:.*]] = sparse_tensor.storage_specifier.set %[[VAL_22]] ptr_mem_sz at 1 with %[[VAL_25]] : !sparse_tensor.storage_specifier
+// CHECK: %[[VAL_26:.*]] = sparse_tensor.storage_specifier.set %[[VAL_22]] pos_mem_sz at 1 with %[[VAL_25]] : !sparse_tensor.storage_specifier
// CHECK: %[[VAL_27:.*]], %[[VAL_28:.*]] = sparse_tensor.push_back %[[VAL_25]], %[[VAL_24]], %[[VAL_10]], %[[VAL_8]] : index, memref, index, index
-// CHECK: %[[VAL_29:.*]] = sparse_tensor.storage_specifier.set %[[VAL_26]] ptr_mem_sz at 1 with %[[VAL_28]] : !sparse_tensor.storage_specifier
+// CHECK: %[[VAL_29:.*]] = sparse_tensor.storage_specifier.set %[[VAL_26]] pos_mem_sz at 1 with %[[VAL_28]] : !sparse_tensor.storage_specifier
// CHECK: %[[VAL_30:.*]] = memref.alloc() : memref<4xf64>
// CHECK: %[[VAL_31:.*]] = memref.alloc() : memref<4xi1>
// CHECK: %[[VAL_32:.*]] = memref.alloc() : memref<4xindex>
@@ -131,7 +131,7 @@
// CHECK: memref.dealloc %[[VAL_30]] : memref<4xf64>
// CHECK: memref.dealloc %[[VAL_31]] : memref<4xi1>
// CHECK: memref.dealloc %[[VAL_32]] : memref<4xindex>
-// CHECK: %[[VAL_76:.*]] = sparse_tensor.storage_specifier.get %[[VAL_77:.*]]#3 ptr_mem_sz at 1 : !sparse_tensor.storage_specifier
+// CHECK: %[[VAL_76:.*]] = sparse_tensor.storage_specifier.get %[[VAL_77:.*]]#3 pos_mem_sz at 1 : !sparse_tensor.storage_specifier
// CHECK: %[[VAL_78:.*]] = memref.load %[[VAL_77]]#0{{\[}}%[[VAL_10]]] : memref
// CHECK: %[[VAL_79:.*]] = scf.for %[[VAL_80:.*]] = %[[VAL_11]] to %[[VAL_76]] step %[[VAL_11]] iter_args(%[[VAL_81:.*]] = %[[VAL_78]]) -> (index) {
// CHECK: %[[VAL_82:.*]] = memref.load %[[VAL_77]]#0{{\[}}%[[VAL_80]]] : memref
diff --git a/mlir/test/Dialect/SparseTensor/sparse_nd.mlir b/mlir/test/Dialect/SparseTensor/sparse_nd.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_nd.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_nd.mlir
@@ -34,10 +34,10 @@
// CHECK-DAG: %[[VAL_11:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_12:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_0]] : memref<10x20x30x40x50x60x70x80xf32>
-// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 3 : index} : 
tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 3 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 4 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ] }>> to memref -// CHECK-DAG: %[[VAL_17:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 4 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 3 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 3 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 4 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ] }>> to memref +// CHECK-DAG: %[[VAL_17:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 4 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_18:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<80x70x60x50x40x30x20x10xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "dense", "compressed", "compressed", "dense", "dense", "dense" ] }>> to memref // CHECK-DAG: %[[VAL_20:.*]] = bufferization.to_memref %[[VAL_2]] : memref<10x20x30x40x50x60x70x80xf32> // CHECK: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_20]] : memref<10x20x30x40x50x60x70x80xf32> diff --git a/mlir/test/Dialect/SparseTensor/sparse_out.mlir b/mlir/test/Dialect/SparseTensor/sparse_out.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_out.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_out.mlir @@ -27,8 +27,8 @@ // CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 2.000000e+00 : f32 -// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : 
index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK: %[[VAL_7:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_1]]] : memref // CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref @@ -60,8 +60,8 @@ // CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) -> tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> { // CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_3:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_3:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK: %[[VAL_6:.*]] = memref.load %[[VAL_3]]{{\[}}%[[VAL_1]]] : memref // CHECK: %[[VAL_7:.*]] = memref.load %[[VAL_3]]{{\[}}%[[VAL_2]]] : memref @@ -105,8 +105,8 @@ // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 2.000000e+00 : f32 // CHECK-DAG: %[[VAL_5:.*]] = bufferization.alloc_tensor() : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref // CHECK: %[[VAL_9:.*]] = scf.for %[[VAL_10:.*]] = %[[VAL_2]] to %[[VAL_1]] step %[[VAL_3]] iter_args(%[[VAL_11:.*]] = %[[VAL_5]]) -> (tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>) { // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_10]]] : memref @@ -158,19 +158,19 @@ // CHECK: %[[VAL_5:.*]] = tensor.dim %[[VAL_0]], %[[VAL_2]] : tensor> // CHECK: %[[VAL_6:.*]] = tensor.dim %[[VAL_0]], %[[VAL_3]] : 
tensor> // CHECK: %[[VAL_7:.*]] = bufferization.alloc_tensor(%[[VAL_5]], %[[VAL_6]]) : tensor> -// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor> to memref -// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor> to memref -// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor> to memref -// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor> to memref -// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 2 : index} : tensor> to memref -// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 2 : index} : tensor> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor> to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor> to memref +// CHECK: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor> to memref +// CHECK: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor> to memref // CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref -// CHECK: %[[VAL_15:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref -// CHECK: %[[VAL_16:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor> to memref -// CHECK: %[[VAL_17:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} : tensor> to memref -// CHECK: %[[VAL_18:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} : tensor> to memref -// CHECK: %[[VAL_19:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 2 : index} : tensor> to memref -// CHECK: %[[VAL_20:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 2 : index} : tensor> to memref +// CHECK: %[[VAL_15:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK: %[[VAL_16:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK: %[[VAL_17:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor> to memref +// CHECK: %[[VAL_18:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor> to memref +// CHECK: %[[VAL_19:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 2 : index} : tensor> to memref +// CHECK: %[[VAL_20:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 2 : index} : tensor> to memref // CHECK: %[[VAL_21:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK: %[[VAL_22:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[VAL_23:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_3]]] : memref @@ -321,15 +321,15 @@ // CHECK: %[[VAL_6:.*]] = tensor.dim %[[VAL_0]], %[[VAL_2]] : tensor> // CHECK: %[[VAL_7:.*]] = tensor.dim %[[VAL_1]], %[[VAL_3]] : tensor> // CHECK: %[[VAL_8:.*]] = bufferization.alloc_tensor(%[[VAL_6]], %[[VAL_7]]) : tensor> -// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor> to memref -// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor> to memref -// CHECK: %[[VAL_11:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor> to memref -// CHECK: %[[VAL_12:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 
1 : index} : tensor> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor> to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor> to memref +// CHECK: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor> to memref // CHECK: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor> to memref -// CHECK: %[[VAL_14:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref -// CHECK: %[[VAL_15:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} : tensor> to memref -// CHECK: %[[VAL_16:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} : tensor> to memref -// CHECK: %[[VAL_17:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} : tensor> to memref +// CHECK: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK: %[[VAL_16:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor> to memref +// CHECK: %[[VAL_17:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor> to memref // CHECK: %[[VAL_18:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK: %[[VAL_19:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[VAL_20:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_3]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_outbuf.mlir b/mlir/test/Dialect/SparseTensor/sparse_outbuf.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_outbuf.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_outbuf.mlir @@ -16,8 +16,8 @@ // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0.000000e+00 : f32 // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<10xi32, #{{.*}}> to memref -// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<10xi32, #{{.*}}> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #{{.*}}> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #{{.*}}> to memref // CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10xi32, #{{.*}}> to memref // CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<10xf32> // CHECK: linalg.fill ins(%[[VAL_3]] : f32) outs(%[[VAL_8]] : memref<10xf32>) @@ -50,8 +50,8 @@ // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0.000000e+00 : f32 // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK: %[[VAL_4:.*]] = bufferization.alloc_tensor() : tensor<10xf32> -// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<10xi32, #{{.*}}> to memref -// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<10xi32, #{{.*}}> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #{{.*}}> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #{{.*}}> to memref // CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10xi32, #{{.*}}> to memref // CHECK: %[[VAL_8:.*]] = 
bufferization.to_memref %[[VAL_4]] : memref<10xf32> // CHECK: linalg.fill ins(%[[VAL_2]] : f32) outs(%[[VAL_8]] : memref<10xf32>) @@ -83,8 +83,8 @@ // CHECK-SAME: %[[VAL_1:.*]]: tensor<10xf32>) -> tensor<10xf32> { // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<10xf32, #{{.*}}> to memref -// CHECK: %[[VAL_5:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<10xf32, #{{.*}}> to memref +// CHECK: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<10xf32, #{{.*}}> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<10xf32, #{{.*}}> to memref // CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10xf32, #{{.*}}> to memref // CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<10xf32> // CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_pack.mlir b/mlir/test/Dialect/SparseTensor/sparse_pack.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_pack.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_pack.mlir @@ -2,7 +2,7 @@ #COO = #sparse_tensor.encoding<{ dimLevelType = ["compressed-nu", "singleton"], - indexBitWidth=32 + crdWidth=32 }> // CHECK-LABEL: func.func @sparse_pack( @@ -19,19 +19,19 @@ // CHECK: %[[VAL_10:.*]] = sparse_tensor.storage_specifier.init : // CHECK: %[[VAL_11:.*]] = arith.constant 6 : index // CHECK: %[[VAL_12:.*]] = arith.constant 100 : index -// CHECK: %[[VAL_14:.*]] = sparse_tensor.storage_specifier.set %[[VAL_10]] dim_sz at 0 with %[[VAL_12]] +// CHECK: %[[VAL_14:.*]] = sparse_tensor.storage_specifier.set %[[VAL_10]] lvl_sz at 0 with %[[VAL_12]] // CHECK: %[[VAL_15:.*]] = arith.constant 2 : index -// CHECK: %[[VAL_17:.*]] = sparse_tensor.storage_specifier.set %[[VAL_14]] ptr_mem_sz at 0 with %[[VAL_15]] -// CHECK: %[[VAL_19:.*]] = sparse_tensor.storage_specifier.set %[[VAL_17]] idx_mem_sz at 0 with %[[VAL_11]] -// CHECK: %[[VAL_20:.*]] = sparse_tensor.storage_specifier.set %[[VAL_19]] dim_sz at 1 with %[[VAL_12]] -// CHECK: %[[VAL_21:.*]] = sparse_tensor.storage_specifier.set %[[VAL_20]] idx_mem_sz at 1 with %[[VAL_11]] +// CHECK: %[[VAL_17:.*]] = sparse_tensor.storage_specifier.set %[[VAL_14]] pos_mem_sz at 0 with %[[VAL_15]] +// CHECK: %[[VAL_19:.*]] = sparse_tensor.storage_specifier.set %[[VAL_17]] crd_mem_sz at 0 with %[[VAL_11]] +// CHECK: %[[VAL_20:.*]] = sparse_tensor.storage_specifier.set %[[VAL_19]] lvl_sz at 1 with %[[VAL_12]] +// CHECK: %[[VAL_21:.*]] = sparse_tensor.storage_specifier.set %[[VAL_20]] crd_mem_sz at 1 with %[[VAL_11]] // CHECK: %[[VAL_22:.*]] = sparse_tensor.storage_specifier.set %[[VAL_21]] val_mem_sz with %[[VAL_11]] // CHECK: return %[[VAL_4]], %[[VAL_7]], %[[VAL_9]], %[[VAL_22]] : memref, memref, memref, // CHECK: } -func.func @sparse_pack(%data: tensor<6xf64>, %index: tensor<6x2xi32>) +func.func @sparse_pack(%values: tensor<6xf64>, %coordinates: tensor<6x2xi32>) -> tensor<100x100xf64, #COO> { - %0 = sparse_tensor.pack %data, %index : tensor<6xf64>, tensor<6x2xi32> - to tensor<100x100xf64, #COO> + %0 = sparse_tensor.pack %values, %coordinates + : tensor<6xf64>, tensor<6x2xi32> to tensor<100x100xf64, #COO> return %0 : tensor<100x100xf64, #COO> } diff --git a/mlir/test/Dialect/SparseTensor/sparse_parallel_reduce.mlir b/mlir/test/Dialect/SparseTensor/sparse_parallel_reduce.mlir --- 
a/mlir/test/Dialect/SparseTensor/sparse_parallel_reduce.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_parallel_reduce.mlir @@ -21,8 +21,8 @@ // CHECK-DAG: %[[TMP_c16:.*]] = arith.constant 16 : index // CHECK-DAG: %[[TMP_c0:.*]] = arith.constant 0 : index // CHECK-DAG: %[[TMP_c1:.*]] = arith.constant 1 : index -// CHECK: %[[TMP_0:.*]] = sparse_tensor.pointers %[[TMP_arg0]] {dimension = 1 : index} -// CHECK: %[[TMP_1:.*]] = sparse_tensor.indices %[[TMP_arg0]] {dimension = 1 : index} +// CHECK: %[[TMP_0:.*]] = sparse_tensor.positions %[[TMP_arg0]] {level = 1 : index} +// CHECK: %[[TMP_1:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 1 : index} // CHECK: %[[TMP_2:.*]] = sparse_tensor.values %[[TMP_arg0]] // CHECK: %[[TMP_3:.*]] = bufferization.to_memref %[[TMP_arg1]] : memref<32xf32> // CHECK: %[[TMP_4:.*]] = bufferization.to_memref %[[TMP_arg2]] : memref<16xf32> diff --git a/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir b/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir @@ -47,8 +47,8 @@ // CHECK-RWT-DAG: %[[C0:.*]] = arith.constant 0 : index // CHECK-RWT-DAG: %[[C1:.*]] = arith.constant 1 : index // CHECK-RWT: %[[B:.*]] = bufferization.alloc_tensor() -// CHECK-RWT: %[[P0:.*]] = sparse_tensor.pointers %[[S]] {dimension = 0 : index} -// CHECK-RWT: %[[I0:.*]] = sparse_tensor.indices %[[S]] {dimension = 0 : index} +// CHECK-RWT: %[[P0:.*]] = sparse_tensor.positions %[[S]] {level = 0 : index} +// CHECK-RWT: %[[I0:.*]] = sparse_tensor.coordinates %[[S]] {level = 0 : index} // CHECK-RWT: %[[V:.*]] = sparse_tensor.values %[[S]] // CHECK-RWT: %[[S0:.*]] = memref.load %[[P0]]{{\[}}%[[C0]]] : memref // CHECK-RWT: %[[E0:.*]] = memref.load %[[P0]]{{\[}}%[[C1]]] : memref @@ -111,10 +111,10 @@ // CHECK-RWT-DAG: %[[C0:.*]] = arith.constant 0 : index // CHECK-RWT-DAG: %[[C1:.*]] = arith.constant 1 : index // CHECK-RWT: %[[B:.*]] = bufferization.alloc_tensor() -// CHECK-RWT: %[[P0:.*]] = sparse_tensor.pointers %[[S]] {dimension = 0 : index} -// CHECK-RWT: %[[I0:.*]] = sparse_tensor.indices %[[S]] {dimension = 0 : index} -// CHECK-RWT: %[[P1:.*]] = sparse_tensor.pointers %[[S]] {dimension = 1 : index} -// CHECK-RWT: %[[I1:.*]] = sparse_tensor.indices %[[S]] {dimension = 1 : index} +// CHECK-RWT: %[[P0:.*]] = sparse_tensor.positions %[[S]] {level = 0 : index} +// CHECK-RWT: %[[I0:.*]] = sparse_tensor.coordinates %[[S]] {level = 0 : index} +// CHECK-RWT: %[[P1:.*]] = sparse_tensor.positions %[[S]] {level = 1 : index} +// CHECK-RWT: %[[I1:.*]] = sparse_tensor.coordinates %[[S]] {level = 1 : index} // CHECK-RWT: %[[V:.*]] = sparse_tensor.values %[[S]] // CHECK-RWT: %[[S0:.*]] = memref.load %[[P0]]{{\[}}%[[C0]]] : memref // CHECK-RWT: %[[E0:.*]] = memref.load %[[P0]]{{\[}}%[[C1]]] : memref @@ -191,8 +191,8 @@ // CHECK-RWT: %[[SD:.*]] = tensor.dim %[[S]], %[[C0]] // CHECK-RWT: %[[DD0:.*]] = arith.divui %[[SD]], %[[C10]] : index // CHECK-RWT: %[[B:.*]] = bufferization.alloc_tensor(%[[DD0]]) -// CHECK-RWT: %[[P0:.*]] = sparse_tensor.pointers %[[S]] {dimension = 0 : index} -// CHECK-RWT: %[[I0:.*]] = sparse_tensor.indices %[[S]] {dimension = 0 : index} +// CHECK-RWT: %[[P0:.*]] = sparse_tensor.positions %[[S]] {level = 0 : index} +// CHECK-RWT: %[[I0:.*]] = sparse_tensor.coordinates %[[S]] {level = 0 : index} // CHECK-RWT: %[[V:.*]] = sparse_tensor.values %[[S]] // CHECK-RWT: %[[S0:.*]] = memref.load %[[P0]]{{\[}}%[[C0]]] : memref // CHECK-RWT: %[[E0:.*]] = memref.load 
%[[P0]]{{\[}}%[[C1]]] : memref @@ -265,10 +265,10 @@ // CHECK-RWT: %[[SD1:.*]] = tensor.dim %[[S]], %[[C1]] // CHECK-RWT: %[[DD0:.*]] = arith.muli %[[SD1]], %[[C10]] : index // CHECK-RWT: %[[B:.*]] = bufferization.alloc_tensor(%[[DD0]]) -// CHECK-RWT: %[[P0:.*]] = sparse_tensor.pointers %[[S]] {dimension = 0 : index} -// CHECK-RWT: %[[I0:.*]] = sparse_tensor.indices %[[S]] {dimension = 0 : index} -// CHECK-RWT: %[[P1:.*]] = sparse_tensor.pointers %[[S]] {dimension = 1 : index} -// CHECK-RWT: %[[I1:.*]] = sparse_tensor.indices %[[S]] {dimension = 1 : index} +// CHECK-RWT: %[[P0:.*]] = sparse_tensor.positions %[[S]] {level = 0 : index} +// CHECK-RWT: %[[I0:.*]] = sparse_tensor.coordinates %[[S]] {level = 0 : index} +// CHECK-RWT: %[[P1:.*]] = sparse_tensor.positions %[[S]] {level = 1 : index} +// CHECK-RWT: %[[I1:.*]] = sparse_tensor.coordinates %[[S]] {level = 1 : index} // CHECK-RWT: %[[V:.*]] = sparse_tensor.values %[[S]] // CHECK-RWT: %[[S0:.*]] = memref.load %[[P0]]{{\[}}%[[C0]]] : memref // CHECK-RWT: %[[E0:.*]] = memref.load %[[P0]]{{\[}}%[[C1]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_reshape_dot.mlir b/mlir/test/Dialect/SparseTensor/sparse_reshape_dot.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_reshape_dot.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_reshape_dot.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s --linalg-generalize-named-ops --sparsification --cse --canonicalize | FileCheck %s -#COO_2D = #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ], pointerBitWidth = 32, indexBitWidth = 32 }> -#COO_3D = #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton-nu", "singleton" ], pointerBitWidth = 32, indexBitWidth = 32 }> +#COO_2D = #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ], posWidth = 32, crdWidth = 32 }> +#COO_3D = #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton-nu", "singleton" ], posWidth = 32, crdWidth = 32 }> // CHECK-LABEL: func.func @sparse_reshape_fused( // CHECK-SAME: %[[VAL_0:.*]]: tensor<5x6xf32>, @@ -11,10 +11,10 @@ // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_6:.*]] = tensor.empty() : tensor<5x6xf32> -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 0 : index} -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 2 : index} +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 2 : index} // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_6]] : memref<5x6xf32> // CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_4]] to %[[VAL_2]] step %[[VAL_5]] { diff --git a/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir b/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir @@ -28,10 +28,10 @@ // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index // 
CHECK-DAG: %[[VAL_8:.*]] = arith.addf %[[VAL_2]], %[[VAL_3]] : f32 -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>> to memref +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>> to memref +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse_tensor.encoding<{{.*}}>> to memref // CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_1]] : memref // CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_4]] : memref<32x16xf32> diff --git a/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir b/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir @@ -68,10 +68,10 @@ // CHECK-DAG: %[[VAL_8:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) {bufferization.escape = [false], memory_space = 0 : i64} : tensor<8x8xf64> // CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<8x8xf64> // CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<8x8xf64> -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{.*}}>> to memref -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{.*}}>> to memref -// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{.*}}>> to memref -// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{.*}}>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{.*}}>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{.*}}>> to memref +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{.*}}>> to memref +// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{.*}}>> to memref // CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xf64, #sparse_tensor.encoding<{{.*}}>> to memref // CHECK: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_8]] : memref<8x8xf64> // CHECK: %[[VAL_17:.*]] 
= memref.load %[[VAL_11]]{{\[}}%[[VAL_4]]] : memref @@ -136,10 +136,10 @@ // CHECK-DAG: %[[VAL_10:.*]] = bufferization.alloc_tensor() {bufferization.escape = [false]} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> // CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<8x8xf64> // CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<8x8xf64> -// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK-DAG: %[[VAL_17:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_19:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_5]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_sddmm_org.mlir b/mlir/test/Dialect/SparseTensor/sparse_sddmm_org.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_sddmm_org.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_sddmm_org.mlir @@ -32,10 +32,10 @@ // CHECK: %[[VAL_8:.*]] = bufferization.alloc_tensor() : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> // CHECK: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<8x8xf64> // CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<8x8xf64> -// CHECK: %[[VAL_11:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK: %[[VAL_12:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK: %[[VAL_13:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref -// CHECK: %[[VAL_14:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<8x8xf64, 
#sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref +// CHECK: %[[VAL_14:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK: %[[VAL_15:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to memref // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_5]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_storage.mlir b/mlir/test/Dialect/SparseTensor/sparse_storage.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_storage.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_storage.mlir @@ -2,14 +2,14 @@ #SparseVector64 = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ], - pointerBitWidth = 64, - indexBitWidth = 64 + posWidth = 64, + crdWidth = 64 }> #SparseVector32 = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ], - pointerBitWidth = 32, - indexBitWidth = 32 + posWidth = 32, + crdWidth = 32 }> #trait_mul = { diff --git a/mlir/test/Dialect/SparseTensor/sparse_transpose.mlir b/mlir/test/Dialect/SparseTensor/sparse_transpose.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_transpose.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_transpose.mlir @@ -21,10 +21,10 @@ // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_3:.*]] = bufferization.alloc_tensor() : tensor<4x3xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> // CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.convert %[[VAL_0]] : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>> to tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> -// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_4]] {dimension = 0 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_4]] {dimension = 0 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_4]] {dimension = 1 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_4]] {dimension = 1 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref +// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_4]] {level = 0 : index} : 
tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref +// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_4]] {level = 0 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_4]] {level = 1 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_4]] {level = 1 : index} : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_4]] : tensor<3x4xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)> }>> to memref // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_1]]] : memref // CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_2]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_vector.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_vector.mlir @@ -87,8 +87,8 @@ #SparseVector = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ], - pointerBitWidth = 32, - indexBitWidth = 32 + posWidth = 32, + crdWidth = 32 }> #trait_mul_s = { @@ -310,8 +310,8 @@ #SparseMatrix = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], - pointerBitWidth = 32, - indexBitWidth = 32 + posWidth = 32, + crdWidth = 32 }> #trait_mul_ds = { diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir @@ -25,11 +25,11 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 64 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} : tensor<64x32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]] {dimension = 1 : index} : tensor<64x32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<64x32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<64x32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref // CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<64x32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK: %[[VAL_11:.*]] = sparse_tensor.pointers %[[VAL_2]] {dimension = 1 : index} : tensor<64x32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref -// CHECK: %[[VAL_12:.*]] = sparse_tensor.indices %[[VAL_2]] {dimension = 1 : index} : tensor<64x32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> 
to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 1 : index} : tensor<64x32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref +// CHECK: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 1 : index} : tensor<64x32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref // CHECK: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<64x32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>> to memref // CHECK: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_14]][] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir @@ -25,8 +25,8 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_7:.*]] = tensor.empty() : tensor<8xi64> -// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<8xi64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<8xi64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8xi64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8xi64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8xi64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_7]] : memref<8xi64> // CHECK: linalg.fill ins(%[[VAL_4]] : i64) outs(%[[VAL_11]] : memref<8xi64>) @@ -67,8 +67,8 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant true // CHECK-DAG: %[[VAL_7:.*]] = tensor.empty() : tensor<8xi64> -// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<8xi64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref -// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<8xi64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8xi64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8xi64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8xi64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref // CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_7]] : memref<8xi64> // CHECK: linalg.fill ins(%[[VAL_3]] : i64) outs(%[[VAL_11]] : memref<8xi64>) diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector_peeled.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_peeled.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_vector_peeled.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_vector_peeled.mlir @@ -3,8 +3,8 @@ 
#SparseVector = #sparse_tensor.encoding<{
dimLevelType = [ "compressed" ],
- pointerBitWidth = 32,
- indexBitWidth = 32
+ posWidth = 32,
+ crdWidth = 32
}>
#trait_mul_s = {
diff --git a/mlir/test/Dialect/SparseTensor/specifier_to_llvm.mlir b/mlir/test/Dialect/SparseTensor/specifier_to_llvm.mlir
--- a/mlir/test/Dialect/SparseTensor/specifier_to_llvm.mlir
+++ b/mlir/test/Dialect/SparseTensor/specifier_to_llvm.mlir
@@ -21,7 +21,7 @@
// CHECK: %[[CAST:.*]] = arith.index_cast %[[VAL_1]] : i64 to index
// CHECK: return %[[CAST]] : index
func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#CSR>) -> index {
- %0 = sparse_tensor.storage_specifier.get %arg0 dim_sz at 0
+ %0 = sparse_tensor.storage_specifier.get %arg0 lvl_sz at 0
: !sparse_tensor.storage_specifier<#CSR>
return %0 : index
}
@@ -34,7 +34,7 @@
// CHECK: return %[[VAL_2]] : !llvm.struct<(array<2 x i64>, array<3 x i64>)>
func.func @sparse_set_md(%arg0: !sparse_tensor.storage_specifier<#CSR>, %arg1: index) -> !sparse_tensor.storage_specifier<#CSR> {
- %0 = sparse_tensor.storage_specifier.set %arg0 dim_sz at 0 with %arg1
+ %0 = sparse_tensor.storage_specifier.set %arg0 lvl_sz at 0 with %arg1
: !sparse_tensor.storage_specifier<#CSR>
return %0 : !sparse_tensor.storage_specifier<#CSR>
}
diff --git a/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir b/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir
--- a/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir
+++ b/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir
@@ -35,7 +35,7 @@
// CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-ON-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK-ON-DAG: %[[VAL_7:.*]] = tensor.dim %[[VAL_1]], %[[VAL_5]] : tensor>
-// CHECK-ON: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} : tensor> to memref
+// CHECK-ON: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor> to memref
// CHECK-ON: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref
// CHECK-ON: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_0]] : memref
// CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_10]][] : memref
@@ -66,7 +66,7 @@
// CHECK-OFF-DAG: %[[VAL_2:.*]] = arith.constant 0 : index
// CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK-OFF: %[[VAL_4:.*]] = tensor.dim %[[VAL_1]], %[[VAL_2]] : tensor>
-// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} : tensor> to memref
+// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor> to memref
// CHECK-OFF: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref
// CHECK-OFF: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : memref
// CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_7]][] : memref
@@ -121,8 +121,8 @@
// CHECK-ON-DAG: %[[VAL_4:.*]] = arith.constant dense<0.000000e+00> : vector<8xf64>
// CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-ON-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
-// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref
-// CHECK-ON: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} : tensor> to memref
+// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref
+// CHECK-ON: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor> to memref
// CHECK-ON: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref
// CHECK-ON: 
%[[VAL_10:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_10]][] : memref @@ -154,8 +154,8 @@ // CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-OFF-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref -// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 1 : index} : tensor> to memref +// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref +// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor> to memref // CHECK-OFF: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-OFF: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_7]][] : memref @@ -216,7 +216,7 @@ // CHECK-ON-DAG: %[[VAL_3:.*]] = arith.constant dense<0> : vector<8xi13> // CHECK-ON-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref +// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref // CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-ON: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref @@ -242,7 +242,7 @@ // CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-OFF-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref +// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref // CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref @@ -294,7 +294,7 @@ // CHECK-ON-DAG: %[[VAL_3:.*]] = arith.constant dense<0> : vector<8xi13> // CHECK-ON-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref +// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref // CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-ON: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref @@ -320,7 +320,7 @@ // CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-OFF-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref +// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref // CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref @@ -369,7 +369,7 @@ // CHECK-ON-DAG: %[[VAL_3:.*]] 
= arith.constant 0 : index // CHECK-ON-DAG: %[[VAL_4:.*]] = arith.constant dense<0> : vector<8xi32> // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref +// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref // CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-ON: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref @@ -395,7 +395,7 @@ // CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-OFF-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref +// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref // CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref @@ -446,7 +446,7 @@ // CHECK-ON-DAG: %[[VAL_3:.*]] = arith.constant dense<0> : vector<8xi32> // CHECK-ON-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref +// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref // CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-ON: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref @@ -472,7 +472,7 @@ // CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-OFF-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref +// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref // CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref @@ -520,7 +520,7 @@ // CHECK-ON-DAG: %[[VAL_3:.*]] = arith.constant dense<0> : vector<8xi32> // CHECK-ON-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref +// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref // CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-ON: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref @@ -546,7 +546,7 @@ // CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-OFF-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref +// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref // CHECK-OFF: 
%[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref @@ -595,7 +595,7 @@ // CHECK-ON-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant dense<0> : vector<8xi32> // CHECK-ON-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref +// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref // CHECK-ON: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-ON: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_9]][] : memref @@ -621,7 +621,7 @@ // CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-OFF-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref +// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref // CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref @@ -669,7 +669,7 @@ // CHECK-ON-DAG: %[[VAL_3:.*]] = arith.constant dense<0> : vector<8xi32> // CHECK-ON-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref +// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref // CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-ON: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref @@ -695,7 +695,7 @@ // CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-OFF-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref +// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref // CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref @@ -743,7 +743,7 @@ // CHECK-ON-DAG: %[[VAL_3:.*]] = arith.constant dense<0.000000e+00> : vector<8xf32> // CHECK-ON-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref +// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref // CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-ON: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref @@ -769,7 +769,7 @@ // CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-OFF-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-OFF-DAG: 
%[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref +// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref // CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref @@ -817,7 +817,7 @@ // CHECK-ON-DAG: %[[VAL_3:.*]] = arith.constant dense<0.000000e+00> : vector<8xf32> // CHECK-ON-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref +// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref // CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-ON: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref @@ -843,7 +843,7 @@ // CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor>) -> tensor { // CHECK-OFF-DAG: %[[VAL_2:.*]] = arith.constant 0 : index // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index -// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor> to memref +// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor> to memref // CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor> to memref // CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir @@ -16,8 +16,8 @@ // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" // RUN: %{compile} | %{run} -#COO_2D = #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ], pointerBitWidth = 32, indexBitWidth = 32 }> -#COO_3D = #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton-nu", "singleton" ], pointerBitWidth = 32, indexBitWidth = 32 }> +#COO_2D = #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ], posWidth = 32, crdWidth = 32 }> +#COO_3D = #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton-nu", "singleton" ], posWidth = 32, crdWidth = 32 }> module { func.func private @printMemref3dF32(%ptr : tensor) attributes { llvm.emit_c_interface } diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir @@ -48,12 +48,12 @@ %ts = sparse_tensor.convert %ti : tensor<10x8xf64> to tensor<10x8xf64, #Tensor1> // CHECK: ( 0, 1, 4, 5, 6, 9 ) - %i0 = sparse_tensor.indices %ts { dimension = 0 : index } : tensor<10x8xf64, #Tensor1> to memref + %i0 = sparse_tensor.coordinates %ts { level = 0 : index } : tensor<10x8xf64, #Tensor1> to memref %i0r = 
vector.transfer_read %i0[%c0], %c0: memref, vector<6xindex> vector.print %i0r : vector<6xindex> // CHECK: ( 0, 7, 2, 2, 3, 4, 6, 7 ) - %i1 = sparse_tensor.indices %ts { dimension = 1 : index } : tensor<10x8xf64, #Tensor1> to memref + %i1 = sparse_tensor.coordinates %ts { level = 1 : index } : tensor<10x8xf64, #Tensor1> to memref %i1r = vector.transfer_read %i1[%c0], %c0: memref, vector<8xindex> vector.print %i1r : vector<8xindex> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir @@ -180,7 +180,7 @@ call @dumpf64(%iv) : (memref) -> () // - // Check indices. + // Check coordinates. // // CHECK-NEXT: ( 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) // CHECK-NEXT: ( 0, 1, 2, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) @@ -219,45 +219,45 @@ // CHECK-NEXT: ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) // CHECK-NEXT: ( 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0 ) // - %v10 = sparse_tensor.indices %1 { dimension = 0 : index } : tensor<2x3x4xf64, #Tensor1> to memref - %v11 = sparse_tensor.indices %1 { dimension = 1 : index } : tensor<2x3x4xf64, #Tensor1> to memref - %v12 = sparse_tensor.indices %1 { dimension = 2 : index } : tensor<2x3x4xf64, #Tensor1> to memref - %v20 = sparse_tensor.indices %2 { dimension = 0 : index } : tensor<2x3x4xf64, #Tensor2> to memref - %v21 = sparse_tensor.indices %2 { dimension = 1 : index } : tensor<2x3x4xf64, #Tensor2> to memref - %v22 = sparse_tensor.indices %2 { dimension = 2 : index } : tensor<2x3x4xf64, #Tensor2> to memref - %v30 = sparse_tensor.indices %3 { dimension = 0 : index } : tensor<2x3x4xf64, #Tensor3> to memref - %v31 = sparse_tensor.indices %3 { dimension = 1 : index } : tensor<2x3x4xf64, #Tensor3> to memref - %v32 = sparse_tensor.indices %3 { dimension = 2 : index } : tensor<2x3x4xf64, #Tensor3> to memref + %v10 = sparse_tensor.coordinates %1 { level = 0 : index } : tensor<2x3x4xf64, #Tensor1> to memref + %v11 = sparse_tensor.coordinates %1 { level = 1 : index } : tensor<2x3x4xf64, #Tensor1> to memref + %v12 = sparse_tensor.coordinates %1 { level = 2 : index } : tensor<2x3x4xf64, #Tensor1> to memref + %v20 = sparse_tensor.coordinates %2 { level = 0 : index } : tensor<2x3x4xf64, #Tensor2> to memref + %v21 = sparse_tensor.coordinates %2 { level = 1 : index } : tensor<2x3x4xf64, #Tensor2> to memref + %v22 = sparse_tensor.coordinates %2 { level = 2 : index } : tensor<2x3x4xf64, #Tensor2> to memref + %v30 = sparse_tensor.coordinates %3 { level = 0 : index } : tensor<2x3x4xf64, #Tensor3> to memref + %v31 = sparse_tensor.coordinates %3 { level = 1 : index } : tensor<2x3x4xf64, #Tensor3> to memref + %v32 = sparse_tensor.coordinates %3 { level = 2 : index } : tensor<2x3x4xf64, #Tensor3> to memref - %a10 = sparse_tensor.indices %a { dimension = 0 : index } : tensor<2x3x4xf64, #Tensor1> to memref - %a11 = sparse_tensor.indices %a { dimension = 1 : index } : tensor<2x3x4xf64, #Tensor1> to memref - %a12 = sparse_tensor.indices %a { dimension = 2 : index } : tensor<2x3x4xf64, #Tensor1> to memref - %b10 = sparse_tensor.indices %b { dimension = 0 : index } : tensor<2x3x4xf64, #Tensor1> to memref - %b11 = sparse_tensor.indices %b { dimension = 1 : index } : tensor<2x3x4xf64, #Tensor1> to memref - %b12 = 
sparse_tensor.indices %b { dimension = 2 : index } : tensor<2x3x4xf64, #Tensor1> to memref - %c10 = sparse_tensor.indices %c { dimension = 0 : index } : tensor<2x3x4xf64, #Tensor1> to memref - %c11 = sparse_tensor.indices %c { dimension = 1 : index } : tensor<2x3x4xf64, #Tensor1> to memref - %c12 = sparse_tensor.indices %c { dimension = 2 : index } : tensor<2x3x4xf64, #Tensor1> to memref + %a10 = sparse_tensor.coordinates %a { level = 0 : index } : tensor<2x3x4xf64, #Tensor1> to memref + %a11 = sparse_tensor.coordinates %a { level = 1 : index } : tensor<2x3x4xf64, #Tensor1> to memref + %a12 = sparse_tensor.coordinates %a { level = 2 : index } : tensor<2x3x4xf64, #Tensor1> to memref + %b10 = sparse_tensor.coordinates %b { level = 0 : index } : tensor<2x3x4xf64, #Tensor1> to memref + %b11 = sparse_tensor.coordinates %b { level = 1 : index } : tensor<2x3x4xf64, #Tensor1> to memref + %b12 = sparse_tensor.coordinates %b { level = 2 : index } : tensor<2x3x4xf64, #Tensor1> to memref + %c10 = sparse_tensor.coordinates %c { level = 0 : index } : tensor<2x3x4xf64, #Tensor1> to memref + %c11 = sparse_tensor.coordinates %c { level = 1 : index } : tensor<2x3x4xf64, #Tensor1> to memref + %c12 = sparse_tensor.coordinates %c { level = 2 : index } : tensor<2x3x4xf64, #Tensor1> to memref - %d20 = sparse_tensor.indices %d { dimension = 0 : index } : tensor<2x3x4xf64, #Tensor2> to memref - %d21 = sparse_tensor.indices %d { dimension = 1 : index } : tensor<2x3x4xf64, #Tensor2> to memref - %d22 = sparse_tensor.indices %d { dimension = 2 : index } : tensor<2x3x4xf64, #Tensor2> to memref - %e20 = sparse_tensor.indices %e { dimension = 0 : index } : tensor<2x3x4xf64, #Tensor2> to memref - %e21 = sparse_tensor.indices %e { dimension = 1 : index } : tensor<2x3x4xf64, #Tensor2> to memref - %e22 = sparse_tensor.indices %e { dimension = 2 : index } : tensor<2x3x4xf64, #Tensor2> to memref - %f20 = sparse_tensor.indices %f { dimension = 0 : index } : tensor<2x3x4xf64, #Tensor2> to memref - %f21 = sparse_tensor.indices %f { dimension = 1 : index } : tensor<2x3x4xf64, #Tensor2> to memref - %f22 = sparse_tensor.indices %f { dimension = 2 : index } : tensor<2x3x4xf64, #Tensor2> to memref + %d20 = sparse_tensor.coordinates %d { level = 0 : index } : tensor<2x3x4xf64, #Tensor2> to memref + %d21 = sparse_tensor.coordinates %d { level = 1 : index } : tensor<2x3x4xf64, #Tensor2> to memref + %d22 = sparse_tensor.coordinates %d { level = 2 : index } : tensor<2x3x4xf64, #Tensor2> to memref + %e20 = sparse_tensor.coordinates %e { level = 0 : index } : tensor<2x3x4xf64, #Tensor2> to memref + %e21 = sparse_tensor.coordinates %e { level = 1 : index } : tensor<2x3x4xf64, #Tensor2> to memref + %e22 = sparse_tensor.coordinates %e { level = 2 : index } : tensor<2x3x4xf64, #Tensor2> to memref + %f20 = sparse_tensor.coordinates %f { level = 0 : index } : tensor<2x3x4xf64, #Tensor2> to memref + %f21 = sparse_tensor.coordinates %f { level = 1 : index } : tensor<2x3x4xf64, #Tensor2> to memref + %f22 = sparse_tensor.coordinates %f { level = 2 : index } : tensor<2x3x4xf64, #Tensor2> to memref - %g30 = sparse_tensor.indices %g { dimension = 0 : index } : tensor<2x3x4xf64, #Tensor3> to memref - %g31 = sparse_tensor.indices %g { dimension = 1 : index } : tensor<2x3x4xf64, #Tensor3> to memref - %g32 = sparse_tensor.indices %g { dimension = 2 : index } : tensor<2x3x4xf64, #Tensor3> to memref - %h30 = sparse_tensor.indices %h { dimension = 0 : index } : tensor<2x3x4xf64, #Tensor3> to memref - %h31 = sparse_tensor.indices %h { dimension = 1 : index } : 
tensor<2x3x4xf64, #Tensor3> to memref - %h32 = sparse_tensor.indices %h { dimension = 2 : index } : tensor<2x3x4xf64, #Tensor3> to memref - %i30 = sparse_tensor.indices %i { dimension = 0 : index } : tensor<2x3x4xf64, #Tensor3> to memref - %i31 = sparse_tensor.indices %i { dimension = 1 : index } : tensor<2x3x4xf64, #Tensor3> to memref - %i32 = sparse_tensor.indices %i { dimension = 2 : index } : tensor<2x3x4xf64, #Tensor3> to memref + %g30 = sparse_tensor.coordinates %g { level = 0 : index } : tensor<2x3x4xf64, #Tensor3> to memref + %g31 = sparse_tensor.coordinates %g { level = 1 : index } : tensor<2x3x4xf64, #Tensor3> to memref + %g32 = sparse_tensor.coordinates %g { level = 2 : index } : tensor<2x3x4xf64, #Tensor3> to memref + %h30 = sparse_tensor.coordinates %h { level = 0 : index } : tensor<2x3x4xf64, #Tensor3> to memref + %h31 = sparse_tensor.coordinates %h { level = 1 : index } : tensor<2x3x4xf64, #Tensor3> to memref + %h32 = sparse_tensor.coordinates %h { level = 2 : index } : tensor<2x3x4xf64, #Tensor3> to memref + %i30 = sparse_tensor.coordinates %i { level = 0 : index } : tensor<2x3x4xf64, #Tensor3> to memref + %i31 = sparse_tensor.coordinates %i { level = 1 : index } : tensor<2x3x4xf64, #Tensor3> to memref + %i32 = sparse_tensor.coordinates %i { level = 2 : index } : tensor<2x3x4xf64, #Tensor3> to memref call @dumpidx(%v10) : (memref) -> () call @dumpidx(%v11) : (memref) -> () diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir @@ -28,27 +28,27 @@ #DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], - pointerBitWidth = 8, - indexBitWidth = 8 + posWidth = 8, + crdWidth = 8 }> #DCSC = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], dimOrdering = affine_map<(i,j) -> (j,i)>, - pointerBitWidth = 64, - indexBitWidth = 64 + posWidth = 64, + crdWidth = 64 }> #CSC = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(i,j) -> (j,i)>, - pointerBitWidth = 16, - indexBitWidth = 32 + posWidth = 16, + crdWidth = 32 }> // // Integration test that tests conversions between sparse tensors, -// where the pointer and index sizes in the overhead storage change +// where the position and index sizes in the overhead storage change // in addition to layout. 
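The `posWidth`/`crdWidth` keywords that replace `pointerBitWidth`/`indexBitWidth` in the encodings above can be exercised through the Python bindings this patch also updates (see the `dialect.py` changes near the end of the diff). A minimal sketch, assuming an in-tree build of the MLIR Python bindings; the `ENCODINGS` table is an illustrative name that just copies the textual forms from the hunk above:

from mlir.ir import Attribute, Context
from mlir.dialects import sparse_tensor as st

# Textual encodings copied from the #DCSR/#DCSC/#CSC definitions above.
ENCODINGS = {
    "DCSR": '#sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], '
            'posWidth = 8, crdWidth = 8 }>',
    "DCSC": '#sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], '
            'dimOrdering = affine_map<(i,j) -> (j,i)>, posWidth = 64, crdWidth = 64 }>',
    "CSC":  '#sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], '
            'dimOrdering = affine_map<(i,j) -> (j,i)>, posWidth = 16, crdWidth = 32 }>',
}

with Context():
    for name, text in ENCODINGS.items():
        enc = st.EncodingAttr(Attribute.parse(text))
        # The former pointer_bit_width/index_bit_width accessors are now
        # pos_width/crd_width; the parsed values round-trip unchanged.
        print(name, enc.pos_width, enc.crd_width)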
// module { @@ -136,12 +136,12 @@ // CHECK-NEXT: ( 0, 1, 63, 0, 1, 0, 63, 0 ) // CHECK-NEXT: ( 0, 1, 63, 0, 1, 0, 63, 0 ) // - %i1 = sparse_tensor.indices %1 { dimension = 1 : index } : tensor<32x64xf64, #DCSR> to memref - %i2 = sparse_tensor.indices %2 { dimension = 1 : index } : tensor<32x64xf64, #DCSC> to memref - %i3 = sparse_tensor.indices %3 { dimension = 1 : index } : tensor<32x64xf64, #CSC> to memref - %i4 = sparse_tensor.indices %4 { dimension = 1 : index } : tensor<32x64xf64, #DCSC> to memref - %i5 = sparse_tensor.indices %5 { dimension = 1 : index } : tensor<32x64xf64, #DCSR> to memref - %i6 = sparse_tensor.indices %6 { dimension = 1 : index } : tensor<32x64xf64, #DCSR> to memref + %i1 = sparse_tensor.coordinates %1 { level = 1 : index } : tensor<32x64xf64, #DCSR> to memref + %i2 = sparse_tensor.coordinates %2 { level = 1 : index } : tensor<32x64xf64, #DCSC> to memref + %i3 = sparse_tensor.coordinates %3 { level = 1 : index } : tensor<32x64xf64, #CSC> to memref + %i4 = sparse_tensor.coordinates %4 { level = 1 : index } : tensor<32x64xf64, #DCSC> to memref + %i5 = sparse_tensor.coordinates %5 { level = 1 : index } : tensor<32x64xf64, #DCSR> to memref + %i6 = sparse_tensor.coordinates %6 { level = 1 : index } : tensor<32x64xf64, #DCSR> to memref call @dumpi08(%i1) : (memref) -> () call @dumpi64(%i2) : (memref) -> () call @dumpi32(%i3) : (memref) -> () diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir @@ -39,7 +39,7 @@ func.func private @createSparseTensorReader(!Filename) -> (!TensorReader) func.func private @delSparseTensorReader(!TensorReader) -> () func.func private @getSparseTensorReaderRank(!TensorReader) -> (index) - func.func private @getSparseTensorReaderNNZ(!TensorReader) -> (index) + func.func private @getSparseTensorReaderNSE(!TensorReader) -> (index) func.func private @getSparseTensorReaderIsSymmetric(!TensorReader) -> (i1) func.func private @copySparseTensorReaderDimSizes(!TensorReader, memref) -> () attributes { llvm.emit_c_interface } @@ -87,12 +87,12 @@ %c2 = arith.constant 2 : index %rank = call @getSparseTensorReaderRank(%tensor) : (!TensorReader) -> index - %nnz = call @getSparseTensorReaderNNZ(%tensor) : (!TensorReader) -> index + %nse = call @getSparseTensorReaderNSE(%tensor) : (!TensorReader) -> index // Assume rank == 2. 
- %isize = arith.muli %c2, %nnz : index + %isize = arith.muli %c2, %nse : index %xs = memref.alloc(%isize) : memref - %vs = memref.alloc(%nnz) : memref + %vs = memref.alloc(%nse) : memref %dim2lvl = memref.alloca(%c2) : memref memref.store %c0, %dim2lvl[%c0] : memref memref.store %c1, %dim2lvl[%c1] : memref @@ -110,8 +110,8 @@ : (!Filename) -> (!TensorReader) %rank = call @getSparseTensorReaderRank(%tensor) : (!TensorReader) -> index vector.print %rank : index - %nnz = call @getSparseTensorReaderNNZ(%tensor) : (!TensorReader) -> index - vector.print %nnz : index + %nse = call @getSparseTensorReaderNSE(%tensor) : (!TensorReader) -> index + vector.print %nse : index %symmetric = call @getSparseTensorReaderIsSymmetric(%tensor) : (!TensorReader) -> i1 vector.print %symmetric : i1 @@ -122,9 +122,9 @@ %xs, %vs, %isSorted = call @readTensorFile(%tensor) : (!TensorReader) -> (memref, memref, i1) - %x0s = memref.subview %xs[%c0][%nnz][%c2] + %x0s = memref.subview %xs[%c0][%nse][%c2] : memref to memref> - %x1s = memref.subview %xs[%c1][%nnz][%c2] + %x1s = memref.subview %xs[%c1][%nse][%c2] : memref to memref> vector.print %isSorted : i1 call @dumpi2(%x0s) : (memref>) -> () @@ -152,18 +152,18 @@ : (!Filename) -> (!TensorWriter) %rank = call @getSparseTensorReaderRank(%tensor0) : (!TensorReader) -> index - %nnz = call @getSparseTensorReaderNNZ(%tensor0) : (!TensorReader) -> index + %nse = call @getSparseTensorReaderNSE(%tensor0) : (!TensorReader) -> index %dimSizes = memref.alloc(%rank) : memref func.call @copySparseTensorReaderDimSizes(%tensor0, %dimSizes) : (!TensorReader, memref) -> () - call @outSparseTensorWriterMetaData(%tensor1, %rank, %nnz, %dimSizes) + call @outSparseTensorWriterMetaData(%tensor1, %rank, %nse, %dimSizes) : (!TensorWriter, index, index, memref) -> () //TODO: handle isSymmetric. // Assume rank == 2. %indices = memref.alloc(%rank) : memref %value = memref.alloca() : memref - scf.for %i = %c0 to %nnz step %c1 { + scf.for %i = %c0 to %nse step %c1 { func.call @getSparseTensorReaderNextF32(%tensor0, %indices, %value) : (!TensorReader, memref, memref) -> () func.call @outSparseTensorWriterNextF32(%tensor1, %rank, %indices, %value) @@ -223,4 +223,4 @@ return } -} \ No newline at end of file +} diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir @@ -36,13 +36,13 @@ module { - // Dumps pointers, indices, values for verification. + // Dumps positions, indices, values for verification. 
func.func @dump(%argx: tensor<1024xf32, #SparseVector>) { %c0 = arith.constant 0 : index %f0 = arith.constant 0.0 : f32 - %p = sparse_tensor.pointers %argx { dimension = 0 : index } + %p = sparse_tensor.positions %argx { level = 0 : index } : tensor<1024xf32, #SparseVector> to memref - %i = sparse_tensor.indices %argx { dimension = 0 : index } + %i = sparse_tensor.coordinates %argx { level = 0 : index } : tensor<1024xf32, #SparseVector> to memref %v = sparse_tensor.values %argx : tensor<1024xf32, #SparseVector> to memref diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir @@ -57,9 +57,9 @@ %c0 = arith.constant 0 : index %cu = arith.constant -1 : index %fu = arith.constant 99.0 : f64 - %p0 = sparse_tensor.pointers %arg0 { dimension = 0 : index } : tensor<4x3xf64, #SortedCOO> to memref - %i0 = sparse_tensor.indices %arg0 { dimension = 0 : index } : tensor<4x3xf64, #SortedCOO> to memref> - %i1 = sparse_tensor.indices %arg0 { dimension = 1 : index } : tensor<4x3xf64, #SortedCOO> to memref> + %p0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<4x3xf64, #SortedCOO> to memref + %i0 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<4x3xf64, #SortedCOO> to memref> + %i1 = sparse_tensor.coordinates %arg0 { level = 1 : index } : tensor<4x3xf64, #SortedCOO> to memref> %v = sparse_tensor.values %arg0 : tensor<4x3xf64, #SortedCOO> to memref %vp0 = vector.transfer_read %p0[%c0], %cu: memref, vector<2xindex> vector.print %vp0 : vector<2xindex> @@ -76,8 +76,8 @@ %c0 = arith.constant 0 : index %cu = arith.constant -1 : index %fu = arith.constant 99.0 : f64 - %p1 = sparse_tensor.pointers %arg0 { dimension = 1 : index } : tensor<4x3xf64, #CSR> to memref - %i1 = sparse_tensor.indices %arg0 { dimension = 1 : index } : tensor<4x3xf64, #CSR> to memref + %p1 = sparse_tensor.positions %arg0 { level = 1 : index } : tensor<4x3xf64, #CSR> to memref + %i1 = sparse_tensor.coordinates %arg0 { level = 1 : index } : tensor<4x3xf64, #CSR> to memref %v = sparse_tensor.values %arg0 : tensor<4x3xf64, #CSR> to memref %vp1 = vector.transfer_read %p1[%c0], %cu: memref, vector<5xindex> vector.print %vp1 : vector<5xindex> @@ -92,10 +92,10 @@ %c0 = arith.constant 0 : index %cu = arith.constant -1 : index %fu = arith.constant 99.0 : f64 - %p0 = sparse_tensor.pointers %arg0 { dimension = 0 : index } : tensor<4x3xf64, #DCSR> to memref - %i0 = sparse_tensor.indices %arg0 { dimension = 0 : index } : tensor<4x3xf64, #DCSR> to memref - %p1 = sparse_tensor.pointers %arg0 { dimension = 1 : index } : tensor<4x3xf64, #DCSR> to memref - %i1 = sparse_tensor.indices %arg0 { dimension = 1 : index } : tensor<4x3xf64, #DCSR> to memref + %p0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<4x3xf64, #DCSR> to memref + %i0 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<4x3xf64, #DCSR> to memref + %p1 = sparse_tensor.positions %arg0 { level = 1 : index } : tensor<4x3xf64, #DCSR> to memref + %i1 = sparse_tensor.coordinates %arg0 { level = 1 : index } : tensor<4x3xf64, #DCSR> to memref %v = sparse_tensor.values %arg0 : tensor<4x3xf64, #DCSR> to memref %vp0 = vector.transfer_read %p0[%c0], %cu: memref, vector<2xindex> vector.print %vp0 : vector<2xindex> @@ -114,8 +114,8 @@ %c0 = arith.constant 0 : index %cu = arith.constant -1 : index %fu = 
arith.constant 99.0 : f64 - %p0 = sparse_tensor.pointers %arg0 { dimension = 0 : index } : tensor<4x3xf64, #Row> to memref - %i0 = sparse_tensor.indices %arg0 { dimension = 0 : index } : tensor<4x3xf64, #Row> to memref + %p0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<4x3xf64, #Row> to memref + %i0 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<4x3xf64, #Row> to memref %v = sparse_tensor.values %arg0 : tensor<4x3xf64, #Row> to memref %vp0 = vector.transfer_read %p0[%c0], %cu: memref, vector<2xindex> vector.print %vp0 : vector<2xindex> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir @@ -44,10 +44,10 @@ func.func @dump(%arg0: tensor<5x4x3xf64, #TensorCSR>) { %c0 = arith.constant 0 : index %fu = arith.constant 99.0 : f64 - %p0 = sparse_tensor.pointers %arg0 { dimension = 0 : index } : tensor<5x4x3xf64, #TensorCSR> to memref - %i0 = sparse_tensor.indices %arg0 { dimension = 0 : index } : tensor<5x4x3xf64, #TensorCSR> to memref - %p2 = sparse_tensor.pointers %arg0 { dimension = 2 : index } : tensor<5x4x3xf64, #TensorCSR> to memref - %i2 = sparse_tensor.indices %arg0 { dimension = 2 : index } : tensor<5x4x3xf64, #TensorCSR> to memref + %p0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<5x4x3xf64, #TensorCSR> to memref + %i0 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<5x4x3xf64, #TensorCSR> to memref + %p2 = sparse_tensor.positions %arg0 { level = 2 : index } : tensor<5x4x3xf64, #TensorCSR> to memref + %i2 = sparse_tensor.coordinates %arg0 { level = 2 : index } : tensor<5x4x3xf64, #TensorCSR> to memref %v = sparse_tensor.values %arg0 : tensor<5x4x3xf64, #TensorCSR> to memref %vp0 = vector.transfer_read %p0[%c0], %c0: memref, vector<2xindex> vector.print %vp0 : vector<2xindex> @@ -65,10 +65,10 @@ func.func @dump_row(%arg0: tensor<5x4x3xf64, #TensorRow>) { %c0 = arith.constant 0 : index %fu = arith.constant 99.0 : f64 - %p0 = sparse_tensor.pointers %arg0 { dimension = 0 : index } : tensor<5x4x3xf64, #TensorRow> to memref - %i0 = sparse_tensor.indices %arg0 { dimension = 0 : index } : tensor<5x4x3xf64, #TensorRow> to memref - %p1 = sparse_tensor.pointers %arg0 { dimension = 1 : index } : tensor<5x4x3xf64, #TensorRow> to memref - %i1 = sparse_tensor.indices %arg0 { dimension = 1 : index } : tensor<5x4x3xf64, #TensorRow> to memref + %p0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<5x4x3xf64, #TensorRow> to memref + %i0 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<5x4x3xf64, #TensorRow> to memref + %p1 = sparse_tensor.positions %arg0 { level = 1 : index } : tensor<5x4x3xf64, #TensorRow> to memref + %i1 = sparse_tensor.coordinates %arg0 { level = 1 : index } : tensor<5x4x3xf64, #TensorRow> to memref %v = sparse_tensor.values %arg0 : tensor<5x4x3xf64, #TensorRow> to memref %vp0 = vector.transfer_read %p0[%c0], %c0: memref, vector<2xindex> vector.print %vp0 : vector<2xindex> @@ -86,11 +86,11 @@ func.func @dump_ccoo(%arg0: tensor<5x4x3xf64, #CCoo>) { %c0 = arith.constant 0 : index %fu = arith.constant 99.0 : f64 - %p0 = sparse_tensor.pointers %arg0 { dimension = 0 : index } : tensor<5x4x3xf64, #CCoo> to memref - %i0 = sparse_tensor.indices %arg0 { dimension = 0 : index } : tensor<5x4x3xf64, #CCoo> to memref - %p1 = sparse_tensor.pointers %arg0 { 
dimension = 1 : index } : tensor<5x4x3xf64, #CCoo> to memref - %i1 = sparse_tensor.indices %arg0 { dimension = 1 : index } : tensor<5x4x3xf64, #CCoo> to memref - %i2 = sparse_tensor.indices %arg0 { dimension = 2 : index } : tensor<5x4x3xf64, #CCoo> to memref + %p0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<5x4x3xf64, #CCoo> to memref + %i0 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<5x4x3xf64, #CCoo> to memref + %p1 = sparse_tensor.positions %arg0 { level = 1 : index } : tensor<5x4x3xf64, #CCoo> to memref + %i1 = sparse_tensor.coordinates %arg0 { level = 1 : index } : tensor<5x4x3xf64, #CCoo> to memref + %i2 = sparse_tensor.coordinates %arg0 { level = 2 : index } : tensor<5x4x3xf64, #CCoo> to memref %v = sparse_tensor.values %arg0 : tensor<5x4x3xf64, #CCoo> to memref %vp0 = vector.transfer_read %p0[%c0], %c0: memref, vector<2xindex> vector.print %vp0 : vector<2xindex> @@ -110,9 +110,9 @@ func.func @dump_dcoo(%arg0: tensor<5x4x3xf64, #DCoo>) { %c0 = arith.constant 0 : index %fu = arith.constant 99.0 : f64 - %p1 = sparse_tensor.pointers %arg0 { dimension = 1 : index } : tensor<5x4x3xf64, #DCoo> to memref - %i1 = sparse_tensor.indices %arg0 { dimension = 1 : index } : tensor<5x4x3xf64, #DCoo> to memref - %i2 = sparse_tensor.indices %arg0 { dimension = 2 : index } : tensor<5x4x3xf64, #DCoo> to memref + %p1 = sparse_tensor.positions %arg0 { level = 1 : index } : tensor<5x4x3xf64, #DCoo> to memref + %i1 = sparse_tensor.coordinates %arg0 { level = 1 : index } : tensor<5x4x3xf64, #DCoo> to memref + %i2 = sparse_tensor.coordinates %arg0 { level = 2 : index } : tensor<5x4x3xf64, #DCoo> to memref %v = sparse_tensor.values %arg0 : tensor<5x4x3xf64, #DCoo> to memref %vp1 = vector.transfer_read %p1[%c0], %c0: memref, vector<6xindex> vector.print %vp1 : vector<6xindex> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir @@ -39,8 +39,8 @@ #SparseMatrix = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], - pointerBitWidth = 8, - indexBitWidth = 8 + posWidth = 8, + crdWidth = 8 }> #matvec = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir @@ -27,8 +27,8 @@ #SortedCOOI32 = #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ], - pointerBitWidth = 32, - indexBitWidth = 32 + posWidth = 32, + crdWidth = 32 }> module { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir @@ -76,8 +76,8 @@ %values = sparse_tensor.values %arg0 : tensor to memref %0 = vector.transfer_read %values[%c0], %d0: memref, vector<3xf32> vector.print %0 : vector<3xf32> - %indices = sparse_tensor.indices %arg0 { dimension = 0 : index } : tensor to memref - %1 = vector.transfer_read %indices[%c0], %c0: memref, vector<3xindex> + %coordinates = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor to memref + %1 = vector.transfer_read %coordinates[%c0], %c0: 
memref, vector<3xindex> vector.print %1 : vector<3xindex> return } diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir @@ -32,8 +32,8 @@ #SparseMatrix = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], - pointerBitWidth = 32, - indexBitWidth = 32 + posWidth = 32, + crdWidth = 32 }> #trait_sampled_dense_dense = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir @@ -125,11 +125,11 @@ // CHECK-NEXT: ( 0, 126, 127, 254, 1, 253, 2, 0, 1, 3, 98, 126, 127, 128, 249, 253, 255, 0, 0, 0 ) // CHECK-NEXT: ( -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15, 16, -17, 0, 0, 0 ) // - %p0 = sparse_tensor.pointers %0 { dimension = 0 : index } + %p0 = sparse_tensor.positions %0 { level = 0 : index } : tensor to memref - %i00 = sparse_tensor.indices %0 { dimension = 0 : index } + %i00 = sparse_tensor.coordinates %0 { level = 0 : index } : tensor to memref> - %i01 = sparse_tensor.indices %0 { dimension = 1 : index } + %i01 = sparse_tensor.coordinates %0 { level = 1 : index } : tensor to memref> %v0 = sparse_tensor.values %0 : tensor to memref @@ -144,11 +144,11 @@ // CHECK-NEXT: ( 0, 3, 1, 3, 2, 3, 3, 0, 3, 0, 3, 3, 3, 1, 3, 0, 3, 0, 0, 0 ) // CHECK-NEXT: ( -1, 8, -5, -9, -7, 10, -11, 2, 12, -3, -13, 14, -15, 6, 16, 4, -17, 0, 0, 0 ) // - %p1 = sparse_tensor.pointers %1 { dimension = 0 : index } + %p1 = sparse_tensor.positions %1 { level = 0 : index } : tensor to memref - %i10 = sparse_tensor.indices %1 { dimension = 0 : index } + %i10 = sparse_tensor.coordinates %1 { level = 0 : index } : tensor to memref> - %i11 = sparse_tensor.indices %1 { dimension = 1 : index } + %i11 = sparse_tensor.coordinates %1 { level = 1 : index } : tensor to memref> %v1 = sparse_tensor.values %1 : tensor to memref @@ -164,13 +164,13 @@ // CHECK-NEXT: ( 0, 0, 1, 1, 2, 2, 2, 2, 0, 0, 0, 1, 1, 1, 1, 2, 2, 0, 0, 0 ) // CHECK-NEXT: ( 3, 63, 11, 100, 66, 61, 13, 43, 77, 10, 46, 61, 53, 3, 75, 22, 18, 0, 0, 0 ) // - %p2 = sparse_tensor.pointers %2 { dimension = 0 : index } + %p2 = sparse_tensor.positions %2 { level = 0 : index } : tensor to memref - %i20 = sparse_tensor.indices %2 { dimension = 0 : index } + %i20 = sparse_tensor.coordinates %2 { level = 0 : index } : tensor to memref> - %i21 = sparse_tensor.indices %2 { dimension = 1 : index } + %i21 = sparse_tensor.coordinates %2 { level = 1 : index } : tensor to memref> - %i22 = sparse_tensor.indices %2 { dimension = 2 : index } + %i22 = sparse_tensor.coordinates %2 { level = 2 : index } : tensor to memref> %v2 = sparse_tensor.values %2 : tensor to memref @@ -187,13 +187,13 @@ // CHECK-NEXT: ( 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0 ) // CHECK-NEXT: ( 66, 77, 61, 11, 61, 53, 22, 3, 100, 13, 10, 3, 18, 63, 43, 46, 75, 0, 0, 0 ) // - %p3 = sparse_tensor.pointers %3 { dimension = 0 : index } + %p3 = sparse_tensor.positions %3 { level = 0 : index } : tensor to memref - %i30 = sparse_tensor.indices %3 { dimension = 0 : index } + %i30 = sparse_tensor.coordinates %3 { level = 0 : index } : tensor to memref> - %i31 = 
sparse_tensor.indices %3 { dimension = 1 : index } + %i31 = sparse_tensor.coordinates %3 { level = 1 : index } : tensor to memref> - %i32 = sparse_tensor.indices %3 { dimension = 2 : index } + %i32 = sparse_tensor.coordinates %3 { level = 2 : index } : tensor to memref> %v3 = sparse_tensor.values %3 : tensor to memref @@ -209,11 +209,11 @@ // CHECK-NEXT: ( 0, 3, 0, 3, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) // CHECK-NEXT: ( 6, 5, 4, 3, 2, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) // - %p4 = sparse_tensor.pointers %4 { dimension = 0 : index } + %p4 = sparse_tensor.positions %4 { level = 0 : index } : tensor to memref - %i40 = sparse_tensor.indices %4 { dimension = 0 : index } + %i40 = sparse_tensor.coordinates %4 { level = 0 : index } : tensor to memref> - %i41 = sparse_tensor.indices %4 { dimension = 1 : index } + %i41 = sparse_tensor.coordinates %4 { level = 1 : index } : tensor to memref> %v4 = sparse_tensor.values %4 : tensor to memref @@ -232,11 +232,11 @@ // CHECK-NEXT: ( 0, 3, 0, 3, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) // CHECK-NEXT: ( 12, 10, 8, 6, 4, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) // - %p5 = sparse_tensor.pointers %5 { dimension = 0 : index } + %p5 = sparse_tensor.positions %5 { level = 0 : index } : tensor to memref - %i50 = sparse_tensor.indices %5 { dimension = 0 : index } + %i50 = sparse_tensor.coordinates %5 { level = 0 : index } : tensor to memref> - %i51 = sparse_tensor.indices %5 { dimension = 1 : index } + %i51 = sparse_tensor.coordinates %5 { level = 1 : index } : tensor to memref> %v5 = sparse_tensor.values %5 : tensor to memref diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir @@ -119,7 +119,7 @@ // // Inspect storage scheme of CSR. // - // pointers(1) + // positions(1) // indices(1) // values // @@ -127,10 +127,10 @@ // CHECK: ( 0, 2, 7, 2, 3, 4, 1, 2, 7, 2, 6, 7, 1, 2, 6, 7, 6 ) // CHECK: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17 ) // - %7 = sparse_tensor.pointers %1 { dimension = 1 : index } : tensor<10x8xf64, #CSR> to memref + %7 = sparse_tensor.positions %1 { level = 1 : index } : tensor<10x8xf64, #CSR> to memref %8 = vector.transfer_read %7[%c0], %c0: memref, vector<11xindex> vector.print %8 : vector<11xindex> - %9 = sparse_tensor.indices %1 { dimension = 1 : index } : tensor<10x8xf64, #CSR> to memref + %9 = sparse_tensor.coordinates %1 { level = 1 : index } : tensor<10x8xf64, #CSR> to memref %10 = vector.transfer_read %9[%c0], %c0: memref, vector<17xindex> vector.print %10 : vector<17xindex> %11 = sparse_tensor.values %1 : tensor<10x8xf64, #CSR> to memref @@ -140,9 +140,9 @@ // // Inspect storage scheme of DCSR. 
// - // pointers(0) + // positions(0) // indices(0) - // pointers(1) + // positions(1) // indices(1) // values // @@ -152,16 +152,16 @@ // CHECK: ( 0, 2, 7, 2, 3, 4, 1, 2, 7, 2, 6, 7, 1, 2, 6, 7, 6 ) // CHECK: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17 ) // - %13 = sparse_tensor.pointers %2 { dimension = 0 : index } : tensor<10x8xf64, #DCSR> to memref + %13 = sparse_tensor.positions %2 { level = 0 : index } : tensor<10x8xf64, #DCSR> to memref %14 = vector.transfer_read %13[%c0], %c0: memref, vector<2xindex> vector.print %14 : vector<2xindex> - %15 = sparse_tensor.indices %2 { dimension = 0 : index } : tensor<10x8xf64, #DCSR> to memref + %15 = sparse_tensor.coordinates %2 { level = 0 : index } : tensor<10x8xf64, #DCSR> to memref %16 = vector.transfer_read %15[%c0], %c0: memref, vector<8xindex> vector.print %16 : vector<8xindex> - %17 = sparse_tensor.pointers %2 { dimension = 1 : index } : tensor<10x8xf64, #DCSR> to memref + %17 = sparse_tensor.positions %2 { level = 1 : index } : tensor<10x8xf64, #DCSR> to memref %18 = vector.transfer_read %17[%c0], %c0: memref, vector<9xindex> vector.print %18 : vector<9xindex> - %19 = sparse_tensor.indices %2 { dimension = 1 : index } : tensor<10x8xf64, #DCSR> to memref + %19 = sparse_tensor.coordinates %2 { level = 1 : index } : tensor<10x8xf64, #DCSR> to memref %20 = vector.transfer_read %19[%c0], %c0: memref, vector<17xindex> vector.print %20 : vector<17xindex> %21 = sparse_tensor.values %2 : tensor<10x8xf64, #DCSR> to memref @@ -171,7 +171,7 @@ // // Inspect storage scheme of CSC. // - // pointers(1) + // positions(1) // indices(1) // values // @@ -179,10 +179,10 @@ // CHECK: ( 0, 5, 7, 0, 2, 5, 6, 7, 3, 4, 6, 7, 9, 0, 5, 6, 7 ) // CHECK: ( 1, 7, 13, 2, 4, 8, 10, 14, 5, 6, 11, 15, 17, 3, 9, 12, 16 ) // - %23 = sparse_tensor.pointers %3 { dimension = 1 : index } : tensor<10x8xf64, #CSC> to memref + %23 = sparse_tensor.positions %3 { level = 1 : index } : tensor<10x8xf64, #CSC> to memref %24 = vector.transfer_read %23[%c0], %c0: memref, vector<9xindex> vector.print %24 : vector<9xindex> - %25 = sparse_tensor.indices %3 { dimension = 1 : index } : tensor<10x8xf64, #CSC> to memref + %25 = sparse_tensor.coordinates %3 { level = 1 : index } : tensor<10x8xf64, #CSC> to memref %26 = vector.transfer_read %25[%c0], %c0: memref, vector<17xindex> vector.print %26 : vector<17xindex> %27 = sparse_tensor.values %3 : tensor<10x8xf64, #CSC> to memref @@ -192,9 +192,9 @@ // // Inspect storage scheme of DCSC. 
// - // pointers(0) + // positions(0) // indices(0) - // pointers(1) + // positions(1) // indices(1) // values // @@ -204,16 +204,16 @@ // CHECK: ( 0, 5, 7, 0, 2, 5, 6, 7, 3, 4, 6, 7, 9, 0, 5, 6, 7 ) // CHECK: ( 1, 7, 13, 2, 4, 8, 10, 14, 5, 6, 11, 15, 17, 3, 9, 12, 16 ) // - %29 = sparse_tensor.pointers %4 { dimension = 0 : index } : tensor<10x8xf64, #DCSC> to memref + %29 = sparse_tensor.positions %4 { level = 0 : index } : tensor<10x8xf64, #DCSC> to memref %30 = vector.transfer_read %29[%c0], %c0: memref, vector<2xindex> vector.print %30 : vector<2xindex> - %31 = sparse_tensor.indices %4 { dimension = 0 : index } : tensor<10x8xf64, #DCSC> to memref + %31 = sparse_tensor.coordinates %4 { level = 0 : index } : tensor<10x8xf64, #DCSC> to memref %32 = vector.transfer_read %31[%c0], %c0: memref, vector<7xindex> vector.print %32 : vector<7xindex> - %33 = sparse_tensor.pointers %4 { dimension = 1 : index } : tensor<10x8xf64, #DCSC> to memref + %33 = sparse_tensor.positions %4 { level = 1 : index } : tensor<10x8xf64, #DCSC> to memref %34 = vector.transfer_read %33[%c0], %c0: memref, vector<8xindex> vector.print %34 : vector<8xindex> - %35 = sparse_tensor.indices %4 { dimension = 1 : index } : tensor<10x8xf64, #DCSC> to memref + %35 = sparse_tensor.coordinates %4 { level = 1 : index } : tensor<10x8xf64, #DCSC> to memref %36 = vector.transfer_read %35[%c0], %c0: memref, vector<17xindex> vector.print %36 : vector<17xindex> %37 = sparse_tensor.values %4 : tensor<10x8xf64, #DCSC> to memref @@ -223,7 +223,7 @@ // // Inspect storage scheme of BlockRow. // - // pointers(0) + // positions(0) // indices(0) // values // @@ -234,10 +234,10 @@ // CHECK-SAME: 0, 7, 8, 0, 0, 0, 0, 9, 0, 0, 10, 0, 0, 0, 11, 12, // CHECK-SAME: 0, 13, 14, 0, 0, 0, 15, 16, 0, 0, 0, 0, 0, 0, 17, 0 ) // - %39 = sparse_tensor.pointers %x { dimension = 0 : index } : tensor<10x8xf64, #BlockRow> to memref + %39 = sparse_tensor.positions %x { level = 0 : index } : tensor<10x8xf64, #BlockRow> to memref %40 = vector.transfer_read %39[%c0], %c0: memref, vector<2xindex> vector.print %40 : vector<2xindex> - %41 = sparse_tensor.indices %x { dimension = 0 : index } : tensor<10x8xf64, #BlockRow> to memref + %41 = sparse_tensor.coordinates %x { level = 0 : index } : tensor<10x8xf64, #BlockRow> to memref %42 = vector.transfer_read %41[%c0], %c0: memref, vector<8xindex> vector.print %42 : vector<8xindex> %43 = sparse_tensor.values %x : tensor<10x8xf64, #BlockRow> to memref @@ -247,7 +247,7 @@ // // Inspect storage scheme of BlockCol. 
// - // pointers(0) + // positions(0) // indices(0) // values // @@ -257,10 +257,10 @@ // CHECK-SAME: 0, 8, 10, 14, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, // CHECK-SAME: 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 15, 0, 17, 3, 0, 0, 0, 0, 9, 12, 16, 0, 0 ) // - %45 = sparse_tensor.pointers %y { dimension = 0 : index } : tensor<10x8xf64, #BlockCol> to memref + %45 = sparse_tensor.positions %y { level = 0 : index } : tensor<10x8xf64, #BlockCol> to memref %46 = vector.transfer_read %45[%c0], %c0: memref, vector<2xindex> vector.print %46 : vector<2xindex> - %47 = sparse_tensor.indices %y { dimension = 0 : index } : tensor<10x8xf64, #BlockCol> to memref + %47 = sparse_tensor.coordinates %y { level = 0 : index } : tensor<10x8xf64, #BlockCol> to memref %48 = vector.transfer_read %47[%c0], %c0: memref, vector<7xindex> vector.print %48 : vector<7xindex> %49 = sparse_tensor.values %y : tensor<10x8xf64, #BlockCol> to memref diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py --- a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py +++ b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py @@ -48,9 +48,9 @@ _TACO_INDEX_PREFIX = "i" _TACO_TENSOR_PREFIX = "A" -# Bitwidths for pointers and indices. -_POINTER_BIT_WIDTH = 0 -_INDEX_BIT_WIDTH = 0 +# Bitwidths for positions and coordinates. +_POS_WIDTH = 0 +_CRD_WIDTH = 0 # The entry point to the JIT compiled program. _ENTRY_NAME = "main" @@ -366,8 +366,7 @@ mlir_storage_format = [f.value for f in self.format_pack.formats] return sparse_tensor.EncodingAttr.get(mlir_storage_format, ir.AffineMap.get_permutation(order), - None, _POINTER_BIT_WIDTH, - _INDEX_BIT_WIDTH) + None, _POS_WIDTH, _CRD_WIDTH) def _make_format(formats: List[ModeFormat], diff --git a/mlir/test/python/dialects/sparse_tensor/dialect.py b/mlir/test/python/dialects/sparse_tensor/dialect.py --- a/mlir/test/python/dialects/sparse_tensor/dialect.py +++ b/mlir/test/python/dialects/sparse_tensor/dialect.py @@ -15,10 +15,10 @@ with Context() as ctx: parsed = Attribute.parse('#sparse_tensor.encoding<{' ' dimLevelType = [ "compressed" ],' - ' pointerBitWidth = 16,' - ' indexBitWidth = 32' + ' posWidth = 16,' + ' crdWidth = 32' '}>') - # CHECK: #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ], pointerBitWidth = 16, indexBitWidth = 32 }> + # CHECK: #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ], posWidth = 16, crdWidth = 32 }> print(parsed) casted = st.EncodingAttr(parsed) @@ -29,10 +29,10 @@ print(f"dim_level_types: {casted.dim_level_types}") # CHECK: dim_ordering: None print(f"dim_ordering: {casted.dim_ordering}") - # CHECK: pointer_bit_width: 16 - print(f"pointer_bit_width: {casted.pointer_bit_width}") - # CHECK: index_bit_width: 32 - print(f"index_bit_width: {casted.index_bit_width}") + # CHECK: pos_width: 16 + print(f"pos_width: {casted.pos_width}") + # CHECK: crd_width: 32 + print(f"crd_width: {casted.crd_width}") created = st.EncodingAttr.get(casted.dim_level_types, None, None, 0, 0) # CHECK: #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }> @@ -43,8 +43,8 @@ # Verify that the factory creates an instance of the proper type. 
# CHECK: is_proper_instance: True print(f"is_proper_instance: {isinstance(created, st.EncodingAttr)}") - # CHECK: created_pointer_bit_width: 0 - print(f"created_pointer_bit_width: {created.pointer_bit_width}") + # CHECK: created_pos_width: 0 + print(f"created_pos_width: {created.pos_width}") # CHECK-LABEL: TEST: testEncodingAttr2D @@ -54,10 +54,10 @@ parsed = Attribute.parse('#sparse_tensor.encoding<{' ' dimLevelType = [ "dense", "compressed" ],' ' dimOrdering = affine_map<(d0, d1) -> (d1, d0)>,' - ' pointerBitWidth = 8,' - ' indexBitWidth = 32' + ' posWidth = 8,' + ' crdWidth = 32' '}>') - # CHECK: #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, pointerBitWidth = 8, indexBitWidth = 32 }> + # CHECK: #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, posWidth = 8, crdWidth = 32 }> print(parsed) casted = st.EncodingAttr(parsed) @@ -68,14 +68,14 @@ print(f"dim_level_types: {casted.dim_level_types}") # CHECK: dim_ordering: (d0, d1) -> (d1, d0) print(f"dim_ordering: {casted.dim_ordering}") - # CHECK: pointer_bit_width: 8 - print(f"pointer_bit_width: {casted.pointer_bit_width}") - # CHECK: index_bit_width: 32 - print(f"index_bit_width: {casted.index_bit_width}") + # CHECK: pos_width: 8 + print(f"pos_width: {casted.pos_width}") + # CHECK: crd_width: 32 + print(f"crd_width: {casted.crd_width}") created = st.EncodingAttr.get(casted.dim_level_types, casted.dim_ordering, casted.higher_ordering, 8, 32) - # CHECK: #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, pointerBitWidth = 8, indexBitWidth = 32 }> + # CHECK: #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, posWidth = 8, crdWidth = 32 }> print(created) # CHECK: created_equal: True print(f"created_equal: {created == casted}") @@ -88,12 +88,12 @@ encoding = st.EncodingAttr( Attribute.parse('#sparse_tensor.encoding<{' ' dimLevelType = [ "compressed" ], ' - ' pointerBitWidth = 64,' - ' indexBitWidth = 32' + ' posWidth = 64,' + ' crdWidth = 32' '}>')) tt = RankedTensorType.get((1024,), F32Type.get(), encoding=encoding) - # CHECK: tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ], pointerBitWidth = 64, indexBitWidth = 32 }>> + # CHECK: tensor<1024xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ], posWidth = 64, crdWidth = 32 }>> print(tt) - # CHECK: #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ], pointerBitWidth = 64, indexBitWidth = 32 }> + # CHECK: #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ], posWidth = 64, crdWidth = 32 }> print(tt.encoding) assert tt.encoding == encoding
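As a closing illustration of the renamed surface syntax, a minimal construction sketch, assuming the same in-tree Python bindings exercised by the test above; the `(32, 64)` shape and the `rebuilt` name are arbitrary choices for this sketch:

from mlir.ir import Attribute, Context, F32Type, RankedTensorType
from mlir.dialects import sparse_tensor as st

with Context():
    parsed = st.EncodingAttr(
        Attribute.parse('#sparse_tensor.encoding<{'
                        ' dimLevelType = [ "dense", "compressed" ],'
                        ' dimOrdering = affine_map<(d0, d1) -> (d1, d0)>,'
                        ' posWidth = 8, crdWidth = 32 }>'))
    # EncodingAttr.get keeps its argument order (dim_level_types, dim_ordering,
    # higher_ordering, pos, crd); only the two width parameters were renamed.
    rebuilt = st.EncodingAttr.get(parsed.dim_level_types, parsed.dim_ordering,
                                  parsed.higher_ordering, parsed.pos_width,
                                  parsed.crd_width)
    assert rebuilt == parsed
    # Encodings attach to ranked tensor types exactly as before.
    tt = RankedTensorType.get((32, 64), F32Type.get(), encoding=rebuilt)
    print(tt)

Since only keyword and accessor names changed, code that builds encodings through `EncodingAttr.get` keeps working once the `pos_width`/`crd_width` renames are applied.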