diff --git a/mlir/include/mlir-c/Dialect/SparseTensor.h b/mlir/include/mlir-c/Dialect/SparseTensor.h
--- a/mlir/include/mlir-c/Dialect/SparseTensor.h
+++ b/mlir/include/mlir-c/Dialect/SparseTensor.h
@@ -49,7 +49,8 @@
 MLIR_CAPI_EXPORTED MlirAttribute mlirSparseTensorEncodingAttrGet(
     MlirContext ctx, intptr_t numDimLevelTypes,
     enum MlirSparseTensorDimLevelType const *dimLevelTypes,
-    MlirAffineMap dimOrdering, int pointerBitWidth, int indexBitWidth);
+    MlirAffineMap dimOrdering, MlirAffineMap higherOrdering,
+    int pointerBitWidth, int indexBitWidth);
 
 /// Returns the number of dim level types in a sparse_tensor.encoding attribute.
 MLIR_CAPI_EXPORTED intptr_t
@@ -63,6 +64,10 @@
 MLIR_CAPI_EXPORTED MlirAffineMap
 mlirSparseTensorEncodingAttrGetDimOrdering(MlirAttribute attr);
 
+/// Returns the higher ordering in a sparse_tensor.encoding attribute.
+MLIR_CAPI_EXPORTED MlirAffineMap
+mlirSparseTensorEncodingAttrGetHigherOrdering(MlirAttribute attr);
+
 /// Returns the pointer bit width in a sparse_tensor.encoding attribute.
 MLIR_CAPI_EXPORTED int
 mlirSparseTensorEncodingAttrGetPointerBitWidth(MlirAttribute attr);
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
@@ -32,6 +32,7 @@
     aware of the semantics of tensor types with such an encoding.
 
     The attribute consists of the following fields.
+
    - Dimension level type for each dimension of a tensor type:
        - **dense** : dimension is dense, all entries along this dimension
          are stored
@@ -49,24 +50,52 @@
      In the future, we may introduce many more dimension level types and
      properties, and separate specifying the two completely rather than
      using this suffix mechanism.
+
    - An optional dimension ordering on the indices of this tensor type. Unlike
-     dense storage, most sparse storage schemes do not provide fast random access.
-     This affine map specifies the order of dimensions that should be supported
-     by the sparse storage scheme. For example, for a 2-d tensor, "(i,j) -> (i,j)"
-     requests row-wise storage and "(i,j) -> (j,i)" requests column-wise storage.
-     By default, an identify mapping is used, which implies that the original
-     indices directly correspond to stored indices (viz. "(i,j,...) -> (i,j,...)").
+     dense storage, most sparse storage schemes do not provide fast random
+     access. This affine map specifies the order of dimensions that should be
+     supported by the sparse storage scheme. For example, for a 2-d tensor,
+     `(i, j) -> (i, j)` requests row-wise storage and `(i, j) -> (j, i)`
+     requests column-wise storage. By default, an identity mapping is used,
+     which implies that the original indices directly correspond to stored
+     indices.
+
+   - An optional higher-ordering mapping from the original index space of
+     the tensor to a higher-order index space, used to define block-sparse
+     storage or ELL (jagged diagonal) storage. For example, for a 2-d tensor,
+     the mapping `(i, j) -> (i floordiv 2, j floordiv 3, i mod 2, j mod 3)`
+     imposes a higher-order partitioning into 2x3 blocks along the matrix
+     layout. A dimension ordering can be used to define a desired ordering
+     on this higher-order index space. Likewise, the dimension level types
+     define dense or compressed storage along this higher-order index space.
+     For block-sparse, typically, blocks are stored with compression and
+     dense storage is used within each block (although hybrid schemes are
+     possible as well). The higher-order mapping also provides a notion of
+     "counting a dimension", where every stored element with the same index
+     is mapped to a new slice. For instance, ELL storage of a 2-d tensor can
+     be defined with the mapping `(i, j) -> (#i, i, j)` using the notation
+     of [Chou20]. Lacking the `#` symbol in MLIR's affine mapping, we use
+     a free symbol `c` to define such counting, together with a constant
+     that denotes the number of resulting slices. For example, the mapping
+     `(i, j)[c] -> (c * 3 * i, i, j)` with the first two higher-order indices
+     stored dense and the innermost compressed denotes ELL storage with
+     three jagged diagonals that count the dimension `i`.
+
+     TODO: introduce a real counting symbol to MLIR's mapping, since an
+     expression like 3*c*i has no direct interpretation?
+
    - The required bit width for "pointer" storage (integral offsets into
      the sparse storage scheme). A narrow width reduces the memory footprint
      of overhead storage, as long as the width suffices to define the total
      required range (viz. the maximum number of stored entries over all
      indirection dimensions). The choices are `8`, `16`, `32`, `64`, or,
      the default, `0` to indicate the native bit width.
+
    - The required bit width for "index" storage (elements of the coordinates
      of stored entries). A narrow width reduces the memory footprint of
      overhead storage, as long as the width suffices to define the total
      required range (viz. the maximum value of each tensor index over all
      dimensions). The
-     choices are `8`, `16`, `32`, `64`, or, the default, `0` to indicate the
+     choices are `8`, `16`, `32`, `64`, or, the default, `0` to indicate a
      native bit width.
 
    Examples:
 
@@ -87,11 +116,27 @@
    // Doubly compressed sparse column storage with specific bitwidths.
    #DCSC = #sparse_tensor.encoding<{
      dimLevelType = [ "compressed", "compressed" ],
-     dimOrdering = affine_map<(i,j) -> (j,i)>,
+     dimOrdering = affine_map<(i, j) -> (j, i)>,
      pointerBitWidth = 32,
      indexBitWidth = 8
    }>
    ... tensor<8x8xf64, #DCSC> ...
+
+   // Block sparse row storage (2x3 blocks).
+   #BCSR = #sparse_tensor.encoding<{
+     dimLevelType = [ "compressed", "compressed", "dense", "dense" ],
+     dimOrdering = affine_map<(ii, jj, i, j) -> (ii, jj, i, j)>,
+     higherOrdering = affine_map<(i, j) -> (i floordiv 2, j floordiv 3, i mod 2, j mod 3)>
+   }>
+   ... tensor<20x30xf32, #BCSR> ...
+
+   // ELL storage (4 jagged diagonals, i.e., at most 4 nonzeros per row).
+   #ELL = #sparse_tensor.encoding<{
+     dimLevelType = [ "dense", "dense", "compressed" ],
+     dimOrdering = affine_map<(ii, i, j) -> (ii, i, j)>,
+     higherOrdering = affine_map<(i, j)[c] -> (c * 4 * i, i, j)>
+   }>
+   ... tensor<?x?xf64, #ELL> ...
    ```
  }];
 
@@ -104,8 +149,9 @@
      "per dimension level type"
    >: $dimLevelType,
    // A dimension order on the indices of this tensor type.
-   // TODO: block structure with higher-dim inputs
    "AffineMap":$dimOrdering,
+   // A mapping from the original index space to a higher-order index space.
+   "AffineMap":$higherOrdering,
    // The required bit width for pointer storage.
    "unsigned":$pointerBitWidth,
    // The required bit width for index storage.
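To make the new field concrete before the implementation changes below: the sketch round-trips an encoding that carries `higherOrdering` through the updated Python bindings, mirroring the `dialect.py` test changes at the end of this patch. It is illustrative only, not part of the patch, and assumes an MLIR build with the Python bindings enabled; the block map is borrowed from the `#BCSR` example above.

```python
# Illustrative sketch (not part of the patch): exercises the new
# `higherOrdering` field through the updated Python bindings.
from mlir import ir
from mlir.dialects import sparse_tensor as st

with ir.Context():
  parsed = ir.Attribute.parse(
      '#sparse_tensor.encoding<{'
      ' dimLevelType = [ "compressed", "compressed", "dense", "dense" ],'
      ' higherOrdering = affine_map<(i, j) ->'
      ' (i floordiv 2, j floordiv 3, i mod 2, j mod 3)> }>')
  casted = st.EncodingAttr(parsed)
  # New read-only property, backed by
  # mlirSparseTensorEncodingAttrGetHigherOrdering (None when absent).
  print(casted.higher_ordering)
  # Rebuilding through the extended `get` signature round-trips the attribute.
  created = st.EncodingAttr.get(casted.dim_level_types, casted.dim_ordering,
                                casted.higher_ordering,
                                casted.pointer_bit_width,
                                casted.index_bit_width)
  assert created == parsed
```

Passing `None` for `higher_ordering` preserves the old behavior, which is why the pre-existing integration tests further down only gain a `None` argument.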
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorBase.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorBase.td
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorBase.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorBase.td
@@ -63,6 +63,10 @@
    * [Chou18] Stephen Chou, Fredrik Berg Kjolstad, and Saman Amarasinghe.
      Format Abstraction for Sparse Tensor Algebra Compilers. Proceedings of
      the ACM on Programming Languages, October 2018.
+   * [Chou20] Stephen Chou, Fredrik Berg Kjolstad, and Saman Amarasinghe.
+     Automatic Generation of Efficient Sparse Tensor Format Conversion
+     Routines. Proceedings of the 41st ACM SIGPLAN Conference on Programming
+     Language Design and Implementation, June 2020.
    * [Gustavson72] Fred G. Gustavson. Some basic techniques for solving
      sparse systems of linear equations. In Sparse Matrices and Their
      Applications, pages 41–52. Plenum Press, New York, 1972.
diff --git a/mlir/lib/Bindings/Python/DialectSparseTensor.cpp b/mlir/lib/Bindings/Python/DialectSparseTensor.cpp
--- a/mlir/lib/Bindings/Python/DialectSparseTensor.cpp
+++ b/mlir/lib/Bindings/Python/DialectSparseTensor.cpp
@@ -33,16 +33,18 @@
          "get",
          [](py::object cls,
             std::vector<MlirSparseTensorDimLevelType> dimLevelTypes,
-             llvm::Optional<MlirAffineMap> dimOrdering, int pointerBitWidth,
+             llvm::Optional<MlirAffineMap> dimOrdering,
+             llvm::Optional<MlirAffineMap> higherOrdering, int pointerBitWidth,
              int indexBitWidth, MlirContext context) {
            return cls(mlirSparseTensorEncodingAttrGet(
                context, dimLevelTypes.size(), dimLevelTypes.data(),
                dimOrdering ? *dimOrdering : MlirAffineMap{nullptr},
+               higherOrdering ? *higherOrdering : MlirAffineMap{nullptr},
                pointerBitWidth, indexBitWidth));
          },
          py::arg("cls"), py::arg("dim_level_types"), py::arg("dim_ordering"),
-          py::arg("pointer_bit_width"), py::arg("index_bit_width"),
-          py::arg("context") = py::none(),
+          py::arg("higher_ordering"), py::arg("pointer_bit_width"),
+          py::arg("index_bit_width"), py::arg("context") = py::none(),
          "Gets a sparse_tensor.encoding from parameters.")
      .def_property_readonly(
          "dim_level_types",
@@ -64,6 +66,15 @@
              return {};
            return ret;
          })
+      .def_property_readonly(
+          "higher_ordering",
+          [](MlirAttribute self) -> llvm::Optional<MlirAffineMap> {
+            MlirAffineMap ret =
+                mlirSparseTensorEncodingAttrGetHigherOrdering(self);
+            if (mlirAffineMapIsNull(ret))
+              return {};
+            return ret;
+          })
      .def_property_readonly(
          "pointer_bit_width",
          [](MlirAttribute self) {
diff --git a/mlir/lib/CAPI/Dialect/SparseTensor.cpp b/mlir/lib/CAPI/Dialect/SparseTensor.cpp
--- a/mlir/lib/CAPI/Dialect/SparseTensor.cpp
+++ b/mlir/lib/CAPI/Dialect/SparseTensor.cpp
@@ -56,21 +56,28 @@
 MlirAttribute mlirSparseTensorEncodingAttrGet(
     MlirContext ctx, intptr_t numDimLevelTypes,
     MlirSparseTensorDimLevelType const *dimLevelTypes,
-    MlirAffineMap dimOrdering, int pointerBitWidth, int indexBitWidth) {
+    MlirAffineMap dimOrdering, MlirAffineMap higherOrdering,
+    int pointerBitWidth, int indexBitWidth) {
   SmallVector<SparseTensorEncodingAttr::DimLevelType> cppDimLevelTypes;
   cppDimLevelTypes.resize(numDimLevelTypes);
   for (intptr_t i = 0; i < numDimLevelTypes; ++i)
     cppDimLevelTypes[i] =
         static_cast<SparseTensorEncodingAttr::DimLevelType>(dimLevelTypes[i]);
-  return wrap(SparseTensorEncodingAttr::get(unwrap(ctx), cppDimLevelTypes,
-                                            unwrap(dimOrdering),
-                                            pointerBitWidth, indexBitWidth));
+  return wrap(SparseTensorEncodingAttr::get(
+      unwrap(ctx), cppDimLevelTypes, unwrap(dimOrdering),
+      unwrap(higherOrdering), pointerBitWidth, indexBitWidth));
 }
 
 MlirAffineMap mlirSparseTensorEncodingAttrGetDimOrdering(MlirAttribute attr) {
   return wrap(unwrap(attr).cast<SparseTensorEncodingAttr>().getDimOrdering());
 }
 
+MlirAffineMap
+mlirSparseTensorEncodingAttrGetHigherOrdering(MlirAttribute attr) {
+  return wrap(
+      unwrap(attr).cast<SparseTensorEncodingAttr>().getHigherOrdering());
+}
+
 intptr_t mlirSparseTensorEncodingGetNumDimLevelTypes(MlirAttribute attr) {
   return unwrap(attr).cast<SparseTensorEncodingAttr>().getDimLevelType().size();
 }
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -51,6 +51,7 @@
   // Process the data from the parsed dictionary value into struct-like data.
   SmallVector<SparseTensorEncodingAttr::DimLevelType> dlt;
   AffineMap dimOrd = {};
+  AffineMap higherOrd = {};
   unsigned ptr = 0;
   unsigned ind = 0;
   for (const NamedAttribute &attr : dict) {
@@ -102,6 +103,14 @@
         return {};
       }
       dimOrd = affineAttr.getValue();
+    } else if (attr.getName() == "higherOrdering") {
+      auto affineAttr = attr.getValue().dyn_cast<AffineMapAttr>();
+      if (!affineAttr) {
+        parser.emitError(parser.getNameLoc(),
+                         "expected an affine map for higher ordering");
+        return {};
+      }
+      higherOrd = affineAttr.getValue();
     } else if (attr.getName() == "pointerBitWidth") {
       auto intAttr = attr.getValue().dyn_cast<IntegerAttr>();
       if (!intAttr) {
@@ -125,8 +134,8 @@
     }
   }
   // Construct struct-like storage for attribute.
-  return parser.getChecked<SparseTensorEncodingAttr>(parser.getContext(), dlt,
-                                                     dimOrd, ptr, ind);
+  return parser.getChecked<SparseTensorEncodingAttr>(
+      parser.getContext(), dlt, dimOrd, higherOrd, ptr, ind);
 }
 
 void SparseTensorEncodingAttr::print(AsmPrinter &printer) const {
@@ -169,6 +178,8 @@
   // Print remaining members only for non-default values.
   if (getDimOrdering() && !getDimOrdering().isIdentity())
     printer << ", dimOrdering = affine_map<" << getDimOrdering() << ">";
+  if (getHigherOrdering())
+    printer << ", higherOrdering = affine_map<" << getHigherOrdering() << ">";
   if (getPointerBitWidth())
     printer << ", pointerBitWidth = " << getPointerBitWidth();
   if (getIndexBitWidth())
@@ -179,7 +190,8 @@
 LogicalResult SparseTensorEncodingAttr::verify(
     function_ref<InFlightDiagnostic()> emitError,
     ArrayRef<DimLevelType> dimLevelType, AffineMap dimOrdering,
-    unsigned pointerBitWidth, unsigned indexBitWidth) {
+    AffineMap higherOrdering, unsigned pointerBitWidth,
+    unsigned indexBitWidth) {
   if (!acceptBitWidth(pointerBitWidth))
     return emitError() << "unexpected pointer bitwidth: " << pointerBitWidth;
   if (!acceptBitWidth(indexBitWidth))
@@ -192,6 +204,15 @@
     return emitError() << "unexpected mismatch in ordering and dimension "
                           "level types size";
   }
+  if (higherOrdering) {
+    if (higherOrdering.getNumDims() >= higherOrdering.getNumResults())
+      return emitError() << "unexpected higher ordering mapping from "
+                         << higherOrdering.getNumDims() << " to "
+                         << higherOrdering.getNumResults();
+    if (higherOrdering.getNumResults() != dimLevelType.size())
+      return emitError() << "unexpected mismatch in higher ordering and "
+                            "dimension level types size";
+  }
   return success();
 }
 
@@ -200,13 +221,23 @@
     function_ref<InFlightDiagnostic()> emitError) const {
   // Check structural integrity.
   if (failed(verify(emitError, getDimLevelType(), getDimOrdering(),
-                    getPointerBitWidth(), getIndexBitWidth())))
+                    getHigherOrdering(), getPointerBitWidth(),
+                    getIndexBitWidth())))
     return failure();
   // Check integrity with tensor type specifics. Dimension ordering is optional,
   // but we should always have dimension level types for the full rank.
   unsigned size = shape.size();
   if (size == 0)
     return emitError() << "expected non-scalar sparse tensor";
+  if (getHigherOrdering()) {
+    if (getHigherOrdering().getNumDims() != size)
+      return emitError() << "expected an affine map of size " << size
+                         << " for higher ordering";
+
+    // TODO: verification of higher ordering contents
+
+    size = getHigherOrdering().getNumResults(); // higher-order size!
+  }
   if (getDimOrdering() && getDimOrdering().getNumResults() != size)
     return emitError() << "expected an affine map of size " << size
                        << " for dimension ordering";
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -525,7 +525,7 @@
            "reshape should not change element type");
     // Start an iterator over the source tensor (in original index order).
     auto noPerm = SparseTensorEncodingAttr::get(
-        op->getContext(), encSrc.getDimLevelType(), AffineMap(),
+        op->getContext(), encSrc.getDimLevelType(), AffineMap(), AffineMap(),
         encSrc.getPointerBitWidth(), encSrc.getIndexBitWidth());
     SmallVector<Value> srcSizes;
     SmallVector<Value> params;
@@ -595,7 +595,7 @@
 
   // Start an iterator over the tensor (in original index order).
   auto noPerm = SparseTensorEncodingAttr::get(
-      rewriter.getContext(), enc.getDimLevelType(), AffineMap(),
+      rewriter.getContext(), enc.getDimLevelType(), AffineMap(), AffineMap(),
      enc.getPointerBitWidth(), enc.getIndexBitWidth());
   SmallVector<Value> sizes;
   SmallVector<Value> params;
@@ -857,7 +857,8 @@
       // the correct sparsity information to either of them.
       auto enc = SparseTensorEncodingAttr::get(
           op->getContext(), encDst.getDimLevelType(), encDst.getDimOrdering(),
-          encSrc.getPointerBitWidth(), encSrc.getIndexBitWidth());
+          encDst.getHigherOrdering(), encSrc.getPointerBitWidth(),
+          encSrc.getIndexBitWidth());
       newParams(rewriter, params, loc, stp, enc, Action::kToCOO, sizes, src);
       Value coo = genNewCall(rewriter, loc, params);
       params[3] = constantPointerTypeEncoding(rewriter, loc, encDst);
@@ -889,7 +890,8 @@
         op->getContext(),
         SmallVector<SparseTensorEncodingAttr::DimLevelType>(
             rank, SparseTensorEncodingAttr::DimLevelType::Dense),
-        AffineMap(), encSrc.getPointerBitWidth(), encSrc.getIndexBitWidth());
+        AffineMap(), AffineMap(), encSrc.getPointerBitWidth(),
+        encSrc.getIndexBitWidth());
     SmallVector<Value> sizes;
     SmallVector<Value> params;
     sizesFromPtr(rewriter, sizes, loc, encSrc, srcTensorTp, src);
@@ -1373,7 +1375,7 @@
     SmallVector<Value> params;
     sizesFromPtr(rewriter, sizes, loc, encSrc, srcType, src);
     auto enc = SparseTensorEncodingAttr::get(
-        op->getContext(), encSrc.getDimLevelType(), AffineMap(),
+        op->getContext(), encSrc.getDimLevelType(), AffineMap(), AffineMap(),
         encSrc.getPointerBitWidth(), encSrc.getIndexBitWidth());
     newParams(rewriter, params, loc, srcType, enc, Action::kToCOO, sizes, src);
     Value coo = genNewCall(rewriter, loc, params);
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
@@ -133,7 +133,8 @@
   // largest one among them) in the original operation instead of using the
   // default value.
   auto enc = SparseTensorEncodingAttr::get(
-      ctx, dims, AffineMap::getMultiDimIdentityMap(rank, ctx), 0, 0);
+      ctx, dims, AffineMap::getMultiDimIdentityMap(rank, ctx),
+      AffineMap(), 0, 0);
   return RankedTensorType::get(src.getShape(), src.getElementType(), enc);
 }
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -1938,7 +1938,9 @@
         op->getContext(), srcEnc.getDimLevelType(),
         permute(getContext(), op.getMatchingIndexingMap(t), topSort), // new order
-        srcEnc.getPointerBitWidth(), srcEnc.getIndexBitWidth());
+        srcEnc.getHigherOrdering(),
+        srcEnc.getPointerBitWidth(),
+        srcEnc.getIndexBitWidth());
     auto dstTp = RankedTensorType::get(srcTp.getShape(), srcTp.getElementType(),
                                        dstEnc);
     auto convert = rewriter.create<ConvertOp>(tval.getLoc(), dstTp, tval);
diff --git a/mlir/test/CAPI/sparse_tensor.c b/mlir/test/CAPI/sparse_tensor.c
--- a/mlir/test/CAPI/sparse_tensor.c
+++ b/mlir/test/CAPI/sparse_tensor.c
@@ -27,6 +27,7 @@
       "#sparse_tensor.encoding<{ "
       "dimLevelType = [ \"dense\", \"compressed\", \"compressed\"], "
       "dimOrdering = affine_map<(d0, d1, d2) -> (d0, d1, d2)>, "
+      "higherOrdering = affine_map<(d0, d1)[s0] -> (s0, d0, d1)>, "
       "pointerBitWidth = 32, indexBitWidth = 64 }>";
   // clang-format on
   MlirAttribute originalAttr =
@@ -38,6 +39,10 @@
       mlirSparseTensorEncodingAttrGetDimOrdering(originalAttr);
   // CHECK: (d0, d1, d2) -> (d0, d1, d2)
   mlirAffineMapDump(dimOrdering);
+  MlirAffineMap higherOrdering =
+      mlirSparseTensorEncodingAttrGetHigherOrdering(originalAttr);
+  // CHECK: (d0, d1)[s0] -> (s0, d0, d1)
+  mlirAffineMapDump(higherOrdering);
   // CHECK: level_type: 0
   // CHECK: level_type: 1
   // CHECK: level_type: 1
@@ -59,8 +64,8 @@
   fprintf(stderr, "index: %d\n", indexBitWidth);
 
   MlirAttribute newAttr = mlirSparseTensorEncodingAttrGet(
-      ctx, numLevelTypes, levelTypes, dimOrdering, pointerBitWidth,
-      indexBitWidth);
+      ctx, numLevelTypes, levelTypes, dimOrdering, higherOrdering,
+      pointerBitWidth, indexBitWidth);
   mlirAttributeDump(newAttr); // For debugging filecheck output.
 
   // CHECK: equal: 1
   fprintf(stderr, "equal: %d\n", mlirAttributeEqual(originalAttr, newAttr));
diff --git a/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
--- a/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
@@ -6,7 +6,7 @@
 // -----
 
 #a = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}>
-func.func private @tensor_size_mismatch(%arg0: tensor<8xi32, #a>) -> () // expected-error {{expected an array of size 1 for dimension level types}}
+func.func private @tensor_dimlevel_size_mismatch(%arg0: tensor<8xi32, #a>) -> () // expected-error {{expected an array of size 1 for dimension level types}}
 
 // -----
 
@@ -26,7 +26,12 @@
 // -----
 
 #a = #sparse_tensor.encoding<{dimOrdering = "wrong"}> // expected-error {{expected an affine map for dimension ordering}}
-func.func private @tensor_order_mismatch(%arg0: tensor<8xi32, #a>) -> ()
+func.func private @tensor_dimorder_mismatch(%arg0: tensor<8xi32, #a>) -> ()
+
+// -----
+
+#a = #sparse_tensor.encoding<{higherOrdering = "wrong"}> // expected-error {{expected an affine map for higher ordering}}
+func.func private @tensor_highorder_mismatch(%arg0: tensor<8xi32, #a>) -> ()
 
 // -----
 
@@ -57,3 +62,9 @@
 
 #a = #sparse_tensor.encoding<{key = 1}> // expected-error {{unexpected key: key}}
 func.func private @tensor_invalid_key(%arg0: tensor<16x32xf32, #a>) -> ()
+
+// -----
+
+#a = #sparse_tensor.encoding<{dimLevelType = [ "compressed", "compressed", "dense", "dense" ], dimOrdering = affine_map<(ii, jj, i, j) -> (ii, jj, i, j)>, higherOrdering = affine_map<(i, j) -> (j, i)>}> // expected-error {{unexpected higher ordering mapping from 2 to 2}}
+func.func private @tensor_invalid_key(%arg0: tensor<10x60xf32, #a>) -> ()
diff --git a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
--- a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
@@ -62,3 +62,29 @@
 // CHECK-LABEL: func private @sparse_sorted_coo(
 // CHECK-SAME: tensor<10x10xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>>)
 func.func private @sparse_sorted_coo(tensor<10x10xf64, #SortedCOO>)
+
+// -----
+
+#BCSR = #sparse_tensor.encoding<{
+  dimLevelType = [ "compressed", "compressed", "dense", "dense" ],
+  dimOrdering = affine_map<(ii, jj, i, j) -> (ii, jj, i, j)>,
+  higherOrdering = affine_map<(i, j) -> (i floordiv 2, j floordiv 3, i mod 2, j mod 3)>
+}>
+
+// CHECK-LABEL: func private @sparse_bcsr(
+// CHECK-SAME: tensor<10x60xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "dense", "dense" ], higherOrdering = affine_map<(d0, d1) -> (d0 floordiv 2, d1 floordiv 3, d0 mod 2, d1 mod 3)> }>>
+func.func private @sparse_bcsr(tensor<10x60xf64, #BCSR>)
+
+// -----
+
+#ELL = #sparse_tensor.encoding<{
+  dimLevelType = [ "dense", "dense", "compressed" ],
+  dimOrdering = affine_map<(ii, i, j) -> (ii, i, j)>,
+  higherOrdering = affine_map<(i,j)[c] -> (c*4*i, i, j)>
+}>
+
+// CHECK-LABEL: func private @sparse_ell(
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "compressed" ], higherOrdering = affine_map<(d0, d1)[s0] -> (d0 * (s0 * 4), d0, d1)> }>>
+func.func private @sparse_ell(tensor<?x?xf64, #ELL>)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
@@ -148,7 +148,8 @@
       for vec in vec_strategy:
         for e in [True]:
           vl = 1 if vec == 0 else 16
-          attr = st.EncodingAttr.get(level, ordering, pwidth, iwidth)
+          attr = st.EncodingAttr.get(level, ordering, None, pwidth,
+                                     iwidth)
           opt = (f'parallelization-strategy=none '
                  f'vectorization-strategy={vec} '
                  f'vl={vl} enable-simd-index32={e}')
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
@@ -141,7 +141,7 @@
       for ordering in orderings:
         for pwidth in bitwidths:
          for iwidth in bitwidths:
-            attr = st.EncodingAttr.get(level, ordering, pwidth, iwidth)
+            attr = st.EncodingAttr.get(level, ordering, None, pwidth, iwidth)
            build_compile_and_run_SpMM(attr, compiler)
            count = count + 1
  # CHECK: Passed 8 tests
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
@@ -89,7 +89,7 @@
   for level in levels:
     for ordering in orderings:
       for bwidth in bitwidths:
-        attr = st.EncodingAttr.get(level, ordering, bwidth, bwidth)
+        attr = st.EncodingAttr.get(level, ordering, None, bwidth, bwidth)
        build_compile_and_run_output(attr, compiler)
        count = count + 1
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py
@@ -216,7 +216,7 @@
     for ordering in orderings:
       for pwidth in bitwidths:
         for iwidth in bitwidths:
-          attr = st.EncodingAttr.get(level, ordering, pwidth, iwidth)
+          attr = st.EncodingAttr.get(level, ordering, None, pwidth, iwidth)
          types.append(ir.RankedTensorType.get(shape, f64, attr))
  #
  # For exhaustiveness we should have one or more StressTest, such
diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py
--- a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py
@@ -366,7 +366,8 @@
    mlir_storage_format = [f.value for f in self.format_pack.formats]
    return sparse_tensor.EncodingAttr.get(mlir_storage_format,
                                          ir.AffineMap.get_permutation(order),
-                                          _POINTER_BIT_WIDTH, _INDEX_BIT_WIDTH)
+                                          None, _POINTER_BIT_WIDTH,
+                                          _INDEX_BIT_WIDTH)
 
 
 def _make_format(formats: List[ModeFormat],
diff --git a/mlir/test/python/dialects/sparse_tensor/dialect.py b/mlir/test/python/dialects/sparse_tensor/dialect.py
--- a/mlir/test/python/dialects/sparse_tensor/dialect.py
+++ b/mlir/test/python/dialects/sparse_tensor/dialect.py
@@ -34,7 +34,7 @@
    # CHECK: index_bit_width: 32
    print(f"index_bit_width: {casted.index_bit_width}")
 
-    created = st.EncodingAttr.get(casted.dim_level_types, None, 0, 0)
+    created = st.EncodingAttr.get(casted.dim_level_types, None, None, 0, 0)
    # CHECK: #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>
    print(created)
    # CHECK: created_equal: False
@@ -74,7 +74,7 @@
    print(f"index_bit_width: {casted.index_bit_width}")
 
    created = st.EncodingAttr.get(casted.dim_level_types, casted.dim_ordering,
-                                  8, 32)
+                                  casted.higher_ordering, 8, 32)
[ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, pointerBitWidth = 8, indexBitWidth = 32 }> print(created) # CHECK: created_equal: True