diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td --- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td @@ -244,7 +244,7 @@ // offset = 0, size = 8, and a dynamic stride on the second dimension). #CSR_SLICE = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - slice = [ (0, 4, 1), (0, 8, ?) ] + dimSlices = [ (0, 4, 1), (0, 8, ?) ] }> ... tensor<?x?xf64, #CSR_SLICE> ... @@ -266,9 +266,6 @@ // The required bitwidth for coordinate storage. "unsigned":$crdWidth, // A slice attribute for each dimension of the tensor type. - // FIXME: The name used here is `dimSlices`, however the - // parser/printer uses the name `slice` instead. Therefore - // the parser/printer need to be updated to match. ArrayRefParameter< "::mlir::sparse_tensor::SparseTensorDimSliceAttr", "per dimension slice metadata" diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp --- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp +++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp @@ -408,7 +408,7 @@ // Process the data from the parsed dictionary value into struct-like data. SmallVector<DimLevelType> lvlTypes; - SmallVector<SparseTensorDimSliceAttr> slices; + SmallVector<SparseTensorDimSliceAttr> dimSlices; AffineMap dimToLvl = {}; unsigned posWidth = 0; unsigned crdWidth = 0; @@ -416,7 +416,7 @@ StringRef attrName; // Exactly 6 keys. 
SmallVector<StringRef, 3> keys = {"lvlTypes", "dimToLvl", "posWidth", - "crdWidth", "slice"}; + "crdWidth", "dimSlices"}; while (succeeded(parser.parseOptionalKeyword(&attrName))) { if (!llvm::is_contained(keys, attrName)) { parser.emitError(parser.getNameLoc(), "unexpected key: ") << attrName; @@ -464,13 +464,13 @@ auto intAttr = llvm::dyn_cast<IntegerAttr>(attr); ERROR_IF(!intAttr, "expected an integral index bitwidth") crdWidth = intAttr.getInt(); - } else if (attrName == "slice") { + } else if (attrName == "dimSlices") { RETURN_ON_FAIL(parser.parseLSquare()) // Dispatches to DimSliceAttr to skip mnemonic bool finished = false; while (auto attr = SparseTensorDimSliceAttr::parse(parser, nullptr)) { auto sliceAttr = llvm::cast<SparseTensorDimSliceAttr>(attr); - slices.push_back(sliceAttr); + dimSlices.push_back(sliceAttr); if (parser.parseOptionalComma().failed()) { finished = true; break; } @@ -494,7 +494,7 @@ // Construct struct-like storage for attribute. return parser.getChecked<SparseTensorEncodingAttr>( - parser.getContext(), lvlTypes, dimToLvl, posWidth, crdWidth, slices); + parser.getContext(), lvlTypes, dimToLvl, posWidth, crdWidth, dimSlices); } void SparseTensorEncodingAttr::print(AsmPrinter &printer) const { @@ -512,7 +512,7 @@ if (getCrdWidth()) printer << ", crdWidth = " << getCrdWidth(); if (!getDimSlices().empty()) { - printer << ", slice = [ "; + printer << ", dimSlices = [ "; llvm::interleaveComma(getDimSlices(), printer, [&](SparseTensorDimSliceAttr attr) { // Calls SparseTensorDimSliceAttr::print directly to diff --git a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir --- a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir +++ b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir @@ -41,7 +41,7 @@ #COOSlice = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ], - slice = [ (2, 2, 1), (12, 13, 1) ] + dimSlices = [ (2, 2, 1), (12, 13, 1) ] }> // CHECK-LABEL: func @sparse_nop_convert( diff --git 
a/mlir/test/Dialect/SparseTensor/invalid.mlir b/mlir/test/Dialect/SparseTensor/invalid.mlir --- a/mlir/test/Dialect/SparseTensor/invalid.mlir +++ b/mlir/test/Dialect/SparseTensor/invalid.mlir @@ -202,7 +202,7 @@ #CSR_SLICE = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - slice = [ (1, 4, 1), (1, 4, 2) ] + dimSlices = [ (1, 4, 1), (1, 4, 2) ] }> func.func @sparse_slice_offset(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index { @@ -215,7 +215,7 @@ #CSR_SLICE = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - slice = [ (1, 4, 1), (1, 4, 2) ] + dimSlices = [ (1, 4, 1), (1, 4, 2) ] }> func.func @sparse_slice_stride(%arg0: tensor<2x8xf64, #CSR_SLICE>) -> index { @@ -401,7 +401,7 @@ #CSR = #sparse_tensor.encoding<{ lvlTypes = ["dense", "compressed"], - slice = [ (1, 4, 1), (1, 4, 2) ] + dimSlices = [ (1, 4, 1), (1, 4, 2) ] }> func.func @sparse_convert_to_slice(%arg0: tensor<10x?xf32>) -> tensor<10x10xf32, #CSR> { diff --git a/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir --- a/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir +++ b/mlir/test/Dialect/SparseTensor/invalid_encoding.mlir @@ -66,6 +66,6 @@ #CSR_SLICE = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - slice = [ (-1, ?, 1), (?, 4, 2) ] // expected-error{{expect positive value or ? for slice offset/size/stride}} + dimSlices = [ (-1, ?, 1), (?, 4, 2) ] // expected-error{{expect positive value or ? 
for slice offset/size/stride}} }> func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>) diff --git a/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir b/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir --- a/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir +++ b/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir @@ -10,7 +10,7 @@ #Slice = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ], - slice = [ (?, 1, 1), (?, 3, 1) ] + dimSlices = [ (?, 1, 1), (?, 3, 1) ] }> // CHECK-LABEL: func @sparse_nop_cast( diff --git a/mlir/test/Dialect/SparseTensor/roundtrip.mlir b/mlir/test/Dialect/SparseTensor/roundtrip.mlir --- a/mlir/test/Dialect/SparseTensor/roundtrip.mlir +++ b/mlir/test/Dialect/SparseTensor/roundtrip.mlir @@ -144,7 +144,7 @@ #CSR_SLICE = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - slice = [ (1, 4, 1), (1, 4, 2) ] + dimSlices = [ (1, 4, 1), (1, 4, 2) ] }> // CHECK-LABEL: func @sparse_slice_offset( @@ -160,7 +160,7 @@ #CSR_SLICE = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - slice = [ (1, 4, 1), (1, 4, 2) ] + dimSlices = [ (1, 4, 1), (1, 4, 2) ] }> // CHECK-LABEL: func @sparse_slice_stride( @@ -189,7 +189,7 @@ #SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> #SparseVector_Slice = #sparse_tensor.encoding<{ lvlTypes = ["compressed"], - slice = [ (?, ?, ?) ] + dimSlices = [ (?, ?, ?) ] }> // CHECK-LABEL: func @sparse_metadata_init( @@ -221,7 +221,7 @@ #SparseVector_Slice = #sparse_tensor.encoding<{ lvlTypes = ["compressed"], - slice = [ (?, ?, ?) ] + dimSlices = [ (?, ?, ?) ] }> // CHECK-LABEL: func @sparse_get_md( @@ -238,7 +238,7 @@ #SparseVector = #sparse_tensor.encoding<{ lvlTypes = ["compressed"], - slice = [ (?, ?, ?) ] + dimSlices = [ (?, ?, ?) 
] }> // CHECK-LABEL: func @sparse_get_md( diff --git a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir --- a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir +++ b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir @@ -100,31 +100,31 @@ #CSR_SLICE = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - slice = [ (1, 4, 1), (1, 4, 2) ] + dimSlices = [ (1, 4, 1), (1, 4, 2) ] }> // CHECK-LABEL: func private @sparse_slice( -// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], slice = [ (1, 4, 1), (1, 4, 2) ] }>> +// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimSlices = [ (1, 4, 1), (1, 4, 2) ] }>> func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>) // ----- #CSR_SLICE = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - slice = [ (1, 4, 1), (1, 4, 2) ] + dimSlices = [ (1, 4, 1), (1, 4, 2) ] }> // CHECK-LABEL: func private @sparse_slice( -// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], slice = [ (1, 4, 1), (1, 4, 2) ] }>> +// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimSlices = [ (1, 4, 1), (1, 4, 2) ] }>> func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>) // ----- #CSR_SLICE = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - slice = [ (1, ?, 1), (?, 4, 2) ] + dimSlices = [ (1, ?, 1), (?, 4, 2) ] }> // CHECK-LABEL: func private @sparse_slice( -// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], slice = [ (1, ?, 1), (?, 4, 2) ] }>> +// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimSlices = [ (1, ?, 1), (?, 4, 2) ] }>> func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>) diff --git a/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir b/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_extract_slice.mlir @@ -6,7 +6,7 @@ #CSR_SLICE = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - slice = [ (0, 4, 1), (0, 8, 1) ] + dimSlices = [ (0, 4, 1), (0, 8, 1) ] }> // CHECK-LABEL: func.func @sparse_slice( diff --git a/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir b/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir @@ -30,12 +30,12 @@ #CSR_SLICE = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], - slice = [ 
(0, 4, 1), (2, 4, 1) ] + dimSlices = [ (0, 4, 1), (2, 4, 1) ] }> #CSR_SLICE_DYN = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], - slice = [ (?, ?, ?), (?, ?, ?) ] + dimSlices = [ (?, ?, ?), (?, ?, ?) ] }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir @@ -16,12 +16,12 @@ #CSR_SLICE = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - slice = [ (1, 4, 1), (1, 4, 2) ] + dimSlices = [ (1, 4, 1), (1, 4, 2) ] }> #CSR_SLICE_DYN = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - slice = [ (?, ?, ?), (?, ?, ?) ] + dimSlices = [ (?, ?, ?), (?, ?, ?) ] }> #COO = #sparse_tensor.encoding<{ @@ -30,12 +30,12 @@ #COO_SLICE = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ], - slice = [ (1, 4, 1), (1, 4, 2) ] + dimSlices = [ (1, 4, 1), (1, 4, 2) ] }> #COO_SLICE_DYN = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ], - slice = [ (?, ?, ?), (?, ?, ?) ] + dimSlices = [ (?, ?, ?), (?, ?, ?) 
] }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir @@ -16,7 +16,7 @@ #DCSR_SLICE = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], - slice = [ (0, 4, 1), (0, 8, 1) ] + dimSlices = [ (0, 4, 1), (0, 8, 1) ] }> #CSR = #sparse_tensor.encoding<{ @@ -25,7 +25,7 @@ #CSR_SLICE = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - slice = [ (0, 4, 1), (0, 8, 1) ] + dimSlices = [ (0, 4, 1), (0, 8, 1) ] }> #COO = #sparse_tensor.encoding<{ @@ -34,32 +34,32 @@ #CSR_SLICE_1 = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - slice = [ (0, 4, 2), (0, 4, 1) ] + dimSlices = [ (0, 4, 2), (0, 4, 1) ] }> #DCSR_SLICE_1 = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], - slice = [ (0, 4, 2), (1, 4, 1) ] + dimSlices = [ (0, 4, 2), (1, 4, 1) ] }> #COO_SLICE_1 = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ], - slice = [ (0, 4, 2), (0, 4, 1) ] + dimSlices = [ (0, 4, 2), (0, 4, 1) ] }> #COO_SLICE_2 = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ], - slice = [ (0, 4, 2), (1, 4, 1) ] + dimSlices = [ (0, 4, 2), (1, 4, 1) ] }> #CSR_SLICE_dyn = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], - slice = [ (?, 4, ?), (?, 4, ?) ] + dimSlices = [ (?, 4, ?), (?, 4, ?) ] }> #DCSR_SLICE_dyn = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], - slice = [ (?, 4, ?), (?, 4, ?) ] + dimSlices = [ (?, 4, ?), (?, 4, ?) ] }> module {