diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -207,27 +207,27 @@
case DimLevelType::Compressed:
return "compressed";
case DimLevelType::CompressedNu:
- return "compressed-nu";
+ return "compressed_nu";
case DimLevelType::CompressedNo:
- return "compressed-no";
+ return "compressed_no";
case DimLevelType::CompressedNuNo:
- return "compressed-nu-no";
+ return "compressed_nu_no";
case DimLevelType::Singleton:
return "singleton";
case DimLevelType::SingletonNu:
- return "singleton-nu";
+ return "singleton_nu";
case DimLevelType::SingletonNo:
- return "singleton-no";
+ return "singleton_no";
case DimLevelType::SingletonNuNo:
- return "singleton-nu-no";
+ return "singleton_nu_no";
case DimLevelType::CompressedWithHi:
- return "compressed-hi";
+ return "compressed_hi";
case DimLevelType::CompressedWithHiNu:
- return "compressed-hi-nu";
+ return "compressed_hi_nu";
case DimLevelType::CompressedWithHiNo:
- return "compressed-hi-no";
+ return "compressed_hi_no";
case DimLevelType::CompressedWithHiNuNo:
- return "compressed-hi-nu-no";
+ return "compressed_hi_nu_no";
case DimLevelType::TwoOutOfFour:
return "compressed24";
}
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
@@ -200,7 +200,7 @@
// Sorted Coordinate Scheme.
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
... tensor<?x?xf64, #SortedCOO> ...
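As a quick check of the renamed spelling, a minimal round-trip sketch using the upstream Python bindings (an illustrative assumption: an in-tree build where `mlir.ir` is importable and `Context()` registers the sparse_tensor dialect):

from mlir.ir import Attribute, Context

with Context():
    # Parse an encoding that uses the new underscore spelling; printing it
    # back goes through the attribute printer and should round-trip unchanged.
    coo = Attribute.parse(
        '#sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>')
    print(coo)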
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
--- a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
@@ -1200,7 +1200,7 @@
uint64_t trailCOOLen = 0, parentSz = 1, bufIdx = 0;
for (uint64_t l = 0; l < lvlRank; l++) {
if (!isUniqueLvl(l) && isCompressedLvl(l)) {
- // A `compressed-nu` level marks the start of trailing COO start level.
+ // A `compressed_nu` level marks the start of the trailing COO region.
// Since the coordinate buffers for the trailing COO are passed in using an
// AoS scheme while SparseTensorStorage uses an SoA scheme, we cannot simply
// copy the values from the provided buffers.
@@ -1208,7 +1208,7 @@
break;
}
assert(!isSingletonLvl(l) &&
- "Singleton level not following a compressed-nu level");
+ "Singleton level not following a compressed_nu level");
if (isCompressedLvl(l)) {
P *posPtr = reinterpret_cast<P *>(lvlBufs[bufIdx++]);
C *crdPtr = reinterpret_cast<C *>(lvlBufs[bufIdx++]);
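The AoS/SoA mismatch called out in the comment above is easiest to see on a toy example. A purely illustrative Python sketch (not the C++ implementation), assuming a 2-D trailing COO region with three stored entries:

# AoS: trailing-COO coordinates arrive interleaved, one (i, j) pair per entry.
aos_crds = [0, 1, 2, 4, 5, 5]
# SoA: SparseTensorStorage keeps one coordinate array per level, so the copy
# must de-interleave entry by entry instead of copying the buffer wholesale.
soa_i = aos_crds[0::2]  # [0, 2, 5]
soa_j = aos_crds[1::2]  # [1, 4, 5]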
diff --git a/mlir/lib/Bindings/Python/DialectSparseTensor.cpp b/mlir/lib/Bindings/Python/DialectSparseTensor.cpp
--- a/mlir/lib/Bindings/Python/DialectSparseTensor.cpp
+++ b/mlir/lib/Bindings/Python/DialectSparseTensor.cpp
@@ -21,19 +21,19 @@
.value("dense", MLIR_SPARSE_TENSOR_DIM_LEVEL_DENSE)
.value("compressed24", MLIR_SPARSE_TENSOR_DIM_LEVEL_TWO_OUT_OF_FOUR)
.value("compressed", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED)
- .value("compressed-nu", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU)
- .value("compressed-no", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NO)
- .value("compressed-nu-no", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU_NO)
+ .value("compressed_nu", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU)
+ .value("compressed_no", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NO)
+ .value("compressed_nu_no", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_NU_NO)
.value("singleton", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON)
- .value("singleton-nu", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU)
- .value("singleton-no", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NO)
- .value("singleton-nu-no", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU_NO)
- .value("compressed-hi", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_WITH_HI)
- .value("compressed-hi-nu",
+ .value("singleton_nu", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU)
+ .value("singleton_no", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NO)
+ .value("singleton_nu_no", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON_NU_NO)
+ .value("compressed_hi", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_WITH_HI)
+ .value("compressed_hi_nu",
MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_WITH_HI_NU)
- .value("compressed-hi-no",
+ .value("compressed_hi_no",
MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_WITH_HI_NO)
- .value("compressed-hi-nu-no",
+ .value("compressed_hi_nu_no",
MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED_WITH_HI_NU_NO);
mlir_attribute_subclass(m, "EncodingAttr",
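One practical payoff of the underscore spelling shows up right here in the bindings: pybind11 exposes every .value(...) name as an attribute on the enum class, and a name containing "-" is not a valid Python identifier. A small sketch (assuming the bindings are importable as mlir.dialects.sparse_tensor):

from mlir.dialects import sparse_tensor as st

# Before the rename this required getattr(st.DimLevelType, "compressed-nu");
# after it, plain attribute access works.
lvl = st.DimLevelType.compressed_nu
print(lvl)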
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -821,7 +821,7 @@
//===----------------------------------------------------------------------===//
/// We normalize the sparse tensor encoding attribute by always using
-/// ordered/unique DLT such that "compressed-nu-no" and "compressed-nu" (as well
+/// ordered/unique DLT such that "compressed_nu_no" and "compressed_nu" (as well
/// as other variants) lead to the same storage specifier type, and stripping
/// irrelevant fields that do not alter the sparse tensor memory layout.
static SparseTensorEncodingAttr
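A string-level sketch of the normalization described above (normalize_lvl_type is a hypothetical helper, not the C++ API): the ordering/uniqueness suffixes do not change the memory layout, so stripping them maps every variant to the same canonical level type.

def normalize_lvl_type(dlt: str) -> str:
    # "compressed_nu_no", "compressed_nu", "compressed_no" -> "compressed"
    for suffix in ("_nu_no", "_nu", "_no"):
        if dlt.endswith(suffix):
            return dlt[: -len(suffix)]
    return dlt

assert normalize_lvl_type("compressed_nu_no") == normalize_lvl_type("compressed_nu")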
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -1183,7 +1183,7 @@
ConversionPatternRewriter &rewriter) const override {
// Query memSizes for the actually stored values.
// FIXME: the nse value computed in this way might be wrong when there is
- // any "compressed-hi" level.
+ // any "compressed_hi" level.
rewriter.replaceOp(
op, genValMemSize(rewriter, op.getLoc(), adaptor.getTensor()));
return success();
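To see why the FIXME holds, recall that a "compressed_hi" level stores explicit (lo, hi) bounds per segment, which permits unused slack inside the values buffer; the value memSize is then only an upper bound on the number of stored entries. A toy illustration with made-up numbers:

# Two occupied segments inside a values buffer of capacity 8:
segments = [(0, 2), (4, 7)]                # (lo, hi) bounds of a loose level
nse = sum(hi - lo for lo, hi in segments)  # exact count: 5
val_mem_size = 8                           # what the memSizes query reports
assert nse <= val_mem_size                 # equality only without slack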
diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir
--- a/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir
+++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir
@@ -2,7 +2,7 @@
// RUN: --sparsification="enable-gpu-libgen" | FileCheck %s
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
module {
diff --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir
--- a/mlir/test/Dialect/SparseTensor/codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/codegen.mlir
@@ -27,7 +27,7 @@
}>
#UCSR = #sparse_tensor.encoding<{
- lvlTypes = [ "dense", "compressed-no" ]
+ lvlTypes = [ "dense", "compressed_no" ]
}>
#CSC = #sparse_tensor.encoding<{
@@ -47,16 +47,16 @@
}>
#Coo = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#CooPNo = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton-no" ],
+ lvlTypes = [ "compressed_nu", "singleton_no" ],
dimToLvl = affine_map<(i, j) -> (j, i)>
}>
#ccoo = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed", "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed", "compressed_nu", "singleton" ]
}>
// CHECK-LABEL: func @sparse_nop(
@@ -523,7 +523,7 @@
return %1 : tensor<8x8xf64, #CSR>
}
-// CHECK-LABEL: func.func private @"_insert_dense_compressed-no_8_8_f64_0_0"(
+// CHECK-LABEL: func.func private @_insert_dense_compressed_no_8_8_f64_0_0(
// CHECK-SAME: %[[A1:.*0]]: memref<?xindex>,
// CHECK-SAME: %[[A2:.*1]]: memref<?xindex>,
// CHECK-SAME: %[[A3:.*2]]: memref<?xf64>,
@@ -549,7 +549,7 @@
// CHECK: %[[A13:.*]]:4 = scf.for %[[A14:.*]] = %[[A11]] to %[[A7]] step %[[A12]] iter_args(%[[A15:.*]] = %[[A0]], %[[A16:.*]] = %[[A1]], %[[A17:.*]] = %[[A2]], %[[A18:.*]] = %[[A3]]) -> (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
// CHECK: %[[A19:.*]] = memref.load %[[A6]]{{\[}}%[[A14]]] : memref<?xindex>
// CHECK: %[[A20:.*]] = memref.load %[[A4]]{{\[}}%[[A19]]] : memref<?xf64>
-// CHECK: %[[A21:.*]]:4 = func.call @"_insert_dense_compressed-no_8_8_f64_0_0"(%[[A15]], %[[A16]], %[[A17]], %[[A18]], %[[A8]], %[[A19]], %[[A20]]) : (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
+// CHECK: %[[A21:.*]]:4 = func.call @_insert_dense_compressed_no_8_8_f64_0_0(%[[A15]], %[[A16]], %[[A17]], %[[A18]], %[[A8]], %[[A19]], %[[A20]]) : (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
// CHECK: memref.store %[[A10]], %[[A4]]{{\[}}%[[A19]]] : memref<?xf64>
// CHECK: memref.store %[[A9]], %[[A5]]{{\[}}%[[A19]]] : memref<?xi1>
// CHECK: scf.yield %[[A21]]#0, %[[A21]]#1, %[[A21]]#2, %[[A21]]#3 : memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
@@ -627,7 +627,7 @@
return %1 : tensor<128xf64, #SparseVector>
}
-// CHECK-LABEL: func.func private @"_insert_compressed-nu_singleton_5_6_f64_0_0"(
+// CHECK-LABEL: func.func private @_insert_compressed_nu_singleton_5_6_f64_0_0(
// CHECK-SAME: %[[A1:.*0]]: memref<?xindex>,
// CHECK-SAME: %[[A2:.*1]]: memref<?xindex>,
// CHECK-SAME: %[[A3:.*2]]: memref<?xf64>,
@@ -643,7 +643,7 @@
// CHECK-SAME: %[[A3:.*3]]: !sparse_tensor.storage_specifier
// CHECK-SAME: %[[A4:.*4]]: index,
// CHECK-SAME: %[[A5:.*5]]: f64)
-// CHECK: %[[R:.*]]:4 = call @"_insert_compressed-nu_singleton_5_6_f64_0_0"(%[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A4]], %[[A5]])
+// CHECK: %[[R:.*]]:4 = call @_insert_compressed_nu_singleton_5_6_f64_0_0(%[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A4]], %[[A5]])
// CHECK: return %[[R]]#0, %[[R]]#1, %[[R]]#2, %[[R]]#3
func.func @sparse_insert_coo(%arg0: tensor<5x6xf64, #Coo>, %arg1: index, %arg2: f64) -> tensor<5x6xf64, #Coo> {
%0 = sparse_tensor.insert %arg2 into %arg0[%arg1, %arg1] : tensor<5x6xf64, #Coo>
diff --git a/mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir b/mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir
--- a/mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir
+++ b/mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s --sparse-tensor-codegen --canonicalize --cse | FileCheck %s
#CSR = #sparse_tensor.encoding<{ lvlTypes = ["dense", "compressed"]}>
-#COO = #sparse_tensor.encoding<{ lvlTypes = ["compressed-nu", "singleton"]}>
+#COO = #sparse_tensor.encoding<{ lvlTypes = ["compressed_nu", "singleton"]}>
// CHECK-LABEL: func.func @sparse_alloc_copy_CSR(
// CHECK-SAME: %[[VAL_0:.*0]]: memref<?xindex>,
diff --git a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
--- a/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
+++ b/mlir/test/Dialect/SparseTensor/convert_sparse2sparse.mlir
@@ -26,11 +26,11 @@
}>
#SortedCOO2D = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton" ],
}>
#SortedCOO3D = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ]
}>
@@ -40,7 +40,7 @@
}>
#COOSlice = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton" ],
dimSlices = [ (2, 2, 1), (12, 13, 1) ]
}>
diff --git a/mlir/test/Dialect/SparseTensor/invalid.mlir b/mlir/test/Dialect/SparseTensor/invalid.mlir
--- a/mlir/test/Dialect/SparseTensor/invalid.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid.mlir
@@ -32,7 +32,7 @@
// -----
-#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed-nu", "singleton"], posWidth=32, crdWidth=32}>
+#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed_nu", "singleton"], posWidth=32, crdWidth=32}>
func.func @invalid_pack_type(%values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x3xi32>)
-> tensor<100x2xf64, #SparseVector> {
@@ -68,7 +68,7 @@
// -----
-#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed-nu", "singleton"], posWidth=32, crdWidth=32}>
+#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed_nu", "singleton"], posWidth=32, crdWidth=32}>
func.func @invalid_unpack_type(%sp: tensor<100x2xf64, #SparseVector>, %values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x3xi32>) {
// expected-error@+1 {{input/output trailing COO level-ranks don't match}}
@@ -270,7 +270,7 @@
// -----
-#COO = #sparse_tensor.encoding<{lvlTypes = ["compressed-nu", "singleton"]}>
+#COO = #sparse_tensor.encoding<{lvlTypes = ["compressed_nu", "singleton"]}>
func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#COO>) -> index {
// expected-error@+1 {{requested position memory size on a singleton level}}
diff --git a/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir b/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir
--- a/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir
+++ b/mlir/test/Dialect/SparseTensor/pre_rewriting.mlir
@@ -5,7 +5,7 @@
}>
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#DCSR = #sparse_tensor.encoding<{
@@ -13,7 +13,7 @@
}>
#Slice = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton" ],
dimSlices = [ (?, 1, 1), (?, 3, 1) ]
}>
diff --git a/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir b/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
--- a/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
@@ -11,12 +11,12 @@
}>
#COO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
// CHECK-LABEL: func.func @sparse_new(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> {
-// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr<i8> to tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>>
+// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr<i8> to tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>
// CHECK: %[[R:.*]] = sparse_tensor.convert %[[COO]]
// CHECK: bufferization.dealloc_tensor %[[COO]]
// CHECK: return %[[R]]
@@ -27,7 +27,7 @@
// CHECK-LABEL: func.func @sparse_new_csc(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>> {
-// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr<i8> to tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>>
+// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr<i8> to tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>>
// CHECK: %[[R:.*]] = sparse_tensor.convert %[[COO]]
// CHECK: bufferization.dealloc_tensor %[[COO]]
// CHECK: return %[[R]]
@@ -37,8 +37,8 @@
}
// CHECK-LABEL: func.func @sparse_new_coo(
-// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> {
-// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr<i8> to tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>>
+// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> {
+// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr<i8> to tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>
// CHECK: return %[[COO]]
func.func @sparse_new_coo(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #COO> {
%0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<?x?xf32, #COO>
diff --git a/mlir/test/Dialect/SparseTensor/roundtrip.mlir b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
--- a/mlir/test/Dialect/SparseTensor/roundtrip.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
@@ -103,7 +103,7 @@
// -----
-#COO = #sparse_tensor.encoding<{lvlTypes = ["compressed-nu", "singleton"]}>
+#COO = #sparse_tensor.encoding<{lvlTypes = ["compressed_nu", "singleton"]}>
// CHECK-LABEL: func @sparse_indices_buffer(
// CHECK-SAME: %[[A:.*]]: tensor)
diff --git a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
--- a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
@@ -46,31 +46,31 @@
// -----
#COO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu-no", "singleton-no" ]
+ lvlTypes = [ "compressed_nu_no", "singleton_no" ]
}>
// CHECK-LABEL: func private @sparse_coo(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu-no", "singleton-no" ] }>>)
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu_no", "singleton_no" ] }>>)
func.func private @sparse_coo(tensor<?x?xf64, #COO>)
// -----
#BCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "dense", "compressed-hi-nu", "singleton" ]
+ lvlTypes = [ "dense", "compressed_hi_nu", "singleton" ]
}>
// CHECK-LABEL: func private @sparse_bcoo(
-// CHECK-SAME: tensor<?x?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed-hi-nu", "singleton" ] }>>)
+// CHECK-SAME: tensor<?x?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed_hi_nu", "singleton" ] }>>)
func.func private @sparse_bcoo(tensor<?x?x?xf64, #BCOO>)
// -----
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
// CHECK-LABEL: func private @sparse_sorted_coo(
-// CHECK-SAME: tensor<10x10xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>>)
+// CHECK-SAME: tensor<10x10xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>)
func.func private @sparse_sorted_coo(tensor<10x10xf64, #SortedCOO>)
// -----
diff --git a/mlir/test/Dialect/SparseTensor/sorted_coo.mlir b/mlir/test/Dialect/SparseTensor/sorted_coo.mlir
--- a/mlir/test/Dialect/SparseTensor/sorted_coo.mlir
+++ b/mlir/test/Dialect/SparseTensor/sorted_coo.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s -sparsification --canonicalize | FileCheck %s
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#trait_scale = {
@@ -37,14 +37,14 @@
//
// CHECK-LABEL: func.func @sparse_scale(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> {
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> {
// CHECK-DAG: %[[VAL_1:.*]] = arith.constant false
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 2.000000e+00 : f32
-// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
-// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
-// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xf32>
+// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xindex>
+// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xf32>
// CHECK-DAG: %[[VAL_8:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK-DAG: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: %[[VAL_10:.*]] = scf.while (%[[VAL_11:.*]] = %[[VAL_8]]) : (index) -> index {
@@ -75,8 +75,8 @@
// CHECK: } {"Emitted from" = "linalg.generic"}
// CHECK: scf.yield %[[VAL_28:.*]] : index
// CHECK: } attributes {"Emitted from" = "linalg.generic"}
-// CHECK: %[[VAL_29:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>>
-// CHECK: return %[[VAL_29]] : tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>>
+// CHECK: %[[VAL_29:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>
+// CHECK: return %[[VAL_29]] : tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>
// CHECK: }
func.func @sparse_scale(%argx: tensor<?x?xf32, #SortedCOO>) -> tensor<?x?xf32, #SortedCOO> {
%c = arith.constant 2.0 : f32
@@ -90,16 +90,16 @@
}
// CHECK-LABEL: func.func @matvec(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>>,
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<64xf64>,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant false
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
-// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
-// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
-// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
-// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xf64>
+// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xindex>
+// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xf64>
// CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
@@ -155,21 +155,21 @@
}
// CHECK-LABEL: func.func @mateltmul(
-// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>>,
-// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>>,
+// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>,
+// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>,
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x64xf64>) -> tensor<32x64xf64> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant false
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0.000000e+00 : f64
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
-// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
-// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
-// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
-// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xf64>
-// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
-// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
-// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
-// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] }>> to memref<?xf64>
+// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xindex>
+// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xf64>
+// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xindex>
+// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x64xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> to memref<?xf64>
// CHECK: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x64xf64>
// CHECK: linalg.fill ins(%[[VAL_4]] : f64) outs(%[[VAL_15]] : memref<32x64xf64>)
// CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref<?xindex>
diff --git a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
@@ -1050,7 +1050,7 @@
}
#BatchedVector = #sparse_tensor.encoding<{
- lvlTypes = [ "dense", "compressed-hi" ],
+ lvlTypes = [ "dense", "compressed_hi" ],
}>
// CHECK-LABEL: func.func @sub_ss_batched(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<2x3xf64, #{{.*}}>>,
diff --git a/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir b/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_foreach.mlir
@@ -141,7 +141,7 @@
}
#BCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "dense", "compressed-hi-nu", "singleton" ],
+ lvlTypes = [ "dense", "compressed_hi_nu", "singleton" ],
}>
// CHECK-LABEL: func.func @foreach_bcoo(
diff --git a/mlir/test/Dialect/SparseTensor/sparse_pack.mlir b/mlir/test/Dialect/SparseTensor/sparse_pack.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_pack.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_pack.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s --canonicalize --post-sparsification-rewrite="enable-runtime-library=false" --sparse-tensor-codegen -cse --canonicalize | FileCheck %s
#COO = #sparse_tensor.encoding<{
- lvlTypes = ["compressed-nu", "singleton"],
+ lvlTypes = ["compressed_nu", "singleton"],
crdWidth=32
}>
diff --git a/mlir/test/Dialect/SparseTensor/sparse_reshape_dot.mlir b/mlir/test/Dialect/SparseTensor/sparse_reshape_dot.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_reshape_dot.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_reshape_dot.mlir
@@ -1,12 +1,12 @@
// RUN: mlir-opt %s --linalg-generalize-named-ops --sparsification --cse --canonicalize | FileCheck %s
-#COO_2D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
-#COO_3D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
+#COO_2D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
+#COO_3D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
// CHECK-LABEL: func.func @sparse_reshape_fused(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<5x6xf32>,
-// CHECK-SAME: %[[VAL_1:.*]]: tensor<6x2x3xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ], posWidth = 32, crdWidth = 32 }>>) -> tensor<?x?xf32> {
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<6x2x3xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ], posWidth = 32, crdWidth = 32 }>>) -> tensor<?x?xf32> {
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant false
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 5 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 3 : index
diff --git a/mlir/test/Dialect/SparseTensor/unsparsifiable_dense_op.mlir b/mlir/test/Dialect/SparseTensor/unsparsifiable_dense_op.mlir
--- a/mlir/test/Dialect/SparseTensor/unsparsifiable_dense_op.mlir
+++ b/mlir/test/Dialect/SparseTensor/unsparsifiable_dense_op.mlir
@@ -15,7 +15,7 @@
}
#VEC = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ], posWidth = 32, crdWidth = 32 }>
-#COO = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
+#COO = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
#CCC = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ], posWidth = 32, crdWidth = 32 }>
//
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir
@@ -31,8 +31,8 @@
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{env} %{run_sve} | FileCheck %s %}
-#COO_2D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
-#COO_3D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
+#COO_2D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
+#COO_3D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ], posWidth = 32, crdWidth = 32 }>
module {
func.func private @printMemref3dF32(%ptr : tensor<?x?x?xf32>) attributes { llvm.emit_c_interface }
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir
@@ -44,11 +44,11 @@
}>
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#SortedCOOPerm = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton" ],
dimToLvl = affine_map<(i,j) -> (j,i)>
}>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir
@@ -29,7 +29,7 @@
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
#Tensor1 = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ]
}>
#Tensor2 = #sparse_tensor.encoding<{
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir
@@ -31,7 +31,7 @@
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#CSR = #sparse_tensor.encoding<{
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
@@ -38,16 +38,16 @@
}>
#COO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#COO_SLICE = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton" ],
dimSlices = [ (1, 4, 1), (1, 4, 2) ]
}>
#COO_SLICE_DYN = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton" ],
dimSlices = [ (?, ?, ?), (?, ?, ?) ]
}>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir
@@ -32,7 +32,7 @@
}>
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#CSR = #sparse_tensor.encoding<{
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir
@@ -36,11 +36,11 @@
}>
#CCoo = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed", "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed", "compressed_nu", "singleton" ]
}>
#DCoo = #sparse_tensor.encoding<{
- lvlTypes = [ "dense", "compressed-nu", "singleton" ]
+ lvlTypes = [ "dense", "compressed_nu", "singleton" ]
}>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir
@@ -41,7 +41,7 @@
}>
#COO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#CSR_SLICE_1 = #sparse_tensor.encoding<{
@@ -55,12 +55,12 @@
}>
#COO_SLICE_1 = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton" ],
dimSlices = [ (0, 4, 2), (0, 4, 1) ]
}>
#COO_SLICE_2 = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton" ],
dimSlices = [ (0, 4, 2), (1, 4, 1) ]
}>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
@@ -27,11 +27,11 @@
// TODO: support sparse_tensor.unpack on libgen path.
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#SortedCOOI32 = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton" ],
posWidth = 32,
crdWidth = 32
}>
@@ -43,7 +43,7 @@
}>
#BCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "dense", "compressed-hi-nu", "singleton" ]
+ lvlTypes = [ "dense", "compressed_hi_nu", "singleton" ]
}>
module {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_libgen.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_libgen.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_libgen.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_libgen.mlir
@@ -27,11 +27,11 @@
// after sparse_tensor.unpack is supported on libgen path.
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#SortedCOOI32 = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton" ],
posWidth = 32,
crdWidth = 32
}>
@@ -42,9 +42,9 @@
crdWidth = 32
}>
-// TODO: "compressed-hi" is not supported by libgen path.
+// TODO: "compressed_hi" is not supported by libgen path.
// #BCOO = #sparse_tensor.encoding<{
-// lvlTypes = [ "dense", "compressed-hi-nu", "singleton" ]
+// lvlTypes = [ "dense", "compressed_hi_nu", "singleton" ]
//}>
module {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
@@ -35,20 +35,20 @@
!Filename = !llvm.ptr<i8>
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#SortedCOOPermuted = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton" ],
dimToLvl = affine_map<(i,j) -> (j,i)>
}>
#SortedCOO3D = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ]
}>
#SortedCOO3DPermuted = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ],
+ lvlTypes = [ "compressed_nu", "singleton_nu", "singleton" ],
dimToLvl = affine_map<(i,j,k) -> (k,i,j)>
}>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir
@@ -32,7 +32,7 @@
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
module {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matmul-lib.mlir b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matmul-lib.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matmul-lib.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matmul-lib.mlir
@@ -25,7 +25,7 @@
// RUNNOT: %{compile} enable-runtime-library=false gpu-data-transfer-strategy=zero-copy" | %{run}
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#CSR = #sparse_tensor.encoding<{
diff --git a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-lib.mlir b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-lib.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-lib.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-lib.mlir
@@ -25,7 +25,7 @@
//
#SortedCOO = #sparse_tensor.encoding<{
- lvlTypes = [ "compressed-nu", "singleton" ]
+ lvlTypes = [ "compressed_nu", "singleton" ]
}>
#CSR = #sparse_tensor.encoding<{