diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir
--- a/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir
+++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir
@@ -2,7 +2,7 @@
 // RUN:   --sparsification="enable-gpu-libgen" | FileCheck %s
 
 #SortedCOO = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed-nu", "singleton" ]
+  lvlTypes = [ "compressed-nu", "singleton" ]
 }>
 
 module {
diff --git a/mlir/test/Dialect/SparseTensor/sparse_tensor_reshape.mlir b/mlir/test/Dialect/SparseTensor/sparse_tensor_reshape.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_tensor_reshape.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_tensor_reshape.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s --post-sparsification-rewrite="enable-runtime-library=false enable-convert=false" \
 // RUN: --cse --canonicalize | FileCheck %s
 
-#SparseMatrix = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>
+#SparseMatrix = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>
 
 // CHECK: func.func @sparse_reshape(
 // CHECK-SAME: %[[S:.*]]:
@@ -35,11 +35,11 @@
 // CHECK:       scf.yield %[[RET_1]]
 // CHECK:     }
 // CHECK:     %[[NT1:.*]] = sparse_tensor.load %[[RET]] hasInserts
-// CHECK:     return %[[NT1]] : tensor<10x10xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>>
+// CHECK:     return %[[NT1]] : tensor<10x10xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>
 //
 func.func @sparse_reshape(%arg0: tensor<4x25xf64, #SparseMatrix>) -> tensor<10x10xf64, #SparseMatrix> {
   %shape = arith.constant dense <[ 10, 10 ]> : tensor<2xi32>
   %0 = tensor.reshape %arg0(%shape) :
     (tensor<4x25xf64, #SparseMatrix>, tensor<2xi32>) -> tensor<10x10xf64, #SparseMatrix>
   return %0 : tensor<10x10xf64, #SparseMatrix>
-}
\ No newline at end of file
+}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-lib.mlir b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-lib.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-lib.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-matvec-lib.mlir
@@ -14,11 +14,11 @@
 // TODO: without RT lib (AoS COO):
 
 #SortedCOO = #sparse_tensor.encoding<{
-  dimLevelType = [ "compressed-nu", "singleton" ]
+  lvlTypes = [ "compressed-nu", "singleton" ]
 }>
 
 #CSR = #sparse_tensor.encoding<{
-  dimLevelType = [ "dense", "compressed" ],
+  lvlTypes = [ "dense", "compressed" ],
   posWidth = 32,
   crdWidth = 32
 }>