diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
@@ -451,7 +451,7 @@
                StridedMemRefRankOf<[Index],[1]>:$added,
                Index:$count,
                AnySparseTensor:$tensor,
-               Variadic<Index>:$indices)>,
+               Variadic<Index>:$indices)>,
     Results<(outs AnySparseTensor:$result)> {
   string summary = "Compressed an access pattern for insertion";
   string description = [{
@@ -477,8 +477,8 @@
   }];
   let assemblyFormat = "$values `,` $filled `,` $added `,` $count"
                        " `into` $tensor `[` $indices `]` attr-dict"
-                       " `:` type($values) `,` type($filled) `,` type($added)"
-                       " `,` type($tensor)";
+                       " `:` type($values) `,` type($filled) `,` type($added)"
+                       " `,` type($tensor)";
   let hasVerifier = 1;
 }
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir
@@ -83,8 +83,8 @@
     %v2 = arith.constant sparse<
        [ [0], [3], [5], [11], [13], [17], [18], [21], [31] ],
        [ -2147483648, -2147483647, -1000, -1, 0,
-          1, 1000, 2147483646, 2147483647
-       ]
+          1, 1000, 2147483646, 2147483647
+       ]
     > : tensor<32xi32>
     %sv1 = sparse_tensor.convert %v1 : tensor<32xf64> to tensor
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
@@ -48,7 +48,7 @@
   //
   func.func @kernel_flatten(%arga: tensor<7x3x3x3x3x3x5x3xf64, #SparseTensor>,
                             %argx: tensor<7x3xf64>)
-      -> tensor<7x3xf64> {
+      -> tensor<7x3xf64> {
     %0 = linalg.generic #trait_flatten
       ins(%arga: tensor<7x3x3x3x3x3x5x3xf64, #SparseTensor>)
      outs(%argx: tensor<7x3xf64>) {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
@@ -55,7 +55,7 @@
   func.func @kernel_matvec(%arga: tensor,
                            %argb: tensor,
                            %argx: tensor)
-      -> tensor {
+      -> tensor {
     %0 = linalg.generic #matvec
       ins(%arga, %argb: tensor, tensor)
      outs(%argx: tensor) {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
@@ -49,7 +49,7 @@
                          %argc: tensor,
                          %argd: tensor,
                          %arga: tensor)
-      -> tensor {
+      -> tensor {
     %0 = linalg.generic #mttkrp
       ins(%argb, %argc, %argd:
           tensor, tensor, tensor)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
@@ -36,7 +36,7 @@
 module {
   func.func @redsum(%arga: tensor,
                     %argb: tensor)
-      -> tensor {
+      -> tensor {
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index
     %d0 = tensor.dim %arga, %c0 : tensor
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
@@ -59,7 +59,7 @@
     %0 = call @quantized_matmul(%input1, %sparse_input2, %output)
       : (tensor<5x3xi8>, tensor<3x6xi8, #DCSR>,
-         tensor<5x6xi32>) -> tensor<5x6xi32>
+         tensor<5x6xi32>) -> tensor<5x6xi32>
     //
     // Verify the output.