diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir
@@ -9,6 +9,7 @@
 //
 // TODO: support slices on lib path
+
 
 #CSR = #sparse_tensor.encoding<{
   dimLevelType = [ "dense", "compressed" ]
 }>
@@ -23,6 +24,21 @@
   slice = [ (?, ?, ?), (?, ?, ?) ]
 }>
 
+#COO = #sparse_tensor.encoding<{
+  dimLevelType = [ "compressed-nu", "singleton" ]
+}>
+
+#COO_SLICE = #sparse_tensor.encoding<{
+  dimLevelType = [ "compressed-nu", "singleton" ],
+  slice = [ (1, 4, 1), (1, 4, 2) ]
+}>
+
+#COO_SLICE_DYN = #sparse_tensor.encoding<{
+  dimLevelType = [ "compressed-nu", "singleton" ],
+  slice = [ (?, ?, ?), (?, ?, ?) ]
+}>
+
+
 module {
   func.func @foreach_print_non_slice(%A: tensor<4x4xf64, #CSR>) {
@@ -55,6 +71,26 @@
     return
   }
 
+  func.func @foreach_print_slice_coo(%A: tensor<4x4xf64, #COO_SLICE>) {
+    sparse_tensor.foreach in %A : tensor<4x4xf64, #COO_SLICE> do {
+    ^bb0(%1: index, %2: index, %v: f64) :
+      vector.print %1: index
+      vector.print %2: index
+      vector.print %v: f64
+    }
+    return
+  }
+
+  func.func @foreach_print_slice_coo_dyn(%A: tensor<?x?xf64, #COO_SLICE_DYN>) {
+    sparse_tensor.foreach in %A : tensor<?x?xf64, #COO_SLICE_DYN> do {
+    ^bb0(%1: index, %2: index, %v: f64) :
+      vector.print %1: index
+      vector.print %2: index
+      vector.print %v: f64
+    }
+    return
+  }
+
   func.func @entry() {
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index
@@ -76,6 +112,10 @@
 
     %tmp = sparse_tensor.convert %sa : tensor<8x8xf64> to tensor<8x8xf64, #CSR>
     %a = tensor.extract_slice %tmp[1, 1][4, 4][1, 2] : tensor<8x8xf64, #CSR> to
           tensor<4x4xf64, #CSR_SLICE>
+
+    %tmp_coo = sparse_tensor.convert %sa : tensor<8x8xf64> to tensor<8x8xf64, #COO>
+    %a_coo = tensor.extract_slice %tmp_coo[1, 1][4, 4][1, 2] : tensor<8x8xf64, #COO> to
+          tensor<4x4xf64, #COO_SLICE>
     // Foreach on sparse tensor slices directly
     //
     // CHECK: 1
@@ -89,6 +129,18 @@
     // CHECK-NEXT: 2.1
     //
     call @foreach_print_slice(%a) : (tensor<4x4xf64, #CSR_SLICE>) -> ()
+    // Same results for COO
+    // CHECK-NEXT: 1
+    // CHECK-NEXT: 0
+    // CHECK-NEXT: 2.3
+    // CHECK-NEXT: 2
+    // CHECK-NEXT: 3
+    // CHECK-NEXT: 1
+    // CHECK-NEXT: 3
+    // CHECK-NEXT: 2
+    // CHECK-NEXT: 2.1
+    //
+    call @foreach_print_slice_coo(%a_coo) : (tensor<4x4xf64, #COO_SLICE>) -> ()
 
     %dense = tensor.extract_slice %sa[1, 1][4, 4][1, 2] : tensor<8x8xf64> to
           tensor<4x4xf64>
@@ -111,8 +163,12 @@
     // TODO: Investigates why reusing the same %tmp above would cause bufferization
     // errors.
     %tmp1 = sparse_tensor.convert %sa : tensor<8x8xf64> to tensor<8x8xf64, #CSR>
-    %a_dyn = tensor.extract_slice %tmp1[%c1, %c1][%c4, %c4][%c1, %c2] :
-             tensor<8x8xf64, #CSR> to tensor<?x?xf64, #CSR_SLICE_DYN>
+    %a_dyn = tensor.extract_slice %tmp1[%c1, %c1][%c4, %c4][%c1, %c2] : tensor<8x8xf64, #CSR> to
+             tensor<?x?xf64, #CSR_SLICE_DYN>
+
+    %tmp1_coo = sparse_tensor.convert %sa : tensor<8x8xf64> to tensor<8x8xf64, #COO>
+    %a_dyn_coo = tensor.extract_slice %tmp1_coo[%c1, %c1][%c4, %c4][%c1, %c2] : tensor<8x8xf64, #COO> to
+             tensor<?x?xf64, #COO_SLICE_DYN>
     //
     // CHECK-NEXT: 1
     // CHECK-NEXT: 0
@@ -125,9 +181,22 @@
     // CHECK-NEXT: 2.1
     //
     call @foreach_print_slice_dyn(%a_dyn) : (tensor<?x?xf64, #CSR_SLICE_DYN>) -> ()
+    // CHECK-NEXT: 1
+    // CHECK-NEXT: 0
+    // CHECK-NEXT: 2.3
+    // CHECK-NEXT: 2
+    // CHECK-NEXT: 3
+    // CHECK-NEXT: 1
+    // CHECK-NEXT: 3
+    // CHECK-NEXT: 2
+    // CHECK-NEXT: 2.1
+    //
+    call @foreach_print_slice_coo_dyn(%a_dyn_coo) : (tensor<?x?xf64, #COO_SLICE_DYN>) -> ()
 
     bufferization.dealloc_tensor %tmp : tensor<8x8xf64, #CSR>
     bufferization.dealloc_tensor %tmp1 : tensor<8x8xf64, #CSR>
+    bufferization.dealloc_tensor %tmp_coo : tensor<8x8xf64, #COO>
+    bufferization.dealloc_tensor %tmp1_coo : tensor<8x8xf64, #COO>
     bufferization.dealloc_tensor %b : tensor<4x4xf64, #CSR>
     return
   }