diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir new file mode 100755 --- /dev/null +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir @@ -0,0 +1,238 @@ +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} +// +// Do the same run, but now with direct IR generation. +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} +// +// Do the same run, but now with direct IR generation and vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} + +#SparseVector = #sparse_tensor.encoding<{ + dimLevelType = ["compressed"] +}> + +#SparseMatrix = #sparse_tensor.encoding<{ + dimLevelType = ["compressed", "compressed"] +}> + +#Sparse3dTensor = #sparse_tensor.encoding<{ + dimLevelType = ["compressed", "compressed", "compressed"] +}> + +#Sparse4dTensor = #sparse_tensor.encoding<{ + dimLevelType = ["compressed", "compressed", "compressed", "compressed"] +}> + +// +// Test with various forms of the two most elementary reshape +// operations: collapse. 
+//
+module {
+
+  func.func @collapse_dense(%arg0: tensor<3x4xf64>) -> tensor<12xf64> {
+    %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64> into tensor<12xf64>
+    return %0 : tensor<12xf64>
+  }
+
+  func.func @collapse_from_sparse(%arg0: tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64> {
+    %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64, #SparseMatrix> into tensor<12xf64>
+    return %0 : tensor<12xf64>
+  }
+
+  func.func @collapse_to_sparse(%arg0: tensor<3x4xf64>) -> tensor<12xf64, #SparseVector> {
+    %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64> into tensor<12xf64, #SparseVector>
+    return %0 : tensor<12xf64, #SparseVector>
+  }
+
+  func.func @collapse_sparse2sparse(%arg0: tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64, #SparseVector> {
+    %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64, #SparseMatrix> into tensor<12xf64, #SparseVector>
+    return %0 : tensor<12xf64, #SparseVector>
+  }
+
+  func.func @collapse_dense_6x10(%arg0: tensor<2x3x5x2xf64>) -> tensor<6x10xf64> {
+    %0 = tensor.collapse_shape %arg0 [[0, 1], [2, 3]] : tensor<2x3x5x2xf64> into tensor<6x10xf64>
+    return %0 : tensor<6x10xf64>
+  }
+
+  func.func @collapse_from_sparse_6x10(%arg0: tensor<2x3x5x2xf64, #Sparse4dTensor>) -> tensor<6x10xf64> {
+    %0 = tensor.collapse_shape %arg0 [[0, 1], [2, 3]] : tensor<2x3x5x2xf64, #Sparse4dTensor> into tensor<6x10xf64>
+    return %0 : tensor<6x10xf64>
+  }
+
+  func.func @collapse_to_sparse_6x10(%arg0: tensor<2x3x5x2xf64>) -> tensor<6x10xf64, #SparseMatrix> {
+    %0 = tensor.collapse_shape %arg0 [[0, 1], [2, 3]] : tensor<2x3x5x2xf64> into tensor<6x10xf64, #SparseMatrix>
+    return %0 : tensor<6x10xf64, #SparseMatrix>
+  }
+
+  func.func @collapse_sparse2sparse_6x10(%arg0: tensor<2x3x5x2xf64, #Sparse4dTensor>) -> tensor<6x10xf64, #SparseMatrix> {
+    %0 = tensor.collapse_shape %arg0 [[0, 1], [2, 3]] : tensor<2x3x5x2xf64, #Sparse4dTensor> into tensor<6x10xf64, #SparseMatrix>
+    return %0 : tensor<6x10xf64, #SparseMatrix>
+  }
+
+  func.func @collapse_dense_dyn(%arg0: tensor<?x?x?x?xf64>) -> tensor<?x?xf64> {
+    %0 = tensor.collapse_shape %arg0 [[0, 1], [2, 3]] : tensor<?x?x?x?xf64> into tensor<?x?xf64>
+    return %0 : tensor<?x?xf64>
+  }
+
+  func.func @collapse_from_sparse_dyn(%arg0: tensor<?x?x?x?xf64, #Sparse4dTensor>) -> tensor<?x?xf64> {
+    %0 = tensor.collapse_shape %arg0 [[0, 1], [2, 3]] : tensor<?x?x?x?xf64, #Sparse4dTensor> into tensor<?x?xf64>
+    return %0 : tensor<?x?xf64>
+  }
+
+  func.func @collapse_to_sparse_dyn(%arg0: tensor<?x?x?x?xf64>) -> tensor<?x?xf64, #SparseMatrix> {
+    %0 = tensor.collapse_shape %arg0 [[0, 1], [2, 3]] : tensor<?x?x?x?xf64> into tensor<?x?xf64, #SparseMatrix>
+    return %0 : tensor<?x?xf64, #SparseMatrix>
+  }
+
+  func.func @collapse_sparse2sparse_dyn(%arg0: tensor<?x?x?x?xf64, #Sparse4dTensor>) -> tensor<?x?xf64, #SparseMatrix> {
+    %0 = tensor.collapse_shape %arg0 [[0, 1], [2, 3]] : tensor<?x?x?x?xf64, #Sparse4dTensor> into tensor<?x?xf64, #SparseMatrix>
+    return %0 : tensor<?x?xf64, #SparseMatrix>
+  }
+
+  //
+  // Main driver.
+  //
+  func.func @entry() {
+    %c0 = arith.constant 0 : index
+    %df = arith.constant -1.0 : f64
+
+    // Setup test vectors and matrices.
+    %m = arith.constant dense <[ [ 1.1, 0.0, 1.3, 0.0 ],
+                                 [ 2.1, 0.0, 2.3, 0.0 ],
+                                 [ 3.1, 0.0, 3.3, 0.0 ]]> : tensor<3x4xf64>
+    %n = arith.constant dense <[
+      [ [[ 1.0, 0.0], [ 3.0, 0.0], [ 5.0, 0.0], [ 7.0, 0.0], [ 9.0, 0.0]],
+        [[ 0.0, 0.0], [ 0.0, 0.0], [ 0.0, 0.0], [ 0.0, 0.0], [ 0.0, 0.0]],
+        [[21.0, 0.0], [23.0, 0.0], [25.0, 0.0], [27.0, 0.0], [29.0, 0.0]] ],
+      [ [[ 0.0, 0.0], [ 0.0, 0.0], [ 0.0, 0.0], [ 0.0, 0.0], [ 0.0, 0.0]],
+        [[41.0, 0.0], [43.0, 0.0], [45.0, 0.0], [47.0, 0.0], [49.0, 0.0]],
+        [[ 0.0, 0.0], [ 0.0, 0.0], [ 0.0, 0.0], [ 0.0, 0.0], [ 0.0, 0.0]] ] ]> : tensor<2x3x5x2xf64>
+    %sm = sparse_tensor.convert %m : tensor<3x4xf64> to tensor<3x4xf64, #SparseMatrix>
+    %sn = sparse_tensor.convert %n : tensor<2x3x5x2xf64> to tensor<2x3x5x2xf64, #Sparse4dTensor>
+
+    %dm = tensor.cast %m : tensor<3x4xf64> to tensor<?x?xf64>
+
+    %dn = tensor.cast %n : tensor<2x3x5x2xf64> to tensor<?x?x?x?xf64>
+    %sdn = sparse_tensor.convert %dn : tensor<?x?x?x?xf64> to tensor<?x?x?x?xf64, #Sparse4dTensor>
+
+    // Call the kernels.
+    %collapse0 = call @collapse_dense(%m) : (tensor<3x4xf64>) -> tensor<12xf64>
+    %collapse1 = call @collapse_from_sparse(%sm) : (tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64>
+    %collapse2 = call @collapse_to_sparse(%m) : (tensor<3x4xf64>) -> tensor<12xf64, #SparseVector>
+    %collapse3 = call @collapse_sparse2sparse(%sm) : (tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64, #SparseVector>
+    %collapse4 = call @collapse_dense_6x10(%n) : (tensor<2x3x5x2xf64>) -> tensor<6x10xf64>
+    %collapse5 = call @collapse_from_sparse_6x10(%sn) : (tensor<2x3x5x2xf64, #Sparse4dTensor>) -> tensor<6x10xf64>
+    %collapse6 = call @collapse_to_sparse_6x10(%n) : (tensor<2x3x5x2xf64>) -> tensor<6x10xf64, #SparseMatrix>
+    %collapse7 = call @collapse_sparse2sparse_6x10(%sn) : (tensor<2x3x5x2xf64, #Sparse4dTensor>) -> tensor<6x10xf64, #SparseMatrix>
+    %collapse8 = call @collapse_dense_dyn(%dn) : (tensor<?x?x?x?xf64>) -> tensor<?x?xf64>
+    %collapse9 = call @collapse_from_sparse_dyn(%sdn) : (tensor<?x?x?x?xf64, #Sparse4dTensor>) -> tensor<?x?xf64>
+    %collapse10 = call @collapse_to_sparse_dyn(%dn) : (tensor<?x?x?x?xf64>) -> tensor<?x?xf64, #SparseMatrix>
+    %collapse11 = call @collapse_sparse2sparse_dyn(%sdn) : (tensor<?x?x?x?xf64, #Sparse4dTensor>) -> tensor<?x?xf64, #SparseMatrix>
+
+    //
+    // Verify results of collapse
+    //
+    // CHECK:      ( 1.1, 0, 1.3, 0, 2.1, 0, 2.3, 0, 3.1, 0, 3.3, 0 )
+    // CHECK-NEXT: ( 1.1, 0, 1.3, 0, 2.1, 0, 2.3, 0, 3.1, 0, 3.3, 0 )
+    // CHECK-NEXT: ( 1.1, 1.3, 2.1, 2.3, 3.1, 3.3
+    // CHECK-NEXT: ( 1.1, 1.3, 2.1, 2.3, 3.1, 3.3
+    // CHECK-NEXT: ( ( 1, 0, 3, 0, 5, 0, 7, 0, 9, 0 ),
+    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ),
+    // CHECK-SAME:   ( 21, 0, 23, 0, 25, 0, 27, 0, 29, 0 ),
+    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ),
+    // CHECK-SAME:   ( 41, 0, 43, 0, 45, 0, 47, 0, 49, 0 ),
+    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) )
+    // CHECK-NEXT: ( ( 1, 0, 3, 0, 5, 0, 7, 0, 9, 0 ),
+    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ),
+    // CHECK-SAME:   ( 21, 0, 23, 0, 25, 0, 27, 0, 29, 0 ),
+    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ),
+    // CHECK-SAME:   ( 41, 0, 43, 0, 45, 0, 47, 0, 49, 0 ),
+    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) )
+    // CHECK-NEXT: ( 1, 3, 5, 7, 9, 21, 23, 25, 27, 29, 41, 43, 45, 47
+    // CHECK-NEXT: ( 1, 3, 5, 7, 9, 21, 23, 25, 27, 29, 41, 43, 45, 47
+    // CHECK-NEXT: ( ( 1, 0, 3, 0, 5, 0, 7, 0, 9, 0 ),
+    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ),
+    // CHECK-SAME:   ( 21, 0, 23, 0, 25, 0, 27, 0, 29, 0 ),
+    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ),
+    // CHECK-SAME:   ( 41, 0, 43, 0, 45, 0, 47, 0, 49, 0 ),
+    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) )
+    // CHECK-NEXT: ( ( 1, 0, 3, 0, 5, 0, 7, 0, 9, 0 ),
+    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ),
+    // CHECK-SAME:   ( 21, 0, 23, 0, 25, 0, 27, 0, 29, 0 ),
+    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ),
+    // CHECK-SAME:   ( 41, 0, 43, 0, 45, 0, 47, 0, 49, 0 ),
+    // CHECK-SAME:   ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) )
+    // CHECK-NEXT: ( 1, 3, 5, 7, 9, 21, 23, 25, 27, 29, 41, 43, 45, 47, 49
+    // CHECK-NEXT: ( 1, 3, 5, 7, 9, 21, 23, 25, 27, 29, 41, 43, 45, 47, 49
+
+    %v0 = vector.transfer_read %collapse0[%c0], %df: tensor<12xf64>, vector<12xf64>
+    vector.print %v0 : vector<12xf64>
+    %v1 = vector.transfer_read %collapse1[%c0], %df: tensor<12xf64>, vector<12xf64>
+    vector.print %v1 : vector<12xf64>
+    %b2 = sparse_tensor.values %collapse2 : tensor<12xf64, #SparseVector> to memref<?xf64>
+    %v2 = vector.transfer_read %b2[%c0], %df: memref<?xf64>, vector<12xf64>
+    vector.print %v2 : vector<12xf64>
+    %b3 = sparse_tensor.values %collapse3 : tensor<12xf64, #SparseVector> to memref<?xf64>
+    %v3 = vector.transfer_read %b3[%c0], %df: memref<?xf64>, vector<12xf64>
+    vector.print %v3 : vector<12xf64>
+
+    %v4 = vector.transfer_read %collapse4[%c0, %c0], %df: tensor<6x10xf64>, vector<6x10xf64>
+    vector.print %v4 : vector<6x10xf64>
+    %v5 = vector.transfer_read %collapse5[%c0, %c0], %df: tensor<6x10xf64>, vector<6x10xf64>
+    vector.print %v5 : vector<6x10xf64>
+    %b6 = sparse_tensor.values %collapse6 : tensor<6x10xf64, #SparseMatrix> to memref<?xf64>
+    %v6 = vector.transfer_read %b6[%c0], %df: memref<?xf64>, vector<60xf64>
+    vector.print %v6 : vector<60xf64>
+    %b7 = sparse_tensor.values %collapse7 : tensor<6x10xf64, #SparseMatrix> to memref<?xf64>
+    %v7 = vector.transfer_read %b7[%c0], %df: memref<?xf64>, vector<60xf64>
+    vector.print %v7 : vector<60xf64>
+
+    %v8 = vector.transfer_read %collapse8[%c0, %c0], %df: tensor<?x?xf64>, vector<6x10xf64>
+    vector.print %v8 : vector<6x10xf64>
+    %v9 = vector.transfer_read %collapse9[%c0, %c0], %df: tensor<?x?xf64>, vector<6x10xf64>
+    vector.print %v9 : vector<6x10xf64>
+    %b10 = sparse_tensor.values %collapse10 : tensor<?x?xf64, #SparseMatrix> to memref<?xf64>
+    %v10 = vector.transfer_read %b10[%c0], %df: memref<?xf64>, vector<60xf64>
+    vector.print %v10 : vector<60xf64>
+    %b11 = sparse_tensor.values %collapse11 : tensor<?x?xf64, #SparseMatrix> to memref<?xf64>
+    %v11 = vector.transfer_read %b11[%c0], %df: memref<?xf64>, vector<60xf64>
+    vector.print %v11 : vector<60xf64>
+
+    // Release sparse resources.
+    bufferization.dealloc_tensor %sm : tensor<3x4xf64, #SparseMatrix>
+    bufferization.dealloc_tensor %sn : tensor<2x3x5x2xf64, #Sparse4dTensor>
+    bufferization.dealloc_tensor %sdn : tensor<?x?x?x?xf64, #Sparse4dTensor>
+    bufferization.dealloc_tensor %collapse2 : tensor<12xf64, #SparseVector>
+    bufferization.dealloc_tensor %collapse3 : tensor<12xf64, #SparseVector>
+    bufferization.dealloc_tensor %collapse6 : tensor<6x10xf64, #SparseMatrix>
+    bufferization.dealloc_tensor %collapse7 : tensor<6x10xf64, #SparseMatrix>
+    bufferization.dealloc_tensor %collapse10 : tensor<?x?xf64, #SparseMatrix>
+    bufferization.dealloc_tensor %collapse11 : tensor<?x?xf64, #SparseMatrix>
+
+    // Release dense resources.
+    bufferization.dealloc_tensor %collapse1 : tensor<12xf64>
+    bufferization.dealloc_tensor %collapse5 : tensor<6x10xf64>
+    bufferization.dealloc_tensor %collapse9 : tensor<?x?xf64>
+
+    return
+  }
+}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir
new file mode 100644
--- /dev/null
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir
@@ -0,0 +1,220 @@
+// DEFINE: %{option} = enable-runtime-library=true
+// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option}
+// DEFINE: %{run} = mlir-cpu-runner \
+// DEFINE:  -e entry -entry-point-result=void \
+// DEFINE:  -shared-libs=%mlir_c_runner_utils | \
+// DEFINE: FileCheck %s
+//
+// RUN: %{compile} | %{run}
+//
+// Do the same run, but now with direct IR generation.
+// REDEFINE: %{option} = enable-runtime-library=false
+// RUN: %{compile} | %{run}
+//
+// Do the same run, but now with direct IR generation and vectorization.
+// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true"
+// RUN: %{compile} | %{run}
+
+// Do the same run, but now with direct IR generation and, if available, VLA
+// vectorization.
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA"
+// REDEFINE: %{run} = %lli \
+// REDEFINE:   --entry-function=entry_lli \
+// REDEFINE:   --extra-module=%S/Inputs/main_for_lli.ll \
+// REDEFINE:   %VLA_ARCH_ATTR_OPTIONS \
+// REDEFINE:   --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \
+// REDEFINE: FileCheck %s
+// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}
+
+#SparseVector = #sparse_tensor.encoding<{
+  dimLevelType = ["compressed"]
+}>
+
+#SparseMatrix = #sparse_tensor.encoding<{
+  dimLevelType = ["compressed", "compressed"]
+}>
+
+#Sparse3dTensor = #sparse_tensor.encoding<{
+  dimLevelType = ["compressed", "compressed", "compressed"]
+}>
+
+#Sparse4dTensor = #sparse_tensor.encoding<{
+  dimLevelType = ["compressed", "compressed", "compressed", "compressed"]
+}>
+
+//
+// Test with various forms of the two most elementary reshape
+// operations: expand.
+//
+module {
+
+  func.func @expand_dense(%arg0: tensor<12xf64>) -> tensor<3x4xf64> {
+    %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64> into tensor<3x4xf64>
+    return %0 : tensor<3x4xf64>
+  }
+
+  func.func @expand_from_sparse(%arg0: tensor<12xf64, #SparseVector>) -> tensor<3x4xf64> {
+    %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64, #SparseVector> into tensor<3x4xf64>
+    return %0 : tensor<3x4xf64>
+  }
+
+  func.func @expand_to_sparse(%arg0: tensor<12xf64>) -> tensor<3x4xf64, #SparseMatrix> {
+    %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64> into tensor<3x4xf64, #SparseMatrix>
+    return %0 : tensor<3x4xf64, #SparseMatrix>
+  }
+
+  func.func @expand_sparse2sparse(%arg0: tensor<12xf64, #SparseVector>) -> tensor<3x4xf64, #SparseMatrix> {
+    %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64, #SparseVector> into tensor<3x4xf64, #SparseMatrix>
+    return %0 : tensor<3x4xf64, #SparseMatrix>
+  }
+
+  func.func @expand_dense_3x2x2(%arg0: tensor<3x4xf64>) -> tensor<3x2x2xf64> {
+    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor<3x4xf64> into tensor<3x2x2xf64>
+    return %0 : tensor<3x2x2xf64>
+  }
+
+  func.func @expand_from_sparse_3x2x2(%arg0: tensor<3x4xf64, #SparseMatrix>) -> tensor<3x2x2xf64> {
+    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor<3x4xf64, #SparseMatrix> into tensor<3x2x2xf64>
+    return %0 : tensor<3x2x2xf64>
+  }
+
+  func.func @expand_to_sparse_3x2x2(%arg0: tensor<3x4xf64>) -> tensor<3x2x2xf64, #Sparse3dTensor> {
+    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor<3x4xf64> into tensor<3x2x2xf64, #Sparse3dTensor>
+    return %0 : tensor<3x2x2xf64, #Sparse3dTensor>
+  }
+
+  func.func @expand_sparse2sparse_3x2x2(%arg0: tensor<3x4xf64, #SparseMatrix>) -> tensor<3x2x2xf64, #Sparse3dTensor> {
+    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor<3x4xf64, #SparseMatrix> into tensor<3x2x2xf64, #Sparse3dTensor>
+    return %0 : tensor<3x2x2xf64, #Sparse3dTensor>
+  }
+
+  func.func @expand_dense_dyn(%arg0: tensor<?x?xf64>) -> tensor<?x?x?xf64> {
+    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor<?x?xf64> into tensor<?x?x?xf64>
+    return %0 : tensor<?x?x?xf64>
+  }
+
+  func.func @expand_from_sparse_dyn(%arg0: tensor<?x?xf64, #SparseMatrix>) -> tensor<?x?x?xf64> {
+    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor<?x?xf64, #SparseMatrix> into tensor<?x?x?xf64>
+    return %0 : tensor<?x?x?xf64>
+  }
+
+  func.func @expand_to_sparse_dyn(%arg0: tensor<?x?xf64>) -> tensor<?x?x?xf64, #Sparse3dTensor> {
+    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor<?x?xf64> into tensor<?x?x?xf64, #Sparse3dTensor>
+    return %0 : tensor<?x?x?xf64, #Sparse3dTensor>
+  }
+
+  func.func @expand_sparse2sparse_dyn(%arg0: tensor<?x?xf64, #SparseMatrix>) -> tensor<?x?x?xf64, #Sparse3dTensor> {
+    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor<?x?xf64, #SparseMatrix> into tensor<?x?x?xf64, #Sparse3dTensor>
+    return %0 : tensor<?x?x?xf64, #Sparse3dTensor>
+  }
+
+  //
+  // Main driver.
+  //
+  func.func @entry() {
+    %c0 = arith.constant 0 : index
+    %df = arith.constant -1.0 : f64
+
+    // Setup test vectors and matrices.
+    %v = arith.constant dense <[ 1.0, 0.0, 3.0, 0.0, 5.0, 0.0,
+                                 7.0, 0.0, 9.0, 0.0, 11.0, 0.0]> : tensor<12xf64>
+    %m = arith.constant dense <[ [ 1.1, 1.2, 1.3, 1.4 ],
+                                 [ 2.1, 2.2, 2.3, 2.4 ],
+                                 [ 3.1, 3.2, 3.3, 3.4 ]]> : tensor<3x4xf64>
+
+    %sv = sparse_tensor.convert %v : tensor<12xf64> to tensor<12xf64, #SparseVector>
+    %sm = sparse_tensor.convert %m : tensor<3x4xf64> to tensor<3x4xf64, #SparseMatrix>
+
+    %dm = tensor.cast %m : tensor<3x4xf64> to tensor<?x?xf64>
+    %sdm = sparse_tensor.convert %dm : tensor<?x?xf64> to tensor<?x?xf64, #SparseMatrix>
+
+    // Call the kernels.
+    %expand0 = call @expand_dense(%v) : (tensor<12xf64>) -> tensor<3x4xf64>
+    %expand1 = call @expand_from_sparse(%sv) : (tensor<12xf64, #SparseVector>) -> tensor<3x4xf64>
+    %expand2 = call @expand_to_sparse(%v) : (tensor<12xf64>) -> tensor<3x4xf64, #SparseMatrix>
+    %expand3 = call @expand_sparse2sparse(%sv) : (tensor<12xf64, #SparseVector>) -> tensor<3x4xf64, #SparseMatrix>
+    %expand4 = call @expand_dense_3x2x2(%m) : (tensor<3x4xf64>) -> tensor<3x2x2xf64>
+    %expand5 = call @expand_from_sparse_3x2x2(%sm) : (tensor<3x4xf64, #SparseMatrix>) -> tensor<3x2x2xf64>
+    %expand6 = call @expand_to_sparse_3x2x2(%m) : (tensor<3x4xf64>) -> tensor<3x2x2xf64, #Sparse3dTensor>
+    %expand7 = call @expand_sparse2sparse_3x2x2(%sm) : (tensor<3x4xf64, #SparseMatrix>) -> tensor<3x2x2xf64, #Sparse3dTensor>
+    %expand8 = call @expand_dense_dyn(%dm) : (tensor<?x?xf64>) -> tensor<?x?x?xf64>
+    %expand9 = call @expand_from_sparse_dyn(%sdm) : (tensor<?x?xf64, #SparseMatrix>) -> tensor<?x?x?xf64>
+    %expand10 = call @expand_to_sparse_dyn(%dm) : (tensor<?x?xf64>) -> tensor<?x?x?xf64, #Sparse3dTensor>
+    %expand11 = call @expand_sparse2sparse_dyn(%sdm) : (tensor<?x?xf64, #SparseMatrix>) -> tensor<?x?x?xf64, #Sparse3dTensor>
+
+    //
+    // Verify results of expand
+    //
+    // CHECK:      ( ( 1, 0, 3, 0 ), ( 5, 0, 7, 0 ), ( 9, 0, 11, 0 ) )
+    // CHECK-NEXT: ( ( 1, 0, 3, 0 ), ( 5, 0, 7, 0 ), ( 9, 0, 11, 0 ) )
+    // CHECK-NEXT: ( 1, 3, 5, 7, 9,
+    // CHECK-NEXT: ( 1, 3, 5, 7, 9,
+    // CHECK-NEXT: ( ( ( 1.1, 1.2 ), ( 1.3, 1.4 ) ), ( ( 2.1, 2.2 ), ( 2.3, 2.4 ) ), ( ( 3.1, 3.2 ), ( 3.3, 3.4 ) ) )
+    // CHECK-NEXT: ( ( ( 1.1, 1.2 ), ( 1.3, 1.4 ) ), ( ( 2.1, 2.2 ), ( 2.3, 2.4 ) ), ( ( 3.1, 3.2 ), ( 3.3, 3.4 ) ) )
+    // CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4 )
+    // CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4 )
+    // CHECK-NEXT: ( ( ( 1.1, 1.2 ), ( 1.3, 1.4 ) ), ( ( 2.1, 2.2 ), ( 2.3, 2.4 ) ), ( ( 3.1, 3.2 ), ( 3.3, 3.4 ) ) )
+    // CHECK-NEXT: ( ( ( 1.1, 1.2 ), ( 1.3, 1.4 ) ), ( ( 2.1, 2.2 ), ( 2.3, 2.4 ) ), ( ( 3.1, 3.2 ), ( 3.3, 3.4 ) ) )
+    // CHECK-NEXT: 12
+    // CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4 )
+    // CHECK-NEXT: 12
+    // CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4 )
+    //
+
+    %m0 = vector.transfer_read %expand0[%c0, %c0], %df: tensor<3x4xf64>, vector<3x4xf64>
+    vector.print %m0 : vector<3x4xf64>
+    %m1 = vector.transfer_read %expand1[%c0, %c0], %df: tensor<3x4xf64>, vector<3x4xf64>
+    vector.print %m1 : vector<3x4xf64>
+    %a2 = sparse_tensor.values %expand2 : tensor<3x4xf64, #SparseMatrix> to memref<?xf64>
+    %m2 = vector.transfer_read %a2[%c0], %df: memref<?xf64>, vector<12xf64>
+    vector.print %m2 : vector<12xf64>
+    %a3 = sparse_tensor.values %expand3 : tensor<3x4xf64, #SparseMatrix> to memref<?xf64>
+    %m3 = vector.transfer_read %a3[%c0], %df: memref<?xf64>, vector<12xf64>
+    vector.print %m3 : vector<12xf64>
+
+    %m4 = vector.transfer_read %expand4[%c0, %c0, %c0], %df: tensor<3x2x2xf64>, vector<3x2x2xf64>
+    vector.print %m4 : vector<3x2x2xf64>
+    %m5 = vector.transfer_read %expand5[%c0, %c0, %c0], %df: tensor<3x2x2xf64>, vector<3x2x2xf64>
+    vector.print %m5 : vector<3x2x2xf64>
+    %a6 = sparse_tensor.values %expand6 : tensor<3x2x2xf64, #Sparse3dTensor> to memref<?xf64>
+    %m6 = vector.transfer_read %a6[%c0], %df: memref<?xf64>, vector<12xf64>
+    vector.print %m6 : vector<12xf64>
+    %a7 = sparse_tensor.values %expand7 : tensor<3x2x2xf64, #Sparse3dTensor> to memref<?xf64>
+    %m7 = vector.transfer_read %a7[%c0], %df: memref<?xf64>, vector<12xf64>
+    vector.print %m7 : vector<12xf64>
+
+    %m8 = vector.transfer_read %expand8[%c0, %c0, %c0], %df: tensor<?x?x?xf64>, vector<3x2x2xf64>
+    vector.print %m8 : vector<3x2x2xf64>
+    %m9 = vector.transfer_read %expand9[%c0, %c0, %c0], %df: tensor<?x?x?xf64>, vector<3x2x2xf64>
+    vector.print %m9 : vector<3x2x2xf64>
+    %n10 = sparse_tensor.number_of_entries %expand10 : tensor<?x?x?xf64, #Sparse3dTensor>
+    vector.print %n10 : index
+    %a10 = sparse_tensor.values %expand10 : tensor<?x?x?xf64, #Sparse3dTensor> to memref<?xf64>
+    %m10 = vector.transfer_read %a10[%c0], %df: memref<?xf64>, vector<12xf64>
+    vector.print %m10 : vector<12xf64>
+    %n11 = sparse_tensor.number_of_entries %expand11 : tensor<?x?x?xf64, #Sparse3dTensor>
+    vector.print %n11 : index
+    %a11 = sparse_tensor.values %expand11 : tensor<?x?x?xf64, #Sparse3dTensor> to memref<?xf64>
+    %m11 = vector.transfer_read %a11[%c0], %df: memref<?xf64>, vector<12xf64>
+    vector.print %m11 : vector<12xf64>
+
+
+    // Release sparse resources.
+    bufferization.dealloc_tensor %sv : tensor<12xf64, #SparseVector>
+    bufferization.dealloc_tensor %sm : tensor<3x4xf64, #SparseMatrix>
+    bufferization.dealloc_tensor %sdm : tensor<?x?xf64, #SparseMatrix>
+    bufferization.dealloc_tensor %expand2 : tensor<3x4xf64, #SparseMatrix>
+    bufferization.dealloc_tensor %expand3 : tensor<3x4xf64, #SparseMatrix>
+    bufferization.dealloc_tensor %expand6 : tensor<3x2x2xf64, #Sparse3dTensor>
+    bufferization.dealloc_tensor %expand7 : tensor<3x2x2xf64, #Sparse3dTensor>
+    bufferization.dealloc_tensor %expand10 : tensor<?x?x?xf64, #Sparse3dTensor>
+    bufferization.dealloc_tensor %expand11 : tensor<?x?x?xf64, #Sparse3dTensor>
+
+    // Release dense resources.
+    bufferization.dealloc_tensor %expand1 : tensor<3x4xf64>
+    bufferization.dealloc_tensor %expand5 : tensor<3x2x2xf64>
+    bufferization.dealloc_tensor %expand9 : tensor<?x?x?xf64>
+
+    return
+  }
+}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir
deleted file mode 100755
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir
+++ /dev/null
@@ -1,365 +0,0 @@
-// DEFINE: %{option} = enable-runtime-library=true
-// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option}
-// DEFINE: %{run} = mlir-cpu-runner \
-// DEFINE:  -e entry -entry-point-result=void \
-// DEFINE:  -shared-libs=%mlir_c_runner_utils | \
-// DEFINE: FileCheck %s
-//
-// RUN: %{compile} | %{run}
-//
-// Do the same run, but now with direct IR generation.
-// REDEFINE: %{option} = enable-runtime-library=false
-// RUN: %{compile} | %{run}
-//
-// Do the same run, but now with direct IR generation and vectorization.
-// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true"
-// RUN: %{compile} | %{run}
-
-// Do the same run, but now with direct IR generation and, if available, VLA
-// vectorization.
-// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA"
-// REDEFINE: %{run} = %lli \
-// REDEFINE:   --entry-function=entry_lli \
-// REDEFINE:   --extra-module=%S/Inputs/main_for_lli.ll \
-// REDEFINE:   %VLA_ARCH_ATTR_OPTIONS \
-// REDEFINE:   --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \
-// REDEFINE: FileCheck %s
-// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}
-
-#SparseVector = #sparse_tensor.encoding<{
-  dimLevelType = ["compressed"]
-}>
-
-#SparseMatrix = #sparse_tensor.encoding<{
-  dimLevelType = ["compressed", "compressed"]
-}>
-
-#Sparse3dTensor = #sparse_tensor.encoding<{
-  dimLevelType = ["compressed", "compressed", "compressed"]
-}>
-
-#Sparse4dTensor = #sparse_tensor.encoding<{
-  dimLevelType = ["compressed", "compressed", "compressed", "compressed"]
-}>
-
-//
-// Test with various forms of the two most elementary reshape
-// operations: expand/collapse.
-// -module { - - func.func @expand_dense(%arg0: tensor<12xf64>) -> tensor<3x4xf64> { - %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64> into tensor<3x4xf64> - return %0 : tensor<3x4xf64> - } - - func.func @expand_from_sparse(%arg0: tensor<12xf64, #SparseVector>) -> tensor<3x4xf64> { - %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64, #SparseVector> into tensor<3x4xf64> - return %0 : tensor<3x4xf64> - } - - func.func @expand_to_sparse(%arg0: tensor<12xf64>) -> tensor<3x4xf64, #SparseMatrix> { - %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64> into tensor<3x4xf64, #SparseMatrix> - return %0 : tensor<3x4xf64, #SparseMatrix> - } - - func.func @expand_sparse2sparse(%arg0: tensor<12xf64, #SparseVector>) -> tensor<3x4xf64, #SparseMatrix> { - %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64, #SparseVector> into tensor<3x4xf64, #SparseMatrix> - return %0 : tensor<3x4xf64, #SparseMatrix> - } - - func.func @collapse_dense(%arg0: tensor<3x4xf64>) -> tensor<12xf64> { - %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64> into tensor<12xf64> - return %0 : tensor<12xf64> - } - - func.func @collapse_from_sparse(%arg0: tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64> { - %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64, #SparseMatrix> into tensor<12xf64> - return %0 : tensor<12xf64> - } - - func.func @collapse_to_sparse(%arg0: tensor<3x4xf64>) -> tensor<12xf64, #SparseVector> { - %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64> into tensor<12xf64, #SparseVector> - return %0 : tensor<12xf64, #SparseVector> - } - - func.func @collapse_sparse2sparse(%arg0: tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64, #SparseVector> { - %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64, #SparseMatrix> into tensor<12xf64, #SparseVector> - return %0 : tensor<12xf64, #SparseVector> - } - - func.func @expand_dense_3x2x2(%arg0: tensor<3x4xf64>) -> tensor<3x2x2xf64> { - %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor<3x4xf64> into tensor<3x2x2xf64> - return %0 : tensor<3x2x2xf64> - } - - func.func @expand_from_sparse_3x2x2(%arg0: tensor<3x4xf64, #SparseMatrix>) -> tensor<3x2x2xf64> { - %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor<3x4xf64, #SparseMatrix> into tensor<3x2x2xf64> - return %0 : tensor<3x2x2xf64> - } - - func.func @expand_to_sparse_3x2x2(%arg0: tensor<3x4xf64>) -> tensor<3x2x2xf64, #Sparse3dTensor> { - %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor<3x4xf64> into tensor<3x2x2xf64, #Sparse3dTensor> - return %0 : tensor<3x2x2xf64, #Sparse3dTensor> - } - - func.func @expand_sparse2sparse_3x2x2(%arg0: tensor<3x4xf64, #SparseMatrix>) -> tensor<3x2x2xf64, #Sparse3dTensor> { - %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor<3x4xf64, #SparseMatrix> into tensor<3x2x2xf64, #Sparse3dTensor> - return %0 : tensor<3x2x2xf64, #Sparse3dTensor> - } - - func.func @collapse_dense_6x10(%arg0: tensor<2x3x5x2xf64>) -> tensor<6x10xf64> { - %0 = tensor.collapse_shape %arg0 [[0, 1], [2, 3]] : tensor<2x3x5x2xf64> into tensor<6x10xf64> - return %0 : tensor<6x10xf64> - } - - func.func @collapse_from_sparse_6x10(%arg0: tensor<2x3x5x2xf64, #Sparse4dTensor>) -> tensor<6x10xf64> { - %0 = tensor.collapse_shape %arg0 [[0, 1], [2, 3]] : tensor<2x3x5x2xf64, #Sparse4dTensor> into tensor<6x10xf64> - return %0 : tensor<6x10xf64> - } - - func.func @collapse_to_sparse_6x10(%arg0: tensor<2x3x5x2xf64>) -> tensor<6x10xf64, #SparseMatrix> { - %0 = tensor.collapse_shape %arg0 [[0, 1], [2, 3]] : tensor<2x3x5x2xf64> into tensor<6x10xf64, #SparseMatrix> - return %0 : 
tensor<6x10xf64, #SparseMatrix> - } - - func.func @collapse_sparse2sparse_6x10(%arg0: tensor<2x3x5x2xf64, #Sparse4dTensor>) -> tensor<6x10xf64, #SparseMatrix> { - %0 = tensor.collapse_shape %arg0 [[0, 1], [2, 3]] : tensor<2x3x5x2xf64, #Sparse4dTensor> into tensor<6x10xf64, #SparseMatrix> - return %0 : tensor<6x10xf64, #SparseMatrix> - } - - func.func @expand_dense_dyn(%arg0: tensor) -> tensor { - %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor into tensor - return %0 : tensor - } - - func.func @expand_from_sparse_dyn(%arg0: tensor) -> tensor { - %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor into tensor - return %0 : tensor - } - - func.func @expand_to_sparse_dyn(%arg0: tensor) -> tensor { - %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor into tensor - return %0 : tensor - } - - func.func @expand_sparse2sparse_dyn(%arg0: tensor) -> tensor { - %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor into tensor - return %0 : tensor - } - - func.func @collapse_dense_dyn(%arg0: tensor) -> tensor { - %0 = tensor.collapse_shape %arg0 [[0, 1], [2, 3]] : tensor into tensor - return %0 : tensor - } - - func.func @collapse_from_sparse_dyn(%arg0: tensor) -> tensor { - %0 = tensor.collapse_shape %arg0 [[0, 1], [2, 3]] : tensor into tensor - return %0 : tensor - } - - func.func @collapse_to_sparse_dyn(%arg0: tensor) -> tensor { - %0 = tensor.collapse_shape %arg0 [[0, 1], [2, 3]] : tensor into tensor - return %0 : tensor - } - - func.func @collapse_sparse2sparse_dyn(%arg0: tensor) -> tensor { - %0 = tensor.collapse_shape %arg0 [[0, 1], [2, 3]] : tensor into tensor - return %0 : tensor - } - - // - // Main driver. - // - func.func @entry() { - %c0 = arith.constant 0 : index - %df = arith.constant -1.0 : f64 - - // Setup test vectors and matrices.. - %v = arith.constant dense <[ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, - 7.0, 8.0, 9.0, 10.0, 11.0, 12.0]> : tensor<12xf64> - %m = arith.constant dense <[ [ 1.1, 1.2, 1.3, 1.4 ], - [ 2.1, 2.2, 2.3, 2.4 ], - [ 3.1, 3.2, 3.3, 3.4 ]]> : tensor<3x4xf64> - %n = arith.constant dense <[ - [ [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [9.0, 10.0]], - [[11.0, 12.0], [13.0, 14.0], [15.0, 16.0], [17.0, 18.0], [19.0, 20.0]], - [[21.0, 22.0], [23.0, 24.0], [25.0, 26.0], [27.0, 28.0], [29.0, 30.0]] ], - [ [[31.0, 32.0], [33.0, 34.0], [35.0, 36.0], [37.0, 38.0], [39.0, 40.0]], - [[41.0, 42.0], [43.0, 44.0], [45.0, 26.0], [47.0, 48.0], [49.0, 50.0]], - [[51.0, 52.0], [53.0, 54.0], [55.0, 56.0], [57.0, 58.0], [59.0, 60.0]] ] ]> : tensor<2x3x5x2xf64> - %sv = sparse_tensor.convert %v : tensor<12xf64> to tensor<12xf64, #SparseVector> - %sm = sparse_tensor.convert %m : tensor<3x4xf64> to tensor<3x4xf64, #SparseMatrix> - %sn = sparse_tensor.convert %n : tensor<2x3x5x2xf64> to tensor<2x3x5x2xf64, #Sparse4dTensor> - - %dm = tensor.cast %m : tensor<3x4xf64> to tensor - %sdm = sparse_tensor.convert %dm : tensor to tensor - - %dn = tensor.cast %n : tensor<2x3x5x2xf64> to tensor - %sdn = sparse_tensor.convert %dn : tensor to tensor - - // Call the kernels. 
- %expand0 = call @expand_dense(%v) : (tensor<12xf64>) -> tensor<3x4xf64> - %expand1 = call @expand_from_sparse(%sv) : (tensor<12xf64, #SparseVector>) -> tensor<3x4xf64> - %expand2 = call @expand_to_sparse(%v) : (tensor<12xf64>) -> tensor<3x4xf64, #SparseMatrix> - %expand3 = call @expand_sparse2sparse(%sv) : (tensor<12xf64, #SparseVector>) -> tensor<3x4xf64, #SparseMatrix> - %expand4 = call @expand_dense_3x2x2(%m) : (tensor<3x4xf64>) -> tensor<3x2x2xf64> - %expand5 = call @expand_from_sparse_3x2x2(%sm) : (tensor<3x4xf64, #SparseMatrix>) -> tensor<3x2x2xf64> - %expand6 = call @expand_to_sparse_3x2x2(%m) : (tensor<3x4xf64>) -> tensor<3x2x2xf64, #Sparse3dTensor> - %expand7 = call @expand_sparse2sparse_3x2x2(%sm) : (tensor<3x4xf64, #SparseMatrix>) -> tensor<3x2x2xf64, #Sparse3dTensor> - %expand8 = call @expand_dense_dyn(%dm) : (tensor) -> tensor - %expand9 = call @expand_from_sparse_dyn(%sdm) : (tensor) -> tensor - %expand10 = call @expand_to_sparse_dyn(%dm) : (tensor) -> tensor - %expand11 = call @expand_sparse2sparse_dyn(%sdm) : (tensor) -> tensor - - %collapse0 = call @collapse_dense(%m) : (tensor<3x4xf64>) -> tensor<12xf64> - %collapse1 = call @collapse_from_sparse(%sm) : (tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64> - %collapse2 = call @collapse_to_sparse(%m) : (tensor<3x4xf64>) -> tensor<12xf64, #SparseVector> - %collapse3 = call @collapse_sparse2sparse(%sm) : (tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64, #SparseVector> - %collapse4 = call @collapse_dense_6x10(%n) : (tensor<2x3x5x2xf64>) -> tensor<6x10xf64> - %collapse5 = call @collapse_from_sparse_6x10(%sn) : (tensor<2x3x5x2xf64, #Sparse4dTensor>) -> tensor<6x10xf64> - %collapse6 = call @collapse_to_sparse_6x10(%n) : (tensor<2x3x5x2xf64>) -> tensor<6x10xf64, #SparseMatrix> - %collapse7 = call @collapse_sparse2sparse_6x10(%sn) : (tensor<2x3x5x2xf64, #Sparse4dTensor>) -> tensor<6x10xf64, #SparseMatrix> - %collapse8 = call @collapse_dense_dyn(%dn) : (tensor) -> tensor - %collapse9 = call @collapse_from_sparse_dyn(%sdn) : (tensor) -> tensor - %collapse10 = call @collapse_to_sparse_dyn(%dn) : (tensor) -> tensor - %collapse11 = call @collapse_sparse2sparse_dyn(%sdn) : (tensor) -> tensor - - // - // Verify results of expand - // - // CHECK: ( ( 1, 2, 3, 4 ), ( 5, 6, 7, 8 ), ( 9, 10, 11, 12 ) ) - // CHECK-NEXT: ( ( 1, 2, 3, 4 ), ( 5, 6, 7, 8 ), ( 9, 10, 11, 12 ) ) - // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 ) - // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 ) - // CHECK-NEXT: ( ( ( 1.1, 1.2 ), ( 1.3, 1.4 ) ), ( ( 2.1, 2.2 ), ( 2.3, 2.4 ) ), ( ( 3.1, 3.2 ), ( 3.3, 3.4 ) ) ) - // CHECK-NEXT: ( ( ( 1.1, 1.2 ), ( 1.3, 1.4 ) ), ( ( 2.1, 2.2 ), ( 2.3, 2.4 ) ), ( ( 3.1, 3.2 ), ( 3.3, 3.4 ) ) ) - // CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4 ) - // CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4 ) - // CHECK-NEXT: ( ( ( 1.1, 1.2 ), ( 1.3, 1.4 ) ), ( ( 2.1, 2.2 ), ( 2.3, 2.4 ) ), ( ( 3.1, 3.2 ), ( 3.3, 3.4 ) ) ) - // CHECK-NEXT: ( ( ( 1.1, 1.2 ), ( 1.3, 1.4 ) ), ( ( 2.1, 2.2 ), ( 2.3, 2.4 ) ), ( ( 3.1, 3.2 ), ( 3.3, 3.4 ) ) ) - // CHECK-NEXT: 12 - // CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4 ) - // CHECK-NEXT: 12 - // CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4 ) - // - - %m0 = vector.transfer_read %expand0[%c0, %c0], %df: tensor<3x4xf64>, vector<3x4xf64> - vector.print %m0 : vector<3x4xf64> - %m1 = vector.transfer_read %expand1[%c0, %c0], %df: tensor<3x4xf64>, vector<3x4xf64> - vector.print %m1 : 
vector<3x4xf64> - %a2 = sparse_tensor.values %expand2 : tensor<3x4xf64, #SparseMatrix> to memref - %m2 = vector.transfer_read %a2[%c0], %df: memref, vector<12xf64> - vector.print %m2 : vector<12xf64> - %a3 = sparse_tensor.values %expand3 : tensor<3x4xf64, #SparseMatrix> to memref - %m3 = vector.transfer_read %a3[%c0], %df: memref, vector<12xf64> - vector.print %m3 : vector<12xf64> - - %m4 = vector.transfer_read %expand4[%c0, %c0, %c0], %df: tensor<3x2x2xf64>, vector<3x2x2xf64> - vector.print %m4 : vector<3x2x2xf64> - %m5 = vector.transfer_read %expand5[%c0, %c0, %c0], %df: tensor<3x2x2xf64>, vector<3x2x2xf64> - vector.print %m5 : vector<3x2x2xf64> - %a6 = sparse_tensor.values %expand6 : tensor<3x2x2xf64, #Sparse3dTensor> to memref - %m6 = vector.transfer_read %a6[%c0], %df: memref, vector<12xf64> - vector.print %m6 : vector<12xf64> - %a7 = sparse_tensor.values %expand7 : tensor<3x2x2xf64, #Sparse3dTensor> to memref - %m7 = vector.transfer_read %a7[%c0], %df: memref, vector<12xf64> - vector.print %m7 : vector<12xf64> - - %m8 = vector.transfer_read %expand8[%c0, %c0, %c0], %df: tensor, vector<3x2x2xf64> - vector.print %m8 : vector<3x2x2xf64> - %m9 = vector.transfer_read %expand9[%c0, %c0, %c0], %df: tensor, vector<3x2x2xf64> - vector.print %m9 : vector<3x2x2xf64> - %n10 = sparse_tensor.number_of_entries %expand10 : tensor - vector.print %n10 : index - %a10 = sparse_tensor.values %expand10 : tensor to memref - %m10 = vector.transfer_read %a10[%c0], %df: memref, vector<12xf64> - vector.print %m10 : vector<12xf64> - %n11 = sparse_tensor.number_of_entries %expand11 : tensor - vector.print %n11 : index - %a11 = sparse_tensor.values %expand11 : tensor to memref - %m11 = vector.transfer_read %a11[%c0], %df: memref, vector<12xf64> - vector.print %m11 : vector<12xf64> - - - // - // Verify results of collapse - // - // CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4 ) - // CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4 ) - // CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4 ) - // CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4 ) - // CHECK-NEXT: ( ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ), ( 11, 12, 13, 14, 15, 16, 17, 18, 19, 20 ), ( 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 ), ( 31, 32, 33, 34, 35, 36, 37, 38, 39, 40 ), ( 41, 42, 43, 44, 45, 26, 47, 48, 49, 50 ), ( 51, 52, 53, 54, 55, 56, 57, 58, 59, 60 ) ) - // CHECK-NEXT: ( ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ), ( 11, 12, 13, 14, 15, 16, 17, 18, 19, 20 ), ( 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 ), ( 31, 32, 33, 34, 35, 36, 37, 38, 39, 40 ), ( 41, 42, 43, 44, 45, 26, 47, 48, 49, 50 ), ( 51, 52, 53, 54, 55, 56, 57, 58, 59, 60 ) ) - // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 26, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60 ) - // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 26, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60 ) - // CHECK-NEXT: ( ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ), ( 11, 12, 13, 14, 15, 16, 17, 18, 19, 20 ), ( 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 ), ( 31, 32, 33, 34, 35, 36, 37, 38, 39, 40 ), ( 41, 42, 43, 44, 45, 26, 47, 48, 49, 50 ), ( 51, 52, 53, 54, 55, 56, 57, 58, 59, 60 ) ) - // CHECK-NEXT: ( ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ), ( 11, 12, 13, 14, 
15, 16, 17, 18, 19, 20 ), ( 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 ), ( 31, 32, 33, 34, 35, 36, 37, 38, 39, 40 ), ( 41, 42, 43, 44, 45, 26, 47, 48, 49, 50 ), ( 51, 52, 53, 54, 55, 56, 57, 58, 59, 60 ) ) - // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 26, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60 ) - // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 26, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60 ) - // - - %v0 = vector.transfer_read %collapse0[%c0], %df: tensor<12xf64>, vector<12xf64> - vector.print %v0 : vector<12xf64> - %v1 = vector.transfer_read %collapse1[%c0], %df: tensor<12xf64>, vector<12xf64> - vector.print %v1 : vector<12xf64> - %b2 = sparse_tensor.values %collapse2 : tensor<12xf64, #SparseVector> to memref - %v2 = vector.transfer_read %b2[%c0], %df: memref, vector<12xf64> - vector.print %v2 : vector<12xf64> - %b3 = sparse_tensor.values %collapse3 : tensor<12xf64, #SparseVector> to memref - %v3 = vector.transfer_read %b3[%c0], %df: memref, vector<12xf64> - vector.print %v3 : vector<12xf64> - - %v4 = vector.transfer_read %collapse4[%c0, %c0], %df: tensor<6x10xf64>, vector<6x10xf64> - vector.print %v4 : vector<6x10xf64> - %v5 = vector.transfer_read %collapse5[%c0, %c0], %df: tensor<6x10xf64>, vector<6x10xf64> - vector.print %v5 : vector<6x10xf64> - %b6 = sparse_tensor.values %collapse6 : tensor<6x10xf64, #SparseMatrix> to memref - %v6 = vector.transfer_read %b6[%c0], %df: memref, vector<60xf64> - vector.print %v6 : vector<60xf64> - %b7 = sparse_tensor.values %collapse7 : tensor<6x10xf64, #SparseMatrix> to memref - %v7 = vector.transfer_read %b7[%c0], %df: memref, vector<60xf64> - vector.print %v7 : vector<60xf64> - - %v8 = vector.transfer_read %collapse8[%c0, %c0], %df: tensor, vector<6x10xf64> - vector.print %v8 : vector<6x10xf64> - %v9 = vector.transfer_read %collapse9[%c0, %c0], %df: tensor, vector<6x10xf64> - vector.print %v9 : vector<6x10xf64> - %b10 = sparse_tensor.values %collapse10 : tensor to memref - %v10 = vector.transfer_read %b10[%c0], %df: memref, vector<60xf64> - vector.print %v10 : vector<60xf64> - %b11 = sparse_tensor.values %collapse11 : tensor to memref - %v11 = vector.transfer_read %b11[%c0], %df: memref, vector<60xf64> - vector.print %v11 : vector<60xf64> - - - // Release sparse resources. 
- bufferization.dealloc_tensor %sv : tensor<12xf64, #SparseVector> - bufferization.dealloc_tensor %sm : tensor<3x4xf64, #SparseMatrix> - bufferization.dealloc_tensor %sn : tensor<2x3x5x2xf64, #Sparse4dTensor> - bufferization.dealloc_tensor %sdm : tensor - bufferization.dealloc_tensor %sdn : tensor - bufferization.dealloc_tensor %expand2 : tensor<3x4xf64, #SparseMatrix> - bufferization.dealloc_tensor %expand3 : tensor<3x4xf64, #SparseMatrix> - bufferization.dealloc_tensor %expand6 : tensor<3x2x2xf64, #Sparse3dTensor> - bufferization.dealloc_tensor %expand7 : tensor<3x2x2xf64, #Sparse3dTensor> - bufferization.dealloc_tensor %expand10 : tensor - bufferization.dealloc_tensor %expand11 : tensor - bufferization.dealloc_tensor %collapse2 : tensor<12xf64, #SparseVector> - bufferization.dealloc_tensor %collapse3 : tensor<12xf64, #SparseVector> - bufferization.dealloc_tensor %collapse6 : tensor<6x10xf64, #SparseMatrix> - bufferization.dealloc_tensor %collapse7 : tensor<6x10xf64, #SparseMatrix> - bufferization.dealloc_tensor %collapse10 : tensor - bufferization.dealloc_tensor %collapse11 : tensor - - // Release dense resources. - bufferization.dealloc_tensor %expand1 : tensor<3x4xf64> - bufferization.dealloc_tensor %collapse1 : tensor<12xf64> - bufferization.dealloc_tensor %expand5 : tensor<3x2x2xf64> - bufferization.dealloc_tensor %collapse5 : tensor<6x10xf64> - bufferization.dealloc_tensor %expand9 : tensor - bufferization.dealloc_tensor %collapse9: tensor - - return - } -}