diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/lit.local.cfg b/mlir/test/Integration/Dialect/SparseTensor/CPU/lit.local.cfg
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/lit.local.cfg
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/lit.local.cfg
@@ -3,3 +3,31 @@
 # No JIT on win32.
 if sys.platform == 'win32':
   config.unsupported = True
+
+# ArmSVE tests must be enabled via build flag.
+if config.mlir_run_arm_sve_tests == 'ON':
+  config.substitutions.append(('%ENABLE_VLA', 'true'))
+  config.substitutions.append(('%VLA_ARCH_ATTR_OPTIONS', '--march=aarch64 --mattr="+sve"'))
+  lli_cmd = 'lli'
+  if config.arm_emulator_lli_executable:
+    lli_cmd = config.arm_emulator_lli_executable
+
+  if config.arm_emulator_utils_lib_dir:
+    config.substitutions.append(('%mlir_native_utils_lib_dir', config.arm_emulator_utils_lib_dir))
+  else:
+    config.substitutions.append(('%mlir_native_utils_lib_dir', config.mlir_integration_test_dir))
+
+  if config.arm_emulator_executable:
+    # Run test in emulator (qemu or armie)
+    emulation_cmd = config.arm_emulator_executable
+    if config.arm_emulator_options:
+      emulation_cmd = emulation_cmd + ' ' + config.arm_emulator_options
+    emulation_cmd = emulation_cmd + ' ' + lli_cmd
+    config.substitutions.append(('%lli', emulation_cmd))
+  else:
+    config.substitutions.append(('%lli', lli_cmd))
+else:
+  config.substitutions.append(('%lli', 'lli'))
+  config.substitutions.append(('%mlir_native_utils_lib_dir', config.mlir_integration_test_dir))
+  config.substitutions.append(('%ENABLE_VLA', 'false'))
+  config.substitutions.append(('%VLA_ARCH_ATTR_OPTIONS', ''))
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir
@@ -11,6 +11,13 @@
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
 // RUN: FileCheck %s
+//
+// If SVE is available, test VLA vectorization.
+//
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=2 enable-vla-vectorization=%ENABLE_VLA" | \
+// RUN: mlir-translate -mlir-to-llvmir | \
+// RUN: %lli --entry-function=entry %VLA_ARCH_ATTR_OPTIONS --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
 
 #SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>
 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
@@ -9,6 +9,13 @@
 // RUN: mlir-cpu-runner -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
 // RUN: FileCheck %s
+//
+// If SVE is available, test VLA vectorization.
+//
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=2 enable-vla-vectorization=%ENABLE_VLA" | \
+// RUN: mlir-translate -mlir-to-llvmir | \
+// RUN: %lli --entry-function=entry %VLA_ARCH_ATTR_OPTIONS --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
 
 #DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>
 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
@@ -13,6 +13,14 @@
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
 // RUN: FileCheck %s
+//
+// If SVE is available, test VLA vectorization.
+//
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=4 enable-vla-vectorization=%ENABLE_VLA" | \
+// RUN: mlir-translate -mlir-to-llvmir | \
+// RUN: TENSOR0="%mlir_integration_test_dir/data/test.tns" \
+// RUN: %lli --entry-function=entry %VLA_ARCH_ATTR_OPTIONS --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
 
 !Filename = !llvm.ptr<i8>
 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir
@@ -9,6 +9,13 @@
 // RUN: mlir-cpu-runner -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
 // RUN: FileCheck %s
+//
+// If SVE is available, test VLA vectorization.
+//
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=4 enable-vla-vectorization=%ENABLE_VLA" | \
+// RUN: mlir-translate -mlir-to-llvmir | \
+// RUN: %lli --entry-function=entry %VLA_ARCH_ATTR_OPTIONS --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
 
 #SparseVector = #sparse_tensor.encoding<{
   dimLevelType = ["compressed"]
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
@@ -14,6 +14,14 @@
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
 // RUN: FileCheck %s
+//
+// If SVE is available, test VLA vectorization.
+//
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=16 enable-vla-vectorization=%ENABLE_VLA enable-simd-index32" | \
+// RUN: mlir-translate -mlir-to-llvmir | \
+// RUN: TENSOR0="%mlir_integration_test_dir/data/wide.mtx" \
+// RUN: %lli --entry-function=entry %VLA_ARCH_ATTR_OPTIONS --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
 
 !Filename = !llvm.ptr<i8>
 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
@@ -13,6 +13,14 @@
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
 // RUN: FileCheck %s
+//
+// If SVE is available, test VLA vectorization.
+//
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=4 enable-vla-vectorization=%ENABLE_VLA" | \
+// RUN: mlir-translate -mlir-to-llvmir | \
+// RUN: TENSOR0="%mlir_integration_test_dir/data/mttkrp_b.tns" \
+// RUN: %lli --entry-function=entry %VLA_ARCH_ATTR_OPTIONS --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
 
 !Filename = !llvm.ptr<i8>
 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
@@ -13,6 +13,14 @@
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
 // RUN: FileCheck %s
+//
+// If SVE is available, test VLA vectorization.
+//
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=4 enable-vla-vectorization=%ENABLE_VLA" | \
+// RUN: mlir-translate -mlir-to-llvmir | \
+// RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
+// RUN: %lli --entry-function=entry %VLA_ARCH_ATTR_OPTIONS --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
 
 !Filename = !llvm.ptr<i8>
 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
@@ -9,6 +9,13 @@
 // RUN: mlir-cpu-runner -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
 // RUN: FileCheck %s
+//
+// If SVE is available, test VLA vectorization.
+//
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=2 enable-vla-vectorization=%ENABLE_VLA" | \
+// RUN: mlir-translate -mlir-to-llvmir | \
+// RUN: %lli --entry-function=entry %VLA_ARCH_ATTR_OPTIONS --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
 
 #DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>
 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
@@ -9,6 +9,7 @@
 // RUN: mlir-cpu-runner -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
 // RUN: FileCheck %s
+//
 
 #SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>
 #DV = #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_vla.mlir
copy from mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
copy to mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_vla.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_vla.mlir
@@ -1,15 +1,13 @@
-// RUN: mlir-opt %s --sparse-compiler | \
-// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
-// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
-// RUN: FileCheck %s
 //
-// Do the same run, but now with SIMDization as well. This should not change the outcome.
+// If SVE is available, test VLA vectorization.
 //
-// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=8" | \
-// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
-// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=8 enable-vla-vectorization=%ENABLE_VLA" | \
+// RUN: mlir-translate -mlir-to-llvmir | \
+// RUN: %lli --entry-function=entry %VLA_ARCH_ATTR_OPTIONS --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \
 // RUN: FileCheck %s
+//
+
 
 #SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>
 #DV = #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }>
 
@@ -49,30 +47,6 @@
     return %0 : tensor<f32>
   }
 
-  func.func @prod_reduction_i32(%arga: tensor<32xi32, #DV>,
-                                %argx: tensor<i32>) -> tensor<i32> {
-    %0 = linalg.generic #trait_reduction
-      ins(%arga: tensor<32xi32, #DV>)
-      outs(%argx: tensor<i32>) {
-        ^bb(%a: i32, %x: i32):
-          %0 = arith.muli %x, %a : i32
-          linalg.yield %0 : i32
-    } -> tensor<i32>
-    return %0 : tensor<i32>
-  }
-
-  func.func @prod_reduction_f32(%arga: tensor<32xf32, #DV>,
-                                %argx: tensor<f32>) -> tensor<f32> {
-    %0 = linalg.generic #trait_reduction
-      ins(%arga: tensor<32xf32, #DV>)
-      outs(%argx: tensor<f32>) {
-        ^bb(%a: f32, %x: f32):
-          %0 = arith.mulf %x, %a : f32
-          linalg.yield %0 : f32
-    } -> tensor<f32>
-    return %0 : tensor<f32>
-  }
-
   func.func @and_reduction_i32(%arga: tensor<32xi32, #DV>,
                                %argx: tensor<i32>) -> tensor<i32> {
     %0 = linalg.generic #trait_reduction
@@ -164,10 +138,6 @@
        : (tensor<32xi32, #SV>, tensor<i32>) -> tensor<i32>
     %1 = call @sum_reduction_f32(%sparse_input_f32, %rf)
        : (tensor<32xf32, #SV>, tensor<f32>) -> tensor<f32>
-    %2 = call @prod_reduction_i32(%dense_input_i32, %ri)
-       : (tensor<32xi32, #DV>, tensor<i32>) -> tensor<i32>
-    %3 = call @prod_reduction_f32(%dense_input_f32, %rf)
-       : (tensor<32xf32, #DV>, tensor<f32>) -> tensor<f32>
     %4 = call @and_reduction_i32(%dense_input_i32, %ri)
        : (tensor<32xi32, #DV>, tensor<i32>) -> tensor<i32>
     %5 = call @or_reduction_i32(%sparse_input_i32, %ri)
@@ -179,8 +149,6 @@
     //
     // CHECK: 26
     // CHECK: 27.5
-    // CHECK: 3087
-    // CHECK: 168
     // CHECK: 1
     // CHECK: 15
     // CHECK: 10
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
@@ -15,6 +15,13 @@
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
 // RUN: FileCheck %s
 //
+// If SVE is available, test VLA vectorization.
+//
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=4 enable-vla-vectorization=%ENABLE_VLA enable-simd-index32" | \
+// RUN: mlir-translate -mlir-to-llvmir | \
+// RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
+// RUN: %lli --entry-function=entry %VLA_ARCH_ATTR_OPTIONS --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
 
 !Filename = !llvm.ptr<i8>
 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
old mode 100755
new mode 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
@@ -9,6 +9,13 @@
 // RUN: mlir-cpu-runner -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
 // RUN: FileCheck %s
+//
+// If SVE is available, test VLA vectorization.
+//
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=8 enable-vla-vectorization=%ENABLE_VLA" | \
+// RUN: mlir-translate -mlir-to-llvmir | \
+// RUN: %lli --entry-function=entry %VLA_ARCH_ATTR_OPTIONS --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
 
 #SM = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>
 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
@@ -11,6 +11,13 @@
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
 // RUN: FileCheck %s
+//
+// If SVE is available, test VLA vectorization.
+//
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=4 enable-vla-vectorization=%ENABLE_VLA" | \
+// RUN: mlir-translate -mlir-to-llvmir | \
+// RUN: %lli --entry-function=entry %VLA_ARCH_ATTR_OPTIONS --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
 
 #CSR = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>
 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
@@ -13,6 +13,14 @@
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
 // RUN: FileCheck %s
+//
+// If SVE is available, test VLA vectorization.
+//
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=2 enable-vla-vectorization=%ENABLE_VLA" | \
+// RUN: mlir-translate -mlir-to-llvmir | \
+// RUN: TENSOR0="%mlir_integration_test_dir/data/wide.mtx" \
+// RUN: %lli --entry-function=entry %VLA_ARCH_ATTR_OPTIONS --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
 
 !Filename = !llvm.ptr<i8>
 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
@@ -13,6 +13,14 @@
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
 // RUN: FileCheck %s
+//
+// If SVE is available, test VLA vectorization.
+//
+// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=2 enable-vla-vectorization=%ENABLE_VLA" | \
+// RUN: mlir-translate -mlir-to-llvmir | \
+// RUN: TENSOR0="%mlir_integration_test_dir/data/test_symmetric.mtx" \
+// RUN: %lli --entry-function=entry %VLA_ARCH_ATTR_OPTIONS --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
 
 !Filename = !llvm.ptr<i8>