diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext,%mlir_lib_dir/libmlir_runner_utils%shlibext | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext --dlopen=%mlir_lib_dir/libmlir_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #MAT_C_C = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> #MAT_D_C = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext,%mlir_lib_dir/libmlir_runner_utils%shlibext | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext --dlopen=%mlir_lib_dir/libmlir_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #MAT_C_C = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> #MAT_D_C = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir @@ -1,14 +1,20 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{run_option} = +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext,%mlir_lib_dir/libmlir_runner_utils%shlibext %{run_option} | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=4 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} #MAT_C_C = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> #MAT_D_C = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext,%mlir_lib_dir/libmlir_runner_utils%shlibext | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. 
-// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext --dlopen=%mlir_lib_dir/libmlir_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #MAT_C_C = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> #MAT_D_C = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir @@ -1,18 +1,32 @@ -// DEFINE: %{env} = TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" -// REDEFINE: %{run_libs} = -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" \ +// DEFINE: mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. 
-// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{env} %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" \ +// REDEFINE: %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext --dlopen=%mlir_runner_utils | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} !Filename = !llvm.ptr diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir @@ -1,14 +1,19 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ +// DEFINE: mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{command} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{command} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{command} // UNSUPPORTED: target=aarch64{{.*}} diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. 
-// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> #DenseVector = #sparse_tensor.encoding<{lvlTypes = ["dense"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dual_sparse_conv_2d.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true enable-index-reduction=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = "enable-runtime-library=true enable-index-reduction=true" +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-index-reduction=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-index-reduction=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-index-reduction=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false enable-index-reduction=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-index-reduction=true enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> #CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/lit.local.cfg b/mlir/test/Integration/Dialect/SparseTensor/CPU/lit.local.cfg --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/lit.local.cfg +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/lit.local.cfg @@ -15,25 +15,3 @@ config.substitutions.append(("%ENABLE_VLA", "false")) config.substitutions.append(("%VLA_ARCH_ATTR_OPTIONS", "")) config.substitutions.append(("%mlir_native_utils_lib_dir", config.mlir_lib_dir)) - -# CONFIGURATION FOR COMPILATION -# ----------------------------- -# Sparse compiler options. (Re)Define this substitution in every test file. -config.substitutions.insert(0, ("%{sparse_compiler_opts}",'' )) -# The following substitution should only be used _after_ -# %{sparse_compiler_opts} has been updated to enable vectorization. -config.substitutions.insert(0, ("%{sparse_compiler_opts_sve}",'enable-arm-sve=true %{sparse_compiler_opts}')) -# Runs the sparse compiler. -config.substitutions.insert(0, ("%{compile}", 'mlir-opt %s --sparse-compiler="%{sparse_compiler_opts}"')) -# Runs the sparse compiler with SVE/VLA vectorisation. -config.substitutions.insert(0, ("%{compile_sve}", 'mlir-opt %s --sparse-compiler="%{sparse_compiler_opts_sve}"')) - -# CONFIGURATION FOR JIT'ing AND EXECUTION -# --------------------------------------- -# Runtime libraries to use. (Re)Define this substitution in every test file. -config.substitutions.insert(0, ("%{run_libs}", "")) -config.substitutions.insert(0, ("%{run_opts}", "-e entry -entry-point-result=void")) -# JIT and run the compiled test. -config.substitutions.insert(0, ("%{run}", "mlir-cpu-runner %{run_opts} %{run_libs}")) -# JIT and run the compiled test with SVE enabled. -config.substitutions.insert(0, ("%{run_sve}", '%mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}')) diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir @@ -1,19 +1,20 @@ -// DEFINE: %{env} = TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" -// REDEFINE: %{run_libs} = -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=false +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" \ +// DEFINE: mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. 
-// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{env} %{run_sve} | FileCheck %s %} - +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} #COO_2D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ], posWidth = 32, crdWidth = 32 }> #COO_3D = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ], posWidth = 32, crdWidth = 32 }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> #DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SV = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cmp.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cmp.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cmp.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cmp.mlir @@ -1,17 +1,26 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = "enable-runtime-library=false" +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed"] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #Row = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_collapse_shape.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{ lvlTypes = ["compressed"] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex_ops.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex_ops.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex_ops.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #Tensor1 = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed"] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true enable-index-reduction=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = "enable-runtime-library=true enable-index-reduction=true" +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true enable-index-reduction=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true enable-index-reduction=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true enable-index-reduction=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true enable-index-reduction=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA enable-index-reduction=true" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #CCC = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true enable-index-reduction=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = "enable-runtime-library=true enable-index-reduction=true" +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-index-reduction=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-index-reduction=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-index-reduction=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false enable-index-reduction=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-index-reduction=true enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> #CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nchw_fchw.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nchw_fchw.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nchw_fchw.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nchw_fchw.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true enable-index-reduction=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = "enable-runtime-library=true enable-index-reduction=true" +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true enable-index-reduction=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true enable-index-reduction=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true enable-index-reduction=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true enable-index-reduction=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA enable-index-reduction=true" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} // TODO: we can only support dense output for nchw input because 'c' is a reduction loop diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true enable-index-reduction=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = "enable-runtime-library=true enable-index-reduction=true" +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true enable-index-reduction=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true enable-index-reduction=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true enable-index-reduction=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true enable-index-reduction=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA enable-index-reduction=true" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #CCCC = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed", "compressed" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true enable-index-reduction=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = "enable-index-reduction=true enable-runtime-library=true" +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true enable-index-reduction=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true enable-index-reduction=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true enable-index-reduction=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true enable-index-reduction=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA enable-index-reduction=true" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #CCC = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true enable-index-reduction=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = "enable-runtime-library=true enable-index-reduction=true" +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true enable-index-reduction=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true enable-index-reduction=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true enable-index-reduction=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true enable-index-reduction=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA enable-index-reduction=true" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #CCCCC = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed", "compressed", "compressed" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #Tensor1 = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ], diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 reassociate-fp-reductions=true enable-index-optimizations=true enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext --dlopen=%mlir_runner_utils | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_element.mlir @@ -1,13 +1,26 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false s2s-strategy=2 -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = "enable-runtime-library=false s2s-strategy=2" +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false s2s-strategy=2 vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s +// RUN: %{compile} | %{run} // -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// Do the same run, but now with direct IR generation and vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false s2s-strategy=2 vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} +// +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #Tensor1 = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton-nu", "singleton" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 reassociate-fp-reductions=true enable-index-optimizations=true enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #Tensor1 = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ], diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir @@ -1,18 +1,31 @@ // Force this file to use the kDirect method for sparse2sparse. 
-// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true s2s-strategy=2 -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = "enable-runtime-library=true s2s-strategy=2" +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false s2s-strategy=2 -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false s2s-strategy=2" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false s2s-strategy=2 vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false s2s-strategy=2 vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #Tensor1 = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. 
-// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} // -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SortedCOO = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dot.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dot.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dot.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dot.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir @@ -1,17 +1,29 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext --dlopen=%mlir_runner_utils | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #CSC = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{ lvlTypes = ["compressed"] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir @@ -1,18 +1,32 @@ -// DEFINE: %{env} = TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" TENSOR1="" -// REDEFINE: %{run_libs} = -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" TENSOR1="" \ +// DEFINE: mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{env} %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" TENSOR1="" \ +// REDEFINE: %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} !Filename = !llvm.ptr !TensorReader = !llvm.ptr diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir @@ -1,18 +1,32 @@ -// DEFINE: %{env} = TENSOR0="%mlir_src_dir/test/Integration/data/test.tns" -// REDEFINE: %{run_libs} = -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test.tns" \ +// DEFINE: mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{env} %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test.tns" \ +// REDEFINE: %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext --dlopen=%mlir_runner_utils | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} !Filename = !llvm.ptr diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_foreach_slices.mlir @@ -1,6 +1,12 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=false +// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ +// DEFINE: mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ +// DEFINE: FileCheck %s +// +// RUN: %{command} +// // TODO: support slices on lib path diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir @@ -1,18 +1,32 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option_vec} = +// DEFINE: %{option} = enable-runtime-library=true + +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option_vec} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true +// REDEFINE: %{option} = "%{option_vec}" +// RUN: %{compile} | %{run} +// Do the same run, but with VLA vectorization. 
+// REDEFINE: %{option} = "enable-arm-sve=true %{option_vec}" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %if mlir_arm_sve_tests %{ %{compile} | mlir-translate -mlir-to-llvmir | %{run} %} #SparseVector = #sparse_tensor.encoding<{ lvlTypes = ["compressed"] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{ lvlTypes = ["compressed"] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir @@ -1,17 +1,26 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=false +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// RUN: %{compile} | %{run} // -// Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// Do the same run, but now with vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} // Insertion example using pure codegen (no sparse runtime support lib). 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir @@ -1,13 +1,26 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=false +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s +// RUN: %{compile} | %{run} // -// Do the same run, but now VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// Do the same run, but now with vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #Dense = #sparse_tensor.encoding<{ lvlTypes = ["dense", "dense"] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir @@ -1,13 +1,26 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=false +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s +// RUN: %{compile} | %{run} // -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// Do the same run, but now with vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #TensorCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "dense", "compressed" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir @@ -1,25 +1,38 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with parallelization strategy. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true parallelization-strategy=any-storage-any-loop -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=true parallelization-strategy=any-storage-any-loop" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and parallelization strategy. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true parallelization-strategy=any-storage-any-loop -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true parallelization-strategy=any-storage-any-loop" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext --dlopen=%mlir_runner_utils | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} // TODO: Investigate the output generated for SVE, see https://github.com/llvm/llvm-project/issues/60626 diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul_slice.mlir @@ -1,6 +1,12 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=false +// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ +// DEFINE: mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext,%mlir_lib_dir/libmlir_runner_utils%shlibext | \ +// DEFINE: FileCheck %s +// +// RUN: %{command} +// // TODO: support lib path. diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext --dlopen=%mlir_runner_utils | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir @@ -1,27 +1,39 @@ -// DEFINE: %{env} = TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" -// REDEFINE: %{run_libs} = -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" \ +// DEFINE: mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with parallelization strategy. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true parallelization-strategy=any-storage-any-loop -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=true parallelization-strategy=any-storage-any-loop" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and parallelization strategy. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false parallelization-strategy=any-storage-any-loop -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false parallelization-strategy=any-storage-any-loop" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s -// +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + // Do the same run, but now with direct IR generation and, if available, VLA // vectorization. 
-// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{env} %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" \ +// REDEFINE: %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} !Filename = !llvm.ptr diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir @@ -1,19 +1,32 @@ -// DEFINE: %{env} = TENSOR0="%mlir_src_dir/test/Integration/data/mttkrp_b.tns" -// REDEFINE: %{run_libs} = -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/mttkrp_b.tns" \ +// DEFINE: mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s -// +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + // Do the same run, but now with direct IR generation and, if available, VLA // vectorization. 
-// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{env} %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/mttkrp_b.tns" \ +// REDEFINE: %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext --dlopen=%mlir_runner_utils | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} !Filename = !llvm.ptr diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization.
+// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseMatrix = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir @@ -1,15 +1,32 @@ -// DEFINE: %{env} = TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" -// REDEFINE: %{run_libs} = -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" \ +// DEFINE: mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// RUN: %{compile} | %{run} +// +// Do the same run, but now with direct IR generation. +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // +// Do the same run, but now with direct IR generation and vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + // Do the same run, but now with direct IR generation and, if available, VLA // vectorization. 
-// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{env} %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" \ +// REDEFINE: %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} !Filename = !llvm.ptr diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir @@ -1,10 +1,23 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=false +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with VLA vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=4 -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// RUN: %{compile} | %{run} +// + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} // TODO: Pack only support CodeGen Path diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pooling_nhwc.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pooling_nhwc.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pooling_nhwc.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pooling_nhwc.mlir @@ -1,10 +1,15 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true enable-index-reduction=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = "enable-runtime-library=true enable-index-reduction=true" +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation.
-// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true enable-index-reduction=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true enable-index-reduction=true" +// RUN: %{compile} | %{run} #CCCC = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed", "compressed" ], posWidth = 32, crdWidth = 32 }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA
// vectorization.
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 reassociate-fp-reductions=true enable-index-optimizations=true enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} // Reduction in this file _are_ supported by the AArch64 SVE backend diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir @@ -1,14 +1,19 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} // Product reductions - kept in a seperate file as these are not supported by // the AArch64 SVE backend (so the set-up is a bit different to diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_sum.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_sum.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_sum.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_sum.mlir @@ -1,14 +1,19 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ +// DEFINE: mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{command} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{command} // // Do the same run, but now with direct IR generation and vectorization. 
-// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{command} #SV = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir @@ -1,17 +1,29 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s // Reduction in this file _are_ supported by the AArch64 SVE backend diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_min.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_min.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_min.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_min.mlir @@ -1,17 +1,19 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ +// DEFINE: mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{command} // // Do the same run, but now with direct IR generation. 
-// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{command} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{command} #SV = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir @@ -1,17 +1,19 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} #SparseVector = #sparse_tensor.encoding<{ lvlTypes = ["compressed"] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_push_back.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_push_back.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_push_back.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_push_back.mlir @@ -1,13 +1,26 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=false +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with vectorization. 
-// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s +// RUN: %{compile} | %{run} // -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// Do the same run, but now with vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} module { func.func @entry() { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_sort.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_sort.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_sort.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_sort.mlir @@ -1,13 +1,22 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=false +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext --dlopen=%mlir_runner_utils | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} module { func.func private @printMemref1dI32(%ptr : memref) attributes { llvm.emit_c_interface } diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_sort_coo.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_sort_coo.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_sort_coo.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_sort_coo.mlir @@ -1,13 +1,26 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=false +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s +// RUN: %{compile} | %{run} // -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// Do the same run, but now with vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} module { // Stores 5 values to the memref buffer. 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir @@ -1,19 +1,32 @@ -// DEFINE: %{env} = TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" -// REDEFINE: %{run_libs} = -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" \ +// DEFINE: mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s -// +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + // Do the same run, but now with direct IR generation and, if available, VLA // vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{env} %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" \ +// REDEFINE: %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} !Filename = !llvm.ptr diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. 
-// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with direct IR generation and VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SM = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir @@ -1,17 +1,26 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=4 -// RUN: %{compile} | %{run} | FileCheck %s +// RUN: %{compile} | %{run} // -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// Do the same run, but now with direct IR generation. +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #CSR = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir @@ -1,17 +1,26 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=4 -// RUN: %{compile} | %{run} | FileCheck %s +// RUN: %{compile} | %{run} // -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// Do the same run, but now with direct IR generation. +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #map = affine_map<(d0, d1, d2) -> (d0, d1, d2)> #SparseMatrix = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "compressed" ] }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir @@ -1,17 +1,26 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with direct IR generation. 
-// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=4 enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// RUN: %{compile} | %{run} // -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// Do the same run, but now with direct IR generation. +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> #CSR = #sparse_tensor.encoding<{lvlTypes = ["dense", "compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_semiring_select.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_semiring_select.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_semiring_select.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_semiring_select.mlir @@ -1,17 +1,26 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=4 enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// RUN: %{compile} | %{run} // -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// Do the same run, but now with direct IR generation. +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #DCSR = #sparse_tensor.encoding<{ lvlTypes = ["compressed", "compressed"] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir @@ -1,17 +1,26 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s -// -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=4 enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// RUN: %{compile} | %{run} // -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// Do the same run, but now with direct IR generation. +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir @@ -1,19 +1,30 @@ -// DEFINE: %{env} = TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" \ -// DEFINE: TENSOR1="%mlir_src_dir/test/Integration/data/mttkrp_b.tns" -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" \ +// DEFINE: TENSOR1="%mlir_src_dir/test/Integration/data/mttkrp_b.tns" \ +// DEFINE: mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s -// -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=4 enable-buffer-initialization=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// RUN: %{compile} | %{run} // -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{env} %{run_sve} | FileCheck %s %} +// Do the same run, but now with direct IR generation. +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" \ +// REDEFINE: TENSOR1="%mlir_src_dir/test/Integration/data/mttkrp_b.tns" \ +// REDEFINE: %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} !Filename = !llvm.ptr diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir @@ -1,18 +1,32 @@ -// DEFINE: %{env} = TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" \ +// DEFINE: mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// RUN: %{compile} | %{run} // -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// Do the same run, but now with direct IR generation. +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{env} %{run_sve} | FileCheck %s %} +// Do the same run, but now with direct IR generation and vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" \ +// REDEFINE: %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} !Filename = !llvm.ptr diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// RUN: %{compile} | %{run} // -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s +// Do the same run, but now with direct IR generation. +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// Do the same run, but now with direct IR generation and vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} // // Several common sparse storage schemes. 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir @@ -1,18 +1,32 @@ -// DEFINE: %{env} = TENSOR0="%mlir_src_dir/test/Integration/data/test_symmetric.mtx" -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test_symmetric.mtx" \ +// DEFINE: mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// RUN: %{compile} | %{run} // -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// Do the same run, but now with direct IR generation. +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{env} %{run_sve} | FileCheck %s %} +// Do the same run, but now with direct IR generation and vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test_symmetric.mtx" \ +// REDEFINE: %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} // TODO: The test currently only operates on the triangular part of the // symmetric matrix. diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_bf16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_bf16.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_bf16.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_bf16.mlir @@ -1,17 +1,19 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ +// DEFINE: mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with direct IR generation. 
-// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// RUN: %{command} // -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s +// Do the same run, but now with direct IR generation. +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{command} // -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// Do the same run, but now with direct IR generation and vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{command} // UNSUPPORTED: target=aarch64{{.*}} diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir @@ -1,18 +1,32 @@ -// DEFINE: %{env} = TENSOR0="%mlir_src_dir/test/Integration/data/test_symmetric_complex.mtx" -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test_symmetric_complex.mtx" \ +// DEFINE: mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// RUN: %{compile} | %{run} // -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{env} %{run} | FileCheck %s +// Do the same run, but now with direct IR generation. +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{env} %{run_sve} | FileCheck %s %} +// Do the same run, but now with direct IR generation and vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test_symmetric_complex.mtx" \ +// REDEFINE: %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} // TODO: The test currently only operates on the triangular part of the // symmetric matrix. 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// RUN: %{compile} | %{run} // -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s +// Do the same run, but now with direct IR generation. +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// Do the same run, but now with direct IR generation and vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} !Filename = !llvm.ptr diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tanh.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tanh.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tanh.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tanh.mlir @@ -1,21 +1,34 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// RUN: %{compile} | %{run} // -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s +// Do the same run, but now with direct IR generation. 
+// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// Do the same run, but now with direct IR generation and vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} // Current fails for SVE, see https://github.com/llvm/llvm-project/issues/60626 // UNSUPPORTED: target=aarch64{{.*}} +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 reassociate-fp-reductions=true enable-index-optimizations=true enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} + #SparseVector = #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }> #trait_op = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// RUN: %{compile} | %{run} // -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s +// Do the same run, but now with direct IR generation. +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// Do the same run, but now with direct IR generation and vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #ST = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed", "compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// RUN: %{compile} | %{run} // -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s +// Do the same run, but now with direct IR generation. +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// Do the same run, but now with direct IR generation and vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #ST1 = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed", "compressed"]}> #ST2 = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed", "dense"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// RUN: %{compile} | %{run} // -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s +// Do the same run, but now with direct IR generation. +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} // -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// Do the same run, but now with direct IR generation and vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose_coo.mlir @@ -1,18 +1,31 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// // FIXME: lib path does not support all of COO yet -// R_U_N: %{compile} | %{run} | FileCheck %s +// R_U_N: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false -// RUN: %{compile} | %{run} | FileCheck %s +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} // -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s +// Do the same run, but now with direct IR generation and vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} // -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SortedCOO = #sparse_tensor.encoding<{ lvlTypes = [ "compressed-nu", "singleton" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option_vec} = +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{run_option} = +// DEFINE: %{cpu_runner} = mlir-cpu-runner + +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = %{cpu_runner} \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils %{run_option} | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// RUN: %{compile} | %{run} // -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s +// Do the same run, but now with direct IR generation. +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} // -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// Do the same run, but now with direct IR generation and vectorization. +// REDEFINE: %{option_vec} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true +// REDEFINE: %{option} = "%{option_vec}" +// RUN: %{compile} | %{run} + +// Do the same run, but with VLA vectorization. 
+// REDEFINE: %{option} = "enable-arm-sve=true %{option_vec}" +// REDEFINE: %{cpu_runner} = %mcr_aarch64_cmd +// REDEFINE: %{run_option} = %VLA_ARCH_ATTR_OPTIONS +// RUN: %if mlir_arm_sve_tests %{ %{compile} | %{run} %} #SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> #DCSR = #sparse_tensor.encoding<{lvlTypes = ["compressed", "compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir @@ -1,17 +1,30 @@ -// REDEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=true -// RUN: %{compile} | %{run} | FileCheck %s +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s // -// Do the same run, but now with direct IR generation. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true -// RUN: %{compile} | %{run} | FileCheck %s +// RUN: %{compile} | %{run} // -// Do the same run, but now with vectorization. -// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true -// RUN: %{compile} | %{run} | FileCheck %s +// Do the same run, but now with direct IR generation. +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} // -// Do the same run, but now with VLA vectorization. -// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %} +// Do the same run, but now with direct IR generation and vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 reassociate-fp-reductions=true enable-index-optimizations=true enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli_host_or_aarch64_cmd \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}> #DenseVector = #sparse_tensor.encoding<{lvlTypes = ["dense"]}>