diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext,%mlir_lib_dir/libmlir_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext --dlopen=%mlir_lib_dir/libmlir_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #MAT_C_C = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> #MAT_D_C = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir @@ -1,20 +1,32 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" \ // DEFINE: mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext,%mlir_lib_dir/libmlir_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = enable-runtime-library=false -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" \ +// REDEFINE: %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext --dlopen=%mlir_lib_dir/libmlir_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} !Filename = !llvm.ptr diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = enable-runtime-library=false -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> #DenseVector = #sparse_tensor.encoding<{dimLevelType = ["dense"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = enable-runtime-library=false -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. 
// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> #DCSR = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir @@ -15,7 +15,7 @@ // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" // RUN: %{compile} | %{run} -// If SVE is available, do the same run, but now with direct IR generation and VLA +// Do the same run, but now with direct IR generation and, if available, VLA // vectorization. 
// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" // REDEFINE: %{run} = %lli \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_dim.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = enable-runtime-library=false -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed"] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_codegen_foreach.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = enable-runtime-library=false -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #Row = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = enable-runtime-library=false -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. 
// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex_ops.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex_ops.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex_ops.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = enable-runtime-library=false -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. 
// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #Tensor1 = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed"] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_1d_nwc_wcf.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #CCC = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }> #CSR = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_2d_nhwc_hwcf.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. 
// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #CCCC = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed", "compressed" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #CCC = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conv_3d_ndhwc_dhwcf.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #CCCCC = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed", "compressed", "compressed" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. 
// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #Tensor1 = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ], diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext,%mlir_lib_dir/libmlir_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 reassociate-fp-reductions=true enable-index-optimizations=true enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext --dlopen=%mlir_lib_dir/libmlir_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ -// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ +// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext,%mlir_lib_dir/libmlir_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 reassociate-fp-reductions=true enable-index-optimizations=true enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ], diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ -// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ +// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext,%mlir_lib_dir/libmlir_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = enable-runtime-library=false -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #Tensor1 = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ], diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2sparse.mlir @@ -1,20 +1,31 @@ // Force this file to use the kDirect method for sparse2sparse. 
// DEFINE: %{option} = "enable-runtime-library=true s2s-strategy=2" -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = "enable-runtime-library=false s2s-strategy=2" -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false s2s-strategy=2 vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #Tensor1 = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "dense", "compressed" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dot.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dot.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dot.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_dot.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = enable-runtime-library=false -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand.mlir @@ -1,19 +1,29 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext,%mlir_lib_dir/libmlir_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{option} = enable-runtime-library=false -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext --dlopen=%mlir_lib_dir/libmlir_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #CSC = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir @@ -1,21 +1,32 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" \ -// DEFINE: TENSOR1="" \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" TENSOR1="" \ +// DEFINE: mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. 
// REDEFINE: %{option} = enable-runtime-library=false -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" TENSOR1="" \ +// REDEFINE: %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} !Filename = !llvm.ptr !TensorReader = !llvm.ptr diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir @@ -15,7 +15,7 @@ // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" // RUN: %{compile} | %{run} -// If SVE is available, do the same run, but now with direct IR generation and VLA +// Do the same run, but now with direct IR generation and, if available, VLA // vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" // REDEFINE: %{run} = %lli \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir @@ -16,7 +16,7 @@ // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" // RUN: %{compile} | %{run} -// If SVE is available, do the same run, but now with direct IR generation and VLA +// Do the same run, but now with direct IR generation and, if available, VLA // vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" // REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test.tns" \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. 
// REDEFINE: %{option} = enable-runtime-library=false -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{ dimLevelType = ["compressed"] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_index_dense.mlir @@ -15,7 +15,7 @@ // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" // RUN: %{compile} | %{run} -// If SVE is available, do the same run, but now with direct IR generation and VLA +// Do the same run, but now with direct IR generation and, if available, VLA // vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" // REDEFINE: %{run} = %lli \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir @@ -1,15 +1,26 @@ // DEFINE: %{option} = enable-runtime-library=false -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} // Insertion example using pure codegen (no sparse runtime support lib). 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_2d.mlir @@ -1,15 +1,26 @@ // DEFINE: %{option} = enable-runtime-library=false -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #Dense = #sparse_tensor.encoding<{ dimLevelType = ["dense", "dense"] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_3d.mlir @@ -1,15 +1,26 @@ // DEFINE: %{option} = enable-runtime-library=false -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #TensorCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "dense", "compressed" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir @@ -1,27 +1,40 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext,%mlir_lib_dir/libmlir_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with parallelization strategy. // REDEFINE: %{option} = "enable-runtime-library=true parallelization-strategy=any-storage-any-loop" -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and parallelization strategy. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true parallelization-strategy=any-storage-any-loop" -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext --dlopen=%mlir_lib_dir/libmlir_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} + +// TODO: Investigate the output generated for SVE, see https://github.com/llvm/llvm-project/issues/60626 #CSR = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], @@ -273,8 +286,8 @@ // // Sanity check on nonzeros. 
// - // CHECK: [30.5, 4.2, 4.6, 7, 8 - // CHECK: [30.5, 4.2, 4.6, 7, 8 + // CHECK: [30.5, 4.2, 4.6, 7, 8{{.*}}] + // CHECK: [30.5, 4.2, 4.6, 7, 8{{.*}}] // %val7 = sparse_tensor.values %7 : tensor<4x4xf64, #CSR> to memref %val8 = sparse_tensor.values %8 : tensor<4x4xf64, #DCSR> to memref diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext,%mlir_lib_dir/libmlir_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = enable-runtime-library=false -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext --dlopen=%mlir_lib_dir/libmlir_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #DCSR = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir @@ -23,7 +23,7 @@ // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" // RUN: %{compile} | %{run} -// If SVE is available, do the same run, but now with direct IR generation and VLA +// Do the same run, but now with direct IR generation and, if available, VLA // vectorization. 
// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" // REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir @@ -16,7 +16,7 @@ // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" // RUN: %{compile} | %{run} -// If SVE is available, do the same run, but now with direct IR generation and VLA +// Do the same run, but now with direct IR generation and, if available, VLA // vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" // REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/mttkrp_b.tns" \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseMatrix = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir @@ -16,7 +16,7 @@ // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" // RUN: %{compile} | %{run} -// If SVE is available, do the same run, but now with direct IR generation and VLA +// Do the same run, but now with direct IR generation and, if available, VLA // vectorization. 
// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" // REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir @@ -1,13 +1,24 @@ // DEFINE: %{option} = enable-runtime-library=false -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} + // TODO: Pack only support CodeGen Path #SortedCOO = #sparse_tensor.encoding<{ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir @@ -15,7 +15,7 @@ // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" // RUN: %{compile} | %{run} -// If SVE is available, do the same run, but now with direct IR generation and VLA +// Do the same run, but now with direct IR generation and, if available, VLA // vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" // REDEFINE: %{run} = %lli \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = enable-runtime-library=false -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. 
// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir @@ -1,19 +1,32 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 reassociate-fp-reductions=true enable-index-optimizations=true enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} + +// Reduction in this file _are_ supported by the AArch64 SVE backend #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> #CSR = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}> @@ -35,63 +48,7 @@ doc = "C(i,j) = SUM_k A(i,k) * B(k,j)" } -#trait_mat_reduce_rowwise = { - indexing_maps = [ - affine_map<(i,j) -> (i,j)>, // A (in) - affine_map<(i,j) -> (i)> // X (out) - ], - iterator_types = ["parallel", "reduction"], - doc = "X(i) = PROD_j A(i,j)" -} - -#trait_mat_reduce_colwise = { - indexing_maps = [ - affine_map<(i,j) -> (i,j)>, // A (in) - affine_map<(i,j) -> (j)> // X (out) - ], - iterator_types = ["reduction", "parallel"], - doc = "X(j) = PROD_i A(i,j)" -} - module { - func.func @redProdLex(%arga: tensor) -> tensor { - %c0 = arith.constant 0 : index - %cf1 = arith.constant 1.0 : f64 - %d0 = tensor.dim %arga, %c0 : tensor - %xv = bufferization.alloc_tensor(%d0): tensor - %0 = linalg.generic #trait_mat_reduce_rowwise - ins(%arga: tensor) - outs(%xv: tensor) { - ^bb(%a: f64, %b: f64): - %1 = sparse_tensor.reduce %a, %b, %cf1 : f64 { - ^bb0(%x: f64, %y: f64): - %2 = arith.mulf %x, %y : f64 - sparse_tensor.yield %2 : f64 - } - linalg.yield %1 : f64 - } -> tensor - return %0 : tensor - } - - func.func @redProdExpand(%arga: tensor) -> tensor { - %c0 = arith.constant 0 : index - %cf1 = arith.constant 1.0 : f64 - %d0 = tensor.dim %arga, %c0 : tensor - %xv = bufferization.alloc_tensor(%d0): tensor - %0 = linalg.generic #trait_mat_reduce_rowwise - ins(%arga: tensor) - outs(%xv: tensor) { - ^bb(%a: f64, %b: f64): - %1 = sparse_tensor.reduce %a, %b, %cf1 : f64 { - ^bb0(%x: f64, %y: f64): - %2 = arith.mulf %x, %y : f64 - sparse_tensor.yield %2 : f64 - } - linalg.yield %1 : f64 - } -> tensor - return %0 : tensor - } - func.func @min_plus_csrcsr(%arga: tensor, %argb: tensor) -> tensor { %c0 = arith.constant 0 : index @@ -201,8 +158,6 @@ %sm2c = sparse_tensor.convert %m2 : tensor<5x4xf64> to tensor // Call sparse matrix kernels. 
- %1 = call @redProdLex(%sm1) : (tensor) -> tensor - %2 = call @redProdExpand(%sm2c) : (tensor) -> tensor %5 = call @min_plus_csrcsr(%sm1, %sm2r) : (tensor, tensor) -> tensor %6 = call @min_plus_csrcsc(%sm1, %sm2c) @@ -215,10 +170,6 @@ // CHECK-NEXT: ( ( 1, 2, 0, 0, 0 ), ( 3, 0, 0, 0, 0 ), ( 0, 0, 4, 5, 6 ), ( 7, 0, 8, 9, 0 ), ( 0, 0, 0, 0, 0 ) ) // CHECK-NEXT: ( 6, 5, 4, 3, 2, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) // CHECK-NEXT: ( ( 6, 0, 0, 0, 0 ), ( 0, 0, 0, 5, 0 ), ( 4, 0, 0, 3, 0 ), ( 0, 2, 0, 0, 0 ), ( 0, 11, 0, 0, 0 ) ) - // CHECK-NEXT: ( 2, 3, 120, 504, 0, 0, 0, 0 ) - // CHECK-NEXT: ( 2, 3, 120, 504, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) - // CHECK-NEXT: ( 6, 5, 12, 2, 11, 0, 0, 0 ) - // CHECK-NEXT: ( 6, 5, 12, 2, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) // CHECK-NEXT: ( 7, 7, 9, 8, 7, 7, 12, 11, 11, 0, 0, 0, 0, 0, 0, 0 ) // CHECK-NEXT: ( ( 7, 0, 0, 7, 0 ), ( 9, 0, 0, 0, 0 ), ( 8, 7, 0, 7, 0 ), ( 12, 11, 0, 11, 0 ), ( 0, 0, 0, 0, 0 ) ) // TODO: Update once identity values are no longer inserted for non-overlapping dot product @@ -227,8 +178,6 @@ // call @dump_mat(%sm1) : (tensor) -> () call @dump_mat(%sm2r) : (tensor) -> () - call @dump_vec(%1) : (tensor) -> () - call @dump_vec(%2) : (tensor) -> () call @dump_mat(%5) : (tensor) -> () call @dump_mat(%6) : (tensor) -> () @@ -236,8 +185,6 @@ bufferization.dealloc_tensor %sm1 : tensor bufferization.dealloc_tensor %sm2r : tensor bufferization.dealloc_tensor %sm2c : tensor - bufferization.dealloc_tensor %1 : tensor - bufferization.dealloc_tensor %2 : tensor bufferization.dealloc_tensor %5 : tensor bufferization.dealloc_tensor %6 : tensor return diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir copy from mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir copy to mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom_prod.mlir @@ -1,19 +1,23 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Product reductions - kept in a seperate file as these are not supported by +// the AArch64 SVE backend (so the set-up is a bit different to +// sparse_reducitons.mlir) #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> #CSR = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}> @@ -25,15 +29,6 @@ // // Traits for tensor operations. 
// -#trait_matmul = { - indexing_maps = [ - affine_map<(i,j,k) -> (i,k)>, // A - affine_map<(i,j,k) -> (k,j)>, // B - affine_map<(i,j,k) -> (i,j)> // C (out) - ], - iterator_types = ["parallel", "parallel", "reduction"], - doc = "C(i,j) = SUM_k A(i,k) * B(k,j)" -} #trait_mat_reduce_rowwise = { indexing_maps = [ @@ -44,15 +39,6 @@ doc = "X(i) = PROD_j A(i,j)" } -#trait_mat_reduce_colwise = { - indexing_maps = [ - affine_map<(i,j) -> (i,j)>, // A (in) - affine_map<(i,j) -> (j)> // X (out) - ], - iterator_types = ["reduction", "parallel"], - doc = "X(j) = PROD_i A(i,j)" -} - module { func.func @redProdLex(%arga: tensor) -> tensor { %c0 = arith.constant 0 : index @@ -92,68 +78,6 @@ return %0 : tensor } - func.func @min_plus_csrcsr(%arga: tensor, - %argb: tensor) -> tensor { - %c0 = arith.constant 0 : index - %c1 = arith.constant 1 : index - %maxf = arith.constant 1.0e999 : f64 - %d0 = tensor.dim %arga, %c0 : tensor - %d1 = tensor.dim %argb, %c1 : tensor - %xm = bufferization.alloc_tensor(%d0, %d1) : tensor - %0 = linalg.generic #trait_matmul - ins(%arga, %argb: tensor, tensor) - outs(%xm: tensor) { - ^bb(%a: f64, %b: f64, %output: f64): - %1 = sparse_tensor.binary %a, %b : f64, f64 to f64 - overlap = { - ^bb0(%x: f64, %y: f64): - %3 = arith.addf %x, %y : f64 - sparse_tensor.yield %3 : f64 - } - left={} - right={} - %2 = sparse_tensor.reduce %1, %output, %maxf : f64 { - ^bb0(%x: f64, %y: f64): - %cmp = arith.cmpf "olt", %x, %y : f64 - %3 = arith.select %cmp, %x, %y : f64 - sparse_tensor.yield %3 : f64 - } - linalg.yield %2 : f64 - } -> tensor - return %0 : tensor - } - - func.func @min_plus_csrcsc(%arga: tensor, - %argb: tensor) -> tensor { - %c0 = arith.constant 0 : index - %c1 = arith.constant 1 : index - %maxf = arith.constant 1.0e999 : f64 - %d0 = tensor.dim %arga, %c0 : tensor - %d1 = tensor.dim %argb, %c1 : tensor - %xm = bufferization.alloc_tensor(%d0, %d1) : tensor - %0 = linalg.generic #trait_matmul - ins(%arga, %argb: tensor, tensor) - outs(%xm: tensor) { - ^bb(%a: f64, %b: f64, %output: f64): - %1 = sparse_tensor.binary %a, %b : f64, f64 to f64 - overlap = { - ^bb0(%x: f64, %y: f64): - %3 = arith.addf %x, %y : f64 - sparse_tensor.yield %3 : f64 - } - left={} - right={} - %2 = sparse_tensor.reduce %1, %output, %maxf : f64 { - ^bb0(%x: f64, %y: f64): - %cmp = arith.cmpf "olt", %x, %y : f64 - %3 = arith.select %cmp, %x, %y : f64 - sparse_tensor.yield %3 : f64 - } - linalg.yield %2 : f64 - } -> tensor - return %0 : tensor - } - // Dumps a sparse vector of type f64. func.func @dump_vec(%arg0: tensor) { // Dump the values array to verify only sparse contents are stored. @@ -203,34 +127,19 @@ // Call sparse matrix kernels. %1 = call @redProdLex(%sm1) : (tensor) -> tensor %2 = call @redProdExpand(%sm2c) : (tensor) -> tensor - %5 = call @min_plus_csrcsr(%sm1, %sm2r) - : (tensor, tensor) -> tensor - %6 = call @min_plus_csrcsc(%sm1, %sm2c) - : (tensor, tensor) -> tensor // // Verify the results. 
// - // CHECK: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0 ) - // CHECK-NEXT: ( ( 1, 2, 0, 0, 0 ), ( 3, 0, 0, 0, 0 ), ( 0, 0, 4, 5, 6 ), ( 7, 0, 8, 9, 0 ), ( 0, 0, 0, 0, 0 ) ) - // CHECK-NEXT: ( 6, 5, 4, 3, 2, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) - // CHECK-NEXT: ( ( 6, 0, 0, 0, 0 ), ( 0, 0, 0, 5, 0 ), ( 4, 0, 0, 3, 0 ), ( 0, 2, 0, 0, 0 ), ( 0, 11, 0, 0, 0 ) ) - // CHECK-NEXT: ( 2, 3, 120, 504, 0, 0, 0, 0 ) + // CHECK: ( 2, 3, 120, 504, 0, 0, 0, 0 ) // CHECK-NEXT: ( 2, 3, 120, 504, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) // CHECK-NEXT: ( 6, 5, 12, 2, 11, 0, 0, 0 ) // CHECK-NEXT: ( 6, 5, 12, 2, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) - // CHECK-NEXT: ( 7, 7, 9, 8, 7, 7, 12, 11, 11, 0, 0, 0, 0, 0, 0, 0 ) - // CHECK-NEXT: ( ( 7, 0, 0, 7, 0 ), ( 9, 0, 0, 0, 0 ), ( 8, 7, 0, 7, 0 ), ( 12, 11, 0, 11, 0 ), ( 0, 0, 0, 0, 0 ) ) - // TODO: Update once identity values are no longer inserted for non-overlapping dot product - // CHECK-NEXT: ( 7, inf, inf, 7, 9, inf, inf, inf, 8, 7, inf, 7, 12, 11, inf, 11 ) - // CHECK-NEXT: ( ( 7, inf, inf, 7, 0 ), ( 9, inf, inf, inf, 0 ), ( 8, 7, inf, 7, 0 ), ( 12, 11, inf, 11, 0 ), ( 0, 0, 0, 0, 0 ) ) // call @dump_mat(%sm1) : (tensor) -> () call @dump_mat(%sm2r) : (tensor) -> () call @dump_vec(%1) : (tensor) -> () call @dump_vec(%2) : (tensor) -> () - call @dump_mat(%5) : (tensor) -> () - call @dump_mat(%6) : (tensor) -> () // Release the resources. bufferization.dealloc_tensor %sm1 : tensor @@ -238,8 +147,6 @@ bufferization.dealloc_tensor %sm2c : tensor bufferization.dealloc_tensor %1 : tensor bufferization.dealloc_tensor %2 : tensor - bufferization.dealloc_tensor %5 : tensor - bufferization.dealloc_tensor %6 : tensor return } } diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir @@ -15,7 +15,7 @@ // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" // RUN: %{compile} | %{run} -// If SVE is available, do the same run, but now with direct IR generation and VLA +// Do the same run, but now with direct IR generation and, if available, VLA // vectorization. 
// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" // REDEFINE: %{run} = %lli \ @@ -25,7 +25,7 @@ // REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ // REDEFINE: FileCheck %s -// Reduction in this file are supported by the AArch64 SVE backend +// Reduction in this file _are_ supported by the AArch64 SVE backend #SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }> #DV = #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = enable-runtime-library=false -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{ dimLevelType = ["compressed"] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_push_back.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_push_back.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_push_back.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_push_back.mlir @@ -1,15 +1,26 @@ // DEFINE: %{option} = enable-runtime-library=false -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} module { func.func @entry() { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_sort.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_sort.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_sort.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_sort.mlir @@ -1,8 +1,22 @@ -// RUN: mlir-opt %s --sparse-compiler=enable-runtime-library=false | \ -// RUN: mlir-cpu-runner \ -// RUN: -e entry -entry-point-result=void \ -// RUN: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext,%mlir_lib_dir/libmlir_runner_utils%shlibext | \ -// RUN: FileCheck %s +// DEFINE: %{option} = enable-runtime-library=false +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext,%mlir_lib_dir/libmlir_runner_utils%shlibext | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext --dlopen=%mlir_lib_dir/libmlir_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} module { func.func private @printMemref1dI32(%ptr : memref) attributes { llvm.emit_c_interface } diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_sort_coo.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_sort_coo.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_sort_coo.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_rewrite_sort_coo.mlir @@ -1,15 +1,26 @@ // DEFINE: %{option} = enable-runtime-library=false -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} module { // Stores 5 values to the memref buffer. diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir @@ -16,7 +16,7 @@ // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" // RUN: %{compile} | %{run} -// If SVE is available, do the same run, but now with direct IR generation and VLA +// Do the same run, but now with direct IR generation and, if available, VLA // vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" // REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir @@ -15,7 +15,7 @@ // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" // RUN: %{compile} | %{run} -// If SVE is available, do the same run, but now with direct IR generation and VLA +// Do the same run, but now with direct IR generation and, if available, VLA // vectorization. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 enable-arm-sve=%ENABLE_VLA" // REDEFINE: %{run} = %lli \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir @@ -11,7 +11,7 @@ // REDEFINE: %{option} = enable-runtime-library=false // RUN: %{compile} | %{run} -// If SVE is available, do the same run, but now with direct IR generation and VLA +// Do the same run, but now with direct IR generation and, if available, VLA // vectorization. 
// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" // REDEFINE: %{run} = %lli \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir @@ -1,15 +1,26 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = enable-runtime-library=false -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #map = affine_map<(d0, d1, d2) -> (d0, d1, d2)> #SparseMatrix = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed", "compressed" ] }> @@ -86,4 +97,4 @@ bufferization.dealloc_tensor %sm_f : tensor<2x3x4xf64, #SparseMatrix> return } -} \ No newline at end of file +} diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir @@ -1,15 +1,26 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> #CSR = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir @@ -1,15 +1,26 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir @@ -1,17 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" \ // DEFINE: TENSOR1="%mlir_src_dir/test/Integration/data/mttkrp_b.tns" \ // DEFINE: mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. -// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" -// RUN: %{command} +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" \ +// REDEFINE: TENSOR1="%mlir_src_dir/test/Integration/data/mttkrp_b.tns" \ +// REDEFINE: lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} !Filename = !llvm.ptr diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir @@ -16,7 +16,7 @@ // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" // RUN: %{compile} | %{run} -// If SVE is available, do the same run, but now with direct IR generation and VLA +// Do the same run, but now with direct IR generation and, if available, VLA // vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" // REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = enable-runtime-library=false -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} // // Several common sparse storage schemes. 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir @@ -16,7 +16,7 @@ // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" // RUN: %{compile} | %{run} -// If SVE is available, do the same run, but now with direct IR generation and VLA +// Do the same run, but now with direct IR generation and, if available, VLA // vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" // REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test_symmetric.mtx" \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir @@ -1,20 +1,32 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: TENSOR0="%mlir_src_dir/test/Integration/data/test_symmetric_complex.mtx" \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test_symmetric_complex.mtx" \ // DEFINE: mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = enable-runtime-library=false -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test_symmetric_complex.mtx" \ +// REDEFINE: %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} !Filename = !llvm.ptr diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = enable-runtime-library=false -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} !Filename = !llvm.ptr diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tanh.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tanh.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tanh.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tanh.mlir @@ -1,19 +1,33 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = enable-runtime-library=false -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. 
// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Current fails for SVE, see https://github.com/llvm/llvm-project/issues/60626 +// UNSUPPORTED: target=aarch64{{.*}} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 reassociate-fp-reductions=true enable-index-optimizations=true enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_mul.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = enable-runtime-library=false -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #ST = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed", "compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = enable-runtime-library=false -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #ST1 = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed", "compressed"]}> #ST2 = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed", "dense"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_transpose.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} -// DxEFINE: mlir-cpu-runner \ -// DxEFINE: -e entry -entry-point-result=void \ -// DxEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ -// DxEFINE: FileCheck %s +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ +// DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. 
// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-buffer-initialization=true reassociate-fp-reductions=true enable-index-optimizations=true enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> #DCSR = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir @@ -1,19 +1,30 @@ // DEFINE: %{option} = enable-runtime-library=true -// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \ -// DEFINE: mlir-cpu-runner \ +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ // DEFINE: -e entry -entry-point-result=void \ // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \ // DEFINE: FileCheck %s // -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true" -// RUN: %{command} +// RUN: %{compile} | %{run} // // Do the same run, but now with direct IR generation and vectorization. // REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" -// RUN: %{command} +// RUN: %{compile} | %{run} + +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false enable-buffer-initialization=true vl=4 reassociate-fp-reductions=true enable-index-optimizations=true enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> #DenseVector = #sparse_tensor.encoding<{dimLevelType = ["dense"]}>