diff --git a/mlir/include/mlir/Dialect/SparseTensor/Pipelines/Passes.h b/mlir/include/mlir/Dialect/SparseTensor/Pipelines/Passes.h
new file mode 100644
--- /dev/null
+++ b/mlir/include/mlir/Dialect/SparseTensor/Pipelines/Passes.h
@@ -0,0 +1,70 @@
+//===- Passes.h - Sparse tensor pipeline entry points -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines prototypes of all sparse tensor pipelines.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_SPARSETENSOR_PIPELINES_PASSES_H_
+#define MLIR_DIALECT_SPARSETENSOR_PIPELINES_PASSES_H_
+
+#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
+#include "mlir/Pass/PassOptions.h"
+
+using namespace mlir::detail;
+using namespace llvm::cl;
+
+namespace mlir {
+namespace sparse_tensor {
+
+/// Options for the "sparse-tensor-standard-pipeline". So far this
+/// contains only the same options as the sparsification pass, and must
+/// be kept in sync with the `SparseTensor/Transforms/Passes.td` file.
+/// In the future this may be extended with options for other passes in
+/// the pipeline.
+struct StandardPipelineOptions
+    : public PassPipelineOptions<StandardPipelineOptions> {
+  PassOptions::Option<int32_t> parallelization{
+      *this, "parallelization-strategy",
+      desc("Set the parallelization strategy"), init(0)};
+  PassOptions::Option<int32_t> vectorization{
+      *this, "vectorization-strategy", desc("Set the vectorization strategy"),
+      init(0)};
+  PassOptions::Option<int32_t> vectorLength{
+      *this, "vl", desc("Set the vector length"), init(1)};
+  PassOptions::Option<bool> enableSIMDIndex32{
+      *this, "enable-simd-index32",
+      desc("Enable i32 indexing into vectors (for efficiency)"), init(false)};
+
+  /// Projects out the options for the sparsification pass.
+  SparsificationOptions sparsificationOptions() const {
+    return SparsificationOptions(sparseParallelizationStrategy(parallelization),
+                                 sparseVectorizationStrategy(vectorization),
+                                 vectorLength, enableSIMDIndex32);
+  }
+};
+
+//===----------------------------------------------------------------------===//
+// Building and Registering.
+//===----------------------------------------------------------------------===//
+
+/// Adds "sparse-tensor-standard-pipeline" to the `OpPassManager`.
+/// This is the standard compiler for taking sparsity-agnostic IR
+/// using the sparse-tensor type and lowering it to LLVM IR with concrete
+/// representations and algorithms for sparse tensors.
+void buildStandardPipeline(OpPassManager &pm,
+                           const StandardPipelineOptions &options);
+
+/// Registers all pipelines for the `sparse_tensor` dialect. At present,
+/// this includes only "sparse-tensor-standard-pipeline".
+void registerSparseTensorPipelines();
+
+} // namespace sparse_tensor
+} // namespace mlir
+
+#endif // MLIR_DIALECT_SPARSETENSOR_PIPELINES_PASSES_H_
diff --git a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h
--- a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h
@@ -35,6 +35,9 @@
   // TODO: support reduction parallelization too?
 };

+/// Converts command-line parallelization flag to the strategy enum.
+SparseParallelizationStrategy sparseParallelizationStrategy(int32_t flag);
+
 /// Defines a vectorization strategy. Any inner loop is a candidate (full SIMD
 /// for parallel loops and horizontal SIMD for reduction loops). A loop is
 /// actually vectorized if (1) allowed by the strategy, and (2) the emitted
@@ -45,6 +48,9 @@
   kAnyStorageInnerLoop
 };

+/// Converts command-line vectorization flag to the strategy enum.
+SparseVectorizationStrategy sparseVectorizationStrategy(int32_t flag);
+
 /// Sparsification options.
 struct SparsificationOptions {
   SparsificationOptions(SparseParallelizationStrategy p,
@@ -70,6 +76,8 @@
                                             RewritePatternSet &patterns);

 std::unique_ptr<Pass> createSparsificationPass();
+std::unique_ptr<Pass>
+createSparsificationPass(const SparsificationOptions &options);
 std::unique_ptr<Pass> createSparseTensorConversionPass();

 //===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/InitAllPasses.h b/mlir/include/mlir/InitAllPasses.h
--- a/mlir/include/mlir/InitAllPasses.h
+++ b/mlir/include/mlir/InitAllPasses.h
@@ -27,6 +27,7 @@
 #include "mlir/Dialect/SCF/Passes.h"
 #include "mlir/Dialect/SPIRV/Transforms/Passes.h"
 #include "mlir/Dialect/Shape/Transforms/Passes.h"
+#include "mlir/Dialect/SparseTensor/Pipelines/Passes.h"
 #include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
 #include "mlir/Dialect/StandardOps/Transforms/Passes.h"
 #include "mlir/Dialect/Tensor/Transforms/Passes.h"
@@ -70,6 +71,9 @@
   registerStandardPasses();
   tensor::registerTensorPasses();
   tosa::registerTosaOptPasses();
+
+  // Dialect pipelines
+  sparse_tensor::registerSparseTensorPipelines();
 }

 } // namespace mlir
diff --git a/mlir/lib/Dialect/SparseTensor/CMakeLists.txt b/mlir/lib/Dialect/SparseTensor/CMakeLists.txt
--- a/mlir/lib/Dialect/SparseTensor/CMakeLists.txt
+++ b/mlir/lib/Dialect/SparseTensor/CMakeLists.txt
@@ -1,3 +1,4 @@
 add_subdirectory(IR)
 add_subdirectory(Transforms)
+add_subdirectory(Pipelines)
 add_subdirectory(Utils)
diff --git a/mlir/lib/Dialect/SparseTensor/Pipelines/CMakeLists.txt b/mlir/lib/Dialect/SparseTensor/Pipelines/CMakeLists.txt
new file mode 100644
--- /dev/null
+++ b/mlir/lib/Dialect/SparseTensor/Pipelines/CMakeLists.txt
@@ -0,0 +1,21 @@
+add_mlir_dialect_library(MLIRSparseTensorPipelines
+  SparseTensorPipelines.cpp
+
+  ADDITIONAL_HEADER_DIRS
+  ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/SparseTensor
+
+  LINK_LIBS PUBLIC
+  MLIRAffineToStandard
+  MLIRBufferizationTransforms
+  MLIRLinalgTransforms
+  MLIRMathToLLVM
+  MLIRMemRefToLLVM
+  MLIRPass
+  MLIRReconcileUnrealizedCasts
+  MLIRSCFToStandard
+  MLIRSparseTensor
+  MLIRSparseTensorTransforms
+  MLIRStandardOpsTransforms
+  MLIRTensorTransforms
+  MLIRVectorToLLVM
+)
diff --git a/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp b/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
new file mode 100644
--- /dev/null
+++ b/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
@@ -0,0 +1,59 @@
+//===- SparseTensorPipelines.cpp - Pipelines for sparse tensor code -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/SparseTensor/Pipelines/Passes.h"
+
+#include "mlir/Conversion/Passes.h"
+#include "mlir/Dialect/Bufferization/Transforms/Passes.h"
+#include "mlir/Dialect/Linalg/Passes.h"
+#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
+#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
+#include "mlir/Dialect/StandardOps/Transforms/Passes.h"
+#include "mlir/Dialect/Tensor/Transforms/Passes.h"
+#include "mlir/Pass/PassManager.h"
+
+using namespace mlir;
+using namespace mlir::sparse_tensor;
+
+//===----------------------------------------------------------------------===//
+// Pipeline implementation.
+//===----------------------------------------------------------------------===//
+
+void mlir::sparse_tensor::buildStandardPipeline(
+    OpPassManager &pm, const StandardPipelineOptions &options) {
+  pm.addPass(createSparsificationPass(options.sparsificationOptions()));
+  pm.addPass(createSparseTensorConversionPass());
+  pm.addPass(createLinalgBufferizePass());
+  pm.addPass(createConvertLinalgToLoopsPass());
+  pm.addPass(createConvertVectorToSCFPass());
+  pm.addPass(createLowerToCFGPass()); // --convert-scf-to-std
+  pm.addPass(createFuncBufferizePass());
+  pm.addPass(createTensorConstantBufferizePass());
+  pm.addPass(createTensorBufferizePass());
+  pm.addPass(createStdBufferizePass());
+  pm.addPass(mlir::bufferization::createFinalizingBufferizePass());
+  pm.addPass(createLowerAffinePass());
+  pm.addPass(createConvertVectorToLLVMPass());
+  pm.addPass(createMemRefToLLVMPass());
+  pm.addPass(createConvertMathToLLVMPass());
+  pm.addPass(createLowerToLLVMPass()); // --convert-std-to-llvm
+  pm.addPass(createReconcileUnrealizedCastsPass());
+}
+
+//===----------------------------------------------------------------------===//
+// Pipeline registration.
+//===----------------------------------------------------------------------===//
+
+void mlir::sparse_tensor::registerSparseTensorPipelines() {
+  PassPipelineRegistration<StandardPipelineOptions>(
+      "sparse-tensor-standard-pipeline",
+      "Standard compiler for taking sparsity-agnostic IR using the"
+      " sparse-tensor type, and lowering it to LLVM IR with concrete"
+      " representations and algorithms for sparse tensors.",
+      buildStandardPipeline);
+}
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
@@ -34,41 +34,21 @@
   SparsificationPass() = default;
   SparsificationPass(const SparsificationPass &pass) = default;
-
-  /// Returns parallelization strategy given on command line.
-  SparseParallelizationStrategy parallelOption() {
-    switch (parallelization) {
-    default:
-      return SparseParallelizationStrategy::kNone;
-    case 1:
-      return SparseParallelizationStrategy::kDenseOuterLoop;
-    case 2:
-      return SparseParallelizationStrategy::kAnyStorageOuterLoop;
-    case 3:
-      return SparseParallelizationStrategy::kDenseAnyLoop;
-    case 4:
-      return SparseParallelizationStrategy::kAnyStorageAnyLoop;
-    }
-  }
-
-  /// Returns vectorization strategy given on command line.
-  SparseVectorizationStrategy vectorOption() {
-    switch (vectorization) {
-    default:
-      return SparseVectorizationStrategy::kNone;
-    case 1:
-      return SparseVectorizationStrategy::kDenseInnerLoop;
-    case 2:
-      return SparseVectorizationStrategy::kAnyStorageInnerLoop;
-    }
+  SparsificationPass(const SparsificationOptions &options) {
+    parallelization = static_cast<int32_t>(options.parallelizationStrategy);
+    vectorization = static_cast<int32_t>(options.vectorizationStrategy);
+    vectorLength = options.vectorLength;
+    enableSIMDIndex32 = options.enableSIMDIndex32;
   }

   void runOnOperation() override {
     auto *ctx = &getContext();
     RewritePatternSet patterns(ctx);
     // Translate strategy flags to strategy options.
-    SparsificationOptions options(parallelOption(), vectorOption(),
-                                  vectorLength, enableSIMDIndex32);
+    SparsificationOptions options(
+        sparseParallelizationStrategy(parallelization),
+        sparseVectorizationStrategy(vectorization), vectorLength,
+        enableSIMDIndex32);
     // Apply rewriting.
     populateSparsificationPatterns(patterns, options);
     vector::populateVectorToVectorCanonicalizationPatterns(patterns);
@@ -136,10 +116,42 @@
 } // namespace

+SparseParallelizationStrategy
+mlir::sparseParallelizationStrategy(int32_t flag) {
+  switch (flag) {
+  default:
+    return SparseParallelizationStrategy::kNone;
+  case 1:
+    return SparseParallelizationStrategy::kDenseOuterLoop;
+  case 2:
+    return SparseParallelizationStrategy::kAnyStorageOuterLoop;
+  case 3:
+    return SparseParallelizationStrategy::kDenseAnyLoop;
+  case 4:
+    return SparseParallelizationStrategy::kAnyStorageAnyLoop;
+  }
+}
+
+SparseVectorizationStrategy mlir::sparseVectorizationStrategy(int32_t flag) {
+  switch (flag) {
+  default:
+    return SparseVectorizationStrategy::kNone;
+  case 1:
+    return SparseVectorizationStrategy::kDenseInnerLoop;
+  case 2:
+    return SparseVectorizationStrategy::kAnyStorageInnerLoop;
+  }
+}
+
 std::unique_ptr<Pass> mlir::createSparsificationPass() {
   return std::make_unique<SparsificationPass>();
 }

+std::unique_ptr<Pass>
+mlir::createSparsificationPass(const SparsificationOptions &options) {
+  return std::make_unique<SparsificationPass>(options);
+}
+
 std::unique_ptr<Pass> mlir::createSparseTensorConversionPass() {
   return std::make_unique<SparseTensorConversionPass>();
 }
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
 // RUN: TENSOR1="%mlir_integration_test_dir/data/zero.mtx" \
 // RUN: mlir-cpu-runner \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir
@@ -1,10 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
@@ -13,12 +7,7 @@
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
 // RUN: mlir-opt %s \
-// RUN: --sparsification="vectorization-strategy=2 vl=2" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=2" | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir
@@ -1,11 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --linalg-bufferize --convert-linalg-to-loops \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir
@@ -1,11 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --linalg-bufferize --convert-linalg-to-loops \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir
@@ -1,12 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: -sparsification -sparse-tensor-conversion \
-// RUN: -linalg-bufferize -convert-linalg-to-loops \
-// RUN: -convert-vector-to-scf -convert-scf-to-std \
-// RUN: -func-bufferize -tensor-constant-bufferize -tensor-bufferize \
-// RUN: -std-bufferize -finalizing-bufferize \
-// RUN: -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm \
-// RUN: -reconcile-unrealized-casts \
-// RUN: | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
@@ -1,11 +1,6 @@
 // RUN: mlir-opt %s \
 // RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
@@ -15,12 +10,7 @@
 //
 // RUN: mlir-opt %s \
 // RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparsification="vectorization-strategy=2 vl=2" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=2" | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/test.tns" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
@@ -13,11 +8,7 @@
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
 // RUN: mlir-opt %s \
-// RUN: --sparsification="vectorization-strategy=2 vl=4" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=4" | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/test.tns" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
@@ -1,12 +1,6 @@
 // RUN: mlir-opt %s \
 // RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --linalg-bufferize --convert-linalg-to-loops \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir
@@ -1,11 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --linalg-bufferize --convert-linalg-to-loops \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/wide.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
@@ -13,11 +8,7 @@
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
 // RUN: mlir-opt %s \
-// RUN: --sparsification="vectorization-strategy=2 vl=16 enable-simd-index32" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=16 enable-simd-index32" | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/wide.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/mttkrp_b.tns" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
@@ -13,11 +8,7 @@
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
 // RUN: mlir-opt %s \
-// RUN: --sparsification="vectorization-strategy=2 vl=4" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=4" | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/mttkrp_b.tns" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir
@@ -1,11 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --linalg-bufferize --convert-linalg-to-loops \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
@@ -1,11 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --linalg-bufferize --convert-linalg-to-loops \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
@@ -13,11 +8,7 @@
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
 // RUN: mlir-opt %s \
-// RUN: --sparsification="vectorization-strategy=2 vl=4" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=4" | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
@@ -1,11 +1,6 @@
 // RUN: mlir-opt %s \
 // RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
@@ -15,12 +10,7 @@
 //
 // RUN: mlir-opt %s \
 // RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparsification="vectorization-strategy=2 vl=2" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=2" | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
@@ -1,11 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
@@ -15,12 +8,7 @@
 //
 // RUN: mlir-opt %s \
 // RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparsification="vectorization-strategy=2 vl=8" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=8" | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
@@ -13,11 +8,7 @@
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
 // RUN: mlir-opt %s \
-// RUN: --sparsification="vectorization-strategy=2 vl=4 enable-simd-index32" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=4 enable-simd-index32" | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
@@ -1,12 +1,6 @@
 // RUN: mlir-opt %s \
 // RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --linalg-bufferize --convert-linalg-to-loops \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
@@ -16,13 +10,7 @@
 //
 // RUN: mlir-opt %s \
 // RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparsification="vectorization-strategy=2 vl=8" --sparse-tensor-conversion \
-// RUN: --linalg-bufferize --convert-linalg-to-loops \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=8" | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
@@ -12,11 +7,7 @@
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
 // RUN: mlir-opt %s \
-// RUN: --sparsification="vectorization-strategy=2 vl=4" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=4" | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/wide.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
@@ -13,11 +8,7 @@
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
 // RUN: mlir-opt %s \
-// RUN: --sparsification="vectorization-strategy=2 vl=2" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=2" | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/wide.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/test_symmetric.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
@@ -13,11 +8,7 @@
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
 // RUN: mlir-opt %s \
-// RUN: --sparsification="vectorization-strategy=2 vl=2" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=2" | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/test_symmetric.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir
@@ -1,11 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --linalg-bufferize --convert-linalg-to-loops \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir
@@ -1,11 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --linalg-bufferize --convert-linalg-to-loops \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
--- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -1978,6 +1978,23 @@
     ],
 )

+cc_library(
+    name = "SparseTensorPipelines",
+    srcs = glob(["lib/Dialect/SparseTensor/Pipelines/*.cpp"]),
+    hdrs = ["include/mlir/Dialect/SparseTensor/Pipelines/Passes.h"],
+    includes = ["include"],
+    deps = [
+        ":BufferizationTransforms",
+        ":ConversionPasses",
+        ":LinalgTransforms",
+        ":Pass",
+        ":SparseTensor",
+        ":SparseTensorTransforms",
+        ":StandardOpsTransforms",
+        ":TensorTransforms",
+    ],
+)
+
 td_library(
     name = "StdOpsTdFiles",
     srcs = [
@@ -5672,6 +5689,7 @@
         ":ShapeTransforms",
         ":ShapeTransformsPassIncGen",
         ":SparseTensor",
+        ":SparseTensorPipelines",
        ":SparseTensorTransforms",
        ":StandardOps",
        ":StandardOpsTransforms",