diff --git a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h
--- a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h
@@ -15,6 +15,7 @@
 
 #include "mlir/IR/PatternMatch.h"
 #include "mlir/Pass/Pass.h"
+#include "mlir/Pass/PassOptions.h"
 
 namespace mlir {
 
@@ -35,6 +36,9 @@
   // TODO: support reduction parallelization too?
 };
 
+/// Converts command-line parallelization flag to the strategy enum.
+SparseParallelizationStrategy sparseParallelizationStrategy(int32_t flag);
+
 /// Defines a vectorization strategy. Any inner loop is a candidate (full SIMD
 /// for parallel loops and horizontal SIMD for reduction loops). A loop is
 /// actually vectorized if (1) allowed by the strategy, and (2) the emitted
@@ -45,6 +49,9 @@
   kAnyStorageInnerLoop
 };
 
+/// Converts command-line vectorization flag to the strategy enum.
+SparseVectorizationStrategy sparseVectorizationStrategy(int32_t flag);
+
 /// Sparsification options.
 struct SparsificationOptions {
   SparsificationOptions(SparseParallelizationStrategy p,
@@ -60,6 +67,35 @@
   bool enableSIMDIndex32;
 };
 
+/// Options for the sparse-tensor pipeline. So far this contains only
+/// the same options as the sparsification pass, and must be kept in
+/// sync with the Passes.td file. In the future this may be extended
+/// with options for other passes in the pipeline.
+struct SparseTensorPipelineOptions
+    : public PassPipelineOptions<SparseTensorPipelineOptions> {
+  ::mlir::detail::PassOptions::Option<int32_t> parallelization{
+      *this, "parallelization-strategy",
+      ::llvm::cl::desc("Set the parallelization strategy"),
+      ::llvm::cl::init(0)};
+  ::mlir::detail::PassOptions::Option<int32_t> vectorization{
+      *this, "vectorization-strategy",
+      ::llvm::cl::desc("Set the vectorization strategy"), ::llvm::cl::init(0)};
+  ::mlir::detail::PassOptions::Option<int32_t> vectorLength{
+      *this, "vl", ::llvm::cl::desc("Set the vector length"),
+      ::llvm::cl::init(1)};
+  ::mlir::detail::PassOptions::Option<bool> enableSIMDIndex32{
+      *this, "enable-simd-index32",
+      ::llvm::cl::desc("Enable i32 indexing into vectors (for efficiency)"),
+      ::llvm::cl::init(false)};
+
+  /// Projects out the options for the sparsification pass.
+  SparsificationOptions sparsificationOptions() const {
+    return SparsificationOptions(sparseParallelizationStrategy(parallelization),
+                                 sparseVectorizationStrategy(vectorization),
+                                 vectorLength, enableSIMDIndex32);
+  }
+};
+
 /// Sets up sparsification rewriting rules with the given options.
 void populateSparsificationPatterns(
     RewritePatternSet &patterns,
@@ -70,6 +106,8 @@
     RewritePatternSet &patterns);
 
 std::unique_ptr<Pass> createSparsificationPass();
+std::unique_ptr<Pass>
+createSparsificationPass(const SparsificationOptions &options);
 std::unique_ptr<Pass> createSparseTensorConversionPass();
 
 //===----------------------------------------------------------------------===//
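For orientation, here is a rough, hypothetical sketch (not part of the patch) of how the new `SparseTensorPipelineOptions` struct and the overloaded `createSparsificationPass` could be used programmatically; the helper name `addSparsificationWithSIMD` and the chosen option values are illustrative only.

```cpp
// Hypothetical usage sketch (not in the patch): populate the pipeline options
// in C++ and project them onto the sparsification pass via
// sparsificationOptions(), much as buildSparseTensorPipeline in the .cpp
// changes below does.
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Pass/PassManager.h"

static void addSparsificationWithSIMD(mlir::PassManager &pm) {
  mlir::SparseTensorPipelineOptions options;
  options.vectorization = 2;        // maps to kAnyStorageInnerLoop
  options.vectorLength = 4;         // 4-lane SIMD
  options.enableSIMDIndex32 = true; // i32 indexing into vectors
  pm.addPass(mlir::createSparsificationPass(options.sparsificationOptions()));
}
```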
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
@@ -6,12 +6,18 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "mlir/Conversion/Passes.h"
 #include "mlir/Dialect/Bufferization/IR/Bufferization.h"
+#include "mlir/Dialect/Bufferization/Transforms/Passes.h"
 #include "mlir/Dialect/LLVMIR/LLVMDialect.h"
+#include "mlir/Dialect/Linalg/Passes.h"
 #include "mlir/Dialect/Linalg/Transforms/Transforms.h"
 #include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
 #include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
 #include "mlir/Dialect/StandardOps/Transforms/FuncConversions.h"
+#include "mlir/Dialect/StandardOps/Transforms/Passes.h"
+#include "mlir/Dialect/Tensor/Transforms/Passes.h"
+#include "mlir/Pass/PassManager.h"
 #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
 
 using namespace mlir;
@@ -34,41 +40,21 @@
   SparsificationPass() = default;
   SparsificationPass(const SparsificationPass &pass) = default;
-
-  /// Returns parallelization strategy given on command line.
-  SparseParallelizationStrategy parallelOption() {
-    switch (parallelization) {
-    default:
-      return SparseParallelizationStrategy::kNone;
-    case 1:
-      return SparseParallelizationStrategy::kDenseOuterLoop;
-    case 2:
-      return SparseParallelizationStrategy::kAnyStorageOuterLoop;
-    case 3:
-      return SparseParallelizationStrategy::kDenseAnyLoop;
-    case 4:
-      return SparseParallelizationStrategy::kAnyStorageAnyLoop;
-    }
-  }
-
-  /// Returns vectorization strategy given on command line.
-  SparseVectorizationStrategy vectorOption() {
-    switch (vectorization) {
-    default:
-      return SparseVectorizationStrategy::kNone;
-    case 1:
-      return SparseVectorizationStrategy::kDenseInnerLoop;
-    case 2:
-      return SparseVectorizationStrategy::kAnyStorageInnerLoop;
-    }
+  SparsificationPass(const SparsificationOptions &options) {
+    parallelization = static_cast<int32_t>(options.parallelizationStrategy);
+    vectorization = static_cast<int32_t>(options.vectorizationStrategy);
+    vectorLength = options.vectorLength;
+    enableSIMDIndex32 = options.enableSIMDIndex32;
   }
 
   void runOnOperation() override {
     auto *ctx = &getContext();
     RewritePatternSet patterns(ctx);
     // Translate strategy flags to strategy options.
-    SparsificationOptions options(parallelOption(), vectorOption(),
-                                  vectorLength, enableSIMDIndex32);
+    SparsificationOptions options(
+        sparseParallelizationStrategy(parallelization),
+        sparseVectorizationStrategy(vectorization), vectorLength,
+        enableSIMDIndex32);
     // Apply rewriting.
     populateSparsificationPatterns(patterns, options);
     vector::populateVectorToVectorCanonicalizationPatterns(patterns);
@@ -133,12 +119,71 @@
   }
 };
 
+void buildSparseTensorPipeline(OpPassManager &pm,
+                               const SparseTensorPipelineOptions &options) {
+  pm.addPass(createSparsificationPass(options.sparsificationOptions()));
+  pm.addPass(createSparseTensorConversionPass());
+  pm.addPass(createLinalgBufferizePass());
+  pm.addPass(createConvertLinalgToLoopsPass());
+  pm.addPass(createConvertVectorToSCFPass());
+  pm.addPass(createLowerToCFGPass()); // --convert-scf-to-std
+  pm.addPass(createFuncBufferizePass());
+  pm.addPass(createTensorConstantBufferizePass());
+  pm.addPass(createTensorBufferizePass());
+  pm.addPass(createStdBufferizePass());
+  pm.addPass(mlir::bufferization::createFinalizingBufferizePass());
+  pm.addPass(createLowerAffinePass());
+  pm.addPass(createConvertVectorToLLVMPass());
+  pm.addPass(createMemRefToLLVMPass());
+  pm.addPass(createConvertMathToLLVMPass());
+  pm.addPass(createLowerToLLVMPass()); // --convert-std-to-llvm
+  pm.addPass(createReconcileUnrealizedCastsPass());
+}
+
+static PassPipelineRegistration<SparseTensorPipelineOptions>
+    registerSparseTensorPipeline("sparse-tensor-standard-pipeline",
+                                 "Runs all the standard passes for compiling "
+                                 "programs using sparse tensors.",
+                                 buildSparseTensorPipeline);
+
 } // namespace
 
+SparseParallelizationStrategy
+mlir::sparseParallelizationStrategy(int32_t flag) {
+  switch (flag) {
+  default:
+    return SparseParallelizationStrategy::kNone;
+  case 1:
+    return SparseParallelizationStrategy::kDenseOuterLoop;
+  case 2:
+    return SparseParallelizationStrategy::kAnyStorageOuterLoop;
+  case 3:
+    return SparseParallelizationStrategy::kDenseAnyLoop;
+  case 4:
+    return SparseParallelizationStrategy::kAnyStorageAnyLoop;
+  }
+}
+
+SparseVectorizationStrategy mlir::sparseVectorizationStrategy(int32_t flag) {
+  switch (flag) {
+  default:
+    return SparseVectorizationStrategy::kNone;
+  case 1:
+    return SparseVectorizationStrategy::kDenseInnerLoop;
+  case 2:
+    return SparseVectorizationStrategy::kAnyStorageInnerLoop;
+  }
+}
+
 std::unique_ptr<Pass> mlir::createSparsificationPass() {
   return std::make_unique<SparsificationPass>();
 }
 
+std::unique_ptr<Pass>
+mlir::createSparsificationPass(const SparsificationOptions &options) {
+  return std::make_unique<SparsificationPass>(options);
+}
+
 std::unique_ptr<Pass> mlir::createSparseTensorConversionPass() {
   return std::make_unique<SparseTensorConversionPass>();
 }
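Since the pipeline is registered under the name `sparse-tensor-standard-pipeline`, it can also be referenced from any textual pipeline description rather than only from the `mlir-opt` command lines in the tests below. A hedged sketch follows (not part of the patch; `runSparsePipeline` is a hypothetical helper and the `parsePassPipeline` overload is assumed from the usual pass-registry API).

```cpp
// Hypothetical sketch (not in the patch): run the registered pipeline from C++
// by parsing its textual form with the same option syntax as the RUN lines.
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Support/LogicalResult.h"
#include "llvm/Support/raw_ostream.h"

static mlir::LogicalResult runSparsePipeline(mlir::ModuleOp module) {
  mlir::PassManager pm(module.getContext());
  if (mlir::failed(mlir::parsePassPipeline(
          "sparse-tensor-standard-pipeline{vectorization-strategy=2 vl=4}", pm,
          llvm::errs())))
    return mlir::failure();
  return pm.run(module);
}
```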
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
 // RUN: TENSOR1="%mlir_integration_test_dir/data/zero.mtx" \
 // RUN: mlir-cpu-runner \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir
@@ -1,10 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
@@ -13,12 +7,7 @@
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
 // RUN: mlir-opt %s \
-// RUN: --sparsification="vectorization-strategy=2 vl=2" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=2" | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir
@@ -1,11 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --linalg-bufferize --convert-linalg-to-loops \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir
@@ -1,11 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --linalg-bufferize --convert-linalg-to-loops \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir
@@ -1,12 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: -sparsification -sparse-tensor-conversion \
-// RUN: -linalg-bufferize -convert-linalg-to-loops \
-// RUN: -convert-vector-to-scf -convert-scf-to-std \
-// RUN: -func-bufferize -tensor-constant-bufferize -tensor-bufferize \
-// RUN: -std-bufferize -finalizing-bufferize \
-// RUN: -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm \
-// RUN: -reconcile-unrealized-casts \
-// RUN: | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir
@@ -1,11 +1,6 @@
 // RUN: mlir-opt %s \
 // RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
@@ -15,12 +10,7 @@
 //
 // RUN: mlir-opt %s \
 // RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparsification="vectorization-strategy=2 vl=2" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=2" | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/test.tns" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
@@ -13,11 +8,7 @@
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
 // RUN: mlir-opt %s \
-// RUN: --sparsification="vectorization-strategy=2 vl=4" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=4" | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/test.tns" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir
@@ -1,12 +1,6 @@
 // RUN: mlir-opt %s \
 // RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --linalg-bufferize --convert-linalg-to-loops \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir
@@ -1,11 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --linalg-bufferize --convert-linalg-to-loops \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/wide.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
@@ -13,11 +8,7 @@
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
 // RUN: mlir-opt %s \
-// RUN: --sparsification="vectorization-strategy=2 vl=16 enable-simd-index32" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=16 enable-simd-index32" | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/wide.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/mttkrp_b.tns" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
@@ -13,11 +8,7 @@
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
 // RUN: mlir-opt %s \
-// RUN: --sparsification="vectorization-strategy=2 vl=4" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=4" | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/mttkrp_b.tns" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir
@@ -1,11 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --linalg-bufferize --convert-linalg-to-loops \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
@@ -1,11 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --linalg-bufferize --convert-linalg-to-loops \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
@@ -13,11 +8,7 @@
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
 // RUN: mlir-opt %s \
-// RUN: --sparsification="vectorization-strategy=2 vl=4" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=4" | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
@@ -1,11 +1,6 @@
 // RUN: mlir-opt %s \
 // RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
@@ -15,12 +10,7 @@
 //
 // RUN: mlir-opt %s \
 // RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparsification="vectorization-strategy=2 vl=2" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=2" | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir
@@ -1,11 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
@@ -15,12 +8,7 @@
 //
 // RUN: mlir-opt %s \
 // RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparsification="vectorization-strategy=2 vl=8" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=8" | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
@@ -13,11 +8,7 @@
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
 // RUN: mlir-opt %s \
-// RUN: --sparsification="vectorization-strategy=2 vl=4 enable-simd-index32" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=4 enable-simd-index32" | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir
@@ -1,12 +1,6 @@
 // RUN: mlir-opt %s \
 // RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --linalg-bufferize --convert-linalg-to-loops \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
@@ -16,13 +10,7 @@
 //
 // RUN: mlir-opt %s \
 // RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \
-// RUN: --sparsification="vectorization-strategy=2 vl=8" --sparse-tensor-conversion \
-// RUN: --linalg-bufferize --convert-linalg-to-loops \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=8" | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
@@ -12,11 +7,7 @@
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
 // RUN: mlir-opt %s \
-// RUN: --sparsification="vectorization-strategy=2 vl=4" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=4" | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/wide.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
@@ -13,11 +8,7 @@
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
 // RUN: mlir-opt %s \
-// RUN: --sparsification="vectorization-strategy=2 vl=2" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=2" | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/wide.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/test_symmetric.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
@@ -13,11 +8,7 @@
 // Do the same run, but now with SIMDization as well. This should not change the outcome.
 //
 // RUN: mlir-opt %s \
-// RUN: --sparsification="vectorization-strategy=2 vl=2" --sparse-tensor-conversion \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: --sparse-tensor-standard-pipeline="vectorization-strategy=2 vl=2" | \
 // RUN: TENSOR0="%mlir_integration_test_dir/data/test_symmetric.mtx" \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir
@@ -1,11 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --linalg-bufferize --convert-linalg-to-loops \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir
@@ -1,11 +1,4 @@
-// RUN: mlir-opt %s \
-// RUN: --sparsification --sparse-tensor-conversion \
-// RUN: --linalg-bufferize --convert-linalg-to-loops \
-// RUN: --convert-vector-to-scf --convert-scf-to-std \
-// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
-// RUN: --std-bufferize --finalizing-bufferize --lower-affine \
-// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \
-// RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \
+// RUN: mlir-opt %s --sparse-tensor-standard-pipeline | \
 // RUN: mlir-cpu-runner \
 // RUN: -e entry -entry-point-result=void \
 // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
--- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -1923,24 +1923,33 @@
     includes = ["include"],
     deps = [
         ":Affine",
+        ":AffineToStandard",
         ":ArithmeticDialect",
         ":BufferizableOpInterface",
         ":BufferizationDialect",
+        ":BufferizationTransforms",
+        ":ConversionPasses",
         ":IR",
         ":LLVMDialect",
         ":LinalgOps",
         ":LinalgTransforms",
+        ":MathToLLVM",
         ":MemRefDialect",
         ":Pass",
         ":SCFDialect",
+        ":SCFToStandard",
         ":SparseTensor",
         ":SparseTensorPassIncGen",
         ":SparseTensorUtils",
         ":StandardOps",
         ":StandardOpsTransforms",
+        ":StandardToLLVM",
         ":TensorDialect",
+        ":TensorTransforms",
         ":Transforms",
         ":VectorOps",
+        ":VectorToLLVM",
+        ":VectorToSCF",
         "//llvm:Support",
     ],
 )