diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h --- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h +++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h @@ -1058,80 +1058,6 @@ const FrozenRewritePatternSet &stage2Patterns, function_ref<LogicalResult(Operation *)> stage3Lambda = nullptr); -//===----------------------------------------------------------------------===// -// Support for sparse tensor code generation. -// -// The sparse compiler part of MLIR lowers a tensor expression formulated as a -// Linalg operation into a sequence of loops depending on what dimensions of the -// tensors are marked dense or sparse. The generated code distinguishes between: -// (1) for-loops that iterate over a single dense dimension, -// (2) for-loops that iterate over a single sparse dimension, -// (3) while-loops that co-iterate over several sparse dimensions. -// The for-loops may be subsequently optimized for parallel or vector execution. -// -// For more details, the Dialect/Linalg/Transforms/Sparsification.cpp file. -//===----------------------------------------------------------------------===// - -/// Defines a parallelization strategy. Any implicit loop in the Linalg -/// operation that is marked "parallel" (thus not "reduction") is a candidate -/// for parallelization. The loop is made parallel if (1) allowed by the -/// strategy (e.g., AnyStorageOuterLoop considers either a dense or sparse -/// outermost loop only), and (2) the generated code is an actual for-loop -/// (and not a co-iterating while-loop). -enum class SparseParallelizationStrategy { - kNone, - kDenseOuterLoop, - kAnyStorageOuterLoop, - kDenseAnyLoop, - kAnyStorageAnyLoop - // TODO: support reduction parallelization too? -}; - -/// Defines a vectorization strategy. Any implicit inner loop in the Linalg -/// operation is a candidate (full SIMD for "parallel" loops and horizontal -/// SIMD for "reduction" loops). A loop is actually vectorized if (1) allowed -/// by the strategy, and (2) the emitted code is an actual for-loop (and not -/// a co-iterating while-loop). -enum class SparseVectorizationStrategy { - kNone, - kDenseInnerLoop, - kAnyStorageInnerLoop -}; - -/// Defines a type for "pointer" and "index" storage in the sparse storage -/// scheme, with a choice between the native platform-dependent index width -/// or any of 64-/32-/16-/8-bit integers. A narrow width obviously reduces -/// the memory footprint of the sparse storage scheme, but the width should -/// suffice to define the total required range (viz. the maximum number of -/// stored entries per indirection level for the "pointers" and the maximum -/// value of each tensor index over all dimensions for the "indices"). -enum class SparseIntType { kNative, kI64, kI32, kI16, kI8 }; - -/// Sparsification options.
-struct SparsificationOptions { - SparsificationOptions(SparseParallelizationStrategy p, - SparseVectorizationStrategy v, unsigned vl, - SparseIntType pt, SparseIntType it, bool fo) - : parallelizationStrategy(p), vectorizationStrategy(v), vectorLength(vl), - ptrType(pt), indType(it), fastOutput(fo) {} - SparsificationOptions() - : SparsificationOptions(SparseParallelizationStrategy::kNone, - SparseVectorizationStrategy::kNone, 1u, - SparseIntType::kNative, SparseIntType::kNative, - false) {} - SparseParallelizationStrategy parallelizationStrategy; - SparseVectorizationStrategy vectorizationStrategy; - unsigned vectorLength; - SparseIntType ptrType; - SparseIntType indType; - bool fastOutput; // experimental: fast output buffers -}; - -/// Sets up sparsification rewriting rules with the given options. -void populateSparsificationPatterns( - RewritePatternSet &patterns, - const SparsificationOptions &options = SparsificationOptions()); - } // namespace linalg } // namespace mlir diff --git a/mlir/include/mlir/Dialect/SparseTensor/CMakeLists.txt b/mlir/include/mlir/Dialect/SparseTensor/CMakeLists.txt --- a/mlir/include/mlir/Dialect/SparseTensor/CMakeLists.txt +++ b/mlir/include/mlir/Dialect/SparseTensor/CMakeLists.txt @@ -1 +1,2 @@ add_subdirectory(IR) +add_subdirectory(Transforms) diff --git a/mlir/include/mlir/Dialect/SparseTensor/Transforms/CMakeLists.txt b/mlir/include/mlir/Dialect/SparseTensor/Transforms/CMakeLists.txt new file mode 100644 --- /dev/null +++ b/mlir/include/mlir/Dialect/SparseTensor/Transforms/CMakeLists.txt @@ -0,0 +1,5 @@ +set(LLVM_TARGET_DEFINITIONS Passes.td) +mlir_tablegen(Passes.h.inc -gen-pass-decls -name SparseTensor) +add_public_tablegen_target(MLIRSparseTensorPassIncGen) + +add_mlir_doc(Passes SparseTensorPasses ./ -gen-pass-doc) diff --git a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h new file mode 100644 --- /dev/null +++ b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h @@ -0,0 +1,95 @@ +//===- Passes.h - Sparse tensor pass entry points ---------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This header file defines prototypes of all sparse tensor passes. +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_PASSES_H_ +#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_PASSES_H_ + +#include "mlir/IR/PatternMatch.h" +#include "mlir/Pass/Pass.h" + +namespace mlir { + +/// Defines a parallelization strategy. Any independent loop is a candidate +/// for parallelization. The loop is made parallel if (1) allowed by the +/// strategy (e.g., AnyStorageOuterLoop considers either a dense or sparse +/// outermost loop only), and (2) the generated code is an actual for-loop +/// (and not a co-iterating while-loop). +enum class SparseParallelizationStrategy { + kNone, + kDenseOuterLoop, + kAnyStorageOuterLoop, + kDenseAnyLoop, + kAnyStorageAnyLoop + // TODO: support reduction parallelization too? +}; + +/// Defines a vectorization strategy. Any inner loop is a candidate (full SIMD +/// for parallel loops and horizontal SIMD for reduction loops). 
A loop is +/// actually vectorized if (1) allowed by the strategy, and (2) the emitted +/// code is an actual for-loop (and not a co-iterating while-loop). +enum class SparseVectorizationStrategy { + kNone, + kDenseInnerLoop, + kAnyStorageInnerLoop +}; + +/// Defines a type for "pointer" and "index" storage in the sparse storage +/// scheme, with a choice between the native platform-dependent index width +/// or any of 64-/32-/16-/8-bit integers. A narrow width obviously reduces +/// the memory footprint of the sparse storage scheme, but the width should +/// suffice to define the total required range (viz. the maximum number of +/// stored entries per indirection level for the "pointers" and the maximum +/// value of each tensor index over all dimensions for the "indices"). +enum class SparseIntType { kNative, kI64, kI32, kI16, kI8 }; + +/// Sparsification options. +struct SparsificationOptions { + SparsificationOptions(SparseParallelizationStrategy p, + SparseVectorizationStrategy v, unsigned vl, + SparseIntType pt, SparseIntType it, bool fo) + : parallelizationStrategy(p), vectorizationStrategy(v), vectorLength(vl), + ptrType(pt), indType(it), fastOutput(fo) {} + SparsificationOptions() + : SparsificationOptions(SparseParallelizationStrategy::kNone, + SparseVectorizationStrategy::kNone, 1u, + SparseIntType::kNative, SparseIntType::kNative, + false) {} + SparseParallelizationStrategy parallelizationStrategy; + SparseVectorizationStrategy vectorizationStrategy; + unsigned vectorLength; + SparseIntType ptrType; + SparseIntType indType; + bool fastOutput; // experimental: fast output buffers +}; + +/// Sets up sparsification rewriting rules with the given options. +void populateSparsificationPatterns( + RewritePatternSet &patterns, + const SparsificationOptions &options = SparsificationOptions()); + +/// Sets up sparse tensor conversion rules. +void populateSparseTensorConversionPatterns(RewritePatternSet &patterns); + +std::unique_ptr<Pass> createSparsificationPass(); +std::unique_ptr<Pass> createSparseTensorConversionPass(); + +//===----------------------------------------------------------------------===// +// Registration. +//===----------------------------------------------------------------------===// + +/// Generate the code for registering passes. +#define GEN_PASS_REGISTRATION +#include "mlir/Dialect/SparseTensor/Transforms/Passes.h.inc" + +} // namespace mlir + +#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_PASSES_H_ diff --git a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td new file mode 100644 --- /dev/null +++ b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td @@ -0,0 +1,39 @@ +//===-- Passes.td - Sparse tensor pass definition file -----*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_PASSES +#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_PASSES + +include "mlir/Pass/PassBase.td" + +def Sparsification : Pass<"sparsification", "ModuleOp"> { + let summary = "Automatically generate sparse tensor code from annotations"; + let constructor = "mlir::createSparsificationPass()"; + let dependentDialects = [ + "LLVM::LLVMDialect", + "memref::MemRefDialect", + "scf::SCFDialect", + "sparse_tensor::SparseTensorDialect", + "vector::VectorDialect", + ]; +} + +def SparseTensorConversion : Pass<"sparse-tensor-conversion", "ModuleOp"> { + let summary = "Apply conversion rules to sparse tensors"; + let constructor = "mlir::createSparseTensorConversionPass()"; + let dependentDialects = [ + "LLVM::LLVMDialect", + "memref::MemRefDialect", + "scf::SCFDialect", + "sparse_tensor::SparseTensorDialect", + "vector::VectorDialect", + ]; +} + +#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_PASSES +
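The strategy enums and `SparsificationOptions` declared in the new Passes.h can also be driven programmatically rather than through the pass flags. A minimal sketch of such a driver (hypothetical code, not part of this patch; `sparsifyModule` is an invented name):

```c++
// Hypothetical driver (not part of this patch): configure the options from
// Passes.h directly and apply the sparsification rewriting, mirroring what
// the new Sparsification pass does in its runOnOperation().
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

static mlir::LogicalResult sparsifyModule(mlir::ModuleOp module) {
  // Vectorize any inner loop (VL = 16) and store pointers/indices as i32.
  mlir::SparsificationOptions options(
      mlir::SparseParallelizationStrategy::kNone,
      mlir::SparseVectorizationStrategy::kAnyStorageInnerLoop,
      /*vectorLength=*/16, mlir::SparseIntType::kI32,
      mlir::SparseIntType::kI32, /*fastOutput=*/false);
  mlir::RewritePatternSet patterns(module.getContext());
  mlir::populateSparsificationPatterns(patterns, options);
  return mlir::applyPatternsAndFoldGreedily(module, std::move(patterns));
}
```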
diff --git a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Transforms.h b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Transforms.h deleted file mode 100644 --- a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Transforms.h +++ /dev/null @@ -1,23 +0,0 @@ -//===- Transforms.h - Sparse tensor transformations -------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_TRANSFORMS_H_ -#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_TRANSFORMS_H_ - -#include "mlir/IR/PatternMatch.h" - -namespace mlir { -namespace sparse_tensor { - -/// Sets up sparsification conversion rules with the given options. -void populateSparsificationConversionPatterns(RewritePatternSet &patterns); - -} // namespace sparse_tensor -} // namespace mlir - -#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_TRANSFORMS_H_ diff --git a/mlir/include/mlir/InitAllPasses.h b/mlir/include/mlir/InitAllPasses.h --- a/mlir/include/mlir/InitAllPasses.h +++ b/mlir/include/mlir/InitAllPasses.h @@ -25,6 +25,7 @@ #include "mlir/Dialect/SCF/Passes.h" #include "mlir/Dialect/SPIRV/Transforms/Passes.h" #include "mlir/Dialect/Shape/Transforms/Passes.h" +#include "mlir/Dialect/SparseTensor/Transforms/Passes.h" #include "mlir/Dialect/StandardOps/Transforms/Passes.h" #include "mlir/Dialect/Tensor/Transforms/Passes.h" #include "mlir/Dialect/Tosa/Transforms/Passes.h" @@ -55,6 +56,7 @@ registerGpuSerializeToCubinPass(); registerGpuSerializeToHsacoPass(); registerLinalgPasses(); + registerSparseTensorPasses(); LLVM::registerLLVMPasses(); memref::registerMemRefPasses(); quant::registerQuantPasses(); diff --git a/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt b/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt --- a/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt +++ b/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt @@ -11,7 +11,6 @@ Interchange.cpp Loops.cpp Promotion.cpp - Sparsification.cpp Tiling.cpp Transforms.cpp Vectorization.cpp @@ -37,7 +36,6 @@ MLIRSCF MLIRSCFTransforms MLIRPass - MLIRSparseTensor MLIRStandard MLIRStandardOpsTransforms MLIRStandardToLLVM diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CMakeLists.txt b/mlir/lib/Dialect/SparseTensor/Transforms/CMakeLists.txt --- a/mlir/lib/Dialect/SparseTensor/Transforms/CMakeLists.txt +++ b/mlir/lib/Dialect/SparseTensor/Transforms/CMakeLists.txt @@ -1,14 +1,24 @@ add_mlir_dialect_library(MLIRSparseTensorTransforms - SparseTensorLowering.cpp + Sparsification.cpp + SparseTensorConversion.cpp + SparseTensorPasses.cpp ADDITIONAL_HEADER_DIRS ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/SparseTensor + DEPENDS + MLIRSparseTensorPassIncGen + LINK_LIBS PUBLIC MLIRIR MLIRLLVMIR + MLIRLinalg + MLIRLinalgTransforms + MLIRMemRef MLIRPass + MLIRSCF MLIRStandard MLIRSparseTensor MLIRTransforms + MLIRVector ) diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorLowering.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp rename from mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorLowering.cpp rename to mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorLowering.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp @@ -17,7 +17,7 @@ #include "mlir/Dialect/LLVMIR/LLVMTypes.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" #include "mlir/Dialect/SparseTensor/IR/SparseTensor.h" -#include "mlir/Dialect/SparseTensor/Transforms/Transforms.h" +#include "mlir/Dialect/SparseTensor/Transforms/Passes.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/Transforms/DialectConversion.h" @@ -42,7 +42,7 @@ } /// Sparse conversion rule to remove opaque pointer cast. -class TensorFromPointerConverter +class SparseTensorFromPointerConverter : public OpConversionPattern<sparse_tensor::FromPointerOp> { using OpConversionPattern::OpConversionPattern; LogicalResult @@ -54,7 +54,8 @@ }; /// Sparse conversion rule for dimension accesses. -class TensorToDimSizeConverter : public OpConversionPattern<memref::DimOp> { +class SparseTensorToDimSizeConverter + : public OpConversionPattern<memref::DimOp> { public: using OpConversionPattern::OpConversionPattern; LogicalResult @@ -71,7 +72,7 @@ };
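The `matchAndRewrite` bodies of these converters are unchanged by the rename and elided from the hunks above. For orientation, a rough sketch of the shape such a conversion rule takes (simplified and hypothetical, including the runtime entry-point name; not the actual implementation):

```c++
// Simplified sketch of one conversion rule (not the actual body): rewrite a
// sparse primitive into a call to the runtime support library. The callee
// name "sparseValues" is hypothetical.
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Transforms/DialectConversion.h"

namespace {
class ToValuesConverterSketch
    : public mlir::OpConversionPattern<mlir::sparse_tensor::ToValuesOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  mlir::LogicalResult
  matchAndRewrite(mlir::sparse_tensor::ToValuesOp op,
                  llvm::ArrayRef<mlir::Value> operands,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // Replace the op with a library call that returns the values buffer.
    mlir::Type resType = op.getType();
    rewriter.replaceOpWithNewOp<mlir::CallOp>(
        op, "sparseValues", mlir::TypeRange(resType), operands);
    return mlir::success();
  }
};
} // namespace
```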
/// Sparse conversion rule for pointer accesses. -class TensorToPointersConverter +class SparseTensorToPointersConverter : public OpConversionPattern<sparse_tensor::ToPointersOp> { public: using OpConversionPattern::OpConversionPattern; @@ -98,7 +99,7 @@ }; /// Sparse conversion rule for index accesses. -class TensorToIndicesConverter +class SparseTensorToIndicesConverter : public OpConversionPattern<sparse_tensor::ToIndicesOp> { public: using OpConversionPattern::OpConversionPattern; @@ -125,7 +126,7 @@ }; /// Sparse conversion rule for value accesses. -class TensorToValuesConverter +class SparseTensorToValuesConverter : public OpConversionPattern<sparse_tensor::ToValuesOp> { public: using OpConversionPattern::OpConversionPattern; @@ -157,9 +158,8 @@ /// Populates the given patterns list with conversion rules required for /// the sparsification of linear algebra operations. -void sparse_tensor::populateSparsificationConversionPatterns( - RewritePatternSet &patterns) { - patterns.add<TensorFromPointerConverter, TensorToDimSizeConverter, TensorToPointersConverter, TensorToIndicesConverter, TensorToValuesConverter>(patterns.getContext()); +void mlir::populateSparseTensorConversionPatterns(RewritePatternSet &patterns) { + patterns.add<SparseTensorFromPointerConverter, SparseTensorToDimSizeConverter, SparseTensorToPointersConverter, SparseTensorToIndicesConverter, SparseTensorToValuesConverter>(patterns.getContext()); } diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp new file mode 100644 --- /dev/null +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp @@ -0,0 +1,141 @@ +//===- SparseTensorPasses.cpp - Pass for autogen sparse tensor code ------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "mlir/Dialect/Linalg/Transforms/Transforms.h" +#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h" +#include "mlir/Dialect/SparseTensor/Transforms/Passes.h" +#include "mlir/Transforms/GreedyPatternRewriteDriver.h" + +using namespace mlir; + +namespace { + +//===----------------------------------------------------------------------===// +// Passes declaration. +//===----------------------------------------------------------------------===// + +#define GEN_PASS_CLASSES +#include "mlir/Dialect/SparseTensor/Transforms/Passes.h.inc" + +//===----------------------------------------------------------------------===// +// Passes implementation. +//===----------------------------------------------------------------------===// + +struct SparsificationPass : public SparsificationBase<SparsificationPass> { + + SparsificationPass() = default; + SparsificationPass(const SparsificationPass &pass) {} + + Option<int32_t> parallelization{ + *this, "parallelization-strategy", + llvm::cl::desc("Set the parallelization strategy"), llvm::cl::init(0)}; + + Option<int32_t> vectorization{ + *this, "vectorization-strategy", + llvm::cl::desc("Set the vectorization strategy"), llvm::cl::init(0)}; + + Option<int32_t> vectorLength{ + *this, "vl", llvm::cl::desc("Set the vector length"), llvm::cl::init(1)}; + + Option<int32_t> ptrType{*this, "ptr-type", + llvm::cl::desc("Set the pointer type"), + llvm::cl::init(0)}; + + Option<int32_t> indType{*this, "ind-type", + llvm::cl::desc("Set the index type"), + llvm::cl::init(0)}; + + Option<bool> fastOutput{*this, "fast-output", + llvm::cl::desc("Allows fast output buffers"), + llvm::cl::init(false)}; +
+ /// Returns parallelization strategy given on command line. + SparseParallelizationStrategy parallelOption() { + switch (parallelization) { + default: + return SparseParallelizationStrategy::kNone; + case 1: + return SparseParallelizationStrategy::kDenseOuterLoop; + case 2: + return SparseParallelizationStrategy::kAnyStorageOuterLoop; + case 3: + return SparseParallelizationStrategy::kDenseAnyLoop; + case 4: + return SparseParallelizationStrategy::kAnyStorageAnyLoop; + } + } + + /// Returns vectorization strategy given on command line. + SparseVectorizationStrategy vectorOption() { + switch (vectorization) { + default: + return SparseVectorizationStrategy::kNone; + case 1: + return SparseVectorizationStrategy::kDenseInnerLoop; + case 2: + return SparseVectorizationStrategy::kAnyStorageInnerLoop; + } + } + + /// Returns the requested integer type. + SparseIntType typeOption(int32_t option) { + switch (option) { + default: + return SparseIntType::kNative; + case 1: + return SparseIntType::kI64; + case 2: + return SparseIntType::kI32; + case 3: + return SparseIntType::kI16; + case 4: + return SparseIntType::kI8; + } + } + + void runOnOperation() override { + auto *ctx = &getContext(); + RewritePatternSet patterns(ctx); + // Translate strategy flags to strategy options. + SparsificationOptions options(parallelOption(), vectorOption(), + vectorLength, typeOption(ptrType), + typeOption(indType), fastOutput); + // Apply rewriting. + populateSparsificationPatterns(patterns, options); + vector::populateVectorToVectorCanonicalizationPatterns(patterns); + (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns)); + } +}; + +struct SparseTensorConversionPass + : public SparseTensorConversionBase<SparseTensorConversionPass> { + void runOnOperation() override { + auto *ctx = &getContext(); + RewritePatternSet conversionPatterns(ctx); + ConversionTarget target(*ctx); + target + .addIllegalOp<sparse_tensor::FromPointerOp, sparse_tensor::ToPointersOp, + sparse_tensor::ToIndicesOp, sparse_tensor::ToValuesOp>(); + target.addLegalOp<CallOp>(); + populateSparseTensorConversionPatterns(conversionPatterns); + if (failed(applyPartialConversion(getOperation(), target, + std::move(conversionPatterns)))) + signalPassFailure(); + } +}; + +} // end anonymous namespace + +std::unique_ptr<Pass> mlir::createSparsificationPass() { + return std::make_unique<SparsificationPass>(); +} + +std::unique_ptr<Pass> mlir::createSparseTensorConversionPass() { + return std::make_unique<SparseTensorConversionPass>(); } diff --git a/mlir/lib/Dialect/Linalg/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp rename from mlir/lib/Dialect/Linalg/Transforms/Sparsification.cpp rename to mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Sparsification.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp @@ -42,12 +42,14 @@ //===----------------------------------------------------------------------===// #include "mlir/Dialect/Linalg/IR/LinalgOps.h" -#include "mlir/Dialect/Linalg/Transforms/Transforms.h" #include "mlir/Dialect/Linalg/Utils/Utils.h" #include "mlir/Dialect/SCF/SCF.h" #include "mlir/Dialect/SparseTensor/IR/SparseTensor.h" +#include "mlir/Dialect/SparseTensor/Transforms/Passes.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" +#include "mlir/Dialect/Vector/VectorOps.h" #include "mlir/IR/Matchers.h" +#include "llvm/ADT/SmallBitVector.h" using namespace mlir;
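Taken together, the old `test-sparsification="lower"` flow now decomposes into the two real passes defined above. A minimal sketch of composing them in a pass manager (hypothetical helper, not part of this patch):

```c++
// Hypothetical helper (not part of this patch): the two module-level passes
// replace the old test-sparsification="lower" combination.
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Pass/PassManager.h"

void buildSparseCompilerPipeline(mlir::PassManager &pm) {
  // Generate sparse code for annotated linalg ops first...
  pm.addPass(mlir::createSparsificationPass());
  // ...then lower the remaining sparse tensor primitives to runtime calls.
  pm.addPass(mlir::createSparseTensorConversionPass());
}
```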
@@ -294,8 +296,7 @@ // Code generation. struct CodeGen { - CodeGen(linalg::SparsificationOptions o, unsigned numTensors, - unsigned numLoops) + CodeGen(mlir::SparsificationOptions o, unsigned numTensors, unsigned numLoops) : options(o), loops(numLoops), sizes(numLoops), buffers(numTensors), pointers(numTensors, std::vector<Value>(numLoops)), indices(numTensors, std::vector<Value>(numLoops)), @@ -304,7 +305,7 @@ idxs(numTensors, std::vector<Value>(numLoops)), redExp(-1u), redVal(), curVecLength(1), curVecMask() {} /// Sparsification options. - linalg::SparsificationOptions options; + mlir::SparsificationOptions options; /// Universal dense indices and upper bounds (by index). The loops array /// is updated with the value of the universal dense index in the current /// loop. The sizes array is set once with the inferred dimension sizes. @@ -506,17 +507,17 @@ } /// Maps sparse integer option to actual integral storage type. -static Type genIntType(PatternRewriter &rewriter, linalg::SparseIntType tp) { +static Type genIntType(PatternRewriter &rewriter, SparseIntType tp) { switch (tp) { - case linalg::SparseIntType::kNative: + case SparseIntType::kNative: return rewriter.getIndexType(); - case linalg::SparseIntType::kI64: + case SparseIntType::kI64: return rewriter.getIntegerType(64); - case linalg::SparseIntType::kI32: + case SparseIntType::kI32: return rewriter.getIntegerType(32); - case linalg::SparseIntType::kI16: + case SparseIntType::kI16: return rewriter.getIntegerType(16); - case linalg::SparseIntType::kI8: + case SparseIntType::kI8: return rewriter.getIntegerType(8); } llvm_unreachable("unexpected SparseIntType"); @@ -960,11 +961,11 @@ /// depends on the requested strategy. static bool isVectorFor(CodeGen &codegen, bool isInner, bool isSparse) { switch (codegen.options.vectorizationStrategy) { - case linalg::SparseVectorizationStrategy::kNone: + case SparseVectorizationStrategy::kNone: return false; - case linalg::SparseVectorizationStrategy::kDenseInnerLoop: + case SparseVectorizationStrategy::kDenseInnerLoop: return isInner && !isSparse; - case linalg::SparseVectorizationStrategy::kAnyStorageInnerLoop: + case SparseVectorizationStrategy::kAnyStorageInnerLoop: return isInner; } llvm_unreachable("unexpected vectorization strategy"); @@ -976,15 +977,15 @@ static bool isParallelFor(CodeGen &codegen, bool isOuter, bool isReduction, bool isSparse, bool isVector) { switch (codegen.options.parallelizationStrategy) { - case linalg::SparseParallelizationStrategy::kNone: + case SparseParallelizationStrategy::kNone: return false; - case linalg::SparseParallelizationStrategy::kDenseOuterLoop: + case SparseParallelizationStrategy::kDenseOuterLoop: return isOuter && !isSparse && !isReduction && !isVector; - case linalg::SparseParallelizationStrategy::kAnyStorageOuterLoop: + case SparseParallelizationStrategy::kAnyStorageOuterLoop: return isOuter && !isReduction && !isVector; - case linalg::SparseParallelizationStrategy::kDenseAnyLoop: + case SparseParallelizationStrategy::kDenseAnyLoop: return !isSparse && !isReduction && !isVector; - case linalg::SparseParallelizationStrategy::kAnyStorageAnyLoop: + case SparseParallelizationStrategy::kAnyStorageAnyLoop: return !isReduction && !isVector; } llvm_unreachable("unexpected parallelization strategy"); @@ -1355,7 +1356,7 @@ /// Sparse rewriting rule for generic Linalg operation.
struct GenericOpSparsifier : public OpRewritePattern<linalg::GenericOp> { public: - GenericOpSparsifier(MLIRContext *context, linalg::SparsificationOptions o) + GenericOpSparsifier(MLIRContext *context, SparsificationOptions o) : OpRewritePattern<linalg::GenericOp>(context), options(o) {} LogicalResult matchAndRewrite(linalg::GenericOp op, @@ -1398,14 +1399,14 @@ private: /// Options to control sparse code generation. - linalg::SparsificationOptions options; + SparsificationOptions options; }; } // namespace /// Populates the given patterns list with rewriting rules required for /// the sparsification of linear algebra operations. -void linalg::populateSparsificationPatterns( +void mlir::populateSparsificationPatterns( RewritePatternSet &patterns, const SparsificationOptions &options) { patterns.add<GenericOpSparsifier>(patterns.getContext(), options); } diff --git a/mlir/test/Dialect/SparseTensor/lowering.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir rename from mlir/test/Dialect/SparseTensor/lowering.mlir rename to mlir/test/Dialect/SparseTensor/conversion.mlir --- a/mlir/test/Dialect/SparseTensor/lowering.mlir +++ b/mlir/test/Dialect/SparseTensor/conversion.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt --test-sparsification="lower" %s | FileCheck %s +// RUN: mlir-opt --sparse-tensor-conversion %s | FileCheck %s !SparseTensor = type !llvm.ptr<i8> diff --git a/mlir/test/Dialect/Linalg/sparse_1d.mlir b/mlir/test/Dialect/SparseTensor/sparse_1d.mlir rename from mlir/test/Dialect/Linalg/sparse_1d.mlir rename to mlir/test/Dialect/SparseTensor/sparse_1d.mlir --- a/mlir/test/Dialect/Linalg/sparse_1d.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_1d.mlir @@ -1,5 +1,5 @@ // NOTE: Assertions have been autogenerated by utils/generate-test-checks.py -// RUN: mlir-opt %s -test-sparsification | FileCheck %s +// RUN: mlir-opt %s -sparsification | FileCheck %s #trait_d = { indexing_maps = [ diff --git a/mlir/test/Dialect/Linalg/sparse_2d.mlir b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir rename from mlir/test/Dialect/Linalg/sparse_2d.mlir rename to mlir/test/Dialect/SparseTensor/sparse_2d.mlir --- a/mlir/test/Dialect/Linalg/sparse_2d.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir @@ -1,5 +1,5 @@ // NOTE: Assertions have been autogenerated by utils/generate-test-checks.py -// RUN: mlir-opt %s -test-sparsification | FileCheck %s +// RUN: mlir-opt %s -sparsification | FileCheck %s #trait_dd = { indexing_maps = [ diff --git a/mlir/test/Dialect/Linalg/sparse_3d.mlir b/mlir/test/Dialect/SparseTensor/sparse_3d.mlir rename from mlir/test/Dialect/Linalg/sparse_3d.mlir rename to mlir/test/Dialect/SparseTensor/sparse_3d.mlir --- a/mlir/test/Dialect/Linalg/sparse_3d.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_3d.mlir @@ -1,5 +1,5 @@ // NOTE: Assertions have been autogenerated by utils/generate-test-checks.py -// RUN: mlir-opt %s -test-sparsification | FileCheck %s +// RUN: mlir-opt %s -sparsification | FileCheck %s #trait_ddd = { indexing_maps = [ diff --git a/mlir/test/Dialect/Linalg/sparse_invalid.mlir b/mlir/test/Dialect/SparseTensor/sparse_invalid.mlir rename from mlir/test/Dialect/Linalg/sparse_invalid.mlir rename to mlir/test/Dialect/SparseTensor/sparse_invalid.mlir diff --git a/mlir/test/Dialect/Linalg/sparse_lower.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower.mlir rename from mlir/test/Dialect/Linalg/sparse_lower.mlir rename to mlir/test/Dialect/SparseTensor/sparse_lower.mlir --- a/mlir/test/Dialect/Linalg/sparse_lower.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_lower.mlir @@ -1,15 +1,18 @@ -// RUN: mlir-opt %s -test-sparsification | \ +//
RUN: mlir-opt %s -sparsification | \ // RUN: FileCheck %s --check-prefix=CHECK-HIR // -// RUN: mlir-opt %s -test-sparsification="lower" --convert-linalg-to-loops | \ +// RUN: mlir-opt %s -sparsification \ +// RUN: --sparse-tensor-conversion --convert-linalg-to-loops | \ // RUN: FileCheck %s --check-prefix=CHECK-MIR // -// RUN: mlir-opt %s -test-sparsification="lower" --convert-linalg-to-loops \ +// RUN: mlir-opt %s -sparsification \ +// RUN: --sparse-tensor-conversion --convert-linalg-to-loops \ // RUN: --func-bufferize --tensor-constant-bufferize \ // RUN: --tensor-bufferize --finalizing-bufferize | \ // RUN: FileCheck %s --check-prefix=CHECK-LIR // -// RUN: mlir-opt %s -test-sparsification="lower fast-output" --convert-linalg-to-loops \ +// RUN: mlir-opt %s -sparsification="fast-output" \ +// RUN: --sparse-tensor-conversion --convert-linalg-to-loops \ // RUN: --func-bufferize --tensor-constant-bufferize \ // RUN: --tensor-bufferize --finalizing-bufferize | \ // RUN: FileCheck %s --check-prefix=CHECK-FAST diff --git a/mlir/test/Dialect/Linalg/sparse_nd.mlir b/mlir/test/Dialect/SparseTensor/sparse_nd.mlir rename from mlir/test/Dialect/Linalg/sparse_nd.mlir rename to mlir/test/Dialect/SparseTensor/sparse_nd.mlir --- a/mlir/test/Dialect/Linalg/sparse_nd.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_nd.mlir @@ -1,5 +1,5 @@ // NOTE: Assertions have been autogenerated by utils/generate-test-checks.py -// RUN: mlir-opt %s -test-sparsification | FileCheck %s +// RUN: mlir-opt %s -sparsification | FileCheck %s // Example with cyclic iteration graph with sparse and dense constraints, // but an acyclic iteration graph using sparse constraints only. diff --git a/mlir/test/Dialect/Linalg/sparse_parallel.mlir b/mlir/test/Dialect/SparseTensor/sparse_parallel.mlir rename from mlir/test/Dialect/Linalg/sparse_parallel.mlir rename to mlir/test/Dialect/SparseTensor/sparse_parallel.mlir --- a/mlir/test/Dialect/Linalg/sparse_parallel.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_parallel.mlir @@ -1,12 +1,12 @@ -// RUN: mlir-opt %s -test-sparsification="parallelization-strategy=0" | \ +// RUN: mlir-opt %s -sparsification="parallelization-strategy=0" | \ // RUN: FileCheck %s --check-prefix=CHECK-PAR0 -// RUN: mlir-opt %s -test-sparsification="parallelization-strategy=1" | \ +// RUN: mlir-opt %s -sparsification="parallelization-strategy=1" | \ // RUN: FileCheck %s --check-prefix=CHECK-PAR1 -// RUN: mlir-opt %s -test-sparsification="parallelization-strategy=2" | \ +// RUN: mlir-opt %s -sparsification="parallelization-strategy=2" | \ // RUN: FileCheck %s --check-prefix=CHECK-PAR2 -// RUN: mlir-opt %s -test-sparsification="parallelization-strategy=3" | \ +// RUN: mlir-opt %s -sparsification="parallelization-strategy=3" | \ // RUN: FileCheck %s --check-prefix=CHECK-PAR3 -// RUN: mlir-opt %s -test-sparsification="parallelization-strategy=4" | \ +// RUN: mlir-opt %s -sparsification="parallelization-strategy=4" | \ // RUN: FileCheck %s --check-prefix=CHECK-PAR4 #trait_dd = { diff --git a/mlir/test/Dialect/Linalg/sparse_storage.mlir b/mlir/test/Dialect/SparseTensor/sparse_storage.mlir rename from mlir/test/Dialect/Linalg/sparse_storage.mlir rename to mlir/test/Dialect/SparseTensor/sparse_storage.mlir --- a/mlir/test/Dialect/Linalg/sparse_storage.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_storage.mlir @@ -1,14 +1,14 @@ -// RUN: mlir-opt %s -test-sparsification="ptr-type=1 ind-type=1" | \ +// RUN: mlir-opt %s -sparsification="ptr-type=1 ind-type=1" | \ // RUN: FileCheck %s --check-prefix=CHECK-TYPE0 -// RUN: 
mlir-opt %s -test-sparsification="ptr-type=1 ind-type=2" | \ +// RUN: mlir-opt %s -sparsification="ptr-type=1 ind-type=2" | \ // RUN: FileCheck %s --check-prefix=CHECK-TYPE1 -// RUN: mlir-opt %s -test-sparsification="ptr-type=2 ind-type=1" | \ +// RUN: mlir-opt %s -sparsification="ptr-type=2 ind-type=1" | \ // RUN: FileCheck %s --check-prefix=CHECK-TYPE2 -// RUN: mlir-opt %s -test-sparsification="ptr-type=2 ind-type=2" | \ +// RUN: mlir-opt %s -sparsification="ptr-type=2 ind-type=2" | \ // RUN: FileCheck %s --check-prefix=CHECK-TYPE3 -// RUN: mlir-opt %s -test-sparsification="ptr-type=3 ind-type=3" | \ +// RUN: mlir-opt %s -sparsification="ptr-type=3 ind-type=3" | \ // RUN: FileCheck %s --check-prefix=CHECK-TYPE4 -// RUN: mlir-opt %s -test-sparsification="ptr-type=4 ind-type=4" | \ +// RUN: mlir-opt %s -sparsification="ptr-type=4 ind-type=4" | \ // RUN: FileCheck %s --check-prefix=CHECK-TYPE5 #trait_mul_1d = { diff --git a/mlir/test/Dialect/Linalg/sparse_vector.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector.mlir rename from mlir/test/Dialect/Linalg/sparse_vector.mlir rename to mlir/test/Dialect/SparseTensor/sparse_vector.mlir --- a/mlir/test/Dialect/Linalg/sparse_vector.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_vector.mlir @@ -1,10 +1,10 @@ -// RUN: mlir-opt %s -test-sparsification="vectorization-strategy=0 ptr-type=2 ind-type=2 vl=16" | \ +// RUN: mlir-opt %s -sparsification="vectorization-strategy=0 ptr-type=2 ind-type=2 vl=16" | \ // RUN: FileCheck %s --check-prefix=CHECK-VEC0 -// RUN: mlir-opt %s -test-sparsification="vectorization-strategy=1 ptr-type=2 ind-type=2 vl=16" | \ +// RUN: mlir-opt %s -sparsification="vectorization-strategy=1 ptr-type=2 ind-type=2 vl=16" | \ // RUN: FileCheck %s --check-prefix=CHECK-VEC1 -// RUN: mlir-opt %s -test-sparsification="vectorization-strategy=2 ptr-type=2 ind-type=2 vl=16" | \ +// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 ptr-type=2 ind-type=2 vl=16" | \ // RUN: FileCheck %s --check-prefix=CHECK-VEC2 -// RUN: mlir-opt %s -test-sparsification="vectorization-strategy=2 ptr-type=0 ind-type=0 vl=16" | \ +// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 ptr-type=0 ind-type=0 vl=16" | \ // RUN: FileCheck %s --check-prefix=CHECK-VEC3 #trait_scale_d = { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir @@ -1,5 +1,5 @@ // RUN: mlir-opt %s \ -// RUN: --test-sparsification="lower ptr-type=4 ind-type=4" \ +// RUN: --sparsification="ptr-type=4 ind-type=4" --sparse-tensor-conversion \ // RUN: --convert-linalg-to-loops --convert-vector-to-scf --convert-scf-to-std \ // RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize \ @@ -11,7 +11,7 @@ // RUN: FileCheck %s // // RUN: mlir-opt %s \ -// RUN: --test-sparsification="lower vectorization-strategy=2 ptr-type=4 ind-type=4 vl=16" \ +// RUN: --sparsification="vectorization-strategy=2 ptr-type=4 ind-type=4 vl=16" --sparse-tensor-conversion \ // RUN: --convert-linalg-to-loops --convert-vector-to-scf --convert-scf-to-std \ // RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir 
b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir @@ -1,5 +1,5 @@ // RUN: mlir-opt %s \ -// RUN: --test-sparsification="lower ptr-type=2 ind-type=2 fast-output" \ +// RUN: --sparsification="ptr-type=2 ind-type=2 fast-output" --sparse-tensor-conversion \ // RUN: --convert-linalg-to-loops --convert-vector-to-scf --convert-scf-to-std \ // RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir @@ -1,5 +1,5 @@ // RUN: mlir-opt %s \ -// RUN: --test-sparsification="lower" \ +// RUN: --sparsification --sparse-tensor-conversion \ // RUN: --convert-linalg-to-loops --convert-vector-to-scf --convert-scf-to-std \ // RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize \ diff --git a/mlir/test/lib/Transforms/CMakeLists.txt b/mlir/test/lib/Transforms/CMakeLists.txt --- a/mlir/test/lib/Transforms/CMakeLists.txt +++ b/mlir/test/lib/Transforms/CMakeLists.txt @@ -34,7 +34,6 @@ TestMemRefDependenceCheck.cpp TestMemRefStrideCalculation.cpp TestSCFUtils.cpp - TestSparsification.cpp TestVectorTransforms.cpp EXCLUDE_FROM_LIBMLIR diff --git a/mlir/test/lib/Transforms/TestSparsification.cpp b/mlir/test/lib/Transforms/TestSparsification.cpp deleted file mode 100644 --- a/mlir/test/lib/Transforms/TestSparsification.cpp +++ /dev/null @@ -1,144 +0,0 @@ -//===- TestSparsification.cpp - Test sparsification of tensors ------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "mlir/Dialect/LLVMIR/LLVMDialect.h" -#include "mlir/Dialect/Linalg/Transforms/Transforms.h" -#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h" -#include "mlir/Dialect/SparseTensor/Transforms/Transforms.h" -#include "mlir/Dialect/Vector/VectorOps.h" -#include "mlir/Pass/Pass.h" -#include "mlir/Transforms/GreedyPatternRewriteDriver.h" - -using namespace mlir; - -namespace { - -struct TestSparsification - : public PassWrapper<TestSparsification, FunctionPass> { - - TestSparsification() = default; - TestSparsification(const TestSparsification &pass) {} - - Option<int32_t> parallelization{ - *this, "parallelization-strategy", - llvm::cl::desc("Set the parallelization strategy"), llvm::cl::init(0)}; - - Option<int32_t> vectorization{ - *this, "vectorization-strategy", - llvm::cl::desc("Set the vectorization strategy"), llvm::cl::init(0)}; - - Option<int32_t> vectorLength{ - *this, "vl", llvm::cl::desc("Set the vector length"), llvm::cl::init(1)}; - - Option<int32_t> ptrType{*this, "ptr-type", - llvm::cl::desc("Set the pointer type"), - llvm::cl::init(0)}; - - Option<int32_t> indType{*this, "ind-type", - llvm::cl::desc("Set the index type"), - llvm::cl::init(0)}; - - Option<bool> fastOutput{*this, "fast-output", - llvm::cl::desc("Allows fast output buffers"), - llvm::cl::init(false)}; - - Option<bool> lower{*this, "lower", llvm::cl::desc("Lower sparse primitives"), - llvm::cl::init(false)}; - - /// Registers all dialects required by testing. - void getDependentDialects(DialectRegistry &registry) const override { - registry.insert<memref::MemRefDialect, scf::SCFDialect, sparse_tensor::SparseTensorDialect, vector::VectorDialect, LLVM::LLVMDialect>(); - } - - /// Returns parallelization strategy given on command line. - linalg::SparseParallelizationStrategy parallelOption() { - switch (parallelization) { - default: - return linalg::SparseParallelizationStrategy::kNone; - case 1: - return linalg::SparseParallelizationStrategy::kDenseOuterLoop; - case 2: - return linalg::SparseParallelizationStrategy::kAnyStorageOuterLoop; - case 3: - return linalg::SparseParallelizationStrategy::kDenseAnyLoop; - case 4: - return linalg::SparseParallelizationStrategy::kAnyStorageAnyLoop; - } - } - - /// Returns vectorization strategy given on command line. - linalg::SparseVectorizationStrategy vectorOption() { - switch (vectorization) { - default: - return linalg::SparseVectorizationStrategy::kNone; - case 1: - return linalg::SparseVectorizationStrategy::kDenseInnerLoop; - case 2: - return linalg::SparseVectorizationStrategy::kAnyStorageInnerLoop; - } - } - - /// Returns the requested integer type. - linalg::SparseIntType typeOption(int32_t option) { - switch (option) { - default: - return linalg::SparseIntType::kNative; - case 1: - return linalg::SparseIntType::kI64; - case 2: - return linalg::SparseIntType::kI32; - case 3: - return linalg::SparseIntType::kI16; - case 4: - return linalg::SparseIntType::kI8; - } - } - - /// Runs the test on a function. - void runOnOperation() override { - auto *ctx = &getContext(); - RewritePatternSet patterns(ctx); - // Translate strategy flags to strategy options. - linalg::SparsificationOptions options(parallelOption(), vectorOption(), - vectorLength, typeOption(ptrType), - typeOption(indType), fastOutput); - // Apply rewriting. - linalg::populateSparsificationPatterns(patterns, options); - vector::populateVectorToVectorCanonicalizationPatterns(patterns); - (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns)); - // Lower sparse primitives to calls into runtime support library.
- if (lower) { - RewritePatternSet conversionPatterns(ctx); - ConversionTarget target(*ctx); - target.addIllegalOp< - sparse_tensor::FromPointerOp, sparse_tensor::ToPointersOp, - sparse_tensor::ToIndicesOp, sparse_tensor::ToValuesOp>(); - target.addLegalOp<CallOp>(); - sparse_tensor::populateSparsificationConversionPatterns( - conversionPatterns); - if (failed(applyPartialConversion(getOperation(), target, - std::move(conversionPatterns)))) - signalPassFailure(); - } - } -}; - -} // end anonymous namespace - -namespace mlir { -namespace test { - -void registerTestSparsification() { - PassRegistration<TestSparsification> sparsificationPass( - "test-sparsification", "Test automatic generation of sparse tensor code"); -} - -} // namespace test -} // namespace mlir diff --git a/mlir/tools/mlir-opt/mlir-opt.cpp b/mlir/tools/mlir-opt/mlir-opt.cpp --- a/mlir/tools/mlir-opt/mlir-opt.cpp +++ b/mlir/tools/mlir-opt/mlir-opt.cpp @@ -99,7 +99,6 @@ void registerTestPreparationPassWithAllowedMemrefResults(); void registerTestRecursiveTypesPass(); void registerTestSCFUtilsPass(); -void registerTestSparsification(); void registerTestVectorConversions(); } // namespace test } // namespace mlir @@ -177,7 +176,6 @@ test::registerTestPDLByteCodePass(); test::registerTestRecursiveTypesPass(); test::registerTestSCFUtilsPass(); - test::registerTestSparsification(); test::registerTestVectorConversions(); } #endif
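With the test-only pass removed from mlir-opt above, downstream tools pick up the real passes through the generated registration hook instead, so `-sparsification` and `-sparse-tensor-conversion` are available wherever `registerAllPasses()` (or the hook directly) is called. A hypothetical tool-side sketch:

```c++
// Hypothetical downstream-tool snippet: registerSparseTensorPasses() is the
// hook generated by GEN_PASS_REGISTRATION in Passes.h, also pulled in
// globally via registerAllPasses() in InitAllPasses.h above.
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"

void registerMyToolPasses() {
  mlir::registerSparseTensorPasses();
}
```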