diff --git a/mlir/include/mlir/Dialect/CMakeLists.txt b/mlir/include/mlir/Dialect/CMakeLists.txt
--- a/mlir/include/mlir/Dialect/CMakeLists.txt
+++ b/mlir/include/mlir/Dialect/CMakeLists.txt
@@ -17,6 +17,7 @@
 add_subdirectory(Quant)
 add_subdirectory(SCF)
 add_subdirectory(Shape)
+add_subdirectory(SparseTensor)
 add_subdirectory(SPIRV)
 add_subdirectory(StandardOps)
 add_subdirectory(Tensor)
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/CMakeLists.txt b/mlir/include/mlir/Dialect/Linalg/IR/CMakeLists.txt
--- a/mlir/include/mlir/Dialect/Linalg/IR/CMakeLists.txt
+++ b/mlir/include/mlir/Dialect/Linalg/IR/CMakeLists.txt
@@ -80,12 +80,6 @@
 add_dependencies(MLIRLinalgStructuredOpsIncGen LinalgOdsGen)
 add_dependencies(mlir-headers MLIRLinalgStructuredOpsIncGen)
 
-set(LLVM_TARGET_DEFINITIONS LinalgSparseOps.td)
-mlir_tablegen(LinalgSparseOps.h.inc -gen-op-decls)
-mlir_tablegen(LinalgSparseOps.cpp.inc -gen-op-defs)
-add_public_tablegen_target(MLIRLinalgSparseOpsIncGen)
-add_dependencies(mlir-headers MLIRLinalgSparseOpsIncGen)
-
 set(LLVM_TARGET_DEFINITIONS LinalgInterfaces.td)
 mlir_tablegen(LinalgInterfaces.h.inc -gen-op-interface-decls)
 mlir_tablegen(LinalgInterfaces.cpp.inc -gen-op-interface-defs)
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h
@@ -127,7 +127,4 @@
 #define GET_OP_CLASSES
 #include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.h.inc"
 
-#define GET_OP_CLASSES
-#include "mlir/Dialect/Linalg/IR/LinalgSparseOps.h.inc"
-
 #endif // MLIR_DIALECT_LINALG_LINALGOPS_H_
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -1129,9 +1129,6 @@
     RewritePatternSet &patterns,
     const SparsificationOptions &options = SparsificationOptions());
 
-/// Sets up sparsification conversion rules with the given options.
-void populateSparsificationConversionPatterns(RewritePatternSet &patterns);
-
 } // namespace linalg
 } // namespace mlir
diff --git a/mlir/include/mlir/Dialect/SparseTensor/CMakeLists.txt b/mlir/include/mlir/Dialect/SparseTensor/CMakeLists.txt
new file mode 100644
--- /dev/null
+++ b/mlir/include/mlir/Dialect/SparseTensor/CMakeLists.txt
@@ -0,0 +1 @@
+add_subdirectory(IR)
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/CMakeLists.txt b/mlir/include/mlir/Dialect/SparseTensor/IR/CMakeLists.txt
new file mode 100644
--- /dev/null
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/CMakeLists.txt
@@ -0,0 +1,2 @@
+add_mlir_dialect(SparseTensorOps sparse_tensor)
+add_mlir_doc(SparseTensorOps SparseTensorOps Dialects/ -gen-dialect-doc)
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
new file mode 100644
--- /dev/null
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
@@ -0,0 +1,23 @@
+//===- SparseTensor.h - Sparse tensor dialect -------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_SPARSETENSOR_IR_SPARSETENSOR_H_
+#define MLIR_DIALECT_SPARSETENSOR_IR_SPARSETENSOR_H_
+
+#include "mlir/IR/BuiltinTypes.h"
+#include "mlir/IR/Dialect.h"
+#include "mlir/IR/OpDefinition.h"
+#include "mlir/IR/OpImplementation.h"
+#include "mlir/Interfaces/SideEffectInterfaces.h"
+
+#define GET_OP_CLASSES
+#include "mlir/Dialect/SparseTensor/IR/SparseTensorOps.h.inc"
+
+#include "mlir/Dialect/SparseTensor/IR/SparseTensorOpsDialect.h.inc"
+
+#endif // MLIR_DIALECT_SPARSETENSOR_IR_SPARSETENSOR_H_
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorBase.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorBase.td
new file mode 100644
--- /dev/null
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorBase.td
@@ -0,0 +1,29 @@
+//===- SparseTensorBase.td - Sparse tensor dialect base ----*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SPARSETENSOR_BASE
+#define SPARSETENSOR_BASE
+
+include "mlir/IR/OpBase.td"
+
+def SparseTensor_Dialect : Dialect {
+  let name = "sparse_tensor";
+  let cppNamespace = "::mlir::sparse_tensor";
+  let description = [{
+    The `sparse tensor` dialect is intended to hold primitives that
+    form a bridge between high-level operations on sparse tensors
+    and lower-level operations on the actual sparse storage schemes
+    consisting of pointers, indices, and values. This bridge
+    simplifies a `sparse compiler` pass by postponing actual
+    code generation for the supported primitives to a later phase,
+    either by generating calls into a runtime support library
+    or by further lowering the primitives into actual code.
+  }];
+}
+
+#endif // SPARSETENSOR_BASE
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgSparseOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
rename from mlir/include/mlir/Dialect/Linalg/IR/LinalgSparseOps.td
rename to mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgSparseOps.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
@@ -1,49 +1,27 @@
-//===- LinalgSparseOps.td - Linalg dialect sparse ops ------*- tablegen -*-===//
+//===- SparseTensorOps.td - Sparse tensor dialect ops ------*- tablegen -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
-//
-// The following operations bootstrap working with sparse tensors solely
-// within the Linalg dialect. They provide temporary bridges between a
-// future SparseTensorType (now an opaque pointer), the actual TensorType,
-// and MemRef arrays underlying an actual sparse storage scheme in memory.
-//
-// Lacking a proper sparse tensor type, the 'sparse_tensor' operation
-// provides a bridge between an opaque pointer and a regular tensor type
-// just to simplify feeding the value into a Linalg op. The operation
-// simply disappears during lowering.
-//
-// The other operations form the bridge between the opaque pointer and
-// the actual storage of pointers, indices, and values. These operations
-// resemble 'buffer_cast' in the sense that they map tensors to
-// their bufferized memrefs, but they lower into actual calls since
-// sparse storage does not bufferize into a single memrefs, as dense
-// tensors do, but into a hierarchical storage scheme where pointers
-// access memrefs with indices and eventually into values.
-//
-// TODO: introduce SparseTensorType as first class citizen in MLIR
-//
-//===----------------------------------------------------------------------===//
 
-#ifndef LINALG_SPARSE_OPS
-#define LINALG_SPARSE_OPS
+#ifndef SPARSETENSOR_OPS
+#define SPARSETENSOR_OPS
 
-include "mlir/Dialect/Linalg/IR/LinalgBase.td"
+include "mlir/Dialect/SparseTensor/IR/SparseTensorBase.td"
 include "mlir/Interfaces/SideEffectInterfaces.td"
 
 // Base class.
-class Linalg_SparseOp<string mnemonic, list<OpTrait> traits = []>
-    : Op<Linalg_Dialect, mnemonic, traits> {
+class SparseTensor_Op<string mnemonic, list<OpTrait> traits = []>
+    : Op<SparseTensor_Dialect, mnemonic, traits> {
   let printer = [{ return ::print(p, *this); }];
   let verifier = ?;
   let parser = [{ return ::parse$cppClass(parser, result); }];
 }
 
-def Linalg_SparseTensorFromPointerOp :
-    Linalg_SparseOp<"sparse_tensor">,
+// TODO: remove me
+def SparseTensor_FromPointerOp : SparseTensor_Op<"fromPtr">,
     Arguments<(ins AnyType:$ptr)>,
     Results<(outs AnyTensor:$result)> {
   let summary = "Views an opaque sparse tensor pointer as a tensor";
@@ -60,14 +38,13 @@
     ```mlir
     !SparseTensor = type !llvm.ptr<i8>
-    %0 = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<64x64xf64>
+    %0 = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<64x64xf64>
     ```
   }];
   let assemblyFormat = "$ptr attr-dict `:` type($ptr) `to` type($result)";
 }
 
-def Linalg_SparseTensorToPointersMemRefOp :
-    Linalg_SparseOp<"sparse_pointers", [NoSideEffect]>,
+def SparseTensor_ToPointersOp : SparseTensor_Op<"pointers", [NoSideEffect]>,
     Arguments<(ins AnyTensor:$tensor, Index:$dim)>,
     Results<(outs AnyStridedMemRefOfRank<1>:$result)> {
   let summary = "Extract pointers array at given dimension from a tensor";
@@ -83,15 +60,14 @@
     Example:
 
     ```mlir
-    %1 = linalg.sparse_pointers %0, %c1 : tensor<64x64xf64> to memref<?xindex>
+    %1 = sparse_tensor.pointers %0, %c1 : tensor<64x64xf64> to memref<?xindex>
     ```
   }];
   let assemblyFormat = "$tensor `,` $dim attr-dict `:` type($tensor)"
       " `to` type($result)";
 }
 
-def Linalg_SparseTensorToIndicesMemRefOp :
-    Linalg_SparseOp<"sparse_indices", [NoSideEffect]>,
+def SparseTensor_ToIndicesOp : SparseTensor_Op<"indices", [NoSideEffect]>,
     Arguments<(ins AnyTensor:$tensor, Index:$dim)>,
     Results<(outs AnyStridedMemRefOfRank<1>:$result)> {
   let summary = "Extract indices array at given dimension from a tensor";
@@ -107,15 +83,14 @@
     Example:
 
     ```mlir
-    %1 = linalg.sparse_indices %0, %c1 : tensor<64x64xf64> to memref<?xindex>
+    %1 = sparse_tensor.indices %0, %c1 : tensor<64x64xf64> to memref<?xindex>
    ```
  }];
   let assemblyFormat = "$tensor `,` $dim attr-dict `:` type($tensor)"
       " `to` type($result)";
 }
 
-def Linalg_SparseTensorToValuesMemRefOp :
-    Linalg_SparseOp<"sparse_values", [NoSideEffect]>,
+def SparseTensor_ToValuesOp : SparseTensor_Op<"values", [NoSideEffect]>,
     Arguments<(ins AnyTensor:$tensor)>,
     Results<(outs AnyStridedMemRefOfRank<1>:$result)> {
   let summary = "Extract numerical values array from a tensor";
@@ -131,10 +106,10 @@
     Example:
 
     ```mlir
-    %1 = linalg.sparse_values %0 : tensor<64x64xf64> to memref<?xf64>
+    %1 = sparse_tensor.values %0 : tensor<64x64xf64> to memref<?xf64>
     ```
   }];
   let assemblyFormat = "$tensor attr-dict `:` type($tensor) `to` type($result)";
 }
 
-#endif // LINALG_SPARSE_OPS
+#endif // SPARSETENSOR_OPS
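Taken together, the four primitives cover the whole bridge: one cast from the opaque handle into tensor land, and three accessors into the underlying storage arrays. A minimal sketch of how they compose, assuming a 64x64 CSR-like matrix of f64 as in the op documentation above (the `@csr_storage` wrapper is illustrative only, not part of the patch):

```mlir
!SparseTensor = type !llvm.ptr<i8>

func @csr_storage(%arg0: !SparseTensor) {
  %c1 = constant 1 : index
  // Bridge the opaque pointer into tensor land (erased during lowering).
  %t = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<64x64xf64>
  // Extract the storage arrays of the innermost (compressed) dimension.
  %pointers = sparse_tensor.pointers %t, %c1 : tensor<64x64xf64> to memref<?xindex>
  %indices = sparse_tensor.indices %t, %c1 : tensor<64x64xf64> to memref<?xindex>
  %values = sparse_tensor.values %t : tensor<64x64xf64> to memref<?xf64>
  return
}
```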
diff --git a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Transforms.h b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Transforms.h
new file mode 100644
--- /dev/null
+++ b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Transforms.h
@@ -0,0 +1,23 @@
+//===- Transforms.h - Sparse tensor transformations -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_TRANSFORMS_H_
+#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_TRANSFORMS_H_
+
+#include "mlir/IR/PatternMatch.h"
+
+namespace mlir {
+namespace sparse_tensor {
+
+/// Sets up sparsification conversion rules with the given options.
+void populateSparsificationConversionPatterns(RewritePatternSet &patterns);
+
+} // namespace sparse_tensor
+} // namespace mlir
+
+#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_TRANSFORMS_H_
diff --git a/mlir/include/mlir/InitAllDialects.h b/mlir/include/mlir/InitAllDialects.h
--- a/mlir/include/mlir/InitAllDialects.h
+++ b/mlir/include/mlir/InitAllDialects.h
@@ -37,6 +37,7 @@
 #include "mlir/Dialect/SDBM/SDBMDialect.h"
 #include "mlir/Dialect/SPIRV/IR/SPIRVDialect.h"
 #include "mlir/Dialect/Shape/IR/Shape.h"
+#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
 #include "mlir/Dialect/StandardOps/IR/Ops.h"
 #include "mlir/Dialect/Tensor/IR/Tensor.h"
 #include "mlir/Dialect/Tosa/IR/TosaOps.h"
@@ -74,6 +75,7 @@
                   ROCDL::ROCDLDialect,
                   SDBMDialect,
                   shape::ShapeDialect,
+                  sparse_tensor::SparseTensorDialect,
                   tensor::TensorDialect,
                   tosa::TosaDialect,
                   x86vector::X86VectorDialect>();
diff --git a/mlir/lib/Dialect/CMakeLists.txt b/mlir/lib/Dialect/CMakeLists.txt
--- a/mlir/lib/Dialect/CMakeLists.txt
+++ b/mlir/lib/Dialect/CMakeLists.txt
@@ -18,6 +18,7 @@
 add_subdirectory(SCF)
 add_subdirectory(SDBM)
 add_subdirectory(Shape)
+add_subdirectory(SparseTensor)
 add_subdirectory(SPIRV)
 add_subdirectory(StandardOps)
 add_subdirectory(Tensor)
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -2384,9 +2384,6 @@
 #define GET_OP_CLASSES
 #include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
 
-#define GET_OP_CLASSES
-#include "mlir/Dialect/Linalg/IR/LinalgSparseOps.cpp.inc"
-
 /// Return the dims that are `iteratorTypeName` loops in the LinalgOp `op`.
 /// Assumes `op` is a LinalgOp.
 void mlir::linalg::getDimsOfType(Operation *op, StringRef iteratorTypeName,
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgTypes.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgTypes.cpp
--- a/mlir/lib/Dialect/Linalg/IR/LinalgTypes.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgTypes.cpp
@@ -99,10 +99,6 @@
 #define GET_OP_LIST
 #include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
       >();
-  addOperations<
-#define GET_OP_LIST
-#include "mlir/Dialect/Linalg/IR/LinalgSparseOps.cpp.inc"
-      >();
 
   // Fill the Linalg-specific OpName to RegionBuilder map.
   addNamedOpBuilders<
diff --git a/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt b/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt
--- a/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt
@@ -11,7 +11,6 @@
   Interchange.cpp
   Loops.cpp
   Promotion.cpp
-  SparseLowering.cpp
   Sparsification.cpp
   Tiling.cpp
   Transforms.cpp
@@ -38,6 +37,7 @@
   MLIRSCF
   MLIRSCFTransforms
   MLIRPass
+  MLIRSparseTensor
   MLIRStandard
   MLIRStandardOpsTransforms
   MLIRStandardToLLVM
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Sparsification.cpp b/mlir/lib/Dialect/Linalg/Transforms/Sparsification.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Sparsification.cpp
@@ -45,6 +45,7 @@
 #include "mlir/Dialect/Linalg/Transforms/Transforms.h"
 #include "mlir/Dialect/Linalg/Utils/Utils.h"
 #include "mlir/Dialect/SCF/SCF.h"
+#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
 #include "mlir/Dialect/StandardOps/IR/Ops.h"
 #include "mlir/IR/Matchers.h"
@@ -360,7 +361,7 @@
 /// Returns true if tensor was set up with sparse storage scheme.
 static bool linkedSparse(linalg::GenericOp op, unsigned tensor) {
   if (tensor < op.getNumInputs())
-    return isa_and_nonnull<linalg::SparseTensorFromPointerOp>(
+    return isa_and_nonnull<sparse_tensor::FromPointerOp>(
         op.getInput(tensor).getDefiningOp());
   return false;
 }
@@ -576,12 +577,10 @@
           dynShape, genIntType(rewriter, codegen.options.indType));
       Value dim = rewriter.create<ConstantIndexOp>(loc, d);
       // Generate sparse primitives to obtain pointer and indices.
-      codegen.pointers[t][i] =
-          rewriter.create<linalg::SparseTensorToPointersMemRefOp>(
-              loc, ptrTp, tensor, dim);
-      codegen.indices[t][i] =
-          rewriter.create<linalg::SparseTensorToIndicesMemRefOp>(loc, indTp,
-                                                                 tensor, dim);
+      codegen.pointers[t][i] = rewriter.create<sparse_tensor::ToPointersOp>(
+          loc, ptrTp, tensor, dim);
+      codegen.indices[t][i] = rewriter.create<sparse_tensor::ToIndicesOp>(
+          loc, indTp, tensor, dim);
     }
     // Find lower and upper bound in current dimension.
     Value up;
@@ -608,8 +607,7 @@
       auto dynShape = {ShapedType::kDynamicSize};
       auto sparseTp = MemRefType::get(dynShape, tensorType.getElementType());
       codegen.buffers[t] =
-          rewriter.create<linalg::SparseTensorToValuesMemRefOp>(loc, sparseTp,
-                                                                tensor);
+          rewriter.create<sparse_tensor::ToValuesOp>(loc, sparseTp, tensor);
     }
   }
 }
diff --git a/mlir/lib/Dialect/SparseTensor/CMakeLists.txt b/mlir/lib/Dialect/SparseTensor/CMakeLists.txt
new file mode 100644
--- /dev/null
+++ b/mlir/lib/Dialect/SparseTensor/CMakeLists.txt
@@ -0,0 +1,2 @@
+add_subdirectory(IR)
+add_subdirectory(Transforms)
diff --git a/mlir/lib/Dialect/SparseTensor/IR/CMakeLists.txt b/mlir/lib/Dialect/SparseTensor/IR/CMakeLists.txt
new file mode 100644
--- /dev/null
+++ b/mlir/lib/Dialect/SparseTensor/IR/CMakeLists.txt
@@ -0,0 +1,13 @@
+add_mlir_dialect_library(MLIRSparseTensor
+  SparseTensorDialect.cpp
+
+  ADDITIONAL_HEADER_DIRS
+  ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/SparseTensor
+
+  DEPENDS
+  MLIRSparseTensorOpsIncGen
+
+  LINK_LIBS PUBLIC
+  MLIRDialect
+  MLIRIR
+  )
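The Sparsification.cpp change above only swaps which dialect's ops the codegen emits; the loop structure built around them stays the same. A minimal sketch of the access pattern those primitives feed, assuming a 1-D vector of f32 compressed in dimension 0 (the `@traverse` wrapper and the loop body are illustrative only, not part of the patch):

```mlir
func @traverse(%t: tensor<32xf32>) {
  %c0 = constant 0 : index
  %c1 = constant 1 : index
  %pointers = sparse_tensor.pointers %t, %c0 : tensor<32xf32> to memref<?xindex>
  %indices = sparse_tensor.indices %t, %c0 : tensor<32xf32> to memref<?xindex>
  %values = sparse_tensor.values %t : tensor<32xf32> to memref<?xf32>
  // pointers[d] and pointers[d+1] bound the nonzeros of the compressed dim.
  %lo = memref.load %pointers[%c0] : memref<?xindex>
  %hi = memref.load %pointers[%c1] : memref<?xindex>
  scf.for %k = %lo to %hi step %c1 {
    %i = memref.load %indices[%k] : memref<?xindex> // expanded index
    %v = memref.load %values[%k] : memref<?xf32>    // nonzero value
    // ... consume (%i, %v) in the loop body ...
  }
  return
}
```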
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
new file mode 100644
--- /dev/null
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -0,0 +1,25 @@
+//===- SparseTensorDialect.cpp - Sparse tensor dialect implementation -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
+
+#include "mlir/IR/Builders.h"
+#include "mlir/IR/OpImplementation.h"
+
+using namespace mlir;
+using namespace mlir::sparse_tensor;
+
+void SparseTensorDialect::initialize() {
+  addOperations<
+#define GET_OP_LIST
+#include "mlir/Dialect/SparseTensor/IR/SparseTensorOps.cpp.inc"
+      >();
+}
+
+#define GET_OP_CLASSES
+#include "mlir/Dialect/SparseTensor/IR/SparseTensorOps.cpp.inc"
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CMakeLists.txt b/mlir/lib/Dialect/SparseTensor/Transforms/CMakeLists.txt
new file mode 100644
--- /dev/null
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CMakeLists.txt
@@ -0,0 +1,14 @@
+add_mlir_dialect_library(MLIRSparseTensorTransforms
+  SparseTensorLowering.cpp
+
+  ADDITIONAL_HEADER_DIRS
+  ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/SparseTensor
+
+  LINK_LIBS PUBLIC
+  MLIRIR
+  MLIRLLVMIR
+  MLIRPass
+  MLIRStandard
+  MLIRSparseTensor
+  MLIRTransforms
+)
diff --git a/mlir/lib/Dialect/Linalg/Transforms/SparseLowering.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorLowering.cpp
rename from mlir/lib/Dialect/Linalg/Transforms/SparseLowering.cpp
rename to mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorLowering.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/SparseLowering.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorLowering.cpp
@@ -1,14 +1,25 @@
-//===- SparseLowering.cpp - Lowers sparse primitives to library calls. ---===//
+//===- SparseTensorLowering.cpp - Sparse tensor primitives lowering -------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
+//
+// Lower sparse tensor primitives to calls into a runtime support library.
+// Note that this is a current implementation choice to keep the lowering
+// simple. In principle, these primitives could also be lowered to actual
+// elaborate IR code that implements the primitives on the selected sparse
+// tensor storage schemes.
+//
+//===----------------------------------------------------------------------===//
 
 #include "mlir/Dialect/LLVMIR/LLVMTypes.h"
-#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
-#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
+#include "mlir/Dialect/MemRef/IR/MemRef.h"
+#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
+#include "mlir/Dialect/SparseTensor/Transforms/Transforms.h"
+#include "mlir/Dialect/StandardOps/IR/Ops.h"
+#include "mlir/Transforms/DialectConversion.h"
 
 using namespace mlir;
 
@@ -32,11 +43,10 @@
 /// Sparse conversion rule to remove opaque pointer cast.
 class TensorFromPointerConverter
-    : public OpConversionPattern<linalg::SparseTensorFromPointerOp> {
+    : public OpConversionPattern<sparse_tensor::FromPointerOp> {
   using OpConversionPattern::OpConversionPattern;
   LogicalResult
-  matchAndRewrite(linalg::SparseTensorFromPointerOp op,
-                  ArrayRef<Value> operands,
+  matchAndRewrite(sparse_tensor::FromPointerOp op, ArrayRef<Value> operands,
                   ConversionPatternRewriter &rewriter) const override {
     rewriter.replaceOp(op, operands[0]);
     return success();
   }
 };
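For a rough picture of what these conversion rules produce: a sketch, assuming the tensor `%t` was bridged from an opaque handle `%ptr` via `sparse_tensor.fromPtr`; the runtime entry-point name `@sparsePointers64` and its signature are assumed stand-ins for illustration, not taken from this patch.

```mlir
// Input:  %p = sparse_tensor.pointers %t, %c1 : tensor<64x64xf64> to memref<?xindex>
// Output: a call into the runtime support library (callee name assumed):
%p = call @sparsePointers64(%ptr, %c1) : (!llvm.ptr<i8>, index) -> memref<?xindex>
```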
@@ -62,12 +72,11 @@
 /// Sparse conversion rule for pointer accesses.
 class TensorToPointersConverter
-    : public OpConversionPattern<linalg::SparseTensorToPointersMemRefOp> {
+    : public OpConversionPattern<sparse_tensor::ToPointersOp> {
 public:
   using OpConversionPattern::OpConversionPattern;
   LogicalResult
-  matchAndRewrite(linalg::SparseTensorToPointersMemRefOp op,
-                  ArrayRef<Value> operands,
+  matchAndRewrite(sparse_tensor::ToPointersOp op, ArrayRef<Value> operands,
                   ConversionPatternRewriter &rewriter) const override {
     Type resType = op.getType();
     Type eltType = resType.cast<ShapedType>().getElementType();
@@ -90,12 +99,11 @@
 /// Sparse conversion rule for index accesses.
 class TensorToIndicesConverter
-    : public OpConversionPattern<linalg::SparseTensorToIndicesMemRefOp> {
+    : public OpConversionPattern<sparse_tensor::ToIndicesOp> {
 public:
   using OpConversionPattern::OpConversionPattern;
   LogicalResult
-  matchAndRewrite(linalg::SparseTensorToIndicesMemRefOp op,
-                  ArrayRef<Value> operands,
+  matchAndRewrite(sparse_tensor::ToIndicesOp op, ArrayRef<Value> operands,
                   ConversionPatternRewriter &rewriter) const override {
     Type resType = op.getType();
     Type eltType = resType.cast<ShapedType>().getElementType();
@@ -118,12 +126,11 @@
 /// Sparse conversion rule for value accesses.
 class TensorToValuesConverter
-    : public OpConversionPattern<linalg::SparseTensorToValuesMemRefOp> {
+    : public OpConversionPattern<sparse_tensor::ToValuesOp> {
 public:
   using OpConversionPattern::OpConversionPattern;
   LogicalResult
-  matchAndRewrite(linalg::SparseTensorToValuesMemRefOp op,
-                  ArrayRef<Value> operands,
+  matchAndRewrite(sparse_tensor::ToValuesOp op, ArrayRef<Value> operands,
                   ConversionPatternRewriter &rewriter) const override {
     Type resType = op.getType();
     Type eltType = resType.cast<ShapedType>().getElementType();
@@ -150,7 +157,7 @@
 /// Populates the given patterns list with conversion rules required for
 /// the sparsification of linear algebra operations.
-void linalg::populateSparsificationConversionPatterns(
+void sparse_tensor::populateSparsificationConversionPatterns(
     RewritePatternSet &patterns) {
   patterns.add<TensorFromPointerConverter, TensorToDimSizeConverter,
                TensorToPointersConverter, TensorToIndicesConverter,
                TensorToValuesConverter>(patterns.getContext());
diff --git a/mlir/test/Dialect/Linalg/sparse_1d.mlir b/mlir/test/Dialect/Linalg/sparse_1d.mlir
--- a/mlir/test/Dialect/Linalg/sparse_1d.mlir
+++ b/mlir/test/Dialect/Linalg/sparse_1d.mlir
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
 // CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32>
 // CHECK: %[[VAL_11:.*]] = memref.alloc() : memref<32xf32>
 // CHECK: linalg.copy(%[[VAL_10]], %[[VAL_11]]) : memref<32xf32>, memref<32xf32>
@@ -148,9 +148,9 @@
 // CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf32>) -> tensor<32xf32> {
 // CHECK: %[[VAL_2:.*]] = constant 0 : index
 // CHECK: %[[VAL_3:.*]] = constant 1 : index
-// CHECK: %[[VAL_4:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_2]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_5:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_2]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_2]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_2]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
 // CHECK: %[[VAL_7:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32xf32>
 // CHECK: %[[VAL_8:.*]] = memref.alloc() : memref<32xf32>
 // CHECK: linalg.copy(%[[VAL_7]], %[[VAL_8]]) : memref<32xf32>, memref<32xf32>
@@ -189,9 +189,9 @@
 // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> {
 // CHECK: %[[VAL_3:.*]] =
constant 0 : index // CHECK: %[[VAL_4:.*]] = constant 1 : index -// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32> to memref // CHECK: %[[VAL_8:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32> // CHECK: %[[VAL_9:.*]] = memref.alloc() : memref<32xf32> // CHECK: linalg.copy(%[[VAL_8]], %[[VAL_9]]) : memref<32xf32>, memref<32xf32> @@ -320,9 +320,9 @@ // CHECK: %[[VAL_5:.*]] = constant true // CHECK: %[[VAL_6:.*]] = constant 1 : index // CHECK: %[[VAL_7:.*]] = memref.buffer_cast %[[VAL_0]] : memref<32xf32> -// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<32xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<32xf32> to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<32xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<32xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32> to memref // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32> // CHECK: %[[VAL_12:.*]] = memref.alloc() : memref<32xf32> // CHECK: linalg.copy(%[[VAL_11]], %[[VAL_12]]) : memref<32xf32>, memref<32xf32> @@ -378,9 +378,9 @@ // CHECK: %[[VAL_3:.*]] = constant 0 : index // CHECK: %[[VAL_4:.*]] = constant 1 : index // CHECK: %[[VAL_5:.*]] = memref.buffer_cast %[[VAL_0]] : memref<32xf32> -// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32> to memref // CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32> // CHECK: %[[VAL_10:.*]] = memref.alloc() : memref<32xf32> // CHECK: linalg.copy(%[[VAL_9]], %[[VAL_10]]) : memref<32xf32>, memref<32xf32> @@ -430,9 +430,9 @@ // CHECK: %[[VAL_4:.*]] = constant 0 : index // CHECK: %[[VAL_5:.*]] = constant true // CHECK: %[[VAL_6:.*]] = constant 1 : index -// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32> to memref // CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_1]] : 
memref<32xf32> // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32> // CHECK: %[[VAL_12:.*]] = memref.alloc() : memref<32xf32> @@ -488,9 +488,9 @@ // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK: %[[VAL_3:.*]] = constant 0 : index // CHECK: %[[VAL_4:.*]] = constant 1 : index -// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32> to memref // CHECK: %[[VAL_8:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32xf32> // CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32> // CHECK: %[[VAL_10:.*]] = memref.alloc() : memref<32xf32> @@ -539,12 +539,12 @@ // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK: %[[VAL_3:.*]] = constant 0 : index // CHECK: %[[VAL_4:.*]] = constant 1 : index -// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32xf32> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32> to memref // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32> // CHECK: %[[VAL_12:.*]] = memref.alloc() : memref<32xf32> // CHECK: linalg.copy(%[[VAL_11]], %[[VAL_12]]) : memref<32xf32>, memref<32xf32> @@ -623,12 +623,12 @@ // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK: %[[VAL_3:.*]] = constant 0 : index // CHECK: %[[VAL_4:.*]] = constant 1 : index -// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32xf32> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], 
%[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32> to memref // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32> // CHECK: %[[VAL_12:.*]] = memref.alloc() : memref<32xf32> // CHECK: linalg.copy(%[[VAL_11]], %[[VAL_12]]) : memref<32xf32>, memref<32xf32> @@ -701,12 +701,12 @@ // CHECK-SAME: %[[VAL_3:.*3]]: tensor<16xf32>) -> tensor<16xf32> { // CHECK: %[[VAL_4:.*]] = constant 0 : index // CHECK: %[[VAL_5:.*]] = constant 1 : index -// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<16xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<16xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32> to memref // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_3]] : memref<16xf32> // CHECK: %[[VAL_13:.*]] = memref.alloc() : memref<16xf32> // CHECK: linalg.copy(%[[VAL_12]], %[[VAL_13]]) : memref<16xf32>, memref<16xf32> @@ -794,12 +794,12 @@ // CHECK-SAME: %[[VAL_3:.*3]]: tensor<16xf32>) -> tensor<16xf32> { // CHECK: %[[VAL_4:.*]] = constant 0 : index // CHECK: %[[VAL_5:.*]] = constant 1 : index -// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<16xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<16xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32> to memref // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_3]] : 
memref<16xf32> // CHECK: %[[VAL_13:.*]] = memref.alloc() : memref<16xf32> // CHECK: linalg.copy(%[[VAL_12]], %[[VAL_13]]) : memref<16xf32>, memref<16xf32> @@ -898,8 +898,8 @@ // CHECK-SAME: %[[VAL_1:.*]]: tensor) -> tensor { // CHECK: %[[VAL_2:.*]] = constant 0 : index // CHECK: %[[VAL_3:.*]] = constant 1 : index -// CHECK: %[[VAL_4:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_2]] : tensor to memref -// CHECK: %[[VAL_5:.*]] = linalg.sparse_values %[[VAL_0]] : tensor to memref +// CHECK: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_2]] : tensor to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_0]] : tensor to memref // CHECK: %[[VAL_6:.*]] = memref.buffer_cast %[[VAL_1]] : memref // CHECK: %[[VAL_7:.*]] = memref.alloc() : memref // CHECK: linalg.copy(%[[VAL_6]], %[[VAL_7]]) : memref, memref @@ -947,12 +947,12 @@ // CHECK-SAME: %[[VAL_2:.*2]]: tensor) -> tensor { // CHECK: %[[VAL_3:.*]] = constant 0 : index // CHECK: %[[VAL_4:.*]] = constant 1 : index -// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<16xf32> to memref -// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<16xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<16xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<16xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<16xf32> to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<16xf32> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<16xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<16xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<16xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<16xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32> to memref // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref // CHECK: %[[VAL_12:.*]] = memref.alloc() : memref // CHECK: linalg.copy(%[[VAL_11]], %[[VAL_12]]) : memref, memref @@ -1062,13 +1062,13 @@ // CHECK-SAME: %[[VAL_3:.*3]]: tensor) -> tensor { // CHECK: %[[VAL_4:.*]] = constant 0 : index // CHECK: %[[VAL_5:.*]] = constant 1 : index -// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<16xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32> to memref // CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_2]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_2]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_12:.*]] = linalg.sparse_values %[[VAL_2]] : tensor<16xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_2]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_11:.*]] = 
sparse_tensor.indices %[[VAL_2]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<16xf32> to memref // CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_3]] : memref // CHECK: %[[VAL_14:.*]] = memref.alloc() : memref // CHECK: linalg.copy(%[[VAL_13]], %[[VAL_14]]) : memref, memref @@ -1189,13 +1189,13 @@ // CHECK: %[[VAL_6:.*]] = constant true // CHECK: %[[VAL_7:.*]] = constant 1 : index // CHECK: %[[VAL_8:.*]] = memref.buffer_cast %[[VAL_0]] : memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_5]] : tensor to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_5]] : tensor to memref -// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_1]] : tensor to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_5]] : tensor to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_5]] : tensor to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_2]] : memref -// CHECK: %[[VAL_13:.*]] = linalg.sparse_pointers %[[VAL_3]], %[[VAL_5]] : tensor to memref -// CHECK: %[[VAL_14:.*]] = linalg.sparse_indices %[[VAL_3]], %[[VAL_5]] : tensor to memref -// CHECK: %[[VAL_15:.*]] = linalg.sparse_values %[[VAL_3]] : tensor to memref +// CHECK: %[[VAL_13:.*]] = sparse_tensor.pointers %[[VAL_3]], %[[VAL_5]] : tensor to memref +// CHECK: %[[VAL_14:.*]] = sparse_tensor.indices %[[VAL_3]], %[[VAL_5]] : tensor to memref +// CHECK: %[[VAL_15:.*]] = sparse_tensor.values %[[VAL_3]] : tensor to memref // CHECK: %[[VAL_16:.*]] = memref.dim %[[VAL_4]], %[[VAL_5]] : tensor // CHECK: %[[VAL_17:.*]] = memref.buffer_cast %[[VAL_4]] : memref // CHECK: %[[VAL_18:.*]] = memref.alloc(%[[VAL_16]]) : memref @@ -1371,15 +1371,15 @@ // CHECK-SAME: %[[VAL_3:.*3]]: tensor) -> tensor { // CHECK: %[[VAL_4:.*]] = constant 0 : index // CHECK: %[[VAL_5:.*]] = constant 1 : index -// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor to memref -// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_1]] : tensor to memref -// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_2]], %[[VAL_4]] : tensor to memref -// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_2]], %[[VAL_4]] : tensor to memref -// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_2]] : tensor to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref +// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_2]], %[[VAL_4]] : tensor to memref +// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_2]], %[[VAL_4]] : tensor to memref +// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_2]] 
: tensor to memref // CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_3]] : memref // CHECK: %[[VAL_16:.*]] = memref.alloc() : memref // CHECK: linalg.copy(%[[VAL_15]], %[[VAL_16]]) : memref, memref diff --git a/mlir/test/Dialect/Linalg/sparse_2d.mlir b/mlir/test/Dialect/Linalg/sparse_2d.mlir --- a/mlir/test/Dialect/Linalg/sparse_2d.mlir +++ b/mlir/test/Dialect/Linalg/sparse_2d.mlir @@ -110,9 +110,9 @@ // CHECK: %[[VAL_5:.*]] = constant 0 : index // CHECK: %[[VAL_6:.*]] = constant true // CHECK: %[[VAL_7:.*]] = constant 1 : index -// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16xf32> // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32> // CHECK: %[[VAL_13:.*]] = memref.alloc() : memref<32x16xf32> @@ -172,9 +172,9 @@ // CHECK: %[[VAL_3:.*]] = constant 32 : index // CHECK: %[[VAL_4:.*]] = constant 0 : index // CHECK: %[[VAL_5:.*]] = constant 1 : index -// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref // CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16xf32> // CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32> // CHECK: %[[VAL_11:.*]] = memref.alloc() : memref<32x16xf32> @@ -229,9 +229,9 @@ // CHECK: %[[VAL_5:.*]] = constant true // CHECK: %[[VAL_6:.*]] = constant 0 : index // CHECK: %[[VAL_7:.*]] = constant 1 : index -// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16xf32> // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32> // CHECK: %[[VAL_13:.*]] = memref.alloc() : memref<32x16xf32> @@ -296,9 +296,9 @@ // CHECK: %[[VAL_3:.*]] = constant 16 : index // CHECK: %[[VAL_4:.*]] = constant 0 : index // CHECK: %[[VAL_5:.*]] = constant 1 : index -// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_7:.*]] = 
linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref // CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16xf32> // CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32> // CHECK: %[[VAL_11:.*]] = memref.alloc() : memref<32x16xf32> @@ -354,11 +354,11 @@ // CHECK: %[[VAL_5:.*]] = constant true // CHECK: %[[VAL_6:.*]] = constant 0 : index // CHECK: %[[VAL_7:.*]] = constant 1 : index -// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_12:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref // CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16xf32> // CHECK: %[[VAL_14:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32> // CHECK: %[[VAL_15:.*]] = memref.alloc() : memref<32x16xf32> @@ -446,11 +446,11 @@ // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> { // CHECK: %[[VAL_3:.*]] = constant 0 : index // CHECK: %[[VAL_4:.*]] = constant 1 : index -// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref // CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16xf32> // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32> // CHECK: %[[VAL_12:.*]] = memref.alloc() : memref<32x16xf32> @@ -505,16 +505,16 @@ // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> { // CHECK: 
%[[VAL_3:.*]] = constant 0 : index // CHECK: %[[VAL_4:.*]] = constant 1 : index -// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32> to memref // CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32> // CHECK: %[[VAL_16:.*]] = memref.alloc() : memref<32x16xf32> // CHECK: linalg.copy(%[[VAL_15]], %[[VAL_16]]) : memref<32x16xf32>, memref<32x16xf32> @@ -670,16 +670,16 @@ // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> { // CHECK: %[[VAL_3:.*]] = constant 0 : index // CHECK: %[[VAL_4:.*]] = constant 1 : index -// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers 
%[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32> to memref // CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32> // CHECK: %[[VAL_16:.*]] = memref.alloc() : memref<32x16xf32> // CHECK: linalg.copy(%[[VAL_15]], %[[VAL_16]]) : memref<32x16xf32>, memref<32x16xf32> @@ -782,16 +782,16 @@ // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> { // CHECK: %[[VAL_3:.*]] = constant 0 : index // CHECK: %[[VAL_4:.*]] = constant 1 : index -// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32> to memref // CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32> // CHECK: %[[VAL_16:.*]] = memref.alloc() : memref<32x16xf32> // CHECK: linalg.copy(%[[VAL_15]], 
%[[VAL_16]]) : memref<32x16xf32>, memref<32x16xf32> @@ -947,16 +947,16 @@ // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> { // CHECK: %[[VAL_3:.*]] = constant 0 : index // CHECK: %[[VAL_4:.*]] = constant 1 : index -// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32> to memref // CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32> // CHECK: %[[VAL_16:.*]] = memref.alloc() : memref<32x16xf32> // CHECK: linalg.copy(%[[VAL_15]], %[[VAL_16]]) : memref<32x16xf32>, memref<32x16xf32> @@ -1060,9 +1060,9 @@ // CHECK: %[[VAL_3:.*]] = constant 16 : index // CHECK: %[[VAL_4:.*]] = constant 0 : index // CHECK: %[[VAL_5:.*]] = constant 1 : index -// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<16x32xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<16x32xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<16x32xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<16x32xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<16x32xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16x32xf32> to memref // CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32xf32> // CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_2]] : memref<16xf32> // CHECK: %[[VAL_11:.*]] = memref.alloc() : memref<16xf32> @@ -1116,8 +1116,8 @@ // CHECK: %[[VAL_2:.*]] = constant 10 : index // CHECK: %[[VAL_3:.*]] = constant 0 : index // CHECK: 
 // CHECK: %[[VAL_4:.*]] = constant 1 : index
-// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<10x20xf32> to memref<?xindex>
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<10x20xf32> to memref<?xf32>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<10x20xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20xf32> to memref<?xf32>
 // CHECK: %[[VAL_7:.*]] = memref.buffer_cast %[[VAL_1]] : memref<f32>
 // CHECK: %[[VAL_8:.*]] = memref.alloc() : memref<f32>
 // CHECK: linalg.copy(%[[VAL_7]], %[[VAL_8]]) : memref<f32>, memref<f32>
@@ -1166,9 +1166,9 @@
 // CHECK-DAG: %[[VAL_3:.*]] = constant 0 : index
 // CHECK-DAG: %[[VAL_4:.*]] = constant 1 : index
 // CHECK-DAG: %[[VAL_2:.*]] = constant 2.000000e+00 : f64
-// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf64> to memref<?xindex>
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf64> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<?x?xf64> to memref<?xf64>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf64> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf64> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xf64> to memref<?xf64>
 // CHECK: %[[VAL_8:.*]] = memref.dim %[[VAL_1]], %[[VAL_3]] : tensor<?x?xf64>
 // CHECK: %[[VAL_9:.*]] = memref.dim %[[VAL_1]], %[[VAL_4]] : tensor<?x?xf64>
 // CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_1]] : memref<?x?xf64>
@@ -1224,11 +1224,11 @@
 // CHECK-SAME: %[[VAL_3:.*3]]: tensor<?x?xf32>) -> tensor<?x?xf32> {
 // CHECK: %[[VAL_4:.*]] = constant 0 : index
 // CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<?x?xf32> to memref<?xf32>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xf32> to memref<?xf32>
 // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_1]] : memref<?x?xf32>
 // CHECK: %[[VAL_12:.*]] = memref.dim %[[VAL_2]], %[[VAL_4]] : tensor<?x?xf32>
 // CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_2]] : memref<?x?xf32>
@@ -1308,17 +1308,17 @@
 // CHECK: %[[VAL_6:.*]] = constant 0 : index
 // CHECK: %[[VAL_7:.*]] = constant true
 // CHECK: %[[VAL_8:.*]] = constant 1 : index
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_6]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_6]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<?x?xf32> to memref<?xf32>
-// CHECK: %[[VAL_14:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_16:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<?x?xf32> to memref<?xf32>
-// CHECK: %[[VAL_17:.*]] = linalg.sparse_pointers %[[VAL_2]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_18:.*]] = linalg.sparse_indices %[[VAL_2]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_19:.*]] = linalg.sparse_values %[[VAL_2]] : tensor<?x?xf32> to memref<?xf32>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_6]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_6]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xf32> to memref<?xf32>
+// CHECK: %[[VAL_14:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_15:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_16:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?x?xf32> to memref<?xf32>
+// CHECK: %[[VAL_17:.*]] = sparse_tensor.pointers %[[VAL_2]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_18:.*]] = sparse_tensor.indices %[[VAL_2]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_19:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<?x?xf32> to memref<?xf32>
 // CHECK: %[[VAL_20:.*]] = memref.buffer_cast %[[VAL_3]] : memref<?x?xf32>
 // CHECK: %[[VAL_21:.*]] = memref.buffer_cast %[[VAL_4]] : memref<?x?xf32>
 // CHECK: %[[VAL_22:.*]] = memref.dim %[[VAL_5]], %[[VAL_6]] : tensor<?x?xf32>
diff --git a/mlir/test/Dialect/Linalg/sparse_3d.mlir b/mlir/test/Dialect/Linalg/sparse_3d.mlir
--- a/mlir/test/Dialect/Linalg/sparse_3d.mlir
+++ b/mlir/test/Dialect/Linalg/sparse_3d.mlir
@@ -118,9 +118,9 @@
 // CHECK: %[[VAL_7:.*]] = constant 0 : index
 // CHECK: %[[VAL_8:.*]] = constant true
 // CHECK: %[[VAL_9:.*]] = constant 1 : index
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_14:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_15:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -186,9 +186,9 @@
 // CHECK: %[[VAL_5:.*]] = constant 16 : index
 // CHECK: %[[VAL_6:.*]] = constant 0 : index
 // CHECK: %[[VAL_7:.*]] = constant 1 : index
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_13:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -248,9 +248,9 @@
 // CHECK: %[[VAL_6:.*]] = constant true
 // CHECK: %[[VAL_7:.*]] = constant 0 : index
 // CHECK: %[[VAL_8:.*]] = constant 1 : index
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_14:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -319,9 +319,9 @@
 // CHECK: %[[VAL_4:.*]] = constant 8 : index
 // CHECK: %[[VAL_5:.*]] = constant 0 : index
 // CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_12:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -382,11 +382,11 @@
 // CHECK: %[[VAL_7:.*]] = constant true
 // CHECK: %[[VAL_8:.*]] = constant 0 : index
 // CHECK: %[[VAL_9:.*]] = constant 1 : index
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_16:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_17:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -479,11 +479,11 @@
 // CHECK: %[[VAL_4:.*]] = constant 32 : index
 // CHECK: %[[VAL_5:.*]] = constant 0 : index
 // CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_14:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -545,9 +545,9 @@
 // CHECK: %[[VAL_6:.*]] = constant true
 // CHECK: %[[VAL_7:.*]] = constant 0 : index
 // CHECK: %[[VAL_8:.*]] = constant 1 : index
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_14:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -621,9 +621,9 @@
 // CHECK: %[[VAL_4:.*]] = constant 8 : index
 // CHECK: %[[VAL_5:.*]] = constant 0 : index
 // CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_12:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -685,11 +685,11 @@
 // CHECK: %[[VAL_7:.*]] = constant true
 // CHECK: %[[VAL_8:.*]] = constant 0 : index
 // CHECK: %[[VAL_9:.*]] = constant 1 : index
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_16:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_17:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -787,11 +787,11 @@
 // CHECK: %[[VAL_4:.*]] = constant 16 : index
 // CHECK: %[[VAL_5:.*]] = constant 0 : index
 // CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_14:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -854,11 +854,11 @@
 // CHECK: %[[VAL_6:.*]] = constant true
 // CHECK: %[[VAL_7:.*]] = constant 0 : index
 // CHECK: %[[VAL_8:.*]] = constant 1 : index
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_14:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_16:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -959,11 +959,11 @@
 // CHECK: %[[VAL_3:.*]] = constant 8 : index
 // CHECK: %[[VAL_4:.*]] = constant 0 : index
 // CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_13:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -1027,13 +1027,13 @@
 // CHECK: %[[VAL_7:.*]] = constant true
 // CHECK: %[[VAL_8:.*]] = constant 0 : index
 // CHECK: %[[VAL_9:.*]] = constant 1 : index
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_16:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_15:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_16:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_17:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_18:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_19:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -1158,13 +1158,13 @@
 // CHECK: %[[VAL_3:.*]] = constant 2 : index
 // CHECK: %[[VAL_4:.*]] = constant 0 : index
 // CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_14:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_15:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -1229,9 +1229,9 @@
 // CHECK: %[[VAL_4:.*]] = constant 2 : index
 // CHECK: %[[VAL_5:.*]] = constant 0 : index
 // CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<?x?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<?x?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<?x?x?xf32> to memref<?xf32>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<?x?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<?x?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?x?x?xf32> to memref<?xf32>
 // CHECK: %[[VAL_10:.*]] = memref.dim %[[VAL_2]], %[[VAL_5]] : tensor<?x?xf32>
 // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<?x?xf32>
 // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_3]] : memref<?x?xf32>
@@ -1300,10 +1300,10 @@
 // CHECK: %[[VAL_2:.*]] = constant 2 : index
 // CHECK: %[[VAL_3:.*]] = constant 0 : index
 // CHECK: %[[VAL_4:.*]] = constant 1 : index
-// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<10x20x30xf32> to memref<?xindex>
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<10x20x30xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_2]] : tensor<10x20x30xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<10x20x30xf32> to memref<?xf32>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<10x20x30xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<10x20x30xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_2]] : tensor<10x20x30xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20x30xf32> to memref<?xf32>
 // CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<f32>
 // CHECK: %[[VAL_10:.*]] = memref.alloc() : memref<f32>
 // CHECK: linalg.copy(%[[VAL_9]], %[[VAL_10]]) : memref<f32>, memref<f32>
diff --git a/mlir/test/Dialect/Linalg/sparse_lower.mlir b/mlir/test/Dialect/Linalg/sparse_lower.mlir
--- a/mlir/test/Dialect/Linalg/sparse_lower.mlir
+++ b/mlir/test/Dialect/Linalg/sparse_lower.mlir
@@ -41,10 +41,10 @@
 // CHECK-HIR: %[[VAL_3:.*]] = constant 64 : index
 // CHECK-HIR: %[[VAL_4:.*]] = constant 0 : index
 // CHECK-HIR: %[[VAL_5:.*]] = constant 1 : index
-// CHECK-HIR: %[[VAL_6:.*]] = linalg.sparse_tensor %[[VAL_0]] : !llvm.ptr<i8> to tensor<64x64xf64>
-// CHECK-HIR: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_6]], %[[VAL_5]] : tensor<64x64xf64> to memref<?xindex>
-// CHECK-HIR: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_6]], %[[VAL_5]] : tensor<64x64xf64> to memref<?xindex>
-// CHECK-HIR: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_6]] : tensor<64x64xf64> to memref<?xf64>
+// CHECK-HIR: %[[VAL_6:.*]] = sparse_tensor.fromPtr %[[VAL_0]] : !llvm.ptr<i8> to tensor<64x64xf64>
+// CHECK-HIR: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_6]], %[[VAL_5]] : tensor<64x64xf64> to memref<?xindex>
+// CHECK-HIR: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_6]], %[[VAL_5]] : tensor<64x64xf64> to memref<?xindex>
+// CHECK-HIR: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_6]] : tensor<64x64xf64> to memref<?xf64>
 // CHECK-HIR: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_1]] : memref<64xf64>
 // CHECK-HIR: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<64xf64>
 // CHECK-HIR: %[[VAL_12:.*]] = memref.alloc() : memref<64xf64>
@@ -168,7 +168,7 @@
 !SparseTensor = type !llvm.ptr<i8>
 func @matvec(%argA: !SparseTensor, %argb: tensor<64xf64>,
              %argx: tensor<64xf64>) -> tensor<64xf64> {
-  %arga = linalg.sparse_tensor %argA : !SparseTensor to tensor<64x64xf64>
+  %arga = sparse_tensor.fromPtr %argA : !SparseTensor to tensor<64x64xf64>
   %0 = linalg.generic #trait_matvec
     ins(%arga, %argb : tensor<64x64xf64>, tensor<64xf64>)
     outs(%argx: tensor<64xf64>) {
diff --git a/mlir/test/Dialect/Linalg/sparse_nd.mlir b/mlir/test/Dialect/Linalg/sparse_nd.mlir
--- a/mlir/test/Dialect/Linalg/sparse_nd.mlir
+++ b/mlir/test/Dialect/Linalg/sparse_nd.mlir
@@ -34,11 +34,11 @@
 // CHECK: %[[VAL_11:.*]] = constant 0 : index
 // CHECK: %[[VAL_12:.*]] = constant 1 : index
 // CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_0]] : memref<10x20x30x40x50x60x70x80xf32>
-// CHECK: %[[VAL_14:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
-// CHECK: %[[VAL_16:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
-// CHECK: %[[VAL_17:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
-// CHECK: %[[VAL_18:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xf32>
+// CHECK: %[[VAL_14:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
+// CHECK: %[[VAL_15:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
+// CHECK: %[[VAL_16:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
+// CHECK: %[[VAL_17:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
+// CHECK: %[[VAL_18:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xf32>
 // CHECK: %[[VAL_19:.*]] = memref.buffer_cast %[[VAL_2]] : memref<10x20x30x40x50x60x70x80xf32>
 // CHECK: %[[VAL_20:.*]] = memref.alloc() : memref<10x20x30x40x50x60x70x80xf32>
 // CHECK: linalg.copy(%[[VAL_19]], %[[VAL_20]]) : memref<10x20x30x40x50x60x70x80xf32>, memref<10x20x30x40x50x60x70x80xf32>
diff --git a/mlir/test/Dialect/Linalg/sparse_vector.mlir b/mlir/test/Dialect/Linalg/sparse_vector.mlir
--- a/mlir/test/Dialect/Linalg/sparse_vector.mlir
+++ b/mlir/test/Dialect/Linalg/sparse_vector.mlir
@@ -228,8 +228,8 @@
 //
 !SparseTensor = type !llvm.ptr<i8>
 func @mul_s_alt(%argA: !SparseTensor, %argB: !SparseTensor, %argx: tensor<1024xf32>) -> tensor<1024xf32> {
-  %arga = linalg.sparse_tensor %argA : !SparseTensor to tensor<1024xf32>
-  %argb = linalg.sparse_tensor %argB : !SparseTensor to tensor<1024xf32>
+  %arga = sparse_tensor.fromPtr %argA : !SparseTensor to tensor<1024xf32>
+  %argb = sparse_tensor.fromPtr %argB : !SparseTensor to tensor<1024xf32>
   %0 = linalg.generic #trait_mul_s
     ins(%arga, %argb: tensor<1024xf32>, tensor<1024xf32>)
     outs(%argx: tensor<1024xf32>) {
diff --git a/mlir/test/Dialect/Linalg/sparse_lower_calls.mlir b/mlir/test/Dialect/SparseTensor/lowering.mlir
rename from mlir/test/Dialect/Linalg/sparse_lower_calls.mlir
rename to mlir/test/Dialect/SparseTensor/lowering.mlir
--- a/mlir/test/Dialect/Linalg/sparse_lower_calls.mlir
+++ b/mlir/test/Dialect/SparseTensor/lowering.mlir
@@ -8,9 +8,9 @@
 // CHECK: %[[T:.*]] = call @sparsePointers64(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi64>
 // CHECK: return %[[T]] : memref<?xi64>
 func @sparse_pointers(%arg0: !SparseTensor) -> memref<?xi64> {
-  %a = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf64>
+  %a = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<128xf64>
   %c = constant 1 : index
-  %0 = linalg.sparse_pointers %a, %c : tensor<128xf64> to memref<?xi64>
+  %0 = sparse_tensor.pointers %a, %c : tensor<128xf64> to memref<?xi64>
   return %0 : memref<?xi64>
 }
@@ -20,9 +20,9 @@
 // CHECK: %[[T:.*]] = call @sparsePointers32(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi32>
 // CHECK: return %[[T]] : memref<?xi32>
 func @sparse_pointers32(%arg0: !SparseTensor) -> memref<?xi32> {
-  %a = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf64>
+  %a = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<128xf64>
   %c = constant 1 : index
-  %0 = linalg.sparse_pointers %a, %c : tensor<128xf64> to memref<?xi32>
+  %0 = sparse_tensor.pointers %a, %c : tensor<128xf64> to memref<?xi32>
   return %0 : memref<?xi32>
 }
@@ -32,9 +32,9 @@
 // CHECK: %[[T:.*]] = call @sparseIndices64(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi64>
 // CHECK: return %[[T]] : memref<?xi64>
 func @sparse_indices(%arg0: !SparseTensor) -> memref<?xi64> {
-  %a = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf64>
+  %a = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<128xf64>
   %c = constant 1 : index
-  %0 = linalg.sparse_indices %a, %c : tensor<128xf64> to memref<?xi64>
+  %0 = sparse_tensor.indices %a, %c : tensor<128xf64> to memref<?xi64>
   return %0 : memref<?xi64>
 }
@@ -44,9 +44,9 @@
 // CHECK: %[[T:.*]] = call @sparseIndices32(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi32>
 // CHECK: return %[[T]] : memref<?xi32>
 func @sparse_indices32(%arg0: !SparseTensor) -> memref<?xi32> {
-  %a = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf64>
+  %a = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<128xf64>
   %c = constant 1 : index
-  %0 = linalg.sparse_indices %a, %c : tensor<128xf64> to memref<?xi32>
+  %0 = sparse_tensor.indices %a, %c : tensor<128xf64> to memref<?xi32>
   return %0 : memref<?xi32>
 }
@@ -55,8 +55,8 @@
 // CHECK: %[[T:.*]] = call @sparseValuesF64(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xf64>
 // CHECK: return %[[T]] : memref<?xf64>
 func @sparse_valuesf64(%arg0: !SparseTensor) -> memref<?xf64> {
-  %a = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf64>
-  %0 = linalg.sparse_values %a : tensor<128xf64> to memref<?xf64>
+  %a = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<128xf64>
+  %0 = sparse_tensor.values %a : tensor<128xf64> to memref<?xf64>
   return %0 : memref<?xf64>
 }
@@ -65,7 +65,7 @@
 // CHECK: %[[T:.*]] = call @sparseValuesF32(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xf32>
 // CHECK: return %[[T]] : memref<?xf32>
 func @sparse_valuesf32(%arg0: !SparseTensor) -> memref<?xf32> {
-  %a = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf32>
-  %0 = linalg.sparse_values %a : tensor<128xf32> to memref<?xf32>
+  %a = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<128xf32>
+  %0 = sparse_tensor.values %a : tensor<128xf32> to memref<?xf32>
   return %0 : memref<?xf32>
 }
diff --git a/mlir/test/Dialect/Linalg/sparse_roundtrip.mlir b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
rename from mlir/test/Dialect/Linalg/sparse_roundtrip.mlir
rename to mlir/test/Dialect/SparseTensor/roundtrip.mlir
--- a/mlir/test/Dialect/Linalg/sparse_roundtrip.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
@@ -4,10 +4,10 @@
 // CHECK-LABEL: func @sparse_tensor(
 // CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
-// CHECK: %[[T:.*]] = linalg.sparse_tensor %[[A]] : !llvm.ptr<i8> to tensor<128xf64>
+// CHECK: %[[T:.*]] = sparse_tensor.fromPtr %[[A]] : !llvm.ptr<i8> to tensor<128xf64>
 // CHECK: return %[[T]] : tensor<128xf64>
 func @sparse_tensor(%arg0: !SparseTensor) -> tensor<128xf64> {
-  %0 = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf64>
+  %0 = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<128xf64>
   return %0 : tensor<128xf64>
 }
@@ -16,11 +16,11 @@
 // CHECK-LABEL: func @sparse_pointers(
 // CHECK-SAME: %[[A:.*]]: tensor<128xf64>)
 // CHECK: %[[C:.*]] = constant 1 : index
-// CHECK: %[[T:.*]] = linalg.sparse_pointers %[[A]], %[[C]] : tensor<128xf64> to memref<?xindex>
+// CHECK: %[[T:.*]] = sparse_tensor.pointers %[[A]], %[[C]] : tensor<128xf64> to memref<?xindex>
 // CHECK: return %[[T]] : memref<?xindex>
 func @sparse_pointers(%arg0: tensor<128xf64>) -> memref<?xindex> {
   %c = constant 1 : index
-  %0 = linalg.sparse_pointers %arg0, %c : tensor<128xf64> to memref<?xindex>
+  %0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64> to memref<?xindex>
   return %0 : memref<?xindex>
 }
@@ -29,11 +29,11 @@
 // CHECK-LABEL: func @sparse_indices(
 // CHECK-SAME: %[[A:.*]]: tensor<128xf64>)
 // CHECK: %[[C:.*]] = constant 1 : index
-// CHECK: %[[T:.*]] = linalg.sparse_indices %[[A]], %[[C]] : tensor<128xf64> to memref<?xindex>
+// CHECK: %[[T:.*]] = sparse_tensor.indices %[[A]], %[[C]] : tensor<128xf64> to memref<?xindex>
 // CHECK: return %[[T]] : memref<?xindex>
 func @sparse_indices(%arg0: tensor<128xf64>) -> memref<?xindex> {
   %c = constant 1 : index
-  %0 = linalg.sparse_indices %arg0, %c : tensor<128xf64> to memref<?xindex>
+  %0 = sparse_tensor.indices %arg0, %c : tensor<128xf64> to memref<?xindex>
   return %0 : memref<?xindex>
 }
@@ -41,9 +41,9 @@
 // CHECK-LABEL: func @sparse_values(
 // CHECK-SAME: %[[A:.*]]: tensor<128xf64>)
-// CHECK: %[[T:.*]] = linalg.sparse_values %[[A]] : tensor<128xf64> to memref<?xf64>
+// CHECK: %[[T:.*]] = sparse_tensor.values %[[A]] : tensor<128xf64> to memref<?xf64>
 // CHECK: return %[[T]] : memref<?xf64>
 func @sparse_values(%arg0: tensor<128xf64>) -> memref<?xf64> {
-  %0 = linalg.sparse_values %arg0 : tensor<128xf64> to memref<?xf64>
+  %0 = sparse_tensor.values %arg0 : tensor<128xf64> to memref<?xf64>
   return %0 : memref<?xf64>
 }
diff --git a/mlir/test/Integration/Sparse/CPU/frostt-example.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/frostt-example.mlir
rename from mlir/test/Integration/Sparse/CPU/frostt-example.mlir
rename to mlir/test/Integration/Dialect/SparseTensor/CPU/frostt-example.mlir
diff --git a/mlir/test/Integration/Sparse/CPU/lit.local.cfg b/mlir/test/Integration/Dialect/SparseTensor/CPU/lit.local.cfg
rename from mlir/test/Integration/Sparse/CPU/lit.local.cfg
rename to mlir/test/Integration/Dialect/SparseTensor/CPU/lit.local.cfg
diff --git a/mlir/test/Integration/Sparse/CPU/matrix-market-example.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/matrix-market-example.mlir
rename from mlir/test/Integration/Sparse/CPU/matrix-market-example.mlir
rename to mlir/test/Integration/Dialect/SparseTensor/CPU/matrix-market-example.mlir
diff --git a/mlir/test/Integration/Sparse/CPU/sparse_matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
rename from mlir/test/Integration/Sparse/CPU/sparse_matvec.mlir
rename to mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
--- a/mlir/test/Integration/Sparse/CPU/sparse_matvec.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
@@ -56,7 +56,7 @@
 func @kernel_matvec(%argA: !SparseTensor,
                     %argb: tensor<?xf64>,
                     %argx: tensor<?xf64>) -> tensor<?xf64> {
-  %arga = linalg.sparse_tensor %argA : !SparseTensor to tensor<?x?xf64>
+  %arga = sparse_tensor.fromPtr %argA : !SparseTensor to tensor<?x?xf64>
   %0 = linalg.generic #matvec
     ins(%arga, %argb: tensor<?x?xf64>, tensor<?xf64>)
     outs(%argx: tensor<?xf64>) {
diff --git a/mlir/test/Integration/Sparse/CPU/sparse_sampled_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
rename from mlir/test/Integration/Sparse/CPU/sparse_sampled_matmul.mlir
rename to mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
--- a/mlir/test/Integration/Sparse/CPU/sparse_sampled_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
@@ -47,7 +47,7 @@
                            %arga: tensor<?x?xf32>,
                            %argb: tensor<?x?xf32>,
                            %argx: tensor<?x?xf32>) -> tensor<?x?xf32> {
-  %args = linalg.sparse_tensor %argS : !SparseTensor to tensor<?x?xf32>
+  %args = sparse_tensor.fromPtr %argS : !SparseTensor to tensor<?x?xf32>
   %0 = linalg.generic #trait_sampled_dense_dense
     ins(%args, %arga, %argb: tensor<?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>)
     outs(%argx: tensor<?x?xf32>) {
diff --git a/mlir/test/Integration/Sparse/CPU/sparse_sum.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
rename from mlir/test/Integration/Sparse/CPU/sparse_sum.mlir
rename to mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
--- a/mlir/test/Integration/Sparse/CPU/sparse_sum.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
@@ -41,7 +41,7 @@
 //
 func @kernel_sum_reduce(%argA: !SparseTensor,
                         %argx: tensor<f64>) -> tensor<f64> {
-  %arga = linalg.sparse_tensor %argA : !SparseTensor to tensor<?x?xf64>
+  %arga = sparse_tensor.fromPtr %argA : !SparseTensor to tensor<?x?xf64>
   %0 = linalg.generic #trait_sum_reduce
     ins(%arga: tensor<?x?xf64>)
     outs(%argx: tensor<f64>) {
diff --git a/mlir/test/lib/Transforms/CMakeLists.txt b/mlir/test/lib/Transforms/CMakeLists.txt
--- a/mlir/test/lib/Transforms/CMakeLists.txt
+++ b/mlir/test/lib/Transforms/CMakeLists.txt
@@ -64,6 +64,8 @@
   MLIRSCF
   MLIRSCFTransforms
   MLIRStandardOpsTransforms
+  MLIRSparseTensor
+  MLIRSparseTensorTransforms
   MLIRTargetLLVMIRExport
   MLIRTestDialect
   MLIRTransformUtils
diff --git a/mlir/test/lib/Transforms/TestSparsification.cpp b/mlir/test/lib/Transforms/TestSparsification.cpp
--- a/mlir/test/lib/Transforms/TestSparsification.cpp
+++ b/mlir/test/lib/Transforms/TestSparsification.cpp
@@ -8,6 +8,8 @@
 #include "mlir/Dialect/LLVMIR/LLVMDialect.h"
 #include "mlir/Dialect/Linalg/Transforms/Transforms.h"
+#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
+#include "mlir/Dialect/SparseTensor/Transforms/Transforms.h"
 #include "mlir/Dialect/Vector/VectorOps.h"
 #include "mlir/Pass/Pass.h"
 #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
@@ -51,7 +53,8 @@
   /// Registers all dialects required by testing.
   void getDependentDialects(DialectRegistry &registry) const override {
     registry.insert<memref::MemRefDialect, scf::SCFDialect,
-                    vector::VectorDialect, LLVM::LLVMDialect>();
+                    sparse_tensor::SparseTensorDialect, vector::VectorDialect,
+                    LLVM::LLVMDialect>();
   }

   /// Returns parallelization strategy given on command line.
@@ -114,12 +117,12 @@
     if (lower) {
       RewritePatternSet conversionPatterns(ctx);
       ConversionTarget target(*ctx);
-      target.addIllegalOp<linalg::SparseTensorFromPointerOp,
-                          linalg::SparseTensorToPointersOp,
-                          linalg::SparseTensorToIndicesOp,
-                          linalg::SparseTensorToValuesOp>();
+      target.addIllegalOp<
+          sparse_tensor::FromPointerOp, sparse_tensor::ToPointersOp,
+          sparse_tensor::ToIndicesOp, sparse_tensor::ToValuesOp>();
       target.addLegalOp<CallOp>();
-      linalg::populateSparsificationConversionPatterns(conversionPatterns);
+      sparse_tensor::populateSparsificationConversionPatterns(
+          conversionPatterns);
       if (failed(applyPartialConversion(getOperation(), target,
                                         std::move(conversionPatterns))))
         signalPassFailure();
diff --git a/mlir/test/mlir-opt/commandline.mlir b/mlir/test/mlir-opt/commandline.mlir
--- a/mlir/test/mlir-opt/commandline.mlir
+++ b/mlir/test/mlir-opt/commandline.mlir
@@ -22,6 +22,7 @@
 // CHECK-NEXT: scf
 // CHECK-NEXT: sdbm
 // CHECK-NEXT: shape
+// CHECK-NEXT: sparse_tensor
 // CHECK-NEXT: spv
 // CHECK-NEXT: std
 // CHECK-NEXT: tensor
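
For reference, a minimal usage sketch of the renamed ops, mirroring the roundtrip
tests in this patch (illustrative only: the function name @sketch is hypothetical,
and the !SparseTensor alias, the dimension operand, and the dynamic memref result
types are taken from the tests above, not part of the change itself):

  !SparseTensor = type !llvm.ptr<i8>

  func @sketch(%argA: !SparseTensor) -> memref<?xf64> {
    // Bridge the opaque runtime pointer into a ranked tensor type.
    %a = sparse_tensor.fromPtr %argA : !SparseTensor to tensor<128xf64>
    %c = constant 1 : index
    // Query the pointers and indices arrays of the underlying storage
    // scheme (dimension operand as used in roundtrip.mlir above).
    %ptrs = sparse_tensor.pointers %a, %c : tensor<128xf64> to memref<?xindex>
    %inds = sparse_tensor.indices %a, %c : tensor<128xf64> to memref<?xindex>
    // Extract the primary values array.
    %vals = sparse_tensor.values %a : tensor<128xf64> to memref<?xf64>
    return %vals : memref<?xf64>
  }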