diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/CMakeLists.txt b/mlir/include/mlir/Dialect/SparseTensor/IR/CMakeLists.txt --- a/mlir/include/mlir/Dialect/SparseTensor/IR/CMakeLists.txt +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/CMakeLists.txt @@ -4,4 +4,12 @@ set(LLVM_TARGET_DEFINITIONS SparseTensorAttrDefs.td) mlir_tablegen(SparseTensorAttrDefs.h.inc -gen-attrdef-decls) mlir_tablegen(SparseTensorAttrDefs.cpp.inc -gen-attrdef-defs) +mlir_tablegen(SparseTensorAttrEnums.h.inc -gen-enum-decls) +mlir_tablegen(SparseTensorAttrEnums.cpp.inc -gen-enum-defs) add_public_tablegen_target(MLIRSparseTensorAttrDefsIncGen) + +set(LLVM_TARGET_DEFINITIONS SparseTensorTypes.td) +mlir_tablegen(SparseTensorTypes.h.inc -gen-typedef-decls) +mlir_tablegen(SparseTensorTypes.cpp.inc -gen-typedef-defs) +add_public_tablegen_target(MLIRSparseTensorTypesIncGen) + diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h --- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h @@ -18,8 +18,16 @@ #include "mlir/Interfaces/InferTypeOpInterface.h" #include "mlir/Interfaces/SideEffectInterfaces.h" +// We must include Enums.h.inc before AttrDefs.h.inc due to dependency between +// StorageSpecifierKindAttr and StorageSpecifierKind Enum. 
+// clang-format off #define GET_ATTRDEF_CLASSES +#include "mlir/Dialect/SparseTensor/IR/SparseTensorAttrEnums.h.inc" #include "mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.h.inc" +// clang-format on + +#define GET_TYPEDEF_CLASSES +#include "mlir/Dialect/SparseTensor/IR/SparseTensorTypes.h.inc" #define GET_OP_CLASSES #include "mlir/Dialect/SparseTensor/IR/SparseTensorOps.h.inc" diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td --- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td @@ -10,6 +10,7 @@ #define SPARSETENSOR_ATTRDEFS include "mlir/IR/AttrTypeBase.td" +include "mlir/IR/EnumAttr.td" include "mlir/Dialect/SparseTensor/IR/SparseTensorBase.td" include "mlir/IR/TensorEncoding.td" @@ -170,6 +171,25 @@ let hasCustomAssemblyFormat = 1; } +// The C++ enum for Metadata kind +def SparseTensorStorageSpecifierKindEnum + : I32EnumAttr<"StorageSpecifierKind", "sparse tensor storage specifier kind", [ + I32EnumAttrCase<"DimSize", 0, "dim_sz">, + I32EnumAttrCase<"PtrMemSize", 1, "ptr_mem_sz">, + I32EnumAttrCase<"IdxMemSize", 2, "idx_mem_sz">, + I32EnumAttrCase<"ValMemSize", 3, "val_mem_sz">, + ]> { + let genSpecializedAttr = 0; + let cppNamespace = SparseTensor_Dialect.cppNamespace; +} + +// Define the enum StorageSpecifier kind attribute. 
+def SparseTensorStorageSpecifierKindAttr + : EnumAttr<SparseTensor_Dialect, SparseTensorStorageSpecifierKindEnum, + "SparseTensorStorageSpecifierKind"> { + let mnemonic = "kind"; +} + def IsSparseTensorPred : CPred<"!!::mlir::sparse_tensor::getSparseTensorEncoding($_self)">; diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorBase.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorBase.td --- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorBase.td +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorBase.td @@ -82,6 +82,7 @@ }]; let useDefaultAttributePrinterParser = 1; + let useDefaultTypePrinterParser = 1; } #endif // SPARSETENSOR_BASE diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td --- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td @@ -11,6 +11,7 @@ include "mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td" include "mlir/Dialect/SparseTensor/IR/SparseTensorBase.td" +include "mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td" include "mlir/Interfaces/InferTypeOpInterface.td" include "mlir/Interfaces/SideEffectInterfaces.td" @@ -176,6 +177,78 @@ let hasVerifier = 1; } +def SparseTensor_ToStorageSpecifierOp : SparseTensor_Op<"storage_specifier", [Pure]>, + Arguments<(ins AnySparseTensor:$tensor)>, + Results<(outs SparseTensorStorageSpecifier:$result)> { + let summary = ""; + let description = [{ + Returns the storage specifier value for the given sparse tensor. + A storage specifier value holds the sizes for tensor dimensions, pointer arrays, + index arrays and the value array. 
+ + Example: + + ```mlir + %0 = sparse_tensor.storage_specifier %arg0 : tensor<64x64xf64, #CSR> to + !sparse_tensor.storage_specifier<#CSR> + ``` + }]; + + let assemblyFormat = "$tensor attr-dict `:` type($tensor) `to` qualified(type($result))"; + let hasVerifier = 1; +} + +def SparseTensor_GetStorageSpecifierOp : SparseTensor_Op<"storage_specifier.get", [Pure]>, + Arguments<(ins SparseTensorStorageSpecifier:$specifier, + SparseTensorStorageSpecifierKindAttr:$specifierKind, + OptionalAttr<IndexAttr>:$dim)>, + Results<(outs AnyType:$result)> { + let summary = ""; + let description = [{ + Returns the requested field of the given storage_specifier. + + Example: + + To query the size of the index array for level 0, one can use + + ```mlir + %0 = sparse_tensor.storage_specifier.get %arg0 idx_mem_sz at 0 + : !sparse_tensor.storage_specifier<#COO> to i64 + ``` + }]; + + let assemblyFormat = "$specifier $specifierKind (`at` $dim^)? attr-dict `:` " + "qualified(type($specifier)) `to` type($result)"; + let hasVerifier = 1; +} + +def SparseTensor_SetStorageSpecifierOp : SparseTensor_Op<"storage_specifier.set", + [Pure, AllTypesMatch<["result", "specifier"]>]>, + Arguments<(ins SparseTensorStorageSpecifier:$specifier, + SparseTensorStorageSpecifierKindAttr:$specifierKind, + OptionalAttr<IndexAttr>:$dim, + AnyType:$value)>, + Results<(outs SparseTensorStorageSpecifier:$result)> { + let summary = ""; + let description = [{ + Set the field of the storage specifier to the given input value. Returns + the updated storage_specifier as a new SSA value. + + Example: + + To update the sizes of the index array for level 0, one can use + + ```mlir + %0 = sparse_tensor.storage_specifier.set %arg0 idx_mem_sz at 0 with %new_sz + : i32, !sparse_tensor.storage_specifier<#COO> + + ``` + }]; + let assemblyFormat = "$specifier $specifierKind (`at` $dim^)? 
`with` $value attr-dict `:` " + "type($value) `,` qualified(type($result))"; + let hasVerifier = 1; +} + def SparseTensor_NumberOfEntriesOp : SparseTensor_Op<"number_of_entries", [Pure]>, Arguments<(ins AnySparseTensor:$tensor)>, Results<(outs Index:$result)> { diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.h b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.h new file mode 100644 diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td new file mode 100644 --- /dev/null +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td @@ -0,0 +1,73 @@ +//===- SparseTensorTypes.td - Sparse tensor dialect types --*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +#ifndef SPARSETENSOR_TYPES +#define SPARSETENSOR_TYPES + +include "mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td" +include "mlir/Dialect/SparseTensor/IR/SparseTensorBase.td" + +//===----------------------------------------------------------------------===// +// Base class. +//===----------------------------------------------------------------------===// + +// Base class for sparse tensor dialect types. +class SparseTensor_Type<string name, list<Trait> traits = [], + string baseCppClass = "::mlir::Type"> + : TypeDef<SparseTensor_Dialect, name, traits, baseCppClass> {} + +//===----------------------------------------------------------------------===// +// Sparse Tensor Types. 
+//===----------------------------------------------------------------------===// + +def SparseTensor_StorageSpecifier : SparseTensor_Type<"StorageSpecifier"> { + let mnemonic = "storage_specifier"; + + let summary = ""; + let description = [{ + Syntax: + + ``` + ``` + + Examples: + + ```mlir + ``` + }]; + let parameters = (ins SparseTensorEncodingAttr : $encoding); + let builders = [ + TypeBuilderWithInferredContext<(ins "SparseTensorEncodingAttr":$encoding), [{ + assert(encoding && "sparse tensor encoding should not be null"); + return $_get(encoding.getContext(), encoding); + }]>, + TypeBuilderWithInferredContext<(ins "Type":$type), [{ + return get(getSparseTensorEncoding(type)); + }]>, + TypeBuilderWithInferredContext<(ins "Value":$tensor), [{ + return get(tensor.getType()); + }]> + ]; + + let extraClassDeclaration = [{ + // Get the integer type used to store memory and dimension sizes. + IntegerType getSizesType() const; + Type getFieldType(StorageSpecifierKind kind, Optional<unsigned> dim) const; + Type getFieldType(StorageSpecifierKind kind, Optional<APInt> dim) const; + }]; + + let assemblyFormat="`<` qualified($encoding) `>`"; +} + +def IsSparseTensorStorageSpecifierTypePred + : CPred<"$_self.isa<::mlir::sparse_tensor::StorageSpecifierType>()">; + +def SparseTensorStorageSpecifier + : Type<CPred<"$_self.isa<::mlir::sparse_tensor::StorageSpecifierType>()">, "metadata", + "::mlir::sparse_tensor::StorageSpecifierType">; + +#endif // SPARSETENSOR_TYPES \ No newline at end of file diff --git a/mlir/lib/Dialect/SparseTensor/IR/CMakeLists.txt b/mlir/lib/Dialect/SparseTensor/IR/CMakeLists.txt --- a/mlir/lib/Dialect/SparseTensor/IR/CMakeLists.txt +++ b/mlir/lib/Dialect/SparseTensor/IR/CMakeLists.txt @@ -59,6 +59,7 @@ DEPENDS MLIRSparseTensorAttrDefsIncGen MLIRSparseTensorOpsIncGen + MLIRSparseTensorTypesIncGen LINK_LIBS PUBLIC MLIRArithDialect diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp --- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp +++ 
b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp @@ -27,6 +27,7 @@ #define GET_ATTRDEF_CLASSES #include "mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.cpp.inc" +#include "mlir/Dialect/SparseTensor/IR/SparseTensorAttrEnums.cpp.inc" static bool acceptBitWidth(unsigned bitWidth) { switch (bitWidth) { @@ -273,6 +274,8 @@ mlir::sparse_tensor::getSparseTensorEncoding(Type type) { if (auto ttp = type.dyn_cast<RankedTensorType>()) return ttp.getEncoding().dyn_cast_or_null<SparseTensorEncodingAttr>(); + if (auto mdtp = type.dyn_cast<StorageSpecifierType>()) + return mdtp.getEncoding(); return nullptr; } @@ -332,7 +335,41 @@ } //===----------------------------------------------------------------------===// -// TensorDialect Operations. +// SparseTensorDialect Types. +//===----------------------------------------------------------------------===// + +#define GET_TYPEDEF_CLASSES +#include "mlir/Dialect/SparseTensor/IR/SparseTensorTypes.cpp.inc" + +IntegerType StorageSpecifierType::getSizesType() const { + unsigned idxBitWidth = + getEncoding().getIndexBitWidth() ? getEncoding().getIndexBitWidth() : 64u; + unsigned ptrBitWidth = + getEncoding().getPointerBitWidth() ? getEncoding().getPointerBitWidth() : 64u; + + return IntegerType::get(getContext(), std::max(idxBitWidth, ptrBitWidth)); +} + +Type StorageSpecifierType::getFieldType(StorageSpecifierKind kind, + Optional<unsigned> dim) const { + if (kind != StorageSpecifierKind::ValMemSize) + assert(dim); + + // Right now, we store every sizes metadata using the same size type. + // TODO: the field size type can be defined dimensional wise after sparse + // tensor encoding supports per dimension index/pointer bitwidth. + return getSizesType(); +} + +Type StorageSpecifierType::getFieldType(StorageSpecifierKind kind, + Optional<APInt> dim) const { + Optional<unsigned> intDim = std::nullopt; + if (dim) + intDim = dim.value().getZExtValue(); + return getFieldType(kind, intDim); +} +//===----------------------------------------------------------------------===// +// SparseTensorDialect Operations. 
//===----------------------------------------------------------------------===// static LogicalResult isInBounds(uint64_t dim, Value tensor) { @@ -349,6 +386,34 @@ return failure(); } +static LogicalResult +verifySparsifierGetterSetter(StorageSpecifierKind mdKind, Optional<APInt> dim, + TypedValue<StorageSpecifierType> md, + Operation *op) { + if (mdKind == StorageSpecifierKind::ValMemSize && dim) { + return op->emitError( + "redundant dimension argument for querying value memory size"); + } + + auto enc = md.getType().getEncoding(); + ArrayRef<DimLevelType> dlts = enc.getDimLevelType(); + unsigned rank = dlts.size(); + + if (mdKind != StorageSpecifierKind::ValMemSize) { + if (!dim) + return op->emitError("missing dimension argument"); + + unsigned d = dim.value().getZExtValue(); + if (d >= rank) + return op->emitError("requested dimension out of bound"); + + if (mdKind == StorageSpecifierKind::PtrMemSize && isSingletonDLT(dlts[d])) + return op->emitError( + "requested pointer memory size on a singleton level"); + } + return success(); +} + LogicalResult NewOp::verify() { if (getExpandSymmetry() && getResult().getType().cast<RankedTensorType>().getRank() != 2) @@ -412,6 +477,45 @@ return success(); } +LogicalResult ToStorageSpecifierOp::verify() { + if (getSparseTensorEncoding(getTensor().getType()) != + getSparseTensorEncoding(getResult().getType())) { + return emitError( + "the metadata encoding mismatches the input tensor encoding"); + } + return success(); +} + +LogicalResult GetStorageSpecifierOp::verify() { + if (failed(verifySparsifierGetterSetter(getSpecifierKind(), getDim(), + getSpecifier(), getOperation()))) { + return failure(); + } + + // Checks the result type + if (getSpecifier().getType().getFieldType(getSpecifierKind(), getDim()) != + getResult().getType()) { + return emitError( + "type mismatch between requested specifier field and result value"); + } + return success(); +} + +LogicalResult SetStorageSpecifierOp::verify() { + if (failed(verifySparsifierGetterSetter(getSpecifierKind(), getDim(), + 
getSpecifier(), getOperation()))) { + return failure(); + } + + // Checks the input type + if (getSpecifier().getType().getFieldType(getSpecifierKind(), getDim()) != + getValue().getType()) { + return emitError( + "type mismatch between requested specifier field and input value"); + } + return success(); +} + //===----------------------------------------------------------------------===// // TensorDialect Linalg.Generic Operations. //===----------------------------------------------------------------------===// @@ -801,6 +905,10 @@ #define GET_ATTRDEF_LIST #include "mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.cpp.inc" >(); + addTypes< +#define GET_TYPEDEF_LIST +#include "mlir/Dialect/SparseTensor/IR/SparseTensorTypes.cpp.inc" + >(); addOperations< #define GET_OP_LIST #include "mlir/Dialect/SparseTensor/IR/SparseTensorOps.cpp.inc" diff --git a/mlir/test/Dialect/SparseTensor/invalid.mlir b/mlir/test/Dialect/SparseTensor/invalid.mlir --- a/mlir/test/Dialect/SparseTensor/invalid.mlir +++ b/mlir/test/Dialect/SparseTensor/invalid.mlir @@ -98,6 +98,86 @@ // ----- +#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +#DenseVector = #sparse_tensor.encoding<{dimLevelType = ["dense"]}> + +func.func @sparse_metadata(%arg0: tensor<128xf64, #SparseVector>) -> !sparse_tensor.storage_specifier<#DenseVector> { + // expected-error@+1 {{the metadata encoding mismatches the input tensor encoding}} + %0 = sparse_tensor.storage_specifier %arg0 : tensor<128xf64, #SparseVector> to + !sparse_tensor.storage_specifier<#DenseVector> + return %0 : !sparse_tensor.storage_specifier<#DenseVector> +} + +// ----- + +#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> + +func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>) -> i64 { + // expected-error@+1 {{redundant dimension argument for querying value memory size}} + %0 = sparse_tensor.storage_specifier.get %arg0 val_mem_sz at 0 + : 
!sparse_tensor.storage_specifier<#SparseVector> to i64 + return %0 : i64 +} + +// ----- + +#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> + +func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>) -> i64 { + // expected-error@+1 {{missing dimension argument}} + %0 = sparse_tensor.storage_specifier.get %arg0 idx_mem_sz + : !sparse_tensor.storage_specifier<#SparseVector> to i64 + return %0 : i64 +} + +// ----- + +#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> + +func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>) -> i64 { + // expected-error@+1 {{requested dimension out of bound}} + %0 = sparse_tensor.storage_specifier.get %arg0 dim_sz at 1 + : !sparse_tensor.storage_specifier<#SparseVector> to i64 + return %0 : i64 +} + +// ----- + +#COO = #sparse_tensor.encoding<{dimLevelType = ["compressed-nu", "singleton"]}> + +func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#COO>) -> i64 { + // expected-error@+1 {{requested pointer memory size on a singleton level}} + %0 = sparse_tensor.storage_specifier.get %arg0 ptr_mem_sz at 1 + : !sparse_tensor.storage_specifier<#COO> to i64 + return %0 : i64 +} + +// ----- + +#COO = #sparse_tensor.encoding<{dimLevelType = ["compressed-nu", "singleton"]}> + +func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#COO>) -> i64 { + // expected-error@+1 {{type mismatch between requested }} + %0 = sparse_tensor.storage_specifier.get %arg0 ptr_mem_sz at 0 + : !sparse_tensor.storage_specifier<#COO> to i32 + return %0 : i32 +} + +// ----- + +#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> + +func.func @sparse_set_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>, + %arg1: i32) + -> !sparse_tensor.storage_specifier<#SparseVector> { + // expected-error@+1 {{type mismatch between requested }} + %0 = sparse_tensor.storage_specifier.set %arg0 dim_sz at 0 with %arg1 + : i32, 
!sparse_tensor.storage_specifier<#SparseVector> + return %0 : !sparse_tensor.storage_specifier<#SparseVector> +} + +// ----- + func.func @sparse_unannotated_load(%arg0: tensor<16x32xf64>) -> tensor<16x32xf64> { // expected-error@+1 {{'sparse_tensor.load' op operand #0 must be sparse tensor of any type values, but got 'tensor<16x32xf64>'}} %0 = sparse_tensor.load %arg0 : tensor<16x32xf64> diff --git a/mlir/test/Dialect/SparseTensor/roundtrip.mlir b/mlir/test/Dialect/SparseTensor/roundtrip.mlir --- a/mlir/test/Dialect/SparseTensor/roundtrip.mlir +++ b/mlir/test/Dialect/SparseTensor/roundtrip.mlir @@ -106,6 +106,50 @@ #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> +// CHECK-LABEL: func @sparse_metadata( +// CHECK-SAME: %[[A:.*]]: tensor<128xf64, #{{.*}}>) +// CHECK: %[[T:.*]] = sparse_tensor.storage_specifier %[[A]] : tensor<128xf64, #{{.*}}> to !sparse_tensor.storage_specifier<#{{.*}}> +// CHECK: return %[[T]] : !sparse_tensor.storage_specifier<#{{.*}}> +func.func @sparse_metadata(%arg0: tensor<128xf64, #SparseVector>) -> !sparse_tensor.storage_specifier<#SparseVector> { + %0 = sparse_tensor.storage_specifier %arg0 : tensor<128xf64, #SparseVector> to + !sparse_tensor.storage_specifier<#SparseVector> + return %0 : !sparse_tensor.storage_specifier<#SparseVector> +} + +// ----- + +#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> + +// CHECK-LABEL: func @sparse_get_md( +// CHECK-SAME: %[[A:.*]]: !sparse_tensor.storage_specifier<#{{.*}}> +// CHECK: %[[T:.*]] = sparse_tensor.storage_specifier.get %[[A]] dim_sz at 0 +// CHECK: return %[[T]] : i64 +func.func @sparse_get_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>) -> i64 { + %0 = sparse_tensor.storage_specifier.get %arg0 dim_sz at 0 + : !sparse_tensor.storage_specifier<#SparseVector> to i64 + return %0 : i64 +} + +// ----- + +#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> + +// CHECK-LABEL: func @sparse_set_md( +// CHECK-SAME: 
%[[A:.*]]: !sparse_tensor.storage_specifier<#{{.*}}>, +// CHECK-SAME: %[[I:.*]]: i64) +// CHECK: %[[T:.*]] = sparse_tensor.storage_specifier.set %[[A]] dim_sz at 0 with %[[I]] +// CHECK: return %[[T]] : !sparse_tensor.storage_specifier<#{{.*}}> +func.func @sparse_set_md(%arg0: !sparse_tensor.storage_specifier<#SparseVector>, %arg1: i64) + -> !sparse_tensor.storage_specifier<#SparseVector> { + %0 = sparse_tensor.storage_specifier.set %arg0 dim_sz at 0 with %arg1 + : i64, !sparse_tensor.storage_specifier<#SparseVector> + return %0 : !sparse_tensor.storage_specifier<#SparseVector> +} + +// ----- + +#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> + // CHECK-LABEL: func @sparse_noe( // CHECK-SAME: %[[A:.*]]: tensor<128xf64, #{{.*}}>) // CHECK: %[[T:.*]] = sparse_tensor.number_of_entries %[[A]] : tensor<128xf64, #{{.*}}> @@ -444,7 +488,7 @@ return } -// ---- +// ----- // CHECK-LABEL: func @sparse_sort_1d0v( // CHECK-SAME: %[[A:.*]]: index, diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel --- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel @@ -2064,6 +2064,7 @@ "include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td", "include/mlir/Dialect/SparseTensor/IR/SparseTensorBase.td", "include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td", + "include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td", ], includes = ["include"], deps = [ @@ -2085,6 +2086,14 @@ ["--gen-attrdef-defs"], "include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.cpp.inc", ), + ( + ["--gen-enum-decls"], + "include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrEnums.h.inc", + ), + ( + ["--gen-enum-defs"], + "include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrEnums.cpp.inc", + ), ], tblgen = ":mlir-tblgen", td_file = "include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td", @@ -2127,6 +2136,24 @@ deps = [":SparseTensorTdFiles"], ) 
+gentbl_cc_library( + name = "SparseTensorTypesIncGen", + strip_include_prefix = "include", + tbl_outs = [ + ( + ["--gen-typedef-decls"], + "include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.h.inc", + ), + ( + ["--gen-typedef-defs"], + "include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.cpp.inc", + ), + ], + tblgen = ":mlir-tblgen", + td_file = "include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td", + deps = [":SparseTensorTdFiles"], +) + gentbl_cc_library( name = "SparseTensorPassIncGen", strip_include_prefix = "include", @@ -2179,6 +2206,7 @@ ":SparseTensorAttrDefsIncGen", ":SparseTensorEnums", ":SparseTensorOpsIncGen", + ":SparseTensorTypesIncGen", "//llvm:Support", ], )