diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -41,6 +41,19 @@
   }
 }
 
+/// Returns internal dimension level type encoding.
+static unsigned
+getDimLevelTypeEncoding(SparseTensorEncodingAttr::DimLevelType dlt) {
+  switch (dlt) {
+  case SparseTensorEncodingAttr::DimLevelType::Dense:
+    return 0;
+  case SparseTensorEncodingAttr::DimLevelType::Compressed:
+    return 1;
+  case SparseTensorEncodingAttr::DimLevelType::Singleton:
+    return 2;
+  }
+}
+
 /// Returns function reference (first hit also inserts into module).
 static FlatSymbolRefAttr getFunc(Operation *op, StringRef name, Type result,
                                  ValueRange operands) {
@@ -107,12 +120,12 @@
     // Sparsity annotations in tensor constant form. Note that we cast
     // the static shape into a dynamic shape to ensure that the method
     // signature remains uniform accross different tensor dimensions.
-    SmallVector<bool, 4> attrs;
+    SmallVector<APInt, 4> attrs;
     unsigned sz = enc.getDimLevelType().size();
     for (unsigned i = 0; i < sz; i++)
-      attrs.push_back(enc.getDimLevelType()[i] ==
-                      SparseTensorEncodingAttr::DimLevelType::Compressed);
-    Type etp = rewriter.getIntegerType(1);
+      attrs.push_back(
+          APInt(8, getDimLevelTypeEncoding(enc.getDimLevelType()[i])));
+    Type etp = rewriter.getIntegerType(8);
     RankedTensorType tt1 = RankedTensorType::get({sz}, etp);
     RankedTensorType tt2 =
         RankedTensorType::get({ShapedType::kDynamicSize}, etp);
diff --git a/mlir/lib/ExecutionEngine/SparseUtils.cpp b/mlir/lib/ExecutionEngine/SparseUtils.cpp
--- a/mlir/lib/ExecutionEngine/SparseUtils.cpp
+++ b/mlir/lib/ExecutionEngine/SparseUtils.cpp
@@ -112,6 +112,8 @@
 /// function overloading to implement "partial" method specialization.
 class SparseTensorStorageBase {
 public:
+  enum DimLevelType : uint8_t { kDense = 0, kCompressed = 1, kSingleton = 2 };
+
   virtual uint64_t getDimSize(uint64_t) = 0;
 
   // Overhead storage.
@@ -152,7 +154,7 @@
 public:
   /// Constructs sparse tensor storage scheme following the given
   /// per-rank dimension dense/sparse annotations.
-  SparseTensorStorage(SparseTensor *tensor, bool *sparsity)
+  SparseTensorStorage(SparseTensor *tensor, uint8_t *sparsity)
       : sizes(tensor->getSizes()), pointers(getRank()), indices(getRank()) {
     // Provide hints on capacity.
     // TODO: needs fine-tuning based on sparsity
@@ -160,10 +162,12 @@
     values.reserve(nnz);
     for (uint64_t d = 0, s = 1, rank = getRank(); d < rank; d++) {
       s *= sizes[d];
-      if (sparsity[d]) {
+      if (sparsity[d] == kCompressed) {
         pointers[d].reserve(s + 1);
         indices[d].reserve(s);
         s = 1;
+      } else {
+        assert(sparsity[d] == kDense && "singleton not yet supported");
       }
     }
     // Then setup the tensor.
@@ -190,8 +194,8 @@
   /// representation of an external sparse tensor. This method prepares
   /// the pointers and indices arrays under the given per-rank dimension
   /// dense/sparse annotations.
-  void traverse(SparseTensor *tensor, bool *sparsity, uint64_t lo, uint64_t hi,
-                uint64_t d) {
+  void traverse(SparseTensor *tensor, uint8_t *sparsity, uint64_t lo,
+                uint64_t hi, uint64_t d) {
     const std::vector<Element> &elements = tensor->getElements();
     // Once dimensions are exhausted, insert the numerical values.
     if (d == getRank()) {
@@ -199,7 +203,7 @@
       return;
     }
     // Prepare a sparse pointer structure at this dimension.
-    if (sparsity[d] && pointers[d].empty())
+    if (sparsity[d] == kCompressed && pointers[d].empty())
       pointers[d].push_back(0);
     // Visit all elements in this interval.
     uint64_t full = 0;
@@ -210,7 +214,7 @@
       while (seg < hi && elements[seg].indices[d] == idx)
         seg++;
       // Handle segment in interval for sparse or dense dimension.
-      if (sparsity[d]) {
+      if (sparsity[d] == kCompressed) {
         indices[d].push_back(idx);
       } else {
         for (; full < idx; full++)
@@ -222,7 +226,7 @@
       lo = seg;
     }
     // Finalize the sparse pointer structure at this dimension.
-    if (sparsity[d]) {
+    if (sparsity[d] == kCompressed) {
       pointers[d].push_back(indices[d].size());
     } else {
       for (uint64_t sz = tensor->getSizes()[d]; full < sz; full++)
@@ -239,7 +243,7 @@
 
 /// Templated reader.
 template <typename P, typename I, typename V>
-void *newSparseTensor(char *filename, bool *sparsity, uint64_t size) {
+void *newSparseTensor(char *filename, uint8_t *sparsity, uint64_t size) {
   uint64_t idata[64];
   SparseTensor *t = static_cast<SparseTensor *>(openTensorC(filename, idata));
   assert(size == t->getRank()); // sparsity array must match rank
@@ -509,11 +513,11 @@
   kI8 = 5
 };
 
-void *newSparseTensor(char *filename, bool *abase, bool *adata, uint64_t aoff,
-                      uint64_t asize, uint64_t astride, uint64_t ptrTp,
-                      uint64_t indTp, uint64_t valTp) {
+void *newSparseTensor(char *filename, uint8_t *abase, uint8_t *adata,
+                      uint64_t aoff, uint64_t asize, uint64_t astride,
+                      uint64_t ptrTp, uint64_t indTp, uint64_t valTp) {
   assert(astride == 1);
-  bool *sparsity = adata + aoff;
+  uint8_t *sparsity = adata + aoff;
 
   // The most common cases: 64-bit or 32-bit overhead, double/float values.
   CASE(kU64, kU64, kF64, uint64_t, uint64_t, double);
diff --git a/mlir/test/Dialect/SparseTensor/conversion.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir
--- a/mlir/test/Dialect/SparseTensor/conversion.mlir
+++ b/mlir/test/Dialect/SparseTensor/conversion.mlir
@@ -33,9 +33,9 @@
 
 // CHECK-LABEL: func @sparse_new1d(
 // CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
-// CHECK: %[[D:.*]] = constant dense<true> : tensor<1xi1>
-// CHECK: %[[C:.*]] = tensor.cast %[[D]] : tensor<1xi1> to tensor<?xi1>
-// CHECK: %[[T:.*]] = call @newSparseTensor(%[[A]], %[[C]], %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr<i8>, tensor<?xi1>, i64, i64, i64) -> !llvm.ptr<i8>
+// CHECK: %[[D:.*]] = constant dense<1> : tensor<1xi8>
+// CHECK: %[[C:.*]] = tensor.cast %[[D]] : tensor<1xi8> to tensor<?xi8>
+// CHECK: %[[T:.*]] = call @newSparseTensor(%[[A]], %[[C]], %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr<i8>, tensor<?xi8>, i64, i64, i64) -> !llvm.ptr<i8>
 // CHECK: return %[[T]] : !llvm.ptr<i8>
 func @sparse_new1d(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {
   %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<128xf64, #SparseVector>
@@ -44,9 +44,9 @@
 
 // CHECK-LABEL: func @sparse_new2d(
 // CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
-// CHECK: %[[D:.*]] = constant dense<[false, true]> : tensor<2xi1>
-// CHECK: %[[C:.*]] = tensor.cast %[[D]] : tensor<2xi1> to tensor<?xi1>
-// CHECK: %[[T:.*]] = call @newSparseTensor(%[[A]], %[[C]], %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr<i8>, tensor<?xi1>, i64, i64, i64) -> !llvm.ptr<i8>
+// CHECK: %[[D:.*]] = constant dense<[0, 1]> : tensor<2xi8>
+// CHECK: %[[C:.*]] = tensor.cast %[[D]] : tensor<2xi8> to tensor<?xi8>
+// CHECK: %[[T:.*]] = call @newSparseTensor(%[[A]], %[[C]], %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr<i8>, tensor<?xi8>, i64, i64, i64) -> !llvm.ptr<i8>
 // CHECK: return %[[T]] : !llvm.ptr<i8>
 func @sparse_new2d(%arg0: !llvm.ptr<i8>) -> tensor {
   %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor
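
Note (reviewer illustration, not part of the patch): the change replaces the per-dimension boolean "is sparse" annotation with a three-valued 8-bit dimension level type, encoded identically on the compiler side (getDimLevelTypeEncoding) and on the runtime side (SparseTensorStorageBase::DimLevelType): 0 = dense, 1 = compressed, 2 = singleton. Below is a minimal standalone C++ sketch of that mapping; the fromBools helper is hypothetical and only shows how the old boolean annotations translate into the new encoding.

// Illustrative sketch only -- not part of the patch. Mirrors the DimLevelType
// values introduced in SparseTensorStorageBase; fromBools() is a hypothetical
// helper translating the old bool-per-dimension annotations.
#include <cassert>
#include <cstdint>
#include <vector>

enum DimLevelType : uint8_t { kDense = 0, kCompressed = 1, kSingleton = 2 };

// Translate "is this dimension sparse?" booleans into 8-bit dimension level
// types (singleton has no boolean counterpart, so it never appears here).
static std::vector<uint8_t> fromBools(const std::vector<bool> &sparsity) {
  std::vector<uint8_t> dlt;
  dlt.reserve(sparsity.size());
  for (bool isSparse : sparsity)
    dlt.push_back(isSparse ? kCompressed : kDense);
  return dlt;
}

int main() {
  // CSR-style matrix: dense rows, compressed columns (was {false, true}).
  std::vector<uint8_t> dlt = fromBools({false, true});
  assert(dlt[0] == kDense && dlt[1] == kCompressed);
  return 0;
}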