diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -29,6 +29,16 @@
 namespace {
 
+/// New tensor storage action types. Keep these values consistent with
+/// the sparse runtime support library.
+enum Action : uint32_t {
+  kEmpty = 0,
+  kFromFile = 1,
+  kFromCOO = 2,
+  kEmptyCOO = 3,
+  kToCOO = 4
+};
+
 //===----------------------------------------------------------------------===//
 // Helper methods.
 //===----------------------------------------------------------------------===//
@@ -105,18 +115,10 @@
   return rewriter.create<arith::ConstantIntOp>(loc, i, 32);
 }
 
-/// Returns integers of given width and values as a constant tensor.
-/// We cast the static shape into a dynamic shape to ensure that the
-/// method signature remains uniform across different tensor dimensions.
-static Value getTensor(ConversionPatternRewriter &rewriter, unsigned width,
-                       Location loc, ArrayRef<APInt> values) {
-  Type etp = rewriter.getIntegerType(width);
-  unsigned sz = values.size();
-  RankedTensorType tt1 = RankedTensorType::get({sz}, etp);
-  RankedTensorType tt2 = RankedTensorType::get({ShapedType::kDynamicSize}, etp);
-  auto elts = rewriter.create<arith::ConstantOp>(
-      loc, DenseElementsAttr::get(tt1, values));
-  return rewriter.create<tensor::CastOp>(loc, tt2, elts);
+/// Generates a constant of `i8` type.
+inline static Value constantI8(ConversionPatternRewriter &rewriter,
+                               Location loc, int8_t i) {
+  return rewriter.create<arith::ConstantIntOp>(loc, i, 8);
 }
 
 /// Returns a function reference (first hit also inserts into module). Sets
@@ -142,43 +144,70 @@
   return result;
 }
 
+/// Generates a temporary buffer of the given size and type.
+static Value genAlloc(ConversionPatternRewriter &rewriter, Location loc,
+                      unsigned sz, Type tp) {
+  auto memTp = MemRefType::get({ShapedType::kDynamicSize}, tp);
+  Value a = constantIndex(rewriter, loc, sz);
+  return rewriter.create<memref::AllocaOp>(loc, memTp, ValueRange{a});
+}
+
+/// Fills a temporary buffer of the given type with arguments.
+static Value genBuffer(ConversionPatternRewriter &rewriter, Location loc,
+                       ArrayRef<Value> values) {
+  unsigned sz = values.size();
+  assert(sz >= 1);
+  Value buffer = genAlloc(rewriter, loc, sz, values[0].getType());
+  for (unsigned i = 0; i < sz; i++) {
+    Value idx = constantIndex(rewriter, loc, i);
+    rewriter.create<memref::StoreOp>(loc, values[i], buffer, idx);
+  }
+  return buffer;
+}
+
 /// Generates a call into the "swiss army knife" method of the sparse runtime
 /// support library for materializing sparse tensors into the computation. The
 /// method returns the call value and assigns the permutation to 'perm'.
 static Value genNewCall(ConversionPatternRewriter &rewriter, Operation *op,
                         SparseTensorEncodingAttr &enc, uint32_t action,
-                        Value &perm, Value ptr = Value()) {
+                        Value &perm, ValueRange szs, Value ptr = Value()) {
   Location loc = op->getLoc();
   ShapedType resType = op->getResult(0).getType().cast<ShapedType>();
   SmallVector<Value, 8> params;
   // Sparsity annotations in tensor constant form.
-  SmallVector<APInt, 4> attrs;
+  SmallVector<Value, 4> attrs;
   unsigned sz = enc.getDimLevelType().size();
   for (unsigned i = 0; i < sz; i++)
-    attrs.push_back(
-        APInt(8, getDimLevelTypeEncoding(enc.getDimLevelType()[i])));
-  params.push_back(getTensor(rewriter, 8, loc, attrs));
+    attrs.push_back(constantI8(
+        rewriter, loc, getDimLevelTypeEncoding(enc.getDimLevelType()[i])));
+  params.push_back(genBuffer(rewriter, loc, attrs));
   // Dimension sizes array of the enveloping *dense* tensor. Useful for either
   // verification of external data, or for construction of internal data.
   auto shape = resType.getShape();
-  SmallVector<APInt, 4> sizes;
-  for (unsigned i = 0; i < sz; i++) {
-    uint64_t s = shape[i] == ShapedType::kDynamicSize ? 0 : shape[i];
-    sizes.push_back(APInt(64, s));
+  SmallVector<Value, 4> sizes;
+  if (szs.size() > 0) {
+    for (Value s : szs)
+      sizes.push_back(
+          rewriter.create<arith::IndexCastOp>(loc, s, rewriter.getI64Type()));
+  } else {
+    for (unsigned i = 0; i < sz; i++) {
+      uint64_t s = shape[i] == ShapedType::kDynamicSize ? 0 : shape[i];
+      sizes.push_back(constantI64(rewriter, loc, s));
+    }
   }
-  params.push_back(getTensor(rewriter, 64, loc, sizes));
+  params.push_back(genBuffer(rewriter, loc, sizes));
   // Dimension order permutation array. This is the "identity" permutation by
   // default, or otherwise the "reverse" permutation of a given ordering, so
   // that indices can be mapped quickly to the right position.
-  SmallVector<APInt, 4> rev(sz);
+  SmallVector<Value, 4> rev(sz);
   if (AffineMap p = enc.getDimOrdering()) {
     for (unsigned i = 0; i < sz; i++)
-      rev[p.getDimPosition(i)] = APInt(64, i);
+      rev[p.getDimPosition(i)] = constantI64(rewriter, loc, i);
   } else {
     for (unsigned i = 0; i < sz; i++)
-      rev[i] = APInt(64, i);
+      rev[i] = constantI64(rewriter, loc, i);
   }
-  perm = getTensor(rewriter, 64, loc, rev);
+  perm = genBuffer(rewriter, loc, rev);
   params.push_back(perm);
   // Secondary and primary types encoding.
   unsigned secPtr = getOverheadTypeEncoding(enc.getPointerBitWidth());
@@ -309,18 +338,6 @@
   return rewriter.create<tensor::ExtractOp>(loc, values, ivs[0]);
 }
 
-/// Generates code to stack-allocate a `memref<?xindex>` where the `?`
-/// is the given `rank`. This array is intended to serve as a reusable
-/// buffer for storing the indices of a single tensor element, to avoid
-/// allocation in the body of loops.
-static Value allocaIndices(ConversionPatternRewriter &rewriter, Location loc,
-                           int64_t rank) {
-  auto indexTp = rewriter.getIndexType();
-  auto memTp = MemRefType::get({ShapedType::kDynamicSize}, indexTp);
-  Value arg = constantIndex(rewriter, loc, rank);
-  return rewriter.create<memref::AllocaOp>(loc, memTp, ValueRange{arg});
-}
-
 //===----------------------------------------------------------------------===//
 // Conversion rules.
 //===----------------------------------------------------------------------===//
@@ -378,8 +395,25 @@
     if (!enc)
       return failure();
     Value perm;
+    rewriter.replaceOp(op, genNewCall(rewriter, op, enc, kFromFile, perm, {},
+                                      adaptor.getOperands()[0]));
+    return success();
+  }
+};
+
+/// Sparse conversion rule for the init operator.
+class SparseTensorInitConverter : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + LogicalResult + matchAndRewrite(InitOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + Type resType = op.getType(); + auto enc = getSparseTensorEncoding(resType); + if (!enc) + return failure(); + Value perm; rewriter.replaceOp( - op, genNewCall(rewriter, op, enc, 0, perm, adaptor.getOperands()[0])); + op, genNewCall(rewriter, op, enc, kEmpty, perm, adaptor.getOperands())); return success(); } }; @@ -402,8 +436,9 @@ // yield the fastest conversion but avoids the need for a full // O(N^2) conversion matrix. Value perm; - Value coo = genNewCall(rewriter, op, encDst, 3, perm, src); - rewriter.replaceOp(op, genNewCall(rewriter, op, encDst, 1, perm, coo)); + Value coo = genNewCall(rewriter, op, encDst, kToCOO, perm, {}, src); + rewriter.replaceOp( + op, genNewCall(rewriter, op, encDst, kFromCOO, perm, {}, coo)); return success(); } if (!encDst || encSrc) { @@ -439,8 +474,9 @@ Location loc = op->getLoc(); ShapedType shape = resType.cast(); Value perm; - Value ptr = genNewCall(rewriter, op, encDst, 2, perm); - Value ind = allocaIndices(rewriter, loc, shape.getRank()); + Value ptr = genNewCall(rewriter, op, encDst, kEmptyCOO, perm, {}); + Value ind = + genAlloc(rewriter, loc, shape.getRank(), rewriter.getIndexType()); SmallVector lo; SmallVector hi; SmallVector st; @@ -478,7 +514,8 @@ genAddEltCall(rewriter, op, eltType, ptr, val, ind, perm); return {}; }); - rewriter.replaceOp(op, genNewCall(rewriter, op, encDst, 1, perm, ptr)); + rewriter.replaceOp( + op, genNewCall(rewriter, op, encDst, kFromCOO, perm, {}, ptr)); return success(); } }; @@ -637,9 +674,9 @@ void mlir::populateSparseTensorConversionPatterns(TypeConverter &typeConverter, RewritePatternSet &patterns) { patterns.add(typeConverter, - patterns.getContext()); + SparseTensorNewConverter, SparseTensorInitConverter, + SparseTensorConvertConverter, SparseTensorReleaseConverter, + SparseTensorToPointersConverter, SparseTensorToIndicesConverter, + SparseTensorToValuesConverter, SparseTensorToTensorConverter>( + typeConverter, patterns.getContext()); } diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp @@ -97,8 +97,8 @@ RewritePatternSet patterns(ctx); SparseTensorTypeConverter converter; ConversionTarget target(*ctx); - target.addIllegalOp(); + target.addIllegalOp(); // All dynamic rules below accept new function, call, return, and dimop // operations as legal output of the rewriting provided that all sparse // tensor types have been fully rewritten. @@ -114,11 +114,10 @@ }); // The following operations and dialects may be introduced by the // rewriting rules, and are therefore marked as legal. - target.addLegalOp(); - target.addLegalDialect(); + target.addLegalOp(); + target.addLegalDialect(); // Populate with rules and apply rewriting rules. 
populateFuncOpTypeConversionPattern(patterns, converter); populateCallOpTypeConversionPattern(patterns, converter); diff --git a/mlir/lib/ExecutionEngine/SparseUtils.cpp b/mlir/lib/ExecutionEngine/SparseUtils.cpp --- a/mlir/lib/ExecutionEngine/SparseUtils.cpp +++ b/mlir/lib/ExecutionEngine/SparseUtils.cpp @@ -180,36 +180,37 @@ template class SparseTensorStorage : public SparseTensorStorageBase { public: - /// Constructs a sparse tensor storage scheme from the given sparse - /// tensor in coordinate scheme following the given per-rank dimension - /// dense/sparse annotations. - SparseTensorStorage(SparseTensorCOO *tensor, const uint8_t *sparsity, - const uint64_t *perm) - : sizes(tensor->getSizes()), rev(getRank()), pointers(getRank()), - indices(getRank()) { + /// Constructs a sparse tensor storage scheme with the given dimensions, + /// permutation, and per-rank dimension dense/sparse annotations, using + /// the coordinate scheme tensor for the initial contents if provided. + SparseTensorStorage(const std::vector &szs, const uint64_t *perm, + const uint8_t *sparsity, SparseTensorCOO *tensor) + : sizes(szs), rev(getRank()), pointers(getRank()), indices(getRank()) { // Store "reverse" permutation. - for (uint64_t d = 0, rank = getRank(); d < rank; d++) - rev[perm[d]] = d; - // Provide hints on capacity. + for (uint64_t r = 0, rank = getRank(); r < rank; r++) + rev[perm[r]] = r; + // Provide hints on capacity of pointers and indices. // TODO: needs fine-tuning based on sparsity - uint64_t nnz = tensor->getElements().size(); - values.reserve(nnz); - for (uint64_t d = 0, s = 1, rank = getRank(); d < rank; d++) { - s *= sizes[d]; - if (sparsity[d] == kCompressed) { - pointers[d].reserve(s + 1); - indices[d].reserve(s); + for (uint64_t r = 0, s = 1, rank = getRank(); r < rank; r++) { + s *= sizes[r]; + if (sparsity[r] == kCompressed) { + pointers[r].reserve(s + 1); + indices[r].reserve(s); s = 1; } else { - assert(sparsity[d] == kDense && "singleton not yet supported"); + assert(sparsity[r] == kDense && "singleton not yet supported"); } } // Prepare sparse pointer structures for all dimensions. - for (uint64_t d = 0, rank = getRank(); d < rank; d++) - if (sparsity[d] == kCompressed) - pointers[d].push_back(0); - // Then setup the tensor. - fromCOO(tensor, sparsity, 0, nnz, 0); + for (uint64_t r = 0, rank = getRank(); r < rank; r++) + if (sparsity[r] == kCompressed) + pointers[r].push_back(0); + // Then assign contents from coordinate scheme tensor if provided. + if (tensor) { + uint64_t nnz = tensor->getElements().size(); + values.reserve(nnz); + fromCOO(tensor, sparsity, 0, nnz, 0); + } } virtual ~SparseTensorStorage() {} @@ -257,15 +258,29 @@ return tensor; } - /// Factory method. Expects a coordinate scheme that respects the same - /// permutation as is desired for the new sparse storage scheme. - static SparseTensorStorage *newSparseTensor(SparseTensorCOO *t, - const uint8_t *sparsity, - const uint64_t *perm) { - t->sort(); // sort lexicographically - SparseTensorStorage *n = - new SparseTensorStorage(t, sparsity, perm); - delete t; + /// Factory method. Constructs a sparse tensor storage scheme with the given + /// dimensions, permutation, and per-rank dimension dense/sparse annotations, + /// using the coordinate scheme tensor for the initial contents if provided. + /// In the latter case, the coordinate scheme must respect the same + /// permutation as is desired for the new sparse tensor storage. 
+ static SparseTensorStorage * + newSparseTensor(uint64_t size, const uint64_t *sizes, const uint64_t *perm, + const uint8_t *sparsity, SparseTensorCOO *tensor) { + SparseTensorStorage *n = nullptr; + if (tensor) { + assert(tensor->getRank() == size); + for (uint64_t r = 0; r < size; r++) + assert(sizes[r] == 0 || sizes[r] == tensor->getSizes()[perm[r]]); + tensor->sort(); // sort lexicographically + n = new SparseTensorStorage(tensor->getSizes(), perm, sparsity, + tensor); + delete tensor; + } else { + std::vector permsz(size); + for (uint64_t r = 0; r < size; r++) + permsz[perm[r]] = sizes[r]; + n = new SparseTensorStorage(permsz, perm, sparsity, tensor); + } return n; } @@ -518,7 +533,13 @@ kI8 = 6 }; -enum Action : uint32_t { kFromFile = 0, kFromCOO = 1, kNewCOO = 2, kToCOO = 3 }; +enum Action : uint32_t { + kEmpty = 0, + kFromFile = 1, + kFromCOO = 2, + kEmptyCOO = 3, + kToCOO = 4 +}; #define CASE(p, i, v, P, I, V) \ if (ptrTp == (p) && indTp == (i) && valTp == (v)) { \ @@ -528,14 +549,14 @@ openSparseTensorCOO(static_cast(ptr), size, sizes, perm); \ else if (action == kFromCOO) \ tensor = static_cast *>(ptr); \ - else if (action == kNewCOO) \ + else if (action == kEmptyCOO) \ return SparseTensorCOO::newSparseTensorCOO(size, sizes, perm); \ else if (action == kToCOO) \ return static_cast *>(ptr)->toCOO(perm); \ else \ - assert(0); \ - return SparseTensorStorage::newSparseTensor(tensor, sparsity, \ - perm); \ + assert(action == kEmpty); \ + return SparseTensorStorage::newSparseTensor(size, sizes, perm, \ + sparsity, tensor); \ } #define IMPL1(NAME, TYPE, LIB) \ @@ -586,9 +607,10 @@ /// method for materializing sparse tensors into the computation. /// /// action: -/// kFromFile = ptr contains filename to read into storage -/// kFromCOO = ptr contains coordinate scheme to assign to new storage -/// kNewCOO = returns empty coordinate scheme to fill and use with kFromCOO +/// kEmpty = returns empty storage to fill later +/// kFromFile = returns storage, where ptr contains filename to read +/// kFromCOO = returns storage, where ptr contains coordinate scheme to assign +/// kEmptyCOO = returns empty coordinate scheme to fill and use with kFromCOO /// kToCOO = returns coordinate scheme from storage in ptr to use with kFromCOO void * _mlir_ciface_newSparseTensor(StridedMemRefType *aref, // NOLINT @@ -750,7 +772,7 @@ } // Return sparse tensor storage format as opaque pointer. 
return SparseTensorStorage::newSparseTensor( - tensor, sparse.data(), perm.data()); + rank, shape, perm.data(), sparse.data(), tensor); } } // extern "C" diff --git a/mlir/test/Dialect/SparseTensor/conversion.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir --- a/mlir/test/Dialect/SparseTensor/conversion.mlir +++ b/mlir/test/Dialect/SparseTensor/conversion.mlir @@ -69,12 +69,12 @@ // CHECK-LABEL: func @sparse_new1d( // CHECK-SAME: %[[A:.*]]: !llvm.ptr) -> !llvm.ptr -// CHECK-DAG: %[[U:.*]] = arith.constant dense<1> : tensor<1xi8> -// CHECK-DAG: %[[V:.*]] = arith.constant dense<128> : tensor<1xi64> -// CHECK-DAG: %[[W:.*]] = arith.constant dense<0> : tensor<1xi64> -// CHECK-DAG: %[[X:.*]] = tensor.cast %[[U]] : tensor<1xi8> to tensor -// CHECK-DAG: %[[Y:.*]] = tensor.cast %[[V]] : tensor<1xi64> to tensor -// CHECK-DAG: %[[Z:.*]] = tensor.cast %[[W]] : tensor<1xi64> to tensor +// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8> +// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xi64> +// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xi64> +// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref +// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xi64> to memref +// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xi64> to memref // CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]]) // CHECK: return %[[T]] : !llvm.ptr func @sparse_new1d(%arg0: !llvm.ptr) -> tensor<128xf64, #SparseVector> { @@ -84,12 +84,12 @@ // CHECK-LABEL: func @sparse_new2d( // CHECK-SAME: %[[A:.*]]: !llvm.ptr) -> !llvm.ptr -// CHECK-DAG: %[[U:.*]] = arith.constant dense<[0, 1]> : tensor<2xi8> -// CHECK-DAG: %[[V:.*]] = arith.constant dense<0> : tensor<2xi64> -// CHECK-DAG: %[[W:.*]] = arith.constant dense<[0, 1]> : tensor<2xi64> -// CHECK-DAG: %[[X:.*]] = tensor.cast %[[U]] : tensor<2xi8> to tensor -// CHECK-DAG: %[[Y:.*]] = tensor.cast %[[V]] : tensor<2xi64> to tensor -// CHECK-DAG: %[[Z:.*]] = tensor.cast %[[W]] : tensor<2xi64> to tensor +// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8> +// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xi64> +// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xi64> +// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref +// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xi64> to memref +// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xi64> to memref // CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]]) // CHECK: return %[[T]] : !llvm.ptr func @sparse_new2d(%arg0: !llvm.ptr) -> tensor { @@ -99,12 +99,12 @@ // CHECK-LABEL: func @sparse_new3d( // CHECK-SAME: %[[A:.*]]: !llvm.ptr) -> !llvm.ptr -// CHECK-DAG: %[[U:.*]] = arith.constant dense<[0, 1, 1]> : tensor<3xi8> -// CHECK-DAG: %[[V:.*]] = arith.constant dense<0> : tensor<3xi64> -// CHECK-DAG: %[[W:.*]] = arith.constant dense<[1, 2, 0]> : tensor<3xi64> -// CHECK-DAG: %[[X:.*]] = tensor.cast %[[U]] : tensor<3xi8> to tensor -// CHECK-DAG: %[[Y:.*]] = tensor.cast %[[V]] : tensor<3xi64> to tensor -// CHECK-DAG: %[[Z:.*]] = tensor.cast %[[W]] : tensor<3xi64> to tensor +// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<3xi8> +// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<3xi64> +// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<3xi64> +// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<3xi8> to memref +// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<3xi64> to memref +// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xi64> to memref // 
CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]]) // CHECK: return %[[T]] : !llvm.ptr func @sparse_new3d(%arg0: !llvm.ptr) -> tensor { @@ -112,6 +112,29 @@ return %0 : tensor } +// CHECK-LABEL: func @sparse_init( +// CHECK-SAME: %[[I:.*]]: index, +// CHECK-SAME: %[[J:.*]]: index) -> !llvm.ptr +// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index +// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index +// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8> +// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xi64> +// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xi64> +// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref +// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xi64> to memref +// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xi64> to memref +// CHECK-DAG: %[[II:.*]] = arith.index_cast %[[I]] : index to i64 +// CHECK-DAG: %[[JJ:.*]] = arith.index_cast %[[J]] : index to i64 +// CHECK-DAG: memref.store %[[II]], %[[Q]][%[[C0]]] : memref<2xi64> +// CHECK-DAG: memref.store %[[JJ]], %[[Q]][%[[C1]]] : memref<2xi64> +// CHECK: %[[A:.*]] = llvm.mlir.null : !llvm.ptr +// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]]) +// CHECK: return %[[T]] : !llvm.ptr +func @sparse_init(%arg0: index, %arg1: index) -> tensor { + %0 = sparse_tensor.init [%arg0, %arg1] : tensor + return %0 : tensor +} + // CHECK-LABEL: func @sparse_release( // CHECK-SAME: %[[A:.*]]: !llvm.ptr) // CHECK: call @delSparseTensor(%[[A]]) : (!llvm.ptr) -> () @@ -133,20 +156,22 @@ // CHECK-SAME: %[[A:.*]]: tensor) -> !llvm.ptr // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[D0:.*]] = arith.constant dense<0> : tensor<1xi64> -// CHECK-DAG: %[[D1:.*]] = arith.constant dense<1> : tensor<1xi8> -// CHECK-DAG: %[[X:.*]] = tensor.cast %[[D1]] : tensor<1xi8> to tensor -// CHECK-DAG: %[[Y:.*]] = tensor.cast %[[D0]] : tensor<1xi64> to tensor -// CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Y]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.}}) +// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8> +// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xi64> +// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xi64> +// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref +// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xi64> to memref +// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xi64> to memref +// CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.}}) // CHECK: %[[M:.*]] = memref.alloca() : memref<1xindex> // CHECK: %[[T:.*]] = memref.cast %[[M]] : memref<1xindex> to memref // CHECK: %[[U:.*]] = tensor.dim %[[A]], %[[C0]] : tensor // CHECK: scf.for %[[I:.*]] = %[[C0]] to %[[U]] step %[[C1]] { // CHECK: %[[E:.*]] = tensor.extract %[[A]][%[[I]]] : tensor // CHECK: memref.store %[[I]], %[[M]][%[[C0]]] : memref<1xindex> -// CHECK: call @addEltI32(%[[C]], %[[E]], %[[T]], %[[Y]]) +// CHECK: call @addEltI32(%[[C]], %[[E]], %[[T]], %[[Z]]) // CHECK: } -// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Y]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]]) +// CHECK: %[[T:.*]] = call @newSparseTensor(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]]) // CHECK: return %[[T]] : !llvm.ptr func @sparse_convert_1d(%arg0: tensor) -> tensor { %0 = sparse_tensor.convert %arg0 : tensor to tensor @@ -167,12 +192,12 @@ 
// CHECK-SAME: %[[A:.*]]: tensor<2x4xf64>) -> !llvm.ptr // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[U:.*]] = arith.constant dense<[0, 1]> : tensor<2xi8> -// CHECK-DAG: %[[V:.*]] = arith.constant dense<[2, 4]> : tensor<2xi64> -// CHECK-DAG: %[[W:.*]] = arith.constant dense<[0, 1]> : tensor<2xi64> -// CHECK-DAG: %[[X:.*]] = tensor.cast %[[U]] : tensor<2xi8> to tensor -// CHECK-DAG: %[[Y:.*]] = tensor.cast %[[V]] : tensor<2xi64> to tensor -// CHECK-DAG: %[[Z:.*]] = tensor.cast %[[W]] : tensor<2xi64> to tensor +// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8> +// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xi64> +// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xi64> +// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref +// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xi64> to memref +// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xi64> to memref // CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.}}) // CHECK: %[[M:.*]] = memref.alloca() : memref<2xindex> // CHECK: %[[T:.*]] = memref.cast %[[M]] : memref<2xindex> to memref @@ -184,50 +209,40 @@ // CHECK: call @addEltF64(%[[C]], %[[E]], %[[T]], %[[Z]]) // CHECK: } // CHECK: } -// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]]) +// CHECK: %[[T:.*]] = call @newSparseTensor(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]]) // CHECK: return %[[T]] : !llvm.ptr func @sparse_convert_2d(%arg0: tensor<2x4xf64>) -> tensor<2x4xf64, #SparseMatrix> { %0 = sparse_tensor.convert %arg0 : tensor<2x4xf64> to tensor<2x4xf64, #SparseMatrix> return %0 : tensor<2x4xf64, #SparseMatrix> } -#CSR = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }> - -// CHECK-LABEL: func @entry() -> !llvm.ptr { -// CHECK: %[[C1:.*]] = arith.constant 1 : i32 -// CHECK: %[[Offset:.*]] = arith.constant dense<[0, 1]> : tensor<2xi64> -// CHECK: %[[Dims:.*]] = arith.constant dense<[8, 7]> : tensor<2xi64> -// CHECK: %[[Base:.*]] = arith.constant dense<[0, 1]> : tensor<2xi8> -// CHECK: %[[I2:.*]] = arith.constant 2 : index -// CHECK: %[[SparseV:.*]] = arith.constant dense<[1.000000e+00, 5.000000e+00]> : tensor<2xf32> -// CHECK: %[[SparseI:.*]] = arith.constant dense<{{\[\[}}0, 0], [1, 6]]> : tensor<2x2xi64> -// CHECK: %[[I1:.*]] = arith.constant 1 : index -// CHECK: %[[I0:.*]] = arith.constant 0 : index -// CHECK: %[[C2:.*]] = arith.constant 2 : i32 -// CHECK: %[[BaseD:.*]] = tensor.cast %[[Base]] : tensor<2xi8> to tensor -// CHECK: %[[DimsD:.*]] = tensor.cast %[[Dims]] : tensor<2xi64> to tensor -// CHECK: %[[OffsetD:.*]] = tensor.cast %[[Offset]] : tensor<2xi64> to tensor -// CHECK: %[[TCOO:.*]] = call @newSparseTensor(%[[BaseD]], %[[DimsD]], %[[OffsetD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[C2]], %{{.}}) -// CHECK: %[[Index:.*]] = memref.alloca() : memref<2xindex> -// CHECK: %[[IndexD:.*]] = memref.cast %[[Index]] : memref<2xindex> to memref -// CHECK: scf.for %[[IV:.*]] = %[[I0]] to %[[I2]] step %[[I1]] { -// CHECK: %[[VAL0:.*]] = tensor.extract %[[SparseI]]{{\[}}%[[IV]], %[[I0]]] : tensor<2x2xi64> -// CHECK: %[[VAL1:.*]] = arith.index_cast %[[VAL0]] : i64 to index -// CHECK: memref.store %[[VAL1]], %[[Index]]{{\[}}%[[I0]]] : memref<2xindex> -// CHECK: %[[VAL2:.*]] = tensor.extract %[[SparseI]]{{\[}}%[[IV]], %[[I1]]] : tensor<2x2xi64> -// CHECK: %[[VAL3:.*]] = arith.index_cast %[[VAL2]] : i64 to index -// CHECK: 
memref.store %[[VAL3]], %[[Index]]{{\[}}%[[I1]]] : memref<2xindex> -// CHECK: %[[VAL4:.*]] = tensor.extract %[[SparseV]]{{\[}}%[[IV]]] : tensor<2xf32> -// CHECK: call @addEltF32(%[[TCOO]], %[[VAL4]], %[[IndexD]], %[[OffsetD]]) -// CHECK: } -// CHECK: %[[T:.*]] = call @newSparseTensor(%[[BaseD]], %[[DimsD]], %[[OffsetD]], %{{.*}}, %{{.*}}, %[[C1]], %{{.*}}) -// CHECK: return %[[T]] : !llvm.ptr -func @entry() -> tensor<8x7xf32, #CSR>{ +// CHECK-LABEL: func @sparse_constant() -> !llvm.ptr { +// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index +// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index +// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index +// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8> +// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xi64> +// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xi64> +// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref +// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xi64> to memref +// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xi64> to memref +// CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.}}) +// CHECK: %[[M:.*]] = memref.alloca() : memref<2xindex> +// CHECK: %[[N:.*]] = memref.cast %[[M]] : memref<2xindex> to memref +// CHECK: scf.for %[[I:.*]] = %[[C0]] to %[[C2]] step %[[C1]] { +// CHECK: memref.store %{{.*}}, %[[M]][%[[C0]]] : memref<2xindex> +// CHECK: memref.store %{{.*}}, %[[M]][%[[C1]]] : memref<2xindex> +// CHECK: %[[V:.*]] = tensor.extract %{{.*}}[%[[I]]] : tensor<2xf32> +// CHECK: call @addEltF32(%{{.*}}, %[[V]], %[[N]], %{{.*}}) +// CHECK: } +// CHECK: %[[T:.*]] = call @newSparseTensor(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]]) +// CHECK: return %[[T]] : !llvm.ptr +func @sparse_constant() -> tensor<8x7xf32, #SparseMatrix>{ // Initialize a tensor. %0 = arith.constant sparse<[[0, 0], [1, 6]], [1.0, 5.0]> : tensor<8x7xf32> // Convert the tensor to a sparse tensor. 
- %1 = sparse_tensor.convert %0 : tensor<8x7xf32> to tensor<8x7xf32, #CSR> - return %1 : tensor<8x7xf32, #CSR> + %1 = sparse_tensor.convert %0 : tensor<8x7xf32> to tensor<8x7xf32, #SparseMatrix> + return %1 : tensor<8x7xf32, #SparseMatrix> } // CHECK-LABEL: func @sparse_convert_3d( @@ -235,15 +250,15 @@ // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index // CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index -// CHECK-DAG: %[[U:.*]] = arith.constant dense<[0, 1, 1]> : tensor<3xi8> -// CHECK-DAG: %[[V:.*]] = arith.constant dense<0> : tensor<3xi64> -// CHECK-DAG: %[[W:.*]] = arith.constant dense<[1, 2, 0]> : tensor<3xi64> -// CHECK-DAG: %[[X:.*]] = tensor.cast %[[U]] : tensor<3xi8> to tensor -// CHECK-DAG: %[[Y:.*]] = tensor.cast %[[V]] : tensor<3xi64> to tensor -// CHECK-DAG: %[[Z:.*]] = tensor.cast %[[W]] : tensor<3xi64> to tensor +// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<3xi8> +// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<3xi64> +// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<3xi64> +// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<3xi8> to memref +// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<3xi64> to memref +// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xi64> to memref // CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.}}) // CHECK: %[[M:.*]] = memref.alloca() : memref<3xindex> -// CHECK: %[[T:.*]] = memref.cast %[[M]] : memref<3xindex> to memref +// CHECK: %[[N:.*]] = memref.cast %[[M]] : memref<3xindex> to memref // CHECK: %[[U1:.*]] = tensor.dim %[[A]], %[[C0]] : tensor // CHECK: %[[U2:.*]] = tensor.dim %[[A]], %[[C1]] : tensor // CHECK: %[[U3:.*]] = tensor.dim %[[A]], %[[C2]] : tensor @@ -254,11 +269,11 @@ // CHECK: memref.store %[[I]], %[[M]][%[[C0]]] : memref<3xindex> // CHECK: memref.store %[[J]], %[[M]][%[[C1]]] : memref<3xindex> // CHECK: memref.store %[[K]], %[[M]][%[[C2]]] : memref<3xindex> -// CHECK: call @addEltF64(%[[C]], %[[E]], %[[T]], %[[Z]]) +// CHECK: call @addEltF64(%[[C]], %[[E]], %[[N]], %[[Z]]) // CHECK: } // CHECK: } // CHECK: } -// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]]) +// CHECK: %[[T:.*]] = call @newSparseTensor(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]]) // CHECK: return %[[T]] : !llvm.ptr func @sparse_convert_3d(%arg0: tensor) -> tensor { %0 = sparse_tensor.convert %arg0 : tensor to tensor
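
Note on the permutation bookkeeping introduced above: for the new kEmpty path the runtime factory records the dimension sizes in storage order (permsz[perm[r]] = sizes[r]), while the storage constructor keeps the "reverse" permutation (rev[perm[r]] = r). The standalone C++ sketch below is illustrative only and not part of the patch; it assumes, purely for the example, that perm[r] names the storage level holding original dimension r, and every identifier in it is local to the sketch.

// Illustrative sketch (not part of the patch): mirrors the size and
// "reverse" permutation bookkeeping of SparseTensorStorage/newSparseTensor.
#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  // Hypothetical 3-D tensor: dimension d0 has size 10, d1 size 20, d2 size 30.
  std::vector<uint64_t> sizes = {10, 20, 30};
  // Assumed interpretation: perm[r] is the storage level of original dim r.
  std::vector<uint64_t> perm = {2, 0, 1};

  uint64_t rank = sizes.size();
  std::vector<uint64_t> permsz(rank); // dimension sizes in storage order
  std::vector<uint64_t> rev(rank);    // storage level -> original dimension
  for (uint64_t r = 0; r < rank; r++) {
    permsz[perm[r]] = sizes[r]; // as in newSparseTensor for the kEmpty case
    rev[perm[r]] = r;           // as in the SparseTensorStorage constructor
  }

  // Round trip: storage level l holds original dimension rev[l], whose size
  // is permsz[l]. With perm = {2, 0, 1} this yields permsz = {20, 30, 10}
  // and rev = {1, 2, 0}.
  for (uint64_t l = 0; l < rank; l++)
    assert(permsz[l] == sizes[rev[l]]);
  return 0;
}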