diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -83,7 +83,7 @@
 /// Returns integers of given width and values as a constant tensor.
 /// We cast the static shape into a dynamic shape to ensure that the
-/// method signature remains uniform accross different tensor dimensions.
+/// method signature remains uniform across different tensor dimensions.
 static Value getTensor(ConversionPatternRewriter &rewriter, unsigned width,
                        Location loc, ArrayRef<APInt> values) {
   Type etp = rewriter.getIntegerType(width);
@@ -95,20 +95,25 @@
   return rewriter.create<tensor::CastOp>(loc, tt2, elts);
 }
 
-/// Returns function reference (first hit also inserts into module).
+/// Returns a function reference (first hit also inserts into module). Sets
+/// the "_emit_c_interface" on the function declaration when requested,
+/// so that LLVM lowering generates a wrapper function that takes care
+/// of ABI complications with passing in and returning MemRefs to C functions.
 static FlatSymbolRefAttr getFunc(Operation *op, StringRef name, Type resultType,
-                                 ValueRange operands) {
+                                 ValueRange operands,
+                                 bool emit_c_interface = false) {
   MLIRContext *context = op->getContext();
   auto module = op->getParentOfType<ModuleOp>();
   auto result = SymbolRefAttr::get(context, name);
   auto func = module.lookupSymbol<FuncOp>(result.getAttr());
   if (!func) {
     OpBuilder moduleBuilder(module.getBodyRegion());
-    moduleBuilder
-        .create<FuncOp>(
-            op->getLoc(), name,
-            FunctionType::get(context, operands.getTypes(), resultType))
-        .setPrivate();
+    func = moduleBuilder.create<FuncOp>(
+        op->getLoc(), name,
+        FunctionType::get(context, operands.getTypes(), resultType));
+    func.setPrivate();
+    if (emit_c_interface)
+      func->setAttr("llvm.emit_c_interface", UnitAttr::get(context));
   }
   return result;
 }
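The "llvm.emit_c_interface" attribute makes the standard-to-LLVM lowering emit an extra interface wrapper, named by prefixing the declared function with "_mlir_ciface_", in which each memref operand (and memref result) is communicated as a pointer to a plain descriptor struct instead of an exploded list of scalars. A rough sketch of what the runtime sees for a rank-1 memref of i64; the names here are illustrative only, with the field layout mirroring the TEMPLATE structs added to SparseUtils.cpp below:

#include <cstdint>

// Rank-1 descriptor as seen by a "_mlir_ciface_" wrapper for memref<?xi64>.
struct MemRef1DU64Sketch {
  uint64_t *base;      // allocated pointer
  uint64_t *data;      // aligned pointer
  uint64_t off;        // offset into data, in elements
  uint64_t sizes[1];   // extent per dimension
  uint64_t strides[1]; // stride per dimension, in elements
};

// A declaration such as "func private @foo(memref<?xi64>)" carrying
// llvm.emit_c_interface would then also be reachable from C/C++ roughly as:
extern "C" void _mlir_ciface_foo(MemRef1DU64Sketch *arg0);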
@@ -171,8 +176,8 @@
   params.push_back(ptr);
   // Generate the call to create new tensor.
   StringRef name = "newSparseTensor";
-  auto call =
-      rewriter.create<CallOp>(loc, pTp, getFunc(op, name, pTp, params), params);
+  auto call = rewriter.create<CallOp>(
+      loc, pTp, getFunc(op, name, pTp, params, true), params);
   return call.getResult(0);
 }
 
@@ -210,7 +215,8 @@
   params.push_back(ind);
   params.push_back(perm);
   Type pTp = LLVM::LLVMPointerType::get(IntegerType::get(op->getContext(), 8));
-  rewriter.create<CallOp>(loc, pTp, getFunc(op, name, pTp, params), params);
+  rewriter.create<CallOp>(loc, pTp, getFunc(op, name, pTp, params, true),
+                          params);
 }
 
 //===----------------------------------------------------------------------===//
@@ -356,8 +362,9 @@
     Type resType = op.getType();
     Type eltType = resType.cast<ShapedType>().getElementType();
     StringRef name;
+    assert(IndexType::kInternalStorageBitWidth == 64);
     if (eltType.isIndex())
-      name = "sparsePointers";
+      name = "sparsePointers"; // 64-bit, but its own name for unique signature
    else if (eltType.isInteger(64))
       name = "sparsePointers64";
     else if (eltType.isInteger(32))
@@ -369,7 +376,7 @@
     else
       return failure();
     rewriter.replaceOpWithNewOp<CallOp>(
-        op, resType, getFunc(op, name, resType, operands), operands);
+        op, resType, getFunc(op, name, resType, operands, true), operands);
     return success();
   }
 };
@@ -384,8 +391,9 @@
     Type resType = op.getType();
     Type eltType = resType.cast<ShapedType>().getElementType();
     StringRef name;
+    assert(IndexType::kInternalStorageBitWidth == 64);
     if (eltType.isIndex())
-      name = "sparseIndices";
+      name = "sparseIndices"; // 64-bit, but its own name for unique signature
    else if (eltType.isInteger(64))
       name = "sparseIndices64";
     else if (eltType.isInteger(32))
@@ -397,7 +405,7 @@
     else
       return failure();
     rewriter.replaceOpWithNewOp<CallOp>(
-        op, resType, getFunc(op, name, resType, operands), operands);
+        op, resType, getFunc(op, name, resType, operands, true), operands);
     return success();
   }
 };
@@ -427,7 +435,7 @@
     else
       return failure();
     rewriter.replaceOpWithNewOp<CallOp>(
-        op, resType, getFunc(op, name, resType, operands), operands);
+        op, resType, getFunc(op, name, resType, operands, true), operands);
     return success();
   }
 };
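All of these conversion patterns now request the C interface (the extra `true` passed to getFunc), so after lowering the calls bind to "_mlir_ciface_"-prefixed runtime symbols and receive their memref results through descriptor out-parameters. A hedged caller-side sketch, using the sparsePointers64 entry point that IMPL2 in SparseUtils.cpp generates; readPointers is a hypothetical helper, not part of the patch:

#include <cstdint>

struct MemRef1DU64 {
  uint64_t *base, *data;
  uint64_t off, sizes[1], strides[1];
};

// Defined by the IMPL2 macro in SparseUtils.cpp (see below).
extern "C" void _mlir_ciface_sparsePointers64(MemRef1DU64 *ref, void *tensor,
                                              uint64_t d);

// Hypothetical helper: fetch the pointers array of dimension d and its length.
inline uint64_t *readPointers(void *tensor, uint64_t d, uint64_t &size) {
  MemRef1DU64 ref;
  _mlir_ciface_sparsePointers64(&ref, tensor, d);
  size = ref.sizes[0];
  return ref.data + ref.off;
}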
diff --git a/mlir/lib/ExecutionEngine/SparseUtils.cpp b/mlir/lib/ExecutionEngine/SparseUtils.cpp
--- a/mlir/lib/ExecutionEngine/SparseUtils.cpp
+++ b/mlir/lib/ExecutionEngine/SparseUtils.cpp
@@ -111,8 +111,9 @@
   /// the given ordering and expects subsequent add() calls to honor
   /// that same ordering for the given indices. The result is a
   /// fully permuted coordinate scheme.
-  static SparseTensorCOO<V> *newSparseTensorCOO(uint64_t size, uint64_t *sizes,
-                                                uint64_t *perm,
+  static SparseTensorCOO<V> *newSparseTensorCOO(uint64_t size,
+                                                const uint64_t *sizes,
+                                                const uint64_t *perm,
                                                 uint64_t capacity = 0) {
     std::vector<uint64_t> permsz(size);
     for (uint64_t r = 0; r < size; r++)
@@ -182,8 +183,8 @@
   /// Constructs a sparse tensor storage scheme from the given sparse
   /// tensor in coordinate scheme following the given per-rank dimension
   /// dense/sparse annotations.
-  SparseTensorStorage(SparseTensorCOO<V> *tensor, uint8_t *sparsity,
-                      uint64_t *perm)
+  SparseTensorStorage(SparseTensorCOO<V> *tensor, const uint8_t *sparsity,
+                      const uint64_t *perm)
       : sizes(tensor->getSizes()), rev(getRank()), pointers(getRank()),
         indices(getRank()) {
     // Store "reverse" permutation.
@@ -235,7 +236,7 @@
   /// Returns this sparse tensor storage scheme as a new memory-resident
   /// sparse tensor in coordinate scheme with the given dimension order.
-  SparseTensorCOO<V> *toCOO(uint64_t *perm) {
+  SparseTensorCOO<V> *toCOO(const uint64_t *perm) {
     // Restore original order of the dimension sizes and allocate coordinate
     // scheme with desired new ordering specified in perm.
     uint64_t size = getRank();
@@ -258,8 +259,9 @@
   /// Factory method. Expects a coordinate scheme that respects the same
   /// permutation as is desired for the new sparse storage scheme.
-  static SparseTensorStorage<P, I, V> *
-  newSparseTensor(SparseTensorCOO<V> *t, uint8_t *sparsity, uint64_t *perm) {
+  static SparseTensorStorage<P, I, V> *newSparseTensor(SparseTensorCOO<V> *t,
+                                                       const uint8_t *sparsity,
+                                                       const uint64_t *perm) {
     t->sort(); // sort lexicographically
     SparseTensorStorage<P, I, V> *n =
         new SparseTensorStorage<P, I, V>(t, sparsity, perm);
@@ -271,7 +273,7 @@
   /// Initializes sparse tensor storage scheme from a memory-resident sparse
   /// tensor in coordinate scheme. This method prepares the pointers and indices
   /// arrays under the given per-rank dimension dense/sparse annotations.
-  void fromCOO(SparseTensorCOO<V> *tensor, uint8_t *sparsity, uint64_t lo,
+  void fromCOO(SparseTensorCOO<V> *tensor, const uint8_t *sparsity, uint64_t lo,
                uint64_t hi, uint64_t d) {
     const std::vector<Element<V>> &elements = tensor->getElements();
     // Once dimensions are exhausted, insert the numerical values.
@@ -428,8 +430,8 @@
 /// sparse tensor in coordinate scheme.
 template <typename V>
 static SparseTensorCOO<V> *openSparseTensorCOO(char *filename, uint64_t size,
-                                               uint64_t *sizes,
-                                               uint64_t *perm) {
+                                               const uint64_t *sizes,
+                                               const uint64_t *perm) {
   // Open the file.
   FILE *file = fopen(filename, "r");
   if (!file) {
@@ -515,6 +517,17 @@
   uint64_t strides[1];                                                         \
   }
 
+TEMPLATE(MemRef1DU64, uint64_t);
+TEMPLATE(MemRef1DU32, uint32_t);
+TEMPLATE(MemRef1DU16, uint16_t);
+TEMPLATE(MemRef1DU8, uint8_t);
+TEMPLATE(MemRef1DI64, int64_t);
+TEMPLATE(MemRef1DI32, int32_t);
+TEMPLATE(MemRef1DI16, int16_t);
+TEMPLATE(MemRef1DI8, int8_t);
+TEMPLATE(MemRef1DF64, double);
+TEMPLATE(MemRef1DF32, float);
+
 #define CASE(p, i, v, P, I, V)                                                 \
   if (ptrTp == (p) && indTp == (i) && valTp == (v)) {                          \
     SparseTensorCOO<V> *tensor = nullptr;                                      \
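Each TEMPLATE instantiation above defines one plain-C descriptor struct per element type; the IMPL1/IMPL2 rewrites in the next hunk fill such a descriptor in place instead of returning it by value. Assuming an instantiation along the lines of IMPL2(MemRef1DU64, sparsePointers64, uint64_t, getPointers) (the actual instantiation list lives further down in SparseUtils.cpp), the rewritten macro would expand inside this file roughly to:

// Expansion sketch only; exposes the storage's internal std::vector through
// the descriptor without copying.
void _mlir_ciface_sparsePointers64(MemRef1DU64 *ref, void *tensor,
                                   uint64_t d) {
  std::vector<uint64_t> *v;
  static_cast<SparseTensorStorageBase *>(tensor)->getPointers(&v, d);
  ref->base = ref->data = v->data();
  ref->off = 0;
  ref->sizes[0] = v->size();
  ref->strides[0] = 1;
}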
@@ -531,30 +544,36 @@
         perm);                                                                 \
   }
 
-#define IMPL1(RET, NAME, TYPE, LIB)                                            \
-  RET NAME(void *tensor) {                                                     \
+#define IMPL1(REF, NAME, TYPE, LIB)                                            \
+  void _mlir_ciface_##NAME(REF *ref, void *tensor) {                           \
     std::vector<TYPE> *v;                                                      \
     static_cast<SparseTensorStorageBase *>(tensor)->LIB(&v);                   \
-    return {v->data(), v->data(), 0, {v->size()}, {1}};                        \
+    ref->base = ref->data = v->data();                                         \
+    ref->off = 0;                                                              \
+    ref->sizes[0] = v->size();                                                 \
+    ref->strides[0] = 1;                                                       \
   }
 
-#define IMPL2(RET, NAME, TYPE, LIB)                                            \
-  RET NAME(void *tensor, uint64_t d) {                                         \
+#define IMPL2(REF, NAME, TYPE, LIB)                                            \
+  void _mlir_ciface_##NAME(REF *ref, void *tensor, uint64_t d) {               \
     std::vector<TYPE> *v;                                                      \
     static_cast<SparseTensorStorageBase *>(tensor)->LIB(&v, d);                \
-    return {v->data(), v->data(), 0, {v->size()}, {1}};                        \
+    ref->base = ref->data = v->data();                                         \
+    ref->off = 0;                                                              \
+    ref->sizes[0] = v->size();                                                 \
+    ref->strides[0] = 1;                                                       \
  }
 
 #define IMPL3(NAME, TYPE)                                                      \
-  void *NAME(void *tensor, TYPE value, uint64_t *ibase, uint64_t *idata,       \
-             uint64_t ioff, uint64_t isize, uint64_t istride, uint64_t *pbase, \
-             uint64_t *pdata, uint64_t poff, uint64_t psize,                   \
-             uint64_t pstride) {                                               \
-    assert(istride == 1 && pstride == 1 && isize == psize);                    \
-    uint64_t *indx = idata + ioff;                                             \
+  void *_mlir_ciface_##NAME(void *tensor, TYPE value, MemRef1DU64 *iref,       \
+                            MemRef1DU64 *pref) {                               \
     if (!value)                                                                \
       return tensor;                                                           \
-    uint64_t *perm = pdata + poff;                                             \
+    assert(iref->strides[0] == 1 && pref->strides[0] == 1);                    \
+    assert(iref->sizes[0] == pref->sizes[0]);                                  \
+    const uint64_t *indx = iref->data + iref->off;                             \
+    const uint64_t *perm = pref->data + pref->off;                             \
+    uint64_t isize = iref->sizes[0];                                           \
     std::vector<uint64_t> indices(isize);                                      \
     for (uint64_t r = 0; r < isize; r++)                                       \
       indices[perm[r]] = indx[r];                                              \
@@ -562,17 +581,6 @@
     return tensor;                                                             \
   }
 
-TEMPLATE(MemRef1DU64, uint64_t);
-TEMPLATE(MemRef1DU32, uint32_t);
-TEMPLATE(MemRef1DU16, uint16_t);
-TEMPLATE(MemRef1DU8, uint8_t);
-TEMPLATE(MemRef1DI64, int64_t);
-TEMPLATE(MemRef1DI32, int32_t);
-TEMPLATE(MemRef1DI16, int16_t);
-TEMPLATE(MemRef1DI8, int8_t);
-TEMPLATE(MemRef1DF64, double);
-TEMPLATE(MemRef1DF32, float);
-
 enum OverheadTypeEnum : uint64_t { kU64 = 1, kU32 = 2, kU16 = 3, kU8 = 4 };
 
 enum PrimaryTypeEnum : uint64_t {
@@ -591,19 +599,17 @@
 /// 1 : ptr contains coordinate scheme to assign to new storage
 /// 2 : returns empty coordinate scheme to fill (call back 1 to setup)
 /// 3 : returns coordinate scheme from storage in ptr (call back 1 to convert)
-void *newSparseTensor(uint8_t *abase, uint8_t *adata, uint64_t aoff,
-                      uint64_t asize, uint64_t astride, uint64_t *sbase,
-                      uint64_t *sdata, uint64_t soff, uint64_t ssize,
-                      uint64_t sstride, uint64_t *pbase, uint64_t *pdata,
-                      uint64_t poff, uint64_t psize, uint64_t pstride,
-                      uint64_t ptrTp, uint64_t indTp, uint64_t valTp,
-                      uint32_t action, void *ptr) {
-  assert(astride == 1 && sstride == 1 && pstride == 1);
-  assert(asize == ssize && ssize == psize);
-  uint8_t *sparsity = adata + aoff;
-  uint64_t *sizes = sdata + soff;
-  uint64_t *perm = pdata + poff;
-  uint64_t size = asize;
+void *_mlir_ciface_newSparseTensor(MemRef1DU8 *aref, MemRef1DU64 *sref,
+                                   MemRef1DU64 *pref, uint64_t ptrTp,
+                                   uint64_t indTp, uint64_t valTp,
+                                   uint32_t action, void *ptr) {
+  assert(aref->strides[0] == 1 && sref->strides[0] == 1 &&
+         pref->strides[0] == 1);
+  assert(aref->sizes[0] == sref->sizes[0] && sref->sizes[0] == pref->sizes[0]);
+  const uint8_t *sparsity = aref->data + aref->off;
+  const uint64_t *sizes = sref->data + sref->off;
+  const uint64_t *perm = pref->data + pref->off;
+  uint64_t size = aref->sizes[0];
 
   // Double matrices with all combinations of overhead storage.
   CASE(kU64, kU64, kF64, uint64_t, uint64_t, double);
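For completeness, a hypothetical caller-side sketch of the new entry point: the per-dimension annotations, sizes, and permutation arrive as three unit-stride rank-1 descriptors, while ptrTp/indTp/valTp/action/ptr pass through unchanged and follow the protocol documented above newSparseTensor. Struct layouts mirror the TEMPLATE macro; makeTensor and wrap are illustrative helpers only, not part of the patch:

#include <cstdint>

struct MemRef1DU8 { uint8_t *base, *data; uint64_t off, sizes[1], strides[1]; };
struct MemRef1DU64 { uint64_t *base, *data; uint64_t off, sizes[1], strides[1]; };

extern "C" void *_mlir_ciface_newSparseTensor(MemRef1DU8 *aref, MemRef1DU64 *sref,
                                              MemRef1DU64 *pref, uint64_t ptrTp,
                                              uint64_t indTp, uint64_t valTp,
                                              uint32_t action, void *ptr);

// Wraps a plain array of `rank` elements into a unit-stride 1-D descriptor.
static MemRef1DU64 wrap(uint64_t *a, uint64_t rank) {
  return MemRef1DU64{a, a, 0, {rank}, {1}};
}

void *makeTensor(uint8_t *sparsity, uint64_t *sizes, uint64_t *perm,
                 uint64_t rank, uint64_t ptrTp, uint64_t indTp, uint64_t valTp,
                 uint32_t action, void *ptr) {
  MemRef1DU8 aref = {sparsity, sparsity, 0, {rank}, {1}};
  MemRef1DU64 sref = wrap(sizes, rank);
  MemRef1DU64 pref = wrap(perm, rank);
  return _mlir_ciface_newSparseTensor(&aref, &sref, &pref, ptrTp, indTp, valTp,
                                      action, ptr);
}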