diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
@@ -562,6 +562,7 @@
   using OutputUpdater = function_ref<Value(OpBuilder &builder, Location loc,
                                            Value memref, Value tensor)>;
 
+  SparseTensorLoopEmitter() = default;
   /// Constructor: take an array of tensors inputs, on which the generated
   /// loops will iterate on. The index of the tensor in the array is also the
   /// tensor id (tid) used in related functions.
@@ -577,6 +578,12 @@
                                    bool isSparseOut = false,
                                    ArrayRef<unsigned> topSort = {});
 
+  // Similar to the above constructor, but to support post-constructor
+  // initialization.
+  void initialize(ValueRange tensors, StringAttr loopTag = nullptr,
+                  bool hasOutput = false, bool isSparseOut = false,
+                  ArrayRef<unsigned> topSort = {});
+
   /// Starts a loop emitting session by generating all the buffers needed to
   /// iterate tensors.
   void initializeLoopEmit(OpBuilder &builder, Location loc,
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
@@ -208,13 +208,28 @@
                                                  StringAttr loopTag,
                                                  bool hasOutput,
                                                  bool isSparseOut,
-                                                 ArrayRef<unsigned> topSort)
-    : loopTag(loopTag), hasOutput(hasOutput), isSparseOut(isSparseOut),
-      tensors(tensors.begin(), tensors.end()), dimTypes(tensors.size()),
-      pidxs(tensors.size()), coord(tensors.size()), highs(tensors.size()),
-      ptrBuffer(tensors.size()), idxBuffer(tensors.size()),
-      valBuffer(tensors.size()), loopStack(),
-      sparsiferLoopLvlMap(topSort.size(), 0) {
+                                                 ArrayRef<unsigned> topSort) {
+  initialize(tensors, loopTag, hasOutput, isSparseOut, topSort);
+}
+
+void SparseTensorLoopEmitter::initialize(ValueRange tensors, StringAttr loopTag,
+                                         bool hasOutput, bool isSparseOut,
+                                         ArrayRef<unsigned> topSort) {
+  // First initializes fields.
+  this->loopTag = loopTag;
+  this->hasOutput = hasOutput;
+  this->isSparseOut = isSparseOut;
+  this->tensors.assign(tensors.begin(), tensors.end());
+  this->dimTypes.assign(tensors.size(), std::vector<DimLevelType>());
+  this->pidxs.assign(tensors.size(), std::vector<Value>());
+  this->coord.assign(tensors.size(), std::vector<Value>());
+  this->highs.assign(tensors.size(), std::vector<Value>());
+  this->ptrBuffer.assign(tensors.size(), std::vector<Value>());
+  this->idxBuffer.assign(tensors.size(), std::vector<Value>());
+  this->valBuffer.assign(tensors.size(), nullptr);
+  this->loopStack.reserve(topSort.size());
+  this->sparsiferLoopLvlMap.assign(topSort.size(), 0);
+
   for (size_t tid = 0, e = tensors.size(); tid < e; tid++) {
     auto t = tensors[tid];
     // a scalar or 0-dimension tensors
@@ -239,6 +254,7 @@
     idxBuffer[tid].assign(rank, Value());
   }
 
+  // FIXME: This map should be maintained outside loop emitter.
   for (unsigned i = 0, e = topSort.size(); i < e; i++) {
     // This is an inverse map of the topologically sorted loop index from
     // sparsifier. This is needed to map the AffineDimExpr back to the loopStack