diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -375,6 +375,7 @@
     Type resType = op.getType();
    auto encDst = getSparseTensorEncoding(resType);
    auto encSrc = getSparseTensorEncoding(op.source().getType());
+   auto src = adaptor.getOperands()[0];
    if (encDst && encSrc) {
      // This is a sparse => sparse conversion, which is handled as follows:
      //   t = src->toCOO();         ; src to COO in dst order
@@ -383,8 +384,7 @@
      // yield the fastest conversion but avoids the need for a full
      // O(N^2) conversion matrix.
      Value perm;
-     Value coo =
-         genNewCall(rewriter, op, encDst, 3, perm, adaptor.getOperands()[0]);
+     Value coo = genNewCall(rewriter, op, encDst, 3, perm, src);
      rewriter.replaceOp(op, genNewCall(rewriter, op, encDst, 1, perm, coo));
      return success();
    }
@@ -428,8 +428,7 @@
     SmallVector<Value> st;
    Value zero = rewriter.create<arith::ConstantOp>(loc, rewriter.getIndexAttr(0));
    Value one = rewriter.create<arith::ConstantOp>(loc, rewriter.getIndexAttr(1));
-   Value tensor = adaptor.getOperands()[0];
-   auto indicesValues = genSplitSparseConstant(rewriter, op, tensor);
+   auto indicesValues = genSplitSparseConstant(rewriter, op, src);
    bool isCOOConstant = indicesValues.hasValue();
    Value indices;
    Value values;
@@ -442,7 +441,7 @@
     } else {
      for (unsigned i = 0, rank = shape.getRank(); i < rank; i++) {
        lo.push_back(zero);
-       hi.push_back(linalg::createOrFoldDimOp(rewriter, loc, tensor, i));
+       hi.push_back(linalg::createOrFoldDimOp(rewriter, loc, src, i));
        st.push_back(one);
      }
    }
@@ -456,7 +455,7 @@
           val = genIndexAndValueForSparse(
              rewriter, op, indices, values, ind, ivs, rank);
        else
-         val = genIndexAndValueForDense(rewriter, op, tensor,
+         val = genIndexAndValueForDense(rewriter, op, src,
                                         ind, ivs);
        genAddEltCall(rewriter, op, eltType, ptr, val, ind, perm);