diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -420,7 +420,7 @@
   // Extract fields and coordinates from args.
   SmallVector<Value> fields = llvm::to_vector(args.drop_back(lvlRank + 1));
   MutSparseTensorDescriptor desc(rtp, fields);
-  const SmallVector<Value> coordinates =
+  const SmallVector<Value> coords =
       llvm::to_vector(args.take_back(lvlRank + 1).drop_back());
   Value value = args.back();
   Value parentPos = constantZero(builder, loc, builder.getIndexType());
@@ -436,14 +436,14 @@
       //   positions[l] = coordinates.size() - 1
       //   <insert @ positions[l] at next level l + 1>
       parentPos =
-          genCompressed(builder, loc, desc, coordinates, value, parentPos, l);
+          genCompressed(builder, loc, desc, coords, value, parentPos, l);
     } else if (isSingletonDLT(dlt)) {
       // Create:
       //   coordinates[l].push_back(coords[l])
       //   positions[l] = positions[l-1]
       //   <insert @ positions[l] at next level l + 1>
       createPushback(builder, loc, desc, SparseTensorFieldKind::CrdMemRef, l,
-                     coordinates[l]);
+                     coords[l]);
     } else {
       assert(isDenseDLT(dlt));
       // Construct the new position as:
@@ -451,7 +451,7 @@
       //   <insert @ positions[l] at next level l + 1>
       Value size = sizeFromTensorAtLvl(builder, loc, desc, l);
       Value mult = builder.create<arith::MulIOp>(loc, size, parentPos);
-      parentPos = builder.create<arith::AddIOp>(loc, mult, coordinates[l]);
+      parentPos = builder.create<arith::AddIOp>(loc, mult, coords[l]);
     }
   }
   // Reached the actual value append/insert.
@@ -846,7 +846,7 @@
     // All initialization should be done on entry of the loop nest.
     rewriter.setInsertionPointAfter(op.getTensor().getDefiningOp());
     // Determine the size for access expansion (always the innermost stored
-    // dimension size, translated back to original dimension). Note that we
+    // level size, translated back to original dimension). Note that we
     // recursively rewrite the new DimOp on the **original** tensor.
     // FIXME: `toOrigDim` is deprecated.
     const Dimension innerDim = toOrigDim(srcType, srcType.getLvlRank() - 1);