diff --git a/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h b/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h
--- a/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h
@@ -298,7 +298,7 @@
   /// may cause data movement and invalidate the underlying memory address.
   TensorExp &exp(unsigned e) { return tensorExps[e]; }
   LatPoint &lat(unsigned l) { return latPoints[l]; }
-  SmallVector<unsigned, 16> &set(unsigned s) { return latSets[s]; }
+  SmallVector<unsigned> &set(unsigned s) { return latSets[s]; }

 #ifndef NDEBUG
   /// Print methods (for debugging).
@@ -341,9 +341,9 @@
   std::vector<std::vector<DimLevelType>> dimTypes;
   // Map that converts pair<tensor id, loop idx> to the corresponding dimension.
   std::vector<std::vector<Optional<unsigned>>> loopIdxToDim;
-  llvm::SmallVector<TensorExp, 32> tensorExps;
-  llvm::SmallVector<LatPoint, 16> latPoints;
-  llvm::SmallVector<SmallVector<unsigned, 16>, 8> latSets;
+  llvm::SmallVector<TensorExp> tensorExps;
+  llvm::SmallVector<LatPoint> latPoints;
+  llvm::SmallVector<SmallVector<unsigned>> latSets;
 };

 } // namespace sparse_tensor
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -51,7 +51,7 @@
   if (failed(parser.parseGreater()))
     return {};
   // Process the data from the parsed dictionary value into struct-like data.
-  SmallVector<DimLevelType, 4> dlt;
+  SmallVector<DimLevelType> dlt;
   AffineMap dimOrd = {};
   AffineMap higherOrd = {};
   unsigned ptr = 0;
@@ -601,7 +601,7 @@
   auto rtp = tensor.getType().cast<RankedTensorType>();
   int64_t rank = rtp.getRank();

-  SmallVector<Type, 4> blockArgTypes;
+  SmallVector<Type> blockArgTypes;
   // Starts with n index.
   std::fill_n(std::back_inserter(blockArgTypes), rank, builder.getIndexType());
   // Followed by one value.
@@ -609,7 +609,7 @@
   // Followed by reduction variable.
   blockArgTypes.append(initArgs.getTypes().begin(), initArgs.getTypes().end());

-  SmallVector<Location, 4> blockArgLocs;
+  SmallVector<Location> blockArgLocs;
   std::fill_n(std::back_inserter(blockArgLocs), blockArgTypes.size(),
               tensor.getLoc());

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
@@ -95,7 +95,7 @@
 /// used when operands have dynamic shape. The shape of the destination is
 /// stored into dstShape.
 void genReshapeDstShape(Location loc, PatternRewriter &rewriter,
-                        SmallVector<Value, 4> &dstShape,
+                        SmallVectorImpl<Value> &dstShape,
                         ArrayRef<Value> srcShape,
                         ArrayRef<int64_t> staticDstShape,
                         ArrayRef<ReassociationIndices> reassociation);
@@ -177,7 +177,7 @@
     function_ref<void(OpBuilder &, Location, Value, ValueRange)> bodyBuilder);

 /// Populates given sizes array from dense tensor or sparse tensor constant.
-void sizesFromSrc(OpBuilder &builder, SmallVector<Value, 4> &sizes,
+void sizesFromSrc(OpBuilder &builder, SmallVectorImpl<Value> &sizes,
                   Location loc, Value src);

 /// Scans to top of generated loop.
@@ -420,9 +420,9 @@
         : tids(tids), dims(dims), loop(loop), iv(iv) {}
     // TODO: maybe use a vector for tid and dim?
    // The set of tensors that the loop is operating on
-    const llvm::SmallVector<size_t, 4> tids;
+    const llvm::SmallVector<size_t> tids;
     // The corresponding dims for the tensors
-    const llvm::SmallVector<size_t, 4> dims;
+    const llvm::SmallVector<size_t> dims;
     const Operation *loop; // the loop operation
     const Value iv;        // the induction variable for the loop
   };
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
@@ -296,8 +296,8 @@
     ArrayRef<size_t> dims, bool needsUniv, MutableArrayRef<Value> reduc,
     ArrayRef<size_t> extraTids, ArrayRef<size_t> extraDims) {
   assert(tids.size() == dims.size());
-  SmallVector<Type, 4> types;
-  SmallVector<Value, 4> operands;
+  SmallVector<Type> types;
+  SmallVector<Value> operands;
   // Construct the while-loop with a parameter for each index.
   Type indexType = builder.getIndexType();
   for (auto [tid, dim] : llvm::zip(tids, dims)) {
@@ -556,7 +556,7 @@
   // instructions during code generation. Moreover, performing the induction
   // after the if-statements more closely resembles code generated by TACO.
   unsigned o = 0;
-  SmallVector<Value, 4> operands;
+  SmallVector<Value> operands;
   Value one = constantIndex(builder, loc, 1);
   for (auto [tid, dim] : llvm::zip(tids, dims)) {
     if (isCompressedDLT(dimTypes[tid][dim]) ||
@@ -604,7 +604,7 @@
   // earlier stage (instead of silently using a wrong value).
   LoopLevelInfo &loopInfo = loopStack.back();
   assert(loopInfo.tids.size() == loopInfo.dims.size());
-  SmallVector<Value, 2> red;
+  SmallVector<Value> red;
   if (llvm::isa<scf::WhileOp>(loopInfo.loop)) {
     exitCoIterationLoop(rewriter, loc, reduc);
   } else {
@@ -777,7 +777,7 @@
 }

 void mlir::sparse_tensor::genReshapeDstShape(
-    Location loc, PatternRewriter &rewriter, SmallVector<Value, 4> &dstShape,
+    Location loc, PatternRewriter &rewriter, SmallVectorImpl<Value> &dstShape,
     ArrayRef<Value> srcShape, ArrayRef<int64_t> staticDstShape,
     ArrayRef<ReassociationIndices> reassociation) {
   // Collapse shape.
@@ -967,7 +967,7 @@
 void mlir::sparse_tensor::genDenseTensorOrSparseConstantIterLoop(
     OpBuilder &builder, Location loc, Value src, unsigned rank,
     function_ref<void(OpBuilder &, Location, Value, ValueRange)> bodyBuilder) {
-  SmallVector<Value, 4> indicesArray;
+  SmallVector<Value> indicesArray;
   SmallVector<Value> lo;
   SmallVector<Value> hi;
   SmallVector<Value> st;
@@ -1007,7 +1007,7 @@
 }

 void mlir::sparse_tensor::sizesFromSrc(OpBuilder &builder,
-                                       SmallVector<Value, 4> &sizes,
+                                       SmallVectorImpl<Value> &sizes,
                                        Location loc, Value src) {
   unsigned rank = src.getType().cast<ShapedType>().getRank();
   for (unsigned i = 0; i < rank; i++)
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp
@@ -331,7 +331,7 @@
   Location loc = func.getLoc();
   ValueRange args = entryBlock->getArguments();
   Value p = args[hiIdx];
-  SmallVector<Type, 2> types(2, p.getType());
+  SmallVector<Type> types(2, p.getType()); // only two
   scf::WhileOp whileOp = builder.create<scf::WhileOp>(
       loc, types, SmallVector<Value>{args[loIdx], args[hiIdx]});

@@ -357,7 +357,7 @@
   Value midp1 = builder.create<arith::AddIOp>(loc, mid, c1);

   // Compare xs[p] < xs[mid].
-  SmallVector<Value, 6> compareOperands{p, mid};
+  SmallVector<Value> compareOperands{p, mid};
   uint64_t numXBuffers = isCoo ? 1 : nx;
   compareOperands.append(args.begin() + xStartIdx,
                          args.begin() + xStartIdx + numXBuffers);
@@ -400,7 +400,7 @@
   Block *before =
       builder.createBlock(&whileOp.getBefore(), {}, {i.getType()}, {loc});
   builder.setInsertionPointToEnd(before);
-  SmallVector<Value, 6> compareOperands;
+  SmallVector<Value> compareOperands;
   if (step > 0) {
     compareOperands.push_back(before->getArgument(0));
     compareOperands.push_back(p);
@@ -490,8 +490,8 @@
   Value i = lo;
   Value j = builder.create<arith::SubIOp>(loc, hi, c1);
-  SmallVector<Value, 3> operands{i, j, p};
-  SmallVector<Type, 3> types{i.getType(), j.getType(), p.getType()};
+  SmallVector<Value> operands{i, j, p};
+  SmallVector<Type> types{i.getType(), j.getType(), p.getType()};
   scf::WhileOp whileOp = builder.create<scf::WhileOp>(loc, types, operands);

   // The before-region of the WhileOp.
@@ -525,7 +525,7 @@
   cond = builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ult, i, j);
   scf::IfOp ifOp = builder.create<scf::IfOp>(loc, types, cond, /*else=*/true);
   builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
-  SmallVector<Value, 6> swapOperands{i, j};
+  SmallVector<Value> swapOperands{i, j};
   swapOperands.append(args.begin() + xStartIdx, args.end());
   createSwap(builder, loc, swapOperands, nx, ny, isCoo);
   // If the pivot is moved, update p with the new pivot.
@@ -610,11 +610,11 @@
   auto p = builder.create<func::CallOp>(
       loc, partitionFunc, TypeRange{IndexType::get(context)},
      ValueRange(args));

-  SmallVector<Value, 6> lowOperands{lo, p.getResult(0)};
+  SmallVector<Value> lowOperands{lo, p.getResult(0)};
   lowOperands.append(args.begin() + xStartIdx, args.end());
   builder.create<func::CallOp>(loc, func, lowOperands);

-  SmallVector<Value, 6> highOperands{
+  SmallVector<Value> highOperands{
       builder.create<arith::AddIOp>(loc, p.getResult(0),
                                     constantIndex(builder, loc, 1)),
       hi};
@@ -660,7 +660,7 @@
   Value i = forOpI.getInductionVar();

   // Binary search to find the insertion point p.
-  SmallVector<Value, 6> operands{lo, i};
+  SmallVector<Value> operands{lo, i};
   operands.append(args.begin() + xStartIdx, args.end());
   FlatSymbolRefAttr searchFunc = getMangledSortHelperFunc(
       builder, func, {IndexType::get(context)}, kBinarySearchFuncNamePrefix,
       nx,
@@ -672,7 +672,7 @@
   // Move the value at data[i] to a temporary location.
   operands[0] = operands[1] = i;
-  SmallVector<Value, 6> d;
+  SmallVector<Value> d;
   forEachIJPairInAllBuffers(
       builder, loc, operands, nx, ny, isCoo,
       [&](uint64_t unused, Value i, Value unused2, Value buffer) {
@@ -715,7 +715,7 @@
                                        uint64_t ny, bool isCoo,
                                        PatternRewriter &rewriter) {
   Location loc = op.getLoc();
-  SmallVector<Value, 6> operands{constantIndex(rewriter, loc, 0), op.getN()};
+  SmallVector<Value> operands{constantIndex(rewriter, loc, 0), op.getN()};

   // Convert `values` to have dynamic shape and append them to `operands`.
   for (Value v : xys) {
@@ -869,7 +869,7 @@
   LogicalResult matchAndRewrite(SortOp op,
                                 PatternRewriter &rewriter) const override {
-    SmallVector<Value, 6> xys(op.getXs());
+    SmallVector<Value> xys(op.getXs());
     xys.append(op.getYs().begin(), op.getYs().end());
     return matchAndRewriteSortOp(op, xys, op.getXs().size(), /*ny=*/0,
                                  /*isCoo=*/false, rewriter);
@@ -883,7 +883,7 @@
   LogicalResult matchAndRewrite(SortCooOp op,
                                 PatternRewriter &rewriter) const override {
-    SmallVector<Value, 6> xys;
+    SmallVector<Value> xys;
     xys.push_back(op.getXy());
     xys.append(op.getYs().begin(), op.getYs().end());
     uint64_t nx = 1;
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -324,7 +324,7 @@
   unsigned rank = shape.size();
   Value heuristic = constantIndex(builder, loc, 16);
   // Build original sizes.
-  SmallVector<Value, 4> sizes;
+  SmallVector<Value> sizes;
   for (unsigned r = 0, o = 0; r < rank; r++) {
     if (ShapedType::isDynamic(shape[r]))
       sizes.push_back(dynSizes[o++]);
@@ -403,7 +403,7 @@
                           SmallVectorImpl<Value> &indices, Value value,
                           Value pos, unsigned field, unsigned d) {
   unsigned rank = rtp.getShape().size();
-  SmallVector<Type, 4> types;
+  SmallVector<Type> types;
   Type indexType = builder.getIndexType();
   Type boolType = builder.getIntegerType(1);
   Value one = constantIndex(builder, loc, 1);
@@ -543,7 +543,7 @@
       Value hi = genLoad(builder, loc, fields[memSizesIdx], mz);
       Value zero = constantIndex(builder, loc, 0);
       Value one = constantIndex(builder, loc, 1);
-      SmallVector<Value, 1> inits;
+      SmallVector<Value> inits; // only one
       inits.push_back(genLoad(builder, loc, fields[field], zero));
       scf::ForOp loop = createFor(builder, loc, hi, inits, one);
       Value i = loop.getInductionVar();
@@ -584,7 +584,7 @@
   LogicalResult
   matchAndRewrite(func::ReturnOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    SmallVector<Value, 8> flattened;
+    SmallVector<Value> flattened;
     flattenOperands(adaptor.getOperands(), flattened);
     // Create a return with the flattened value extracted from sparse tensors.
     rewriter.replaceOpWithNewOp<func::ReturnOp>(op, flattened);
@@ -606,23 +606,23 @@
     // ==>
     //  memref..., f, memref = call @foo(...) replace with
     //  cast(memref...)->sparse_tensor, f, cast(memref...)->sparse_tensor
-    SmallVector<Type, 8> finalRetTy;
+    SmallVector<Type> finalRetTy;
     if (failed(typeConverter->convertTypes(op.getResultTypes(), finalRetTy)))
       return failure();

     // (1) Genereates new call with flattened return value.
-    SmallVector<Value, 8> flattened;
+    SmallVector<Value> flattened;
     flattenOperands(adaptor.getOperands(), flattened);
     auto newCall = rewriter.create<func::CallOp>(loc, op.getCallee(),
                                                  finalRetTy, flattened);
     // (2) Create cast operation for sparse tensor returns.
-    SmallVector<Value, 8> castedRet;
+    SmallVector<Value> castedRet;
     // Tracks the offset of current return value (of the orignal call)
     // relative to the new call (after sparse tensor flattening);
     unsigned retOffset = 0;
     // Temporal buffer to hold the flattened list of type for
     // a sparse tensor.
-    SmallVector<Type, 8> sparseFlat;
+    SmallVector<Type> sparseFlat;
     for (auto ret : op.getResults()) {
       assert(retOffset < newCall.getNumResults());
       auto retType = ret.getType();
@@ -713,7 +713,7 @@
     // Construct allocation for each field.
     Location loc = op.getLoc();
-    SmallVector<Value, 8> fields;
+    SmallVector<Value> fields;
     createAllocFields(rewriter, loc, resType, adaptor.getOperands(),
                       enableBufferInitialization, fields);
     // Replace operation with resulting memrefs.
@@ -760,7 +760,7 @@
         op.getTensor().getType().cast<RankedTensorType>();
     auto tuple = getTuple(adaptor.getTensor());
     // Prepare fields.
-    SmallVector<Value, 8> fields(tuple.getInputs());
+    SmallVector<Value> fields(tuple.getInputs());
     // Generate optional insertion finalization code.
     if (op.getHasInserts())
       genEndInsert(rewriter, op.getLoc(), srcType, fields);
@@ -839,8 +839,8 @@
     Value added = adaptor.getAdded();
     Value count = adaptor.getCount();
     // Prepare fields and indices.
-    SmallVector<Value, 8> fields(tuple.getInputs());
-    SmallVector<Value, 8> indices(adaptor.getIndices());
+    SmallVector<Value> fields(tuple.getInputs());
+    SmallVector<Value> indices(adaptor.getIndices());
     // If the innermost dimension is ordered, we need to sort the indices
     // in the "added" array prior to applying the compression.
     unsigned rank = dstType.getShape().size();
@@ -897,8 +897,8 @@
         op.getTensor().getType().cast<RankedTensorType>();
     auto tuple = getTuple(adaptor.getTensor());
     // Prepare fields and indices.
-    SmallVector<Value, 8> fields(tuple.getInputs());
-    SmallVector<Value, 8> indices(adaptor.getIndices());
+    SmallVector<Value> fields(tuple.getInputs());
+    SmallVector<Value> indices(adaptor.getIndices());
     // Generate insertion.
     Value value = adaptor.getValue();
     genInsert(rewriter, op->getLoc(), dstType, fields, indices, value);
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -63,7 +63,7 @@
                            uint64_t lvl) {
   // Generate the call.
   StringRef name = "sparseLvlSize";
-  SmallVector<Value, 2> params{
+  SmallVector<Value> params{ // just two
       src, constantIndex(builder, loc, toStoredDim(enc, lvl))};
   Type iTp = builder.getIndexType();
   return createFuncCall(builder, loc, name, iTp, params, EmitCInterface::Off)
       .getResult(0);
@@ -92,7 +92,7 @@
 /// Populates given sizes array from type (for static sizes) and from
 /// an already-converted opaque pointer source (for dynamic sizes).
-static void sizesFromPtr(OpBuilder &builder, SmallVector<Value, 4> &sizes,
+static void sizesFromPtr(OpBuilder &builder, SmallVectorImpl<Value> &sizes,
                          Location loc, SparseTensorEncodingAttr &enc,
                          ShapedType stp, Value src) {
   for (unsigned i = 0, rank = stp.getRank(); i < rank; i++)
@@ -100,7 +100,7 @@
 }

 /// Populates given sizes array from type.
-static void sizesFromType(OpBuilder &builder, SmallVector<Value, 4> &sizes,
+static void sizesFromType(OpBuilder &builder, SmallVectorImpl<Value> &sizes,
                           Location loc, ShapedType stp) {
   auto shape = stp.getShape();
   for (unsigned i = 0, rank = stp.getRank(); i < rank; i++) {
@@ -113,7 +113,7 @@
 /// sizes) and from an already-converted opaque pointer source (for dynamic
 /// sizes).
 static void concatSizesFromInputs(OpBuilder &builder,
-                                  SmallVector<Value, 4> &sizes, Location loc,
+                                  SmallVectorImpl<Value> &sizes, Location loc,
                                   ShapedType dstTp, ValueRange srcs,
                                   unsigned dim) {
   auto dstShape = dstTp.getShape();
@@ -262,7 +262,7 @@
     const unsigned lvlRank = enc.getDimLevelType().size();
     const unsigned dimRank = stp.getRank();
     // Sparsity annotations.
-    SmallVector<Value, 4> lvlTypes;
+    SmallVector<Value> lvlTypes;
     for (auto dlt : enc.getDimLevelType())
       lvlTypes.push_back(constantDimLevelTypeEncoding(builder, loc, dlt));
     assert(lvlTypes.size() == lvlRank && "Level-rank mismatch");
@@ -276,12 +276,12 @@
     // For now however, since we're still assuming permutations, we will
     // initialize this parameter alongside the `dim2lvl` and `lvl2dim`
     // parameters below. We preinitialize `lvlSizes` for code symmetry.
-    SmallVector<Value, 4> lvlSizes(lvlRank);
+    SmallVector<Value> lvlSizes(lvlRank);
     // The dimension-to-level mapping and its inverse. We must preinitialize
     // `dim2lvl` so that the true branch below can perform random-access
     // `operator[]` assignment. We preinitialize `lvl2dim` for code symmetry.
-    SmallVector<Value, 4> dim2lvl(dimRank);
-    SmallVector<Value, 4> lvl2dim(lvlRank);
+    SmallVector<Value> dim2lvl(dimRank);
+    SmallVector<Value> lvl2dim(lvlRank);
     auto dimOrder = enc.getDimOrdering();
     if (dimOrder) {
       assert(dimOrder.isPermutation());
@@ -365,11 +365,11 @@
 /// Converts a pointer to COO (from calls to iter->next()) into a vector of
 /// indices, apply (optional) `offset` on `offsetDim`.
-static SmallVector<Value, 4> loadIndices(OpBuilder &builder, Location loc,
-                                         unsigned rank, Value ind,
-                                         unsigned offsetDim = 0,
-                                         Value offset = Value()) {
-  SmallVector<Value, 4> ivs;
+static SmallVector<Value> loadIndices(OpBuilder &builder, Location loc,
+                                      unsigned rank, Value ind,
+                                      unsigned offsetDim = 0,
+                                      Value offset = Value()) {
+  SmallVector<Value> ivs;
   ivs.reserve(rank);
   for (unsigned i = 0; i < rank; i++) {
     Value idx = constantIndex(builder, loc, i);
@@ -437,14 +437,14 @@
   unsigned dstRank = dstTp.getRank();
   unsigned srcRank = srcTp.getRank();

-  SmallVector<Value, 4> srcIndices;
+  SmallVector<Value> srcIndices;
   for (unsigned i = 0; i < srcRank; i++) {
     Value idx = rewriter.create<memref::LoadOp>(
         loc, srcIdx, constantIndex(rewriter, loc, i));
     srcIndices.push_back(idx);
   }

-  SmallVector<Value, 4> dstIndices;
+  SmallVector<Value> dstIndices;
   translateIndicesArray(rewriter, loc, reassociation, srcIndices, srcShape,
                         dstShape, dstIndices);
@@ -488,13 +488,13 @@
     auto noPerm = SparseTensorEncodingAttr::get(
         op->getContext(), encSrc.getDimLevelType(), AffineMap(), AffineMap(),
         encSrc.getPointerBitWidth(), encSrc.getIndexBitWidth());
-    SmallVector<Value, 4> srcSizes;
+    SmallVector<Value> srcSizes;
     sizesFromPtr(rewriter, srcSizes, loc, encSrc, srcTp, adaptor.getSrc());
     NewCallParams params(rewriter, loc);
     Value iter = params.genBuffers(noPerm, srcSizes, srcTp)
                      .genNewCall(Action::kToIterator, adaptor.getSrc());
     // Start a new COO for the destination tensor.
-    SmallVector<Value, 4> dstSizes;
+    SmallVector<Value> dstSizes;
     if (dstTp.hasStaticShape()) {
       sizesFromType(rewriter, dstSizes, loc, dstTp);
     } else {
@@ -555,7 +555,7 @@
       auto noPerm = SparseTensorEncodingAttr::get(
           rewriter.getContext(), enc.getDimLevelType(), AffineMap(),
           AffineMap(), enc.getPointerBitWidth(), enc.getIndexBitWidth());
-      SmallVector<Value, 4> sizes;
+      SmallVector<Value> sizes;
       sizesFromPtr(rewriter, sizes, loc, noPerm, tensorTp, t);
       Value iter = NewCallParams(rewriter, loc)
                        .genBuffers(noPerm, sizes, tensorTp)
@@ -721,7 +721,7 @@
       return failure();
     // Generate the call to construct tensor from ptr. The sizes are
     // inferred from the result type of the new operator.
-    SmallVector<Value, 4> sizes;
+    SmallVector<Value> sizes;
     ShapedType stp = resType.cast<ShapedType>();
     sizesFromType(rewriter, sizes, loc, stp);
     Value ptr = adaptor.getOperands()[0];
@@ -800,7 +800,7 @@
       rewriter.replaceOp(op, adaptor.getOperands()); // hidden nop cast
       return success();
     }
-    SmallVector<Value, 4> sizes;
+    SmallVector<Value> sizes;
     NewCallParams params(rewriter, loc);
     ShapedType stp = srcType.cast<ShapedType>();
     sizesFromPtr(rewriter, sizes, loc, encSrc, stp, src);
@@ -860,7 +860,7 @@
         op->getContext(),
         SmallVector<DimLevelType>(rank, DimLevelType::Dense), AffineMap(),
         AffineMap(), encSrc.getPointerBitWidth(), encSrc.getIndexBitWidth());
-    SmallVector<Value, 4> sizes;
+    SmallVector<Value> sizes;
     sizesFromPtr(rewriter, sizes, loc, encSrc, srcTensorTp, src);
     Value iter = NewCallParams(rewriter, loc)
                      .genBuffers(encDst, sizes, dstTensorTp)
@@ -880,7 +880,7 @@
     rewriter.create<scf::ConditionOp>(loc, cond, before->getArguments());
     Block *after = rewriter.createBlock(&whileOp.getAfter(), {}, noTypes);
     rewriter.setInsertionPointToStart(after);
-    SmallVector<Value, 4> ivs = loadIndices(rewriter, loc, rank, ind);
+    SmallVector<Value> ivs = loadIndices(rewriter, loc, rank, ind);
     insertScalarIntoDenseTensor(rewriter, loc, elemPtr, dst, ivs);
     rewriter.create<scf::YieldOp>(loc);
     rewriter.setInsertionPointAfter(whileOp);
@@ -925,7 +925,7 @@
     // loop is generated by genAddElt().
     ShapedType stp = resType.cast<ShapedType>();
     unsigned rank = stp.getRank();
-    SmallVector<Value, 4> sizes;
+    SmallVector<Value> sizes;
     sizesFromSrc(rewriter, sizes, loc, src);
     NewCallParams params(rewriter, loc);
     Value coo =
@@ -1223,7 +1223,7 @@
     // The offset applied to the dimenstion to be concated (starting from 0)
     Value offset = constantIndex(rewriter, loc, 0);

-    SmallVector<Value, 4> sizes;
+    SmallVector<Value> sizes;
     NewCallParams params(rewriter, loc);
     concatSizesFromInputs(rewriter, sizes, loc, dstTp, op.getInputs(),
                           concatDim);
@@ -1277,7 +1277,7 @@
         } else {
           // Case: dense => dense
           Value val = genValueForDense(builder, loc, adaptedOp, idx);
-          SmallVector<Value, 4> indVec(idx);
+          SmallVector<Value> indVec(idx);
           // Apply offset.
           indVec[concatDim] = builder.create<arith::AddIOp>(
               loc, indVec[concatDim], offset);
@@ -1320,7 +1320,7 @@
     // Convert to default permuted COO.
     Value src = adaptor.getOperands()[0];
     auto encSrc = getSparseTensorEncoding(srcType);
-    SmallVector<Value, 4> sizes;
+    SmallVector<Value> sizes;
     sizesFromPtr(rewriter, sizes, loc, encSrc, srcType, src);
     auto enc = SparseTensorEncodingAttr::get(
         op->getContext(), encSrc.getDimLevelType(), AffineMap(), AffineMap(),
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
@@ -112,7 +112,7 @@
 /// Populates given sizes array from type (for static sizes) and from
 /// the tensor (for dynamic sizes).
-static void sizesForTensor(OpBuilder &builder, SmallVector<Value, 4> &sizes,
+static void sizesForTensor(OpBuilder &builder, SmallVectorImpl<Value> &sizes,
                            Location loc, ShapedType stp, Value tensor) {
   for (const auto &d : enumerate(stp.getShape())) {
     Value dim;
@@ -130,7 +130,7 @@
 static RankedTensorType getUnorderedCOOFromType(RankedTensorType src) {
   auto *ctx = src.getContext();
   auto rank = src.getRank();
-  SmallVector<DimLevelType, 4> dims;
+  SmallVector<DimLevelType> dims;
   // An unordered and non-unique compressed dim at beginning.
   dims.push_back(DimLevelType::CompressedNuNo);
@@ -331,10 +331,10 @@
     // Generate code to represent the static dimension constants or compute
     // the dynamic dimension values.
-    SmallVector<Value, 4> srcSizes;
+    SmallVector<Value> srcSizes;
     sizesForTensor(rewriter, srcSizes, loc, srcTp, srcTensor);
-    SmallVector<Value, 4> dstSizes;
-    SmallVector<Value, 4> dstDynSizes;
+    SmallVector<Value> dstSizes;
+    SmallVector<Value> dstDynSizes;
     if (dstTp.hasStaticShape()) {
       for (auto d : dstTp.getShape())
         dstSizes.push_back(constantIndex(rewriter, loc, d));
@@ -360,8 +360,8 @@
         loc, srcTensor, cooBuffer,
         [&](OpBuilder &builder, Location loc, ValueRange args, Value v,
            ValueRange reduc) {
-          SmallVector<Value, 4> srcIndices;
-          SmallVector<Value, 4> dstIndices;
+          SmallVector<Value> srcIndices;
+          SmallVector<Value> dstIndices;
           for (int64_t i = 0, e = srcTp.getRank(); i < e; i++) {
             uint64_t dim = toStoredDim(encSrc, i);
             srcIndices.push_back(args[dim]);
@@ -454,7 +454,7 @@
           loc, input, cooBuffer,
           [&](OpBuilder &builder, Location loc, ValueRange args, Value v,
               ValueRange reduc) {
-            SmallVector<Value, 4> indices;
+            SmallVector<Value> indices;
             for (int64_t i = 0; i < rank; i++) {
               Value idx = args[i];
               if (i == static_cast<int64_t>(conDim))
@@ -528,9 +528,9 @@
     Location loc = op.getLoc();
     Value src = op.getSource();
     RankedTensorType dstTp = op.getType().cast<RankedTensorType>();
-    SmallVector<Value, 4> sizes;
+    SmallVector<Value> sizes;
     sizesFromSrc(rewriter, sizes, loc, src);
-    SmallVector<Value, 4> dynSizes;
+    SmallVector<Value> dynSizes;
     getDynamicSizes(dstTp, sizes, dynSizes);

     bool fromSparseConst = false;
@@ -583,7 +583,7 @@
     Value src = op.getSource();
     RankedTensorType srcTp = src.getType().cast<RankedTensorType>();

-    SmallVector<Value, 4> sizes;
+    SmallVector<Value> sizes;
     sizesForTensor(rewriter, sizes, loc, srcTp, src);

     Value dst = allocDenseTensor(rewriter, loc, dstTp, sizes);
@@ -622,7 +622,7 @@
     RankedTensorType dstTp = op.getType().cast<RankedTensorType>();
     SparseTensorEncodingAttr encDst = getSparseTensorEncoding(dstTp);

-    SmallVector<Value, 4> srcSizes;
+    SmallVector<Value> srcSizes;
     sizesForTensor(rewriter, srcSizes, loc, srcTp, src);
     Value tmpCoo = Value();
     if (!isUniqueCOOType(srcTp)) {
@@ -630,7 +630,7 @@
       // TODO: there may be cases for which more efficiently without
       // going through an intermediate COO, such as cases that only change
       // the overhead types.
-      SmallVector<Value, 4> dynSrcSizes;
+      SmallVector<Value> dynSrcSizes;
       getDynamicSizes(srcTp, srcSizes, dynSrcSizes);
       srcTp = getUnorderedCOOFromType(srcTp);
       tmpCoo =
@@ -655,7 +655,7 @@
         MemRefType::get(dynShape, getIndexOverheadType(rewriter, encSrc));
     uint64_t rank = dstTp.getRank();
     // Gather the indices-arrays in the dst tensor storage order.
-    SmallVector<Value, 4> xs(rank, Value());
+    SmallVector<Value> xs(rank, Value());
     for (uint64_t i = 0; i < rank; i++) {
       uint64_t orgDim = toOrigDim(encSrc, i);
       xs[toStoredDim(encDst, orgDim)] = rewriter.create<ToIndicesOp>(
@@ -675,11 +675,11 @@
     rewriter.create<SortOp>(loc, nnz, xs, ValueRange{y});

     // For each element in the COO tensor, insert the element to the dst tensor.
-    SmallVector<Value, 4> dynDstSizes;
+    SmallVector<Value> dynDstSizes;
     getDynamicSizes(dstTp, srcSizes, dynDstSizes);
     Value dst =
         rewriter.create<bufferization::AllocTensorOp>(loc, dstTp, dynDstSizes)
             .getResult();
-    SmallVector<Value, 4> indices(srcTp.getRank(), Value());
+    SmallVector<Value> indices(srcTp.getRank(), Value());
     auto foreachOp = rewriter.create<ForeachOp>(
         loc, src, dst,
         [&](OpBuilder &builder, Location loc, ValueRange args, Value v,
@@ -774,7 +774,7 @@
       loopEmitter.enterLoopOverTensorAtDim(rewriter, loc, 0, i, reduc);
     }

-    SmallVector<Value, 4> coords;
+    SmallVector<Value> coords;
     coords.reserve(rank);
     loopEmitter.getCoordinateArray(coords);
@@ -788,8 +788,8 @@
     // 2. Inline the block in the foreach operator.
     Block *srcBlock = op.getBody();

-    SmallVector<Value, 4> args;
     // Remap coordinates.
+    SmallVector<Value> args;
     for (int64_t i = 0; i < rank; i++) {
       Value actual = coords[toStoredDim(enc, i)];
       args.push_back(actual);
@@ -853,7 +853,7 @@
     // If the result tensor has dynamic dimensions, get the dynamic sizes from
     // the sparse tensor reader.
-    SmallVector<Value, 4> dynSizesArray;
+    SmallVector<Value> dynSizesArray;
     if (!dstTp.hasStaticShape()) {
       createFuncCall(rewriter, loc, "getSparseTensorReaderDimSizes", {},
                      {reader, dimSizes}, EmitCInterface::On)
           .getResult(0);
@@ -895,7 +895,7 @@
         createFuncCall(rewriter, loc, getNextFuncName, {eltTp},
                        {reader, indices, value}, EmitCInterface::On)
             .getResult(0);
-    SmallVector<Value, 4> indicesArray;
+    SmallVector<Value> indicesArray;
     for (uint64_t i = 0; i < rank; i++) {
       indicesArray.push_back(rewriter.create<memref::LoadOp>(
           loc, indices, constantIndex(rewriter, loc, i)));
@@ -939,7 +939,7 @@
     // Generate code to calculate dimension size values and store the values to
     // the buffer.
-    SmallVector<Value, 4> dims;
+    SmallVector<Value> dims;
     sizesForTensor(rewriter, dims, loc, srcTp, src);
     for (uint64_t i = 0; i < rank; i++) {
       rewriter.create<memref::StoreOp>(loc, dims[i], dimSizes,
@@ -972,7 +972,7 @@
                                  constantIndex(builder, loc, i));
       }
       rewriter.create<memref::StoreOp>(loc, v, value);
-      SmallVector<Value, 4> operands{writer, rankValue, indices, value};
+      SmallVector<Value> operands{writer, rankValue, indices, value};
       FlatSymbolRefAttr fn =
           getFunc(module, outNextFuncName, {}, operands, EmitCInterface::On);
       builder.create<func::CallOp>(loc, TypeRange(), fn, operands);
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -108,11 +108,11 @@
   assert(m.getNumResults() == sz && "TopoSort/AffineMap size mismatch");
   // Construct the inverse of `m`; to avoid the asymptotic complexity
   // of calling `m.getPermutedPosition` repeatedly.
-  SmallVector<unsigned, 4> inv(sz);
+  SmallVector<unsigned> inv(sz);
   for (unsigned i = 0; i < sz; i++)
     inv[i] = m.getDimPosition(i);
   // Construct the permutation.
-  SmallVector<unsigned, 4> perm(sz);
+  SmallVector<unsigned> perm(sz);
   for (unsigned i = 0; i < sz; i++)
     perm[i] = inv[topSort[i]];
   return AffineMap::getPermutationMap(perm, context);
@@ -417,7 +417,7 @@
     CodeGen &codegen, Merger &merger,
     function_ref<Optional<Operation *>(MutableArrayRef<Value> reduc)>
         callback) {
-  SmallVector<Value, 4> reduc;
+  SmallVector<Value> reduc;
   if (codegen.redVal)
     reduc.push_back(codegen.redVal);
   if (codegen.expValues)
@@ -526,7 +526,7 @@
 /// Generates subscript for load/store on a dense or sparse tensor.
 static Value genSubscript(CodeGen &codegen, OpBuilder &builder,
                           linalg::GenericOp op, OpOperand *t,
-                          SmallVector<Value, 4> &args) {
+                          SmallVectorImpl<Value> &args) {
   unsigned tensor = t->getOperandNumber();
   auto map = op.getMatchingIndexingMap(t);
   auto enc = getSparseTensorEncoding(t->get().getType());
@@ -588,7 +588,7 @@
   // Direct insertion in lexicographic index order.
   if (!codegen.expValues) {
     unsigned rank = op.getRank(t);
-    SmallVector<Value, 4> indices;
+    SmallVector<Value> indices;
     for (unsigned i = 0; i < rank; i++) {
       assert(codegen.loopEmitter.getLoopIV(i));
       indices.push_back(codegen.loopEmitter.getLoopIV(i));
@@ -645,7 +645,7 @@
     return genInsertionLoad(codegen, builder, op, &t);
   }
   // Actual load.
-  SmallVector<Value, 4> args;
+  SmallVector<Value> args;
   Value ptr = genSubscript(codegen, builder, op, &t, args);
   return builder.create<memref::LoadOp>(op.getLoc(), ptr, args);
@@ -670,7 +670,7 @@
     // Select operation insertion.
     Value insChain = codegen.insChain;
     assert(insChain);
-    SmallVector<Type, 1> types;
+    SmallVector<Type> types; // only one
     types.push_back(codegen.insChain.getType());
     scf::IfOp ifOp =
         builder.create<scf::IfOp>(loc, types, rhs, /*else=*/true);
@@ -694,7 +694,7 @@
     return;
   }
   // Actual store.
-  SmallVector<Value, 4> args;
+  SmallVector<Value> args;
   Value ptr = genSubscript(codegen, builder, op, t, args);
   builder.create<memref::StoreOp>(loc, rhs, ptr, args);
@@ -882,7 +882,7 @@
     codegen.expCount = res.getResult(3);
   } else {
     assert(codegen.expValues);
-    SmallVector<Value, 4> indices;
+    SmallVector<Value> indices;
     for (unsigned i = 0; i < at; i++) {
       assert(codegen.loopEmitter.getLoopIV(i));
       indices.push_back(codegen.loopEmitter.getLoopIV(i));
@@ -991,7 +991,7 @@
   while (auto ifOp = dyn_cast_or_null<scf::IfOp>(
              builder.getInsertionBlock()->getParentOp())) {
     unsigned y = 0;
-    SmallVector<Value, 4> yields;
+    SmallVector<Value> yields;
     if (codegen.redVal) {
       yields.push_back(codegen.redVal);
       updateReduc(merger, codegen, ifOp.getResult(y++));
@@ -1017,7 +1017,7 @@
                    linalg::GenericOp op, unsigned idx,
                    BitVector &conditions) {
   Location loc = op.getLoc();
-  SmallVector<Type, 4> types;
+  SmallVector<Type> types;
   Value cond;
   for (unsigned b = 0, be = conditions.size(); b < be; b++) {
     if (!conditions[b])
@@ -1054,7 +1054,7 @@
 static void endIf(Merger &merger, CodeGen &codegen, OpBuilder &builder,
                   linalg::GenericOp op, scf::IfOp ifOp, Operation *loop,
                   Value redInput, Value cntInput, Value insInput) {
-  SmallVector<Value, 4> operands;
+  SmallVector<Value> operands;
   if (codegen.redVal) {
     operands.push_back(codegen.redVal);
     updateReduc(merger, codegen, redInput);
@@ -1172,10 +1172,10 @@
                                  OpBuilder &builder, linalg::GenericOp op,
                                  unsigned at, unsigned li, bool needsUniv) {
   // The set of tensors + dims to generate loops on
-  SmallVector<size_t, 4> condTids, condDims;
+  SmallVector<size_t> condTids, condDims;
   // The set of (dense) tensors that is optimized from condition, yet still
   // need extra locals to iterate on them.
-  SmallVector<size_t, 4> extraTids, extraDims;
+  SmallVector<size_t> extraTids, extraDims;

   translateBitsToTidDimPairs(merger, codegen, li, codegen.topSort[at],
                              condTids, condDims, extraTids, extraDims);
@@ -1369,7 +1369,7 @@
     merger.setHasSparseOut(sparseOut != nullptr);

-    SmallVector<Value, 4> tensors;
+    SmallVector<Value> tensors;
     for (OpOperand &t : op->getOpOperands())
       tensors.push_back(t.get());
diff --git a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
--- a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
@@ -150,7 +150,7 @@
 unsigned Merger::addSet() {
   unsigned s = latSets.size();
-  latSets.emplace_back(SmallVector<unsigned, 16>());
+  latSets.emplace_back(SmallVector<unsigned>());
   return s;
 }
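---

Reviewer note: the patch applies two idioms throughout. Locals become
`SmallVector<T>` so LLVM picks the default number of inline elements from
sizeof(T) instead of a hand-guessed count, and out-parameters become
`SmallVectorImpl<T> &`, which erases the inline size so one signature accepts
a caller's SmallVector of any capacity. A minimal standalone sketch of both
(not code from this patch; it assumes only the LLVM ADT headers, and
`appendSquares` is a hypothetical helper chosen for illustration):

    #include "llvm/ADT/SmallVector.h"
    #include <cstdint>

    using llvm::SmallVector;
    using llvm::SmallVectorImpl;

    // Out-parameter idiom: SmallVectorImpl<int64_t> erases the inline size,
    // so this one signature works for SmallVector<int64_t, 2>,
    // SmallVector<int64_t, 8>, or the default-sized SmallVector<int64_t>.
    static void appendSquares(SmallVectorImpl<int64_t> &out, int64_t n) {
      out.reserve(out.size() + n);
      for (int64_t i = 0; i < n; ++i)
        out.push_back(i * i);
    }

    int main() {
      // Local idiom: no explicit inline count; LLVM derives a sensible
      // default from the element size.
      SmallVector<int64_t> squares;
      appendSquares(squares, 8);

      // An explicit inline size at a call site is still accepted, because
      // both types convert to SmallVectorImpl<int64_t> &.
      SmallVector<int64_t, 2> tiny;
      appendSquares(tiny, 3);

      return (squares.size() == 8 && tiny.size() == 3) ? 0 : 1;
    }

The parameter change is the part that matters for API hygiene: a function
taking `SmallVector<T, N> &` pins every caller to one N, whereas
`SmallVectorImpl<T> &` leaves the capacity a private detail of the caller.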