diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
@@ -89,7 +89,7 @@
 /// Computes the shape of destination tensor of a reshape operator. This is only
 /// used when operands have dynamic shape. The shape of the destination is
 /// stored into dstShape.
-void genReshapeDstShape(Location loc, PatternRewriter &rewriter,
+void genReshapeDstShape(OpBuilder &builder, Location loc,
                         SmallVectorImpl<Value> &dstShape,
                         ArrayRef<Value> srcShape,
                         ArrayRef<StaticSize> staticDstShape,
                         ArrayRef<ReassociationIndices> reassociation);
@@ -211,8 +211,8 @@
 /// %v3 = complex.constant (5.0, 6.0)
 /// callback({%c3}, %v3)
 void foreachInSparseConstant(
-    Location loc, RewriterBase &rewriter, SparseElementsAttr attr,
-    AffineMap order, function_ref<void(ArrayRef<Value>, Value)> callback);
+    OpBuilder &builder, Location loc, SparseElementsAttr attr, AffineMap order,
+    function_ref<void(ArrayRef<Value>, Value)> callback);
 
 /// Loads `size`-many values from the memref, which must have rank-1 and
 /// size greater-or-equal to `size`. If the optional `(offsetIdx,offsetVal)`
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
@@ -244,16 +244,16 @@
 }
 
 void mlir::sparse_tensor::genReshapeDstShape(
-    Location loc, PatternRewriter &rewriter, SmallVectorImpl<Value> &dstShape,
+    OpBuilder &builder, Location loc, SmallVectorImpl<Value> &dstShape,
     ArrayRef<Value> srcShape, ArrayRef<StaticSize> staticDstShape,
     ArrayRef<ReassociationIndices> reassociation) {
   // Collapse shape.
   if (reassociation.size() < srcShape.size()) {
     unsigned start = 0;
     for (const auto &map : llvm::enumerate(reassociation)) {
-      auto dstDim = constantIndex(rewriter, loc, 1);
+      auto dstDim = constantIndex(builder, loc, 1);
       for (unsigned i = start; i < start + map.value().size(); i++) {
-        dstDim = rewriter.create<arith::MulIOp>(loc, dstDim, srcShape[i]);
+        dstDim = builder.create<arith::MulIOp>(loc, dstDim, srcShape[i]);
       }
       dstShape.push_back(dstDim);
       start = start + map.value().size();
@@ -285,13 +285,13 @@
           }
         }
         // Compute the dynamic dimension size.
-        Value productVal = constantIndex(rewriter, loc, product);
+        Value productVal = constantIndex(builder, loc, product);
         Value dynamicSize =
-            rewriter.create<arith::DivUIOp>(loc, srcDim, productVal);
+            builder.create<arith::DivUIOp>(loc, srcDim, productVal);
         dstShape.push_back(dynamicSize);
       } else {
         // The expanded dimension is statically known.
-        dstShape.push_back(constantIndex(rewriter, loc, staticDstShape[j]));
+        dstShape.push_back(constantIndex(builder, loc, staticDstShape[j]));
       }
     }
     start = start + map.size();
@@ -512,8 +512,8 @@
 }
 
 void sparse_tensor::foreachInSparseConstant(
-    Location loc, RewriterBase &rewriter, SparseElementsAttr attr,
-    AffineMap order, function_ref<void(ArrayRef<Value>, Value)> callback) {
+    OpBuilder &builder, Location loc, SparseElementsAttr attr, AffineMap order,
+    function_ref<void(ArrayRef<Value>, Value)> callback) {
   const Dimension dimRank = getSparseTensorType(attr).getDimRank();
   const auto coordinates = attr.getIndices().getValues<IntegerAttr>();
   const auto values = attr.getValues().getValues<Attribute>();
@@ -560,17 +560,17 @@
       cvs.clear();
       for (Dimension d = 0; d < dimRank; d++) {
         auto crd = elems[i].first[d].getInt();
-        cvs.push_back(rewriter.create<arith::ConstantIndexOp>(loc, crd));
+        cvs.push_back(builder.create<arith::ConstantIndexOp>(loc, crd));
       }
       // Remap value.
       Value val;
       if (attr.getElementType().isa<ComplexType>()) {
         auto valAttr = elems[i].second.cast<ArrayAttr>();
-        val = rewriter.create<complex::ConstantOp>(loc, attr.getElementType(),
-                                                   valAttr);
+        val = builder.create<complex::ConstantOp>(loc, attr.getElementType(),
+                                                  valAttr);
       } else {
         auto valAttr = elems[i].second.cast<TypedAttr>();
-        val = rewriter.create<arith::ConstantOp>(loc, valAttr);
+        val = builder.create<arith::ConstantOp>(loc, valAttr);
       }
       assert(val);
       callback(cvs, val);
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -489,7 +489,7 @@
     // Static "shapes" are in fact "sizes".
     fillDimShape(rewriter, loc, dstTp, dstDimSizes);
   else
-    genReshapeDstShape(loc, rewriter, dstDimSizes, srcDimSizes,
+    genReshapeDstShape(rewriter, loc, dstDimSizes, srcDimSizes,
                        dstTp.getDimShape(), op.getReassociationIndices());
   const Value coo =
       params.genBuffers(dstTp, dstDimSizes).genNewCall(Action::kEmptyCOO);
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
@@ -158,7 +158,7 @@
   // Foreach on constant.
   foreachInSparseConstant(
-      loc, rewriter, attr, op.getOrder().value_or(AffineMap()),
+      rewriter, loc, attr, op.getOrder().value_or(AffineMap()),
       [&reduc, &rewriter, op](ArrayRef<Value> cvs, Value v) mutable {
         SmallVector<Value> args;
         args.append(cvs.begin(), cvs.end());
@@ -372,7 +372,7 @@
       dstSizes.push_back(constantIndex(rewriter, loc, d));
     } else {
       ArrayRef<StaticSize> dstShape = dstTp.getDimShape();
-      genReshapeDstShape(loc, rewriter, dstSizes, srcSizes, dstShape,
+      genReshapeDstShape(rewriter, loc, dstSizes, srcSizes, dstShape,
                          op.getReassociationIndices());
       for (auto [idx, shape] : llvm::enumerate(dstShape)) {
         if (shape == ShapedType::kDynamic)