diff --git a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
--- a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
+++ b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
@@ -1298,6 +1298,12 @@
       "ArrayRef<OpFoldResult>":$low, "ArrayRef<OpFoldResult>":$high,
       CArg<"bool", "false">:$nofold,
       CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
+    // Build a PadOp with constant padding, mixed static and dynamic entries
+    // and custom result type. If the type passed is nullptr, it is inferred.
+    OpBuilder<(ins "Type":$resultType, "Value":$source,
+      "ArrayRef<OpFoldResult>":$low, "ArrayRef<OpFoldResult>":$high,
+      "Value":$constantPadValue, CArg<"bool", "false">:$nofold,
+      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>
   ];
 
   let hasCanonicalizer = 1;
diff --git a/mlir/include/mlir/Dialect/Tensor/Utils/Utils.h b/mlir/include/mlir/Dialect/Tensor/Utils/Utils.h
--- a/mlir/include/mlir/Dialect/Tensor/Utils/Utils.h
+++ b/mlir/include/mlir/Dialect/Tensor/Utils/Utils.h
@@ -21,13 +21,6 @@
 PadOp createPadHighOp(RankedTensorType type, Value source, Value pad,
                       bool nofold, Location loc, OpBuilder &builder);
 
-// Return a PadOp that pads `source to `type` size with `pad` value.
-// I.e., a block will be created and the `pad` value will be yielded
-// directly. If the type passed is nullptr, it is inferred.
-PadOp createPadScalarOp(Type type, Value source, Value pad,
-                        ArrayRef<OpFoldResult> low, ArrayRef<OpFoldResult> high,
-                        bool nofold, Location loc, OpBuilder &builder);
-
 // Creates dim ops for each dynamic dimension of the ranked tensor argument and
 // returns these as values.
 SmallVector<Value> createDynamicDimValues(OpBuilder &b, Location loc,
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -2000,9 +2000,8 @@
       highValues.push_back(highVal);
     }
 
-    auto newPadOp = tensor::createPadScalarOp(
-        padOp.getType(), input, padConstant, lowValues, highValues,
-        /*nofold=*/false, loc, rewriter);
+    auto newPadOp = rewriter.create<tensor::PadOp>(
+        loc, padOp.getType(), input, lowValues, highValues, padConstant);
 
     rewriter.replaceOp(padOp, newPadOp.getResult());
     return success();
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
@@ -58,10 +58,9 @@
 
   Value padValue = rewriter.create<arith::ConstantOp>(loc, padAttr);
 
-  return tensor::createPadScalarOp(RankedTensorType::get(paddedShape, inputETy),
-                                   input, padValue, lowIndices, highIndices,
-                                   /*nofold=*/false, loc, rewriter)
-      .getResult();
+  return rewriter.create<tensor::PadOp>(
+      loc, RankedTensorType::get(paddedShape, inputETy), input, lowIndices,
+      highIndices, padValue);
 }
 
 static mlir::Value reifyConstantDim(Attribute attr,
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -2475,12 +2475,32 @@
   if (!resultType) {
     resultType = PadOp::inferResultType(sourceType, staticLow, staticHigh);
   }
+  assert(resultType.isa<RankedTensorType>());
   build(b, result, resultType, source, dynamicLow, dynamicHigh,
         b.getI64ArrayAttr(staticLow), b.getI64ArrayAttr(staticHigh),
         nofold ? b.getUnitAttr() : UnitAttr());
   result.addAttributes(attrs);
 }
 
+void PadOp::build(OpBuilder &b, OperationState &result, Type resultType,
+                  Value source, ArrayRef<OpFoldResult> low,
+                  ArrayRef<OpFoldResult> high, Value constantPadValue,
+                  bool nofold, ArrayRef<NamedAttribute> attrs) {
+  build(b, result, resultType, source, low, high, nofold, attrs);
+
+  // Add a region and a block to yield the pad value.
+  Region *region = result.regions[0].get();
+  int sourceRank = source.getType().cast<RankedTensorType>().getRank();
+  SmallVector<Type> blockArgTypes(sourceRank, b.getIndexType());
+  SmallVector<Location> blockArgLocs(sourceRank, result.location);
+
+  // `builder.createBlock` changes the insertion point within the block. Create
+  // a guard to reset the insertion point of the builder after it is destroyed.
+  OpBuilder::InsertionGuard guard(b);
+  b.createBlock(region, region->end(), blockArgTypes, blockArgLocs);
+  b.create<tensor::YieldOp>(result.location, constantPadValue);
+}
+
 llvm::SmallBitVector PadOp::getPaddedDims() {
   llvm::SmallBitVector paddedDims(getSourceType().getRank());
   auto extractPaddedDims = [&](ArrayRef<int64_t> paddingWidths) {
diff --git a/mlir/lib/Dialect/Tensor/Utils/Utils.cpp b/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
--- a/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
@@ -18,24 +18,6 @@
 using namespace mlir;
 using namespace mlir::tensor;
 
-PadOp mlir::tensor::createPadScalarOp(Type type, Value source, Value pad,
-                                      ArrayRef<OpFoldResult> low,
-                                      ArrayRef<OpFoldResult> high, bool nofold,
-                                      Location loc, OpBuilder &builder) {
-  auto padTensorOp =
-      builder.create<PadOp>(loc, type, source, low, high, nofold);
-  int rank = padTensorOp.getResultType().getRank();
-  SmallVector<Type> blockArgTypes(rank, builder.getIndexType());
-  SmallVector<Location> blockArgLocs(rank, loc);
-  auto &region = padTensorOp.getRegion();
-  // `builder.createBlock` changes the insertion point within the block. Create
-  // a guard to reset the insertion point of the builder after it is destroyed.
-  OpBuilder::InsertionGuard guard(builder);
-  builder.createBlock(&region, region.end(), blockArgTypes, blockArgLocs);
-  builder.create<tensor::YieldOp>(loc, pad);
-  return padTensorOp;
-}
-
 PadOp mlir::tensor::createPadHighOp(RankedTensorType type, Value source,
                                     Value pad, bool nofold, Location loc,
                                     OpBuilder &b) {
@@ -53,7 +35,7 @@
     high[en.index()] =
         makeComposedAffineApply(b, loc, en.value() - d0, {dimOp}).getResult();
   }
-  return createPadScalarOp(type, source, pad, low, high, nofold, loc, b);
+  return b.create<PadOp>(loc, type, source, low, high, pad, nofold);
 }
 
 SmallVector<Value> mlir::tensor::createDynamicDimValues(OpBuilder &b,
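
Usage note (illustrative, not part of the patch): with the new overload in place, code that previously called tensor::createPadScalarOp can build the pad op and its yield region in a single create call, exactly as the TosaToLinalg changes above do. The sketch below is a minimal example; the helper name padByOne and the one-element padding amounts are assumptions chosen for illustration, and passing a null result type relies on the type inference documented in the added TableGen comment.

  #include "mlir/Dialect/Tensor/IR/Tensor.h"
  #include "mlir/IR/Builders.h"

  using namespace mlir;

  // Pad `source` by one element on the low and high side of every dimension,
  // filling the border with `padValue`. A null result type lets the builder
  // infer the padded tensor type from the static low/high padding amounts.
  static Value padByOne(OpBuilder &b, Location loc, Value source,
                        Value padValue) {
    int64_t rank = source.getType().cast<RankedTensorType>().getRank();
    SmallVector<OpFoldResult> low(rank, b.getIndexAttr(1));
    SmallVector<OpFoldResult> high(rank, b.getIndexAttr(1));
    // The new builder creates the region and yields `padValue` directly, so no
    // manual block construction is needed at the call site.
    return b.create<tensor::PadOp>(loc, /*resultType=*/Type(), source, low,
                                   high, padValue, /*nofold=*/false);
  }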