diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -1205,7 +1205,7 @@
                                const SmallVector<Value> &dynSizes) const;
 };
 
-/// Populates `patterns` with patterns that vectorize linalg.pad_tensor.
+/// Populates `patterns` with patterns that vectorize tensor.pad.
 /// These patterns are meant to apply in a complementary fashion. Benefits
 /// are used to encode a certain ordering of pattern application. To avoid
 /// scattering magic constants throughout the code base, the patterns must be
@@ -1290,7 +1290,7 @@
     const FrozenRewritePatternSet &stage2Patterns,
     function_ref<LogicalResult(Operation *)> stage3Lambda = nullptr);
 
-/// Rewrite extract_slice(pad_tensor(x)) into pad_tensor(extract_slice(x)).
+/// Rewrite extract_slice(tensor.pad(x)) into tensor.pad(extract_slice(x)).
 struct ExtractSliceOfPadTensorSwapPattern
     : public OpRewritePattern<tensor::ExtractSliceOp> {
   /// A function to control pattern application and rewrite logic.
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
@@ -412,7 +412,7 @@
 ///      : tensor into tensor
 ///
 /// The reshape can be folded into the `genericOp` if its loop dimensionality
-/// is increased to match the result (operand) of the tensor_expand_shape.
+/// is increased to match the result (operand) of the tensor.expand_shape.
 /// The indexing_map of the fused tensor in the `genericOp` and the
 /// reassociation map helps compute the indexing maps of the modified op.
 /// For the above example, based on the reassociation map it
@@ -677,7 +677,7 @@
   }
 }
 
-/// Implements the fusion of a tensor_collapse_shape or a tensor_expand_shape op
+/// Implements the fusion of a tensor.collapse_shape or a tensor.expand_shape op
 /// and a generic op as explained in `isFusableWithReshapeByExpansion`. Assumes
 /// that those conditions have been satisfied.
 static Optional<SmallVector<Value>>
@@ -811,7 +811,7 @@
 
 namespace {
 
-/// Pattern to fuse a tensor_collapse_shape op with its consumer generic op,
+/// Pattern to fuse a tensor.collapse_shape op with its consumer generic op,
 /// when the reshape op is collapsing dimensions. The dimensionality of the loop
 /// in the consumer is expanded.
 class FoldWithProducerReshapeOpByExpansion
@@ -851,7 +851,7 @@
   ControlFusionFn controlFoldingReshapes;
 };
 
-/// Pattern to fold a tensor_expand_shape op with its producer generic op
+/// Pattern to fold a tensor.expand_shape op with its producer generic op
 /// by expanding the dimensionality of the loop in the producer op.
 struct FoldReshapeWithGenericOpByExpansion
     : public OpRewritePattern<tensor::ExpandShapeOp> {
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -814,7 +814,7 @@
 /// Rewrite use of tensor::PadOp result in TransferReadOp. E.g.:
 /// ```
-/// %0 = linalg.pad_tensor %src ... : tensor<?x?xf32> to tensor<17x5xf32>
+/// %0 = tensor.pad %src ... : tensor<?x?xf32> to tensor<17x5xf32>
 /// %r = vector.transfer_read %0[%c0, %c0], %cst
 ///     {in_bounds = [true, true]} : tensor<17x5xf32>, vector<17x5xf32>
 /// ```
@@ -869,7 +869,7 @@
 /// ```
 /// %0 = tensor.extract_slice ...[...] [%s0, %s1] [1, 1]
 ///     : tensor<...> to tensor<?x?xf32>
-/// %1 = linalg.pad_tensor %0 ... : tensor<?x?xf32> to tensor<17x5xf32>
+/// %1 = tensor.pad %0 ... : tensor<?x?xf32> to tensor<17x5xf32>
 /// %2 = vector.transfer_write %vec, %1[...]
 ///     : vector<17x5xf32>, tensor<17x5xf32>
 /// %r = tensor.extract_slice %2[0, 0] [%s0, %s1] [1, 1]
@@ -1026,7 +1026,7 @@
 /// Rewrite use of tensor::PadOp result in InsertSliceOp. E.g.:
 /// ```
-/// %0 = linalg.pad_tensor %src ... : tensor<?x?xf32> to tensor<17x5xf32>
+/// %0 = tensor.pad %src ... : tensor<?x?xf32> to tensor<17x5xf32>
 /// %r = tensor.insert_slice %0
 ///     into %dest[%a, %b, 0, 0] [1, 1, 17, 5] [1, 1, 1, 1]
 ///     : tensor<17x5xf32> into tensor<?x?x17x5xf32>
diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
@@ -110,8 +110,8 @@
       llvm::cl::init(false)};
   Option<bool> testSwapSubTensorPadTensor{
       *this, "test-swap-subtensor-padtensor",
-      llvm::cl::desc("Test rewrite of subtensor(pad_tensor) into "
-                     "pad_tensor(subtensor)"),
+      llvm::cl::desc("Test rewrite of subtensor(tensor.pad) into "
+                     "tensor.pad(subtensor)"),
       llvm::cl::init(false)};
   Option<bool> testSplitReduction{
       *this, "test-split-reduction",
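
Note: the patch above only touches documentation comments and a test-flag description; no op definitions change. For readers unfamiliar with the renamed ops, here is a minimal, illustrative sketch of the `tensor` dialect spellings the updated comments refer to, roughly as they looked in the MLIR version this patch targets. The SSA names (`%src`, `%pad_value`, `%h0`, `%h1`, `%a`) and the concrete shapes are made up for illustration and are not taken from the patch:

```mlir
// tensor.pad (formerly linalg.pad_tensor): pad a dynamically shaped source
// up to a static 17x5 result; the region yields the value used for padding.
%padded = tensor.pad %src low[0, 0] high[%h0, %h1] {
  ^bb0(%i: index, %j: index):
    tensor.yield %pad_value : f32
} : tensor<?x?xf32> to tensor<17x5xf32>

// tensor.collapse_shape / tensor.expand_shape (the dotted spellings the
// updated comments use): group or split dimensions according to the
// reassociation indices, here [0, 1] and [2].
%collapsed = tensor.collapse_shape %a [[0, 1], [2]]
    : tensor<2x3x4xf32> into tensor<6x4xf32>
%expanded = tensor.expand_shape %collapsed [[0, 1], [2]]
    : tensor<6x4xf32> into tensor<2x3x4xf32>
```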