diff --git a/clang/docs/tools/clang-formatted-files.txt b/clang/docs/tools/clang-formatted-files.txt
--- a/clang/docs/tools/clang-formatted-files.txt
+++ b/clang/docs/tools/clang-formatted-files.txt
@@ -8250,7 +8250,6 @@
 mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
 mlir/lib/Dialect/Tensor/Transforms/Bufferize.cpp
 mlir/lib/Dialect/Tensor/Transforms/PassDetail.h
-mlir/lib/Dialect/Tensor/Transforms/SplitPadding.cpp
 mlir/lib/Dialect/Tensor/Utils/Utils.cpp
 mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
 mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeConv2D.cpp
diff --git a/mlir/include/mlir/Dialect/Tensor/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Tensor/Transforms/Transforms.h
--- a/mlir/include/mlir/Dialect/Tensor/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Tensor/Transforms/Transforms.h
@@ -41,12 +41,6 @@
 /// ops into `patterns`.
 void populateFoldTensorSubsetOpPatterns(RewritePatternSet &patterns);
 
-/// Populates `patterns` with patterns to wrap a tensor.pad op with an scf.if op
-/// to separate the cases where we don't need padding (all pad sizes are
-/// actually zeros) and where we indeed need padding.
-void populateSplitPaddingPatterns(RewritePatternSet &patterns,
-                                  PatternBenefit baseBenefit = 1);
-
 /// Collects patterns to merge consecutive tensor.insert_slice/extract_slice
 /// into one. These patterns are in this separate entry point because the
 /// bufferization is sensitive to IR structure, particularly those
diff --git a/mlir/lib/Dialect/Tensor/Transforms/CMakeLists.txt b/mlir/lib/Dialect/Tensor/Transforms/CMakeLists.txt
--- a/mlir/lib/Dialect/Tensor/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/Tensor/Transforms/CMakeLists.txt
@@ -7,7 +7,6 @@
   FoldTensorSubsetOps.cpp
   MergeConsecutiveInsertExtractSlicePatterns.cpp
   ReshapePatterns.cpp
-  SplitPaddingPatterns.cpp
   SwapExtractSliceWithProducerPatterns.cpp
 
   ADDITIONAL_HEADER_DIRS
diff --git a/mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp
deleted file mode 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp
+++ /dev/null
@@ -1,95 +0,0 @@
-//===- SplitPaddingPatterns.cpp - Splitting tensor.pad Op -----------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements patterns to wrap a tensor.pad op with an scf.if op
-// to separate the cases where we don't need padding (all pad sizes are
-// actually zeros) and where we indeed need padding.
-//
-//===----------------------------------------------------------------------===//
-
-#include "mlir/Dialect/Arith/IR/Arith.h"
-#include "mlir/Dialect/SCF/IR/SCF.h"
-#include "mlir/Dialect/Tensor/IR/Tensor.h"
-#include "mlir/Dialect/Tensor/Transforms/Transforms.h"
-#include "mlir/Dialect/Utils/StaticValueUtils.h"
-#include "mlir/IR/PatternMatch.h"
-#include "llvm/Support/Debug.h"
-
-#define DEBUG_TYPE "mlir-tensor-split-padding"
-
-using namespace mlir;
-
-/// Returns true if the given `attrOrValue` is a constant zero.
-static bool isZero(OpFoldResult attrOrValue) {
-  if (std::optional<int64_t> val = getConstantIntValue(attrOrValue))
-    return *val == 0;
-  return false;
-}
-
-/// Gets the given `attrOrValue` as a Value by creating constant ops for
-/// attributes.
-static Value getAsValue(OpFoldResult attrOrValue, OpBuilder &builder,
-                        Location loc) {
-  if (Value val = attrOrValue.dyn_cast<Value>())
-    return val;
-  auto attr = attrOrValue.get<Attribute>().cast<IntegerAttr>();
-  return builder.create<arith::ConstantIndexOp>(loc, attr.getInt());
-}
-
-namespace {
-
-struct SplitPadding final : public OpRewritePattern<tensor::PadOp> {
-  using OpRewritePattern::OpRewritePattern;
-
-  LogicalResult matchAndRewrite(tensor::PadOp padOp,
-                                PatternRewriter &rewriter) const override {
-    // Avoid infinitely applying this pattern.
-    if (padOp->getParentOfType<scf::IfOp>())
-      return failure();
-
-    // If all padding sizes are zero, we don't need to do anything.
-    SmallVector<OpFoldResult> lowPads = padOp.getMixedLowPad();
-    SmallVector<OpFoldResult> highPads = padOp.getMixedHighPad();
-    if (llvm::all_of(lowPads, isZero) && llvm::all_of(highPads, isZero))
-      return failure();
-
-    // Build the condition for the scf.if op: all pad sizes are zero.
-    Location loc = padOp.getLoc();
-    Value cstZero = rewriter.create<arith::ConstantIndexOp>(loc, 0);
-    SmallVector<Value> eqZeroCmpVals;
-    for (OpFoldResult pad : llvm::concat<OpFoldResult>(lowPads, highPads)) {
-      if (!isZero(pad))
-        eqZeroCmpVals.push_back(rewriter.create<arith::CmpIOp>(
-            loc, arith::CmpIPredicate::eq, getAsValue(pad, rewriter, loc),
-            cstZero));
-    }
-    Value ifCond = eqZeroCmpVals.front();
-    for (Value cmp : llvm::ArrayRef(eqZeroCmpVals).drop_front())
-      ifCond = rewriter.create<arith::AndIOp>(loc, ifCond, cmp);
-
-    // Build the scf.if op itself. For the "then" branch, we can elide the
-    // padding. For the "else" branch, we retain the clone op.
-    auto thenBuilder = [&padOp](OpBuilder &builder, Location loc) {
-      builder.create<scf::YieldOp>(loc, padOp.getSource());
-    };
-    auto elseBuilder = [&padOp](OpBuilder &builder, Location loc) {
-      Operation *newOp = builder.clone(*padOp);
-      builder.create<scf::YieldOp>(loc, newOp->getResults());
-    };
-    rewriter.replaceOpWithNewOp<scf::IfOp>(padOp, ifCond, thenBuilder,
-                                           elseBuilder);
-    return success();
-  }
-};
-
-} // namespace
-
-void tensor::populateSplitPaddingPatterns(RewritePatternSet &patterns,
-                                          PatternBenefit baseBenefit) {
-  patterns.add<SplitPadding>(patterns.getContext(), baseBenefit);
-}
diff --git a/mlir/test/Dialect/Tensor/split-padding.mlir b/mlir/test/Dialect/Tensor/split-padding.mlir
deleted file mode 100644
--- a/mlir/test/Dialect/Tensor/split-padding.mlir
+++ /dev/null
@@ -1,44 +0,0 @@
-// RUN: mlir-opt -split-input-file -test-tensor-transform-patterns=test-split-padding-patterns %s | FileCheck %s
-
-// CHECK-LABEL: func @pad_all_zero_sizes
-func.func @pad_all_zero_sizes(%input: tensor<?x?x?xf32>) -> tensor<?x?x?xf32> {
-  %f0 = arith.constant 0.0 : f32
-  %c0 = arith.constant 0 : index
-  %0 = tensor.pad %input low[0, %c0, 0] high[%c0, 0, 0] {
-  ^bb0(%dim0: index, %dim1: index, %dim2: index):
-    tensor.yield %f0 : f32
-  } : tensor<?x?x?xf32> to tensor<?x?x?xf32>
-  return %0 : tensor<?x?x?xf32>
-}
-
-// CHECK-NOT: scf.if
-// CHECK: tensor.pad
-
-// -----
-
-// CHECK-LABEL: func @pad_non_zero_sizes
-// CHECK-SAME: (%[[INPUT:.+]]: tensor<?x?x?xf32>, %[[LOW0:.+]]: index, %[[HIGH1:.+]]: index)
-func.func @pad_non_zero_sizes(%input: tensor<?x?x?xf32>, %low0: index, %high1: index) -> tensor<?x?x?xf32> {
-  %f0 = arith.constant 0.0 : f32
-  %0 = tensor.pad %input low[%low0, 0, 0] high[0, %high1, 0] {
-  ^bb0(%dim0: index, %dim1: index, %dim2: index):
-    tensor.yield %f0 : f32
-  } : tensor<?x?x?xf32> to tensor<?x?x?xf32>
-  return %0 : tensor<?x?x?xf32>
-}
-
-// CHECK-DAG: %[[F0:.+]] = arith.constant 0.000000e+00 : f32
-// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
-// CHECK: %[[EQ0:.+]] = arith.cmpi eq, %[[LOW0]], %[[C0]] : index
-// CHECK: %[[EQ1:.+]] = arith.cmpi eq, %[[HIGH1]], %[[C0]] : index
-// CHECK: %[[AND:.+]] = arith.andi %[[EQ0]], %[[EQ1]] : i1
-// CHECK: %[[IF:.+]] = scf.if %[[AND]] -> (tensor<?x?x?xf32>) {
-// CHECK:   scf.yield %[[INPUT]] : tensor<?x?x?xf32>
-// CHECK: } else {
-// CHECK:   %[[PAD:.+]] = tensor.pad %[[INPUT]] low[%[[LOW0]], 0, 0] high[0, %[[HIGH1]], 0] {
-// CHECK:   ^bb0(%{{.+}}: index, %{{.+}}: index, %{{.+}}: index):
-// CHECK:     tensor.yield %[[F0]] : f32
-// CHECK:   } : tensor<?x?x?xf32> to tensor<?x?x?xf32>
-// CHECK:   scf.yield %[[PAD]] : tensor<?x?x?xf32>
-// CHECK: }
-// CHECK: return %[[IF]] : tensor<?x?x?xf32>
diff --git a/mlir/test/lib/Dialect/Tensor/TestTensorTransforms.cpp b/mlir/test/lib/Dialect/Tensor/TestTensorTransforms.cpp
--- a/mlir/test/lib/Dialect/Tensor/TestTensorTransforms.cpp
+++ b/mlir/test/lib/Dialect/Tensor/TestTensorTransforms.cpp
@@ -43,11 +43,6 @@
 
   void runOnOperation() override;
 
-  Option<bool> testSplitPaddingPatterns{
-      *this, "test-split-padding-patterns",
-      llvm::cl::desc("Test patterns to split tensor.pad ops"),
-      llvm::cl::init(false)};
-
   Option<bool> testFoldConstantExtractSlice{
       *this, "test-fold-constant-extract-slice",
       llvm::cl::desc("Test folding arith.constant and tensor.extract_slice"),
@@ -111,12 +106,6 @@
   (void)applyPatternsAndFoldGreedily(rootOp, std::move(patterns));
 }
 
-static void applySplitPaddingPatterns(Operation *rootOp) {
-  RewritePatternSet patterns(rootOp->getContext());
-  tensor::populateSplitPaddingPatterns(patterns);
-  (void)applyPatternsAndFoldGreedily(rootOp, std::move(patterns));
-}
-
 static void applyFoldConstantExtractSlicePatterns(Operation *rootOp) {
   RewritePatternSet patterns(rootOp->getContext());
   tensor::ControlConstantExtractSliceFusionFn controlFn =
@@ -291,8 +280,6 @@
   Operation *rootOp = getOperation();
   if (testSimplifyPackPatterns)
     applySimplifyPackPatterns(rootOp);
-  if (testSplitPaddingPatterns)
-    applySplitPaddingPatterns(rootOp);
   if (testFoldConstantExtractSlice)
     applyFoldConstantExtractSlicePatterns(rootOp);
   if (testFoldConsecutiveInsertExtractSlice)