diff --git a/mlir/include/mlir/Dialect/Linalg/Passes.h b/mlir/include/mlir/Dialect/Linalg/Passes.h
--- a/mlir/include/mlir/Dialect/Linalg/Passes.h
+++ b/mlir/include/mlir/Dialect/Linalg/Passes.h
@@ -20,7 +20,7 @@
 std::unique_ptr<OperationPass<FuncOp>> createLinalgFoldUnitExtentDimsPass();
 
-std::unique_ptr<Pass> createLinalgFusionOfTensorOpsPass();
+std::unique_ptr<Pass> createLinalgElementwiseOpFusionPass();
 
 std::unique_ptr<Pass> createFoldReshapeOpsByLinearizationPass();
 
 std::unique_ptr<OperationPass<FuncOp>>
diff --git a/mlir/include/mlir/Dialect/Linalg/Passes.td b/mlir/include/mlir/Dialect/Linalg/Passes.td
--- a/mlir/include/mlir/Dialect/Linalg/Passes.td
+++ b/mlir/include/mlir/Dialect/Linalg/Passes.td
@@ -56,9 +56,9 @@
   ];
 }
 
-def LinalgFusionOfTensorOps : Pass<"linalg-fusion-for-tensor-ops"> {
-  let summary = "Fuse operations on RankedTensorType in linalg dialect";
-  let constructor = "mlir::createLinalgFusionOfTensorOpsPass()";
+def LinalgElementwiseOpFusion : Pass<"linalg-fuse-elementwise-ops"> {
+  let summary = "Fuse elementwise operations on tensors";
+  let constructor = "mlir::createLinalgElementwiseOpFusionPass()";
   let options = [
     Option<"allowFoldingUnitDimReshapes", "allow-folding-unit-dim-reshapes",
            "bool", /*default=*/"false",
diff --git a/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt b/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt
--- a/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt
@@ -5,9 +5,9 @@
   Detensorize.cpp
   Distribution.cpp
   DropUnitDims.cpp
+  ElementwiseOpFusion.cpp
   ElementwiseToLinalg.cpp
   Fusion.cpp
-  FusionOnTensors.cpp
   Generalization.cpp
   Hoisting.cpp
   InlineScalarOperands.cpp
diff --git a/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
rename from mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp
rename to mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
@@ -1,4 +1,4 @@
-//===- Fusion.cpp - Implementation of linalg Fusion -----------------------===//
+//===- ElementwiseOpFusion.cpp - Implementation of linalg Fusion ---------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -1281,8 +1281,8 @@
 };
 
 /// Pass that fuses generic ops on tensors. Used only for testing.
-struct FusionOfTensorOpsPass
-    : public LinalgFusionOfTensorOpsBase<FusionOfTensorOpsPass> {
+struct LinalgElementwiseOpFusionPass
+    : public LinalgElementwiseOpFusionBase<LinalgElementwiseOpFusionPass> {
   void runOnOperation() override {
     Operation *op = getOperation();
     RewritePatternSet patterns(op->getContext());
@@ -1410,8 +1410,8 @@
   patterns.add(context);
 }
 
-std::unique_ptr<Pass> mlir::createLinalgFusionOfTensorOpsPass() {
-  return std::make_unique<FusionOfTensorOpsPass>();
+std::unique_ptr<Pass> mlir::createLinalgElementwiseOpFusionPass() {
+  return std::make_unique<LinalgElementwiseOpFusionPass>();
 }
 
 std::unique_ptr<Pass> mlir::createFoldReshapeOpsByLinearizationPass() {
diff --git a/mlir/test/Dialect/Linalg/fusion-tensor.mlir b/mlir/test/Dialect/Linalg/fusion-elementwise-ops.mlir
rename from mlir/test/Dialect/Linalg/fusion-tensor.mlir
rename to mlir/test/Dialect/Linalg/fusion-elementwise-ops.mlir
--- a/mlir/test/Dialect/Linalg/fusion-tensor.mlir
+++ b/mlir/test/Dialect/Linalg/fusion-elementwise-ops.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -linalg-fusion-for-tensor-ops -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -linalg-fuse-elementwise-ops -split-input-file | FileCheck %s
 
 // CHECK-DAG: [[$MAP0:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0, d1)>
 #map0 = affine_map<(d0, d1) -> (d0, d1)>
diff --git a/mlir/test/Dialect/Linalg/reshape_fusion.mlir b/mlir/test/Dialect/Linalg/reshape_fusion.mlir
--- a/mlir/test/Dialect/Linalg/reshape_fusion.mlir
+++ b/mlir/test/Dialect/Linalg/reshape_fusion.mlir
@@ -1,5 +1,5 @@
-// RUN: mlir-opt %s -linalg-fusion-for-tensor-ops="allow-folding-unit-dim-reshapes=false" -split-input-file | FileCheck %s
-// RUN: mlir-opt %s -linalg-fusion-for-tensor-ops="allow-folding-unit-dim-reshapes=true" -split-input-file | FileCheck %s --check-prefix=FOLDUNITDIM
+// RUN: mlir-opt %s -linalg-fuse-elementwise-ops="allow-folding-unit-dim-reshapes=false" -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -linalg-fuse-elementwise-ops="allow-folding-unit-dim-reshapes=true" -split-input-file | FileCheck %s --check-prefix=FOLDUNITDIM
 #map0 = affine_map<(d0, d1, d2) -> (d2, d0, d1)>
 #map1 = affine_map<(d0, d1, d2) -> (d1, d2, d0)>
 #map2 = affine_map<(d0, d1, d2) -> ()>