diff --git a/mlir/include/mlir/Transforms/LoopUtils.h b/mlir/include/mlir/Transforms/LoopUtils.h
--- a/mlir/include/mlir/Transforms/LoopUtils.h
+++ b/mlir/include/mlir/Transforms/LoopUtils.h
@@ -287,6 +287,11 @@
 separateFullTiles(MutableArrayRef<AffineForOp> nest,
                   SmallVectorImpl<AffineForOp> *fullTileNest = nullptr);
 
+/// Replaces all uses of `orig` with `replacement` except if the user is listed
+/// in `exceptions`.
+void replaceAllUsesExcept(Value orig, Value replacement,
+                          const SmallPtrSetImpl<Operation *> &exceptions);
+
 } // end namespace mlir
 
 #endif // MLIR_TRANSFORMS_LOOP_UTILS_H
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
@@ -24,6 +24,7 @@
 #include "mlir/IR/PatternMatch.h"
 #include "mlir/Support/LLVM.h"
 #include "mlir/Transforms/FoldUtils.h"
+#include "mlir/Transforms/LoopUtils.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
@@ -97,7 +98,26 @@
   }
   auto operands = getAssumedNonViewOperands(op);
   clonedViews.append(operands.begin(), operands.end());
-  return op.clone(b, loc, clonedViews);
+
+  Operation *clonedOp = op.clone(b, loc, clonedViews);
+  // When the producer is an IndexedGenericOp, we have to transform its block
+  // IV arguments according to the tiling of the consumer, i.e. offset them by
+  // the values computed in `loopRanges`.
+  if (auto indexedGenericOp = dyn_cast<IndexedGenericOp>(clonedOp)) {
+    auto &block = indexedGenericOp.region().front();
+
+    OpBuilder::InsertionGuard g(b);
+    b.setInsertionPointToStart(&block);
+    for (unsigned i = 0, e = indexedGenericOp.getNumLoops(); i < e; ++i) {
+      Value oldIndex = block.getArgument(i);
+      Value newIndex = b.create<AddIOp>(indexedGenericOp.getLoc(), oldIndex,
+                                        loopRanges[i].offset);
+      replaceAllUsesExcept(
+          oldIndex, newIndex,
+          SmallPtrSet<Operation *, 1>{newIndex.getDefiningOp()});
+    }
+  }
+  return clonedOp;
 }
 
 struct ViewDimension {
@@ -284,10 +304,6 @@
     LLVM_DEBUG(dbgs() << "\n***Consider producer:\t"
                       << *dependence.dependentOpView.op << "\n");
     auto producer = cast<LinalgOp>(dependence.dependentOpView.op);
-    if (isa<IndexedGenericOp>(dependence.dependentOpView.op)) {
-      LLVM_DEBUG(dbgs() << "Not fusing indexed_generic producer");
-      continue;
-    }
 
     // Check that the dependence is indeed on the input `consumerIdx` view.
     auto consumedView = dependence.indexingView;
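Note: to illustrate the IV-offsetting step above, here is a minimal before/after
sketch on a hypothetical 1-d indexed_generic producer body cloned under a tile
loop whose offset is %arg2 (all value names here are illustrative, not taken
from the patch):

  // Producer block as cloned under the consumer's tile loop; %i only
  // enumerates the tile-local iteration space.
  ^bb0(%i: index, %a: f32, %b: f32):
    %0 = index_cast %i : index to i32
    %1 = sitofp %0 : i32 to f32
    %2 = addf %a, %1 : f32
    linalg.yield %2 : f32

  // After the rewrite: every use of %i except the addi itself is redirected
  // to the offset index, restoring the global index semantics.
  ^bb0(%i: index, %a: f32, %b: f32):
    %new_i = addi %i, %arg2 : index
    %0 = index_cast %new_i : index to i32
    %1 = sitofp %0 : i32 to f32
    %2 = addf %a, %1 : f32
    linalg.yield %2 : f32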
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -1158,17 +1158,6 @@
   return tileLoops;
 }
 
-// Replaces all uses of `orig` with `replacement` except if the user is listed
-// in `exceptions`.
-static void
-replaceAllUsesExcept(Value orig, Value replacement,
-                     const SmallPtrSetImpl<Operation *> &exceptions) {
-  for (auto &use : llvm::make_early_inc_range(orig.getUses())) {
-    if (exceptions.count(use.getOwner()) == 0)
-      use.set(replacement);
-  }
-}
-
 /// Return the new lower bound, upper bound, and step in that order. Insert any
 /// additional bounds calculations before the given builder and any additional
 /// conversion back to the original loop induction value inside the given Block.
@@ -2382,3 +2371,12 @@
 
   return success();
 }
+
+void mlir::replaceAllUsesExcept(
+    Value orig, Value replacement,
+    const SmallPtrSetImpl<Operation *> &exceptions) {
+  for (auto &use : llvm::make_early_inc_range(orig.getUses())) {
+    if (exceptions.count(use.getOwner()) == 0)
+      use.set(replacement);
+  }
+}
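Note on the exception set: without it, replaceAllUsesExcept would also rewrite
the operand of the freshly created addi, since that addi is itself a user of
the old index. With illustrative names, given

  %new_i = addi %i, %arg2 : index
  %0 = index_cast %i : index to i32

rewriting every use of %i would turn the first line into the self-referential
"%new_i = addi %new_i, %arg2". Passing {newIndex.getDefiningOp()} as the
exception keeps the addi reading the original block argument while all other
users switch to the offset value.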
diff --git a/mlir/test/Dialect/Linalg/fusion.mlir b/mlir/test/Dialect/Linalg/fusion.mlir
--- a/mlir/test/Dialect/Linalg/fusion.mlir
+++ b/mlir/test/Dialect/Linalg/fusion.mlir
@@ -604,111 +604,6 @@
 // CHECK:   linalg.generic
 // CHECK:     mulf
 
-// -----
-
-#map5 = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
-#map6 = affine_map<(d0, d1) -> (d0, d1)>
-#id_2d = affine_map<(i, j) -> (i, j)>
-#pointwise_2d_trait = {
-  args_in = 2,
-  args_out = 1,
-  indexing_maps = [#id_2d, #id_2d, #id_2d],
-  iterator_types = ["parallel", "parallel"]
-}
-func @indexed_generic_test(%A: memref<?x?xf32>,
-                           %B: memref<?x?xf32>,
-                           %C: memref<?x?xf32>,
-                           %D: memref<?x?xf32>) {
-  linalg.generic #pointwise_2d_trait %A, %B, %C {
-  ^bb0(%e: f32, %arg5: f32, %arg6: f32):   // no predecessors
-    %2 = addf %e, %arg5 : f32
-    linalg.yield %2 : f32
-  }: memref<?x?xf32>, memref<?x?xf32>, memref<?x?xf32>
-  %c1 = constant 1 : index
-  %c0 = constant 0 : index
-  %c25 = constant 25 : index
-  %c10 = constant 10 : index
-  %0 = dim %C, 0 : memref<?x?xf32>
-  %1 = dim %C, 1 : memref<?x?xf32>
-  %2 = dim %D, 0 : memref<?x?xf32>
-  %3 = dim %D, 1 : memref<?x?xf32>
-  loop.for %arg2 = %c0 to %0 step %c10 {
-    loop.for %arg3 = %c0 to %1 step %c25 {
-      %4 = std.subview %C[%arg2, %arg3][%c10, %c25][%c1, %c1] :
-          memref<?x?xf32> to memref<?x?xf32, #map5>
-      %5 = std.subview %D[%arg2, %arg3][%c10, %c25][%c1, %c1] :
-          memref<?x?xf32> to memref<?x?xf32, #map5>
-      linalg.indexed_generic {
-        indexing_maps = [#map6, #map6],
-        iterator_types = ["parallel", "parallel"],
-        args_in = 1,
-        args_out = 1
-      } %4, %5 {
-      ^bb0(%arg4: index, %arg5: index, %arg6: f32, %arg7: f32):
-        %6 = addi %arg4, %arg2 : index
-        %7 = addi %arg5, %arg3 : index
-        %8 = index_cast %6 : index to i32
-        %9 = sitofp %8 : i32 to f32
-        %10 = index_cast %7 : index to i32
-        %11 = sitofp %10 : i32 to f32
-        %12 = addf %9, %11 : f32
-        linalg.yield %12 : f32
-      }: memref<?x?xf32, #map5>, memref<?x?xf32, #map5>
-    }
-  }
-  return
-}
-// CHECK-LABEL: func @indexed_generic_test
-// CHECK:  loop.for
-// CHECK:    loop.for
-// CHECK-NOT: loop.for
-// CHECK:      linalg.generic
-// CHECK:        addf
-// CHECK:      linalg.indexed_generic
-// CHECK:        index_cast
-
-// -----
-
-//
-// We should not be fusing indexed_generic into a generic yet.
-// https://bugs.llvm.org/show_bug.cgi?id=44875
-//
-
-#map0 = affine_map<(d0)[s0,s1] -> (d0 * s1 + s0)>
-#pointwise_map = affine_map<(d0) -> (d0)>
-#pointwise_1d_trait = {
-  args_in = 1,
-  args_out = 1,
-  indexing_maps = [#pointwise_map, #pointwise_map],
-  iterator_types = ["parallel"]
-}
-
-func @nofuse_indexed_generic(%A: memref<?xf32>, %B: memref<?xf32>, %C: memref<?xf32>) {
-  linalg.indexed_generic #pointwise_1d_trait %A, %B {
-  ^bb0(%i: index, %a: f32, %b: f32):
-    linalg.yield %a : f32
-  }: memref<?xf32>, memref<?xf32>
-
-  %c0 = constant 0 : index
-  %c1 = constant 1 : index
-  %c10 = constant 10 : index
-  %dB = dim %B, 0 : memref<?xf32>
-  loop.for %i = %c0 to %dB step %c10 {
-    %subB = subview %B[%i][%c10][%c1] : memref<?xf32> to memref<?xf32, #map0>
-    %subC = subview %C[%i][%c10][%c1] : memref<?xf32> to memref<?xf32, #map0>
-    linalg.generic #pointwise_1d_trait %subB, %subC {
-    ^bb0(%b: f32, %c: f32):
-      linalg.yield %b : f32
-    }: memref<?xf32, #map0>, memref<?xf32, #map0>
-  }
-  return
-}
-// CHECK-LABEL: func @nofuse_indexed_generic
-// CHECK-NOT:  loop.for
-// CHECK:      linalg.indexed_generic
-// CHECK:      loop.for
-// CHECK-NOT:  linalg.indexed_generic
-// CHECK:      linalg.generic
 
 // -----
diff --git a/mlir/test/Dialect/Linalg/fusion_indexed_generic.mlir b/mlir/test/Dialect/Linalg/fusion_indexed_generic.mlir
new file mode 100644
--- /dev/null
+++ b/mlir/test/Dialect/Linalg/fusion_indexed_generic.mlir
@@ -0,0 +1,186 @@
+// RUN: mlir-opt %s -linalg-fusion -split-input-file | FileCheck %s --dump-input-on-failure
+
+#map = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
+#id_2d = affine_map<(d0, d1) -> (d0, d1)>
+#pointwise_2d_trait = {
+  args_in = 2,
+  args_out = 1,
+  indexing_maps = [#id_2d, #id_2d, #id_2d],
+  iterator_types = ["parallel", "parallel"]
+}
+func @fuse_indexed_generic_consumer(%A: memref<?x?xf32>,
+                                    %B: memref<?x?xf32>,
+                                    %C: memref<?x?xf32>,
+                                    %D: memref<?x?xf32>) {
+  linalg.generic #pointwise_2d_trait %A, %B, %C {
+  ^bb0(%e: f32, %arg5: f32, %arg6: f32):   // no predecessors
+    %2 = addf %e, %arg5 : f32
+    linalg.yield %2 : f32
+  }: memref<?x?xf32>, memref<?x?xf32>, memref<?x?xf32>
+  %c1 = constant 1 : index
+  %c0 = constant 0 : index
+  %c25 = constant 25 : index
+  %c10 = constant 10 : index
+  %0 = dim %C, 0 : memref<?x?xf32>
+  %1 = dim %C, 1 : memref<?x?xf32>
+  %2 = dim %D, 0 : memref<?x?xf32>
+  %3 = dim %D, 1 : memref<?x?xf32>
+  loop.for %arg2 = %c0 to %0 step %c10 {
+    loop.for %arg3 = %c0 to %1 step %c25 {
+      %4 = std.subview %C[%arg2, %arg3][%c10, %c25][%c1, %c1] :
+          memref<?x?xf32> to memref<?x?xf32, #map>
+      %5 = std.subview %D[%arg2, %arg3][%c10, %c25][%c1, %c1] :
+          memref<?x?xf32> to memref<?x?xf32, #map>
+      linalg.indexed_generic {
+        indexing_maps = [#id_2d, #id_2d],
+        iterator_types = ["parallel", "parallel"],
+        args_in = 1,
+        args_out = 1
+      } %4, %5 {
+      ^bb0(%arg4: index, %arg5: index, %arg6: f32, %arg7: f32):
+        %6 = addi %arg4, %arg2 : index
+        %7 = addi %arg5, %arg3 : index
+        %8 = index_cast %6 : index to i32
+        %9 = sitofp %8 : i32 to f32
+        %10 = index_cast %7 : index to i32
+        %11 = sitofp %10 : i32 to f32
+        %12 = addf %9, %11 : f32
+        linalg.yield %12 : f32
+      }: memref<?x?xf32, #map>, memref<?x?xf32, #map>
+    }
+  }
+  return
+}
+// CHECK-LABEL: func @fuse_indexed_generic_consumer
+// CHECK:  loop.for
+// CHECK:    loop.for
+// CHECK-NOT: loop.for
+// CHECK:      linalg.generic
+// CHECK-NOT:    addi
+// CHECK:        addf
+// CHECK:      linalg.indexed_generic
+// CHECK:        index_cast
+
+// -----
+
+#map = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
+#id_2d = affine_map<(d0, d1) -> (d0, d1)>
+#pointwise_2d_trait = {
+  args_in = 2,
+  args_out = 1,
+  indexing_maps = [#id_2d, #id_2d, #id_2d],
+  iterator_types = ["parallel", "parallel"]
+}
+func @fuse_indexed_generic_producer(%A: memref<?x?xf32>,
+                                    %B: memref<?x?xf32>,
+                                    %C: memref<?x?xf32>,
+                                    %D: memref<?x?xf32>) {
+  %c1 = constant 1 : index
+  %c0 = constant 0 : index
+  %c25 = constant 25 : index
+  %c10 = constant 10 : index
+  linalg.indexed_generic #pointwise_2d_trait %A, %B, %C {
+  ^bb0(%i: index, %j: index, %a: f32, %b: f32, %c: f32):   // no predecessors
+    %i_int = index_cast %i : index to i32
+    %i_float = sitofp %i_int : i32 to f32
+    %ab = addf %a, %b : f32
+    %out = addf %ab, %i_float : f32
+    linalg.yield %out : f32
+  }: memref<?x?xf32>, memref<?x?xf32>, memref<?x?xf32>
+  %C_X = dim %C, 0 : memref<?x?xf32>
+  %C_Y = dim %C, 1 : memref<?x?xf32>
+  %D_X = dim %D, 0 : memref<?x?xf32>
+  %D_Y = dim %D, 1 : memref<?x?xf32>
+  loop.parallel (%arg2, %arg3) = (%c0, %c0) to (%C_X, %C_Y) step (%c10, %c25) {
+    %C_view = std.subview %C[%arg2, %arg3][%c10, %c25][%c1, %c1] :
+        memref<?x?xf32> to memref<?x?xf32, #map>
+    %D_view = std.subview %D[%arg2, %arg3][%c10, %c25][%c1, %c1] :
+        memref<?x?xf32> to memref<?x?xf32, #map>
+    linalg.generic {
+      indexing_maps = [#id_2d, #id_2d],
+      iterator_types = ["parallel", "parallel"],
+      args_in = 1,
+      args_out = 1
+    } %C_view, %D_view {
+    ^bb0(%a: f32, %b: f32):
+      %ab = addf %a, %b : f32
+      linalg.yield %ab : f32
+    }: memref<?x?xf32, #map>, memref<?x?xf32, #map>
+  }
+  return
+}
+// CHECK-LABEL: func @fuse_indexed_generic_producer
+// CHECK:  loop.parallel ([[I:%.*]], [[J:%.*]]) =
+// CHECK-NOT: loop.parallel
+// CHECK:      linalg.indexed_generic
+// CHECK:      ^bb0([[i:%.*]]: index, [[j:%.*]]: index
+// CHECK:      [[i_new:%.*]] = addi [[i]], [[I]] : index
+// CHECK:      [[j_new:%.*]] = addi [[j]], [[J]] : index
+// CHECK:      {{.*}} = index_cast [[i_new]] : index to i32
+// CHECK:      linalg.generic
+// CHECK:        addf
+
+// -----
+
+#map = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
+#id_2d = affine_map<(d0, d1) -> (d0, d1)>
+#pointwise_2d_trait = {
+  args_in = 2,
+  args_out = 1,
+  indexing_maps = [#id_2d, #id_2d, #id_2d],
+  iterator_types = ["parallel", "parallel"]
+}
+func @fuse_indexed_generic_producer_tile_second_dim_only(%A: memref<?x?xf32>,
+                                                         %B: memref<?x?xf32>,
+                                                         %C: memref<?x?xf32>,
+                                                         %D: memref<?x?xf32>) {
+  %c1 = constant 1 : index
+  %c3 = constant 3 : index
+  %c0 = constant 0 : index
+  linalg.indexed_generic #pointwise_2d_trait %A, %B, %C {
+  ^bb0(%i: index, %j: index, %a: f32, %b: f32, %c: f32):   // no predecessors
+    %j_int = index_cast %j : index to i32
+    %j_float = sitofp %j_int : i32 to f32
+    %ab = addf %a, %b : f32
+    %out = addf %ab, %j_float : f32
+    linalg.yield %out : f32
+  }: memref<?x?xf32>, memref<?x?xf32>, memref<?x?xf32>
+  %C_X = dim %C, 0 : memref<?x?xf32>
+  %C_Y = dim %C, 1 : memref<?x?xf32>
+  %D_X = dim %D, 0 : memref<?x?xf32>
+  %D_Y = dim %D, 1 : memref<?x?xf32>
+  %3 = linalg.range %c0 : %C_Y : %c3 : !linalg.range
+  loop.parallel (%j) = (%c0) to (%C_Y) step (%c3) {
+    %0 = affine.min affine_map<(d0, d1, d2) -> (d0, d1 - d2)>(%c3, %C_Y, %j)
+    %C_view = subview %C[%c0, %j] [%C_X, %0] [%c1, %c1] :
+        memref<?x?xf32> to memref<?x?xf32, #map>
+
+    %1 = affine.min affine_map<(d0, d1, d2) -> (d0, d1 - d2)>(%c3, %D_Y, %j)
+    %D_view = subview %D[%c0, %j] [%D_X, %1] [%c1, %c1] :
+        memref<?x?xf32> to memref<?x?xf32, #map>
+
+    linalg.generic {
+      indexing_maps = [#id_2d, #id_2d],
+      iterator_types = ["parallel", "parallel"],
+      args_in = 1,
+      args_out = 1
+    } %C_view, %D_view {
+    ^bb0(%a: f32, %b: f32):
+      %ab = addf %a, %b : f32
+      linalg.yield %ab : f32
+    }: memref<?x?xf32, #map>, memref<?x?xf32, #map>
+    loop.yield
+  }
+  return
+}
+// CHECK-LABEL: func @fuse_indexed_generic_producer_tile_second_dim_only
+// CHECK:  [[C0:%.*]] = constant 0 : index
+// CHECK:  loop.parallel ([[J:%.*]]) =
+// CHECK-NOT: loop.parallel
+// CHECK:      linalg.indexed_generic
+// CHECK:      ^bb0([[i:%.*]]: index, [[j:%.*]]: index
+// CHECK:      [[i_new:%.*]] = addi [[i]], [[C0]] : index
+// CHECK:      [[j_new:%.*]] = addi [[j]], [[J]] : index
+// CHECK:      {{.*}} = index_cast [[j_new]] : index to i32
+// CHECK:      linalg.generic
+// CHECK:        addf
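Note: for @fuse_indexed_generic_producer, the CHECK lines anticipate fused IR
of roughly the following shape inside the loop.parallel body. This is a
hand-written approximation for illustration, not actual pass output; the
%A_view/%B_view names and the elided subviews are assumptions:

  loop.parallel (%I, %J) = (%c0, %c0) to (%C_X, %C_Y) step (%c10, %c25) {
    // Subviews of %A, %B, %C for the current tile are computed here.
    linalg.indexed_generic #pointwise_2d_trait %A_view, %B_view, %C_view {
    ^bb0(%i: index, %j: index, %a: f32, %b: f32, %c: f32):
      // Inserted by the fusion: tile-local indices offset by the loop IVs.
      %i_new = addi %i, %I : index
      %j_new = addi %j, %J : index
      %i_int = index_cast %i_new : index to i32
      %i_float = sitofp %i_int : i32 to f32
      %ab = addf %a, %b : f32
      %out = addf %ab, %i_float : f32
      linalg.yield %out : f32
    }: memref<?x?xf32, #map>, memref<?x?xf32, #map>, memref<?x?xf32, #map>
    // ...followed by the original linalg.generic consumer on the same tile.
  }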