diff --git a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
@@ -188,6 +188,27 @@
                                     SmallPtrSet<Operation *, 1>{newIndex});
     }
   }
+  // When the producer has index semantics, we have to transform the indices of
+  // the producer according to the tiling of the consumer, i.e. offset them by
+  // the values computed in `loopRanges`.
+  if (producer.hasIndexSemantics()) {
+    assert(clonedOp->getNumRegions() == 1 &&
+           clonedOp->getRegion(0).getBlocks().size() == 1 &&
+           "expected producer to have one block.");
+    // Shift all indices by the tile offset.
+    Block &block = clonedOp->getRegion(0).front();
+    for (IndexOp indexOp : block.getOps<IndexOp>()) {
+      OpBuilder::InsertionGuard g(builder);
+      builder.setInsertionPointAfter(indexOp);
+      AffineExpr index, offset;
+      bindDims(builder.getContext(), index, offset);
+      AffineApplyOp applyOp = builder.create<AffineApplyOp>(
+          indexOp.getLoc(), index + offset,
+          ValueRange{indexOp.getResult(), loopRanges[indexOp.dim()].offset});
+      indexOp.getResult().replaceAllUsesExcept(
+          applyOp, SmallPtrSet<Operation *, 1>{applyOp});
+    }
+  }
   return clonedOp;
 }
 
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -232,8 +232,7 @@
          "expected linalg operation to have one block.");
   Block &block = op->getRegion(0).front();
 
-  for (IndexOp indexOp :
-       llvm::make_early_inc_range(block.getOps<IndexOp>())) {
+  for (IndexOp indexOp : block.getOps<IndexOp>()) {
     auto rangeIndex = loopIndexToRangeIndex.find(indexOp.dim());
     if (rangeIndex == loopIndexToRangeIndex.end())
       continue;
diff --git a/mlir/test/Dialect/Linalg/fusion-indexed-generic.mlir b/mlir/test/Dialect/Linalg/fusion-indexed-generic.mlir
--- a/mlir/test/Dialect/Linalg/fusion-indexed-generic.mlir
+++ b/mlir/test/Dialect/Linalg/fusion-indexed-generic.mlir
@@ -62,6 +62,70 @@
 
 // -----
 
+#map = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
+#id_2d = affine_map<(d0, d1) -> (d0, d1)>
+#pointwise_2d_trait = {
+  indexing_maps = [#id_2d, #id_2d, #id_2d],
+  iterator_types = ["parallel", "parallel"]
+}
+func @fuse_indexed_consumer(%A: memref<?x?xf32>,
+                            %B: memref<?x?xf32>,
+                            %C: memref<?x?xf32>,
+                            %D: memref<?x?xf32>) {
+  linalg.generic #pointwise_2d_trait
+    ins(%A, %B: memref<?x?xf32>, memref<?x?xf32>)
+   outs(%C : memref<?x?xf32>) {
+  ^bb0(%e: f32, %arg5: f32, %arg6: f32):   // no predecessors
+    %2 = addf %e, %arg5 : f32
+    linalg.yield %2 : f32
+  }
+  %c1 = constant 1 : index
+  %c0 = constant 0 : index
+  %c25 = constant 25 : index
+  %c10 = constant 10 : index
+  %0 = memref.dim %C, %c0 : memref<?x?xf32>
+  %1 = memref.dim %C, %c1 : memref<?x?xf32>
+  %2 = memref.dim %D, %c0 : memref<?x?xf32>
+  %3 = memref.dim %D, %c1 : memref<?x?xf32>
+  scf.for %arg2 = %c0 to %0 step %c10 {
+    scf.for %arg3 = %c0 to %1 step %c25 {
+      %4 = memref.subview %C[%arg2, %arg3][%c10, %c25][%c1, %c1] :
+          memref<?x?xf32> to memref<?x?xf32, #map>
+      %5 = memref.subview %D[%arg2, %arg3][%c10, %c25][%c1, %c1] :
+          memref<?x?xf32> to memref<?x?xf32, #map>
+      linalg.generic {
+        indexing_maps = [#id_2d, #id_2d],
+        iterator_types = ["parallel", "parallel"]}
+        ins(%4 : memref<?x?xf32, #map>)
+       outs(%5 : memref<?x?xf32, #map>) {
+      ^bb0(%arg4: f32, %arg5: f32):
+        %idx0 = linalg.index 0 : index
+        %idx1 = linalg.index 1 : index
+        %6 = addi %idx0, %arg2 : index
+        %7 = addi %idx1, %arg3 : index
+        %8 = index_cast %6 : index to i32
+        %9 = sitofp %8 : i32 to f32
+        %10 = index_cast %7 : index to i32
+        %11 = sitofp %10 : i32 to f32
+        %12 = addf %9, %11 : f32
+        linalg.yield %12 : f32
+      }
+    }
+  }
+  return
+}
+// CHECK-LABEL: func @fuse_indexed_consumer
+// CHECK: scf.for
+// CHECK: scf.for
+// CHECK-NOT: scf.for
+// CHECK: linalg.generic
+// CHECK-NOT: affine.apply
+// CHECK: addf
+// CHECK: linalg.generic
+// CHECK: index_cast
+
+// -----
+
 #map = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
 #id_2d = affine_map<(d0, d1) -> (d0, d1)>
 #pointwise_2d_trait = {
@@ -124,6 +188,56 @@
 
 // -----
 
+#map = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
+func @fuse_indexed_producer(%A: memref<?x?xindex>,
+                            %B: memref<?x?xindex>) {
+  %c1 = constant 1 : index
+  %c0 = constant 0 : index
+  %c25 = constant 25 : index
+  %c10 = constant 10 : index
+  linalg.generic {
+    indexing_maps = [affine_map<(i, j) -> (j, i)>],
+    iterator_types = ["parallel", "parallel"]}
+   outs(%A : memref<?x?xindex>) {
+  ^bb0(%a: index):   // no predecessors
+    %idx0 = linalg.index 0 : index
+    %idx1 = linalg.index 1 : index
+    %0 = addi %idx0, %idx1 : index
+    linalg.yield %0 : index
+  }
+  %A_X = memref.dim %A, %c0 : memref<?x?xindex>
+  %A_Y = memref.dim %A, %c1 : memref<?x?xindex>
+  scf.parallel (%arg2, %arg3) = (%c0, %c0) to (%A_X, %A_Y) step (%c10, %c25) {
+    %A_view = memref.subview %A[%arg2, %arg3][%c10, %c25][%c1, %c1] :
+        memref<?x?xindex> to memref<?x?xindex, #map>
+    %B_view = memref.subview %B[%arg2, %arg3][%c10, %c25][%c1, %c1] :
+        memref<?x?xindex> to memref<?x?xindex, #map>
+    linalg.generic {
+      indexing_maps = [affine_map<(i, j) -> (i, j)>,
+                       affine_map<(i, j) -> (i, j)>],
+      iterator_types = ["parallel", "parallel"]}
+      ins(%A_view : memref<?x?xindex, #map>)
+     outs(%B_view : memref<?x?xindex, #map>) {
+    ^bb0(%a: index, %b: index):
+      linalg.yield %a : index
+    }
+  }
+  return
+}
+// CHECK: [[$MAP:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0 + d1)>
+// CHECK-LABEL: func @fuse_indexed_producer
+// CHECK: scf.parallel ([[I:%.*]], [[J:%.*]]) =
+// CHECK: linalg.generic
+// CHECK: [[idx0:%.*]] = linalg.index 0 : index
+// CHECK: [[i_new:%.*]] = affine.apply [[$MAP]]([[idx0]], [[J]])
+// CHECK: [[idx1:%.*]] = linalg.index 1 : index
+// CHECK: [[j_new:%.*]] = affine.apply [[$MAP]]([[idx1]], [[I]])
+// CHECK: [[sum:%.*]] = addi [[i_new]], [[j_new]] : index
+// CHECK: linalg.yield [[sum]] : index
+// CHECK: linalg.generic
+
+// -----
+
 #map = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
 #id_2d = affine_map<(d0, d1) -> (d0, d1)>
 #pointwise_2d_trait = {
@@ -192,49 +306,48 @@
 // -----
 
 #map = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
-#id_2d = affine_map<(d0, d1) -> (d0, d1)>
-#pointwise_2d_trait = {
-  indexing_maps = [#id_2d],
-  iterator_types = ["parallel", "parallel"]
-}
-func @index_op(%A: memref<?x?xindex>,
-               %B: memref<?x?xindex>) {
-  linalg.generic #pointwise_2d_trait
-     outs(%B : memref<?x?xindex>) {
-  ^bb0(%arg6: index):   // no predecessors
-    %2 = constant 0 : index
-    linalg.yield %2 : index
-  }
+func @fuse_indexed_producer_tiled_second_dim_only(%A: memref<?x?xindex>,
+                                                  %B: memref<?x?xindex>) {
   %c1 = constant 1 : index
   %c0 = constant 0 : index
   %c25 = constant 25 : index
-  %c10 = constant 10 : index
-  %0 = memref.dim %A, %c0 : memref<?x?xindex>
-  %1 = memref.dim %A, %c1 : memref<?x?xindex>
-  %2 = memref.dim %B, %c0 : memref<?x?xindex>
-  %3 = memref.dim %B, %c1 : memref<?x?xindex>
-  scf.for %arg2 = %c0 to %0 step %c10 {
-    scf.for %arg3 = %c0 to %1 step %c25 {
-      %4 = memref.subview %A[%arg2, %arg3][%c10, %c25][%c1, %c1] :
-          memref<?x?xindex> to memref<?x?xindex, #map>
-      %5 = memref.subview %B[%arg2, %arg3][%c10, %c25][%c1, %c1] :
-          memref<?x?xindex> to memref<?x?xindex, #map>
-      linalg.generic {
-        indexing_maps = [#id_2d, #id_2d],
-        iterator_types = ["parallel", "parallel"]}
-        ins(%4 : memref<?x?xindex, #map>)
-       outs(%5 : memref<?x?xindex, #map>) {
-      ^bb0(%arg6: index, %arg7: index):
-        %6 = linalg.index 0 : index
-        linalg.yield %6 : index
-      }
+  linalg.generic {
+    indexing_maps = [affine_map<(i, j) -> (i, j)>],
= ["parallel", "parallel"]} + outs(%A : memref) { + ^bb0(%a: index): // no predecessors + %idx0 = linalg.index 0 : index + %idx1 = linalg.index 1 : index + %0 = addi %idx0, %idx1 : index + linalg.yield %0 : index + } + %A_X = memref.dim %A, %c0 : memref + %A_Y = memref.dim %A, %c1 : memref + scf.parallel (%arg3) = (%c0) to (%A_Y) step (%c25) { + %A_view = memref.subview %A[%c0, %arg3][%A_X, %c25][%c1, %c1] : + memref to memref + %B_view = memref.subview %B[%c0, %arg3][%A_X, %c25][%c1, %c1] : + memref to memref + linalg.generic { + indexing_maps = [affine_map<(i, j) -> (i, j)>, + affine_map<(i, j) -> (i, j)>], + iterator_types = ["parallel", "parallel"]} + ins(%A_view : memref) + outs(%B_view : memref) { + ^bb0(%a: index, %b: index): + linalg.yield %a : index } } return } -// CHECK-LABEL: func @index_op -// CHECK: linalg.generic -// CHECK: scf.for -// CHECK: scf.for -// CHECK-NOT: scf.for -// CHECK: linalg.generic +// CHECK: [[$MAP:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0 + d1)> +// CHECK-LABEL: func @fuse_indexed_producer_tiled_second_dim_only +// CHECK: scf.parallel ([[J:%.*]]) = +// CHECK: linalg.generic +// CHECK: [[idx0:%.*]] = linalg.index 0 : index +// CHECK: [[idx1:%.*]] = linalg.index 1 : index +// CHECK: [[j_new:%.*]] = affine.apply [[$MAP]]([[idx1]], [[J]]) +// CHECK: [[sum:%.*]] = addi [[idx0]], [[j_new]] : index +// CHECK: linalg.yield [[sum]] : index +// CHECK: linalg.generic + diff --git a/mlir/test/lib/Transforms/TestLinalgFusionTransforms.cpp b/mlir/test/lib/Transforms/TestLinalgFusionTransforms.cpp --- a/mlir/test/lib/Transforms/TestLinalgFusionTransforms.cpp +++ b/mlir/test/lib/Transforms/TestLinalgFusionTransforms.cpp @@ -126,10 +126,6 @@ // Save original Linalg ops, we only want to make a pass over those. SmallVector linalgOps; f.walk([&](LinalgOp op) { - // TODO: remove hasIndexSemantics check once index ops are supported. - if (op.hasIndexSemantics()) - return; - // TODO: support multi-results. if (op->getNumResults() <= 1) linalgOps.push_back(op);