diff --git a/mlir/include/mlir/IR/AffineMap.h b/mlir/include/mlir/IR/AffineMap.h
--- a/mlir/include/mlir/IR/AffineMap.h
+++ b/mlir/include/mlir/IR/AffineMap.h
@@ -327,6 +327,21 @@
 /// ```
 AffineMap concatAffineMaps(ArrayRef<AffineMap> maps);
 
+/// Returns the map that results from projecting out the dimensions specified
+/// in `projectedDimensions`. The projected dimensions are set to 0.
+///
+/// Example:
+/// 1) map                  : affine_map<(d0, d1, d2) -> (d0, d1)>
+///    projected_dimensions : {2}
+///    result               : affine_map<(d0, d1) -> (d0, d1)>
+///
+/// 2) map                  : affine_map<(d0, d1) -> (d0 + d1)>
+///    projected_dimensions : {1}
+///    result               : affine_map<(d0) -> (d0)>
+///
+/// 3) map                  : affine_map<(d0, d1, d2) -> (d0, d1)>
+///    projected_dimensions : {1}
+///    result               : affine_map<(d0, d1) -> (d0, 0)>
 AffineMap getProjectedMap(AffineMap map,
                           ArrayRef<unsigned> projectedDimensions);
 
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -221,9 +221,8 @@
 
 static SmallVector<Value, 4>
 makeTiledShapes(OpBuilder &b, Location loc, LinalgOp linalgOp,
-                ValueRange operands, AffineMap map, ValueRange ivs,
+                ArrayRef<Value> tiledOperands, AffineMap map, ValueRange ivs,
                 ValueRange tileSizes, ValueRange allShapeSizes) {
-  assert(operands.size() == linalgOp.getShapedOperands().size());
   assert(ivs.size() == static_cast<size_t>(llvm::count_if(
                            llvm::make_range(tileSizes.begin(), tileSizes.end()),
                            [](Value v) { return !isZero(v); })) &&
@@ -243,11 +242,9 @@
     subShapeSizes.push_back(size - std_constant_index(1));
   }
 
-  auto *op = linalgOp.getOperation();
-
   SmallVector<Value, 4> res;
-  res.reserve(op->getNumOperands());
-  for (auto en : llvm::enumerate(operands)) {
+  res.reserve(tiledOperands.size());
+  for (auto en : llvm::enumerate(tiledOperands)) {
     Value shapedOp = en.value();
     ShapedType shapedType = shapedOp.getType().cast<ShapedType>();
     unsigned rank = shapedType.getRank();
@@ -342,6 +339,7 @@
   LoopIndexToRangeIndexMap loopIndexToRangeIndex;
   std::tie(loopRanges, loopIndexToRangeIndex) = makeTiledLoopRanges(
       b, op.getLoc(), shapeSizesToLoopsMap, allShapeSizes, tileSizes);
+
   SmallVector<Attribute, 4> iteratorTypes;
   for (auto attr :
        enumerate(op.iterator_types().cast<ArrayAttr>().getValue())) {
@@ -574,10 +572,10 @@
 static void insertTilingPatterns(OwningRewritePatternList &patterns,
                                  const LinalgTilingOptions &options,
                                  MLIRContext *ctx) {
-  RewritePatternList<
+  RewritePatternList<GenericOp, IndexedGenericOp,
 #define GET_OP_LIST
 #include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
-      >::insert(patterns, options, ctx);
+                     >::insert(patterns, options, ctx);
 }
 
 static void applyTilingToLoopPatterns(LinalgTilingLoopType loopType,
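For reference, the following standalone C++ sketch (a hypothetical test driver, not included in this patch) exercises example 3 of the getProjectedMap documentation above. It assumes the ArrayRef<unsigned> overload declared in AffineMap.h and a program linked against the MLIR IR library; everything else uses only existing MLIR API (getAffineDimExpr, AffineMap::get).

#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "llvm/Support/raw_ostream.h"

using namespace mlir;

int main() {
  MLIRContext context;

  // Build map : affine_map<(d0, d1, d2) -> (d0, d1)>.
  AffineExpr d0 = getAffineDimExpr(0, &context);
  AffineExpr d1 = getAffineDimExpr(1, &context);
  AffineMap map =
      AffineMap::get(/*dimCount=*/3, /*symbolCount=*/0, {d0, d1}, &context);

  // Project out dimension 1 (example 3 above): d1 is dropped from the domain,
  // its use in the results is replaced by the constant 0, and the remaining
  // dimensions are renumbered.
  AffineMap projected = getProjectedMap(map, /*projectedDimensions=*/{1});
  llvm::outs() << projected << "\n"; // expected: (d0, d1) -> (d0, 0)
  return 0;
}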
diff --git a/mlir/test/Dialect/Linalg/tile-tensors.mlir b/mlir/test/Dialect/Linalg/tile-tensors.mlir
--- a/mlir/test/Dialect/Linalg/tile-tensors.mlir
+++ b/mlir/test/Dialect/Linalg/tile-tensors.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,3,4" | FileCheck %s
+// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,3,4" -split-input-file | FileCheck %s
 
 // CHECK-LABEL: func @matmul_tensors(
 // CHECK-SAME:    %[[TA:[0-9a-z]+]]: tensor<?x?xf32>
@@ -26,3 +26,97 @@
 // CHECK: return %[[TD0]] : tensor<?x?xf32>
   return %0 : tensor<?x?xf32>
 }
+
+// -----
+
+func @generic_op_tensors(
+  %arg0 : tensor<?x?x?xf32>, %arg1 : tensor<?x?x?xf32>) -> tensor<?x?x?xf32> {
+  %c0 = constant 0 : index
+  %c1 = constant 1 : index
+  %c2 = constant 2 : index
+  %0 = dim %arg0, %c0 : tensor<?x?x?xf32>
+  %1 = dim %arg0, %c1 : tensor<?x?x?xf32>
+  %2 = dim %arg0, %c2 : tensor<?x?x?xf32>
+  %3 = linalg.init_tensor [%0, %1, %2] : tensor<?x?x?xf32>
+  %4 = linalg.generic
+    {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
+                      affine_map<(d0, d1, d2) -> (d0, d2, d1)>,
+                      affine_map<(d0, d1, d2) -> (d2, d1, d0)>],
+     iterator_types = ["parallel", "parallel", "parallel"]}
+    ins(%arg0, %arg1 : tensor<?x?x?xf32>, tensor<?x?x?xf32>)
+    outs(%3 : tensor<?x?x?xf32>) {
+    ^bb0(%arg2 : f32, %arg3: f32, %arg4: f32):
+      %5 = addf %arg2, %arg3 : f32
+      linalg.yield %5 : f32
+    } -> tensor<?x?x?xf32>
+  return %4 : tensor<?x?x?xf32>
+}
+
+// CHECK-LABEL: func @generic_op_tensors
+// CHECK-SAME:    %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
+// CHECK-SAME:    %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
+// CHECK: %[[INIT:.+]] = linalg.init_tensor
+// CHECK: %[[TD0:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[TC0:.+]] = %[[INIT]]) -> (tensor<?x?x?xf32>) {
+// CHECK: %[[TD1:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[TC1:.+]] = %[[TC0]]) -> (tensor<?x?x?xf32>) {
+// CHECK: %[[TD2:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[TC2:.+]] = %[[TC1]]) -> (tensor<?x?x?xf32>) {
+// CHECK: %[[STARG0:.+]] = subtensor %[[ARG0]][{{.+}}] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
+// CHECK: %[[STARG1:.+]] = subtensor %[[ARG1]][{{.+}}] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
+// CHECK: %[[STARG2:.+]] = subtensor %[[TC2]][{{.+}}] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
+// CHECK: %[[STRETURN:.+]] = linalg.generic
+// CHECK-SAME: ins(%[[STARG0]], %[[STARG1]] : tensor<?x?x?xf32>, tensor<?x?x?xf32>)
+// CHECK-SAME: outs(%[[STARG2]] : tensor<?x?x?xf32>)
+// CHECK: %[[TD:.+]] = subtensor_insert %[[STRETURN]] into %[[TC2]]
+// CHECK: scf.yield %[[TD]]
+// CHECK: }
+// CHECK: scf.yield %[[TD2]]
+// CHECK: }
+// CHECK: scf.yield %[[TD1]]
+// CHECK: }
+// CHECK: return %[[TD0]]
+
+// -----
+
+func @indexed_generic_op_tensors(
+  %arg0 : tensor<?x?x?xf32>, %arg1 : tensor<?x?x?xf32>) -> tensor<?x?x?xf32> {
+  %c0 = constant 0 : index
+  %c1 = constant 1 : index
+  %c2 = constant 2 : index
+  %0 = dim %arg0, %c0 : tensor<?x?x?xf32>
+  %1 = dim %arg0, %c1 : tensor<?x?x?xf32>
+  %2 = dim %arg0, %c2 : tensor<?x?x?xf32>
+  %3 = linalg.init_tensor [%0, %1, %2] : tensor<?x?x?xf32>
+  %4 = linalg.indexed_generic
+    {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
+                      affine_map<(d0, d1, d2) -> (d0, d2, d1)>,
+                      affine_map<(d0, d1, d2) -> (d2, d1, d0)>],
+     iterator_types = ["parallel", "parallel", "parallel"]}
+    ins(%arg0, %arg1 : tensor<?x?x?xf32>, tensor<?x?x?xf32>)
+    outs(%3 : tensor<?x?x?xf32>) {
+    ^bb0(%arg2 : index, %arg3 : index, %arg4 : index, %arg5 : f32, %arg6: f32, %arg7: f32):
+      %5 = addf %arg5, %arg6 : f32
+      linalg.yield %5 : f32
+    } -> tensor<?x?x?xf32>
+  return %4 : tensor<?x?x?xf32>
+}
+
+// CHECK-LABEL: func @indexed_generic_op_tensors
+// CHECK-SAME:    %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
+// CHECK-SAME:    %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
+// CHECK: %[[INIT:.+]] = linalg.init_tensor
+// CHECK: %[[TD0:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[TC0:.+]] = %[[INIT]]) -> (tensor<?x?x?xf32>) {
+// CHECK: %[[TD1:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[TC1:.+]] = %[[TC0]]) -> (tensor<?x?x?xf32>) {
+// CHECK: %[[TD2:.+]] = scf.for %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[TC2:.+]] = %[[TC1]]) -> (tensor<?x?x?xf32>) {
+// CHECK: %[[STARG0:.+]] = subtensor %[[ARG0]][{{.+}}] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
+// CHECK: %[[STARG1:.+]] = subtensor %[[ARG1]][{{.+}}] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
+// CHECK: %[[STARG2:.+]] = subtensor %[[TC2]][{{.+}}] : tensor<?x?x?xf32> to tensor<?x?x?xf32>
+// CHECK: %[[STRETURN:.+]] = linalg.indexed_generic
+// CHECK-SAME: ins(%[[STARG0]], %[[STARG1]] : tensor<?x?x?xf32>, tensor<?x?x?xf32>)
+// CHECK-SAME: outs(%[[STARG2]] : tensor<?x?x?xf32>)
+// CHECK: %[[TD:.+]] = subtensor_insert %[[STRETURN]] into %[[TC2]]
+// CHECK: scf.yield %[[TD]]
+// CHECK: }
+// CHECK: scf.yield %[[TD2]]
+// CHECK: }
+// CHECK: scf.yield %[[TD1]]
+// CHECK: }
+// CHECK: return %[[TD0]]
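For readers tracing the tiled scf.for nests checked above back to the code change: the loop ranges come from makeTiledLoopRanges(b, op.getLoc(), shapeSizesToLoopsMap, allShapeSizes, tileSizes) in Tiling.cpp. The standalone C++ sketch below is an illustration, not part of the patch; it assumes that shapeSizesToLoopsMap is the op's shapes-to-loops map, i.e. the left inverse of the concatenated indexing maps, and it uses only the concatAffineMaps and inversePermutation helpers from AffineMap.h to reproduce that map for the indexing maps of @generic_op_tensors.

#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "llvm/Support/raw_ostream.h"

using namespace mlir;

int main() {
  MLIRContext context;
  AffineExpr d0 = getAffineDimExpr(0, &context);
  AffineExpr d1 = getAffineDimExpr(1, &context);
  AffineExpr d2 = getAffineDimExpr(2, &context);

  // The three indexing maps of @generic_op_tensors.
  AffineMap m0 = AffineMap::get(3, 0, {d0, d1, d2}, &context); // (d0, d1, d2)
  AffineMap m1 = AffineMap::get(3, 0, {d0, d2, d1}, &context); // (d0, d2, d1)
  AffineMap m2 = AffineMap::get(3, 0, {d2, d1, d0}, &context); // (d2, d1, d0)

  // Concatenate the results into one 9-result map, then take its left
  // inverse: each loop dimension is mapped to the first shape dimension that
  // indexes it, which is where the tiled loop bounds take their sizes from.
  AffineMap concat = concatAffineMaps({m0, m1, m2});
  AffineMap shapesToLoops = inversePermutation(concat);
  llvm::outs() << shapesToLoops << "\n";
  // expected: (d0, d1, d2, d3, d4, d5, d6, d7, d8) -> (d0, d1, d2)
  return 0;
}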