Index: mlir/include/mlir/Dialect/MemRef/Transforms/Passes.h
===================================================================
--- mlir/include/mlir/Dialect/MemRef/Transforms/Passes.h
+++ mlir/include/mlir/Dialect/MemRef/Transforms/Passes.h
@@ -27,7 +27,7 @@
 } // namespace vector
 
 namespace memref {
-
+class AllocOp;
 //===----------------------------------------------------------------------===//
 // Patterns
 //===----------------------------------------------------------------------===//
@@ -51,6 +51,31 @@
 /// terms of shapes of its input operands.
 void populateResolveShapedTypeResultDimsPatterns(RewritePatternSet &patterns);
 
+/// Transformation to do multi-buffering/array expansion to remove dependencies
+/// on the temporary allocation between consecutive loop iterations.
+/// This converts:
+/// ```
+/// %0 = memref.alloc() : memref<4x128xf32>
+/// scf.for %iv = %c1 to %c1024 step %c3 {
+///   memref.copy %1, %0 : memref<4x128xf32> to memref<4x128xf32>
+///   "some_use"(%0) : (memref<4x128xf32>) -> ()
+/// }
+/// ```
+/// into:
+/// ```
+/// %0 = memref.alloc() : memref<5x4x128xf32>
+/// scf.for %iv = %c1 to %c1024 step %c3 {
+///   %s = arith.subi %iv, %c1 : index
+///   %d = arith.divsi %s, %c3 : index
+///   %i = arith.remsi %d, %c5 : index
+///   %sv = memref.subview %0[%i, 0, 0] [1, 4, 128] [1, 1, 1] :
+///     memref<5x4x128xf32> to memref<4x128xf32, #map0>
+///   memref.copy %1, %sv : memref<4x128xf32> to memref<4x128xf32, #map0>
+///   "some_use"(%sv) : (memref<4x128xf32, #map0>) -> ()
+/// }
+/// ```
+bool multiBuffering(memref::AllocOp allocOp, unsigned multiplier);
+
 //===----------------------------------------------------------------------===//
 // Passes
 //===----------------------------------------------------------------------===//
Index: mlir/lib/Dialect/MemRef/Transforms/CMakeLists.txt
===================================================================
--- mlir/lib/Dialect/MemRef/Transforms/CMakeLists.txt
+++ mlir/lib/Dialect/MemRef/Transforms/CMakeLists.txt
@@ -2,6 +2,7 @@
   ComposeSubView.cpp
   ExpandOps.cpp
   FoldSubViewOps.cpp
+  MultiBuffering.cpp
   NormalizeMemRefs.cpp
   ResolveShapedTypeResultDims.cpp
Index: mlir/lib/Dialect/MemRef/Transforms/MultiBuffering.cpp
===================================================================
--- /dev/null
+++ mlir/lib/Dialect/MemRef/Transforms/MultiBuffering.cpp
@@ -0,0 +1,122 @@
+//===----------- MultiBuffering.cpp ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements multi buffering transformation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/MemRef/IR/MemRef.h"
+#include "mlir/Dialect/MemRef/Transforms/Passes.h"
+#include "mlir/Dialect/SCF/SCF.h"
+#include "mlir/IR/Dominance.h"
+
+using namespace mlir;
+
+/// Return true if the op fully overwrites the given `buffer` value.
+static bool overrideBuffer(Operation *op, Value buffer) {
+  return isa<memref::CopyOp>(op) &&
+         cast<memref::CopyOp>(op).target() == buffer;
+}
+
+/// Replace the uses of `oldOp` with the given `val` and for subview uses
+/// propagate the type change. Changing the memref type may require propagating
+/// it through subview ops so we cannot just do a replaceAllUse but need to
+/// propagate the type change.
+static void replaceUsesAndPropagateType(Operation *oldOp, Value val,
+                                        OpBuilder &builder) {
+  SmallVector<Operation *> opToDelete;
+  SmallVector<OpOperand *> operandsToReplace;
+  for (OpOperand &use : oldOp->getUses()) {
+    auto subviewUse = dyn_cast<memref::SubViewOp>(use.getOwner());
+    if (subviewUse) {
+      // A subview of the old value: recreate it on top of the new value with
+      // an inferred (rank-reduced) result type, then recurse into its uses.
+      builder.setInsertionPoint(subviewUse);
+      Type newType = memref::SubViewOp::inferRankReducedResultType(
+          subviewUse.getType().getRank(), val.getType().cast<MemRefType>(),
+          extractFromI64ArrayAttr(subviewUse.static_offsets()),
+          extractFromI64ArrayAttr(subviewUse.static_sizes()),
+          extractFromI64ArrayAttr(subviewUse.static_strides()));
+      Value newSubview = builder.create<memref::SubViewOp>(
+          subviewUse->getLoc(), newType.cast<MemRefType>(), val,
+          subviewUse.getMixedOffsets(), subviewUse.getMixedSizes(),
+          subviewUse.getMixedStrides());
+      replaceUsesAndPropagateType(subviewUse, newSubview, builder);
+      opToDelete.push_back(use.getOwner());
+      continue;
+    }
+    // Save the operand to and replace outside the loop to not invalidate the
+    // iterator.
+    operandsToReplace.push_back(&use);
+  }
+  for (OpOperand *operand : operandsToReplace)
+    operand->set(val);
+  // Clean up old subview ops.
+  for (Operation *op : opToDelete)
+    op->erase();
+}
+
+/// Transformation to do multi-buffering/array expansion to remove dependencies
+/// on the temporary allocation between consecutive loop iterations.
+// This is not a pattern as it requires propagating the new memref type to its
+// uses and requires updating subview ops.
+bool mlir::memref::multiBuffering(memref::AllocOp allocOp,
+                                  unsigned multiplier) {
+  DominanceInfo dom(allocOp->getParentOp());
+  scf::ForOp candidateLoop;
+  for (Operation *user : allocOp->getUsers()) {
+    auto parentLoop = user->getParentOfType<scf::ForOp>();
+    if (!parentLoop)
+      return false;
+    /// Make sure there is no loop carried dependency on the allocation.
+    if (!overrideBuffer(user, allocOp.getResult()))
+      continue;
+    // If the user doesn't dominate all the other users keep looking.
+    if (llvm::any_of(allocOp->getUsers(), [&](Operation *otherUser) {
+          return otherUser->getNumRegions() != 0 ||
+                 !dom.dominates(user, otherUser);
+        }))
+      continue;
+    candidateLoop = parentLoop;
+    break;
+  }
+  if (!candidateLoop)
+    return false;
+  // Prepend the multiplier dimension to the original allocation shape.
+  SmallVector<int64_t, 4> newShape(1, multiplier);
+  ArrayRef<int64_t> oldShape = allocOp.getType().getShape();
+  newShape.append(oldShape.begin(), oldShape.end());
+  auto newMemref = MemRefType::get(newShape, allocOp.getType().getElementType(),
+                                   MemRefLayoutAttrInterface(),
+                                   allocOp.getType().getMemorySpace());
+  OpBuilder builder(allocOp);
+  Location loc = allocOp->getLoc();
+  auto newAlloc = builder.create<memref::AllocOp>(loc, newMemref);
+  builder.setInsertionPoint(candidateLoop.getBody(),
+                            candidateLoop.getBody()->begin());
+  // Calculate the iteration index = ((iv - initial_val) / step) % multiplier.
+  Value bufferIndex = builder.create<arith::SubIOp>(
+      loc, candidateLoop.getInductionVar(), candidateLoop.getLowerBound());
+  bufferIndex =
+      builder.create<arith::DivSIOp>(loc, bufferIndex, candidateLoop.getStep());
+  Value numSlices = builder.create<arith::ConstantIndexOp>(loc, multiplier);
+  bufferIndex = builder.create<arith::RemSIOp>(loc, bufferIndex, numSlices);
+
+  SmallVector<OpFoldResult> offsets, sizes, strides;
+  offsets.push_back(bufferIndex);
+  offsets.append(oldShape.size(), builder.getIndexAttr(0));
+  strides.append(oldShape.size() + 1, builder.getIndexAttr(1));
+  sizes.push_back(builder.getIndexAttr(1));
+  for (int64_t size : oldShape)
+    sizes.push_back(builder.getIndexAttr(size));
+  auto dstMemref =
+      memref::SubViewOp::inferRankReducedResultType(
+          allocOp.getType().getRank(), newMemref, offsets, sizes, strides)
+          .cast<MemRefType>();
+  Value subview = builder.create<memref::SubViewOp>(loc, dstMemref, newAlloc,
+                                                    offsets, sizes, strides);
+  replaceUsesAndPropagateType(allocOp, subview, builder);
+  allocOp.erase();
+  return true;
+}
Index: mlir/test/Dialect/MemRef/multibuffering.mlir
===================================================================
--- /dev/null
+++ mlir/test/Dialect/MemRef/multibuffering.mlir
@@ -0,0 +1,85 @@
+// RUN: mlir-opt %s -allow-unregistered-dialect -test-multi-buffering=multiplier=5 -split-input-file | FileCheck %s
+
+// CHECK: #[[$MAP0:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 128 + s0 + d1)>
+
+// CHECK-LABEL: func @multi_buffer
+func @multi_buffer(%a: memref<1024x1024xf32>) {
+// CHECK-DAG: %[[A:.*]] = memref.alloc() : memref<5x4x128xf32>
+// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[C3:.*]] = arith.constant 3 : index
+  %0 = memref.alloc() : memref<4x128xf32>
+  %c1024 = arith.constant 1024 : index
+  %c1 = arith.constant 1 : index
+  %c3 = arith.constant 3 : index
+// CHECK: scf.for %[[IV:.*]] = %[[C1]]
+  scf.for %arg2 = %c1 to %c1024 step %c3 {
+// CHECK: %[[S:.*]] = arith.subi %[[IV]], %[[C1]] : index
+// CHECK: %[[D:.*]] = arith.divsi %[[S]], %[[C3]] : index
+// CHECK: %[[C5:.*]] = arith.constant 5 : index
+// CHECK: %[[I:.*]] = arith.remsi %[[D]], %[[C5]] : index
+// CHECK: %[[SV:.*]] = memref.subview %[[A]][%[[I]], 0, 0] [1, 4, 128] [1, 1, 1] : memref<5x4x128xf32> to memref<4x128xf32, #[[$MAP0]]>
+    %1 = memref.subview %a[%arg2, 0] [4, 128] [1, 1] :
+      memref<1024x1024xf32> to memref<4x128xf32, affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)>>
+// CHECK: memref.copy %{{.*}}, %[[SV]] : memref<4x128xf32, #{{.*}}> to memref<4x128xf32, #[[$MAP0]]>
+    memref.copy %1, %0 : memref<4x128xf32, affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)>> to memref<4x128xf32>
+// CHECK: "some_use"(%[[SV]]) : (memref<4x128xf32, #[[$MAP0]]>) -> ()
+    "some_use"(%0) : (memref<4x128xf32>) -> ()
+// CHECK: "some_use"(%[[SV]]) : (memref<4x128xf32, #[[$MAP0]]>) -> ()
+    "some_use"(%0) : (memref<4x128xf32>) -> ()
+  }
+  return
+}
+
+// -----
+// CHECK: #[[$MAP0:.*]] = affine_map<(d0, d1)[s0] -> (d0 * 128 + s0 + d1)>
+
+// CHECK-LABEL: func @multi_buffer_subview_use
+func @multi_buffer_subview_use(%a: memref<1024x1024xf32>) {
+// CHECK-DAG: %[[A:.*]] = memref.alloc() : memref<5x4x128xf32>
+// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[C3:.*]] = arith.constant 3 : index
+  %0 = memref.alloc() : memref<4x128xf32>
+  %c1024 = arith.constant 1024 : index
+  %c1 = arith.constant 1 : index
+  %c3 = arith.constant 3 : index
+// CHECK: scf.for %[[IV:.*]] = %[[C1]]
+  scf.for %arg2 = %c1 to %c1024 step %c3 {
+// CHECK: %[[S:.*]] = arith.subi %[[IV]], %[[C1]] : index
+// CHECK: %[[D:.*]] = arith.divsi %[[S]], %[[C3]] : index
+// CHECK: %[[C5:.*]] = arith.constant 5 : index
+// CHECK: %[[I:.*]] = arith.remsi %[[D]], %[[C5]] : index
+// CHECK: %[[SV:.*]] = memref.subview %[[A]][%[[I]], 0, 0] [1, 4, 128] [1, 1, 1] : memref<5x4x128xf32> to memref<4x128xf32, #[[$MAP0]]>
+    %1 = memref.subview %a[%arg2, 0] [4, 128] [1, 1] :
+      memref<1024x1024xf32> to memref<4x128xf32, affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)>>
+// CHECK: memref.copy %{{.*}}, %[[SV]] : memref<4x128xf32, #{{.*}}> to memref<4x128xf32, #[[$MAP0]]>
+    memref.copy %1, %0 : memref<4x128xf32, affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)>> to memref<4x128xf32>
+// CHECK: %[[SV1:.*]] = memref.subview %[[SV]][0, 1] [4, 127] [1, 1] : memref<4x128xf32, #[[$MAP0]]> to memref<4x127xf32, #[[$MAP0]]>
+    %s = memref.subview %0[0, 1] [4, 127] [1, 1] :
+      memref<4x128xf32> to memref<4x127xf32, affine_map<(d0, d1) -> (d0 * 128 + d1 + 1)>>
+// CHECK: "some_use"(%[[SV1]]) : (memref<4x127xf32, #[[$MAP0]]>) -> ()
+    "some_use"(%s) : (memref<4x127xf32, affine_map<(d0, d1) -> (d0 * 128 + d1 + 1)>>) -> ()
+// CHECK: "some_use"(%[[SV]]) : (memref<4x128xf32, #[[$MAP0]]>) -> ()
+    "some_use"(%0) : (memref<4x128xf32>) -> ()
+  }
+  return
+}
+
+// -----
+
+// CHECK-LABEL: func @multi_buffer_negative
+func @multi_buffer_negative(%a: memref<1024x1024xf32>) {
+// CHECK-NOT: %{{.*}} = memref.alloc() : memref<5x4x128xf32>
+// CHECK: %{{.*}} = memref.alloc() : memref<4x128xf32>
+  %0 = memref.alloc() : memref<4x128xf32>
+  %c1024 = arith.constant 1024 : index
+  %c0 = arith.constant 0 : index
+  %c3 = arith.constant 3 : index
+  scf.for %arg2 = %c0 to %c1024 step %c3 {
+    "blocking_use"(%0) : (memref<4x128xf32>) -> ()
+    %1 = memref.subview %a[%arg2, 0] [4, 128] [1, 1] :
+      memref<1024x1024xf32> to memref<4x128xf32, affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)>>
+    memref.copy %1, %0 : memref<4x128xf32, affine_map<(d0, d1)[s0] -> (d0 * 1024 + s0 + d1)>> to memref<4x128xf32>
+    "some_use"(%0) : (memref<4x128xf32>) -> ()
+  }
+  return
+}
Index: mlir/test/lib/Dialect/MemRef/CMakeLists.txt
===================================================================
--- mlir/test/lib/Dialect/MemRef/CMakeLists.txt
+++ mlir/test/lib/Dialect/MemRef/CMakeLists.txt
@@ -1,11 +1,13 @@
 # Exclude tests from libMLIR.so
 add_mlir_library(MLIRMemRefTestPasses
   TestComposeSubView.cpp
+  TestMultiBuffering.cpp
 
   EXCLUDE_FROM_LIBMLIR
 
   LINK_LIBS PUBLIC
   MLIRPass
+  MLIRMemRef
   MLIRMemRefTransforms
   MLIRTestDialect
   )
Index: mlir/test/lib/Dialect/MemRef/TestMultiBuffering.cpp
===================================================================
--- /dev/null
+++ mlir/test/lib/Dialect/MemRef/TestMultiBuffering.cpp
@@ -0,0 +1,49 @@
+//===- TestMultiBuffering.cpp - Test multi buffering ----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/MemRef/IR/MemRef.h"
+#include "mlir/Dialect/MemRef/Transforms/Passes.h"
+#include "mlir/Pass/Pass.h"
+
+using namespace mlir;
+
+namespace {
+struct TestMultiBufferingPass
+    : public PassWrapper<TestMultiBufferingPass, OperationPass<FuncOp>> {
+  TestMultiBufferingPass() = default;
+  TestMultiBufferingPass(const TestMultiBufferingPass &pass)
+      : PassWrapper(pass) {}
+
+  StringRef getArgument() const final { return "test-multi-buffering"; }
+  StringRef getDescription() const final {
+    return "Test multi buffering transformation";
+  }
+  void runOnOperation() override;
+  Option<unsigned> multiplier{
+      *this, "multiplier",
+      llvm::cl::desc(
+          "Decide how many versions of the buffer should be created,"),
+      llvm::cl::init(2)};
+};
+
+void TestMultiBufferingPass::runOnOperation() {
+  // Collect the allocations first: multiBuffering erases ops, so we must not
+  // mutate the IR while walking it.
+  SmallVector<memref::AllocOp> allocs;
+  getOperation().walk(
+      [&allocs](memref::AllocOp alloc) { allocs.push_back(alloc); });
+  for (memref::AllocOp alloc : allocs)
+    multiBuffering(alloc, multiplier);
+}
+} // namespace
+
+namespace mlir {
+namespace test {
+void registerTestMultiBuffering() {
+  PassRegistration<TestMultiBufferingPass>();
+}
+} // namespace test
+} // namespace mlir
Index: mlir/tools/mlir-opt/mlir-opt.cpp
===================================================================
--- mlir/tools/mlir-opt/mlir-opt.cpp
+++ mlir/tools/mlir-opt/mlir-opt.cpp
@@ -75,6 +75,7 @@
 void registerTestDynamicPipelinePass();
 void registerTestExpandTanhPass();
 void registerTestComposeSubView();
+void registerTestMultiBuffering();
 void registerTestGpuParallelLoopMappingPass();
 void registerTestIRVisitorsPass();
 void registerTestGenericIRVisitorsPass();
@@ -167,6 +168,7 @@
   mlir::test::registerTestDynamicPipelinePass();
   mlir::test::registerTestExpandTanhPass();
   mlir::test::registerTestComposeSubView();
+  mlir::test::registerTestMultiBuffering();
   mlir::test::registerTestGpuParallelLoopMappingPass();
   mlir::test::registerTestIRVisitorsPass();
   mlir::test::registerTestGenericIRVisitorsPass();
Index: utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
===================================================================
--- utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+++ utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -7911,6 +7911,7 @@
         ":MemRefDialect",
         ":MemRefPassIncGen",
         ":Pass",
+        ":SCFDialect",
        ":StandardOps",
         ":Support",
         ":TensorDialect",
Index: utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
===================================================================
--- utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
+++ utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
@@ -438,6 +438,7 @@
     deps = [
         ":TestDialect",
         "//mlir:Affine",
+        "//mlir:MemRefDialect",
        "//mlir:MemRefTransforms",
         "//mlir:Pass",
         "//mlir:Transforms",