diff --git a/mlir/include/mlir/Transforms/TopologicalSortUtils.h b/mlir/include/mlir/Transforms/TopologicalSortUtils.h
--- a/mlir/include/mlir/Transforms/TopologicalSortUtils.h
+++ b/mlir/include/mlir/Transforms/TopologicalSortUtils.h
@@ -95,6 +95,16 @@
     Block *block,
     function_ref<bool(Value, Operation *)> isOperandReady = nullptr);
 
+/// Compute a topological ordering of the given ops. All ops must belong to the
+/// specified block.
+///
+/// Note: If the specified ops contain incomplete/interrupted SSA use-def
+/// chains, the result may not actually be a topological sorting with respect
+/// to the entire program.
+SmallVector<Operation *> computeTopologicalSorting(
+    Block *block, ArrayRef<Operation *> ops,
+    function_ref<bool(Value, Operation *)> isOperandReady = nullptr);
+
 } // end namespace mlir
 
 #endif // MLIR_TRANSFORMS_TOPOLOGICALSORTUTILS_H
diff --git a/mlir/lib/Transforms/Utils/TopologicalSortUtils.cpp b/mlir/lib/Transforms/Utils/TopologicalSortUtils.cpp
--- a/mlir/lib/Transforms/Utils/TopologicalSortUtils.cpp
+++ b/mlir/lib/Transforms/Utils/TopologicalSortUtils.cpp
@@ -8,29 +8,19 @@
 
 #include "mlir/Transforms/TopologicalSortUtils.h"
 #include "mlir/IR/OpDefinition.h"
+#include "llvm/ADT/SetVector.h"
 
 using namespace mlir;
 
-bool mlir::sortTopologically(
-    Block *block, llvm::iterator_range<Block::iterator> ops,
-    function_ref<bool(Value, Operation *)> isOperandReady) {
-  if (ops.empty())
-    return true;
-
-  // The set of operations that have not yet been scheduled.
-  DenseSet<Operation *> unscheduledOps;
-  // Mark all operations as unscheduled.
-  for (Operation &op : ops)
-    unscheduledOps.insert(&op);
-
-  Block::iterator nextScheduledOp = ops.begin();
-  Block::iterator end = ops.end();
-
+/// Return `true` if the given operation is ready to be scheduled.
+template <typename OpSetT>
+static bool isOpReady(Block *block, Operation *op, OpSetT &unscheduledOps,
+                      function_ref<bool(Value, Operation *)> isOperandReady) {
   // An operation is ready to be scheduled if all its operands are ready. An
   // operation is ready if:
   const auto isReady = [&](Value value, Operation *top) {
     // - the user-provided callback marks it as ready,
-    if (isOperandReady && isOperandReady(value, top))
+    if (isOperandReady && isOperandReady(value, op))
       return true;
     Operation *parent = value.getDefiningOp();
     // - it is a block argument,
@@ -41,12 +31,83 @@
     if (!ancestor)
       return true;
     // - it is defined in a nested region, or
-    if (ancestor == top)
+    if (ancestor == op)
       return true;
     // - its ancestor in the block is scheduled.
     return !unscheduledOps.contains(ancestor);
   };
 
+  // An operation is recursively ready to be scheduled if it and its nested
+  // operations are ready.
+  WalkResult readyToSchedule = op->walk([&](Operation *nestedOp) {
+    return llvm::all_of(nestedOp->getOperands(),
+                        [&](Value operand) { return isReady(operand, op); })
+               ? WalkResult::advance()
+               : WalkResult::interrupt();
+  });
+  return !readyToSchedule.wasInterrupted();
+}
+
+SmallVector<Operation *> mlir::computeTopologicalSorting(
+    Block *block, ArrayRef<Operation *> ops,
+    function_ref<bool(Value, Operation *)> isOperandReady) {
+  if (ops.empty())
+    return {};
+
+  // The set of operations that have not yet been scheduled.
+  SetVector<Operation *> unscheduledOps;
+
+  // Mark all operations as unscheduled.
+  for (Operation *op : ops) {
+    assert(op->getBlock() == block && "op must belong to block");
+    unscheduledOps.insert(op);
+  }
+
+  SmallVector<Operation *> orderedOps;
+  while (!unscheduledOps.empty()) {
+    bool scheduledAtLeastOnce = false;
+
+    // Loop over the ops that are not sorted yet; try to find the ones "ready",
+    // i.e. the ones for which there isn't any operand produced by an op in the
+    // set.
+    unsigned i = 0;
+    while (i < unscheduledOps.size()) {
+      Operation *op = unscheduledOps[i];
+      if (!isOpReady(block, op, unscheduledOps, isOperandReady)) {
+        ++i;
+        continue;
+      }
+      unscheduledOps.erase(unscheduledOps.begin() + i);
+      orderedOps.push_back(op);
+      scheduledAtLeastOnce = true;
+    }
+
+    // If no operations were scheduled, just schedule the first op and continue.
+    if (!scheduledAtLeastOnce) {
+      Operation *op = unscheduledOps.front();
+      unscheduledOps.erase(unscheduledOps.begin());
+      orderedOps.push_back(op);
+    }
+  }
+
+  return orderedOps;
+}
+
+bool mlir::sortTopologically(
+    Block *block, llvm::iterator_range<Block::iterator> ops,
+    function_ref<bool(Value, Operation *)> isOperandReady) {
+  if (ops.empty())
+    return true;
+
+  // The set of operations that have not yet been scheduled.
+  DenseSet<Operation *> unscheduledOps;
+  // Mark all operations as unscheduled.
+  for (Operation &op : ops)
+    unscheduledOps.insert(&op);
+
+  Block::iterator nextScheduledOp = ops.begin();
+  Block::iterator end = ops.end();
+
   bool allOpsScheduled = true;
   while (!unscheduledOps.empty()) {
     bool scheduledAtLeastOnce = false;
@@ -56,16 +117,7 @@
     // set, and "schedule" it (move it before the `nextScheduledOp`).
     for (Operation &op :
          llvm::make_early_inc_range(llvm::make_range(nextScheduledOp, end))) {
-      // An operation is recursively ready to be scheduled of it and its nested
-      // operations are ready.
-      WalkResult readyToSchedule = op.walk([&](Operation *nestedOp) {
-        return llvm::all_of(
-                   nestedOp->getOperands(),
-                   [&](Value operand) { return isReady(operand, &op); })
-                   ? WalkResult::advance()
-                   : WalkResult::interrupt();
-      });
-      if (readyToSchedule.wasInterrupted())
+      if (!isOpReady(block, &op, unscheduledOps, isOperandReady))
         continue;
 
       // Schedule the operation by moving it to the start.
diff --git a/mlir/test/Transforms/test-toposort.mlir b/mlir/test/Transforms/test-toposort.mlir
--- a/mlir/test/Transforms/test-toposort.mlir
+++ b/mlir/test/Transforms/test-toposort.mlir
@@ -1,14 +1,20 @@
 // RUN: mlir-opt -topological-sort %s | FileCheck %s
+// RUN: mlir-opt -test-topological-sort-analysis %s | FileCheck %s -check-prefix=CHECK-ANALYSIS
 
 // Test producer is after user.
 // CHECK-LABEL: test.graph_region
-test.graph_region {
+// CHECK-ANALYSIS-LABEL: test.graph_region
+test.graph_region attributes{"__root__"} {
   // CHECK-NEXT: test.foo
   // CHECK-NEXT: test.baz
   // CHECK-NEXT: test.bar
-  %0 = "test.foo"() : () -> i32
-  "test.bar"(%1, %0) : (i32, i32) -> ()
-  %1 = "test.baz"() : () -> i32
+
+  // CHECK-ANALYSIS-NEXT: test.foo{{.*}} {__pos__ = 0
+  // CHECK-ANALYSIS-NEXT: test.bar{{.*}} {__pos__ = 2
+  // CHECK-ANALYSIS-NEXT: test.baz{{.*}} {__pos__ = 1
+  %0 = "test.foo"() {__selected__} : () -> i32
+  "test.bar"(%1, %0) {__selected__} : (i32, i32) -> ()
+  %1 = "test.baz"() {__selected__} : () -> i32
 }
 
 // Test cycles.
diff --git a/mlir/test/lib/Transforms/CMakeLists.txt b/mlir/test/lib/Transforms/CMakeLists.txt
--- a/mlir/test/lib/Transforms/CMakeLists.txt
+++ b/mlir/test/lib/Transforms/CMakeLists.txt
@@ -5,6 +5,7 @@
   TestControlFlowSink.cpp
   TestInlining.cpp
   TestIntRangeInference.cpp
+  TestTopologicalSort.cpp
 
   EXCLUDE_FROM_LIBMLIR
diff --git a/mlir/test/lib/Transforms/TestTopologicalSort.cpp b/mlir/test/lib/Transforms/TestTopologicalSort.cpp
new file mode 100644
--- /dev/null
+++ b/mlir/test/lib/Transforms/TestTopologicalSort.cpp
@@ -0,0 +1,69 @@
+//===- TestTopologicalSort.cpp - Test topological sort analysis ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a test pass for the topological sort utilities. It
+// computes a topological ordering of the ops selected via attributes and
+// annotates each op with its position in that ordering.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/IR/Builders.h"
+#include "mlir/IR/BuiltinOps.h"
+#include "mlir/Pass/Pass.h"
+#include "mlir/Transforms/TopologicalSortUtils.h"
+
+using namespace mlir;
+
+namespace {
+struct TestTopologicalSortAnalysisPass
+    : public PassWrapper<TestTopologicalSortAnalysisPass, OperationPass<>> {
+  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestTopologicalSortAnalysisPass)
+
+  StringRef getArgument() const final {
+    return "test-topological-sort-analysis";
+  }
+  StringRef getDescription() const final {
+    return "Test topological sorting of ops";
+  }
+
+  void runOnOperation() override {
+    Operation *op = getOperation();
+    OpBuilder builder(op->getContext());
+
+    op->walk([&](Operation *root) {
+      if (!root->hasAttr("__root__"))
+        return WalkResult::advance();
+
+      assert(root->getNumRegions() == 1 && root->getRegion(0).hasOneBlock() &&
+             "expected one block");
+      Block *block = &root->getRegion(0).front();
+      SmallVector<Operation *> selectedOps;
+      block->walk([&](Operation *op) {
+        if (op->hasAttr("__selected__"))
+          selectedOps.push_back(op);
+      });
+
+      SmallVector<Operation *> orderedOps =
+          computeTopologicalSorting(block, selectedOps);
+      for (const auto &it : llvm::enumerate(orderedOps))
+        it.value()->setAttr("__pos__", builder.getIndexAttr(it.index()));
+
+      return WalkResult::advance();
+    });
+  }
+};
+} // namespace
+
+namespace mlir {
+namespace test {
+void registerTestTopologicalSortAnalysisPass() {
+  PassRegistration<TestTopologicalSortAnalysisPass>();
+}
+} // namespace test
+} // namespace mlir
diff --git a/mlir/tools/mlir-opt/mlir-opt.cpp b/mlir/tools/mlir-opt/mlir-opt.cpp
--- a/mlir/tools/mlir-opt/mlir-opt.cpp
+++ b/mlir/tools/mlir-opt/mlir-opt.cpp
@@ -111,6 +111,7 @@
 void registerTestSliceAnalysisPass();
 void registerTestTensorTransforms();
 void registerTestTilingInterface();
+void registerTestTopologicalSortAnalysisPass();
 void registerTestTransformDialectInterpreterPass();
 void registerTestVectorLowerings();
 void registerTestNvgpuLowerings();
@@ -207,6 +208,7 @@
   mlir::test::registerTestSliceAnalysisPass();
   mlir::test::registerTestTensorTransforms();
   mlir::test::registerTestTilingInterface();
+  mlir::test::registerTestTopologicalSortAnalysisPass();
   mlir::test::registerTestTransformDialectInterpreterPass();
   mlir::test::registerTestVectorLowerings();
   mlir::test::registerTestNvgpuLowerings();
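
Usage note (not part of the patch): a minimal sketch of how client code might call the new computeTopologicalSorting entry point, in the spirit of the test pass above. The helper name dumpSortedSubset and the printing are illustrative assumptions, not APIs added by this change.

  // Sketch only: sort a pre-collected subset of ops from `block` and print the
  // resulting order. Values defined by ops outside the subset are treated as
  // ready, so the ordering is topological only with respect to the subset.
  #include "mlir/IR/Operation.h"
  #include "mlir/Transforms/TopologicalSortUtils.h"
  #include "llvm/ADT/STLExtras.h"
  #include "llvm/Support/raw_ostream.h"

  using namespace mlir;

  static void dumpSortedSubset(Block *block, ArrayRef<Operation *> selectedOps) {
    SmallVector<Operation *> orderedOps =
        computeTopologicalSorting(block, selectedOps);
    for (const auto &it : llvm::enumerate(orderedOps))
      llvm::outs() << it.index() << ": " << it.value()->getName() << "\n";
  }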