diff --git a/llvm/include/llvm/Transforms/Utils/CodeLayout.h b/llvm/include/llvm/Transforms/Utils/CodeLayout.h
new file mode 100644
--- /dev/null
+++ b/llvm/include/llvm/Transforms/Utils/CodeLayout.h
@@ -0,0 +1,58 @@
+//===- CodeLayout.h - Code layout/placement algorithms ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// Declares methods and data structures for code layout algorithms.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CODELAYOUT_H
+#define LLVM_TRANSFORMS_UTILS_CODELAYOUT_H
+
+#include "llvm/ADT/DenseMap.h"
+
+#include <vector>
+
+namespace llvm {
+
+class MachineBasicBlock;
+
+/// Find a layout of nodes (basic blocks) of a given CFG optimizing jump
+/// locality and thus processor I-cache utilization. This is achieved via
+/// increasing the number of fall-through jumps and co-locating frequently
+/// executed nodes together.
+/// The nodes are assumed to be indexed by integers from [0, |V|) so that the
+/// current order is the identity permutation.
+/// \p NodeSizes: The sizes of the nodes (in bytes).
+/// \p NodeCounts: The execution counts of the nodes in the profile.
+/// \p EdgeCounts: The execution counts of every edge (jump) in the profile. The
+/// map also defines the edges in CFG and should include 0-count edges.
+/// \returns The best block order found.
+std::vector<uint64_t> applyExtTspLayout(
+    const std::vector<uint64_t> &NodeSizes,
+    const std::vector<uint64_t> &NodeCounts,
+    const DenseMap<std::pair<uint64_t, uint64_t>, uint64_t> &EdgeCounts);
+
+/// Estimate the "quality" of a given node order in CFG. The higher the score,
+/// the better the order is. The score is designed to reflect the locality of
+/// the given order, which is anti-correlated with the number of I-cache misses
+/// in a typical execution of the function.
+double calcExtTspScore(
+    const std::vector<uint64_t> &Order, const std::vector<uint64_t> &NodeSizes,
+    const std::vector<uint64_t> &NodeCounts,
+    const DenseMap<std::pair<uint64_t, uint64_t>, uint64_t> &EdgeCounts);
+
+/// Estimate the "quality" of the current node order in CFG.
+double calcExtTspScore(
+    const std::vector<uint64_t> &NodeSizes,
+    const std::vector<uint64_t> &NodeCounts,
+    const DenseMap<std::pair<uint64_t, uint64_t>, uint64_t> &EdgeCounts);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_CODELAYOUT_H
diff --git a/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
--- a/llvm/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
@@ -61,6 +61,7 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Target/TargetMachine.h"
+#include "llvm/Transforms/Utils/CodeLayout.h"
 #include <algorithm>
 #include <cassert>
 #include <cstdint>
@@ -193,6 +194,11 @@
                          cl::init(2), cl::Hidden);
 
+static cl::opt<bool> EnableExtTspBlockPlacement(
+    "enable-ext-tsp-block-placement", cl::Hidden, cl::init(false),
+    cl::desc("Enable machine block placement based on the ext-tsp model, "
+             "optimizing I-cache utilization."));
+
 namespace llvm {
 extern cl::opt<unsigned> StaticLikelyProb;
 extern cl::opt<unsigned> ProfileLikelyProb;
@@ -557,6 +563,15 @@
   /// but a local analysis would not find them.
   void precomputeTriangleChains();
 
+  /// Apply a post-processing step optimizing block placement.
+  void applyExtTsp();
+
+  /// Modify the existing block placement in the function and adjust all jumps.
+  void assignBlockOrder(const std::vector<const MachineBasicBlock *> &NewOrder);
+
+  /// Create a single CFG chain from the current block order.
+  void createCFGChainExtTsp();
+
 public:
   static char ID; // Pass identification, replacement for typeid
 
@@ -3387,6 +3402,15 @@
     }
   }
 
+  // Apply a post-processing step optimizing block placement.
+  if (MF.size() >= 3 && EnableExtTspBlockPlacement) {
+    // Find a new placement and modify the layout of the blocks in the function.
+    applyExtTsp();
+
+    // Re-create CFG chain so that we can optimizeBranches and alignBlocks.
+    createCFGChainExtTsp();
+  }
+
   optimizeBranches();
   alignBlocks();
 
@@ -3413,12 +3437,147 @@
     MBFI->view("MBP." + MF.getName(), false);
   }
-
   // We always return true as we have no way to track whether the final order
   // differs from the original order.
   return true;
 }
 
+void MachineBlockPlacement::applyExtTsp() {
+  // Prepare data; blocks are indexed by their index in the current ordering.
+  DenseMap<const MachineBasicBlock *, uint64_t> BlockIndex;
+  BlockIndex.reserve(F->size());
+  std::vector<const MachineBasicBlock *> CurrentBlockOrder;
+  CurrentBlockOrder.reserve(F->size());
+  size_t NumBlocks = 0;
+  for (const MachineBasicBlock &MBB : *F) {
+    BlockIndex[&MBB] = NumBlocks++;
+    CurrentBlockOrder.push_back(&MBB);
+  }
+
+  auto BlockSizes = std::vector<uint64_t>(F->size());
+  auto BlockCounts = std::vector<uint64_t>(F->size());
+  DenseMap<std::pair<uint64_t, uint64_t>, uint64_t> JumpCounts;
+  for (MachineBasicBlock &MBB : *F) {
+    // Getting the block frequency.
+    BlockFrequency BlockFreq = MBFI->getBlockFreq(&MBB);
+    BlockCounts[BlockIndex[&MBB]] = BlockFreq.getFrequency();
+    // Getting the block size:
+    // - approximate the size of an instruction by 4 bytes, and
+    // - ignore debug instructions.
+    // Note: getting the exact size of each block is target-dependent and can be
+    // done by extending the interface of MCCodeEmitter. Experimentally we do
+    // not see a perf improvement with the exact block sizes.
+    auto NonDbgInsts =
+        instructionsWithoutDebug(MBB.instr_begin(), MBB.instr_end());
+    int NumInsts = std::distance(NonDbgInsts.begin(), NonDbgInsts.end());
+    BlockSizes[BlockIndex[&MBB]] = 4 * NumInsts;
+    // Getting jump frequencies.
+    for (MachineBasicBlock *Succ : MBB.successors()) {
+      auto EP = MBPI->getEdgeProbability(&MBB, Succ);
+      BlockFrequency EdgeFreq = BlockFreq * EP;
+      auto Edge = std::make_pair(BlockIndex[&MBB], BlockIndex[Succ]);
+      JumpCounts[Edge] = EdgeFreq.getFrequency();
+    }
+  }
+
+  LLVM_DEBUG(dbgs() << "Applying ext-tsp layout for |V| = " << F->size()
+                    << " with profile = " << F->getFunction().hasProfileData()
+                    << " (" << F->getName().str() << ")"
+                    << "\n");
+  LLVM_DEBUG(
+      dbgs() << format("  original layout score: %0.2f\n",
+                       calcExtTspScore(BlockSizes, BlockCounts, JumpCounts)));
+
+  // Run the layout algorithm.
+  auto NewOrder = applyExtTspLayout(BlockSizes, BlockCounts, JumpCounts);
+  std::vector<const MachineBasicBlock *> NewBlockOrder;
+  NewBlockOrder.reserve(F->size());
+  for (uint64_t Node : NewOrder) {
+    NewBlockOrder.push_back(CurrentBlockOrder[Node]);
+  }
+  LLVM_DEBUG(dbgs() << format("  optimized layout score: %0.2f\n",
+                              calcExtTspScore(NewOrder, BlockSizes, BlockCounts,
+                                              JumpCounts)));
+
+  // Assign new block order.
+  assignBlockOrder(NewBlockOrder);
+}
+
+void MachineBlockPlacement::assignBlockOrder(
+    const std::vector<const MachineBasicBlock *> &NewBlockOrder) {
+  assert(F->size() == NewBlockOrder.size() && "Incorrect size of block order");
+  F->RenumberBlocks();
+
+  bool HasChanges = false;
+  for (size_t I = 0; I < NewBlockOrder.size(); I++) {
+    if (NewBlockOrder[I] != F->getBlockNumbered(I)) {
+      HasChanges = true;
+      break;
+    }
+  }
+  // Stop early if the new block order is identical to the existing one.
+  if (!HasChanges)
+    return;
+
+  SmallVector<MachineBasicBlock *, 4> PrevFallThroughs(F->getNumBlockIDs());
+  for (auto &MBB : *F) {
+    PrevFallThroughs[MBB.getNumber()] = MBB.getFallThrough();
+  }
+
+  // Sort basic blocks in the function according to the computed order.
+  DenseMap<const MachineBasicBlock *, size_t> NewIndex;
+  for (const MachineBasicBlock *MBB : NewBlockOrder) {
+    NewIndex[MBB] = NewIndex.size();
+  }
+  F->sort([&](MachineBasicBlock &L, MachineBasicBlock &R) {
+    return NewIndex[&L] < NewIndex[&R];
+  });
+
+  // Update basic block branches by inserting explicit fallthrough branches
+  // when required and re-optimize branches when possible.
+  const TargetInstrInfo *TII = F->getSubtarget().getInstrInfo();
+  SmallVector<MachineOperand, 4> Cond;
+  for (auto &MBB : *F) {
+    MachineFunction::iterator NextMBB = std::next(MBB.getIterator());
+    MachineFunction::iterator EndIt = MBB.getParent()->end();
+    auto *FTMBB = PrevFallThroughs[MBB.getNumber()];
+    // If this block had a fallthrough before, we need an explicit unconditional
+    // branch to that block if the fallthrough block is not adjacent to the
+    // block in the new order.
+    if (FTMBB && (NextMBB == EndIt || &*NextMBB != FTMBB)) {
+      TII->insertUnconditionalBranch(MBB, FTMBB, MBB.findBranchDebugLoc());
+    }
+
+    // It might be possible to optimize branches by flipping the condition.
+    Cond.clear();
+    MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
+    if (TII->analyzeBranch(MBB, TBB, FBB, Cond))
+      continue;
+    MBB.updateTerminator(FTMBB);
+  }
+
+#ifndef NDEBUG
+  // Make sure we correctly constructed all branches.
+  F->verify(this, "After optimized block reordering");
+#endif
+}
+
+void MachineBlockPlacement::createCFGChainExtTsp() {
+  BlockToChain.clear();
+  ComputedEdges.clear();
+  ChainAllocator.DestroyAll();
+
+  MachineBasicBlock *HeadBB = &F->front();
+  BlockChain *FunctionChain =
+      new (ChainAllocator.Allocate()) BlockChain(BlockToChain, HeadBB);
+
+  for (MachineBasicBlock &MBB : *F) {
+    if (HeadBB == &MBB)
+      continue; // Ignore head of the chain
+    FunctionChain->merge(&MBB, nullptr);
+  }
+}
+
 namespace {
 
 /// A pass to compute block placement statistics.
diff --git a/llvm/lib/Transforms/Utils/CMakeLists.txt b/llvm/lib/Transforms/Utils/CMakeLists.txt
--- a/llvm/lib/Transforms/Utils/CMakeLists.txt
+++ b/llvm/lib/Transforms/Utils/CMakeLists.txt
@@ -14,6 +14,7 @@
   CloneFunction.cpp
   CloneModule.cpp
   CodeExtractor.cpp
+  CodeLayout.cpp
   CodeMoverUtils.cpp
   CtorUtils.cpp
   Debugify.cpp
diff --git a/llvm/lib/Transforms/Utils/CodeLayout.cpp b/llvm/lib/Transforms/Utils/CodeLayout.cpp
new file mode 100644
--- /dev/null
+++ b/llvm/lib/Transforms/Utils/CodeLayout.cpp
@@ -0,0 +1,942 @@
+//===- CodeLayout.cpp - Implementation of code layout algorithms ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ExtTSP - layout of basic blocks with i-cache optimization.
+//
+// The algorithm tries to find a layout of nodes (basic blocks) of a given CFG
+// optimizing jump locality and thus processor I-cache utilization. This is
+// achieved via increasing the number of fall-through jumps and co-locating
+// frequently executed nodes together. The name follows the underlying
+// optimization problem, Extended-TSP, which is a generalization of the
+// classical (maximum) Traveling Salesman Problem.
+//
+// The algorithm is a greedy heuristic that works with chains (ordered lists)
+// of basic blocks. Initially all chains are isolated basic blocks. On every
+// iteration, we pick a pair of chains whose merging yields the biggest increase
+// in the ExtTSP score, which models how i-cache "friendly" a specific chain is.
+// A pair of chains giving the maximum gain is merged into a new chain. The
+// procedure stops when there is only one chain left, or when merging does not
+// increase ExtTSP. In the latter case, the remaining chains are sorted by
+// density in decreasing order.
+//
+// An important aspect is the way two chains are merged. Unlike earlier
+// algorithms (e.g., those based on the approach of Pettis-Hansen), the first
+// chain, X, is split into two sub-chains, X1 and X2. We then consider all
+// possible ways of gluing the three chains X1, X2, and Y (e.g., X1YX2, X1X2Y,
+// X2X1Y, X2YX1, YX1X2, YX2X1) and choose the one producing the largest score.
+// This improves the quality of the final result (the search space is larger)
+// while keeping the implementation sufficiently fast.
+//
+// Reference:
+//   * A. Newell and S. Pupyrev, Improved Basic Block Reordering,
+//     IEEE Transactions on Computers, 2020
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Utils/CodeLayout.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+
+#include <algorithm>
+#include <cmath>
+
+using namespace llvm;
+#define DEBUG_TYPE "code-layout"
+
+// Algorithm-specific constants. The values are tuned for the best performance
+// of large-scale front-end bound binaries.
+static cl::opt<double>
+    ForwardWeight("ext-tsp-forward-weight", cl::Hidden, cl::init(0.1),
+                  cl::desc("The weight of forward jumps for ExtTSP value"));
+
+static cl::opt<double>
+    BackwardWeight("ext-tsp-backward-weight", cl::Hidden, cl::init(0.1),
+                   cl::desc("The weight of backward jumps for ExtTSP value"));
+
+static cl::opt<unsigned> ForwardDistance(
+    "ext-tsp-forward-distance", cl::Hidden, cl::init(1024),
+    cl::desc("The maximum distance (in bytes) of a forward jump for ExtTSP"));
+
+static cl::opt<unsigned> BackwardDistance(
+    "ext-tsp-backward-distance", cl::Hidden, cl::init(640),
+    cl::desc("The maximum distance (in bytes) of a backward jump for ExtTSP"));
+
+// The maximum size of a chain for splitting. Larger values of the threshold
+// may yield better quality at the cost of worse run-time.
+static cl::opt<unsigned> ChainSplitThreshold(
+    "ext-tsp-chain-split-threshold", cl::Hidden, cl::init(128),
+    cl::desc("The maximum size of a chain to apply splitting"));
+
+// The option enables splitting (large) chains along in-coming and out-going
+// jumps. This typically results in better quality.
+static cl::opt<bool> EnableChainSplitAlongJumps(
+    "ext-tsp-enable-chain-split-along-jumps", cl::Hidden, cl::init(true),
+    cl::desc("Whether to enable splitting chains along in-coming and "
+             "out-going jumps"));
+
+namespace {
+
+// Epsilon for comparison of doubles.
+constexpr double EPS = 1e-8;
+
+// Compute the Ext-TSP score for a jump between a given pair of blocks,
+// using their sizes, (estimated) addresses and the jump execution count.
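+//
+// For example, with the default parameters, a fall-through jump that is
+// executed Count times contributes exactly Count to the score; a forward
+// jump of 512 bytes contributes 0.1 * (1 - 512/1024) * Count = 0.05 * Count;
+// and a jump longer than the corresponding distance threshold contributes
+// nothing.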
+double extTSPScore(uint64_t SrcAddr, uint64_t SrcSize, uint64_t DstAddr,
+                   uint64_t Count) {
+  // Fallthrough
+  if (SrcAddr + SrcSize == DstAddr) {
+    // Assume that FallthroughWeight = 1.0 after normalization
+    return static_cast<double>(Count);
+  }
+  // Forward
+  if (SrcAddr + SrcSize < DstAddr) {
+    const auto Dist = DstAddr - (SrcAddr + SrcSize);
+    if (Dist <= ForwardDistance) {
+      double Prob = 1.0 - static_cast<double>(Dist) / ForwardDistance;
+      return ForwardWeight * Prob * Count;
+    }
+    return 0;
+  }
+  // Backward
+  const auto Dist = SrcAddr + SrcSize - DstAddr;
+  if (Dist <= BackwardDistance) {
+    double Prob = 1.0 - static_cast<double>(Dist) / BackwardDistance;
+    return BackwardWeight * Prob * Count;
+  }
+  return 0;
+}
+
+/// A type of merging two chains, X and Y. The former chain is split into
+/// X1 and X2 and then concatenated with Y in the order specified by the type.
+enum class MergeTypeTy : int { X_Y, X1_Y_X2, Y_X2_X1, X2_X1_Y };
+
+/// The gain of merging two chains, that is, the Ext-TSP score of the merge
+/// together with the corresponding merge 'type' and 'offset'.
+class MergeGainTy {
+public:
+  explicit MergeGainTy() {}
+  explicit MergeGainTy(double Score, size_t MergeOffset, MergeTypeTy MergeType)
+      : Score(Score), MergeOffset(MergeOffset), MergeType(MergeType) {}
+
+  double score() const { return Score; }
+
+  size_t mergeOffset() const { return MergeOffset; }
+
+  MergeTypeTy mergeType() const { return MergeType; }
+
+  // Returns 'true' iff Other is preferred over this.
+  bool operator<(const MergeGainTy &Other) const {
+    return (Other.Score > EPS && Other.Score > Score + EPS);
+  }
+
+  // Update the current gain if Other is preferred over this.
+  void updateIfLessThan(const MergeGainTy &Other) {
+    if (*this < Other)
+      *this = Other;
+  }
+
+private:
+  double Score{-1.0};
+  size_t MergeOffset{0};
+  MergeTypeTy MergeType{MergeTypeTy::X_Y};
+};
+
+class Block;
+class Jump;
+class Chain;
+class ChainEdge;
+
+/// A node in the graph, typically corresponding to a basic block in CFG.
+class Block {
+public:
+  Block(const Block &) = delete;
+  Block(Block &&) = default;
+  Block &operator=(const Block &) = delete;
+  Block &operator=(Block &&) = default;
+
+  // The original index of the block in CFG.
+  size_t Index{0};
+  // The index of the block in the current chain.
+  size_t CurIndex{0};
+  // Size of the block in the binary.
+  uint64_t Size{0};
+  // Execution count of the block in the profile data.
+  uint64_t ExecutionCount{0};
+  // Current chain of the node.
+  Chain *CurChain{nullptr};
+  // An offset of the block in the current chain.
+  mutable uint64_t EstimatedAddr{0};
+  // Forced successor of the block in CFG.
+  Block *ForcedSucc{nullptr};
+  // Forced predecessor of the block in CFG.
+  Block *ForcedPred{nullptr};
+  // Outgoing jumps from the block.
+  std::vector<Jump *> OutJumps;
+  // Incoming jumps to the block.
+  std::vector<Jump *> InJumps;
+
+public:
+  explicit Block(size_t Index, uint64_t Size_, uint64_t EC)
+      : Index(Index), Size(Size_), ExecutionCount(EC) {}
+  bool isEntry() const { return Index == 0; }
+};
+
+/// An arc in the graph, typically corresponding to a jump between two blocks.
+class Jump {
+public:
+  Jump(const Jump &) = delete;
+  Jump(Jump &&) = default;
+  Jump &operator=(const Jump &) = delete;
+  Jump &operator=(Jump &&) = default;
+
+  // Source block of the jump.
+  Block *Source;
+  // Target block of the jump.
+  Block *Target;
+  // Execution count of the arc in the profile data.
+  uint64_t ExecutionCount{0};
+
+public:
+  explicit Jump(Block *Source, Block *Target, uint64_t ExecutionCount)
+      : Source(Source), Target(Target), ExecutionCount(ExecutionCount) {}
+};
+
+/// A chain (ordered sequence) of blocks.
+class Chain {
+public:
+  Chain(const Chain &) = delete;
+  Chain(Chain &&) = default;
+  Chain &operator=(const Chain &) = delete;
+  Chain &operator=(Chain &&) = default;
+
+  explicit Chain(uint64_t Id, Block *Block)
+      : Id(Id), Score(0), Blocks(1, Block) {}
+
+  uint64_t id() const { return Id; }
+
+  bool isEntry() const { return Blocks[0]->Index == 0; }
+
+  double score() const { return Score; }
+
+  void setScore(double NewScore) { Score = NewScore; }
+
+  const std::vector<Block *> &blocks() const { return Blocks; }
+
+  const std::vector<std::pair<Chain *, ChainEdge *>> &edges() const {
+    return Edges;
+  }
+
+  ChainEdge *getEdge(Chain *Other) const {
+    for (auto It : Edges) {
+      if (It.first == Other)
+        return It.second;
+    }
+    return nullptr;
+  }
+
+  void removeEdge(Chain *Other) {
+    auto It = Edges.begin();
+    while (It != Edges.end()) {
+      if (It->first == Other) {
+        Edges.erase(It);
+        return;
+      }
+      It++;
+    }
+  }
+
+  void addEdge(Chain *Other, ChainEdge *Edge) {
+    Edges.push_back(std::make_pair(Other, Edge));
+  }
+
+  void merge(Chain *Other, const std::vector<Block *> &MergedBlocks) {
+    Blocks = MergedBlocks;
+    // Update the block's chains
+    for (size_t Idx = 0; Idx < Blocks.size(); Idx++) {
+      Blocks[Idx]->CurChain = this;
+      Blocks[Idx]->CurIndex = Idx;
+    }
+  }
+
+  void mergeEdges(Chain *Other);
+
+  void clear() {
+    Blocks.clear();
+    Blocks.shrink_to_fit();
+    Edges.clear();
+    Edges.shrink_to_fit();
+  }
+
+private:
+  // Unique chain identifier.
+  uint64_t Id;
+  // Cached ext-tsp score for the chain.
+  double Score;
+  // Blocks of the chain.
+  std::vector<Block *> Blocks;
+  // Adjacent chains and corresponding edges (lists of jumps).
+  std::vector<std::pair<Chain *, ChainEdge *>> Edges;
+};
+
+/// An edge in CFG representing jumps between two chains.
+/// When blocks are merged into chains, the edges are combined too so that
+/// there is always at most one edge between a pair of chains.
+class ChainEdge {
+public:
+  ChainEdge(const ChainEdge &) = delete;
+  ChainEdge(ChainEdge &&) = default;
+  ChainEdge &operator=(const ChainEdge &) = delete;
+  ChainEdge &operator=(ChainEdge &&) = default;
+
+  explicit ChainEdge(Jump *Jump)
+      : SrcChain(Jump->Source->CurChain), DstChain(Jump->Target->CurChain),
+        Jumps(1, Jump) {}
+
+  const std::vector<Jump *> &jumps() const { return Jumps; }
+
+  void changeEndpoint(Chain *From, Chain *To) {
+    if (From == SrcChain)
+      SrcChain = To;
+    if (From == DstChain)
+      DstChain = To;
+  }
+
+  void appendJump(Jump *Jump) { Jumps.push_back(Jump); }
+
+  void moveJumps(ChainEdge *Other) {
+    Jumps.insert(Jumps.end(), Other->Jumps.begin(), Other->Jumps.end());
+    Other->Jumps.clear();
+    Other->Jumps.shrink_to_fit();
+  }
+
+  bool hasCachedMergeGain(Chain *Src, Chain *Dst) const {
+    return Src == SrcChain ? CacheValidForward : CacheValidBackward;
+  }
+
+  MergeGainTy getCachedMergeGain(Chain *Src, Chain *Dst) const {
+    return Src == SrcChain ? CachedGainForward : CachedGainBackward;
+  }
+
+  void setCachedMergeGain(Chain *Src, Chain *Dst, MergeGainTy MergeGain) {
+    if (Src == SrcChain) {
+      CachedGainForward = MergeGain;
+      CacheValidForward = true;
+    } else {
+      CachedGainBackward = MergeGain;
+      CacheValidBackward = true;
+    }
+  }
+
+  void invalidateCache() {
+    CacheValidForward = false;
+    CacheValidBackward = false;
+  }
+
+private:
+  // Source chain.
+  Chain *SrcChain{nullptr};
+  // Destination chain.
+  Chain *DstChain{nullptr};
+  // Original jumps in the binary with corresponding execution counts.
+  std::vector<Jump *> Jumps;
+  // Cached ext-tsp value for merging the pair of chains.
+  // Since the gain of merging (Src, Dst) and (Dst, Src) might be different,
+  // we store both values here.
+  MergeGainTy CachedGainForward;
+  MergeGainTy CachedGainBackward;
+  // Whether the cached value must be recomputed.
+  bool CacheValidForward{false};
+  bool CacheValidBackward{false};
+};
+
+void Chain::mergeEdges(Chain *Other) {
+  assert(this != Other && "cannot merge a chain with itself");
+
+  // Update edges adjacent to chain Other
+  for (auto EdgeIt : Other->Edges) {
+    const auto DstChain = EdgeIt.first;
+    const auto DstEdge = EdgeIt.second;
+    const auto TargetChain = DstChain == Other ? this : DstChain;
+    auto CurEdge = getEdge(TargetChain);
+    if (CurEdge == nullptr) {
+      DstEdge->changeEndpoint(Other, this);
+      this->addEdge(TargetChain, DstEdge);
+      if (DstChain != this && DstChain != Other) {
+        DstChain->addEdge(this, DstEdge);
+      }
+    } else {
+      CurEdge->moveJumps(DstEdge);
+    }
+    // Cleanup leftover edge
+    if (DstChain != Other) {
+      DstChain->removeEdge(Other);
+    }
+  }
+}
+
+using BlockIter = std::vector<Block *>::const_iterator;
+
+/// A wrapper around three chains of blocks; it is used to avoid extra
+/// instantiation of the vectors.
+class MergedChain {
+public:
+  MergedChain(BlockIter Begin1, BlockIter End1, BlockIter Begin2 = BlockIter(),
+              BlockIter End2 = BlockIter(), BlockIter Begin3 = BlockIter(),
+              BlockIter End3 = BlockIter())
+      : Begin1(Begin1), End1(End1), Begin2(Begin2), End2(End2), Begin3(Begin3),
+        End3(End3) {}
+
+  template <typename F> void forEach(const F &Func) const {
+    for (auto It = Begin1; It != End1; It++)
+      Func(*It);
+    for (auto It = Begin2; It != End2; It++)
+      Func(*It);
+    for (auto It = Begin3; It != End3; It++)
+      Func(*It);
+  }
+
+  std::vector<Block *> getBlocks() const {
+    std::vector<Block *> Result;
+    Result.reserve(std::distance(Begin1, End1) + std::distance(Begin2, End2) +
+                   std::distance(Begin3, End3));
+    Result.insert(Result.end(), Begin1, End1);
+    Result.insert(Result.end(), Begin2, End2);
+    Result.insert(Result.end(), Begin3, End3);
+    return Result;
+  }
+
+  const Block *getFirstBlock() const { return *Begin1; }
+
+private:
+  BlockIter Begin1;
+  BlockIter End1;
+  BlockIter Begin2;
+  BlockIter End2;
+  BlockIter Begin3;
+  BlockIter End3;
+};
+
+/// The implementation of the ExtTSP algorithm.
+class ExtTSPImpl {
+  using EdgeT = std::pair<uint64_t, uint64_t>;
+  using EdgeCountMap = DenseMap<EdgeT, uint64_t>;
+
+public:
+  ExtTSPImpl(size_t NumNodes, const std::vector<uint64_t> &NodeSizes,
+             const std::vector<uint64_t> &NodeCounts,
+             const EdgeCountMap &EdgeCounts)
+      : NumNodes(NumNodes) {
+    initialize(NodeSizes, NodeCounts, EdgeCounts);
+  }
+
+  /// Run the algorithm and return an optimized ordering of blocks.
+  void run(std::vector<uint64_t> &Result) {
+    // Pass 1: Merge blocks with their mutually forced successors
+    mergeForcedPairs();
+
+    // Pass 2: Merge pairs of chains while improving the ExtTSP objective
+    mergeChainPairs();
+
+    // Pass 3: Merge cold blocks to reduce code size
+    mergeColdChains();
+
+    // Collect blocks from all chains
+    concatChains(Result);
+  }
+
+private:
+  /// Initialize the algorithm's data structures.
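+  /// This creates: a Block per node (with its size and execution count), a
+  /// Jump per positive-count edge, a single-block Chain per node (hot chains
+  /// are also recorded in HotChains), and a ChainEdge for every pair of
+  /// chains connected by at least one jump.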
+  void initialize(const std::vector<uint64_t> &NodeSizes,
+                  const std::vector<uint64_t> &NodeCounts,
+                  const EdgeCountMap &EdgeCounts) {
+    // Initialize blocks
+    AllBlocks.reserve(NumNodes);
+    for (uint64_t Node = 0; Node < NumNodes; Node++) {
+      uint64_t Size = std::max<uint64_t>(NodeSizes[Node], 1ULL);
+      uint64_t ExecutionCount = NodeCounts[Node];
+      // The execution count of the entry block is set to at least 1
+      if (Node == 0 && ExecutionCount == 0)
+        ExecutionCount = 1;
+      AllBlocks.emplace_back(Node, Size, ExecutionCount);
+    }
+
+    // Initialize jumps between blocks
+    SuccNodes = std::vector<std::vector<uint64_t>>(NumNodes);
+    PredNodes = std::vector<std::vector<uint64_t>>(NumNodes);
+    AllJumps.reserve(EdgeCounts.size());
+    for (auto It : EdgeCounts) {
+      auto Pred = It.first.first;
+      auto Succ = It.first.second;
+      // Ignore self-edges
+      if (Pred == Succ)
+        continue;
+
+      SuccNodes[Pred].push_back(Succ);
+      PredNodes[Succ].push_back(Pred);
+      auto ExecutionCount = It.second;
+      if (ExecutionCount > 0) {
+        auto &Block = AllBlocks[Pred];
+        auto &SuccBlock = AllBlocks[Succ];
+        AllJumps.emplace_back(&Block, &SuccBlock, ExecutionCount);
+        SuccBlock.InJumps.push_back(&AllJumps.back());
+        Block.OutJumps.push_back(&AllJumps.back());
+      }
+    }
+
+    // Initialize chains
+    AllChains.reserve(NumNodes);
+    HotChains.reserve(NumNodes);
+    for (auto &Block : AllBlocks) {
+      AllChains.emplace_back(Block.Index, &Block);
+      Block.CurChain = &AllChains.back();
+      if (Block.ExecutionCount > 0) {
+        HotChains.push_back(&AllChains.back());
+      }
+    }
+
+    // Initialize chain edges
+    AllEdges.reserve(AllJumps.size());
+    for (auto &Block : AllBlocks) {
+      for (auto &Jump : Block.OutJumps) {
+        const auto SuccBlock = Jump->Target;
+        auto CurEdge = Block.CurChain->getEdge(SuccBlock->CurChain);
+        // this edge is already present in the graph
+        if (CurEdge != nullptr) {
+          assert(SuccBlock->CurChain->getEdge(Block.CurChain) != nullptr);
+          CurEdge->appendJump(Jump);
+          continue;
+        }
+        // this is a new edge
+        AllEdges.emplace_back(Jump);
+        Block.CurChain->addEdge(SuccBlock->CurChain, &AllEdges.back());
+        SuccBlock->CurChain->addEdge(Block.CurChain, &AllEdges.back());
+      }
+    }
+  }
+
+  /// For a pair of blocks, A and B, block B is the forced successor of A,
+  /// if (i) all jumps (based on profile) from A go to B and (ii) all jumps
+  /// to B are from A. Such blocks should be adjacent in the optimal ordering;
+  /// the method finds and merges such pairs of blocks.
+  void mergeForcedPairs() {
+    // Find fallthroughs based on edge weights
+    for (auto &Block : AllBlocks) {
+      if (SuccNodes[Block.Index].size() == 1 &&
+          PredNodes[SuccNodes[Block.Index][0]].size() == 1 &&
+          SuccNodes[Block.Index][0] != 0) {
+        size_t SuccIndex = SuccNodes[Block.Index][0];
+        Block.ForcedSucc = &AllBlocks[SuccIndex];
+        AllBlocks[SuccIndex].ForcedPred = &Block;
+      }
+    }
+
+    // There might be 'cycles' in the forced dependencies, since profile
+    // data isn't 100% accurate. Typically this is observed in loops, when the
+    // loop edges are the hottest successors for the basic blocks of the loop.
+    // Break the cycles by choosing the block with the smallest index as the
+    // head. This helps to keep the original order of the loops, which likely
+    // have already been rotated in the optimized manner.
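+    // For instance, in a hot two-block loop A->B->A where A and B become
+    // mutually forced, the walk below starts at the smaller-index block,
+    // detects that following ForcedSucc links returns to the start, and then
+    // clears the link entering that block, making it the head of the chain.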
+    for (auto &Block : AllBlocks) {
+      if (Block.ForcedSucc == nullptr || Block.ForcedPred == nullptr)
+        continue;
+
+      auto SuccBlock = Block.ForcedSucc;
+      while (SuccBlock != nullptr && SuccBlock != &Block) {
+        SuccBlock = SuccBlock->ForcedSucc;
+      }
+      if (SuccBlock == nullptr)
+        continue;
+      // Break the cycle
+      AllBlocks[Block.ForcedPred->Index].ForcedSucc = nullptr;
+      Block.ForcedPred = nullptr;
+    }
+
+    // Merge blocks with their fallthrough successors
+    for (auto &Block : AllBlocks) {
+      if (Block.ForcedPred == nullptr && Block.ForcedSucc != nullptr) {
+        auto CurBlock = &Block;
+        while (CurBlock->ForcedSucc != nullptr) {
+          const auto NextBlock = CurBlock->ForcedSucc;
+          mergeChains(Block.CurChain, NextBlock->CurChain, 0, MergeTypeTy::X_Y);
+          CurBlock = NextBlock;
+        }
+      }
+    }
+  }
+
+  /// Merge pairs of chains while improving the ExtTSP objective.
+  void mergeChainPairs() {
+    /// Deterministically compare pairs of chains
+    auto compareChainPairs = [](const Chain *A1, const Chain *B1,
+                                const Chain *A2, const Chain *B2) {
+      if (A1 != A2)
+        return A1->id() < A2->id();
+      return B1->id() < B2->id();
+    };
+
+    while (HotChains.size() > 1) {
+      Chain *BestChainPred = nullptr;
+      Chain *BestChainSucc = nullptr;
+      auto BestGain = MergeGainTy();
+      // Iterate over all pairs of chains
+      for (auto ChainPred : HotChains) {
+        // Get candidates for merging with the current chain
+        for (auto EdgeIter : ChainPred->edges()) {
+          auto ChainSucc = EdgeIter.first;
+          auto ChainEdge = EdgeIter.second;
+          // Ignore loop edges
+          if (ChainPred == ChainSucc)
+            continue;
+
+          // Compute the gain of merging the two chains
+          auto CurGain = getBestMergeGain(ChainPred, ChainSucc, ChainEdge);
+          if (CurGain.score() <= EPS)
+            continue;
+
+          if (BestGain < CurGain ||
+              (std::abs(CurGain.score() - BestGain.score()) < EPS &&
+               compareChainPairs(ChainPred, ChainSucc, BestChainPred,
+                                 BestChainSucc))) {
+            BestGain = CurGain;
+            BestChainPred = ChainPred;
+            BestChainSucc = ChainSucc;
+          }
+        }
+      }
+
+      // Stop merging when there is no improvement
+      if (BestGain.score() <= EPS)
+        break;
+
+      // Merge the best pair of chains
+      mergeChains(BestChainPred, BestChainSucc, BestGain.mergeOffset(),
+                  BestGain.mergeType());
+    }
+  }
+
+  /// Merge cold blocks to reduce code size.
+  void mergeColdChains() {
+    for (size_t SrcBB = 0; SrcBB < NumNodes; SrcBB++) {
+      // Iterating over neighbors in the reverse order to make sure original
+      // fallthrough jumps are merged first
+      size_t NumSuccs = SuccNodes[SrcBB].size();
+      for (size_t Idx = 0; Idx < NumSuccs; Idx++) {
+        auto DstBB = SuccNodes[SrcBB][NumSuccs - Idx - 1];
+        auto SrcChain = AllBlocks[SrcBB].CurChain;
+        auto DstChain = AllBlocks[DstBB].CurChain;
+        if (SrcChain != DstChain && !DstChain->isEntry() &&
+            SrcChain->blocks().back()->Index == SrcBB &&
+            DstChain->blocks().front()->Index == DstBB) {
+          mergeChains(SrcChain, DstChain, 0, MergeTypeTy::X_Y);
+        }
+      }
+    }
+  }
+
+  /// Compute the Ext-TSP score for a given block order and a list of jumps.
+  double extTSPScore(const MergedChain &MergedBlocks,
+                     const std::vector<Jump *> &Jumps) const {
+    if (Jumps.empty())
+      return 0.0;
+    uint64_t CurAddr = 0;
+    MergedBlocks.forEach([&](const Block *BB) {
+      BB->EstimatedAddr = CurAddr;
+      CurAddr += BB->Size;
+    });
+
+    double Score = 0;
+    for (auto &Jump : Jumps) {
+      const auto SrcBlock = Jump->Source;
+      const auto DstBlock = Jump->Target;
+      Score += ::extTSPScore(SrcBlock->EstimatedAddr, SrcBlock->Size,
+                             DstBlock->EstimatedAddr, Jump->ExecutionCount);
+    }
+    return Score;
+  }
+
+  /// Compute the gain of merging two chains.
+  ///
+  /// The function considers all possible ways of merging two chains and
+  /// computes the one having the largest increase in the ExtTSP objective. The
+  /// result is the maximum gain together with the corresponding merge 'type'
+  /// and 'offset'.
+  MergeGainTy getBestMergeGain(Chain *ChainPred, Chain *ChainSucc,
+                               ChainEdge *Edge) const {
+    if (Edge->hasCachedMergeGain(ChainPred, ChainSucc)) {
+      return Edge->getCachedMergeGain(ChainPred, ChainSucc);
+    }
+
+    // Precompute jumps between ChainPred and ChainSucc
+    auto Jumps = Edge->jumps();
+    auto EdgePP = ChainPred->getEdge(ChainPred);
+    if (EdgePP != nullptr) {
+      Jumps.insert(Jumps.end(), EdgePP->jumps().begin(), EdgePP->jumps().end());
+    }
+    assert(!Jumps.empty() && "trying to merge chains w/o jumps");
+
+    // The object holds the best currently chosen gain of merging the two chains
+    MergeGainTy Gain = MergeGainTy();
+
+    /// Given a merge offset and a list of merge types, try to merge two chains
+    /// and update Gain with a better alternative
+    auto tryChainMerging = [&](size_t Offset,
+                               const std::vector<MergeTypeTy> &MergeTypes) {
+      // Skip merging corresponding to concatenation w/o splitting
+      if (Offset == 0 || Offset == ChainPred->blocks().size())
+        return;
+      // Skip merging if it breaks Forced successors
+      auto BB = ChainPred->blocks()[Offset - 1];
+      if (BB->ForcedSucc != nullptr)
+        return;
+      // Apply the merge, compute the corresponding gain, and update the best
+      // value, if the merge is beneficial
+      for (auto &MergeType : MergeTypes) {
+        Gain.updateIfLessThan(
+            computeMergeGain(ChainPred, ChainSucc, Jumps, Offset, MergeType));
+      }
+    };
+
+    // Try to concatenate two chains w/o splitting
+    Gain.updateIfLessThan(
+        computeMergeGain(ChainPred, ChainSucc, Jumps, 0, MergeTypeTy::X_Y));
+
+    if (EnableChainSplitAlongJumps) {
+      // Attach (a part of) ChainPred before the first block of ChainSucc
+      for (auto &Jump : ChainSucc->blocks().front()->InJumps) {
+        const auto SrcBlock = Jump->Source;
+        if (SrcBlock->CurChain != ChainPred)
+          continue;
+        size_t Offset = SrcBlock->CurIndex + 1;
+        tryChainMerging(Offset, {MergeTypeTy::X1_Y_X2, MergeTypeTy::X2_X1_Y});
+      }
+
+      // Attach (a part of) ChainPred after the last block of ChainSucc
+      for (auto &Jump : ChainSucc->blocks().back()->OutJumps) {
+        const auto DstBlock = Jump->Target;
+        if (DstBlock->CurChain != ChainPred)
+          continue;
+        size_t Offset = DstBlock->CurIndex;
+        tryChainMerging(Offset, {MergeTypeTy::X1_Y_X2, MergeTypeTy::Y_X2_X1});
+      }
+    }
+
+    // Try to break ChainPred in various ways and concatenate with ChainSucc
+    if (ChainPred->blocks().size() <= ChainSplitThreshold) {
+      for (size_t Offset = 1; Offset < ChainPred->blocks().size(); Offset++) {
+        // Try to split the chain in different ways. In practice, applying
+        // X2_Y_X1 merging almost never provides benefits; thus, we exclude
+        // it from consideration to reduce the search space
+        tryChainMerging(Offset, {MergeTypeTy::X1_Y_X2, MergeTypeTy::Y_X2_X1,
+                                 MergeTypeTy::X2_X1_Y});
+      }
+    }
+    Edge->setCachedMergeGain(ChainPred, ChainSucc, Gain);
+    return Gain;
+  }
+
+  /// Compute the score gain of merging two chains, respecting a given
+  /// merge 'type' and 'offset'.
+  ///
+  /// The two chains are not modified in the method.
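+  ///
+  /// The returned gain is the Ext-TSP score of the merged chain over the
+  /// jumps affected by the merge (the inter-chain jumps plus ChainPred's
+  /// self-jumps) minus ChainPred's cached score; ChainSucc's blocks stay
+  /// contiguous under every merge type, so its internal score is unaffected.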
+  MergeGainTy computeMergeGain(const Chain *ChainPred, const Chain *ChainSucc,
+                               const std::vector<Jump *> &Jumps,
+                               size_t MergeOffset,
+                               MergeTypeTy MergeType) const {
+    auto MergedBlocks = mergeBlocks(ChainPred->blocks(), ChainSucc->blocks(),
+                                    MergeOffset, MergeType);
+
+    // Do not allow a merge that does not preserve the original entry block
+    if ((ChainPred->isEntry() || ChainSucc->isEntry()) &&
+        !MergedBlocks.getFirstBlock()->isEntry())
+      return MergeGainTy();
+
+    // The gain for the new chain
+    auto NewGainScore = extTSPScore(MergedBlocks, Jumps) - ChainPred->score();
+    return MergeGainTy(NewGainScore, MergeOffset, MergeType);
+  }
+
+  /// Merge two chains of blocks respecting a given merge 'type' and 'offset'.
+  ///
+  /// If MergeType == X_Y, then the result is a concatenation of two chains.
+  /// Otherwise, the first chain is cut into two sub-chains at the offset,
+  /// and merged using all possible ways of concatenating three chains.
+  MergedChain mergeBlocks(const std::vector<Block *> &X,
+                          const std::vector<Block *> &Y, size_t MergeOffset,
+                          MergeTypeTy MergeType) const {
+    // Split the first chain, X, into X1 and X2
+    BlockIter BeginX1 = X.begin();
+    BlockIter EndX1 = X.begin() + MergeOffset;
+    BlockIter BeginX2 = X.begin() + MergeOffset;
+    BlockIter EndX2 = X.end();
+    BlockIter BeginY = Y.begin();
+    BlockIter EndY = Y.end();
+
+    // Construct a new chain from the three existing ones
+    switch (MergeType) {
+    case MergeTypeTy::X_Y:
+      return MergedChain(BeginX1, EndX2, BeginY, EndY);
+    case MergeTypeTy::X1_Y_X2:
+      return MergedChain(BeginX1, EndX1, BeginY, EndY, BeginX2, EndX2);
+    case MergeTypeTy::Y_X2_X1:
+      return MergedChain(BeginY, EndY, BeginX2, EndX2, BeginX1, EndX1);
+    case MergeTypeTy::X2_X1_Y:
+      return MergedChain(BeginX2, EndX2, BeginX1, EndX1, BeginY, EndY);
+    }
+    llvm_unreachable("unexpected chain merge type");
+  }
+
+  /// Merge chain From into chain Into, update the list of active chains,
+  /// adjacency information, and the corresponding cached values.
+  void mergeChains(Chain *Into, Chain *From, size_t MergeOffset,
+                   MergeTypeTy MergeType) {
+    assert(Into != From && "a chain cannot be merged with itself");
+
+    // Merge the blocks
+    auto MergedBlocks =
+        mergeBlocks(Into->blocks(), From->blocks(), MergeOffset, MergeType);
+    Into->merge(From, MergedBlocks.getBlocks());
+    Into->mergeEdges(From);
+    From->clear();
+
+    // Update cached ext-tsp score for the new chain
+    auto SelfEdge = Into->getEdge(Into);
+    if (SelfEdge != nullptr) {
+      MergedBlocks = MergedChain(Into->blocks().begin(), Into->blocks().end());
+      Into->setScore(extTSPScore(MergedBlocks, SelfEdge->jumps()));
+    }
+
+    // Remove chain From from the list of active chains
+    auto Iter = std::remove(HotChains.begin(), HotChains.end(), From);
+    HotChains.erase(Iter, HotChains.end());
+
+    // Invalidate caches
+    for (auto EdgeIter : Into->edges()) {
+      EdgeIter.second->invalidateCache();
+    }
+  }
+
+  /// Concatenate all chains into a final order of blocks.
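+  ///
+  /// Chains are emitted in decreasing density order (total execution count
+  /// divided by total size, computed below), except that the chain containing
+  /// the original entry block always comes first.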
+  void concatChains(std::vector<uint64_t> &Order) {
+    // Collect chains and calculate some stats for their sorting
+    std::vector<Chain *> SortedChains;
+    DenseMap<const Chain *, double> ChainDensity;
+    for (auto &Chain : AllChains) {
+      if (!Chain.blocks().empty()) {
+        SortedChains.push_back(&Chain);
+        // Using doubles to avoid overflow of ExecutionCount
+        double Size = 0;
+        double ExecutionCount = 0;
+        for (auto Block : Chain.blocks()) {
+          Size += static_cast<double>(Block->Size);
+          ExecutionCount += static_cast<double>(Block->ExecutionCount);
+        }
+        assert(Size > 0 && "a chain of zero size");
+        ChainDensity[&Chain] = ExecutionCount / Size;
+      }
+    }
+
+    // Sorting chains by density in decreasing order
+    std::stable_sort(SortedChains.begin(), SortedChains.end(),
+                     [&](const Chain *C1, const Chain *C2) {
+                       // Make sure the original entry block is at the
+                       // beginning of the order
+                       if (C1->isEntry() != C2->isEntry()) {
+                         return C1->isEntry();
+                       }
+
+                       const double D1 = ChainDensity[C1];
+                       const double D2 = ChainDensity[C2];
+                       // Compare by density and break ties by chain identifiers
+                       return (D1 != D2) ? (D1 > D2) : (C1->id() < C2->id());
+                     });
+
+    // Collect the blocks in the order specified by their chains
+    Order.reserve(NumNodes);
+    for (auto Chain : SortedChains) {
+      for (auto Block : Chain->blocks()) {
+        Order.push_back(Block->Index);
+      }
+    }
+  }
+
+private:
+  /// The number of nodes in the graph.
+  const size_t NumNodes;
+
+  /// Successors of each node.
+  std::vector<std::vector<uint64_t>> SuccNodes;
+
+  /// Predecessors of each node.
+  std::vector<std::vector<uint64_t>> PredNodes;
+
+  /// All basic blocks.
+  std::vector<Block> AllBlocks;
+
+  /// All jumps between blocks.
+  std::vector<Jump> AllJumps;
+
+  /// All chains of basic blocks.
+  std::vector<Chain> AllChains;
+
+  /// All edges between chains.
+  std::vector<ChainEdge> AllEdges;
+
+  /// Active chains. The vector gets updated at runtime when chains are merged.
+  std::vector<Chain *> HotChains;
+};
+
+} // end of anonymous namespace
+
+std::vector<uint64_t> llvm::applyExtTspLayout(
+    const std::vector<uint64_t> &NodeSizes,
+    const std::vector<uint64_t> &NodeCounts,
+    const DenseMap<std::pair<uint64_t, uint64_t>, uint64_t> &EdgeCounts) {
+  size_t NumNodes = NodeSizes.size();
+
+  // Verify correctness of the input data.
+  assert(NodeCounts.size() == NodeSizes.size() && "Incorrect input");
+  assert(NumNodes > 2 && "Incorrect input");
+
+  // Apply the reordering algorithm.
+  auto Alg = ExtTSPImpl(NumNodes, NodeSizes, NodeCounts, EdgeCounts);
+  std::vector<uint64_t> Result;
+  Alg.run(Result);
+
+  // Verify correctness of the output.
+  assert(Result.front() == 0 && "Original entry point is not preserved");
+  assert(Result.size() == NumNodes && "Incorrect size of reordered layout");
+  return Result;
+}
+
+double llvm::calcExtTspScore(
+    const std::vector<uint64_t> &Order, const std::vector<uint64_t> &NodeSizes,
+    const std::vector<uint64_t> &NodeCounts,
+    const DenseMap<std::pair<uint64_t, uint64_t>, uint64_t> &EdgeCounts) {
+  // Estimate addresses of the blocks in memory
+  auto Addr = std::vector<uint64_t>(NodeSizes.size(), 0);
+  for (size_t Idx = 1; Idx < Order.size(); Idx++) {
+    Addr[Order[Idx]] = Addr[Order[Idx - 1]] + NodeSizes[Order[Idx - 1]];
+  }
+
+  // Increase the score for each jump
+  double Score = 0;
+  for (auto It : EdgeCounts) {
+    auto Pred = It.first.first;
+    auto Succ = It.first.second;
+    uint64_t Count = It.second;
+    Score += extTSPScore(Addr[Pred], NodeSizes[Pred], Addr[Succ], Count);
+  }
+  return Score;
+}
+
+double llvm::calcExtTspScore(
+    const std::vector<uint64_t> &NodeSizes,
+    const std::vector<uint64_t> &NodeCounts,
+    const DenseMap<std::pair<uint64_t, uint64_t>, uint64_t> &EdgeCounts) {
+  auto Order = std::vector<uint64_t>(NodeSizes.size());
+  for (size_t Idx = 0; Idx < NodeSizes.size(); Idx++) {
+    Order[Idx] = Idx;
+  }
+  return calcExtTspScore(Order, NodeSizes, NodeCounts, EdgeCounts);
+}
diff --git a/llvm/test/CodeGen/X86/code_placement_ext_tsp.ll b/llvm/test/CodeGen/X86/code_placement_ext_tsp.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/X86/code_placement_ext_tsp.ll
@@ -0,0 +1,410 @@
+; RUN: llc -mcpu=corei7 -mtriple=x86_64-linux -enable-ext-tsp-block-placement=1 < %s | FileCheck %s
+; RUN: llc -mcpu=corei7 -mtriple=x86_64-linux -enable-ext-tsp-block-placement=1 -ext-tsp-chain-split-threshold=0 -ext-tsp-enable-chain-split-along-jumps=0 < %s | FileCheck %s -check-prefix=CHECK2
+
+define void @func1a() {
+; Test that the algorithm positions the most likely successor first
+;
+; +-----+
+; | b0  | -+
+; +-----+  |
+;   |      |
+;   | 40   |
+;   v      |
+; +-----+  |
+; | b1  |  | 100
+; +-----+  |
+;   |      |
+;   | 40   |
+;   v      |
+; +-----+  |
+; | b2  | <+
+; +-----+
+;
+; CHECK-LABEL: func1a:
+; CHECK: b0
+; CHECK: b2
+; CHECK: b1
+
+b0:
+  %call = call zeroext i1 @a()
+  br i1 %call, label %b1, label %b2, !prof !1
+
+b1:
+  call void @d()
+  call void @d()
+  call void @d()
+  br label %b2
+
+b2:
+  call void @e()
+  ret void
+}
+
+
+define void @func1b() {
+; Test that the algorithm prefers many fallthroughs even in the presence of
+; a heavy successor
+;
+; +-----+
+; | b0  | -+
+; +-----+  |
+;   |      |
+;   | 80   |
+;   v      |
+; +-----+  |
+; | b1  |  | 100
+; +-----+  |
+;   |      |
+;   | 80   |
+;   v      |
+; +-----+  |
+; | b2  | <+
+; +-----+
+;
+; CHECK-LABEL: func1b:
+; CHECK: b0
+; CHECK: b1
+; CHECK: b2
+
+b0:
+  %call = call zeroext i1 @a()
+  br i1 %call, label %b1, label %b2, !prof !2
+
+b1:
+  call void @d()
+  call void @d()
+  call void @d()
+  br label %b2
+
+b2:
+  call void @e()
+  ret void
+}
+
+
+define void @func2() !prof !3 {
+; Test that the algorithm positions the hot chain continuously
+;
+; +----+  [7]   +-------+
+; | b1 | <----- |  b0   |
+; +----+        +-------+
+;   |             |
+;   |             | [15]
+;   |             v
+;   |           +-------+
+;   |           |  b3   |
+;   |           +-------+
+;   |             |
+;   |             | [15]
+;   |             v
+;   |           +-------+  [31]
+;   |           |       | -------+
+;   |           |  b4   |        |
+;   |           |       | <------+
+;   |           +-------+
+;   |             |
+;   |             | [15]
+;   |             v
+;   |    [7]    +-------+
+;   +---------> |  b2   |
+;               +-------+
+;
+; CHECK-LABEL: func2:
+; CHECK: b0
+; CHECK: b3
+; CHECK: b4
+; CHECK: b2
+; CHECK: b1
+
+b0:
+  call void @d()
+  call void @d()
+  call void @d()
+  %call = call zeroext i1 @a()
+  br i1 %call, label %b1, label %b3, !prof !4
+
+b1:
+  call void @d()
+  br label %b2
+
+b2:
+  call void @e()
+  call void @e()
+  call void @e()
+  call void @e()
+  call void @e()
+  call void @e()
+  call void @e()
+  call void @e()
+  ret void
+
+b3:
+  call void @d()
+  br label %b4
+
+b4:
+  call void @d()
+  %call2 = call zeroext i1 @a()
+  br i1 %call2, label %b2, label %b4, !prof !5
+}
+
+
+define void @func3() !prof !6 {
+; A larger test where it is beneficial for locality to break the loop
+;
+;                 +--------+
+;                 |   b0   |
+;                 +--------+
+;                   |
+;                   | [177]
+;                   v
+; +----+  [177]   +---------------------------+
+; | b5 | <------- |            b1             |
+; +----+          +---------------------------+
+;                   |         ^         ^
+;                   | [196]   | [124]   | [70]
+;                   v         |         |
+; +----+  [70]    +--------+  |         |
+; | b4 | <------- |   b2   |  |         |
+; +----+          +--------+  |         |
+;   |               |         |         |
+;   |               | [124]   |         |
+;   |               v         |         |
+;   |             +--------+  |         |
+;   |             |   b3   | -+         |
+;   |             +--------+            |
+;   |                                   |
+;   +-----------------------------------+
+;
+; CHECK-LABEL: func3:
+; CHECK: b0
+; CHECK: b1
+; CHECK: b2
+; CHECK: b3
+; CHECK: b5
+; CHECK: b4
+
+b0:
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  br label %b1
+
+b1:
+  %call = call zeroext i1 @a()
+  br i1 %call, label %b5, label %b2, !prof !7
+
+b2:
+  call void @d()
+  call void @d()
+  call void @d()
+  call void @d()
+  %call2 = call zeroext i1 @a()
+  br i1 %call2, label %b3, label %b4, !prof !8
+
+b3:
+  call void @d()
+  call void @f()
+  call void @d()
+  call void @d()
+  call void @d()
+  call void @d()
+  call void @d()
+  call void @d()
+  call void @d()
+  call void @d()
+  call void @d()
+  call void @d()
+  call void @d()
+  call void @d()
+  call void @d()
+  call void @d()
+  call void @d()
+  call void @d()
+  call void @d()
+  br label %b1
+
+b4:
+  call void @d()
+  call void @e()
+  call void @e()
+  call void @e()
+  call void @e()
+  call void @e()
+  call void @e()
+  call void @e()
+  call void @e()
+  call void @e()
+  call void @e()
+  call void @e()
+  call void @e()
+  call void @e()
+  call void @e()
+  call void @e()
+  call void @e()
+  call void @e()
+  call void @e()
+  br label %b1
+
+b5:
+  ret void
+}
+
+define void @func_loop() !prof !9 {
+; Test that the algorithm can rotate loops in the presence of profile data.
+;
+;                  +--------+
+;                  | entry  |
+;                  +--------+
+;                      |
+;                      | 1
+;                      v
+; +--------+  16   +--------+
+; | if.then| <---- | header | <+
+; +--------+       +--------+  |
+;   |                  |       |
+;   |                  | 16    |
+;   |                  v       |
+;   |              +--------+  |
+;   |              | if.else|  | 31
+;   |              +--------+  |
+;   |                  |       |
+;   |                  | 16    |
+;   |                  v       |
+;   |     16       +--------+  |
+;   +------------> | if.end | -+
+;                  +--------+
+;                      |
+;                      | 1
+;                      v
+;                  +--------+
+;                  |  end   |
+;                  +--------+
+;
+; CHECK-LABEL: func_loop:
+; CHECK: if.else
+; CHECK: if.end
+; CHECK: header
+; CHECK: if.then
+
+entry:
+  br label %header
+
+header:
+  call void @e()
+  %call = call zeroext i1 @a()
+  br i1 %call, label %if.then, label %if.else, !prof !10
+
+if.then:
+  call void @f()
+  br label %if.end
+
+if.else:
+  call void @g()
+  br label %if.end
+
+if.end:
+  call void @h()
+  %call2 = call zeroext i1 @a()
+  br i1 %call2, label %header, label %end
+
+end:
+  ret void
+}
+
+define void @func4() !prof !11 {
+; Test verifying that, if enabled, chains can be split in order to improve the
+; objective (by creating more fallthroughs)
+;
+; +-------+
+; | entry |--------+
+; +-------+        |
+;   |              |
+;   | 27           |
+;   v              |
+; +-------+        |
+; |  b1   | -+     |
+; +-------+  |     |
+;   |        |     |
+;   | 10     |     | 0
+;   v        |     |
+; +-------+  |     |
+; |  b3   |  | 17  |
+; +-------+  |     |
+;   |        |     |
+;   | 10     |     |
+;   v        |     |
+; +-------+  |     |
+; |  b2   | <+ ----+
+; +-------+
+;
+; With chain splitting enabled:
+; CHECK-LABEL: func4:
+; CHECK: entry
+; CHECK: b1
+; CHECK: b3
+; CHECK: b2
+;
+; With chain splitting disabled:
+; CHECK2-LABEL: func4:
+; CHECK2: entry
+; CHECK2: b1
+; CHECK2: b2
+; CHECK2: b3
+
+entry:
+  call void @b()
+  %call2 = call zeroext i1 @a()
+  br i1 %call2, label %b1, label %b2, !prof !12
+
+b1:
+  call void @c()
+  %call = call zeroext i1 @a()
+  br i1 %call, label %b2, label %b3, !prof !13
+
+b2:
+  call void @d()
+  ret void
+
+b3:
+  call void @e()
+  br label %b2
+}
+
+declare zeroext i1 @a()
+declare void @b()
+declare void @c()
+declare void @d()
+declare void @e()
+declare void @g()
+declare void @f()
+declare void @h()
+
+!1 = !{!"branch_weights", i32 40, i32 100}
+!2 = !{!"branch_weights", i32 80, i32 100}
+!3 = !{!"function_entry_count", i64 2200}
+!4 = !{!"branch_weights", i32 700, i32 1500}
+!5 = !{!"branch_weights", i32 1500, i32 3100}
+!6 = !{!"function_entry_count", i64 177}
+!7 = !{!"branch_weights", i32 177, i32 196}
+!8 = !{!"branch_weights", i32 125, i32 70}
+!9 = !{!"function_entry_count", i64 1}
+!10 = !{!"branch_weights", i32 16, i32 16}
+!11 = !{!"function_entry_count", i64 1}
+!12 = !{!"branch_weights", i32 27, i32 0}
+!13 = !{!"branch_weights", i32 17, i32 10}
diff --git a/llvm/test/CodeGen/X86/code_placement_ext_tsp_large.ll b/llvm/test/CodeGen/X86/code_placement_ext_tsp_large.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/X86/code_placement_ext_tsp_large.ll
@@ -0,0 +1,296 @@
+; REQUIRES: asserts
+; RUN: llc -mcpu=corei7 -mtriple=x86_64-linux -enable-ext-tsp-block-placement=1 -ext-tsp-chain-split-threshold=128 -debug-only=block-placement < %s 2>&1 | FileCheck %s
+; RUN: llc -mcpu=corei7 -mtriple=x86_64-linux -enable-ext-tsp-block-placement=1 -ext-tsp-chain-split-threshold=1 -debug-only=block-placement < %s 2>&1 | FileCheck %s -check-prefix=CHECK2
+; RUN: llc -mcpu=corei7 -mtriple=x86_64-linux -enable-ext-tsp-block-placement=0 -debug-only=block-placement < %s 2>&1 | FileCheck %s -check-prefix=CHECK3
+
+@yydebug = dso_local global i32 0, align 4
+
+define void @func_large() !prof !0 {
+; A large CFG instance where chain splitting helps to
+; compute a better basic block ordering.
+; The test verifies that with chain
+; splitting, the resulting layout is improved (e.g., the score is increased).
+;
+;                                     +----------------+
+;                                     | b0 [76 bytes]  | -------------------+
+;                                     +----------------+                    |
+;                                       |                                   |
+;                                       | 3,065,981,778                     |
+;                                       v                                   |
+; +----------------+   766,495,444    +----------------+                    |
+; | b8 [244 bytes] | <--------------- | b2 [4 bytes]   |                    |
+; +----------------+                  +----------------+                    |
+;   |    ^                              |                                   |
+;   |    |                              | 2,299,486,333                     |
+;   |    | 766,495,444                  v                                   |
+;   |    |                            +----------------+                    |
+;   |    +--------------------------- | b3 [12 bytes]  |                    |
+;   |                                 +----------------+                    |
+;   |                                   |                                   |
+;   |                                   | 1,532,990,888                     |
+;   |                                   v                                   |
+;   |                                 +----------------+                    | 574,869,946
+;   |    +--------------------------- | b4 [12 bytes]  |                    |
+;   |    |                            +----------------+                    |
+;   |    |                              |                                   |
+;   |    |                              | 574,871,583                       |
+;   |    |                              v                                   |
+;   |    |                            +----------------+                    |
+;   |    |                            | b5 [116 bytes] | -+                 |
+;   |    |                            +----------------+  |                 |
+;   |    |                              |                 |                 |
+;   |    |                              | 1,636           |                 |
+;   |    |                              v                 |                 |
+;   |    |                            +----------------+  |                 |
+;   |    |                   +------  | b6 [32 bytes]  |  |                 |
+;   |    |                   |        +----------------+  |                 |
+;   |    |                   |          |                 |                 |
+;   |    |                   |          | 7               | 3,065,981,778   |
+;   |    |                   |          v                 |                 |
+;   |    |                   |        +----------------+  |                 |
+;   |    |                   | 1,628  | b9 [16 bytes]  |  |                 |
+;   |    |                   |        +----------------+  |                 |
+;   |    |                   |          |                 |                 |
+;   |    |                   |          | 7               |                 |
+;   |    |                   |          v                 |                 |
+;   |    |                   |        +----------------+  |                 |
+;   |    |                   +----->  | b7 [12 bytes]  |  |                 |
+;   |    |                            +----------------+  |                 |
+;   |    |                              |                 |                 |
+;   |    | 958,119,305                  | 1,636           |                 |
+;   |    |                              v                 v                 v
+;   |    |                            +------------------------------------------+
+;   |    +--------------------------> |                                          |
+;   |      1,532,990,889              |              b1 [36 bytes]               |
+;   +-------------------------------> |                                          |
+;                                     +------------------------------------------+
+;
+; An expected output with a large chain-split-threshold -- the layout score is
+; increased by ~17%
+;
+; CHECK-LABEL: Applying ext-tsp layout
+; CHECK: original layout score: 9171074274.27
+; CHECK: optimized layout score: 10756755324.57
+; CHECK: b0
+; CHECK: b2
+; CHECK: b3
+; CHECK: b4
+; CHECK: b5
+; CHECK: b8
+; CHECK: b1
+; CHECK: b6
+; CHECK: b7
+; CHECK: b9
+;
+; An expected output with chain-split-threshold=1 (disabling splitting) -- the
+; increase of the layout score is smaller, ~7%:
+;
+; CHECK2-LABEL: Applying ext-tsp layout
+; CHECK2: original layout score: 9171074274.27
+; CHECK2: optimized layout score: 9810644873.57
+; CHECK2: b0
+; CHECK2: b2
+; CHECK2: b3
+; CHECK2: b4
+; CHECK2: b5
+; CHECK2: b1
+; CHECK2: b8
+; CHECK2: b6
+; CHECK2: b7
+; CHECK2: b9
+;
+; An expected output with ext-tsp disabled -- the layout is not modified:
+;
+; CHECK3-LABEL: func_large:
+; CHECK3: b0
+; CHECK3: b1
+; CHECK3: b2
+; CHECK3: b3
+; CHECK3: b4
+; CHECK3: b5
+; CHECK3: b6
+; CHECK3: b7
+; CHECK3: b8
+; CHECK3: b9
+
+b0:
+  %0 = load i32, i32* @yydebug, align 4
+  %cmp = icmp ne i32 %0, 0
+  call void @a()
+  call void @a()
+  call void @a()
+  call void @a()
+  call void @a()
+  call void @a()
+  call void @a()
+  call void @a()
+  call void @a()
+  call void @a()
+  call void @a()
+  call void @a()
+  call void @a()
+  call void @a()
+  call void @a()
+  call void @a()
+  call void @a()
+  call void @a()
+  br i1 %cmp, label %b1, label %b2, !prof !1
+b1:
+  call void @b()
+  call void @b()
+  call void @b()
+  call void @b()
+  call void @b()
+  call void @b()
+  call void @b()
+  call void @b()
+  ret void
+b2:
+  call void @c()
+  call void @c()
+  call void @c()
+  call void @c()
+  call void @c()
+  call void @c()
+  call void @c()
+  call void @c()
+  call void @c()
+  call void @c()
+  br i1 %cmp, label %b3, label %b8, !prof !2
+b3:
+  call void @d()
+  call void @d()
+  br i1 %cmp, label %b4, label %b8, !prof !3
+b4:
+  call void @e()
+  call void @e()
+  br i1 %cmp, label %b5, label %b1, !prof !4
+b5:
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  call void @f()
+  br i1 %cmp, label %b1, label %b6, !prof !5
+b6:
+  call void @g()
+  call void @g()
+  call void @g()
+  call void @g()
+  call void @g()
+  call void @g()
+  call void @g()
+  br i1 %cmp, label %b7, label %b9, !prof !6
+b7:
+  call void @h()
+  call void @h()
+  br label %b1
+b8:
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  call void @i()
+  br label %b1
+b9:
+  call void @j()
+  call void @j()
+  call void @j()
+  br label %b7
+}
+
+
+declare void @a()
+declare void @b()
+declare void @c()
+declare void @d()
+declare void @e()
+declare void @f()
+declare void @g()
+declare void @h()
+declare void @i()
+declare void @j()
+
+!0 = !{!"function_entry_count", i64 6131963556}
+!1 = !{!"branch_weights", i32 3065981778, i32 3065981778}
+!2 = !{!"branch_weights", i32 2299486333, i32 766495444}
+!3 = !{!"branch_weights", i32 1532990888, i32 766495444}
+!4 = !{!"branch_weights", i32 574871583, i32 958119305}
+!5 = !{!"branch_weights", i32 574869946, i32 1636}
+!6 = !{!"branch_weights", i32 1628, i32 7}
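
For illustration, a minimal standalone driver for the interface declared in
CodeLayout.h above. This is a sketch, not part of the patch: the CFG, its
sizes, and its counts are invented, and it assumes linking against LLVM's
TransformUtils library. It lays out a 4-block diamond whose hot path is
0 -> 2 -> 3 and reports the score before and after reordering:

    #include "llvm/Transforms/Utils/CodeLayout.h"
    #include <cassert>
    #include <cstdio>

    using namespace llvm;

    int main() {
      // Block sizes (in bytes) and execution counts for blocks 0..3.
      std::vector<uint64_t> Sizes = {16, 16, 16, 16};
      std::vector<uint64_t> Counts = {100, 10, 90, 100};
      // Jump execution counts; the map also defines the CFG edges.
      DenseMap<std::pair<uint64_t, uint64_t>, uint64_t> Edges;
      Edges[{0, 1}] = 10; // cold side of the branch
      Edges[{0, 2}] = 90; // hot side of the branch
      Edges[{1, 3}] = 10;
      Edges[{2, 3}] = 90;

      double Before = calcExtTspScore(Sizes, Counts, Edges);
      std::vector<uint64_t> Order = applyExtTspLayout(Sizes, Counts, Edges);
      double After = calcExtTspScore(Order, Sizes, Counts, Edges);

      // The entry block is always placed first; for this CFG the hot path
      // 0 -> 2 -> 3 becomes a chain of fall-throughs, raising the score.
      assert(Order.front() == 0 && "entry block must stay first");
      printf("layout score: %.2f -> %.2f\n", Before, After);
      return 0;
    }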