diff --git a/llvm/include/llvm/InitializePasses.h b/llvm/include/llvm/InitializePasses.h
--- a/llvm/include/llvm/InitializePasses.h
+++ b/llvm/include/llvm/InitializePasses.h
@@ -374,6 +374,7 @@
 void initializeReachingDefAnalysisPass(PassRegistry&);
 void initializeReassociateLegacyPassPass(PassRegistry&);
 void initializeRedundantDbgInstEliminationPass(PassRegistry&);
+void initializeRegAllocEvictionAdvisorAnalysisPass(PassRegistry &);
 void initializeRegAllocFastPass(PassRegistry&);
 void initializeRegBankSelectPass(PassRegistry&);
 void initializeRegToMemLegacyPass(PassRegistry&);
diff --git a/llvm/lib/CodeGen/CMakeLists.txt b/llvm/lib/CodeGen/CMakeLists.txt
--- a/llvm/lib/CodeGen/CMakeLists.txt
+++ b/llvm/lib/CodeGen/CMakeLists.txt
@@ -137,6 +137,7 @@
   ReachingDefAnalysis.cpp
   RegAllocBase.cpp
   RegAllocBasic.cpp
+  RegAllocEvictionAdvisor.cpp
   RegAllocFast.cpp
   RegAllocGreedy.cpp
   RegAllocPBQP.cpp
diff --git a/llvm/lib/CodeGen/RegAllocEvictionAdvisor.h b/llvm/lib/CodeGen/RegAllocEvictionAdvisor.h
new file mode 100644
--- /dev/null
+++ b/llvm/lib/CodeGen/RegAllocEvictionAdvisor.h
@@ -0,0 +1,210 @@
+//===- RegAllocEvictionAdvisor.h - Interference resolution ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REGALLOCEVICTIONADVISOR_H
+#define LLVM_CODEGEN_REGALLOCEVICTIONADVISOR_H
+
+#include "AllocationOrder.h"
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/CodeGen/LiveIntervals.h"
+#include "llvm/CodeGen/LiveRegMatrix.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Register.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+using SmallVirtRegSet = SmallSet<Register, 16>;
+
+// Live ranges pass through a number of stages as we try to allocate them.
+// Some of the stages may also create new live ranges:
+//
+// - Region splitting.
+// - Per-block splitting.
+// - Local splitting.
+// - Spilling.
+//
+// Ranges produced by one of the stages skip the previous stages when they are
+// dequeued. This improves performance because we can skip interference checks
+// that are unlikely to give any results. It also guarantees that the live
+// range splitting algorithm terminates, something that is otherwise hard to
+// ensure.
+enum LiveRangeStage {
+  /// Newly created live range that has never been queued.
+  RS_New,
+
+  /// Only attempt assignment and eviction. Then requeue as RS_Split.
+  RS_Assign,
+
+  /// Attempt live range splitting if assignment is impossible.
+  RS_Split,
+
+  /// Attempt more aggressive live range splitting that is guaranteed to make
+  /// progress. This is used for split products that may not be making
+  /// progress.
+  RS_Split2,
+
+  /// Live range will be spilled. No more splitting will be attempted.
+  RS_Spill,
+
+  /// Live range is in memory. Because of other evictions, it might get moved
+  /// into a register in the end.
+  RS_Memory,
+
+  /// There is nothing more we can do to this live range. Abort compilation
+  /// if it can't be assigned.
+  RS_Done
+};
+
+class RegAllocEvictionAdvisor {
+public:
+  RegAllocEvictionAdvisor(const RegAllocEvictionAdvisor &) = delete;
+  RegAllocEvictionAdvisor(RegAllocEvictionAdvisor &&) = delete;
+  virtual ~RegAllocEvictionAdvisor() = default;
+
+  virtual MCRegister
+  tryFindEvictionCandidate(LiveInterval &, AllocationOrder &, uint8_t,
+                           const SmallVirtRegSet &) const = 0;
+  virtual bool
+  shouldEvictInterference(LiveInterval &VirtReg, MCRegister PhysReg,
+                          bool IsHint,
+                          const SmallVirtRegSet &FixedRegisters) const = 0;
+
+  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
+    return ExtraRegInfo[VirtReg.reg()].Stage;
+  }
+  LiveRangeStage getStage(Register Reg) const {
+    return ExtraRegInfo[Reg].Stage;
+  }
+
+  void onNewRegCount(size_t Count) { ExtraRegInfo.resize(Count); }
+  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
+    ExtraRegInfo.resize(MRI->getNumVirtRegs());
+    ExtraRegInfo[VirtReg.reg()].Stage = Stage;
+  }
+  unsigned getCascade(Register Reg) const { return ExtraRegInfo[Reg].Cascade; }
+  void setCascade(Register Reg, unsigned Cascade) {
+    ExtraRegInfo[Reg].Cascade = Cascade;
+  }
+  unsigned getOrAssignNewCascade(Register Reg) {
+    unsigned Cascade = ExtraRegInfo[Reg].Cascade;
+    if (!Cascade)
+      Cascade = ExtraRegInfo[Reg].Cascade = NextCascade++;
+    return Cascade;
+  }
+
+  unsigned getCascadeOrCurrentNext(Register Reg) const {
+    unsigned Cascade = getCascade(Reg);
+    if (!Cascade)
+      Cascade = NextCascade;
+    return Cascade;
+  }
+
+  void addNew(Register Reg) {
+    ExtraRegInfo.grow(Reg);
+    if (ExtraRegInfo[Reg].Stage == RS_New)
+      ExtraRegInfo[Reg].Stage = RS_Assign;
+  }
+  template <typename Iterator>
+  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
+    ExtraRegInfo.resize(MRI->getNumVirtRegs());
+    for (; Begin != End; ++Begin) {
+      Register Reg = *Begin;
+      if (ExtraRegInfo[Reg].Stage == RS_New)
+        ExtraRegInfo[Reg].Stage = NewStage;
+    }
+  }
+
+  /// Returns true if the given \p PhysReg is a callee saved register and has
+  /// not been used for allocation yet.
+  bool isUnusedCalleeSavedReg(MCRegister PhysReg) const;
+
+  void LRE_DidCloneVirtReg(Register New, Register Old) {
+    // Cloning a register we haven't even heard about yet? Just ignore it.
+    if (!ExtraRegInfo.inBounds(Old))
+      return;
+
+    // LRE may clone a virtual register because dead code elimination causes it
+    // to be split into connected components. The new components are much
+    // smaller than the original, so they should get a new chance at being
+    // assigned. They keep the same stage as the parent.
+    ExtraRegInfo[Old].Stage = RS_Assign;
+    ExtraRegInfo.grow(New);
+    ExtraRegInfo[New] = ExtraRegInfo[Old];
+  }
+
+protected:
+  RegAllocEvictionAdvisor(const MachineFunction &MF, LiveRegMatrix *Matrix,
+                          LiveIntervals *LIS, VirtRegMap *VRM,
+                          const RegisterClassInfo &RegClassInfo);
+  Register canReassign(LiveInterval &VirtReg, Register PrevReg) const;
+
+  const MachineFunction &MF;
+  LiveRegMatrix *const Matrix;
+  LiveIntervals *const LIS;
+  VirtRegMap *const VRM;
+  MachineRegisterInfo *const MRI;
+  const TargetRegisterInfo *const TRI;
+  const RegisterClassInfo &RegClassInfo;
+  const ArrayRef<uint8_t> RegCosts;
+
+private:
+  struct RegInfo {
+    LiveRangeStage Stage = RS_New;
+
+    // Cascade - Eviction loop prevention. See mayEvictInterferenceBasedOnCost().
+    unsigned Cascade = 0;
+
+    RegInfo() = default;
+  };
+
+  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;
+  unsigned NextCascade = 1;
+};
+
+class RegAllocEvictionAdvisorAnalysis : public ImmutablePass {
+public:
+  RegAllocEvictionAdvisorAnalysis();
+  static char ID;
+  std::unique_ptr<RegAllocEvictionAdvisor>
+  getAdvisor(const MachineFunction &MF, LiveRegMatrix *Matrix,
+             LiveIntervals *LIS, VirtRegMap *VRM,
+             const RegisterClassInfo &RegClassInfo);
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+  }
+};
+
+/// Cost of evicting interference - used by default advisor, and the eviction
+/// chain heuristic in RegAllocGreedy.
+// FIXME: this can probably be made an implementation detail of the default
+// advisor, if the eviction chain logic can be refactored.
+struct EvictionCost {
+  unsigned BrokenHints = 0; ///< Total number of broken hints.
+  float MaxWeight = 0;      ///< Maximum spill weight evicted.
+
+  EvictionCost() = default;
+
+  bool isMax() const { return BrokenHints == ~0u; }
+
+  void setMax() { BrokenHints = ~0u; }
+
+  void setBrokenHints(unsigned NHints) { BrokenHints = NHints; }
+
+  bool operator<(const EvictionCost &O) const {
+    return std::tie(BrokenHints, MaxWeight) <
+           std::tie(O.BrokenHints, O.MaxWeight);
+  }
+};
+} // namespace llvm
+
+#endif // LLVM_CODEGEN_REGALLOCEVICTIONADVISOR_H
diff --git a/llvm/lib/CodeGen/RegAllocEvictionAdvisor.cpp b/llvm/lib/CodeGen/RegAllocEvictionAdvisor.cpp
new file mode 100644
--- /dev/null
+++ b/llvm/lib/CodeGen/RegAllocEvictionAdvisor.cpp
@@ -0,0 +1,327 @@
+//===- RegAllocEvictionAdvisor.cpp - eviction advisor ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of the default eviction advisor and of the Analysis pass.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RegAllocEvictionAdvisor.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/RegisterClassInfo.h"
+#include "llvm/CodeGen/VirtRegMap.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+#define DEBUG_TYPE "regalloc"
+
+// TODO: this is the same for InlineAdvisor and will likely be the same for
+// other places we use learned policies, so we can unify them.
+enum class AdvisorMode : int { Default, Release, Development }; + +static cl::opt + Mode("regalloc-enable-advisor", cl::Hidden, cl::init(AdvisorMode::Default), + cl::desc("Enable regalloc advisor mode"), + cl::values(clEnumValN(AdvisorMode::Default, "default", "Default"), + clEnumValN(AdvisorMode::Release, "release", "precompiled"), + clEnumValN(AdvisorMode::Development, "development", + "for training"))); + +static cl::opt EnableLocalReassignment( + "enable-local-reassign", cl::Hidden, + cl::desc("Local reassignment can yield better allocation decisions, but " + "may be compile time intensive"), + cl::init(false)); + +namespace { + +class DefaultEvictionAdvisor : public RegAllocEvictionAdvisor { +public: + DefaultEvictionAdvisor(const MachineFunction &MF, LiveRegMatrix *Matrix, + LiveIntervals *LIS, VirtRegMap *VRM, + const RegisterClassInfo &RegClassInfo) + : RegAllocEvictionAdvisor(MF, Matrix, LIS, VRM, RegClassInfo), + EnableLocalReassign(EnableLocalReassignment || + MF.getSubtarget().enableRALocalReassignment( + MF.getTarget().getOptLevel())) {} + +private: + bool shouldEvictInterference( + LiveInterval &VirtReg, MCRegister PhysReg, bool IsHint, + const SmallVirtRegSet &FixedRegisters) const override { + EvictionCost MaxCost; + MaxCost.setBrokenHints(1); + return mayEvictInterferenceBasedOnCost(VirtReg, PhysReg, true, MaxCost, + FixedRegisters); + } + + bool + mayEvictInterferenceBasedOnCost(LiveInterval &VirtReg, MCRegister PhysReg, + bool IsHint, EvictionCost &MaxCost, + const SmallVirtRegSet &FixedRegisters) const; + MCRegister tryFindEvictionCandidate(LiveInterval &, AllocationOrder &, + uint8_t, + const SmallVirtRegSet &) const override; + bool shouldEvict(LiveInterval &A, bool IsHint, LiveInterval &B, + bool BreaksHint) const; + const bool EnableLocalReassign; +}; + +} // namespace + +char RegAllocEvictionAdvisorAnalysis::ID = 0; + +INITIALIZE_PASS(RegAllocEvictionAdvisorAnalysis, "reg-alloc-eviction-advisor", + "Register Allocation Eviction Advisor", false, true) + +RegAllocEvictionAdvisorAnalysis::RegAllocEvictionAdvisorAnalysis() + : ImmutablePass(ID) { + initializeRegAllocEvictionAdvisorAnalysisPass( + *PassRegistry::getPassRegistry()); +} + +RegAllocEvictionAdvisor::RegAllocEvictionAdvisor( + const MachineFunction &MF, LiveRegMatrix *Matrix, LiveIntervals *LIS, + VirtRegMap *VRM, const RegisterClassInfo &RegClassInfo) + : MF(MF), Matrix(Matrix), LIS(LIS), VRM(VRM), MRI(&VRM->getRegInfo()), + TRI(MF.getSubtarget().getRegisterInfo()), RegClassInfo(RegClassInfo), + RegCosts(TRI->getRegisterCosts(MF)) {} + +bool RegAllocEvictionAdvisor::isUnusedCalleeSavedReg(MCRegister PhysReg) const { + MCRegister CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg); + if (!CSR) + return false; + + return !Matrix->isPhysRegUsed(PhysReg); +} + +Register RegAllocEvictionAdvisor::canReassign(LiveInterval &VirtReg, + Register PrevReg) const { + auto Order = + AllocationOrder::create(VirtReg.reg(), *VRM, RegClassInfo, Matrix); + MCRegister PhysReg; + for (auto I = Order.begin(), E = Order.end(); I != E && !PhysReg; ++I) { + if ((*I).id() == PrevReg.id()) + continue; + + MCRegUnitIterator Units(*I, TRI); + for (; Units.isValid(); ++Units) { + // Instantiate a "subquery", not to be confused with the Queries array. + LiveIntervalUnion::Query subQ(VirtReg, Matrix->getLiveUnions()[*Units]); + if (subQ.checkInterference()) + break; + } + // If no units have interference, break out with the current PhysReg. 
+ if (!Units.isValid()) + PhysReg = *I; + } + if (PhysReg) + LLVM_DEBUG(dbgs() << "can reassign: " << VirtReg << " from " + << printReg(PrevReg, TRI) << " to " + << printReg(PhysReg, TRI) << '\n'); + return PhysReg; +} + +MCRegister DefaultEvictionAdvisor::tryFindEvictionCandidate( + LiveInterval &VirtReg, AllocationOrder &Order, uint8_t CostPerUseLimit, + const SmallVirtRegSet &FixedRegisters) const { + // Keep track of the cheapest interference seen so far. + EvictionCost BestCost; + BestCost.setMax(); + MCRegister BestPhys; + unsigned OrderLimit = Order.getOrder().size(); + + // When we are just looking for a reduced cost per use, don't break any + // hints, and only evict smaller spill weights. + if (CostPerUseLimit < uint8_t(~0u)) { + BestCost.BrokenHints = 0; + BestCost.MaxWeight = VirtReg.weight(); + + // Check of any registers in RC are below CostPerUseLimit. + const TargetRegisterClass *RC = MRI->getRegClass(VirtReg.reg()); + uint8_t MinCost = RegClassInfo.getMinCost(RC); + if (MinCost >= CostPerUseLimit) { + LLVM_DEBUG(dbgs() << TRI->getRegClassName(RC) << " minimum cost = " + << MinCost << ", no cheaper registers to be found.\n"); + return 0; + } + + // It is normal for register classes to have a long tail of registers with + // the same cost. We don't need to look at them if they're too expensive. + if (RegCosts[Order.getOrder().back()] >= CostPerUseLimit) { + OrderLimit = RegClassInfo.getLastCostChange(RC); + LLVM_DEBUG(dbgs() << "Only trying the first " << OrderLimit + << " regs.\n"); + } + } + + for (auto I = Order.begin(), E = Order.getOrderLimitEnd(OrderLimit); I != E; + ++I) { + MCRegister PhysReg = *I; + assert(PhysReg); + if (RegCosts[PhysReg] >= CostPerUseLimit) + continue; + // The first use of a callee-saved register in a function has cost 1. + // Don't start using a CSR when the CostPerUseLimit is low. + if (CostPerUseLimit == 1 && isUnusedCalleeSavedReg(PhysReg)) { + LLVM_DEBUG( + dbgs() << printReg(PhysReg, TRI) << " would clobber CSR " + << printReg(RegClassInfo.getLastCalleeSavedAlias(PhysReg), TRI) + << '\n'); + continue; + } + + if (!mayEvictInterferenceBasedOnCost(VirtReg, PhysReg, false, BestCost, + FixedRegisters)) + continue; + + // Best so far. + BestPhys = PhysReg; + + // Stop if the hint can be used. + if (I.isHint()) + break; + } + return BestPhys; +} + +bool DefaultEvictionAdvisor::mayEvictInterferenceBasedOnCost( + LiveInterval &VirtReg, MCRegister PhysReg, bool IsHint, + EvictionCost &MaxCost, const SmallVirtRegSet &FixedRegisters) const { + // It is only possible to evict virtual register interference. + if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg) + return false; + + bool IsLocal = LIS->intervalIsInOneMBB(VirtReg); + + // Find VirtReg's cascade number. This will be unassigned if VirtReg was + // never involved in an eviction before. If a cascade number was assigned, + // deny evicting anything with the same or a newer cascade number. This + // prevents infinite eviction loops. + // + // This works out so a register without a cascade number is allowed to evict + // anything, and it can be evicted by anything. + unsigned Cascade = getCascadeOrCurrentNext(VirtReg.reg()); + + EvictionCost Cost; + for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) { + LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units); + // If there is 10 or more interferences, chances are one is heavier. 
+ const auto &Interferences = Q.interferingVRegs(10); + if (Interferences.size() >= 10) + return false; + + // Check if any interfering live range is heavier than MaxWeight. + for (LiveInterval *Intf : reverse(Q.interferingVRegs())) { + assert(Register::isVirtualRegister(Intf->reg()) && + "Only expecting virtual register interference from query"); + + // Do not allow eviction of a virtual register if we are in the middle + // of last-chance recoloring and this virtual register is one that we + // have scavenged a physical register for. + if (FixedRegisters.count(Intf->reg())) + return false; + + // Never evict spill products. They cannot split or spill. + if (getStage(*Intf) == RS_Done) + return false; + // Once a live range becomes small enough, it is urgent that we find a + // register for it. This is indicated by an infinite spill weight. These + // urgent live ranges get to evict almost anything. + // + // Also allow urgent evictions of unspillable ranges from a strictly + // larger allocation order. + bool Urgent = + !VirtReg.isSpillable() && + (Intf->isSpillable() || + RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg())) < + RegClassInfo.getNumAllocatableRegs( + MRI->getRegClass(Intf->reg()))); + // Only evict older cascades or live ranges without a cascade. + unsigned IntfCascade = getCascade(Intf->reg()); + if (Cascade <= IntfCascade) { + if (!Urgent) + return false; + // We permit breaking cascades for urgent evictions. It should be the + // last resort, though, so make it really expensive. + Cost.BrokenHints += 10; + } + // Would this break a satisfied hint? + bool BreaksHint = VRM->hasPreferredPhys(Intf->reg()); + // Update eviction cost. + Cost.BrokenHints += BreaksHint; + Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight()); + // Abort if this would be too expensive. + if (!(Cost < MaxCost)) + return false; + if (Urgent) + continue; + // Apply the eviction policy for non-urgent evictions. + if (!shouldEvict(VirtReg, IsHint, *Intf, BreaksHint)) + return false; + // If !MaxCost.isMax(), then we're just looking for a cheap register. + // Evicting another local live range in this case could lead to + // suboptimal coloring. + if (!MaxCost.isMax() && IsLocal && LIS->intervalIsInOneMBB(*Intf) && + (!EnableLocalReassign || !canReassign(*Intf, PhysReg))) { + return false; + } + } + } + MaxCost = Cost; + return true; +} + +/// shouldEvict - determine if A should evict the assigned live range B. The +/// eviction policy defined by this function together with the allocation +/// order defined by enqueue() decides which registers ultimately end up being +/// split and spilled. +/// +/// Cascade numbers are used to prevent infinite loops if this function is a +/// cyclic relation. +/// +/// @param A The live range to be assigned. +/// @param IsHint True when A is about to be assigned to its preferred +/// register. +/// @param B The live range to be evicted. +/// @param BreaksHint True when B is already assigned to its preferred +/// register. +bool DefaultEvictionAdvisor::shouldEvict(LiveInterval &A, bool IsHint, + LiveInterval &B, + bool BreaksHint) const { + bool CanSplit = getStage(B) < RS_Spill; + + // Be fairly aggressive about following hints as long as the evictee can be + // split. 
+ if (CanSplit && IsHint && !BreaksHint) + return true; + + if (A.weight() > B.weight()) { + LLVM_DEBUG(dbgs() << "should evict: " << B << " w= " << B.weight() << '\n'); + return true; + } + return false; +} + +std::unique_ptr +RegAllocEvictionAdvisorAnalysis::getAdvisor( + const MachineFunction &MF, LiveRegMatrix *Matrix, LiveIntervals *LIS, + VirtRegMap *VRM, const RegisterClassInfo &RegClassInfo) { + switch (Mode) { + case AdvisorMode::Default: + return std::make_unique(MF, Matrix, LIS, VRM, + RegClassInfo); + case AdvisorMode::Development: + case AdvisorMode::Release: + MF.getFunction().getParent()->getContext().emitError( + "Unsupported reg alloc advisor mode"); + return nullptr; + } +} \ No newline at end of file diff --git a/llvm/lib/CodeGen/RegAllocGreedy.cpp b/llvm/lib/CodeGen/RegAllocGreedy.cpp --- a/llvm/lib/CodeGen/RegAllocGreedy.cpp +++ b/llvm/lib/CodeGen/RegAllocGreedy.cpp @@ -15,6 +15,7 @@ #include "InterferenceCache.h" #include "LiveDebugVariables.h" #include "RegAllocBase.h" +#include "RegAllocEvictionAdvisor.h" #include "SpillPlacement.h" #include "SplitKit.h" #include "llvm/ADT/ArrayRef.h" @@ -57,6 +58,7 @@ #include "llvm/CodeGen/TargetRegisterInfo.h" #include "llvm/CodeGen/TargetSubtargetInfo.h" #include "llvm/CodeGen/VirtRegMap.h" +#include "llvm/IR/DebugInfoMetadata.h" #include "llvm/IR/Function.h" #include "llvm/IR/LLVMContext.h" #include "llvm/MC/MCRegisterInfo.h" @@ -69,7 +71,6 @@ #include "llvm/Support/Timer.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Target/TargetMachine.h" -#include "llvm/IR/DebugInfoMetadata.h" #include #include #include @@ -111,12 +112,6 @@ "and interference cutoffs of last chance recoloring"), cl::Hidden); -static cl::opt EnableLocalReassignment( - "enable-local-reassign", cl::Hidden, - cl::desc("Local reassignment can yield better allocation decisions, but " - "may be compile time intensive"), - cl::init(false)); - static cl::opt EnableDeferredSpilling( "enable-deferred-spilling", cl::Hidden, cl::desc("Instead of spilling a variable right away, defer the actual " @@ -148,7 +143,6 @@ // Convenient shortcuts. using PQueue = std::priority_queue>; using SmallLISet = SmallPtrSet; - using SmallVirtRegSet = SmallSet; // context MachineFunction *MF; @@ -172,49 +166,8 @@ // state std::unique_ptr SpillerInstance; PQueue Queue; - unsigned NextCascade; std::unique_ptr VRAI; - - // Live ranges pass through a number of stages as we try to allocate them. - // Some of the stages may also create new live ranges: - // - // - Region splitting. - // - Per-block splitting. - // - Local splitting. - // - Spilling. - // - // Ranges produced by one of the stages skip the previous stages when they are - // dequeued. This improves performance because we can skip interference checks - // that are unlikely to give any results. It also guarantees that the live - // range splitting algorithm terminates, something that is otherwise hard to - // ensure. - enum LiveRangeStage { - /// Newly created live range that has never been queued. - RS_New, - - /// Only attempt assignment and eviction. Then requeue as RS_Split. - RS_Assign, - - /// Attempt live range splitting if assignment is impossible. - RS_Split, - - /// Attempt more aggressive live range splitting that is guaranteed to make - /// progress. This is used for split products that may not be making - /// progress. - RS_Split2, - - /// Live range will be spilled. No more splitting will be attempted. - RS_Spill, - - - /// Live range is in memory. 
Because of other evictions, it might get moved - /// in a register in the end. - RS_Memory, - - /// There is nothing more we can do to this live range. Abort compilation - /// if it can't be assigned. - RS_Done - }; + std::unique_ptr EvictAdvisor; // Enum CutOffStage to keep a track whether the register allocation failed // because of the cutoffs encountered in last chance recoloring. @@ -236,56 +189,6 @@ static const char *const StageName[]; #endif - // RegInfo - Keep additional information about each live range. - struct RegInfo { - LiveRangeStage Stage = RS_New; - - // Cascade - Eviction loop prevention. See canEvictInterference(). - unsigned Cascade = 0; - - RegInfo() = default; - }; - - IndexedMap ExtraRegInfo; - - LiveRangeStage getStage(const LiveInterval &VirtReg) const { - return ExtraRegInfo[VirtReg.reg()].Stage; - } - - void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) { - ExtraRegInfo.resize(MRI->getNumVirtRegs()); - ExtraRegInfo[VirtReg.reg()].Stage = Stage; - } - - template - void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) { - ExtraRegInfo.resize(MRI->getNumVirtRegs()); - for (;Begin != End; ++Begin) { - Register Reg = *Begin; - if (ExtraRegInfo[Reg].Stage == RS_New) - ExtraRegInfo[Reg].Stage = NewStage; - } - } - - /// Cost of evicting interference. - struct EvictionCost { - unsigned BrokenHints = 0; ///< Total number of broken hints. - float MaxWeight = 0; ///< Maximum spill weight evicted. - - EvictionCost() = default; - - bool isMax() const { return BrokenHints == ~0u; } - - void setMax() { BrokenHints = ~0u; } - - void setBrokenHints(unsigned NHints) { BrokenHints = NHints; } - - bool operator<(const EvictionCost &O) const { - return std::tie(BrokenHints, MaxWeight) < - std::tie(O.BrokenHints, O.MaxWeight); - } - }; - /// EvictionTrack - Keeps track of past evictions in order to optimize region /// split decision. class EvictionTrack { @@ -396,10 +299,6 @@ /// Callee-save register cost, calculated once per machine function. BlockFrequency CSRCost; - /// Run or not the local reassignment heuristic. This information is - /// obtained from the TargetSubtargetInfo. - bool EnableLocalReassign; - /// Enable or not the consideration of the cost of local intervals created /// by a split candidate when choosing the best split candidate. bool EnableAdvancedRASplitCost; @@ -468,10 +367,6 @@ bool calcCompactRegion(GlobalSplitCandidate&); void splitAroundRegion(LiveRangeEdit&, ArrayRef); void calcGapWeights(MCRegister, SmallVectorImpl &); - Register canReassign(LiveInterval &VirtReg, Register PrevReg) const; - bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool) const; - bool canEvictInterference(LiveInterval &, MCRegister, bool, EvictionCost &, - const SmallVirtRegSet &) const; bool canEvictInterferenceInRange(const LiveInterval &VirtReg, MCRegister PhysReg, SlotIndex Start, SlotIndex End, EvictionCost &MaxCost) const; @@ -489,8 +384,8 @@ SmallVectorImpl&, const SmallVirtRegSet&); MCRegister tryEvict(LiveInterval &, AllocationOrder &, - SmallVectorImpl &, uint8_t, - const SmallVirtRegSet &); + SmallVectorImpl &, uint8_t, + const SmallVirtRegSet &); MCRegister tryRegionSplit(LiveInterval &, AllocationOrder &, SmallVectorImpl &); /// Calculate cost of region splitting. @@ -545,8 +440,6 @@ BlockFrequency getBrokenHintFreq(const HintsInfo &, MCRegister); void collectHintInfo(Register, HintsInfo &); - bool isUnusedCalleeSavedReg(MCRegister PhysReg) const; - /// Greedy RA statistic to remark. 
struct RAGreedyStats { unsigned Reloads = 0; @@ -613,6 +506,7 @@ INITIALIZE_PASS_DEPENDENCY(EdgeBundles) INITIALIZE_PASS_DEPENDENCY(SpillPlacement) INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass) +INITIALIZE_PASS_DEPENDENCY(RegAllocEvictionAdvisorAnalysis) INITIALIZE_PASS_END(RAGreedy, "greedy", "Greedy Register Allocator", false, false) @@ -679,6 +573,7 @@ AU.addRequired(); AU.addRequired(); AU.addRequired(); + AU.addRequired(); MachineFunctionPass::getAnalysisUsage(AU); } @@ -712,22 +607,11 @@ } void RAGreedy::LRE_DidCloneVirtReg(Register New, Register Old) { - // Cloning a register we haven't even heard about yet? Just ignore it. - if (!ExtraRegInfo.inBounds(Old)) - return; - - // LRE may clone a virtual register because dead code elimination causes it to - // be split into connected components. The new components are much smaller - // than the original, so they should get a new chance at being assigned. - // same stage as the parent. - ExtraRegInfo[Old].Stage = RS_Assign; - ExtraRegInfo.grow(New); - ExtraRegInfo[New] = ExtraRegInfo[Old]; + EvictAdvisor->LRE_DidCloneVirtReg(New, Old); } void RAGreedy::releaseMemory() { SpillerInstance.reset(); - ExtraRegInfo.clear(); GlobalCand.clear(); } @@ -741,15 +625,14 @@ assert(Reg.isVirtual() && "Can only enqueue virtual registers"); unsigned Prio; - ExtraRegInfo.grow(Reg); - if (ExtraRegInfo[Reg].Stage == RS_New) - ExtraRegInfo[Reg].Stage = RS_Assign; + EvictAdvisor->addNew(Reg); - if (ExtraRegInfo[Reg].Stage == RS_Split) { + auto Stage = EvictAdvisor->getStage(Reg); + if (Stage == RS_Split) { // Unsplit ranges that couldn't be allocated immediately are deferred until // everything else has been allocated. Prio = Size; - } else if (ExtraRegInfo[Reg].Stage == RS_Memory) { + } else if (Stage == RS_Memory) { // Memory operand should be considered last. // Change the priority such that Memory operand are assigned in // the reverse order that they came in. @@ -764,7 +647,7 @@ bool ForceGlobal = !ReverseLocal && (Size / SlotIndex::InstrDist) > (2 * RCI.getNumAllocatableRegs(&RC)); - if (ExtraRegInfo[Reg].Stage == RS_Assign && !ForceGlobal && !LI->empty() && + if (Stage == RS_Assign && !ForceGlobal && !LI->empty() && LIS->intervalIsInOneMBB(*LI)) { // Allocate original local ranges in linear instruction order. Since they // are singly defined, this produces optimal coloring in the absence of @@ -838,10 +721,8 @@ if (Order.isHint(Hint)) { MCRegister PhysHint = Hint.asMCReg(); LLVM_DEBUG(dbgs() << "missed hint " << printReg(PhysHint, TRI) << '\n'); - EvictionCost MaxCost; - MaxCost.setBrokenHints(1); - if (canEvictInterference(VirtReg, PhysHint, true, MaxCost, - FixedRegisters)) { + if (EvictAdvisor->shouldEvictInterference(VirtReg, PhysHint, true, + FixedRegisters)) { evictInterference(VirtReg, PhysHint, NewVRegs); return PhysHint; } @@ -867,159 +748,6 @@ // Interference eviction //===----------------------------------------------------------------------===// -Register RAGreedy::canReassign(LiveInterval &VirtReg, Register PrevReg) const { - auto Order = - AllocationOrder::create(VirtReg.reg(), *VRM, RegClassInfo, Matrix); - MCRegister PhysReg; - for (auto I = Order.begin(), E = Order.end(); I != E && !PhysReg; ++I) { - if ((*I).id() == PrevReg.id()) - continue; - - MCRegUnitIterator Units(*I, TRI); - for (; Units.isValid(); ++Units) { - // Instantiate a "subquery", not to be confused with the Queries array. 
- LiveIntervalUnion::Query subQ(VirtReg, Matrix->getLiveUnions()[*Units]); - if (subQ.checkInterference()) - break; - } - // If no units have interference, break out with the current PhysReg. - if (!Units.isValid()) - PhysReg = *I; - } - if (PhysReg) - LLVM_DEBUG(dbgs() << "can reassign: " << VirtReg << " from " - << printReg(PrevReg, TRI) << " to " - << printReg(PhysReg, TRI) << '\n'); - return PhysReg; -} - -/// shouldEvict - determine if A should evict the assigned live range B. The -/// eviction policy defined by this function together with the allocation order -/// defined by enqueue() decides which registers ultimately end up being split -/// and spilled. -/// -/// Cascade numbers are used to prevent infinite loops if this function is a -/// cyclic relation. -/// -/// @param A The live range to be assigned. -/// @param IsHint True when A is about to be assigned to its preferred -/// register. -/// @param B The live range to be evicted. -/// @param BreaksHint True when B is already assigned to its preferred register. -bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint, - LiveInterval &B, bool BreaksHint) const { - bool CanSplit = getStage(B) < RS_Spill; - - // Be fairly aggressive about following hints as long as the evictee can be - // split. - if (CanSplit && IsHint && !BreaksHint) - return true; - - if (A.weight() > B.weight()) { - LLVM_DEBUG(dbgs() << "should evict: " << B << " w= " << B.weight() << '\n'); - return true; - } - return false; -} - -/// canEvictInterference - Return true if all interferences between VirtReg and -/// PhysReg can be evicted. -/// -/// @param VirtReg Live range that is about to be assigned. -/// @param PhysReg Desired register for assignment. -/// @param IsHint True when PhysReg is VirtReg's preferred register. -/// @param MaxCost Only look for cheaper candidates and update with new cost -/// when returning true. -/// @returns True when interference can be evicted cheaper than MaxCost. -bool RAGreedy::canEvictInterference( - LiveInterval &VirtReg, MCRegister PhysReg, bool IsHint, - EvictionCost &MaxCost, const SmallVirtRegSet &FixedRegisters) const { - // It is only possible to evict virtual register interference. - if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg) - return false; - - bool IsLocal = LIS->intervalIsInOneMBB(VirtReg); - - // Find VirtReg's cascade number. This will be unassigned if VirtReg was never - // involved in an eviction before. If a cascade number was assigned, deny - // evicting anything with the same or a newer cascade number. This prevents - // infinite eviction loops. - // - // This works out so a register without a cascade number is allowed to evict - // anything, and it can be evicted by anything. - unsigned Cascade = ExtraRegInfo[VirtReg.reg()].Cascade; - if (!Cascade) - Cascade = NextCascade; - - EvictionCost Cost; - for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) { - LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units); - // If there is 10 or more interferences, chances are one is heavier. - const auto &Interferences = Q.interferingVRegs(10); - if (Interferences.size() >= 10) - return false; - - // Check if any interfering live range is heavier than MaxWeight. 
- for (LiveInterval *Intf : reverse(Interferences)) { - assert(Register::isVirtualRegister(Intf->reg()) && - "Only expecting virtual register interference from query"); - - // Do not allow eviction of a virtual register if we are in the middle - // of last-chance recoloring and this virtual register is one that we - // have scavenged a physical register for. - if (FixedRegisters.count(Intf->reg())) - return false; - - // Never evict spill products. They cannot split or spill. - if (getStage(*Intf) == RS_Done) - return false; - // Once a live range becomes small enough, it is urgent that we find a - // register for it. This is indicated by an infinite spill weight. These - // urgent live ranges get to evict almost anything. - // - // Also allow urgent evictions of unspillable ranges from a strictly - // larger allocation order. - bool Urgent = - !VirtReg.isSpillable() && - (Intf->isSpillable() || - RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg())) < - RegClassInfo.getNumAllocatableRegs( - MRI->getRegClass(Intf->reg()))); - // Only evict older cascades or live ranges without a cascade. - unsigned IntfCascade = ExtraRegInfo[Intf->reg()].Cascade; - if (Cascade <= IntfCascade) { - if (!Urgent) - return false; - // We permit breaking cascades for urgent evictions. It should be the - // last resort, though, so make it really expensive. - Cost.BrokenHints += 10; - } - // Would this break a satisfied hint? - bool BreaksHint = VRM->hasPreferredPhys(Intf->reg()); - // Update eviction cost. - Cost.BrokenHints += BreaksHint; - Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight()); - // Abort if this would be too expensive. - if (!(Cost < MaxCost)) - return false; - if (Urgent) - continue; - // Apply the eviction policy for non-urgent evictions. - if (!shouldEvict(VirtReg, IsHint, *Intf, BreaksHint)) - return false; - // If !MaxCost.isMax(), then we're just looking for a cheap register. - // Evicting another local live range in this case could lead to suboptimal - // coloring. - if (!MaxCost.isMax() && IsLocal && LIS->intervalIsInOneMBB(*Intf) && - (!EnableLocalReassign || !canReassign(*Intf, PhysReg))) { - return false; - } - } - } - MaxCost = Cost; - return true; -} - /// Return true if all interferences between VirtReg and PhysReg between /// Start and End can be evicted. /// @@ -1049,7 +777,7 @@ if (!Register::isVirtualRegister(Intf->reg())) return false; // Never evict spill products. They cannot split or spill. - if (getStage(*Intf) == RS_Done) + if (EvictAdvisor->getStage(*Intf) == RS_Done) return false; // Would this break a satisfied hint? @@ -1112,9 +840,7 @@ // Make sure that VirtReg has a cascade number, and assign that cascade // number to every evicted register. These live ranges than then only be // evicted by a newer cascade, preventing infinite loops. 
- unsigned Cascade = ExtraRegInfo[VirtReg.reg()].Cascade; - if (!Cascade) - Cascade = ExtraRegInfo[VirtReg.reg()].Cascade = NextCascade++; + unsigned Cascade = EvictAdvisor->getOrAssignNewCascade(VirtReg.reg()); LLVM_DEBUG(dbgs() << "evicting " << printReg(PhysReg, TRI) << " interference: Cascade " << Cascade << '\n'); @@ -1140,25 +866,15 @@ LastEvicted.addEviction(PhysReg, VirtReg.reg(), Intf->reg()); Matrix->unassign(*Intf); - assert((ExtraRegInfo[Intf->reg()].Cascade < Cascade || + assert((EvictAdvisor->getCascade(Intf->reg()) < Cascade || VirtReg.isSpillable() < Intf->isSpillable()) && "Cannot decrease cascade number, illegal eviction"); - ExtraRegInfo[Intf->reg()].Cascade = Cascade; + EvictAdvisor->setCascade(Intf->reg(), Cascade); ++NumEvicted; NewVRegs.push_back(Intf->reg()); } } -/// Returns true if the given \p PhysReg is a callee saved register and has not -/// been used for allocation yet. -bool RAGreedy::isUnusedCalleeSavedReg(MCRegister PhysReg) const { - MCRegister CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg); - if (!CSR) - return false; - - return !Matrix->isPhysRegUsed(PhysReg); -} - /// tryEvict - Try to evict all interferences for a physreg. /// @param VirtReg Currently unassigned virtual register. /// @param Order Physregs to try. @@ -1170,64 +886,8 @@ NamedRegionTimer T("evict", "Evict", TimerGroupName, TimerGroupDescription, TimePassesIsEnabled); - // Keep track of the cheapest interference seen so far. - EvictionCost BestCost; - BestCost.setMax(); - MCRegister BestPhys; - unsigned OrderLimit = Order.getOrder().size(); - - // When we are just looking for a reduced cost per use, don't break any - // hints, and only evict smaller spill weights. - if (CostPerUseLimit < uint8_t(~0u)) { - BestCost.BrokenHints = 0; - BestCost.MaxWeight = VirtReg.weight(); - - // Check of any registers in RC are below CostPerUseLimit. - const TargetRegisterClass *RC = MRI->getRegClass(VirtReg.reg()); - uint8_t MinCost = RegClassInfo.getMinCost(RC); - if (MinCost >= CostPerUseLimit) { - LLVM_DEBUG(dbgs() << TRI->getRegClassName(RC) << " minimum cost = " - << MinCost << ", no cheaper registers to be found.\n"); - return 0; - } - - // It is normal for register classes to have a long tail of registers with - // the same cost. We don't need to look at them if they're too expensive. - if (RegCosts[Order.getOrder().back()] >= CostPerUseLimit) { - OrderLimit = RegClassInfo.getLastCostChange(RC); - LLVM_DEBUG(dbgs() << "Only trying the first " << OrderLimit - << " regs.\n"); - } - } - - for (auto I = Order.begin(), E = Order.getOrderLimitEnd(OrderLimit); I != E; - ++I) { - MCRegister PhysReg = *I; - assert(PhysReg); - if (RegCosts[PhysReg] >= CostPerUseLimit) - continue; - // The first use of a callee-saved register in a function has cost 1. - // Don't start using a CSR when the CostPerUseLimit is low. - if (CostPerUseLimit == 1 && isUnusedCalleeSavedReg(PhysReg)) { - LLVM_DEBUG( - dbgs() << printReg(PhysReg, TRI) << " would clobber CSR " - << printReg(RegClassInfo.getLastCalleeSavedAlias(PhysReg), TRI) - << '\n'); - continue; - } - - if (!canEvictInterference(VirtReg, PhysReg, false, BestCost, - FixedRegisters)) - continue; - - // Best so far. - BestPhys = PhysReg; - - // Stop if the hint can be used. 
- if (I.isHint()) - break; - } - + MCRegister BestPhys = EvictAdvisor->tryFindEvictionCandidate( + VirtReg, Order, CostPerUseLimit, FixedRegisters); if (BestPhys.isValid()) evictInterference(VirtReg, BestPhys, NewVRegs); return BestPhys; @@ -1807,7 +1467,7 @@ SE->finish(&IntvMap); DebugVars->splitRegister(Reg, LREdit.regs(), *LIS); - ExtraRegInfo.resize(MRI->getNumVirtRegs()); + EvictAdvisor->onNewRegCount(MRI->getNumVirtRegs()); unsigned OrigBlocks = SA->getNumLiveBlocks(); // Sort out the new intervals created by splitting. We get four kinds: @@ -1819,13 +1479,13 @@ LiveInterval &Reg = LIS->getInterval(LREdit.get(I)); // Ignore old intervals from DCE. - if (getStage(Reg) != RS_New) + if (EvictAdvisor->getStage(Reg) != RS_New) continue; // Remainder interval. Don't try splitting again, spill if it doesn't // allocate. if (IntvMap[I] == 0) { - setStage(Reg, RS_Spill); + EvictAdvisor->setStage(Reg, RS_Spill); continue; } @@ -1836,7 +1496,7 @@ LLVM_DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks << " blocks as original.\n"); // Don't allow repeated splitting as a safe guard against looping. - setStage(Reg, RS_Split2); + EvictAdvisor->setStage(Reg, RS_Split2); } continue; } @@ -1901,7 +1561,7 @@ unsigned BestCand = NoCand; for (MCPhysReg PhysReg : Order) { assert(PhysReg); - if (IgnoreCSR && isUnusedCalleeSavedReg(PhysReg)) + if (IgnoreCSR && EvictAdvisor->isUnusedCalleeSavedReg(PhysReg)) continue; // Discard bad candidates before we run out of interference cache cursors. @@ -2063,14 +1723,14 @@ // Tell LiveDebugVariables about the new ranges. DebugVars->splitRegister(Reg, LREdit.regs(), *LIS); - ExtraRegInfo.resize(MRI->getNumVirtRegs()); + EvictAdvisor->onNewRegCount(MRI->getNumVirtRegs()); // Sort out the new intervals created by splitting. The remainder interval // goes straight to spilling, the new local ranges get to stay RS_New. for (unsigned I = 0, E = LREdit.size(); I != E; ++I) { LiveInterval &LI = LIS->getInterval(LREdit.get(I)); - if (getStage(LI) == RS_New && IntvMap[I] == 0) - setStage(LI, RS_Spill); + if (EvictAdvisor->getStage(LI) == RS_New && IntvMap[I] == 0) + EvictAdvisor->setStage(LI, RS_Spill); } if (VerifyEnabled) @@ -2155,10 +1815,10 @@ SmallVector IntvMap; SE->finish(&IntvMap); DebugVars->splitRegister(VirtReg.reg(), LREdit.regs(), *LIS); - ExtraRegInfo.resize(MRI->getNumVirtRegs()); + EvictAdvisor->onNewRegCount(MRI->getNumVirtRegs()); // Assign all new registers to RS_Spill. This was the last chance. - setStage(LREdit.begin(), LREdit.end(), RS_Spill); + EvictAdvisor->setStage(LREdit.begin(), LREdit.end(), RS_Spill); return 0; } @@ -2326,7 +1986,7 @@ // These rules allow a 3 -> 2+3 split once, which we need. They also prevent // excessive splitting and infinite loops. // - bool ProgressRequired = getStage(VirtReg) >= RS_Split2; + bool ProgressRequired = EvictAdvisor->getStage(VirtReg) >= RS_Split2; // Best split candidate. unsigned BestBefore = NumGaps; @@ -2463,7 +2123,7 @@ assert(!ProgressRequired && "Didn't make progress when it was required."); for (unsigned I = 0, E = IntvMap.size(); I != E; ++I) if (IntvMap[I] == 1) { - setStage(LIS->getInterval(LREdit.get(I)), RS_Split2); + EvictAdvisor->setStage(LIS->getInterval(LREdit.get(I)), RS_Split2); LLVM_DEBUG(dbgs() << ' ' << printReg(LREdit.get(I))); } LLVM_DEBUG(dbgs() << '\n'); @@ -2484,7 +2144,7 @@ SmallVectorImpl &NewVRegs, const SmallVirtRegSet &FixedRegisters) { // Ranges must be Split2 or less. 
- if (getStage(VirtReg) >= RS_Spill) + if (EvictAdvisor->getStage(VirtReg) >= RS_Spill) return 0; // Local intervals are handled separately. @@ -2506,7 +2166,7 @@ // First try to split around a region spanning multiple blocks. RS_Split2 // ranges already made dubious progress with region splitting, so they go // straight to single block splitting. - if (getStage(VirtReg) < RS_Split2) { + if (EvictAdvisor->getStage(VirtReg) < RS_Split2) { MCRegister PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs); if (PhysReg || !NewVRegs.empty()) return PhysReg; @@ -2558,7 +2218,7 @@ // it would not be recolorable as it is in the same state as VirtReg. // However, if VirtReg has tied defs and Intf doesn't, then // there is still a point in examining if it can be recolorable. - if (((getStage(*Intf) == RS_Done && + if (((EvictAdvisor->getStage(*Intf) == RS_Done && MRI->getRegClass(Intf->reg()) == CurRC) && !(hasTiedDef(MRI, VirtReg.reg()) && !hasTiedDef(MRI, Intf->reg()))) || @@ -2622,8 +2282,9 @@ LLVM_DEBUG(dbgs() << "Try last chance recoloring for " << VirtReg << '\n'); // Ranges must be Done. - assert((getStage(VirtReg) >= RS_Done || !VirtReg.isSpillable()) && - "Last chance recoloring should really be last chance"); + assert( + (EvictAdvisor->getStage(VirtReg) >= RS_Done || !VirtReg.isSpillable()) && + "Last chance recoloring should really be last chance"); // Set the max depth to LastChanceRecoloringMaxDepth. // We may want to reconsider that if we end up with a too large search space // for target with hundreds of registers. @@ -2813,7 +2474,7 @@ RAGreedy::tryAssignCSRFirstTime(LiveInterval &VirtReg, AllocationOrder &Order, MCRegister PhysReg, uint8_t &CostPerUseLimit, SmallVectorImpl &NewVRegs) { - if (getStage(VirtReg) == RS_Spill && VirtReg.isSpillable()) { + if (EvictAdvisor->getStage(VirtReg) == RS_Spill && VirtReg.isSpillable()) { // We choose spill over using the CSR for the first time if the spill cost // is lower than CSRCost. SA->analyze(&VirtReg); @@ -2825,7 +2486,7 @@ CostPerUseLimit = 1; return 0; } - if (getStage(VirtReg) < RS_Split) { + if (EvictAdvisor->getStage(VirtReg) < RS_Split) { // We choose pre-splitting over using the CSR for the first time if // the cost of splitting is lower than CSRCost. SA->analyze(&VirtReg); @@ -3058,8 +2719,8 @@ // When NewVRegs is not empty, we may have made decisions such as evicting // a virtual register, go with the earlier decisions and use the physical // register. - if (CSRCost.getFrequency() && isUnusedCalleeSavedReg(PhysReg) && - NewVRegs.empty()) { + if (CSRCost.getFrequency() && + EvictAdvisor->isUnusedCalleeSavedReg(PhysReg) && NewVRegs.empty()) { MCRegister CSRReg = tryAssignCSRFirstTime(VirtReg, Order, PhysReg, CostPerUseLimit, NewVRegs); if (CSRReg || !NewVRegs.empty()) @@ -3070,9 +2731,9 @@ return PhysReg; } - LiveRangeStage Stage = getStage(VirtReg); + LiveRangeStage Stage = EvictAdvisor->getStage(VirtReg); LLVM_DEBUG(dbgs() << StageName[Stage] << " Cascade " - << ExtraRegInfo[VirtReg.reg()].Cascade << '\n'); + << EvictAdvisor->getCascade(VirtReg.reg()) << '\n'); // Try to evict a less worthy live range, but only for ranges from the primary // queue. The RS_Split ranges already failed to do this, and they should not @@ -3101,7 +2762,7 @@ // Wait until the second time, when all smaller ranges have been allocated. // This gives a better picture of the interference to split around. 
if (Stage < RS_Split) { - setStage(VirtReg, RS_Split); + EvictAdvisor->setStage(VirtReg, RS_Split); LLVM_DEBUG(dbgs() << "wait for second round\n"); NewVRegs.push_back(VirtReg.reg()); return 0; @@ -3127,12 +2788,12 @@ // Finally spill VirtReg itself. if ((EnableDeferredSpilling || TRI->shouldUseDeferredSpillingForVirtReg(*MF, VirtReg)) && - getStage(VirtReg) < RS_Memory) { + EvictAdvisor->getStage(VirtReg) < RS_Memory) { // TODO: This is experimental and in particular, we do not model // the live range splitting done by spilling correctly. // We would need a deep integration with the spiller to do the // right thing here. Anyway, that is still good for early testing. - setStage(VirtReg, RS_Memory); + EvictAdvisor->setStage(VirtReg, RS_Memory); LLVM_DEBUG(dbgs() << "Do as if this register is in memory\n"); NewVRegs.push_back(VirtReg.reg()); } else { @@ -3140,7 +2801,7 @@ TimerGroupDescription, TimePassesIsEnabled); LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats); spiller().spill(LRE); - setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done); + EvictAdvisor->setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done); // Tell LiveDebugVariables about the new ranges. Ranges not being covered by // the new regs are kept in LDV (still mapping to the old register), until @@ -3323,10 +2984,6 @@ TII = MF->getSubtarget().getInstrInfo(); RCI.runOnMachineFunction(mf); - EnableLocalReassign = EnableLocalReassignment || - MF->getSubtarget().enableRALocalReassignment( - MF->getTarget().getOptLevel()); - EnableAdvancedRASplitCost = ConsiderLocalIntervalCost.getNumOccurrences() ? ConsiderLocalIntervalCost @@ -3351,6 +3008,8 @@ initializeCSRCost(); RegCosts = TRI->getRegisterCosts(*MF); + EvictAdvisor = getAnalysis().getAdvisor( + *MF, Matrix, LIS, VRM, RegClassInfo); VRAI = std::make_unique(*MF, *LIS, *VRM, *Loops, *MBFI); SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM, *VRAI)); @@ -3361,9 +3020,6 @@ SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops)); SE.reset(new SplitEditor(*SA, *AA, *LIS, *VRM, *DomTree, *MBFI, *VRAI)); - ExtraRegInfo.clear(); - ExtraRegInfo.resize(MRI->getNumVirtRegs()); - NextCascade = 1; IntfCache.init(MF, Matrix->getLiveUnions(), Indexes, LIS, TRI); GlobalCand.resize(32); // This will grow as needed. SetOfBrokenHints.clear(); diff --git a/llvm/test/CodeGen/Generic/llc-start-stop.ll b/llvm/test/CodeGen/Generic/llc-start-stop.ll --- a/llvm/test/CodeGen/Generic/llc-start-stop.ll +++ b/llvm/test/CodeGen/Generic/llc-start-stop.ll @@ -18,7 +18,7 @@ ; START-AFTER-NEXT: Dominator Tree Construction ; RUN: llc < %s -debug-pass=Structure -start-before=loop-reduce -o /dev/null 2>&1 | FileCheck %s -check-prefix=START-BEFORE -; START-BEFORE: -machine-branch-prob -domtree +; START-BEFORE: -machine-branch-prob -reg-alloc-eviction-advisor -domtree ; START-BEFORE: FunctionPass Manager ; START-BEFORE: Loop Strength Reduction ; START-BEFORE-NEXT: Basic Alias Analysis (stateless AA impl)
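
Reviewer notes (illustrative only, not part of the patch):

The patch moves the eviction policy behind a virtual interface and exposes it through the new RegAllocEvictionAdvisorAnalysis immutable pass, selected with -regalloc-enable-advisor={default|release|development}. Only the default mode is implemented here; the other two report "Unsupported reg alloc advisor mode". Because the analysis is an ImmutablePass, it now appears in -debug-pass=Structure output, which is why llc-start-stop.ll is updated. Below is a minimal sketch of what a future non-default advisor would have to implement, assuming only the interface declared in RegAllocEvictionAdvisor.h; the class name NeverEvictAdvisor and its trivial policy are made up, and wiring it in would also require extending the switch in getAdvisor.

```cpp
#include "RegAllocEvictionAdvisor.h"
using namespace llvm;

// Hypothetical advisor that never proposes an eviction; it only illustrates
// the two override points the new interface requires.
class NeverEvictAdvisor : public RegAllocEvictionAdvisor {
public:
  NeverEvictAdvisor(const MachineFunction &MF, LiveRegMatrix *Matrix,
                    LiveIntervals *LIS, VirtRegMap *VRM,
                    const RegisterClassInfo &RegClassInfo)
      : RegAllocEvictionAdvisor(MF, Matrix, LIS, VRM, RegClassInfo) {}

  // An invalid MCRegister tells RAGreedy::tryEvict that no candidate exists.
  MCRegister tryFindEvictionCandidate(LiveInterval &, AllocationOrder &,
                                      uint8_t,
                                      const SmallVirtRegSet &) const override {
    return MCRegister();
  }

  // Never evict interference to satisfy a register hint either.
  bool shouldEvictInterference(LiveInterval &, MCRegister, bool,
                               const SmallVirtRegSet &) const override {
    return false;
  }
};
```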
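The cascade bookkeeping that moves into the advisor is the eviction-loop guard used by mayEvictInterferenceBasedOnCost: a range that was never involved in an eviction behaves as if it carried the next cascade number to be assigned, and interference may only be evicted without penalty when its own cascade is strictly older. A condensed restatement using the new public accessors; the free function itself is illustrative, not part of the patch:

```cpp
#include "RegAllocEvictionAdvisor.h"
using namespace llvm;

// Would the cascade rule alone let `Evictor` evict `Intf`? This mirrors the
// `Cascade <= IntfCascade` check in the default advisor.
static bool cascadeAllowsEviction(const RegAllocEvictionAdvisor &Advisor,
                                  Register Evictor, Register Intf) {
  // A range with no cascade number acts as if it had the next one to be
  // assigned, so it may evict anything...
  unsigned EvictorCascade = Advisor.getCascadeOrCurrentNext(Evictor);
  // ...and a range whose cascade is still 0 can be evicted by anything.
  unsigned IntfCascade = Advisor.getCascade(Intf);
  // Only strictly older cascades may be evicted; equal or newer ones are
  // refused unless the eviction is urgent, which costs 10 broken hints.
  return EvictorCascade > IntfCascade;
}
```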
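EvictionCost, now shared through the header, compares lexicographically via std::tie, so the number of broken hints always dominates the maximum spill weight. A tiny standalone illustration (the weights are made-up values):

```cpp
#include "RegAllocEvictionAdvisor.h"
#include <cassert>

void evictionCostOrderingExample() {
  llvm::EvictionCost NoHints, OneHint;
  NoHints.BrokenHints = 0;
  NoHints.MaxWeight = 5.0f; // heavy interference, but no hint is broken
  OneHint.BrokenHints = 1;
  OneHint.MaxWeight = 0.1f; // light interference, but a hint is broken
  // Breaking fewer hints is always cheaper, regardless of spill weight.
  assert(NoHints < OneHint);
}
```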
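For any other register allocator that wants to reuse the advisor, the consumption pattern is the one RegAllocGreedy adopts in this patch: declare the dependency, then ask the analysis for a function-specific advisor in runOnMachineFunction. Condensed from the RegAllocGreedy.cpp hunks above, with member and variable names as in the patch:

```cpp
// In getAnalysisUsage():
AU.addRequired<RegAllocEvictionAdvisorAnalysis>();

// In runOnMachineFunction(), once LiveRegMatrix, LiveIntervals, VirtRegMap and
// RegisterClassInfo are available:
EvictAdvisor = getAnalysis<RegAllocEvictionAdvisorAnalysis>().getAdvisor(
    *MF, Matrix, LIS, VRM, RegClassInfo);
```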