Index: lib/CodeGen/RegAllocFast.cpp =================================================================== --- lib/CodeGen/RegAllocFast.cpp +++ lib/CodeGen/RegAllocFast.cpp @@ -54,7 +54,13 @@ STATISTIC(NumStores, "Number of stores added"); STATISTIC(NumLoads , "Number of loads added"); -STATISTIC(NumCopies, "Number of copies coalesced"); +STATISTIC(NumCoalesced, "Number of copies coalesced"); + +#ifndef NDEBUG +// FIXME: Remove this switch when all testcases are fixed! +static cl::opt IgnoreMissingDefs("rafast-ignore-missing-defs", + cl::Hidden); +#endif static RegisterRegAlloc fastRegAlloc("fast", "fast register allocator", createFastRegisterAllocator); @@ -85,10 +91,13 @@ MachineInstr *LastUse = nullptr; ///< Last instr to use reg. unsigned VirtReg; ///< Virtual register number. MCPhysReg PhysReg = 0; ///< Currently held here. - unsigned short LastOpNum = 0; ///< OpNum on LastUse. - bool Dirty = false; ///< Register needs spill. + bool LiveOut = false; ///< Register is possibly live out. + bool Reloaded = false; ///< Register was reloaded. + bool Error = false; ///< Could not allocate. - explicit LiveReg(unsigned v) : VirtReg(v) {} + explicit LiveReg(unsigned VirtReg) : VirtReg(VirtReg) { + assert(TargetRegisterInfo::isVirtualRegister(VirtReg)); + } unsigned getSparseSetIndex() const { return TargetRegisterInfo::virtReg2Index(VirtReg); @@ -96,37 +105,41 @@ }; using LiveRegMap = SparseSet; - /// This map contains entries for each virtual register that is currently /// available in a physical register. LiveRegMap LiveVirtRegs; - DenseMap> LiveDbgValueMap; + DenseMap> LiveDbgValueMap; + /// List of DBG_VALUE that we encountered after the last use of a vreg + /// which could not get allocated yet. + DenseMap> DanglingDbgValues; - /// Track the state of a physical register. - enum RegState { - /// A disabled register is not available for allocation, but an alias may - /// be in use. A register can only be moved out of the disabled state if - /// all aliases are disabled. 
- regDisabled, + /// Has a bit set for every virtual register for which it was determined + /// that it is alive accross blocks. + BitVector MayLiveAccrossBlocks; + /// State of a register unit. + enum RegState { /// A free register is not currently in use and can be allocated /// immediately without checking aliases. regFree, - /// A reserved register has been assigned explicitly (e.g., setting up a - /// call parameter), and it remains reserved until it is used. - regReserved + /// A pre-assigned register has been assigned before register allocation + /// (e.g., setting up a call parameter). + regPreAssigned, + + /// Used temporarily in reloadAtBegin() to mark register units that are + /// live-in to the basic block. + regLiveIn, /// A register state may also be a virtual register number, indication /// that the physical register is currently allocated to a virtual /// register. In that case, LiveVirtRegs contains the inverse mapping. }; - /// One of the RegState enums, or a virtreg. - std::vector PhysRegState; + /// One of the RegState enums for each register unit. + std::vector RegUnitState; - SmallVector VirtDead; SmallVector Coalesced; /// Set of register units. @@ -136,12 +149,20 @@ /// cannot be allocated. UsedInInstrSet UsedInInstr; + void setPhysRegState(MCPhysReg PhysReg, unsigned NewState); + bool isPhysRegFree(MCPhysReg PhysReg) const; + /// Mark a physreg as used in this instruction. void markRegUsedInInstr(MCPhysReg PhysReg) { for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) UsedInInstr.insert(*Units); } + void unmarkRegUsedInInstr(MCPhysReg PhysReg) { + for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) + UsedInInstr.erase(*Units); + } + /// Check if a physreg or any of its aliases are used in this instruction. 
bool isRegUsedInInstr(MCPhysReg PhysReg) const { for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) @@ -150,13 +171,10 @@ return false; } - /// This flag is set when LiveRegMap will be cleared completely after - /// spilling all live registers. LiveRegMap entries should not be erased. - bool isBulkSpilling = false; - enum : unsigned { - spillClean = 1, + spillClean = 50, spillDirty = 100, + spillPrefBonus = 20, spillImpossible = ~0u }; @@ -180,23 +198,19 @@ private: bool runOnMachineFunction(MachineFunction &MF) override; + void allocateBasicBlock(MachineBasicBlock &MBB); - void handleThroughOperands(MachineInstr &MI, - SmallVectorImpl &VirtDead); - int getStackSpaceFor(unsigned VirtReg, const TargetRegisterClass &RC); - bool isLastUseOfLocalReg(const MachineOperand &MO) const; - - void addKillFlag(const LiveReg &LRI); - void killVirtReg(LiveRegMap::iterator LRI); - void killVirtReg(unsigned VirtReg); - void spillVirtReg(MachineBasicBlock::iterator MI, LiveRegMap::iterator); - void spillVirtReg(MachineBasicBlock::iterator MI, unsigned VirtReg); - - void usePhysReg(MachineOperand &MO); - void definePhysReg(MachineBasicBlock::iterator MI, MCPhysReg PhysReg, - RegState NewState); + void allocateInstruction(MachineInstr &MI); + void handleDebugValue(MachineInstr &MI); + bool usePhysReg(MachineInstr &MI, MCPhysReg PhysReg); + bool definePhysReg(MachineInstr &MI, MCPhysReg PhysReg); + bool displacePhysReg(MachineInstr &MI, MCPhysReg PhysReg); + void freePhysReg(MCPhysReg PhysReg); + + int getStackSpaceFor(unsigned VirtReg); + void spillVirtReg(MachineBasicBlock::iterator Before, unsigned VirtReg, + MCPhysReg AssignedReg, bool Kill); unsigned calcSpillCost(MCPhysReg PhysReg) const; - void assignVirtToPhysReg(LiveReg &, MCPhysReg PhysReg); LiveRegMap::iterator findLiveVirtReg(unsigned VirtReg) { return LiveVirtRegs.find(TargetRegisterInfo::virtReg2Index(VirtReg)); @@ -206,15 +220,24 @@ return LiveVirtRegs.find(TargetRegisterInfo::virtReg2Index(VirtReg)); 
} - LiveRegMap::iterator assignVirtToPhysReg(unsigned VirtReg, MCPhysReg PhysReg); - LiveRegMap::iterator allocVirtReg(MachineInstr &MI, LiveRegMap::iterator, - unsigned Hint); - LiveRegMap::iterator defineVirtReg(MachineInstr &MI, unsigned OpNum, - unsigned VirtReg, unsigned Hint); - LiveRegMap::iterator reloadVirtReg(MachineInstr &MI, unsigned OpNum, - unsigned VirtReg, unsigned Hint); - void spillAll(MachineBasicBlock::iterator MI); - bool setPhysReg(MachineInstr &MI, unsigned OpNum, MCPhysReg PhysReg); + void assignVirtToPhysReg(LiveReg &, MCPhysReg PhysReg); + void allocVirtReg(MachineInstr &MI, LiveRegMap::iterator LRI, + unsigned Hint); + void allocVirtRegUndef(MachineOperand &MO); + void assignDanglingDebugValues(MachineInstr &Def, unsigned VirtReg, + MCPhysReg Reg); + void defineLiveThroughVirtReg(MachineInstr &MI, unsigned OpNum, + unsigned VirtReg); + void defineVirtReg(MachineInstr &MI, unsigned OpNum, unsigned VirtReg); + void useVirtReg(MachineInstr &MI, unsigned OpNum, unsigned VirtReg); + void reload(MachineBasicBlock::iterator Before, unsigned VirtReg, + MCPhysReg PhsReg); + void reloadAtBegin(MachineBasicBlock &MBB); + void setPhysReg(MachineInstr &MI, MachineOperand &MO, MCPhysReg PhysReg); + + bool mayLiveOut(unsigned VirtReg); + unsigned traceCopies(unsigned VirtReg) const; + unsigned traceCopyChain(unsigned Reg) const; void dumpState(); }; @@ -228,8 +251,7 @@ /// This allocates space for the specified virtual register to be held on the /// stack. -int RegAllocFast::getStackSpaceFor(unsigned VirtReg, - const TargetRegisterClass &RC) { +int RegAllocFast::getStackSpaceFor(unsigned VirtReg) { // Find the location Reg would belong... int SS = StackSlotForVirtReg[VirtReg]; // Already has space allocated? @@ -237,6 +259,7 @@ return SS; // Allocate a new stack object for this spill location... 
+ const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg); unsigned Size = TRI->getSpillSize(RC); unsigned Align = TRI->getSpillAlignment(RC); int FrameIdx = MFI->CreateSpillStackObject(Size, Align); @@ -246,280 +269,175 @@ return FrameIdx; } -/// Return true if MO is the only remaining reference to its virtual register, -/// and it is guaranteed to be a block-local register. -bool RegAllocFast::isLastUseOfLocalReg(const MachineOperand &MO) const { - // If the register has ever been spilled or reloaded, we conservatively assume - // it is a global register used in multiple blocks. - if (StackSlotForVirtReg[MO.getReg()] != -1) - return false; - - // Check that the use/def chain has exactly one operand - MO. - MachineRegisterInfo::reg_nodbg_iterator I = MRI->reg_nodbg_begin(MO.getReg()); - if (&*I != &MO) - return false; - return ++I == MRI->reg_nodbg_end(); -} +void RegAllocFast::spillVirtReg(MachineBasicBlock::iterator Before, + unsigned VirtReg, MCPhysReg AssignedReg, + bool Kill) { + LLVM_DEBUG(dbgs() << "Spilling " << printReg(VirtReg, TRI) + << " in " << printReg(AssignedReg, TRI)); + int FI = getStackSpaceFor(VirtReg); + LLVM_DEBUG(dbgs() << " to stack slot #" << FI << "\n"); -/// Set kill flags on last use of a virtual register. -void RegAllocFast::addKillFlag(const LiveReg &LR) { - if (!LR.LastUse) return; - MachineOperand &MO = LR.LastUse->getOperand(LR.LastOpNum); - if (MO.isUse() && !LR.LastUse->isRegTiedToDefOperand(LR.LastOpNum)) { - if (MO.getReg() == LR.PhysReg) - MO.setIsKill(); - // else, don't do anything we are problably redefining a - // subreg of this register and given we don't track which - // lanes are actually dead, we cannot insert a kill flag here. - // Otherwise we may end up in a situation like this: - // ... = (MO) physreg:sub1, implicit killed physreg - // ... <== Here we would allow later pass to reuse physreg:sub1 - // which is potentially wrong. - // LR:sub0 = ... - // ... 
= LR.sub1 <== This is going to use physreg:sub1 + const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg); + TII->storeRegToStackSlot(*MBB, Before, AssignedReg, Kill, FI, &RC, TRI); + ++NumStores; + + // If this register is used by DBG_VALUE then insert new DBG_VALUE to + // identify spilled location as the place to find corresponding variable's + // value. + SmallVectorImpl &LRIDbgValues = LiveDbgValueMap[VirtReg]; + for (MachineInstr *DBG : LRIDbgValues) { + MachineInstr *NewDV = buildDbgValueForSpill(*MBB, Before, *DBG, FI); + assert(NewDV->getParent() == MBB && "dangling parent pointer"); + (void)NewDV; + LLVM_DEBUG(dbgs() << "Inserting debug info due to spill:\n" << *NewDV); } + // Now this register is spilled there is should not be any DBG_VALUE + // pointing to this register because they are all pointing to spilled value + // now. + LRIDbgValues.clear(); } -/// Mark virtreg as no longer available. -void RegAllocFast::killVirtReg(LiveRegMap::iterator LRI) { - addKillFlag(*LRI); - assert(PhysRegState[LRI->PhysReg] == LRI->VirtReg && - "Broken RegState mapping"); - PhysRegState[LRI->PhysReg] = regFree; - // Erase from LiveVirtRegs unless we're spilling in bulk. - if (!isBulkSpilling) - LiveVirtRegs.erase(LRI); +/// Get basic block begin insertion point. +/// This is not just MBB.begin() because surprisingly we have EH_LABEL +/// instructions marking the begin of a basic block. This means we must insert +/// new instructions after such labels... +static MachineBasicBlock::iterator +getMBBBeginInsertionPoint(MachineBasicBlock &MBB) { + MachineBasicBlock::iterator I = MBB.begin(); + while (I != MBB.end() && I->isLabel()) + ++I; + return I; } -/// Mark virtreg as no longer available. 
-void RegAllocFast::killVirtReg(unsigned VirtReg) { - assert(TargetRegisterInfo::isVirtualRegister(VirtReg) && - "killVirtReg needs a virtual register"); - LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg); - if (LRI != LiveVirtRegs.end()) - killVirtReg(LRI); -} - -/// This method spills the value specified by VirtReg into the corresponding -/// stack slot if needed. -void RegAllocFast::spillVirtReg(MachineBasicBlock::iterator MI, - unsigned VirtReg) { - assert(TargetRegisterInfo::isVirtualRegister(VirtReg) && - "Spilling a physical register is illegal!"); - LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg); - assert(LRI != LiveVirtRegs.end() && "Spilling unmapped virtual register"); - spillVirtReg(MI, LRI); -} - -/// Do the actual work of spilling. -void RegAllocFast::spillVirtReg(MachineBasicBlock::iterator MI, - LiveRegMap::iterator LRI) { - LiveReg &LR = *LRI; - assert(PhysRegState[LR.PhysReg] == LRI->VirtReg && "Broken RegState mapping"); - - if (LR.Dirty) { - // If this physreg is used by the instruction, we want to kill it on the - // instruction, not on the spill. - bool SpillKill = MachineBasicBlock::iterator(LR.LastUse) != MI; - LR.Dirty = false; - LLVM_DEBUG(dbgs() << "Spilling " << printReg(LRI->VirtReg, TRI) << " in " - << printReg(LR.PhysReg, TRI)); - const TargetRegisterClass &RC = *MRI->getRegClass(LRI->VirtReg); - int FI = getStackSpaceFor(LRI->VirtReg, RC); - LLVM_DEBUG(dbgs() << " to stack slot #" << FI << "\n"); - TII->storeRegToStackSlot(*MBB, MI, LR.PhysReg, SpillKill, FI, &RC, TRI); - ++NumStores; // Update statistics - - // If this register is used by DBG_VALUE then insert new DBG_VALUE to - // identify spilled location as the place to find corresponding variable's - // value. 
- SmallVectorImpl &LRIDbgValues = - LiveDbgValueMap[LRI->VirtReg]; - for (MachineInstr *DBG : LRIDbgValues) { - MachineInstr *NewDV = buildDbgValueForSpill(*MBB, MI, *DBG, FI); - assert(NewDV->getParent() == MBB && "dangling parent pointer"); - (void)NewDV; - LLVM_DEBUG(dbgs() << "Inserting debug info due to spill:" - << "\n" - << *NewDV); - } - // Now this register is spilled there is should not be any DBG_VALUE - // pointing to this register because they are all pointing to spilled value - // now. - LRIDbgValues.clear(); - if (SpillKill) - LR.LastUse = nullptr; // Don't kill register again +/// Reload all currently assigned virtual registers. +void RegAllocFast::reloadAtBegin(MachineBasicBlock &MBB) { + for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) { + MCPhysReg Reg = P.PhysReg; + // Set state to live-in. This possibly overrides mappings to virtual + // registers but we don't care anymore at this point. + setPhysRegState(Reg, regLiveIn); } - killVirtReg(LRI); -} -/// Spill all dirty virtregs without killing them. -void RegAllocFast::spillAll(MachineBasicBlock::iterator MI) { - if (LiveVirtRegs.empty()) return; - isBulkSpilling = true; // The LiveRegMap is keyed by an unsigned (the virtreg number), so the order // of spilling here is deterministic, if arbitrary. - for (LiveRegMap::iterator I = LiveVirtRegs.begin(), E = LiveVirtRegs.end(); - I != E; ++I) - spillVirtReg(MI, I); - LiveVirtRegs.clear(); - isBulkSpilling = false; + MachineBasicBlock::iterator InsertBefore = getMBBBeginInsertionPoint(MBB); + for (const LiveReg &LR : LiveVirtRegs) { + MCPhysReg PhysReg = LR.PhysReg; + if (PhysReg == 0) + continue; + + unsigned FirstUnit = *MCRegUnitIterator(PhysReg, TRI); + if (RegUnitState[FirstUnit] == regLiveIn) + continue; + + assert((&MBB != &MBB.getParent()->front() || IgnoreMissingDefs) && + "no reload in start block. Missing vreg def?"); + reload(InsertBefore, LR.VirtReg, PhysReg); + } } /// Handle the direct use of a physical register. 
Check that the register is /// not used by a virtreg. Kill the physreg, marking it free. This may add /// implicit kills to MO->getParent() and invalidate MO. -void RegAllocFast::usePhysReg(MachineOperand &MO) { - // Ignore undef uses. - if (MO.isUndef()) - return; +bool RegAllocFast::usePhysReg(MachineInstr &MI, MCPhysReg Reg) { + assert(TargetRegisterInfo::isPhysicalRegister(Reg) && "expected physreg"); + bool displacedAny = displacePhysReg(MI, Reg); + setPhysRegState(Reg, regPreAssigned); + markRegUsedInInstr(Reg); + return displacedAny; +} - unsigned PhysReg = MO.getReg(); - assert(TargetRegisterInfo::isPhysicalRegister(PhysReg) && - "Bad usePhysReg operand"); +bool RegAllocFast::definePhysReg(MachineInstr &MI, MCPhysReg Reg) { + bool displacedAny = displacePhysReg(MI, Reg); + setPhysRegState(Reg, regPreAssigned); + return displacedAny; +} - markRegUsedInInstr(PhysReg); - switch (PhysRegState[PhysReg]) { - case regDisabled: - break; - case regReserved: - PhysRegState[PhysReg] = regFree; - LLVM_FALLTHROUGH; - case regFree: - MO.setIsKill(); - return; - default: - // The physreg was allocated to a virtual register. That means the value we - // wanted has been clobbered. - llvm_unreachable("Instruction uses an allocated register"); - } +/// Mark PhysReg as reserved or free after spilling any virtregs. This is very +/// similar to defineVirtReg except the physreg is reserved instead of +/// allocated. +bool RegAllocFast::displacePhysReg(MachineInstr &MI, MCPhysReg PhysReg) { + bool displacedAny = false; - // Maybe a superregister is reserved? 
- for (MCRegAliasIterator AI(PhysReg, TRI, false); AI.isValid(); ++AI) { - MCPhysReg Alias = *AI; - switch (PhysRegState[Alias]) { - case regDisabled: + for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) { + unsigned Unit = *UI; + switch (unsigned VirtReg = RegUnitState[Unit]) { + default: { + LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg); + assert(LRI != LiveVirtRegs.end() && "datastructures in sync"); + MachineBasicBlock::iterator ReloadBefore = + std::next((MachineBasicBlock::iterator)MI.getIterator()); + reload(ReloadBefore, VirtReg, LRI->PhysReg); + + setPhysRegState(LRI->PhysReg, regFree); + LRI->PhysReg = 0; + LRI->Reloaded = true; + displacedAny = true; + break; + } + case regPreAssigned: + RegUnitState[Unit] = regFree; + displacedAny = true; break; - case regReserved: - // Either PhysReg is a subregister of Alias and we mark the - // whole register as free, or PhysReg is the superregister of - // Alias and we mark all the aliases as disabled before freeing - // PhysReg. - // In the latter case, since PhysReg was disabled, this means that - // its value is defined only by physical sub-registers. This check - // is performed by the assert of the default case in this loop. - // Note: The value of the superregister may only be partial - // defined, that is why regDisabled is a valid state for aliases. - assert((TRI->isSuperRegister(PhysReg, Alias) || - TRI->isSuperRegister(Alias, PhysReg)) && - "Instruction is not using a subregister of a reserved register"); - LLVM_FALLTHROUGH; case regFree: - if (TRI->isSuperRegister(PhysReg, Alias)) { - // Leave the superregister in the working set. - PhysRegState[Alias] = regFree; - MO.getParent()->addRegisterKilled(Alias, TRI, true); - return; - } - // Some other alias was in the working set - clear it. 
- PhysRegState[Alias] = regDisabled; break; - default: - llvm_unreachable("Instruction uses an alias of an allocated register"); } } + return displacedAny; +} - // All aliases are disabled, bring register into working set. - PhysRegState[PhysReg] = regFree; - MO.setIsKill(); +void RegAllocFast::setPhysRegState(MCPhysReg PhysReg, unsigned NewState) { + for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) + RegUnitState[*UI] = NewState; } -/// Mark PhysReg as reserved or free after spilling any virtregs. This is very -/// similar to defineVirtReg except the physreg is reserved instead of -/// allocated. -void RegAllocFast::definePhysReg(MachineBasicBlock::iterator MI, - MCPhysReg PhysReg, RegState NewState) { - markRegUsedInInstr(PhysReg); - switch (unsigned VirtReg = PhysRegState[PhysReg]) { - case regDisabled: - break; - default: - spillVirtReg(MI, VirtReg); - LLVM_FALLTHROUGH; +void RegAllocFast::freePhysReg(MCPhysReg PhysReg) { + LLVM_DEBUG(dbgs() << "Freeing " << printReg(PhysReg, TRI) << ':'); + + unsigned FirstUnit = *MCRegUnitIterator(PhysReg, TRI); + switch (unsigned VirtReg = RegUnitState[FirstUnit]) { case regFree: - case regReserved: - PhysRegState[PhysReg] = NewState; + LLVM_DEBUG(dbgs() << '\n'); return; - } - - // This is a disabled register, disable all aliases. 
- PhysRegState[PhysReg] = NewState; - for (MCRegAliasIterator AI(PhysReg, TRI, false); AI.isValid(); ++AI) { - MCPhysReg Alias = *AI; - switch (unsigned VirtReg = PhysRegState[Alias]) { - case regDisabled: - break; - default: - spillVirtReg(MI, VirtReg); - LLVM_FALLTHROUGH; - case regFree: - case regReserved: - PhysRegState[Alias] = regDisabled; - if (TRI->isSuperRegister(PhysReg, Alias)) - return; - break; + case regPreAssigned: + LLVM_DEBUG(dbgs() << '\n'); + setPhysRegState(PhysReg, regFree); + return; + default: { + LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg); + assert(LRI != LiveVirtRegs.end()); + LLVM_DEBUG(dbgs() << ' ' << printReg(LRI->VirtReg, TRI) << '\n'); + setPhysRegState(LRI->PhysReg, regFree); + LRI->PhysReg = 0; } + return; } } -/// Return the cost of spilling clearing out PhysReg and aliases so it is -/// free for allocation. Returns 0 when PhysReg is free or disabled with all -/// aliases disabled - it can be allocated directly. +/// Return the cost of spilling clearing out PhysReg and aliases so it is free +/// for allocation. Returns 0 when PhysReg is free or disabled with all aliases +/// disabled - it can be allocated directly. /// \returns spillImpossible when PhysReg or an alias can't be spilled. unsigned RegAllocFast::calcSpillCost(MCPhysReg PhysReg) const { - if (isRegUsedInInstr(PhysReg)) { - LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) - << " is already used in instr.\n"); - return spillImpossible; - } - switch (unsigned VirtReg = PhysRegState[PhysReg]) { - case regDisabled: - break; - case regFree: - return 0; - case regReserved: - LLVM_DEBUG(dbgs() << printReg(VirtReg, TRI) << " corresponding " - << printReg(PhysReg, TRI) << " is reserved already.\n"); - return spillImpossible; - default: { - LiveRegMap::const_iterator I = findLiveVirtReg(VirtReg); - assert(I != LiveVirtRegs.end() && "Missing VirtReg entry"); - return I->Dirty ? spillDirty : spillClean; - } - } - - // This is a disabled register, add up cost of aliases. 
- LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << " is disabled.\n"); - unsigned Cost = 0; - for (MCRegAliasIterator AI(PhysReg, TRI, false); AI.isValid(); ++AI) { - MCPhysReg Alias = *AI; - switch (unsigned VirtReg = PhysRegState[Alias]) { - case regDisabled: - break; + for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) { + switch (unsigned VirtReg = RegUnitState[*UI]) { case regFree: - ++Cost; break; - case regReserved: + case regPreAssigned: + LLVM_DEBUG(dbgs() << "Cannot spill pre-assigned " + << printReg(PhysReg, TRI) << '\n'); return spillImpossible; default: { - LiveRegMap::const_iterator I = findLiveVirtReg(VirtReg); - assert(I != LiveVirtRegs.end() && "Missing VirtReg entry"); - Cost += I->Dirty ? spillDirty : spillClean; - break; + // TODO: it's not correct to return here, we may have additional + // virtregs assigned to other units or even have a preassigned bit in + // another unit... + bool SureSpill = StackSlotForVirtReg[VirtReg] != -1 || + findLiveVirtReg(VirtReg)->LiveOut; + return SureSpill ? 
spillClean : spillDirty; } } } - return Cost; + return 0; } /// This method updates local state so that we know that PhysReg is the @@ -528,563 +446,756 @@ void RegAllocFast::assignVirtToPhysReg(LiveReg &LR, MCPhysReg PhysReg) { LLVM_DEBUG(dbgs() << "Assigning " << printReg(LR.VirtReg, TRI) << " to " << printReg(PhysReg, TRI) << "\n"); - PhysRegState[PhysReg] = LR.VirtReg; - assert(!LR.PhysReg && "Already assigned a physreg"); + assert(LR.PhysReg == 0 && "Already assigned a physreg"); + assert(PhysReg != 0 && "Trying to assign no register"); LR.PhysReg = PhysReg; + setPhysRegState(PhysReg, LR.VirtReg); } -RegAllocFast::LiveRegMap::iterator -RegAllocFast::assignVirtToPhysReg(unsigned VirtReg, MCPhysReg PhysReg) { - LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg); - assert(LRI != LiveVirtRegs.end() && "VirtReg disappeared"); - assignVirtToPhysReg(*LRI, PhysReg); - return LRI; +static bool isCoalescable(const MachineInstr &MI) { + return MI.isCopy() && MI.getOperand(0).getSubReg() == 0 && + MI.getOperand(1).getSubReg() == 0; +} + +unsigned RegAllocFast::traceCopyChain(unsigned Reg) const { + static const unsigned ChainLengthLimit = 3; + unsigned C = 0; + do { + if (TargetRegisterInfo::isPhysicalRegister(Reg)) + return Reg; + assert(TargetRegisterInfo::isVirtualRegister(Reg)); + + MachineInstr *VRegDef = MRI->getUniqueVRegDef(Reg); + if (VRegDef == nullptr || !isCoalescable(*VRegDef)) + return 0; + Reg = VRegDef->getOperand(1).getReg(); + } while(++C <= ChainLengthLimit); + return 0; +} + +/// Check if any of \p VirtReg's definitions is a copy. If it is follow the +/// chain of copies to check whether we reach a physical register we can +/// coalesce with. 
+unsigned RegAllocFast::traceCopies(unsigned VirtReg) const { + static const unsigned DefLimit = 3; + unsigned C = 0; + for (const MachineInstr &MI : MRI->def_instructions(VirtReg)) { + if (isCoalescable(MI)) { + unsigned Reg = MI.getOperand(1).getReg(); + Reg = traceCopyChain(Reg); + if (Reg != 0) + return Reg; + } + if (++C >= DefLimit) + break; + } + return 0; +} + +bool RegAllocFast::isPhysRegFree(MCPhysReg PhysReg) const { + for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) { + if (RegUnitState[*UI] != regFree) + return false; + } + return true; +} + + +void RegAllocFast::assignDanglingDebugValues(MachineInstr &Definition, + unsigned VirtReg, MCPhysReg Reg) { + auto UDBGValIter = DanglingDbgValues.find(VirtReg); + if (UDBGValIter == DanglingDbgValues.end()) + return; + + SmallVectorImpl &Dangling = UDBGValIter->second; + for (MachineInstr *DbgValue : Dangling) { + assert(DbgValue->isDebugValue()); + MachineOperand &MO = DbgValue->getOperand(0); + if (!MO.isReg()) + continue; + + // Test whether the physreg survives from the definition to the DBG_VALUE. + MCPhysReg SetToReg = Reg; + unsigned Limit = 20; + for (MachineBasicBlock::iterator I = std::next(Definition.getIterator()), + E = DbgValue->getIterator(); I != E; ++I) { + if (I->modifiesRegister(Reg, TRI) || --Limit == 0) { + LLVM_DEBUG(dbgs() << "Register did not survive for " << *DbgValue + << '\n'); + SetToReg = 0; + break; + } + } + MO.setReg(SetToReg); + } + Dangling.clear(); } /// Allocates a physical register for VirtReg. 
-RegAllocFast::LiveRegMap::iterator RegAllocFast::allocVirtReg(MachineInstr &MI, - LiveRegMap::iterator LRI, unsigned Hint) { +void RegAllocFast::allocVirtReg(MachineInstr &MI, LiveRegMap::iterator LRI, + unsigned Hint1) { const unsigned VirtReg = LRI->VirtReg; + assert(LRI->PhysReg == 0); - assert(TargetRegisterInfo::isVirtualRegister(VirtReg) && - "Can only allocate virtual registers"); + const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg); + LLVM_DEBUG(dbgs() << "Search register for " << printReg(VirtReg) + << " in class " << TRI->getRegClassName(&RC) << "\n"); // Take hint when possible. - const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg); - if (TargetRegisterInfo::isPhysicalRegister(Hint) && - MRI->isAllocatable(Hint) && RC.contains(Hint)) { - // Ignore the hint if we would have to spill a dirty register. - unsigned Cost = calcSpillCost(Hint); - if (Cost < spillDirty) { - if (Cost) - definePhysReg(MI, Hint, regFree); - // definePhysReg may kill virtual registers and modify LiveVirtRegs. - // That invalidates LRI, so run a new lookup for VirtReg. - return assignVirtToPhysReg(VirtReg, Hint); + unsigned Hint0 = traceCopies(VirtReg); + if (TargetRegisterInfo::isPhysicalRegister(Hint0) && + MRI->isAllocatable(Hint0) && RC.contains(Hint0) && + !isRegUsedInInstr(Hint0)) { + // Take hint if the register is currently free. + if (isPhysRegFree(Hint0)) { + LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint0, TRI) + << '\n'); + assignVirtToPhysReg(*LRI, Hint0); + return; + } else { + LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint0, TRI) + << "occupied\n"); } + } else { + Hint0 = 0; } - // First try to find a completely free register. - ArrayRef AO = RegClassInfo.getOrder(&RC); - for (MCPhysReg PhysReg : AO) { - if (PhysRegState[PhysReg] == regFree && !isRegUsedInInstr(PhysReg)) { - assignVirtToPhysReg(*LRI, PhysReg); - return LRI; + // Try first hint. 
+ if (TargetRegisterInfo::isPhysicalRegister(Hint1) && + MRI->isAllocatable(Hint1) && RC.contains(Hint1) && + !isRegUsedInInstr(Hint1)) { + // Take hint if the register is currently free. + if (isPhysRegFree(Hint1)) { + LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint1, TRI) + << '\n'); + assignVirtToPhysReg(*LRI, Hint1); + return; + } else { + LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint1, TRI) + << "occupied\n"); } } - LLVM_DEBUG(dbgs() << "Allocating " << printReg(VirtReg) << " from " - << TRI->getRegClassName(&RC) << "\n"); - - unsigned BestReg = 0; + MCPhysReg BestReg = 0; unsigned BestCost = spillImpossible; - for (MCPhysReg PhysReg : AO) { + ArrayRef AllocationOrder = RegClassInfo.getOrder(&RC); + for (MCPhysReg PhysReg : AllocationOrder) { + LLVM_DEBUG(dbgs() << "\tRegister: " << printReg(PhysReg, TRI) << ' '); + if (isRegUsedInInstr(PhysReg)) { + LLVM_DEBUG(dbgs() << "already used in instr.\n"); + continue; + } + unsigned Cost = calcSpillCost(PhysReg); - LLVM_DEBUG(dbgs() << "\tRegister: " << printReg(PhysReg, TRI) << "\n"); - LLVM_DEBUG(dbgs() << "\tCost: " << Cost << "\n"); - LLVM_DEBUG(dbgs() << "\tBestCost: " << BestCost << "\n"); - // Cost is 0 when all aliases are already disabled. + LLVM_DEBUG(dbgs() << "Cost: " << Cost << " BestCost: " << BestCost << "\n"); + // Immediate take a register with cost 0. if (Cost == 0) { assignVirtToPhysReg(*LRI, PhysReg); - return LRI; + return; + } + if (PhysReg == Hint0 || PhysReg == Hint1) + Cost -= spillPrefBonus; + if (Cost < BestCost) { + BestReg = PhysReg; + BestCost = Cost; } - if (Cost < BestCost) - BestReg = PhysReg, BestCost = Cost; } - if (BestReg) { - definePhysReg(MI, BestReg, regFree); - // definePhysReg may kill virtual registers and modify LiveVirtRegs. - // That invalidates LRI, so run a new lookup for VirtReg. - return assignVirtToPhysReg(VirtReg, BestReg); + if (!BestReg) { + // Nothing we can do: Report an error and keep going with an invalid + // allocation. 
+ if (MI.isInlineAsm()) + MI.emitError("inline assembly requires more registers than available"); + else + MI.emitError("ran out of registers during register allocation"); + + LRI->Error = true; + LRI->PhysReg = 0; + return; } - // Nothing we can do. Report an error and keep going with a bad allocation. - if (MI.isInlineAsm()) - MI.emitError("inline assembly requires more registers than available"); - else - MI.emitError("ran out of registers during register allocation"); - definePhysReg(MI, *AO.begin(), regFree); - return assignVirtToPhysReg(VirtReg, *AO.begin()); + displacePhysReg(MI, BestReg); + assignVirtToPhysReg(*LRI, BestReg); + assignDanglingDebugValues(MI, VirtReg, BestReg); +} + +void RegAllocFast::allocVirtRegUndef(MachineOperand &MO) { + assert(MO.isUndef() && "expected undef use"); + unsigned Reg = MO.getReg(); + assert(TargetRegisterInfo::isVirtualRegister(Reg) && "Expected virtreg"); + const TargetRegisterClass &RC = *MRI->getRegClass(Reg); + ArrayRef AllocationOrder = RegClassInfo.getOrder(&RC); + assert(!AllocationOrder.empty() && "Allocation order must not be empty"); + MCPhysReg FinalReg = AllocationOrder[0]; + unsigned SubRegIdx = MO.getSubReg(); + if (SubRegIdx != 0) { + FinalReg = TRI->getSubReg(FinalReg, SubRegIdx); + MO.setSubReg(0); + } + MO.setReg(FinalReg); +} + +/// Heuristic to identify virtual registers not living out of current block. +bool RegAllocFast::mayLiveOut(unsigned VirtReg) { + unsigned C = 0; + if (MayLiveAccrossBlocks.test(TargetRegisterInfo::virtReg2Index(VirtReg))) + goto MayLO; + + static const unsigned Limit = 8; + for (const MachineOperand &Use : MRI->reg_nodbg_operands(VirtReg)) { + if (Use.getParent()->getParent() != MBB || ++C >= Limit) { + MayLiveAccrossBlocks.set(TargetRegisterInfo::virtReg2Index(VirtReg)); + goto MayLO; + } + } + return false; + +MayLO: + // No vregs live out without successors (such as the return block). 
+ return !MBB->succ_empty(); +} + +/// Variation of defineVirtReg() with special handling for livethrough regs +/// (tied or earlyclobber) that may interfere with preassigned uses. +void RegAllocFast::defineLiveThroughVirtReg(MachineInstr &MI, unsigned OpNum, + unsigned VirtReg) { + LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg); + if (LRI != LiveVirtRegs.end()) { + MCPhysReg PrevReg = LRI->PhysReg; + if (PrevReg != 0 && isRegUsedInInstr(PrevReg)) { + LLVM_DEBUG(dbgs() << "Need new assignment for " << printReg(PrevReg, TRI) + << " (tied/earlyclobber resolution)\n"); + freePhysReg(PrevReg); + LRI->PhysReg = 0; + allocVirtReg(MI, LRI, 0); + MachineBasicBlock::iterator InsertBefore = + std::next((MachineBasicBlock::iterator)MI.getIterator()); + LLVM_DEBUG(dbgs() << "Copy " << printReg(LRI->PhysReg, TRI) << " to " + << printReg(PrevReg, TRI) << "\n"); + BuildMI(*MBB, InsertBefore, MI.getDebugLoc(), + TII->get(TargetOpcode::COPY), PrevReg) + .addReg(LRI->PhysReg, llvm::RegState::Kill); + } + MachineOperand &MO = MI.getOperand(OpNum); + if (MO.getSubReg() && !MO.isUndef()) { + LRI->LastUse = &MI; + } + } + return defineVirtReg(MI, OpNum, VirtReg); } -/// Allocates a register for VirtReg and mark it as dirty. -RegAllocFast::LiveRegMap::iterator RegAllocFast::defineVirtReg(MachineInstr &MI, - unsigned OpNum, - unsigned VirtReg, - unsigned Hint) { +/// Allocates a register for VirtReg definition. Typically the register is +/// already assigned from a use of the virtreg, however we still need to +/// perform an allocation if: +/// - It is a dead definition without any uses. +/// - The value is live out and all uses are in different basic blocks. 
+void RegAllocFast::defineVirtReg(MachineInstr &MI, unsigned OpNum,
+ unsigned VirtReg) {
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
"Not a virtual register");
+ MachineOperand &MO = MI.getOperand(OpNum);
LiveRegMap::iterator LRI;
bool New;
std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
if (New) {
- // If there is no hint, peek at the only use of this register.
- if ((!Hint || !TargetRegisterInfo::isPhysicalRegister(Hint)) &&
- MRI->hasOneNonDBGUse(VirtReg)) {
- const MachineInstr &UseMI = *MRI->use_instr_nodbg_begin(VirtReg);
- // It's a copy, use the destination register as a hint.
- if (UseMI.isCopyLike())
- Hint = UseMI.getOperand(0).getReg();
+ // Note that we have to prime the LiveOut cache, even if a dead flag is
+ // set.
+ bool MayLO = mayLiveOut(VirtReg);
+ if (!MO.isDead()) {
+ if (MayLO)
+ LRI->LiveOut = true;
+ else {
+ // It is a dead def without the dead flag; add the flag now.
+ MO.setIsDead(true);
+ }
}
- LRI = allocVirtReg(MI, LRI, Hint);
- } else if (LRI->LastUse) {
- // Redefining a live register - kill at the last use, unless it is this
- // instruction defining VirtReg multiple times.
- if (LRI->LastUse != &MI || LRI->LastUse->getOperand(LRI->LastOpNum).isUse()) - addKillFlag(*LRI); } - assert(LRI->PhysReg && "Register not assigned"); - LRI->LastUse = &MI; - LRI->LastOpNum = OpNum; - LRI->Dirty = true; - markRegUsedInInstr(LRI->PhysReg); - return LRI; + if (LRI->PhysReg == 0) + allocVirtReg(MI, LRI, 0); + else { + assert(!isRegUsedInInstr(LRI->PhysReg) && "TODO: preassign mismatch"); + LLVM_DEBUG(dbgs() << "In def of " << printReg(VirtReg, TRI) + << " use existing assignment to " + << printReg(LRI->PhysReg, TRI) << '\n'); + } + + MCPhysReg PhysReg = LRI->PhysReg; + assert(PhysReg != 0 && "Register not assigned"); + if (LRI->Reloaded || LRI->LiveOut) { + if (!MI.isImplicitDef()) { + MachineBasicBlock::iterator SpillBefore = + std::next((MachineBasicBlock::iterator)MI.getIterator()); + LLVM_DEBUG(dbgs() << "Spill Reason: LO: " << LRI->LiveOut << " RL: " + << LRI->Reloaded << '\n'); + bool Kill = LRI->LastUse == nullptr; + spillVirtReg(SpillBefore, VirtReg, PhysReg, Kill); + LRI->LastUse = nullptr; + } + LRI->LiveOut = false; + LRI->Reloaded = false; + } + markRegUsedInInstr(PhysReg); + setPhysReg(MI, MO, PhysReg); } -/// Make sure VirtReg is available in a physreg and return it. -RegAllocFast::LiveRegMap::iterator RegAllocFast::reloadVirtReg(MachineInstr &MI, - unsigned OpNum, - unsigned VirtReg, - unsigned Hint) { +/// Allocates a register for a VirtReg use. 
+void RegAllocFast::useVirtReg(MachineInstr &MI, unsigned OpNum, + unsigned VirtReg) { assert(TargetRegisterInfo::isVirtualRegister(VirtReg) && "Not a virtual register"); + MachineOperand &MO = MI.getOperand(OpNum); LiveRegMap::iterator LRI; bool New; std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg)); - MachineOperand &MO = MI.getOperand(OpNum); if (New) { - LRI = allocVirtReg(MI, LRI, Hint); - const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg); - int FrameIndex = getStackSpaceFor(VirtReg, RC); - LLVM_DEBUG(dbgs() << "Reloading " << printReg(VirtReg, TRI) << " into " - << printReg(LRI->PhysReg, TRI) << "\n"); - TII->loadRegFromStackSlot(*MBB, MI, LRI->PhysReg, FrameIndex, &RC, TRI); - ++NumLoads; - } else if (LRI->Dirty) { - if (isLastUseOfLocalReg(MO)) { - LLVM_DEBUG(dbgs() << "Killing last use: " << MO << "\n"); - if (MO.isUse()) - MO.setIsKill(); - else - MO.setIsDead(); - } else if (MO.isKill()) { - LLVM_DEBUG(dbgs() << "Clearing dubious kill: " << MO << "\n"); - MO.setIsKill(false); - } else if (MO.isDead()) { - LLVM_DEBUG(dbgs() << "Clearing dubious dead: " << MO << "\n"); - MO.setIsDead(false); + MachineOperand &MO = MI.getOperand(OpNum); + // Prime the LiveOut cache. Note that we need to do this even if a kill + // flag is set, since we may query it later in other situations. + bool MayLO = mayLiveOut(VirtReg); + if (!MO.isKill()) { + if (MayLO) { + LRI->LiveOut = true; + } else { + // It is a last (killing) use without the kill flag; add the flag now. + MO.setIsKill(true); + } + } + } else { + assert((!MO.isKill() || LRI->LastUse == &MI) && "Invalid kill flag"); + } + + // If necessary allocate a register. 
+ if (LRI->PhysReg == 0) {
+ assert(!MO.isTied() && "tied op should be allocated");
+ unsigned Hint = 0;
+ if (MI.isCopy() && MI.getOperand(1).getSubReg() == 0) {
+ Hint = MI.getOperand(0).getReg();
+ assert(TargetRegisterInfo::isPhysicalRegister(Hint) &&
+ "Copy destination should already be assigned");
+ }
+ allocVirtReg(MI, LRI, Hint);
+ if (LRI->Error) {
+ const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
+ ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
+ setPhysReg(MI, MO, *AllocationOrder.begin());
+ return;
}
- } else if (MO.isKill()) {
- // We must remove kill flags from uses of reloaded registers because the
- // register would be killed immediately, and there might be a second use:
- // %foo = OR killed %x, %x
- // This would cause a second reload of %x into a different register.
- LLVM_DEBUG(dbgs() << "Clearing clean kill: " << MO << "\n");
- MO.setIsKill(false);
- } else if (MO.isDead()) {
- LLVM_DEBUG(dbgs() << "Clearing clean dead: " << MO << "\n");
- MO.setIsDead(false);
}
- assert(LRI->PhysReg && "Register not assigned");
+
LRI->LastUse = &MI;
- LRI->LastOpNum = OpNum;
markRegUsedInInstr(LRI->PhysReg);
- return LRI;
+ setPhysReg(MI, MO, LRI->PhysReg);
+}
+
+void RegAllocFast::reload(MachineBasicBlock::iterator Before, unsigned VirtReg,
+ MCPhysReg PhysReg) {
+ LLVM_DEBUG(dbgs() << "Reloading " << printReg(VirtReg, TRI) << " into "
+ << printReg(PhysReg, TRI) << "\n");
+ int FI = getStackSpaceFor(VirtReg);
+ const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
+ TII->loadRegFromStackSlot(*MBB, Before, PhysReg, FI, &RC, TRI);
+ ++NumLoads;
}
/// Changes operand OpNum in MI the refer the PhysReg, considering subregs. This
/// may invalidate any operand pointers. Return true if the operand kills its
/// register.
-bool RegAllocFast::setPhysReg(MachineInstr &MI, unsigned OpNum,
+void RegAllocFast::setPhysReg(MachineInstr &MI, MachineOperand &MO,
MCPhysReg PhysReg) {
- MachineOperand &MO = MI.getOperand(OpNum);
- bool Dead = MO.isDead();
if (!MO.getSubReg()) {
MO.setReg(PhysReg);
MO.setIsRenamable(true);
- return MO.isKill() || Dead;
+ return;
}
// Handle subregister index.
MO.setReg(PhysReg ? TRI->getSubReg(PhysReg, MO.getSubReg()) : 0);
MO.setIsRenamable(true);
- MO.setSubReg(0);
+ // Note: We leave the subreg number around a little longer in case of defs.
+ // This is so that the register freeing logic in allocateInstruction can still
+ // recognize this as subregister defs. The code there will clear the number.
+ if (!MO.isDef())
+ MO.setSubReg(0);
// A kill flag implies killing the full register. Add corresponding super
// register kill.
if (MO.isKill()) {
MI.addRegisterKilled(PhysReg, TRI, true);
- return true;
+ return;
}
// A <def,read-undef> of a sub-register requires an implicit def of the full
// register.
- if (MO.isDef() && MO.isUndef())
- MI.addRegisterDefined(PhysReg, TRI);
-
- return Dead;
-}
-
-// Handles special instruction operand like early clobbers and tied ops when
-// there are additional physreg defines.
-void RegAllocFast::handleThroughOperands(MachineInstr &MI,
- SmallVectorImpl<unsigned> &VirtDead) {
- LLVM_DEBUG(dbgs() << "Scanning for through registers:");
- SmallSet<unsigned, 8> ThroughRegs;
- for (const MachineOperand &MO : MI.operands()) {
- if (!MO.isReg()) continue;
- unsigned Reg = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
- continue;
- if (MO.isEarlyClobber() || (MO.isUse() && MO.isTied()) ||
- (MO.getSubReg() && MI.readsVirtualRegister(Reg))) {
- if (ThroughRegs.insert(Reg).second)
- LLVM_DEBUG(dbgs() << ' ' << printReg(Reg));
- }
- }
-
- // If any physreg defines collide with preallocated through registers,
- // we must spill and reallocate.
- LLVM_DEBUG(dbgs() << "\nChecking for physdef collisions.\n"); - for (const MachineOperand &MO : MI.operands()) { - if (!MO.isReg() || !MO.isDef()) continue; - unsigned Reg = MO.getReg(); - if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue; - markRegUsedInInstr(Reg); - for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) { - if (ThroughRegs.count(PhysRegState[*AI])) - definePhysReg(MI, *AI, regFree); - } + if (MO.isDef() && MO.isUndef()) { + if (MO.isDead()) + MI.addRegisterDead(PhysReg, TRI, true); + else + MI.addRegisterDefined(PhysReg, TRI); } - - SmallVector PartialDefs; - LLVM_DEBUG(dbgs() << "Allocating tied uses.\n"); - for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) { - const MachineOperand &MO = MI.getOperand(I); - if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); - if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue; - if (MO.isUse()) { - if (!MO.isTied()) continue; - LLVM_DEBUG(dbgs() << "Operand " << I << "(" << MO - << ") is tied to operand " << MI.findTiedOperandIdx(I) - << ".\n"); - LiveRegMap::iterator LRI = reloadVirtReg(MI, I, Reg, 0); - MCPhysReg PhysReg = LRI->PhysReg; - setPhysReg(MI, I, PhysReg); - // Note: we don't update the def operand yet. That would cause the normal - // def-scan to attempt spilling. - } else if (MO.getSubReg() && MI.readsVirtualRegister(Reg)) { - LLVM_DEBUG(dbgs() << "Partial redefine: " << MO << "\n"); - // Reload the register, but don't assign to the operand just yet. - // That would confuse the later phys-def processing pass. 
- LiveRegMap::iterator LRI = reloadVirtReg(MI, I, Reg, 0); - PartialDefs.push_back(LRI->PhysReg); - } - } - - LLVM_DEBUG(dbgs() << "Allocating early clobbers.\n"); - for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) { - const MachineOperand &MO = MI.getOperand(I); - if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); - if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue; - if (!MO.isEarlyClobber()) - continue; - // Note: defineVirtReg may invalidate MO. - LiveRegMap::iterator LRI = defineVirtReg(MI, I, Reg, 0); - MCPhysReg PhysReg = LRI->PhysReg; - if (setPhysReg(MI, I, PhysReg)) - VirtDead.push_back(Reg); - } - - // Restore UsedInInstr to a state usable for allocating normal virtual uses. - UsedInInstr.clear(); - for (const MachineOperand &MO : MI.operands()) { - if (!MO.isReg() || (MO.isDef() && !MO.isEarlyClobber())) continue; - unsigned Reg = MO.getReg(); - if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue; - LLVM_DEBUG(dbgs() << "\tSetting " << printReg(Reg, TRI) - << " as used in instr\n"); - markRegUsedInInstr(Reg); - } - - // Also mark PartialDefs as used to avoid reallocation. 
- for (unsigned PartialDef : PartialDefs) - markRegUsedInInstr(PartialDef); } #ifndef NDEBUG void RegAllocFast::dumpState() { - for (unsigned Reg = 1, E = TRI->getNumRegs(); Reg != E; ++Reg) { - if (PhysRegState[Reg] == regDisabled) continue; - dbgs() << " " << printReg(Reg, TRI); - switch(PhysRegState[Reg]) { + for (unsigned Unit = 1, UnitE = TRI->getNumRegUnits(); Unit != UnitE; + ++Unit) { + switch(unsigned VirtReg = RegUnitState[Unit]) { case regFree: break; - case regReserved: - dbgs() << "*"; + case regPreAssigned: + dbgs() << " " << printRegUnit(Unit, TRI) << "[P]"; break; + case regLiveIn: + llvm_unreachable("Should not have regLiveIn in map"); default: { - dbgs() << '=' << printReg(PhysRegState[Reg]); - LiveRegMap::iterator I = findLiveVirtReg(PhysRegState[Reg]); - assert(I != LiveVirtRegs.end() && "Missing VirtReg entry"); - if (I->Dirty) - dbgs() << "*"; - assert(I->PhysReg == Reg && "Bad inverse map"); + dbgs() << ' ' << printRegUnit(Unit, TRI) << '=' << printReg(VirtReg); + LiveRegMap::iterator I = findLiveVirtReg(VirtReg); + assert(I != LiveVirtRegs.end() && "have LiveVirtRegs entry"); + if (I->LiveOut || I->Reloaded) { + dbgs() << '['; + if (I->LiveOut) dbgs() << 'O'; + if (I->Reloaded) dbgs() << 'R'; + dbgs() << ']'; + } + assert(TRI->hasRegUnit(I->PhysReg, Unit) && "inverse mapping present"); break; } } } dbgs() << '\n'; // Check that LiveVirtRegs is the inverse. 
- for (LiveRegMap::iterator i = LiveVirtRegs.begin(), - e = LiveVirtRegs.end(); i != e; ++i) { - assert(TargetRegisterInfo::isVirtualRegister(i->VirtReg) && + for (const LiveReg &LR : LiveVirtRegs) { + unsigned VirtReg = LR.VirtReg; + assert(TargetRegisterInfo::isVirtualRegister(VirtReg) && "Bad map key"); - assert(TargetRegisterInfo::isPhysicalRegister(i->PhysReg) && - "Bad map value"); - assert(PhysRegState[i->PhysReg] == i->VirtReg && "Bad inverse map"); + MCPhysReg PhysReg = LR.PhysReg; + if (PhysReg != 0) { + assert(TargetRegisterInfo::isPhysicalRegister(PhysReg) && + "mapped to physreg"); + for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) + assert(RegUnitState[*UI] == VirtReg && "inverse map valid"); + } } } #endif -void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) { - this->MBB = &MBB; - LLVM_DEBUG(dbgs() << "\nAllocating " << MBB); +void RegAllocFast::allocateInstruction(MachineInstr &MI) { + // The basic algorithm here is: + // 1. Mark registers of def operands as free + // 2. Allocate registers to use operands and place reload instructions for + // registers displaced by the allocation. + // + // However we need to handle some corner cases: + // - pre-assigned defs and uses need to be handled before the other def/use + // operands are processed to avoid the allocation heuristics clashing with + // the pre-assignment. + // - The "free def operands" step has to come last instead of first for tied + // operands and early-clobbers. - PhysRegState.assign(TRI->getNumRegs(), regDisabled); - assert(LiveVirtRegs.empty() && "Mapping not cleared from last block?"); - - MachineBasicBlock::iterator MII = MBB.begin(); - - // Add live-in registers as live. - for (const MachineBasicBlock::RegisterMaskPair LI : MBB.liveins()) - if (MRI->isAllocatable(LI.PhysReg)) - definePhysReg(MII, LI.PhysReg, regReserved); - - VirtDead.clear(); - Coalesced.clear(); - - // Otherwise, sequentially allocate each instruction in the MBB. 
- for (MachineInstr &MI : MBB) { - const MCInstrDesc &MCID = MI.getDesc(); - LLVM_DEBUG(dbgs() << "\n>> " << MI << "Regs:"; dumpState()); - - // Debug values are not allowed to change codegen in any way. - if (MI.isDebugValue()) { - MachineInstr *DebugMI = &MI; - MachineOperand &MO = DebugMI->getOperand(0); + UsedInInstr.clear(); - // Ignore DBG_VALUEs that aren't based on virtual registers. These are - // mostly constants and frame indices. - if (!MO.isReg()) - continue; + // Scan for special cases; Apply pre-assigned register defs to state. + bool HasPhysRegUse = false; + bool HasRegMask = false; + bool HasVRegDef = false; + bool HasDef = false; + bool HasEarlyClobber = false; + bool NeedToAssignLiveThroughs = false; + for (MachineOperand &MO : MI.operands()) { + if (MO.isReg()) { unsigned Reg = MO.getReg(); - if (!TargetRegisterInfo::isVirtualRegister(Reg)) - continue; - - // See if this virtual register has already been allocated to a physical - // register or spilled to a stack slot. - LiveRegMap::iterator LRI = findLiveVirtReg(Reg); - if (LRI != LiveVirtRegs.end()) - setPhysReg(*DebugMI, 0, LRI->PhysReg); - else { - int SS = StackSlotForVirtReg[Reg]; - if (SS != -1) { - // Modify DBG_VALUE now that the value is in a spill slot. 
- updateDbgValueForSpill(*DebugMI, SS); - LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:" - << "\t" << *DebugMI); - continue; + if (TargetRegisterInfo::isVirtualRegister(Reg)) { + if (MO.isDef()) { + HasDef = true; + HasVRegDef = true; + if (MO.isEarlyClobber()) { + HasEarlyClobber = true; + NeedToAssignLiveThroughs = true; + } + if (MO.isTied() || (MO.getSubReg() != 0 && !MO.isUndef())) + NeedToAssignLiveThroughs = true; + } + } else if (TargetRegisterInfo::isPhysicalRegister(Reg)) { + if (!MRI->isReserved(Reg)) { + if (MO.isDef()) { + HasDef = true; + bool displacedAny = definePhysReg(MI, Reg); + if (MO.isEarlyClobber()) + HasEarlyClobber = true; + if (!displacedAny) + MO.setIsDead(true); + } + if (MO.readsReg()) + HasPhysRegUse = true; } - - // We can't allocate a physreg for a DebugValue, sorry! - LLVM_DEBUG(dbgs() << "Unable to allocate vreg used by DBG_VALUE"); - MO.setReg(0); } - - // If Reg hasn't been spilled, put this DBG_VALUE in LiveDbgValueMap so - // that future spills of Reg will have DBG_VALUEs. - LiveDbgValueMap[Reg].push_back(DebugMI); - continue; + } else if (MO.isRegMask()) { + HasRegMask = true; } + } - if (MI.isDebugLabel()) - continue; + // Allocate virtreg defs. + if (HasDef) { + if (HasVRegDef) { + // Special handling for early clobbers: We need to assign them first + // and cannot use any of the registers from pre-assigned uses. + if (NeedToAssignLiveThroughs) { + LLVM_DEBUG(dbgs() << "Need to assign livethroughs\n"); + for (const MachineOperand &MO : MI.operands()) { + if (!MO.isReg() || !MO.readsReg()) + continue; + unsigned Reg = MO.getReg(); + if (TargetRegisterInfo::isPhysicalRegister(Reg)) { + LLVM_DEBUG(dbgs() << "mark used: " << printReg(Reg, TRI) << '\n'); + markRegUsedInInstr(Reg); + } + } + // Assign early clobbers and tied registers first. 
+ for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I) { + const MachineOperand &MO = MI.getOperand(I); + if (!MO.isReg() || !MO.isDef()) + continue; + if (!MO.isEarlyClobber() && !MO.isTied() && + (MO.getSubReg() == 0 || MO.isUndef())) + continue; + unsigned Reg = MO.getReg(); + if (TargetRegisterInfo::isVirtualRegister(Reg)) + defineLiveThroughVirtReg(MI, I, Reg); + } + // Reset UsedInInstr to just contain the preassigned defs. + UsedInInstr.clear(); + for (const MachineOperand &MO : MI.operands()) { + if (!MO.isReg() || !MO.isDef()) + continue; + unsigned Reg = MO.getReg(); + if (TargetRegisterInfo::isPhysicalRegister(Reg)) + markRegUsedInInstr(Reg); + } + } - // If this is a copy, we may be able to coalesce. - unsigned CopySrcReg = 0; - unsigned CopyDstReg = 0; - unsigned CopySrcSub = 0; - unsigned CopyDstSub = 0; - if (MI.isCopy()) { - CopyDstReg = MI.getOperand(0).getReg(); - CopySrcReg = MI.getOperand(1).getReg(); - CopyDstSub = MI.getOperand(0).getSubReg(); - CopySrcSub = MI.getOperand(1).getSubReg(); + // Assign virtual register defs. + for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I) { + MachineOperand &MO = MI.getOperand(I); + if (!MO.isReg() || !MO.isDef()) + continue; + unsigned Reg = MO.getReg(); + if (TargetRegisterInfo::isVirtualRegister(Reg)) + defineVirtReg(MI, I, Reg); + } } - // Track registers used by instruction. - UsedInInstr.clear(); - - // First scan. - // Mark physreg uses and early clobbers as used. - // Find the end of the virtreg operands - unsigned VirtOpEnd = 0; - bool hasTiedOps = false; - bool hasEarlyClobbers = false; - bool hasPartialRedefs = false; - bool hasPhysDefs = false; - for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { - MachineOperand &MO = MI.getOperand(i); - // Make sure MRI knows about registers clobbered by regmasks. - if (MO.isRegMask()) { - MRI->addPhysRegsUsedFromRegMask(MO.getRegMask()); + // Free registers occupied by defs. 
+ // Iterate operands in reverse order, so we see the implicit super register + // defs first (we added them earlier in case of ). + for (unsigned I = MI.getNumOperands(); I-- > 0;) { + MachineOperand &MO = MI.getOperand(I); + if (!MO.isReg() || !MO.isDef()) + continue; + // Do not free tied operands and early clobbers. + if (MO.isTied() || MO.isEarlyClobber()) + continue; + // subreg defs don't free the full register. We left the subreg number + // around as a marker in setPhysReg() to recognize this case here. + if (MO.getSubReg() != 0) { + MO.setSubReg(0); continue; } - if (!MO.isReg()) continue; unsigned Reg = MO.getReg(); - if (!Reg) continue; - if (TargetRegisterInfo::isVirtualRegister(Reg)) { - VirtOpEnd = i+1; - if (MO.isUse()) { - hasTiedOps = hasTiedOps || - MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1; - } else { - if (MO.isEarlyClobber()) - hasEarlyClobbers = true; - if (MO.getSubReg() && MI.readsVirtualRegister(Reg)) - hasPartialRedefs = true; - } + if (!Reg) continue; - } - if (!MRI->isAllocatable(Reg)) continue; - if (MO.isUse()) { - usePhysReg(MO); - } else if (MO.isEarlyClobber()) { - definePhysReg(MI, Reg, - (MO.isImplicit() || MO.isDead()) ? regFree : regReserved); - hasEarlyClobbers = true; - } else - hasPhysDefs = true; + assert(TargetRegisterInfo::isPhysicalRegister(Reg)); + if (MRI->isReserved(Reg)) + continue; + freePhysReg(Reg); + unmarkRegUsedInInstr(Reg); } + } - // The instruction may have virtual register operands that must be allocated - // the same register at use-time and def-time: early clobbers and tied - // operands. If there are also physical defs, these registers must avoid - // both physical defs and uses, making them more constrained than normal - // operands. - // Similarly, if there are multiple defs and tied operands, we must make - // sure the same register is allocated to uses and defs. - // We didn't detect inline asm tied operands above, so just make this extra - // pass for all inline asm. 
- if (MI.isInlineAsm() || hasEarlyClobbers || hasPartialRedefs || - (hasTiedOps && (hasPhysDefs || MCID.getNumDefs() > 1))) { - handleThroughOperands(MI, VirtDead); - // Don't attempt coalescing when we have funny stuff going on. - CopyDstReg = 0; - // Pretend we have early clobbers so the use operands get marked below. - // This is not necessary for the common case of a single tied use. - hasEarlyClobbers = true; - } + // Displace clobbered registers. + if (HasRegMask) { + for (const MachineOperand &MO : MI.operands()) { + if (MO.isRegMask()) { + // MRI bookkeeping. + MRI->addPhysRegsUsedFromRegMask(MO.getRegMask()); - // Second scan. - // Allocate virtreg uses. - for (unsigned I = 0; I != VirtOpEnd; ++I) { - const MachineOperand &MO = MI.getOperand(I); - if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); - if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue; - if (MO.isUse()) { - LiveRegMap::iterator LRI = reloadVirtReg(MI, I, Reg, CopyDstReg); - MCPhysReg PhysReg = LRI->PhysReg; - CopySrcReg = (CopySrcReg == Reg || CopySrcReg == PhysReg) ? PhysReg : 0; - if (setPhysReg(MI, I, PhysReg)) - killVirtReg(LRI); + // Displace clobbered registers. + const uint32_t *Mask = MO.getRegMask(); + for (LiveRegMap::iterator LRI = LiveVirtRegs.begin(), + LRIE = LiveVirtRegs.end(); LRI != LRIE; ++LRI) { + MCPhysReg PhysReg = LRI->PhysReg; + if (PhysReg != 0 && MachineOperand::clobbersPhysReg(Mask, PhysReg)) + displacePhysReg(MI, PhysReg); + } } } + } - // Track registers defined by instruction - early clobbers and tied uses at - // this point. - UsedInInstr.clear(); - if (hasEarlyClobbers) { - for (const MachineOperand &MO : MI.operands()) { - if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); - if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue; - // Look for physreg defs and tied uses. - if (!MO.isDef() && !MO.isTied()) continue; - markRegUsedInInstr(Reg); - } + // Apply pre-assigned register uses to state. 
+ if (HasPhysRegUse) { + for (MachineOperand &MO : MI.operands()) { + if (!MO.isReg() || !MO.readsReg()) + continue; + unsigned Reg = MO.getReg(); + if (!TargetRegisterInfo::isPhysicalRegister(Reg)) + continue; + if (MRI->isReserved(Reg)) + continue; + bool displacedAny = usePhysReg(MI, Reg); + if (!displacedAny && !MRI->isReserved(Reg)) + MO.setIsKill(true); } + } - unsigned DefOpEnd = MI.getNumOperands(); - if (MI.isCall()) { - // Spill all virtregs before a call. This serves one purpose: If an - // exception is thrown, the landing pad is going to expect to find - // registers in their spill slots. - // Note: although this is appealing to just consider all definitions - // as call-clobbered, this is not correct because some of those - // definitions may be used later on and we do not want to reuse - // those for virtual registers in between. - LLVM_DEBUG(dbgs() << " Spilling remaining registers before call.\n"); - spillAll(MI); + // Allocate virtreg uses and insert reloads as necessary. + for (unsigned I = 0; I < MI.getNumOperands(); ++I) { + MachineOperand &MO = MI.getOperand(I); + if (!MO.isReg() || !MO.isUse()) + continue; + unsigned Reg = MO.getReg(); + if (TargetRegisterInfo::isVirtualRegister(Reg)) { + if (MO.isUndef()) { + allocVirtRegUndef(MO); + } else { + assert(!MO.isInternalRead() && "Bundles not supported"); + assert(MO.readsReg() && "reading use"); + useVirtReg(MI, I, Reg); + } } + } - // Third scan. - // Allocate defs and collect dead defs. - for (unsigned I = 0; I != DefOpEnd; ++I) { - const MachineOperand &MO = MI.getOperand(I); - if (!MO.isReg() || !MO.isDef() || !MO.getReg() || MO.isEarlyClobber()) + // Free early clobbers. 
+ if (HasEarlyClobber) { + for (unsigned I = MI.getNumOperands(); I-- > 0; ) { + MachineOperand &MO = MI.getOperand(I); + if (!MO.isReg() || !MO.isDef() || !MO.isEarlyClobber()) continue; - unsigned Reg = MO.getReg(); - - if (TargetRegisterInfo::isPhysicalRegister(Reg)) { - if (!MRI->isAllocatable(Reg)) continue; - definePhysReg(MI, Reg, MO.isDead() ? regFree : regReserved); + // subreg defs don't free the full register. We left the subreg number + // around as a marker in setPhysReg() to recognize this case here. + if (MO.getSubReg() != 0) { + MO.setSubReg(0); continue; } - LiveRegMap::iterator LRI = defineVirtReg(MI, I, Reg, CopySrcReg); - MCPhysReg PhysReg = LRI->PhysReg; - if (setPhysReg(MI, I, PhysReg)) { - VirtDead.push_back(Reg); - CopyDstReg = 0; // cancel coalescing; - } else - CopyDstReg = (CopyDstReg == Reg || CopyDstReg == PhysReg) ? PhysReg : 0; + + unsigned Reg = MO.getReg(); + if (!Reg) + continue; + assert(TargetRegisterInfo::isPhysicalRegister(Reg) && + "should have register assigned"); + + // We sometimes get odd situations like: + // early-clobber %x0 = INSTRUCTION %x0 + // which is semantically questionable as the early-clobber should + // apply before the use. But in practice we consider the use to + // happen before the early clobber now. Don't free the early clobber + // register in this case. + if (MI.readsRegister(Reg, TRI)) + continue; + + freePhysReg(Reg); } + } - // Kill dead defs after the scan to ensure that multiple defs of the same - // register are allocated identically. We didn't need to do this for uses - // because we are crerating our own kill flags, and they are always at the - // last use. 
- for (unsigned VirtReg : VirtDead) - killVirtReg(VirtReg); - VirtDead.clear(); - - if (CopyDstReg && CopyDstReg == CopySrcReg && CopyDstSub == CopySrcSub) { - LLVM_DEBUG(dbgs() << "-- coalescing: " << MI); - Coalesced.push_back(&MI); - } else { - LLVM_DEBUG(dbgs() << "<< " << MI); + LLVM_DEBUG(dbgs() << "<< " << MI); + if (MI.isCopy() && MI.getOperand(0).getReg() == MI.getOperand(1).getReg() && + MI.getNumOperands() == 2) { + LLVM_DEBUG(dbgs() << "Mark identity copy for removal\n"); + Coalesced.push_back(&MI); + ++NumCoalesced; + } +} + +void RegAllocFast::handleDebugValue(MachineInstr &MI) { + MachineOperand &MO = MI.getOperand(0); + + // Ignore DBG_VALUEs that aren't based on virtual registers. These are + // mostly constants and frame indices. + if (!MO.isReg()) + return; + unsigned Reg = MO.getReg(); + if (!TargetRegisterInfo::isVirtualRegister(Reg)) + return; + + // Already spilled to a stackslot? + int SS = StackSlotForVirtReg[Reg]; + if (SS != -1) { + // Modify DBG_VALUE now that the value is in a spill slot. + updateDbgValueForSpill(MI, SS); + LLVM_DEBUG(dbgs() << "Rewrite DBG_VALUE for spilled memory: " << MI); + return; + } + + // See if this virtual register has already been allocated to a physical + // register or spilled to a stack slot. + LiveRegMap::iterator LRI = findLiveVirtReg(Reg); + if (LRI != LiveVirtRegs.end() && LRI->PhysReg != 0) { + setPhysReg(MI, MO, LRI->PhysReg); + } else { + DanglingDbgValues[Reg].push_back(&MI); + } + + // If Reg hasn't been spilled, put this DBG_VALUE in LiveDbgValueMap so + // that future spills of Reg will have DBG_VALUEs. 
+ LiveDbgValueMap[Reg].push_back(&MI); +} + +void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) { + this->MBB = &MBB; + LLVM_DEBUG(dbgs() << "\nAllocating " << MBB); + + RegUnitState.assign(TRI->getNumRegUnits(), regFree); + assert(LiveVirtRegs.empty() && "Mapping not cleared from last block?"); + + Coalesced.clear(); + + // Traverse block in reverse order allocating instructions one by one. + for (MachineInstr &MI : make_range(MBB.rbegin(), MBB.rend())) { + LLVM_DEBUG( + dbgs() << "\n>> " << MI << "Regs:"; + dumpState() + ); + + // Special handling for debug values. Note that they are not allowed to + // affect codegen of the other instructions in any way. + if (MI.isDebugValue()) { + handleDebugValue(MI); + continue; } + + allocateInstruction(MI); } + LLVM_DEBUG( + dbgs() << "Begin Regs:"; + dumpState() + ); + // Spill all physical registers holding virtual registers now. - LLVM_DEBUG(dbgs() << "Spilling live registers at end of block.\n"); - spillAll(MBB.getFirstTerminator()); + LLVM_DEBUG(dbgs() << "Loading live registers at begin of block.\n"); + reloadAtBegin(MBB); + + LiveVirtRegs.clear(); // Erase all the coalesced copies. We are delaying it until now because // LiveVirtRegs might refer to the instrs. for (MachineInstr *MI : Coalesced) MBB.erase(MI); - NumCopies += Coalesced.size(); + + for (auto &UDBGPair : DanglingDbgValues) { + for (MachineInstr *DbgValue : UDBGPair.second) { + assert(DbgValue->isDebugValue() && "expected DBG_VALUE"); + MachineOperand &MO = DbgValue->getOperand(0); + // Nothing to do if the vreg was spilled in the meantime. + if (!MO.isReg()) + continue; + LLVM_DEBUG(dbgs() << "Register did not survive for " << *DbgValue + << '\n'); + MO.setReg(0); + } + } + DanglingDbgValues.clear(); LLVM_DEBUG(MBB.dump()); } -/// Allocates registers for a function. 
bool RegAllocFast::runOnMachineFunction(MachineFunction &MF) { LLVM_DEBUG(dbgs() << "********** FAST REGISTER ALLOCATION **********\n" << "********** Function: " << MF.getName() << '\n'); @@ -1103,6 +1214,8 @@ unsigned NumVirtRegs = MRI->getNumVirtRegs(); StackSlotForVirtReg.resize(NumVirtRegs); LiveVirtRegs.setUniverse(NumVirtRegs); + MayLiveAccrossBlocks.clear(); + MayLiveAccrossBlocks.resize(NumVirtRegs); // Loop over all of the basic blocks, eliminating virtual register references for (MachineBasicBlock &MBB : MF)