Index: llvm/include/llvm/CodeGen/MachineScheduler.h
===================================================================
--- llvm/include/llvm/CodeGen/MachineScheduler.h
+++ llvm/include/llvm/CodeGen/MachineScheduler.h
@@ -597,32 +597,11 @@
   void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
 };
 
-/// Each Scheduling boundary is associated with ready queues. It tracks the
-/// current cycle in the direction of movement, and maintains the state
-/// of "hazards" and other interlocks at the current cycle.
-class SchedBoundary {
-public:
-  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
-  enum {
-    TopQID = 1,
-    BotQID = 2,
-    LogMaxQID = 2
-  };
-
-  ScheduleDAGMI *DAG = nullptr;
-  const TargetSchedModel *SchedModel = nullptr;
-  SchedRemainder *Rem = nullptr;
-
-  ReadyQueue Available;
-  ReadyQueue Pending;
-
-  ScheduleHazardRecognizer *HazardRec = nullptr;
-
+/// Each SchedState is associated with a SchedBoundary. It tracks the resource
+/// state in the direction of movement.
+class SchedState {
+  friend class SchedBoundary;
 private:
-  /// True if the pending Q should be checked/updated before scheduling another
-  /// instruction.
-  bool CheckPending;
-
   /// Number of cycles it takes to issue the instructions scheduled in this
   /// zone. It is defined as: scheduled-micro-ops / issue-width + stalls.
   /// See getStalls().
@@ -675,7 +654,52 @@
   // times we should retry the pending queue because of a hazard.
   unsigned MaxObservedStall;
 #endif
+public:
+  SchedState() : CurrCycle(0), CurrMOps(0),
+    MinReadyCycle(std::numeric_limits<unsigned>::max()), ExpectedLatency(0),
+    DependentLatency(0), RetiredMOps(0), MaxExecutedResCount(0),
+    ZoneCritResIdx(0), IsResourceLimited(false)
+  #ifndef NDEBUG
+    // Track the maximum number of stall cycles that could arise either from the
+    // latency of a DAG edge or the number of cycles that a processor resource
+    // is reserved (SchedState::ReservedCycles).
+    , MaxObservedStall(0)
+  #endif
+  {
+    // Reserve a zero-count for invalid CritResIdx.
+    ExecutedResCounts.resize(1);
+    assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
+  }
+};
+
+/// Each Scheduling boundary is associated with ready queues. It tracks the
+/// current cycle in the direction of movement, and maintains the state
+/// of "hazards" and other interlocks at the current cycle.
+class SchedBoundary {
+public:
+  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
+  enum {
+    TopQID = 1,
+    BotQID = 2,
+    LogMaxQID = 2
+  };
+
+  ScheduleDAGMI *DAG = nullptr;
+  const TargetSchedModel *SchedModel = nullptr;
+  SchedRemainder *Rem = nullptr;
+  ReadyQueue Available;
+  ReadyQueue Pending;
+
+  ScheduleHazardRecognizer *HazardRec = nullptr;
+
+private:
+  /// True if the pending Q should be checked/updated before scheduling another
+  /// instruction.
+  bool CheckPending;
+
+  /// Scheduling resource state.
+  SchedState State;
 
 public:
   /// Pending queues extend the ready queues with the same ID and the
   /// PendingFlag set.
@@ -696,19 +720,19 @@
   }
 
   /// Number of cycles to issue the instructions scheduled in this zone.
-  unsigned getCurrCycle() const { return CurrCycle; }
+  unsigned getCurrCycle() const { return State.CurrCycle; }
 
   /// Micro-ops issued in the current cycle
-  unsigned getCurrMOps() const { return CurrMOps; }
+  unsigned getCurrMOps() const { return State.CurrMOps; }
 
   // The latency of dependence chains leading into this zone.
-  unsigned getDependentLatency() const { return DependentLatency; }
+  unsigned getDependentLatency() const { return State.DependentLatency; }
 
   /// Get the number of latency cycles "covered" by the scheduled
   /// instructions. This is the larger of the critical path within the zone
   /// and the number of cycles required to issue the instructions.
   unsigned getScheduledLatency() const {
-    return std::max(ExpectedLatency, CurrCycle);
+    return std::max(State.ExpectedLatency, State.CurrCycle);
   }
 
   unsigned getUnscheduledLatency(SUnit *SU) const {
@@ -716,29 +740,29 @@
   }
 
   unsigned getResourceCount(unsigned ResIdx) const {
-    return ExecutedResCounts[ResIdx];
+    return State.ExecutedResCounts[ResIdx];
   }
 
   /// Get the scaled count of scheduled micro-ops and resources, including
   /// executed resources.
   unsigned getCriticalCount() const {
-    if (!ZoneCritResIdx)
-      return RetiredMOps * SchedModel->getMicroOpFactor();
-    return getResourceCount(ZoneCritResIdx);
+    if (!State.ZoneCritResIdx)
+      return State.RetiredMOps * SchedModel->getMicroOpFactor();
+    return getResourceCount(State.ZoneCritResIdx);
   }
 
   /// Get a scaled count for the minimum execution time of the scheduled
   /// micro-ops that are ready to execute by getExecutedCount. Notice the
   /// feedback loop.
   unsigned getExecutedCount() const {
-    return std::max(CurrCycle * SchedModel->getLatencyFactor(),
-                    MaxExecutedResCount);
+    return std::max(State.CurrCycle * SchedModel->getLatencyFactor(),
+                    State.MaxExecutedResCount);
   }
 
-  unsigned getZoneCritResIdx() const { return ZoneCritResIdx; }
+  unsigned getZoneCritResIdx() const { return State.ZoneCritResIdx; }
 
   // Is the scheduled region resource limited vs. latency limited.
-  bool isResourceLimited() const { return IsResourceLimited; }
+  bool isResourceLimited() const { return State.IsResourceLimited; }
 
   /// Get the difference between the given SUnit's ready time and the current
   /// cycle.
@@ -1009,9 +1033,11 @@
 class PostGenericScheduler : public GenericSchedulerBase {
 protected:
   ScheduleDAGMI *DAG;
-  SchedBoundary Top;
   SmallVector<SUnit*, 8> BotRoots;
 
+protected:
+  SchedBoundary Top;
+
 public:
   PostGenericScheduler(const MachineSchedContext *C):
     GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ") {}
Index: llvm/lib/CodeGen/MachineScheduler.cpp
===================================================================
--- llvm/lib/CodeGen/MachineScheduler.cpp
+++ llvm/lib/CodeGen/MachineScheduler.cpp
@@ -1859,26 +1859,8 @@
   Available.clear();
   Pending.clear();
   CheckPending = false;
-  CurrCycle = 0;
-  CurrMOps = 0;
-  MinReadyCycle = std::numeric_limits<unsigned>::max();
-  ExpectedLatency = 0;
-  DependentLatency = 0;
-  RetiredMOps = 0;
-  MaxExecutedResCount = 0;
-  ZoneCritResIdx = 0;
-  IsResourceLimited = false;
-  ReservedCycles.clear();
-  ReservedCyclesIndex.clear();
-#ifndef NDEBUG
-  // Track the maximum number of stall cycles that could arise either from the
-  // latency of a DAG edge or the number of cycles that a processor resource is
-  // reserved (SchedBoundary::ReservedCycles).
-  MaxObservedStall = 0;
-#endif
-  // Reserve a zero-count for invalid CritResIdx.
-  ExecutedResCounts.resize(1);
-  assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
+
+  State = SchedState();
 }
 
 void SchedRemainder::
@@ -1909,16 +1891,16 @@
   Rem = rem;
   if (SchedModel->hasInstrSchedModel()) {
     unsigned ResourceCount = SchedModel->getNumProcResourceKinds();
-    ReservedCyclesIndex.resize(ResourceCount);
-    ExecutedResCounts.resize(ResourceCount);
+    State.ReservedCyclesIndex.resize(ResourceCount);
+    State.ExecutedResCounts.resize(ResourceCount);
     unsigned NumUnits = 0;
 
     for (unsigned i = 0; i < ResourceCount; ++i) {
-      ReservedCyclesIndex[i] = NumUnits;
+      State.ReservedCyclesIndex[i] = NumUnits;
       NumUnits += SchedModel->getProcResource(i)->NumUnits;
     }
 
-    ReservedCycles.resize(NumUnits, InvalidCycle);
+    State.ReservedCycles.resize(NumUnits, InvalidCycle);
   }
 }
 
@@ -1934,8 +1916,8 @@
     return 0;
 
   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
-  if (ReadyCycle > CurrCycle)
-    return ReadyCycle - CurrCycle;
+  if (ReadyCycle > State.CurrCycle)
+    return ReadyCycle - State.CurrCycle;
   return 0;
 }
 
@@ -1943,7 +1925,7 @@
 /// can be scheduled.
 unsigned SchedBoundary::getNextResourceCycleByInstance(unsigned InstanceIdx,
                                                        unsigned Cycles) {
-  unsigned NextUnreserved = ReservedCycles[InstanceIdx];
+  unsigned NextUnreserved = State.ReservedCycles[InstanceIdx];
   // If this resource has never been used, always return cycle zero.
   if (NextUnreserved == InvalidCycle)
     return 0;
@@ -1960,7 +1942,7 @@
 SchedBoundary::getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
   unsigned MinNextUnreserved = InvalidCycle;
   unsigned InstanceIdx = 0;
-  unsigned StartIndex = ReservedCyclesIndex[PIdx];
+  unsigned StartIndex = State.ReservedCyclesIndex[PIdx];
   unsigned NumberOfInstances = SchedModel->getProcResource(PIdx)->NumUnits;
   assert(NumberOfInstances > 0 &&
          "Cannot have zero instances of a ProcResource");
@@ -1996,13 +1978,14 @@
   }
 
   unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
-  if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
+  if ((State.CurrMOps > 0) &&
+      (State.CurrMOps + uops > SchedModel->getIssueWidth())) {
     LLVM_DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
                       << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
     return true;
   }
 
-  if (CurrMOps > 0 &&
+  if (State.CurrMOps > 0 &&
       ((isTop() && SchedModel->mustBeginGroup(SU->getInstr())) ||
        (!isTop() && SchedModel->mustEndGroup(SU->getInstr())))) {
     LLVM_DEBUG(dbgs() << "  hazard: SU(" << SU->NodeNum << ") must "
@@ -2019,14 +2002,14 @@
       unsigned Cycles = PE.Cycles;
       unsigned NRCycle, InstanceIdx;
       std::tie(NRCycle, InstanceIdx) = getNextResourceCycle(ResIdx, Cycles);
-      if (NRCycle > CurrCycle) {
+      if (NRCycle > State.CurrCycle) {
 #ifndef NDEBUG
-        MaxObservedStall = std::max(Cycles, MaxObservedStall);
+        State.MaxObservedStall = std::max(Cycles, State.MaxObservedStall);
 #endif
         LLVM_DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") "
-                          << SchedModel->getResourceName(ResIdx)
-                          << '[' << InstanceIdx - ReservedCyclesIndex[ResIdx] << ']'
-                          << "=" << NRCycle << "c\n");
+                          << SchedModel->getResourceName(ResIdx) << '['
+                          << InstanceIdx - State.ReservedCyclesIndex[ResIdx]
+                          << ']' << "=" << NRCycle << "c\n");
         return true;
       }
     }
@@ -2063,7 +2046,7 @@
     return 0;
 
   unsigned OtherCritCount = Rem->RemIssueCount
-    + (RetiredMOps * SchedModel->getMicroOpFactor());
+    + (State.RetiredMOps * SchedModel->getMicroOpFactor());
   LLVM_DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
                     << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
   for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
@@ -2090,17 +2073,18 @@
   // ReadyCycle was been bumped up to the CurrCycle when this node was
   // scheduled, but CurrCycle may have been eagerly advanced immediately after
   // scheduling, so may now be greater than ReadyCycle.
-  if (ReadyCycle > CurrCycle)
-    MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
+  if (ReadyCycle > State.CurrCycle)
+    State.MaxObservedStall =
+        std::max(ReadyCycle - State.CurrCycle, State.MaxObservedStall);
 #endif
 
-  if (ReadyCycle < MinReadyCycle)
-    MinReadyCycle = ReadyCycle;
+  if (ReadyCycle < State.MinReadyCycle)
+    State.MinReadyCycle = ReadyCycle;
 
   // Check for interlocks first. For the purpose of other heuristics, an
   // instruction that cannot issue appears as if it's not in the ReadyQueue.
   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
-  if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU) ||
+  if ((!IsBuffered && ReadyCycle > State.CurrCycle) || checkHazard(SU) ||
       Available.size() >= ReadyListLimit)
     Pending.push(SU);
   else
@@ -2110,27 +2094,28 @@
 /// Move the boundary of scheduled code by one cycle.
 void SchedBoundary::bumpCycle(unsigned NextCycle) {
   if (SchedModel->getMicroOpBufferSize() == 0) {
-    assert(MinReadyCycle < std::numeric_limits<unsigned>::max() &&
+    assert(State.MinReadyCycle < std::numeric_limits<unsigned>::max() &&
            "MinReadyCycle uninitialized");
-    if (MinReadyCycle > NextCycle)
-      NextCycle = MinReadyCycle;
+    if (State.MinReadyCycle > NextCycle)
+      NextCycle = State.MinReadyCycle;
   }
   // Update the current micro-ops, which will issue in the next cycle.
-  unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
-  CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;
+  unsigned DecMOps = SchedModel->getIssueWidth() *
+                     (NextCycle - State.CurrCycle);
+  State.CurrMOps = (State.CurrMOps <= DecMOps) ? 0 : State.CurrMOps - DecMOps;
 
   // Decrement DependentLatency based on the next cycle.
-  if ((NextCycle - CurrCycle) > DependentLatency)
-    DependentLatency = 0;
+  if ((NextCycle - State.CurrCycle) > State.DependentLatency)
+    State.DependentLatency = 0;
   else
-    DependentLatency -= (NextCycle - CurrCycle);
+    State.DependentLatency -= (NextCycle - State.CurrCycle);
 
   if (!HazardRec->isEnabled()) {
     // Bypass HazardRec virtual calls.
-    CurrCycle = NextCycle;
+    State.CurrCycle = NextCycle;
   } else {
     // Bypass getHazardType calls in case of long latency.
-    for (; CurrCycle != NextCycle; ++CurrCycle) {
+    for (; State.CurrCycle != NextCycle; ++State.CurrCycle) {
       if (isTop())
         HazardRec->AdvanceCycle();
       else
@@ -2138,18 +2123,18 @@
     }
   }
   CheckPending = true;
-  IsResourceLimited =
+  State.IsResourceLimited =
       checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(),
                          getScheduledLatency(), true);
 
-  LLVM_DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName()
-                    << '\n');
+  LLVM_DEBUG(dbgs() << "Cycle: " << State.CurrCycle << ' '
                    << Available.getName() << '\n');
 }
 
 void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
-  ExecutedResCounts[PIdx] += Count;
-  if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
-    MaxExecutedResCount = ExecutedResCounts[PIdx];
+  State.ExecutedResCounts[PIdx] += Count;
+  if (State.ExecutedResCounts[PIdx] > State.MaxExecutedResCount)
+    State.MaxExecutedResCount = State.ExecutedResCounts[PIdx];
 }
 
 /// Add the given processor resource to this scheduled zone.
@@ -2173,8 +2158,9 @@
 
   // Check if this resource exceeds the current critical resource. If so, it
   // becomes the critical resource.
-  if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
-    ZoneCritResIdx = PIdx;
+  if (State.ZoneCritResIdx != PIdx &&
+      (getResourceCount(PIdx) > getCriticalCount())) {
+    State.ZoneCritResIdx = PIdx;
     LLVM_DEBUG(dbgs() << "  *** Critical resource "
                       << SchedModel->getResourceName(PIdx) << ": "
                       << getResourceCount(PIdx) / SchedModel->getLatencyFactor()
@@ -2183,10 +2169,10 @@
   // For reserved resources, record the highest cycle using the resource.
   unsigned NextAvailable, InstanceIdx;
   std::tie(NextAvailable, InstanceIdx) = getNextResourceCycle(PIdx, Cycles);
-  if (NextAvailable > CurrCycle) {
+  if (NextAvailable > State.CurrCycle) {
     LLVM_DEBUG(dbgs() << "  Resource conflict: "
-                      << SchedModel->getResourceName(PIdx)
-                      << '[' << InstanceIdx - ReservedCyclesIndex[PIdx] << ']'
+                      << SchedModel->getResourceName(PIdx) << '['
+                      << InstanceIdx - State.ReservedCyclesIndex[PIdx] << ']'
                       << " reserved until @" << NextAvailable << "\n");
   }
   return NextAvailable;
@@ -2209,17 +2195,17 @@
   // exceed the issue width.
   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
   unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
-  assert(
-      (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
-      "Cannot schedule this instruction's MicroOps in the current cycle.");
+  assert((State.CurrMOps == 0 ||
+          (State.CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
+         "Cannot schedule this instruction's MicroOps in the current cycle.");
 
   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
   LLVM_DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");
 
-  unsigned NextCycle = CurrCycle;
+  unsigned NextCycle = State.CurrCycle;
   switch (SchedModel->getMicroOpBufferSize()) {
   case 0:
-    assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
+    assert(ReadyCycle <= State.CurrCycle && "Broken PendingQueue");
     break;
   case 1:
     if (ReadyCycle > NextCycle) {
@@ -2236,23 +2222,23 @@
       NextCycle = ReadyCycle;
     break;
   }
-  RetiredMOps += IncMOps;
+  State.RetiredMOps += IncMOps;
 
   // Update resource counts and critical resource.
   if (SchedModel->hasInstrSchedModel()) {
     unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
     assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
     Rem->RemIssueCount -= DecRemIssue;
-    if (ZoneCritResIdx) {
+    if (State.ZoneCritResIdx) {
       // Scale scheduled micro-ops for comparing with the critical resource.
       unsigned ScaledMOps =
-        RetiredMOps * SchedModel->getMicroOpFactor();
+        State.RetiredMOps * SchedModel->getMicroOpFactor();
 
       // If scaled micro-ops are now more than the previous critical resource by
      // a full cycle, then micro-ops issue becomes critical.
-      if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
+      if ((int)(ScaledMOps - getResourceCount(State.ZoneCritResIdx))
          >= (int)SchedModel->getLatencyFactor()) {
-        ZoneCritResIdx = 0;
+        State.ZoneCritResIdx = 0;
        LLVM_DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
                          << ScaledMOps / SchedModel->getLatencyFactor()
                          << "c\n");
@@ -2279,17 +2265,19 @@
         unsigned ReservedUntil, InstanceIdx;
         std::tie(ReservedUntil, InstanceIdx) = getNextResourceCycle(PIdx, 0);
         if (isTop()) {
-          ReservedCycles[InstanceIdx] =
+          State.ReservedCycles[InstanceIdx] =
              std::max(ReservedUntil, NextCycle + PI->Cycles);
        } else
-          ReservedCycles[InstanceIdx] = NextCycle;
+          State.ReservedCycles[InstanceIdx] = NextCycle;
      }
    }
  }
 }
 // Update ExpectedLatency and DependentLatency.
- unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
- unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
+ unsigned &TopLatency = isTop() ?
+     State.ExpectedLatency : State.DependentLatency;
+ unsigned &BotLatency = isTop() ?
+     State.DependentLatency : State.ExpectedLatency;
 if (SU->getDepth() > TopLatency) {
   TopLatency = SU->getDepth();
   LLVM_DEBUG(dbgs() << "  " << Available.getName() << " TopLatency SU("
@@ -2301,12 +2289,12 @@
                     << SU->NodeNum << ") " << BotLatency << "c\n");
 }
 // If we stall for any reason, bump the cycle.
- if (NextCycle > CurrCycle)
+ if (NextCycle > State.CurrCycle)
   bumpCycle(NextCycle);
 else
   // After updating ZoneCritResIdx and ExpectedLatency, check if we're
   // resource limited. If a stall occurred, bumpCycle does this.
-  IsResourceLimited =
+  State.IsResourceLimited =
       checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(),
                          getScheduledLatency(), true);

@@ -2314,7 +2302,7 @@
   // resets CurrMOps. Loop to handle instructions with more MOps than issue in
   // one cycle. Since we commonly reach the max MOps here, opportunistically
   // bump the cycle to avoid uselessly checking everything in the readyQ.
-  CurrMOps += IncMOps;
+  State.CurrMOps += IncMOps;
 
   // Bump the cycle count for issue group constraints.
   // This must be done after NextCycle has been adjust for all other stalls.
@@ -2327,9 +2315,9 @@
     bumpCycle(++NextCycle);
   }
 
-  while (CurrMOps >= SchedModel->getIssueWidth()) {
-    LLVM_DEBUG(dbgs() << "  *** Max MOps " << CurrMOps << " at cycle "
-                      << CurrCycle << '\n');
+  while (State.CurrMOps >= SchedModel->getIssueWidth()) {
+    LLVM_DEBUG(dbgs() << "  *** Max MOps " << State.CurrMOps << " at cycle "
+                      << State.CurrCycle << '\n');
     bumpCycle(++NextCycle);
   }
   LLVM_DEBUG(dumpScheduledState());
@@ -2340,7 +2328,7 @@
 void SchedBoundary::releasePending() {
   // If the available queue is empty, it is safe to reset MinReadyCycle.
   if (Available.empty())
-    MinReadyCycle = std::numeric_limits<unsigned>::max();
+    State.MinReadyCycle = std::numeric_limits<unsigned>::max();
 
   // Check to see if any of the pending instructions are ready to issue. If
   // so, add them to the available queue.
@@ -2349,10 +2337,10 @@
     SUnit *SU = *(Pending.begin()+i);
     unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
 
-    if (ReadyCycle < MinReadyCycle)
-      MinReadyCycle = ReadyCycle;
+    if (ReadyCycle < State.MinReadyCycle)
+      State.MinReadyCycle = ReadyCycle;
 
-    if (!IsBuffered && ReadyCycle > CurrCycle)
+    if (!IsBuffered && ReadyCycle > State.CurrCycle)
       continue;
 
     if (checkHazard(SU))
@@ -2385,7 +2373,7 @@
   if (CheckPending)
     releasePending();
 
-  if (CurrMOps > 0) {
+  if (State.CurrMOps > 0) {
     // Defer any ready instrs that now have a hazard.
     for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
       if (checkHazard(*I)) {
@@ -2401,7 +2389,7 @@
     //     assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
     //            "permanent hazard");
     (void)i;
-    bumpCycle(CurrCycle + 1);
+    bumpCycle(State.CurrCycle + 1);
     releasePending();
   }
 
@@ -2419,22 +2407,22 @@
 LLVM_DUMP_METHOD void SchedBoundary::dumpScheduledState() const {
   unsigned ResFactor;
   unsigned ResCount;
-  if (ZoneCritResIdx) {
-    ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
-    ResCount = getResourceCount(ZoneCritResIdx);
+  if (State.ZoneCritResIdx) {
+    ResFactor = SchedModel->getResourceFactor(State.ZoneCritResIdx);
+    ResCount = getResourceCount(State.ZoneCritResIdx);
   } else {
     ResFactor = SchedModel->getMicroOpFactor();
-    ResCount = RetiredMOps * ResFactor;
+    ResCount = State.RetiredMOps * ResFactor;
   }
   unsigned LFactor = SchedModel->getLatencyFactor();
-  dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
-         << "  Retired: " << RetiredMOps;
+  dbgs() << Available.getName() << " @" << State.CurrCycle << "c\n"
+         << "  Retired: " << State.RetiredMOps;
   dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
   dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
         << ResCount / ResFactor << " "
-        << SchedModel->getResourceName(ZoneCritResIdx)
-        << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
-        << (IsResourceLimited ? " - Resource" : " - Latency")
+        << SchedModel->getResourceName(State.ZoneCritResIdx)
+        << "\n  ExpectedLatency: " << State.ExpectedLatency << "c\n"
+        << (State.IsResourceLimited ? " - Resource" : " - Latency")
        << " limited.\n";
 }
 #endif
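
For reference, here is a minimal standalone sketch of the pattern the patch applies. It is not part of the patch and uses invented names (Boundary, State, S): the per-zone counters that SchedBoundary::reset() used to clear one by one are hoisted into a default-constructible value type, so a reset becomes a single reassignment and the counters stay grouped behind one member.

// sched_state_sketch.cpp -- illustrative only; names are hypothetical.
#include <cassert>
#include <limits>
#include <vector>

class Boundary {
  // Everything the old reset() cleared field by field now lives in one
  // default-constructible value type.
  struct State {
    unsigned CurrCycle = 0;
    unsigned CurrMOps = 0;
    unsigned MinReadyCycle = std::numeric_limits<unsigned>::max();
    std::vector<unsigned> ExecutedResCounts = std::vector<unsigned>(1, 0);
  };
  State S;

public:
  // Replaces a long list of per-field assignments with one reassignment.
  void reset() { S = State(); }

  void bumpCycle(unsigned Next) {
    assert(Next >= S.CurrCycle && "cycles only move forward");
    S.CurrCycle = Next;
  }

  unsigned getCurrCycle() const { return S.CurrCycle; }
};

int main() {
  Boundary B;
  B.bumpCycle(3);
  B.reset();                // back to a freshly initialized state
  return B.getCurrCycle();  // 0
}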