Index: include/llvm/CodeGen/MachineScheduler.h =================================================================== --- include/llvm/CodeGen/MachineScheduler.h +++ include/llvm/CodeGen/MachineScheduler.h @@ -618,6 +618,7 @@ LogMaxQID = 2 }; + bool IsPostRA = false; ScheduleDAGMI *DAG = nullptr; const TargetSchedModel *SchedModel = nullptr; SchedRemainder *Rem = nullptr; Index: lib/CodeGen/MachineScheduler.cpp =================================================================== --- lib/CodeGen/MachineScheduler.cpp +++ lib/CodeGen/MachineScheduler.cpp @@ -1881,6 +1881,7 @@ init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) { reset(); DAG = dag; + IsPostRA = (DAG->MF.getRegInfo().getNumVirtRegs() == 0); SchedModel = smodel; Rem = rem; if (SchedModel->hasInstrSchedModel()) { @@ -1931,27 +1932,27 @@ /// simple counters that the scheduler itself maintains. It explicitly checks /// for instruction dispatch limitations, including the number of micro-ops that /// can dispatch per cycle. -/// -/// TODO: Also check whether the SU must start a new group. 
bool SchedBoundary::checkHazard(SUnit *SU) { if (HazardRec->isEnabled() && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) { return true; } - unsigned uops = SchedModel->getNumMicroOps(SU->getInstr()); - if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) { - LLVM_DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops=" - << SchedModel->getNumMicroOps(SU->getInstr()) << '\n'); - return true; - } + if (IsPostRA) { + unsigned uops = SchedModel->getNumMicroOps(SU->getInstr()); + if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) { + LLVM_DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops=" + << SchedModel->getNumMicroOps(SU->getInstr()) << '\n'); + return true; + } - if (CurrMOps > 0 && - ((isTop() && SchedModel->mustBeginGroup(SU->getInstr())) || - (!isTop() && SchedModel->mustEndGroup(SU->getInstr())))) { - LLVM_DEBUG(dbgs() << " hazard: SU(" << SU->NodeNum << ") must " - << (isTop() ? "begin" : "end") << " group\n"); - return true; + if (CurrMOps > 0 && + ((isTop() && SchedModel->mustBeginGroup(SU->getInstr())) || + (!isTop() && SchedModel->mustEndGroup(SU->getInstr())))) { + LLVM_DEBUG(dbgs() << " hazard: SU(" << SU->NodeNum << ") must " + << (isTop() ? "begin" : "end") << " group\n"); + return true; + } } if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) { @@ -2147,9 +2148,9 @@ // exceed the issue width. const MCSchedClassDesc *SC = DAG->getSchedClass(SU); unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr()); - assert( - (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) && - "Cannot schedule this instruction's MicroOps in the current cycle."); + assert((!IsPostRA || (CurrMOps == 0 || + (CurrMOps + IncMOps) <= SchedModel->getIssueWidth())) && + "Cannot schedule this instruction's MicroOps in the current cycle."); unsigned ReadyCycle = (isTop() ? 
SU->TopReadyCycle : SU->BotReadyCycle); LLVM_DEBUG(dbgs() << " Ready @" << ReadyCycle << "c\n"); @@ -2257,11 +2258,12 @@ // This must be done after NextCycle has been adjust for all other stalls. // Calling bumpCycle(X) will reduce CurrMOps by one issue group and set // currCycle to X. - if ((isTop() && SchedModel->mustEndGroup(SU->getInstr())) || - (!isTop() && SchedModel->mustBeginGroup(SU->getInstr()))) { - LLVM_DEBUG(dbgs() << " Bump cycle to " << (isTop() ? "end" : "begin") - << " group\n"); - bumpCycle(++NextCycle); + if (IsPostRA && + ((isTop() && SchedModel->mustEndGroup(SU->getInstr())) || + (!isTop() && SchedModel->mustBeginGroup(SU->getInstr())))) { + LLVM_DEBUG(dbgs() << " Bump cycle to " << (isTop() ? "end" : "begin") + << " group\n"); + bumpCycle(++NextCycle); } while (CurrMOps >= SchedModel->getIssueWidth()) {