diff --git a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
@@ -27,7 +27,7 @@
 using namespace llvm;
 
-#define DEBUG_TYPE "machine-scheduler"
+#define DEBUG_TYPE "igrouplp"
 
 namespace {
 
@@ -61,6 +61,22 @@
              cl::desc("The maximum number of instructions to include "
                       "in lds/gds write group."));
 
+static cl::opt<bool> EnableExactSolver(
+    "amdgpu-igrouplp-exact-solver", cl::Hidden,
+    cl::desc("Whether to use the exponential time solver to fit "
+             "the instructions to the pipeline as closely as "
+             "possible."),
+    cl::init(false));
+
+static cl::opt<Optional<unsigned>> CutoffForExact(
+    "amdgpu-igrouplp-exact-solver-cutoff", cl::init(None), cl::Hidden,
+    cl::desc("The maximum number of scheduling group conflicts "
+             "which we attempt to solve with the exponential time "
+             "exact solver. Problem sizes greater than this will "
+             "be solved by the less accurate greedy algorithm. Selecting "
+             "the solver by size is superseded by manually selecting "
+             "the solver (e.g. by amdgpu-igrouplp-exact-solver)."));
+
 // Components of the mask that determines which instruction types may be
 // classified into a SchedGroup.
 enum class SchedGroupMask {
@@ -97,8 +113,8 @@
   // SyncID.
   int SyncID = 0;
 
-  // Collection of SUnits that are classified as members of this group.
-  SmallVector<SUnit *, 32> Collection;
+  // SGID is used to map instructions to candidate SchedGroups.
+  int SGID;
 
   ScheduleDAGInstrs *DAG;
 
@@ -111,29 +127,26 @@
   // SchedGroup object.
   bool canAddMI(const MachineInstr &MI) const;
 
+public:
+  // Collection of SUnits that are classified as members of this group.
+  SmallVector<SUnit *, 32> Collection;
+
   // Returns true if SU can be added to this SchedGroup.
   bool canAddSU(SUnit &SU) const;
 
-  // Returns true if no more instructions may be added to this group.
-  bool isFull() const;
-
-  // Add SU to the SchedGroup.
-  void add(SUnit &SU) {
-    LLVM_DEBUG(dbgs() << "For SchedGroup with mask "
-                      << format_hex((int)SGMask, 10, true) << " adding "
-                      << *SU.getInstr());
-    Collection.push_back(&SU);
-  }
-
-public:
   // Add DAG dependencies from all SUnits in this SchedGroup and this SU. If
   // MakePred is true, SU will be a predecessor of the SUnits in this
   // SchedGroup, otherwise SU will be a successor.
   void link(SUnit &SU, bool MakePred = false);
 
-  // Add DAG dependencies from all SUnits in this SchedGroup and this SU. Use
-  // the predicate to determine whether SU should be a predecessor (P = true)
-  // or a successor (P = false) of this SchedGroup.
+  // Add DAG dependencies and track which edges are added, and the count of
+  // missed edges.
+  int link(SUnit &SU, bool MakePred,
+           std::vector<std::pair<SUnit *, SUnit *>> &AddedEdges);
+
+  // Add DAG dependencies from all SUnits in this SchedGroup and this SU.
+  // Use the predicate to determine whether SU should be a predecessor (P =
+  // true) or a successor (P = false) of this SchedGroup.
   void link(SUnit &SU, function_ref<bool(const SUnit *A, const SUnit *B)> P);
 
   // Add DAG dependencies such that SUnits in this group shall be ordered
@@ -141,30 +154,467 @@
   void link(SchedGroup &OtherGroup);
 
   // Returns true if no more instructions may be added to this group.
-  bool isFull() { return MaxSize && Collection.size() >= *MaxSize; }
+  bool isFull() const { return MaxSize && Collection.size() >= *MaxSize; }
+
+  // Add SU to the SchedGroup.
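+  // Note: add() only records SU in Collection; DAG edges are created
+  // separately (via link()/addEdges()), which keeps speculative solver
+  // assignments cheap to undo with pop() and removeEdges().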
+  void add(SUnit &SU) {
+    LLVM_DEBUG(dbgs() << "For SchedGroup with mask "
+                      << format_hex((int)SGMask, 10, true) << " adding "
+                      << *SU.getInstr());
+    Collection.push_back(&SU);
+  }
+
+  // Remove the last element in the SchedGroup.
+  void pop() { Collection.pop_back(); }
 
   // Identify and add all relevant SUs from the DAG to this SchedGroup.
   void initSchedGroup();
 
   // Add instructions to the SchedGroup bottom up starting from RIter.
-  // ConflictedInstrs is a set of instructions that should not be added to the
+  // PipelineInstrs is a set of instructions that should not be added to the
   // SchedGroup even when the other conditions for adding it are satisfied.
   // RIter will be added to the SchedGroup as well, and dependencies will be
   // added so that RIter will always be scheduled at the end of the group.
   void initSchedGroup(std::vector<SUnit>::reverse_iterator RIter,
-                      DenseSet<SUnit *> &ConflictedInstrs);
+                      DenseMap<SUnit *, SmallVector<int, 4>> &SyncedInstrs);
 
   int getSyncID() { return SyncID; }
 
+  int getSGID() { return SGID; }
+
+  SchedGroupMask getMask() { return SGMask; }
+
   SchedGroup(SchedGroupMask SGMask, Optional<unsigned> MaxSize,
              ScheduleDAGInstrs *DAG, const SIInstrInfo *TII)
       : SGMask(SGMask), MaxSize(MaxSize), DAG(DAG), TII(TII) {}
 
   SchedGroup(SchedGroupMask SGMask, Optional<unsigned> MaxSize, int SyncID,
-             ScheduleDAGInstrs *DAG, const SIInstrInfo *TII)
-      : SGMask(SGMask), MaxSize(MaxSize), SyncID(SyncID), DAG(DAG), TII(TII) {}
+             int SGID, ScheduleDAGInstrs *DAG, const SIInstrInfo *TII)
+      : SGMask(SGMask), MaxSize(MaxSize), SyncID(SyncID), SGID(SGID), DAG(DAG),
+        TII(TII) {}
+};
+
+// Remove all existing edges from a SCHED_BARRIER or SCHED_GROUP_BARRIER.
+static void resetEdges(SUnit &SU, ScheduleDAGInstrs *DAG) {
+  assert(SU.getInstr()->getOpcode() == AMDGPU::SCHED_BARRIER ||
+         SU.getInstr()->getOpcode() == AMDGPU::SCHED_GROUP_BARRIER);
+
+  while (!SU.Preds.empty())
+    for (auto &P : SU.Preds)
+      SU.removePred(P);
+
+  while (!SU.Succs.empty())
+    for (auto &S : SU.Succs)
+      for (auto &SP : S.getSUnit()->Preds)
+        if (SP.getSUnit() == &SU)
+          S.getSUnit()->removePred(SP);
+}
+
+// The PipelineSolver is used to assign SUnits to SchedGroups in a pipeline
+// in non-trivial cases. For example, if the requested pipeline is
+// {VMEM_READ, VALU, MFMA, VMEM_READ} and we encounter a VMEM_READ instruction
+// in the DAG, then we will have an instruction that cannot be trivially
+// assigned to a SchedGroup. The PipelineSolver class implements two algorithms
+// to find a good solution to the pipeline -- a greedy algorithm and an exact
+// algorithm. The exact algorithm has an exponential time complexity and should
+// only be used for small problems, or for medium sized problems where an exact
+// solution is highly desired.
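+//
+// A small worked example of the non-trivial case (illustrative only): in the
+// {VMEM_READ, VALU, MFMA, VMEM_READ} pipeline above, a VMEM_READ instruction
+// is a candidate for both VMEM_READ groups, so it is recorded in SyncedInstrs
+// with both SGIDs. The solvers then choose one assignment per conflicted
+// instruction; the cost of a complete assignment is the number of pipeline
+// edges that could not be added to the DAG, plus MissPenalty for every
+// instruction left out of the pipeline.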
+
+class PipelineSolver {
+  ScheduleDAGMI *DAG;
+
+  // Instructions that can be assigned to multiple SchedGroups
+  DenseMap<int, DenseMap<SUnit *, SmallVector<int, 4>>> SyncedInstrs;
+  SmallVector<SmallVector<std::pair<SUnit *, SmallVector<int, 4>>, 4>, 4>
+      PipelineInstrs;
+  DenseMap<int, SmallVector<SchedGroup, 4>> SyncedSchedGroups;
+  // The current working pipeline
+  SmallVector<SmallVector<SchedGroup, 4>, 4> CurrPipeline;
+  // The pipeline that has the best solution found so far
+  SmallVector<SmallVector<SchedGroup, 4>, 4> BestPipeline;
+
+  // Compute an estimate of the size of the search tree -- the true size is
+  // the product of the number of candidate SchedGroups for each conflicted
+  // instruction, across all SyncPipelines.
+  unsigned computeProblemSize();
+
+  // The cost penalty of not assigning a SU to a SchedGroup
+  int MissPenalty = 0;
+
+  // Costs in terms of the number of edges we are unable to add
+  int BestCost = -1;
+  int CurrCost = 0;
+
+  // Index pointing to the conflicting instruction that is currently being
+  // fitted
+  int CurrConflInstNo = 0;
+  // Index of the pipeline that is currently being fitted
+  int CurrSyncGroupIdx = 0;
+  // The first non-trivial pipeline
+  int BeginSyncGroupIdx = 0;
+
+  // Update indices to fit the next conflicting instruction
+  void advancePosition();
+  // Recede indices to attempt to find a better fit for the previous
+  // conflicting instruction
+  void retreatPosition();
+
+  // The exponential time algorithm which finds the provably best fit
+  bool solveExact();
+  // The polynomial time algorithm which attempts to find a good fit
+  bool solveGreedy();
+  // Whether or not the current solution is optimal
+  bool checkOptimal();
+  // Add edges corresponding to the SchedGroups as assigned by the solver
+  void makePipeline();
+  // Add the edges from the SU to the other SchedGroups in the pipeline, and
+  // return the number of edges missed.
+  int addEdges(SmallVectorImpl<SchedGroup> &SyncPipeline, SUnit *SU, int SGID,
+               std::vector<std::pair<SUnit *, SUnit *>> &AddedEdges);
+  // Remove the edges passed via AddedEdges
+  void removeEdges(const std::vector<std::pair<SUnit *, SUnit *>> &AddedEdges);
+  // Convert the passed-in maps to arrays for bidirectional iterators
+  void convertSyncMapsToArrays();
+
+public:
+  // Invoke the solver to map instructions to instruction groups. The
+  // command-line options and a problem-size heuristic determine whether the
+  // exact or the greedy algorithm is used.
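+  // For reference, the solver choice can be forced from the command line
+  // (flag names are from this patch; the cutoff value below is only an
+  // illustrative example):
+  //   llc ... -amdgpu-igrouplp-exact-solver
+  //   llc ... -amdgpu-igrouplp-exact-solver-cutoff=10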
+  void solve();
+
+  PipelineSolver(
+      DenseMap<int, SmallVector<SchedGroup, 4>> &SyncedSchedGroups,
+      DenseMap<int, DenseMap<SUnit *, SmallVector<int, 4>>> &SyncedInstrs,
+      ScheduleDAGMI *DAG)
+      : DAG(DAG), SyncedInstrs(SyncedInstrs),
+        SyncedSchedGroups(SyncedSchedGroups) {
+
+    convertSyncMapsToArrays();
+
+    CurrPipeline = BestPipeline;
+    while (static_cast<size_t>(BeginSyncGroupIdx) < PipelineInstrs.size() &&
+           PipelineInstrs[BeginSyncGroupIdx].size() == 0)
+      ++BeginSyncGroupIdx;
+
+    if (static_cast<size_t>(BeginSyncGroupIdx) >= PipelineInstrs.size())
+      return;
+
+    CurrSyncGroupIdx = BeginSyncGroupIdx;
+  }
+};
+
+void PipelineSolver::convertSyncMapsToArrays() {
+  for (auto &SyncPipe : SyncedSchedGroups) {
+    BestPipeline.insert(BestPipeline.begin(), SyncPipe.second);
+  }
+
+  int PipelineIDx = SyncedInstrs.size() - 1;
+  PipelineInstrs.resize(SyncedInstrs.size());
+  for (auto &InstrMap : SyncedInstrs) {
+    for (auto &Inst : InstrMap.second) {
+      if (PipelineInstrs[PipelineIDx].size() == 0) {
+        PipelineInstrs[PipelineIDx].push_back(
+            std::make_pair(Inst.first, Inst.second));
+        continue;
+      }
+      auto SortPosition = PipelineInstrs[PipelineIDx].begin();
+      // Insert them in sorted order -- this allows for good parsing order in
+      // the greedy algorithm
+      while (SortPosition != PipelineInstrs[PipelineIDx].end() &&
+             Inst.first->NodeNum > SortPosition->first->NodeNum)
+        ++SortPosition;
+      PipelineInstrs[PipelineIDx].insert(
+          SortPosition, std::make_pair(Inst.first, Inst.second));
+    }
+    --PipelineIDx;
+  }
+}
+
+void PipelineSolver::makePipeline() {
+  // Preserve the order of barriers for subsequent SchedGroupBarrier mutations
+  for (auto &SyncPipeline : BestPipeline) {
+    for (auto &SG : SyncPipeline) {
+      SUnit *SGBarr = nullptr;
+      for (auto &SU : SG.Collection) {
+        if (SU->getInstr()->getOpcode() == AMDGPU::SCHED_GROUP_BARRIER)
+          SGBarr = SU;
+      }
+      assert(SGBarr);
+      resetEdges(*SGBarr, DAG);
+      SG.link(*SGBarr, false);
+    }
+  }
+
+  for (auto &SyncPipeline : BestPipeline) {
+    auto I = SyncPipeline.rbegin();
+    auto E = SyncPipeline.rend();
+    for (; I != E; ++I) {
+      auto &GroupA = *I;
+      for (auto J = std::next(I); J != E; ++J) {
+        auto &GroupB = *J;
+        GroupA.link(GroupB);
+      }
+    }
+  }
+}
+
+int PipelineSolver::addEdges(
+    SmallVectorImpl<SchedGroup> &SyncPipeline, SUnit *SU, int SGID,
+    std::vector<std::pair<SUnit *, SUnit *>> &AddedEdges) {
+  int AddedCost = 0;
+  bool MakePred = false;
+
+  // The groups in the pipeline are in reverse order. Thus,
+  // by traversing them from last to first, we are traversing
+  // them in the order in which they were introduced in the code. After we
+  // pass the group the SU is being assigned to, it should be
+  // linked as a predecessor of the subsequent SchedGroups.
+  auto GroupNo = (int)SyncPipeline.size() - 1;
+  for (; GroupNo >= 0; GroupNo--) {
+    if (SyncPipeline[GroupNo].getSGID() == SGID) {
+      MakePred = true;
+      continue;
+    }
+    auto Group = &SyncPipeline[GroupNo];
+    AddedCost += Group->link(*SU, MakePred, AddedEdges);
+    assert(AddedCost >= 0);
+  }
+
+  return AddedCost;
+}
+
+void PipelineSolver::removeEdges(
+    const std::vector<std::pair<SUnit *, SUnit *>> &EdgesToRemove) {
+  // Only remove the edges that we have added when testing
+  // the fit.
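+  // (Edges are added speculatively by addEdges() while costing a candidate
+  // assignment; the recorded (Pred, Succ) pairs let us restore the DAG
+  // before the next candidate is tried.)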
+  for (auto &PredSuccPair : EdgesToRemove) {
+    SUnit *Pred = PredSuccPair.first;
+    SUnit *Succ = PredSuccPair.second;
+
+    auto Match =
+        std::find_if(Succ->Preds.begin(), Succ->Preds.end(),
+                     [&Pred](SDep &P) { return P.getSUnit() == Pred; });
+    if (Match != Succ->Preds.end()) {
+      Succ->removePred(*Match);
+    }
+  }
+}
+
+void PipelineSolver::advancePosition() {
+  ++CurrConflInstNo;
+
+  if (static_cast<size_t>(CurrConflInstNo) >=
+      PipelineInstrs[CurrSyncGroupIdx].size()) {
+    CurrConflInstNo = 0;
+    ++CurrSyncGroupIdx;
+    // Advance to the next non-trivial pipeline
+    while (static_cast<size_t>(CurrSyncGroupIdx) < PipelineInstrs.size() &&
+           PipelineInstrs[CurrSyncGroupIdx].size() == 0)
+      ++CurrSyncGroupIdx;
+  }
+}
+
+void PipelineSolver::retreatPosition() {
+  assert(CurrConflInstNo >= 0);
+  assert(CurrSyncGroupIdx >= 0);
+
+  if (CurrConflInstNo > 0) {
+    --CurrConflInstNo;
+    return;
+  }
+
+  if (CurrConflInstNo == 0) {
+    // If we return to the starting position, we have explored
+    // the entire tree
+    if (CurrSyncGroupIdx == BeginSyncGroupIdx)
+      return;
+
+    --CurrSyncGroupIdx;
+    // Go to the previous non-trivial pipeline
+    while (PipelineInstrs[CurrSyncGroupIdx].size() == 0)
+      --CurrSyncGroupIdx;
+
+    CurrConflInstNo = PipelineInstrs[CurrSyncGroupIdx].size() - 1;
+  }
+}
+
+bool PipelineSolver::checkOptimal() {
+  if (static_cast<size_t>(CurrSyncGroupIdx) == PipelineInstrs.size()) {
+    if (BestCost == -1 || CurrCost < BestCost) {
+      BestPipeline = CurrPipeline;
+      BestCost = CurrCost;
+      LLVM_DEBUG(dbgs() << "Found Fit with cost " << BestCost << "\n");
+    }
+    assert(BestCost >= 0);
+  }
+  return BestCost == 0;
+}
+
+bool PipelineSolver::solveExact() {
+  if (checkOptimal())
+    return true;
+
+  if (static_cast<size_t>(CurrSyncGroupIdx) == PipelineInstrs.size())
+    return false;
+
+  assert(static_cast<size_t>(CurrSyncGroupIdx) < PipelineInstrs.size());
+  assert(static_cast<size_t>(CurrConflInstNo) <
+         PipelineInstrs[CurrSyncGroupIdx].size());
+  std::pair<SUnit *, SmallVector<int, 4>> CurrSGSU =
+      PipelineInstrs[CurrSyncGroupIdx][CurrConflInstNo];
+  LLVM_DEBUG(dbgs() << "Fitting SU(" << CurrSGSU.first->NodeNum
+                    << ") in Pipeline # " << CurrSyncGroupIdx << "\n");
+
+  // Since we have added the potential SchedGroups from bottom up, but
+  // traversed the DAG from top down, parse over the groups from last to
+  // first. In this way, the position of the instruction in the initial code
+  // more closely aligns with the position of the SchedGroupBarrier relative
+  // to the entire pipeline. Parsing in this way increases the likelihood of
+  // finding a good solution early.
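+  // E.g. (illustrative), with SCHED_GROUP_BARRIERs in source order G0, G1,
+  // G2, SGIDs are assigned bottom-up (G2 gets SGID 0, G0 gets SGID 2), and
+  // candidates are recorded in that order; the reverse iterator below
+  // therefore tries G0, the group earliest in program order, first.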
+  auto I = CurrSGSU.second.rbegin();
+  auto E = CurrSGSU.second.rend();
+  assert(CurrSGSU.second.size() >= 1);
+  for (; I != E; ++I) {
+    int CandSGID = *I;
+    int AddedCost = 0;
+    std::vector<std::pair<SUnit *, SUnit *>> AddedEdges;
+    auto &SyncPipeline = CurrPipeline[CurrSyncGroupIdx];
+    SchedGroup *Match;
+    for (auto &SG : SyncPipeline) {
+      if (SG.getSGID() == CandSGID)
+        Match = &SG;
+    }
+
+    if (Match->isFull())
+      continue;
+
+    LLVM_DEBUG(dbgs() << "Assigning to SchedGroup with Mask "
+                      << (int)Match->getMask() << " and ID " << CandSGID
+                      << "\n");
+    Match->add(*CurrSGSU.first);
+    AddedCost = addEdges(SyncPipeline, CurrSGSU.first, CandSGID, AddedEdges);
+    LLVM_DEBUG(dbgs() << "Cost of Assignment: " << AddedCost << "\n");
+    CurrCost += AddedCost;
+    advancePosition();
+
+    // If the cost after adding edges is greater than a known solution,
+    // backtrack instead of recursing.
+    if (CurrCost < BestCost || BestCost == -1) {
+      if (solveExact())
+        return true;
+    }
+
+    retreatPosition();
+    CurrCost -= AddedCost;
+    removeEdges(AddedEdges);
+    Match->pop();
+    CurrPipeline[CurrSyncGroupIdx] = SyncPipeline;
+  }
+
+  // Try the pipeline where the current instruction is omitted.
+  // Potentially, if we omit a problematic instruction from the pipeline,
+  // all the other instructions can fit nicely.
+  CurrCost += MissPenalty;
+  advancePosition();
+
+  LLVM_DEBUG(dbgs() << "NOT Assigned (" << CurrSGSU.first->NodeNum << ")\n");
+
+  if (CurrCost < BestCost || BestCost == -1) {
+    if (solveExact())
+      return true;
+  }
+
+  retreatPosition();
+  CurrCost -= MissPenalty;
+
+  return false;
+}
+
+bool PipelineSolver::solveGreedy() {
+  while (static_cast<size_t>(CurrSyncGroupIdx) < PipelineInstrs.size()) {
+    std::pair<SUnit *, SmallVector<int, 4>> CurrSGSU =
+        PipelineInstrs[CurrSyncGroupIdx][CurrConflInstNo];
+    int BestCost = -1;
+    int TempCost;
+    SchedGroup *BestGroup = nullptr;
+    int BestGroupID = -1;
+    auto &SyncPipeline = CurrPipeline[CurrSyncGroupIdx];
+    LLVM_DEBUG(dbgs() << "Fitting SU(" << CurrSGSU.first->NodeNum
+                      << ") in Pipeline # " << CurrSyncGroupIdx << "\n");
+
+    // Since we have added the potential SchedGroups from bottom up, but
+    // traversed the DAG from top down, parse over the groups from last to
+    // first. If we fail to do this for the greedy algorithm, the solution
+    // will likely not be good in more complex cases.
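+    // The greedy pass is a single forward sweep: each conflicted SU keeps
+    // the candidate group with the cheapest addEdges() cost and is never
+    // revisited, so (unlike solveExact) an early local choice may block a
+    // globally better assignment.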
+    auto I = CurrSGSU.second.rbegin();
+    auto E = CurrSGSU.second.rend();
+    for (; I != E; ++I) {
+      std::vector<std::pair<SUnit *, SUnit *>> AddedEdges;
+      int CandSGID = *I;
+      SchedGroup *Match;
+      for (auto &SG : SyncPipeline) {
+        if (SG.getSGID() == CandSGID)
+          Match = &SG;
+      }
+
+      LLVM_DEBUG(dbgs() << "Trying SGID # " << CandSGID << " with Mask "
+                        << (int)Match->getMask() << "\n");
+      if (Match->isFull()) {
+        LLVM_DEBUG(dbgs() << "SGID # " << CandSGID << " is full\n");
+        continue;
+      }
+      TempCost = addEdges(SyncPipeline, CurrSGSU.first, CandSGID, AddedEdges);
+      LLVM_DEBUG(dbgs() << "Cost of Group " << TempCost << "\n");
+      if (TempCost < BestCost || BestCost == -1) {
+        BestGroup = Match;
+        BestCost = TempCost;
+        BestGroupID = CandSGID;
+      }
+      removeEdges(AddedEdges);
+      if (BestCost == 0)
+        break;
+    }
+
+    if (BestGroupID != -1) {
+      BestGroup->add(*CurrSGSU.first);
+      std::vector<std::pair<SUnit *, SUnit *>> AddedEdges;
+      addEdges(SyncPipeline, CurrSGSU.first, BestGroupID, AddedEdges);
+      LLVM_DEBUG(dbgs() << "Best Group has ID: " << BestGroupID << " and Mask "
+                        << (int)BestGroup->getMask() << "\n");
+    }
+    CurrPipeline[CurrSyncGroupIdx] = SyncPipeline;
+    advancePosition();
+  }
+  BestPipeline = CurrPipeline;
+
+  return false;
+}
+
+unsigned PipelineSolver::computeProblemSize() {
+  unsigned ProblemSize = 0;
+  for (auto &PipeConflicts : PipelineInstrs) {
+    ProblemSize += PipeConflicts.size();
+  }
+
+  return ProblemSize;
+}
+
+void PipelineSolver::solve() {
+  unsigned ProblemSize = computeProblemSize();
+  assert(ProblemSize > 0);
+
+  bool BelowCutoff = CutoffForExact && ProblemSize <= *CutoffForExact;
+  MissPenalty = (ProblemSize / 2) + 1;
+
+  LLVM_DEBUG(DAG->dump());
+  if (EnableExactSolver || BelowCutoff) {
+    LLVM_DEBUG(dbgs() << "Starting EXACT pipeline solver\n");
+    solveExact();
+  } else { // Use the greedy algorithm by default.
+    LLVM_DEBUG(dbgs() << "Starting GREEDY pipeline solver\n");
+    solveGreedy();
+  }
+
+  makePipeline();
+}
+
 class IGroupLPDAGMutation : public ScheduleDAGMutation {
 public:
   const SIInstrInfo *TII;
@@ -186,11 +636,13 @@
   // Organize lists of SchedGroups by their SyncID. SchedGroups /
   // SCHED_GROUP_BARRIERs with different SyncIDs will have no edges added
   // between them.
-  DenseMap<int, SmallVector<SchedGroup, 4>> SyncedSchedGroupsMap;
+  DenseMap<int, SmallVector<SchedGroup, 4>> SyncedSchedGroups;
+
+  // The number of created sched groups -- also used as the SGID
+  int NumCreatedSchedGroups = 0;
 
-  // Used to track instructions that are already to added to a different
-  // SchedGroup with the same SyncID.
-  DenseMap<int, DenseSet<SUnit *>> SyncedInstrsMap;
+  // Used to track instructions that can be mapped to multiple sched groups
+  DenseMap<int, DenseMap<SUnit *, SmallVector<int, 4>>> SyncedInstrs;
 
   // Add DAG edges that enforce SCHED_BARRIER ordering.
   void addSchedBarrierEdges(SUnit &SU);
@@ -207,11 +659,8 @@
   SchedGroupMask invertSchedBarrierMask(SchedGroupMask Mask) const;
 
   // Create SchedGroups for a SCHED_GROUP_BARRIER.
-  void initSchedGroupBarrier(std::vector<SUnit>::reverse_iterator RIter);
-
-  // Add DAG edges that try to enforce ordering defined by SCHED_GROUP_BARRIER
-  // instructions.
-  void addSchedGroupBarrierEdges();
+  void initSchedGroupBarrierPipelineStage(
+      std::vector<SUnit>::reverse_iterator RIter);
 
 public:
   void apply(ScheduleDAGInstrs *DAGInstrs) override;
@@ -222,9 +671,6 @@
 bool SchedGroup::tryAddEdge(SUnit *A, SUnit *B) {
   if (A != B && DAG->canAddEdge(B, A)) {
     DAG->addEdge(B, SDep(A, SDep::Artificial));
-    LLVM_DEBUG(dbgs() << "Adding edge...\n"
-                      << "from: SU(" << A->NodeNum << ") " << *A->getInstr()
-                      << "to: SU(" << B->NodeNum << ") " << *B->getInstr());
     return true;
   }
   return false;
@@ -285,6 +731,32 @@
   return Result;
 }
 
+int SchedGroup::link(SUnit &SU, bool MakePred,
+                     std::vector<std::pair<SUnit *, SUnit *>> &AddedEdges) {
+  int MissedEdges = 0;
+  for (auto A : Collection) {
+    SUnit *B = &SU;
+    if (A == B)
+      continue;
+    if (MakePred)
+      std::swap(A, B);
+
+    // If B is already a recursive successor of A, this is not a deviation
+    // from the desired pipeline and we should not increase the cost.
+    if (DAG->IsReachable(B, A))
+      continue;
+
+    bool Added = tryAddEdge(A, B);
+    if (Added) {
+      AddedEdges.push_back(std::make_pair(A, B));
+    } else
+      ++MissedEdges;
+  }
+
+  return MissedEdges;
+}
+
 void SchedGroup::link(SUnit &SU, bool MakePred) {
   for (auto A : Collection) {
     SUnit *B = &SU;
@@ -311,10 +783,6 @@
   link(*B);
 }
 
-bool SchedGroup::isFull() const {
-  return MaxSize && Collection.size() >= *MaxSize;
-}
-
 bool SchedGroup::canAddSU(SUnit &SU) const {
   MachineInstr &MI = *SU.getInstr();
   if (MI.getOpcode() != TargetOpcode::BUNDLE)
@@ -340,26 +808,17 @@
   }
 }
 
-static bool canFitIntoPipeline(SUnit &SU, ScheduleDAGInstrs *DAG,
-                               DenseSet<SUnit *> &ConflictedInstrs) {
-  return std::all_of(
-      ConflictedInstrs.begin(), ConflictedInstrs.end(),
-      [DAG, &SU](SUnit *SuccSU) { return DAG->canAddEdge(SuccSU, &SU); });
-}
-
-void SchedGroup::initSchedGroup(std::vector<SUnit>::reverse_iterator RIter,
-                                DenseSet<SUnit *> &ConflictedInstrs) {
+void SchedGroup::initSchedGroup(
+    std::vector<SUnit>::reverse_iterator RIter,
+    DenseMap<SUnit *, SmallVector<int, 4>> &SyncedInstrs) {
   SUnit &InitSU = *RIter;
   for (auto E = DAG->SUnits.rend(); RIter != E; ++RIter) {
     auto &SU = *RIter;
     if (isFull())
       break;
 
-    if (canAddSU(SU) && !ConflictedInstrs.count(&SU) &&
-        canFitIntoPipeline(SU, DAG, ConflictedInstrs)) {
-      add(SU);
-      ConflictedInstrs.insert(&SU);
-    }
+    if (canAddSU(SU))
+      SyncedInstrs[&SU].push_back(SGID);
  }
 
   add(InitSU);
@@ -381,20 +840,6 @@
   }
 }
 
-// Same as makePipeline but with reverse ordering.
-static void
-makeReversePipeline(SmallVectorImpl<SchedGroup> &PipelineOrderGroups) {
-  auto I = PipelineOrderGroups.rbegin();
-  auto E = PipelineOrderGroups.rend();
-  for (; I != E; ++I) {
-    auto &GroupA = *I;
-    for (auto J = std::next(I); J != E; ++J) {
-      auto &GroupB = *J;
-      GroupA.link(GroupB);
-    }
-  }
-}
-
 void IGroupLPDAGMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
   const GCNSubtarget &ST = DAGInstrs->MF.getSubtarget<GCNSubtarget>();
   TII = ST.getInstrInfo();
@@ -421,22 +866,6 @@
   makePipeline(PipelineOrderGroups);
 }
 
-// Remove all existing edges from a SCHED_BARRIER or SCHED_GROUP_BARRIER.
-static void resetEdges(SUnit &SU, ScheduleDAGInstrs *DAG) {
-  assert(SU.getInstr()->getOpcode() == AMDGPU::SCHED_BARRIER ||
-         SU.getInstr()->getOpcode() == AMDGPU::SCHED_GROUP_BARRIER);
-
-  while (!SU.Preds.empty())
-    for (auto &P : SU.Preds)
-      SU.removePred(P);
-
-  while (!SU.Succs.empty())
-    for (auto &S : SU.Succs)
-      for (auto &SP : S.getSUnit()->Preds)
-        if (SP.getSUnit() == &SU)
-          S.getSUnit()->removePred(SP);
-}
-
 void SchedBarrierDAGMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
   const TargetSchedModel *TSchedModel = DAGInstrs->getSchedModel();
   if (!TSchedModel || DAGInstrs->SUnits.empty())
     return;
@@ -446,19 +875,32 @@
   const GCNSubtarget &ST = DAGInstrs->MF.getSubtarget<GCNSubtarget>();
   TII = ST.getInstrInfo();
   DAG = static_cast<ScheduleDAGMI *>(DAGInstrs);
-  SyncedInstrsMap.clear();
-  SyncedSchedGroupsMap.clear();
+
+  SyncedSchedGroups.clear();
+  SyncedInstrs.clear();
 
   for (auto R = DAG->SUnits.rbegin(), E = DAG->SUnits.rend(); R != E; ++R) {
     if (R->getInstr()->getOpcode() == AMDGPU::SCHED_BARRIER)
       addSchedBarrierEdges(*R);
 
    else if (R->getInstr()->getOpcode() == AMDGPU::SCHED_GROUP_BARRIER)
-      initSchedGroupBarrier(R);
+      initSchedGroupBarrierPipelineStage(R);
   }
 
-  // SCHED_GROUP_BARRIER edges can only be added after we have found and
-  // initialized all of the SCHED_GROUP_BARRIER SchedGroups.
-  addSchedGroupBarrierEdges();
+  // Only invoke the solver if there is something to solve.
+  bool NeedsSolution = false;
+  for (auto &PipelineInstrs : SyncedInstrs) {
+    if (PipelineInstrs.second.size() > 0) {
+      NeedsSolution = true;
+      break;
+    }
+  }
+
+  if (NeedsSolution) {
+    PipelineSolver PS(SyncedSchedGroups, SyncedInstrs, DAG);
+    // PipelineSolver performs the mutation by adding the edges it
+    // determined to be the best.
+    PS.solve();
+  }
 }
 
 void SchedBarrierDAGMutation::addSchedBarrierEdges(SUnit &SchedBarrier) {
@@ -513,7 +955,7 @@
   return InvertedMask;
 }
 
-void SchedBarrierDAGMutation::initSchedGroupBarrier(
+void SchedBarrierDAGMutation::initSchedGroupBarrierPipelineStage(
     std::vector<SUnit>::reverse_iterator RIter) {
   // Remove all existing edges from the SCHED_GROUP_BARRIER that were added due
   // to the instruction having side effects.
@@ -523,23 +965,11 @@
   int32_t SGMask = SGB.getOperand(0).getImm();
   int32_t Size = SGB.getOperand(1).getImm();
   int32_t SyncID = SGB.getOperand(2).getImm();
-  // Create a new SchedGroup and add it to a list that is mapped to the SyncID.
-  // SchedGroups only enforce ordering between SchedGroups with the same SyncID.
-  auto &SG = SyncedSchedGroupsMap[SyncID].emplace_back((SchedGroupMask)SGMask,
-                                                       Size, SyncID, DAG, TII);
-
-  // SyncedInstrsMap is used here is used to avoid adding the same SUs in
-  // multiple SchedGroups that have the same SyncID. This only matters for
-  // SCHED_GROUP_BARRIER and not SCHED_BARRIER.
-  SG.initSchedGroup(RIter, SyncedInstrsMap[SG.getSyncID()]);
-}
-
-void SchedBarrierDAGMutation::addSchedGroupBarrierEdges() {
-  // Since we traversed the DAG in reverse order when initializing
-  // SCHED_GROUP_BARRIERs we need to reverse the order in the vector to maintain
-  // user intentions and program order.
- for (auto &SchedGroups : SyncedSchedGroupsMap) - makeReversePipeline(SchedGroups.second); + auto &SG = SyncedSchedGroups[SyncID].emplace_back( + (SchedGroupMask)SGMask, Size, SyncID, NumCreatedSchedGroups++, DAG, TII); + + SG.initSchedGroup(RIter, SyncedInstrs[SG.getSyncID()]); } } // namespace diff --git a/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir @@ -0,0 +1,393 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -march=amdgcn -mcpu=gfx908 -run-pass=machine-scheduler -o - %s | FileCheck -check-prefix=GREEDY %s +# RUN: llc -march=amdgcn -mcpu=gfx908 -amdgpu-igrouplp-exact-solver -run-pass=machine-scheduler -o - %s | FileCheck -check-prefix=EXACT %s + +--- | + define amdgpu_kernel void @sched_group_barrier_2_VMEM_10_ALU_5_MFMA_2_VMEM_WRITE(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } + define amdgpu_kernel void @sched_group_barrier_MFMA_VALU_and_SALU_alternating(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } + define amdgpu_kernel void @sched_group_barrier_2_separate_pipes(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } + define amdgpu_kernel void @sched_group_barrier_3_separate_pipes(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } + + !0 = distinct !{!0} + !1 = !{!1, !0} +... + +--- +name: sched_group_barrier_2_VMEM_10_ALU_5_MFMA_2_VMEM_WRITE +tracksRegLiveness: true +body: | + bb.0: + ; GREEDY-LABEL: name: sched_group_barrier_2_VMEM_10_ALU_5_MFMA_2_VMEM_WRITE + ; GREEDY: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF + ; GREEDY-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF + ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; GREEDY-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 16, 2, 0 + ; GREEDY-NEXT: S_NOP 0 + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 10, 0 + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec + ; 
GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 5, 0 + ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 64, 2, 0 + ; GREEDY-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]] + ; EXACT-LABEL: name: sched_group_barrier_2_VMEM_10_ALU_5_MFMA_2_VMEM_WRITE + ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF + ; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF + ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; EXACT-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF + ; EXACT-NEXT: SCHED_GROUP_BARRIER 16, 2, 0 + ; EXACT-NEXT: S_NOP 0 + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 10, 0 + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 5, 0 + ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, 
!noalias !0, addrspace 1) + ; EXACT-NEXT: SCHED_GROUP_BARRIER 64, 2, 0 + ; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]] + %0:sreg_64 = IMPLICIT_DEF + %1:vgpr_32 = IMPLICIT_DEF + %2:areg_128 = IMPLICIT_DEF + %3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + %4:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %3, implicit $exec + GLOBAL_STORE_DWORD_SADDR %1, %4, %0, 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + %5:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit $exec + %6:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit $exec + S_NOP 0 + %7:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %2, 0, 0, 0, implicit $mode, implicit $exec + %8:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %7, 0, 0, 0, implicit $mode, implicit $exec + %9:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %8, 0, 0, 0, implicit $mode, implicit $exec + %10:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %9, 0, 0, 0, implicit $mode, implicit $exec + %11:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %10, 0, 0, 0, implicit $mode, implicit $exec + %12:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + %13:vgpr_32 = nsw V_MUL_LO_U32_e64 %12, %12, implicit $exec + GLOBAL_STORE_DWORD_SADDR %1, %13, %0, 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; 2 VMEM + SCHED_GROUP_BARRIER 16, 2, 0 + ; 10 ALU + SCHED_GROUP_BARRIER 6, 10, 0 + ; 5 MFMA + SCHED_GROUP_BARRIER 8, 5, 0 + ; 2 VMEM_WRITE + SCHED_GROUP_BARRIER 64, 2, 0 + S_ENDPGM 0, implicit %5, implicit %6, implicit %11 +... + +--- +name: sched_group_barrier_MFMA_VALU_and_SALU_alternating +tracksRegLiveness: true +body: | + bb.0: + ; GREEDY-LABEL: name: sched_group_barrier_MFMA_VALU_and_SALU_alternating + ; GREEDY: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF + ; GREEDY-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF + ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; GREEDY-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 16, 2, 0 + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 1, 0 + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 1, 0 + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 1, 0 + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 1, 0 + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: 
SCHED_GROUP_BARRIER 8, 1, 0 + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 1, 0 + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 1, 0 + ; GREEDY-NEXT: S_NOP 0 + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 1, 0 + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 1, 0 + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 1, 0 + ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 64, 2, 0 + ; GREEDY-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]] + ; EXACT-LABEL: name: sched_group_barrier_MFMA_VALU_and_SALU_alternating + ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF + ; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF + ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; EXACT-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF + ; EXACT-NEXT: SCHED_GROUP_BARRIER 16, 2, 0 + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 0 + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 1, 0 + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 0 + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 1, 0 + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 0 + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 1, 0 + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit 
$mode, implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 0 + ; EXACT-NEXT: S_NOP 0 + ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 1, 0 + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 0 + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 1, 0 + ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; EXACT-NEXT: SCHED_GROUP_BARRIER 64, 2, 0 + ; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]] + %0:sreg_64 = IMPLICIT_DEF + %1:vgpr_32 = IMPLICIT_DEF + %2:areg_128 = IMPLICIT_DEF + %3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + %4:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %3, implicit $exec + GLOBAL_STORE_DWORD_SADDR %1, %4, %0, 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + %5:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit $exec + %6:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit $exec + S_NOP 0 + %7:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %2, 0, 0, 0, implicit $mode, implicit $exec + %8:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %7, 0, 0, 0, implicit $mode, implicit $exec + %9:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %8, 0, 0, 0, implicit $mode, implicit $exec + %10:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %9, 0, 0, 0, implicit $mode, implicit $exec + %11:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %10, 0, 0, 0, implicit $mode, implicit $exec + %12:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + %13:vgpr_32 = nsw V_MUL_LO_U32_e64 %12, %12, implicit $exec + GLOBAL_STORE_DWORD_SADDR %1, %13, %0, 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; 2 VMEM + SCHED_GROUP_BARRIER 16, 2, 0 + ; 1 VALU+SALU + SCHED_GROUP_BARRIER 8, 1, 0 + ; 1 MFMA + SCHED_GROUP_BARRIER 6, 1, 0 + ; 1 VALU+SALU + SCHED_GROUP_BARRIER 8, 1, 0 + ; 1 MFMA + SCHED_GROUP_BARRIER 6, 1, 0 + ; 1 VALU+SALU + SCHED_GROUP_BARRIER 8, 1, 0 + ; 1 MFMA + SCHED_GROUP_BARRIER 6, 1, 0 + ; 1 VALU+SALU + SCHED_GROUP_BARRIER 8, 1, 0 + ; 1 MFMA + SCHED_GROUP_BARRIER 6, 1, 0 + ; 1 VALU+SALU + SCHED_GROUP_BARRIER 8, 1, 0 + ; 1 MFMA + SCHED_GROUP_BARRIER 6, 1, 0 + ; 2 VMEM_WRITE + SCHED_GROUP_BARRIER 64, 2, 0 + S_ENDPGM 0, implicit %5, implicit %6, implicit %11 +... 
+ +--- +name: sched_group_barrier_2_separate_pipes +tracksRegLiveness: true +body: | + bb.0: + ; GREEDY-LABEL: name: sched_group_barrier_2_separate_pipes + ; GREEDY: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF + ; GREEDY-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF + ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; GREEDY-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 2, 2 + ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 16, 2, 0 + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_1]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 64, 2, 2 + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: S_NOP 0 + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 5, 0 + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 2, 0 + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 2, 2 + ; GREEDY-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MUL_LO_U32_e64_3]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]] + ; EXACT-LABEL: name: sched_group_barrier_2_separate_pipes + ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF + ; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF + ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; EXACT-NEXT: 
[[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF + ; EXACT-NEXT: SCHED_GROUP_BARRIER 16, 2, 0 + ; EXACT-NEXT: S_NOP 0 + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 5, 0 + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 2, 2 + ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; EXACT-NEXT: SCHED_GROUP_BARRIER 64, 2, 2 + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 2, 0 + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 2, 2 + ; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]] + %0:sreg_64 = IMPLICIT_DEF + %1:vgpr_32 = IMPLICIT_DEF + %2:areg_128 = IMPLICIT_DEF + %3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + %4:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %3, implicit $exec + GLOBAL_STORE_DWORD_SADDR %1, %4, %0, 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + %5:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit $exec + %6:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit $exec + S_NOP 0 + %7:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %2, 0, 0, 0, implicit $mode, implicit $exec + %8:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %7, 0, 0, 0, implicit $mode, implicit $exec + %9:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %8, 0, 0, 0, implicit $mode, implicit $exec + %10:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %9, 0, 0, 0, implicit $mode, implicit $exec + %11:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %10, 0, 0, 0, implicit $mode, implicit $exec + %12:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + %13:vgpr_32 = nsw V_MUL_LO_U32_e64 %12, %12, implicit $exec + GLOBAL_STORE_DWORD_SADDR %1, %13, %0, 512, 0, implicit 
$exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; 2 VMEM + SCHED_GROUP_BARRIER 16, 2, 0 + ; 5 ALU + SCHED_GROUP_BARRIER 6, 5, 0 + ; 2 MFMA + SCHED_GROUP_BARRIER 8, 2, 0 + ; 2 MFMA + SCHED_GROUP_BARRIER 8, 2, 2 + ; 2 VMEM_WRITE + SCHED_GROUP_BARRIER 64, 2, 2 + ; 2 MFMA + SCHED_GROUP_BARRIER 8, 2, 2 + S_ENDPGM 0, implicit %5, implicit %6, implicit %11 +... + +--- +name: sched_group_barrier_3_separate_pipes +tracksRegLiveness: true +body: | + bb.0: + ; GREEDY-LABEL: name: sched_group_barrier_3_separate_pipes + ; GREEDY: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF + ; GREEDY-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF + ; GREEDY-NEXT: S_NOP 0 + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 4, 1, 1 + ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; GREEDY-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 2, 1, 1 + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 2, 2 + ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 16, 2, 0 + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_1]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 64, 2, 2 + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 5, 0 + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 2, 0 + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 2, 2 + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 1, 1 + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 16, 1, 1 + ; GREEDY-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MUL_LO_U32_e64_3]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]] + ; 
EXACT-LABEL: name: sched_group_barrier_3_separate_pipes + ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF + ; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF + ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; EXACT-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF + ; EXACT-NEXT: SCHED_GROUP_BARRIER 16, 2, 0 + ; EXACT-NEXT: S_NOP 0 + ; EXACT-NEXT: SCHED_GROUP_BARRIER 4, 1, 1 + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 5, 0 + ; EXACT-NEXT: SCHED_GROUP_BARRIER 2, 1, 1 + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 1 + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 2, 2 + ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; EXACT-NEXT: SCHED_GROUP_BARRIER 64, 2, 2 + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 2, 0 + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 2, 2 + ; EXACT-NEXT: SCHED_GROUP_BARRIER 16, 1, 1 + ; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]] + %0:sreg_64 = IMPLICIT_DEF + %1:vgpr_32 = IMPLICIT_DEF + %2:areg_128 = IMPLICIT_DEF + %3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + %4:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %3, implicit $exec + GLOBAL_STORE_DWORD_SADDR %1, %4, %0, 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + %5:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit $exec + %6:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit 
$exec + S_NOP 0 + %7:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %2, 0, 0, 0, implicit $mode, implicit $exec + %8:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %7, 0, 0, 0, implicit $mode, implicit $exec + %9:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %8, 0, 0, 0, implicit $mode, implicit $exec + %10:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %9, 0, 0, 0, implicit $mode, implicit $exec + %11:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %10, 0, 0, 0, implicit $mode, implicit $exec + %12:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + %13:vgpr_32 = nsw V_MUL_LO_U32_e64 %12, %12, implicit $exec + GLOBAL_STORE_DWORD_SADDR %1, %13, %0, 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; 2 VMEM + SCHED_GROUP_BARRIER 16, 2, 0 + ; 5 ALU + SCHED_GROUP_BARRIER 6, 5, 0 + ; 2 MFMA + SCHED_GROUP_BARRIER 8, 2, 0 + ; 2 MFMA + SCHED_GROUP_BARRIER 8, 2, 2 + ; 2 VMEM_WRITE + SCHED_GROUP_BARRIER 64, 2, 2 + ; 2 MFMA + SCHED_GROUP_BARRIER 8, 2, 2 + ; 1 SALU + SCHED_GROUP_BARRIER 4, 1, 1 + ; 1 VALU + SCHED_GROUP_BARRIER 2, 1, 1 + ; 1 MFMA + SCHED_GROUP_BARRIER 8, 1, 1 + ; 1 VMEM + SCHED_GROUP_BARRIER 16, 1, 1 + S_ENDPGM 0, implicit %5, implicit %6, implicit %11 +... diff --git a/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir --- a/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir +++ b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir @@ -130,16 +130,16 @@ ; CHECK-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF ; CHECK-NEXT: SCHED_GROUP_BARRIER 16, 2, 0 ; CHECK-NEXT: S_NOP 0 - ; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec - ; CHECK-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec - ; CHECK-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec - ; CHECK-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec - ; CHECK-NEXT: SCHED_GROUP_BARRIER 1, 10, 0 ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 
[[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: SCHED_GROUP_BARRIER 1, 10, 0 ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 5, 0 ; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) ; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)