diff --git a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
@@ -27,7 +27,7 @@
 
 using namespace llvm;
 
-#define DEBUG_TYPE "machine-scheduler"
+#define DEBUG_TYPE "igrouplp"
 
 namespace {
 
@@ -61,6 +61,30 @@
         cl::desc("The maximum number of instructions to include "
                  "in lds/gds write group."));
 
+static cl::opt<bool> EnableExactSolver(
+    "amdgpu-igrouplp-exact-solver",
+    cl::desc("Whether to use the exponential time solver to fit "
+             "the instructions to the pipeline as closely as "
+             "possible."),
+    cl::init(false));
+
+static cl::opt<bool> EnableGreedySolver(
+    "amdgpu-igrouplp-greedy-solver",
+    cl::desc("Whether to use the greedy solver to fit "
+             "the instructions to the pipeline as closely as "
+             "possible."),
+    cl::init(false));
+
+static cl::opt<int> CutoffForExact(
+    "amdgpu-igrouplp-exact-solver-cutoff",
+    cl::desc("The maximum number of scheduling group conflicts "
+             "which we attempt to solve with the exponential time "
+             "exact solver. Problem sizes greater than this will "
+             "be solved by the less accurate greedy algorithm. Selecting "
+             "the solver by size is superseded by manually selecting "
+             "the solver (e.g. by amdgpu-igrouplp-exact-solver)."),
+    cl::init(30));
+
 // Components of the mask that determines which instruction types may be
 // classified into a SchedGroup.
 enum class SchedGroupMask {
@@ -80,6 +104,13 @@
   LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ ALL)
 };
 
+struct SchedGroupSU {
+  SUnit *SU;
+  SmallVector<SmallVector<int>, 4> Matches;
+
+  SchedGroupSU(SUnit *SU) : SU(SU) {}
+};
+
 // Classify instructions into groups to enable fine tuned control over the
 // scheduler. These groups may be more specific than current SchedModel
 // instruction classes.
@@ -98,7 +129,7 @@
   int SyncID = 0;
 
   // Collection of SUnits that are classified as members of this group.
-  SmallVector<SUnit *, 32> Collection;
+  // SmallVector<SUnit *, 32> Collection;
 
   ScheduleDAGInstrs *DAG;
 
@@ -111,29 +142,27 @@
   // SchedGroup object.
   bool canAddMI(const MachineInstr &MI) const;
 
+public:
+  // Collection of SUnits that are classified as members of this group.
+  SmallVector<SUnit *, 32> Collection;
+
+  std::vector<SUnit>::reverse_iterator BarrierPosition;
+
   // Returns true if SU can be added to this SchedGroup.
   bool canAddSU(SUnit &SU) const;
 
-  // Returns true if no more instructions may be added to this group.
-  bool isFull() const;
-
-  // Add SU to the SchedGroup.
-  void add(SUnit &SU) {
-    LLVM_DEBUG(dbgs() << "For SchedGroup with mask "
-                      << format_hex((int)SGMask, 10, true) << " adding "
-                      << *SU.getInstr());
-    Collection.push_back(&SU);
-  }
-
-public:
   // Add DAG dependencies from all SUnits in this SchedGroup and this SU. If
   // MakePred is true, SU will be a predecessor of the SUnits in this
   // SchedGroup, otherwise SU will be a successor.
   void link(SUnit &SU, bool MakePred = false);
 
-  // Add DAG dependencies from all SUnits in this SchedGroup and this SU. Use
-  // the predicate to determine whether SU should be a predecessor (P = true)
-  // or a successor (P = false) of this SchedGroup.
+  // Add DAG dependencies, tracking which edges are added and returning the
+  // count of missed edges.
+  int link(SUnit &SU, bool MakePred,
+           std::vector<std::pair<SUnit *, SUnit *>> &AddedEdges);
+
+  // Add DAG dependencies from all SUnits in this SchedGroup and this SU.
+  // Use the predicate to determine whether SU should be a predecessor (P =
+  // true) or a successor (P = false) of this SchedGroup.
  void link(SUnit &SU, function_ref<bool(const SUnit *A, const SUnit *B)> P);
 
   // Add DAG dependencies such that SUnits in this group shall be ordered
@@ -141,7 +170,18 @@
   void link(SchedGroup &OtherGroup);
 
   // Returns true if no more instructions may be added to this group.
-  bool isFull() { return MaxSize && Collection.size() >= *MaxSize; }
+  bool isFull() const { return MaxSize && Collection.size() >= *MaxSize; }
+
+  // Add SU to the SchedGroup.
+  void add(SUnit &SU) {
+    LLVM_DEBUG(dbgs() << "For SchedGroup with mask "
+                      << format_hex((int)SGMask, 10, true) << " adding "
+                      << *SU.getInstr());
+    Collection.push_back(&SU);
+  }
+
+  // Remove the last element in the SchedGroup.
+  void pop() { Collection.pop_back(); }
 
   // Identify and add all relevant SUs from the DAG to this SchedGroup.
   void initSchedGroup();
@@ -156,15 +196,381 @@
 
   int getSyncID() { return SyncID; }
 
+  SchedGroupMask getMask() { return SGMask; }
+
+  SchedGroup() {}
+
   SchedGroup(SchedGroupMask SGMask, Optional<unsigned> MaxSize,
              ScheduleDAGInstrs *DAG, const SIInstrInfo *TII)
       : SGMask(SGMask), MaxSize(MaxSize), DAG(DAG), TII(TII) {}
 
   SchedGroup(SchedGroupMask SGMask, Optional<unsigned> MaxSize, int SyncID,
-             ScheduleDAGInstrs *DAG, const SIInstrInfo *TII)
-      : SGMask(SGMask), MaxSize(MaxSize), SyncID(SyncID), DAG(DAG), TII(TII) {}
+             ScheduleDAGInstrs *DAG, const SIInstrInfo *TII,
+             std::vector<SUnit>::reverse_iterator BarrierPosition)
+      : SGMask(SGMask), MaxSize(MaxSize), SyncID(SyncID), DAG(DAG), TII(TII),
+        BarrierPosition(BarrierPosition) {}
+};
+
+// The PipelineSolver is used to assign SUnits to SchedGroups in a pipeline
+// in non-trivial cases. For example, if the requested pipeline is
+// {VMEM_READ, VALU, MFMA, VMEM_READ} and we encounter a VMEM_READ
+// instruction in the DAG, then we will have an instruction that can not be
+// trivially assigned to a SchedGroup. The PipelineSolver class implements
+// two algorithms to find a good solution to the pipeline -- a greedy
+// algorithm and an exact algorithm. The exact algorithm has an exponential
+// time complexity and should only be used for small problems, or for
+// medium-sized problems where an exact solution is highly desired.
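+//
+// The cost model: each candidate assignment is scored by the number of
+// desired ordering edges that could not be added to the DAG, and leaving an
+// instruction out of the pipeline entirely costs MissPenalty. The exact
+// solver backtracks over all candidate assignments to minimize this total
+// cost, while the greedy solver commits each instruction to the cheapest
+// group in a single pass.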
+
+class PipelineSolver {
+  // Instructions that can be assigned to multiple SchedGroups
+  const SmallVector<SmallVector<SchedGroupSU>, 4> ConflictedInstrs;
+  ScheduleDAGMI *DAG;
+
+  // The current working pipeline
+  SmallVector<SmallVector<SchedGroup>, 4> CurrPipeline;
+  // The pipeline that has the best solution found so far
+  SmallVector<SmallVector<SchedGroup>, 4> BestPipeline;
+
+  // Compute an estimate of the size of the search tree -- the true size is
+  // the product of each conflictedInst.Matches.size() across all
+  // SyncPipelines
+  int computeProblemSize();
+
+  // The cost penalty of not assigning a SU to a SchedGroup
+  int MissPenalty = 0;
+
+  // Costs in terms of the number of edges we are unable to add
+  int BestCost = -1;
+  int CurrCost = 0;
+
+  // Index pointing to the conflicting instruction that is currently being
+  // fitted
+  int CurrConflInstNo = 0;
+  // Index to the pipeline that is currently being fitted
+  int CurrSyncGroupIdx = 0;
+  // The first non-trivial pipeline
+  int BeginSyncGroupIdx = 0;
+
+  // Update indices to fit the next conflicting instruction
+  void advancePosition();
+  // Recede indices to attempt to find a better fit for the previous
+  // conflicting instruction
+  void retreatPosition();
+
+  // The exponential time algorithm which finds the provably best fit
+  bool solveExact();
+  // The polynomial time algorithm which attempts to find a good fit
+  bool solveGreedy();
+  // Whether or not the current solution is optimal
+  bool checkOptimal();
+  // Add edges corresponding to the SchedGroups as assigned by the solver
+  void makePipeline();
+  // Add the edges from the SU to the other SchedGroups in the pipeline, and
+  // return the number of edges missed.
+  int addEdges(SmallVector<SchedGroup> &SyncPipeline, SUnit *SU,
+               int AssignedGroupNo,
+               std::vector<std::pair<SUnit *, SUnit *>> &AddedEdges);
+  // Remove the edges passed via AddedEdges
+  void removeEdges(const std::vector<std::pair<SUnit *, SUnit *>> &AddedEdges);
+
+public:
+  // Invoke the solver to map instructions to instruction groups. A heuristic
+  // and the command-line options determine whether the exact or the greedy
+  // algorithm is used.
+  void solve();
+
+  PipelineSolver(
+      SmallVector<SmallVector<SchedGroup>, 4> &Pipeline,
+      const SmallVector<SmallVector<SchedGroupSU>, 4> &ConflictedInstrs,
+      ScheduleDAGMI *DAG)
+      : ConflictedInstrs(ConflictedInstrs), DAG(DAG), BestPipeline(Pipeline) {
+    CurrPipeline = BestPipeline;
+    while (static_cast<size_t>(BeginSyncGroupIdx) < ConflictedInstrs.size() &&
+           ConflictedInstrs[BeginSyncGroupIdx].size() == 0)
+      ++BeginSyncGroupIdx;
+
+    if (static_cast<size_t>(BeginSyncGroupIdx) >= ConflictedInstrs.size())
+      return;
+
+    CurrSyncGroupIdx = BeginSyncGroupIdx;
+  }
+};
+
+void PipelineSolver::makePipeline() {
+  // Preserve the order of the barriers for subsequent SchedGroupBarrier
+  // mutations
+  for (auto &SyncPipeline : BestPipeline) {
+    for (auto &SG : SyncPipeline) {
+      SG.link(*SG.BarrierPosition,
+              (function_ref<bool(const SUnit *, const SUnit *)>)[](
+                  const SUnit *A, const SUnit *B) {
+                return A->NodeNum > B->NodeNum;
+              });
+    }
+  }
+
+  for (auto &SyncPipeline : BestPipeline) {
+    auto I = SyncPipeline.rbegin();
+    auto E = SyncPipeline.rend();
+    for (; I != E; ++I) {
+      auto &GroupA = *I;
+      for (auto J = std::next(I); J != E; ++J) {
+        auto &GroupB = *J;
+        GroupA.link(GroupB);
+      }
+    }
+  }
+}
+
+int PipelineSolver::addEdges(
+    SmallVector<SchedGroup> &SyncPipeline, SUnit *SU,
+    const int AssignedGroupNo,
+    std::vector<std::pair<SUnit *, SUnit *>> &AddedEdges) {
+  int AddedCost = 0;
+  bool MakePred = false;
+
+  // The groups in the pipeline are in reverse order. Thus,
+  // by traversing them from last to first, we are traversing
+  // them in the order in which they were introduced in the code. After we
+  // pass the group the SU is being assigned to, it should be
+  // linked as a predecessor of the subsequent SchedGroups.
+  auto GroupNo = (int)SyncPipeline.size() - 1;
+  for (; GroupNo >= 0; GroupNo--) {
+    if (AssignedGroupNo == GroupNo) {
+      MakePred = true;
+      continue;
+    }
+    assert(MakePred || GroupNo > AssignedGroupNo);
+    auto Group = &SyncPipeline[GroupNo];
+    AddedCost += Group->link(*SU, MakePred, AddedEdges);
+    assert(AddedCost >= 0);
+  }
+
+  return AddedCost;
+}
+
+void PipelineSolver::removeEdges(
+    const std::vector<std::pair<SUnit *, SUnit *>> &EdgesToRemove) {
+  // Only remove the edges that we have added when testing
+  // the fit.
+  for (auto &PredSuccPair : EdgesToRemove) {
+    SUnit *Pred = PredSuccPair.first;
+    SUnit *Succ = PredSuccPair.second;
+
+    auto Match =
+        std::find_if(Succ->Preds.begin(), Succ->Preds.end(),
+                     [&Pred](SDep &P) { return P.getSUnit() == Pred; });
+    if (Match != Succ->Preds.end()) {
+      Succ->removePred(*Match);
+    }
+  }
+}
+
+void PipelineSolver::advancePosition() {
+  ++CurrConflInstNo;
+
+  if (static_cast<size_t>(CurrConflInstNo) >=
+      ConflictedInstrs[CurrSyncGroupIdx].size()) {
+    CurrConflInstNo = 0;
+    ++CurrSyncGroupIdx;
+    // Advance to the next non-trivial pipeline
+    while (static_cast<size_t>(CurrSyncGroupIdx) < ConflictedInstrs.size() &&
+           ConflictedInstrs[CurrSyncGroupIdx].size() == 0)
+      ++CurrSyncGroupIdx;
+  }
+}
+
+void PipelineSolver::retreatPosition() {
+  assert(CurrConflInstNo >= 0);
+  assert(CurrSyncGroupIdx >= 0);
+  if (CurrConflInstNo > 0) {
+    --CurrConflInstNo;
+    return;
+  }
+
+  if (CurrConflInstNo == 0) {
+    // If we return to the starting position, we have explored
+    // the entire tree
+    if (CurrSyncGroupIdx == BeginSyncGroupIdx)
+      return;
+
+    --CurrSyncGroupIdx;
+    // Go to the previous non-trivial pipeline
+    while (ConflictedInstrs[CurrSyncGroupIdx].size() == 0)
+      --CurrSyncGroupIdx;
+
+    CurrConflInstNo = ConflictedInstrs[CurrSyncGroupIdx].size() - 1;
+  }
+}
+
+bool PipelineSolver::checkOptimal() {
+  if (static_cast<size_t>(CurrSyncGroupIdx) == ConflictedInstrs.size()) {
+    if (BestCost == -1 || CurrCost < BestCost) {
+      BestPipeline = CurrPipeline;
+      BestCost = CurrCost;
+      LLVM_DEBUG(dbgs() << "Found Fit with cost " << BestCost << "\n");
+    }
+    assert(BestCost >= 0);
+  }
+  return BestCost == 0;
+}
+
+bool PipelineSolver::solveExact() {
+  if (checkOptimal())
+    return true;
+
+  if (static_cast<size_t>(CurrSyncGroupIdx) == ConflictedInstrs.size())
+    return false;
+
+  assert(static_cast<size_t>(CurrSyncGroupIdx) < ConflictedInstrs.size());
+  assert(static_cast<size_t>(CurrConflInstNo) <
+         ConflictedInstrs[CurrSyncGroupIdx].size());
+  SchedGroupSU CurrSGSU = ConflictedInstrs[CurrSyncGroupIdx][CurrConflInstNo];
+  LLVM_DEBUG(dbgs() << "Fitting SU(" << CurrSGSU.SU->NodeNum
+                    << ") in Pipeline # " << CurrSyncGroupIdx << "\n");
+
+  // Since we have added the potential SchedGroups from bottom up, but
+  // traversed the DAG from top down, parse over the groups from last to
+  // first. In this way, the position of the instruction in the initial code
+  // more closely aligns with the position of the SchedGroupBarrier relative
+  // to the entire pipeline. Parsing in such a way increases the likelihood
+  // of finding a good solution early.
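+  // Each candidate assignment below is tentative: the SU is added to the
+  // group and the corresponding edges are applied, and once the recursive
+  // call returns, the edges and the SU are removed again so that sibling
+  // branches of the search tree start from an unmodified state.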
+  auto I = CurrSGSU.Matches[CurrSyncGroupIdx].rbegin();
+  auto E = CurrSGSU.Matches[CurrSyncGroupIdx].rend();
+  assert(CurrSGSU.Matches.size() >= 1);
+  for (; I != E; ++I) {
+    int SchedGroupNo = *I;
+    int AddedCost = 0;
+    std::vector<std::pair<SUnit *, SUnit *>> AddedEdges;
+    auto &SyncPipeline = CurrPipeline[CurrSyncGroupIdx];
+    SchedGroup *Match = &SyncPipeline[SchedGroupNo];
+
+    if (Match->isFull())
+      continue;
+
+    LLVM_DEBUG(dbgs() << "Assigning to SchedGroup with Mask "
+                      << (int)Match->getMask() << " and Group # "
+                      << SchedGroupNo << "\n");
+    Match->add(*CurrSGSU.SU);
+    AddedCost = addEdges(SyncPipeline, CurrSGSU.SU, SchedGroupNo, AddedEdges);
+    LLVM_DEBUG(dbgs() << "Cost of Assignment: " << AddedCost << "\n");
+    CurrCost += AddedCost;
+    advancePosition();
+
+    // Only recurse if the cost so far can still beat the best known
+    // solution; otherwise backtrack
+    if (CurrCost < BestCost || BestCost == -1) {
+      if (solveExact())
+        return true;
+    }
+
+    retreatPosition();
+    CurrCost -= AddedCost;
+    removeEdges(AddedEdges);
+    Match->pop();
+    CurrPipeline[CurrSyncGroupIdx] = SyncPipeline;
+  }
+
+  // Try the pipeline where the current instruction is omitted.
+  // Potentially, if we omit a problematic instruction from the pipeline,
+  // all the other instructions can fit nicely.
+  CurrCost += MissPenalty;
+  advancePosition();
+
+  LLVM_DEBUG(dbgs() << "NOT Assigned (" << CurrSGSU.SU->NodeNum << ")\n");
+
+  if (CurrCost < BestCost || BestCost == -1) {
+    if (solveExact())
+      return true;
+  }
+
+  retreatPosition();
+  CurrCost -= MissPenalty;
+
+  return false;
+}
+
+bool PipelineSolver::solveGreedy() {
+  while (static_cast<size_t>(CurrSyncGroupIdx) < ConflictedInstrs.size()) {
+    SchedGroupSU CurrSGSU = ConflictedInstrs[CurrSyncGroupIdx][CurrConflInstNo];
+    int BestCost = -1;
+    int TempCost;
+    SchedGroup *BestGroup = nullptr;
+    int BestGroupNo = -1;
+    auto &SyncPipeline = BestPipeline[CurrSyncGroupIdx];
+    LLVM_DEBUG(dbgs() << "Fitting SU(" << CurrSGSU.SU->NodeNum
+                      << ") in Pipeline # " << CurrSyncGroupIdx << "\n");
+
+    // Since we have added the potential SchedGroups from bottom up, but
+    // traversed the DAG from top down, parse over the groups from last to
+    // first. If we fail to do this for the greedy algorithm, the solution
+    // will likely not be good in more complex cases.
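+    // Unlike solveExact, there is no backtracking here: the loop below only
+    // measures the cost of each candidate group (adding the edges and then
+    // removing them), and the SU is committed to the cheapest group found.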
+    auto I = CurrSGSU.Matches[CurrSyncGroupIdx].rbegin();
+    auto E = CurrSGSU.Matches[CurrSyncGroupIdx].rend();
+    for (; I != E; ++I) {
+      int SchedGroupNo = *I;
+      std::vector<std::pair<SUnit *, SUnit *>> AddedEdges;
+      SchedGroup *Match = &SyncPipeline[SchedGroupNo];
+      LLVM_DEBUG(dbgs() << "Trying Group # " << SchedGroupNo << " with Mask "
+                        << (int)Match->getMask() << "\n");
+      if (Match->isFull()) {
+        LLVM_DEBUG(dbgs() << "Group # " << SchedGroupNo << " is full\n");
+        continue;
+      }
+      TempCost = addEdges(SyncPipeline, CurrSGSU.SU, SchedGroupNo, AddedEdges);
+      LLVM_DEBUG(dbgs() << "Cost of Group " << TempCost << "\n");
+      if (TempCost < BestCost || BestCost == -1) {
+        BestGroup = Match;
+        BestCost = TempCost;
+        BestGroupNo = SchedGroupNo;
+      }
+      removeEdges(AddedEdges);
+      if (BestCost == 0)
+        break;
+    }
+
+    if (BestGroup) {
+      BestGroup->add(*CurrSGSU.SU);
+      std::vector<std::pair<SUnit *, SUnit *>> AddedEdges;
+      addEdges(SyncPipeline, CurrSGSU.SU, BestGroupNo, AddedEdges);
+      LLVM_DEBUG(dbgs() << "Best Group has GroupNo: " << BestGroupNo
+                        << " and Mask " << (int)BestGroup->getMask() << "\n");
+    }
+    advancePosition();
+  }
+
+  return false;
+}
+
+int PipelineSolver::computeProblemSize() {
+  int ProblemSize = 0;
+  for (auto &PipeConflicts : ConflictedInstrs) {
+    ProblemSize += PipeConflicts.size();
+  }
+
+  return ProblemSize;
+}
+
+void PipelineSolver::solve() {
+  bool ShouldUseExact;
+  int ProblemSize = computeProblemSize();
+  assert(ProblemSize > 0);
+  bool SmallProblem = ProblemSize <= CutoffForExact;
+
+  ShouldUseExact = EnableExactSolver || (!EnableGreedySolver && SmallProblem);
+
+  MissPenalty = (ProblemSize / 2) + 1;
+  assert(MissPenalty > 0);
+
+  LLVM_DEBUG(DAG->dump());
+  if (ShouldUseExact) {
+    LLVM_DEBUG(dbgs() << "Starting EXACT pipeline solver\n");
+    solveExact();
+  } else {
+    LLVM_DEBUG(dbgs() << "Starting GREEDY pipeline solver\n");
+    solveGreedy();
+  }
+
+  makePipeline();
+}
+
 class IGroupLPDAGMutation : public ScheduleDAGMutation {
 public:
   const SIInstrInfo *TII;
@@ -183,14 +589,17 @@
 
   ScheduleDAGMI *DAG;
 
+  // Convert a user-provided SchedGroupBarrier ID to an index
+  // in an array holding the synchronized SchedGroups
+  DenseMap<int, int> BarrierIDToPipelineID;
+
   // Organize lists of SchedGroups by their SyncID. SchedGroups /
   // SCHED_GROUP_BARRIERs with different SyncIDs will have no edges added
   // between them.
-  DenseMap<int, SmallVector<SchedGroup, 4>> SyncedSchedGroupsMap;
+  SmallVector<SmallVector<SchedGroup>, 4> SyncedSchedGroups;
 
-  // Used to track instructions that are already to added to a different
-  // SchedGroup with the same SyncID.
-  DenseMap<int, DenseSet<SUnit *>> SyncedInstrsMap;
+  // Used to track instructions that can be mapped to multiple sched groups
+  SmallVector<SmallVector<SchedGroupSU>, 4> ConflictedInstrs;
 
   // Add DAG edges that enforce SCHED_BARRIER ordering.
   void addSchedBarrierEdges(SUnit &SU);
@@ -207,11 +616,11 @@
   SchedGroupMask invertSchedBarrierMask(SchedGroupMask Mask) const;
 
   // Create SchedGroups for a SCHED_GROUP_BARRIER.
-  void initSchedGroupBarrier(std::vector<SUnit>::reverse_iterator RIter);
+  void initSchedGroupBarrierPipelineStage(
+      std::vector<SUnit>::reverse_iterator RIter);
 
-  // Add DAG edges that try to enforce ordering defined by SCHED_GROUP_BARRIER
-  // instructions.
-  void addSchedGroupBarrierEdges();
+  // Map the SUnits to candidate SchedGroups
+  void collectPipelineSGSU();
 
 public:
   void apply(ScheduleDAGInstrs *DAGInstrs) override;
@@ -285,6 +694,34 @@
   return Result;
 }
 
+int SchedGroup::link(SUnit &SU, bool MakePred,
+                     std::vector<std::pair<SUnit *, SUnit *>> &AddedEdges) {
+  int MissedEdges = 0;
+  for (auto A : Collection) {
+    SUnit *B = &SU;
+    if (MakePred)
+      std::swap(A, B);
+
+    bool ShouldTryAddEdge = A != B;
+
+    // If we don't add an edge because B is already a recursive successor of
+    // A, this is not a deviation from the desired pipeline, so we should not
+    // increase the cost
+    if (ShouldTryAddEdge && DAG->IsReachable(B, A))
+      ShouldTryAddEdge = false;
+
+    if (!ShouldTryAddEdge)
+      continue;
+
+    bool Added = tryAddEdge(A, B);
+    if (Added) {
+      AddedEdges.push_back(std::make_pair(A, B));
+    } else
+      ++MissedEdges;
+  }
+
+  return MissedEdges;
+}
+
 void SchedGroup::link(SUnit &SU, bool MakePred) {
   for (auto A : Collection) {
     SUnit *B = &SU;
@@ -311,10 +748,6 @@
   link(*B);
 }
 
-bool SchedGroup::isFull() const {
-  return MaxSize && Collection.size() >= *MaxSize;
-}
-
 bool SchedGroup::canAddSU(SUnit &SU) const {
   MachineInstr &MI = *SU.getInstr();
   if (MI.getOpcode() != TargetOpcode::BUNDLE)
@@ -381,20 +814,6 @@
   }
 }
 
-// Same as makePipeline but with reverse ordering.
-static void
-makeReversePipeline(SmallVectorImpl<SchedGroup> &PipelineOrderGroups) {
-  auto I = PipelineOrderGroups.rbegin();
-  auto E = PipelineOrderGroups.rend();
-  for (; I != E; ++I) {
-    auto &GroupA = *I;
-    for (auto J = std::next(I); J != E; ++J) {
-      auto &GroupB = *J;
-      GroupA.link(GroupB);
-    }
-  }
-}
-
 void IGroupLPDAGMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
   const GCNSubtarget &ST = DAGInstrs->MF.getSubtarget<GCNSubtarget>();
   TII = ST.getInstrInfo();
@@ -446,19 +865,23 @@
   const GCNSubtarget &ST = DAGInstrs->MF.getSubtarget<GCNSubtarget>();
   TII = ST.getInstrInfo();
   DAG = static_cast<ScheduleDAGMI *>(DAGInstrs);
-  SyncedInstrsMap.clear();
-  SyncedSchedGroupsMap.clear();
+
+  BarrierIDToPipelineID.clear();
+  SyncedSchedGroups.clear();
+  ConflictedInstrs.clear();
 
   for (auto R = DAG->SUnits.rbegin(), E = DAG->SUnits.rend(); R != E; ++R) {
     if (R->getInstr()->getOpcode() == AMDGPU::SCHED_BARRIER)
      addSchedBarrierEdges(*R);
    else if (R->getInstr()->getOpcode() == AMDGPU::SCHED_GROUP_BARRIER)
-      initSchedGroupBarrier(R);
+      initSchedGroupBarrierPipelineStage(R);
  }
 
-  // SCHED_GROUP_BARRIER edges can only be added after we have found and
-  // initialized all of the SCHED_GROUP_BARRIER SchedGroups.
-  addSchedGroupBarrierEdges();
+  collectPipelineSGSU();
+  if (ConflictedInstrs.size() >= 1) {
+    PipelineSolver PS(SyncedSchedGroups, ConflictedInstrs, DAG);
+    PS.solve();
+  }
 }
 
 void SchedBarrierDAGMutation::addSchedBarrierEdges(SUnit &SchedBarrier) {
@@ -513,7 +936,7 @@
   return InvertedMask;
 }
 
-void SchedBarrierDAGMutation::initSchedGroupBarrier(
+void SchedBarrierDAGMutation::initSchedGroupBarrierPipelineStage(
     std::vector<SUnit>::reverse_iterator RIter) {
   // Remove all existing edges from the SCHED_GROUP_BARRIER that were added
   // due to the instruction having side effects.
@@ -522,24 +945,65 @@
   assert(SGB.getOpcode() == AMDGPU::SCHED_GROUP_BARRIER);
   int32_t SGMask = SGB.getOperand(0).getImm();
   int32_t Size = SGB.getOperand(1).getImm();
-  int32_t SyncID = SGB.getOperand(2).getImm();
+  int32_t BarrierID = SGB.getOperand(2).getImm();
+
+  int MappedIndex;
+  // Convert a user-provided Pipeline / Barrier ID to an index in the array
+  // which holds all the pipelines
+  if (BarrierIDToPipelineID.find(BarrierID) == BarrierIDToPipelineID.end()) {
+    MappedIndex = SyncedSchedGroups.size();
+    BarrierIDToPipelineID[BarrierID] = MappedIndex;
+    SyncedSchedGroups.resize(MappedIndex + 1);
+  } else {
+    MappedIndex = BarrierIDToPipelineID[BarrierID];
+    assert(static_cast<size_t>(MappedIndex) < SyncedSchedGroups.size());
+  }
+
   // Create a new SchedGroup and add it to a list that is mapped to the
   // SyncID. SchedGroups only enforce ordering between SchedGroups with the
   // same SyncID.
-  auto &SG = SyncedSchedGroupsMap[SyncID].emplace_back((SchedGroupMask)SGMask,
-                                                       Size, SyncID, DAG, TII);
-
-  // SyncedInstrsMap is used here is used to avoid adding the same SUs in
-  // multiple SchedGroups that have the same SyncID. This only matters for
-  // SCHED_GROUP_BARRIER and not SCHED_BARRIER.
-  SG.initSchedGroup(RIter, SyncedInstrsMap[SG.getSyncID()]);
+  SyncedSchedGroups[MappedIndex].emplace_back((SchedGroupMask)SGMask, Size,
+                                              BarrierID, DAG, TII, RIter);
 }
 
-void SchedBarrierDAGMutation::addSchedGroupBarrierEdges() {
-  // Since we traversed the DAG in reverse order when initializing
-  // SCHED_GROUP_BARRIERs we need to reverse the order in the vector to
-  // maintain user intentions and program order.
-  for (auto &SchedGroups : SyncedSchedGroupsMap)
-    makeReversePipeline(SchedGroups.second);
+void SchedBarrierDAGMutation::collectPipelineSGSU() {
+  for (auto &SU : DAG->SUnits) {
+    SchedGroupSU SGSU(&SU);
+    for (int PipelineIdx = 0;
+         static_cast<size_t>(PipelineIdx) < SyncedSchedGroups.size();
+         PipelineIdx++) {
+      for (int StageIdx = 0; static_cast<size_t>(StageIdx) <
+                             SyncedSchedGroups[PipelineIdx].size();
+           StageIdx++) {
+        SchedGroup PipelineGroup = SyncedSchedGroups[PipelineIdx][StageIdx];
+        std::vector<SUnit>::reverse_iterator RIter =
+            PipelineGroup.BarrierPosition;
+        if (!PipelineGroup.canAddSU(SU))
+          continue;
+
+        auto TempIter = RIter;
+
+        auto Match =
+            std::find_if(TempIter, DAG->SUnits.rend(),
+                         [&SU](SUnit &IterSU) { return &SU == &IterSU; });
+
+        if (Match != DAG->SUnits.rend()) {
+          // Grow the SGSU matches to hold the new match
+          if (static_cast<size_t>(PipelineIdx) >= SGSU.Matches.size())
+            SGSU.Matches.resize(PipelineIdx + 1);
+          SGSU.Matches[PipelineIdx].push_back(StageIdx);
+        }
+      }
+      if (static_cast<size_t>(PipelineIdx) >= SGSU.Matches.size())
+        continue; // The SGSU is not included in the current sync pipeline
+
+      if (SGSU.Matches[PipelineIdx].size() >= 1) {
+        // Grow ConflictedInstrs to hold the pipeline instructions
+        if (static_cast<size_t>(PipelineIdx) >= ConflictedInstrs.size())
+          ConflictedInstrs.resize(PipelineIdx + 1);
+        ConflictedInstrs[PipelineIdx].push_back(SGSU);
+      }
+    }
+  }
+}
+
 } // namespace
diff --git a/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir
@@ -0,0 +1,396 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=gfx908 -amdgpu-igrouplp-greedy-solver -run-pass=machine-scheduler -o - %s | FileCheck -check-prefix=GREEDY %s
+# RUN: llc -march=amdgcn -mcpu=gfx908
-amdgpu-igrouplp-exact-solver -run-pass=machine-scheduler -o - %s | FileCheck -check-prefix=EXACT %s + +--- | + define amdgpu_kernel void @sched_group_barrier_2_VMEM_10_ALU_5_MFMA_2_VMEM_WRITE(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } + define amdgpu_kernel void @sched_group_barrier_MFMA_VALU_and_SALU_alternating(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } + define amdgpu_kernel void @sched_group_barrier_2_separate_pipes(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } + define amdgpu_kernel void @sched_group_barrier_3_separate_pipes(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } + + !0 = distinct !{!0} + !1 = !{!1, !0} +... + +--- +name: sched_group_barrier_2_VMEM_10_ALU_5_MFMA_2_VMEM_WRITE +tracksRegLiveness: true +body: | + bb.0: + ; GREEDY-LABEL: name: sched_group_barrier_2_VMEM_10_ALU_5_MFMA_2_VMEM_WRITE + ; GREEDY: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF + ; GREEDY-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF + ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; GREEDY-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF + ; GREEDY-NEXT: S_NOP 0 + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 16, 2, 0 + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 10, 0 + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 5, 0 + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 
64, 2, 0 + ; GREEDY-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]] + ; EXACT-LABEL: name: sched_group_barrier_2_VMEM_10_ALU_5_MFMA_2_VMEM_WRITE + ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF + ; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF + ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; EXACT-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF + ; EXACT-NEXT: S_NOP 0 + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 16, 2, 0 + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 10, 0 + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 5, 0 + ; EXACT-NEXT: SCHED_GROUP_BARRIER 64, 2, 0 + ; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]] + %0:sreg_64 = IMPLICIT_DEF + %1:vgpr_32 = IMPLICIT_DEF + %2:areg_128 = IMPLICIT_DEF + %3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + %4:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %3, implicit $exec + GLOBAL_STORE_DWORD_SADDR %1, %4, %0, 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + %5:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit $exec + %6:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit $exec + S_NOP 0 + %7:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %2, 0, 0, 0, implicit $mode, implicit 
$exec + %8:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %7, 0, 0, 0, implicit $mode, implicit $exec + %9:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %8, 0, 0, 0, implicit $mode, implicit $exec + %10:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %9, 0, 0, 0, implicit $mode, implicit $exec + %11:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %10, 0, 0, 0, implicit $mode, implicit $exec + %12:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + %13:vgpr_32 = nsw V_MUL_LO_U32_e64 %12, %12, implicit $exec + GLOBAL_STORE_DWORD_SADDR %1, %13, %0, 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; 2 VMEM + SCHED_GROUP_BARRIER 16, 2, 0 + ; 10 ALU + SCHED_GROUP_BARRIER 6, 10, 0 + ; 5 MFMA + SCHED_GROUP_BARRIER 8, 5, 0 + ; 2 VMEM_WRITE + SCHED_GROUP_BARRIER 64, 2, 0 + S_ENDPGM 0, implicit %5, implicit %6, implicit %11 +... + +--- +name: sched_group_barrier_MFMA_VALU_and_SALU_alternating +tracksRegLiveness: true +body: | + bb.0: + ; GREEDY-LABEL: name: sched_group_barrier_MFMA_VALU_and_SALU_alternating + ; GREEDY: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF + ; GREEDY-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF + ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; GREEDY-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 16, 2, 0 + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 1, 0 + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 1, 0 + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 1, 0 + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 1, 0 + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 1, 0 + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 1, 0 + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: S_NOP 0 + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 1, 0 + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 1, 0 + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 
[[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec + ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 1, 0 + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 1, 0 + ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 64, 2, 0 + ; GREEDY-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]] + ; EXACT-LABEL: name: sched_group_barrier_MFMA_VALU_and_SALU_alternating + ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF + ; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF + ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; EXACT-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF + ; EXACT-NEXT: SCHED_GROUP_BARRIER 16, 2, 0 + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 0 + ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 1, 0 + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 0 + ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 1, 0 + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 0 + ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 1, 0 + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: S_NOP 0 + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 0 + ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 1, 0 + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec + ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 0 + ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 1, 0 + ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], 
[[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; EXACT-NEXT: SCHED_GROUP_BARRIER 64, 2, 0 + ; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]] + %0:sreg_64 = IMPLICIT_DEF + %1:vgpr_32 = IMPLICIT_DEF + %2:areg_128 = IMPLICIT_DEF + %3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + %4:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %3, implicit $exec + GLOBAL_STORE_DWORD_SADDR %1, %4, %0, 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + %5:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit $exec + %6:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit $exec + S_NOP 0 + %7:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %2, 0, 0, 0, implicit $mode, implicit $exec + %8:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %7, 0, 0, 0, implicit $mode, implicit $exec + %9:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %8, 0, 0, 0, implicit $mode, implicit $exec + %10:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %9, 0, 0, 0, implicit $mode, implicit $exec + %11:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %10, 0, 0, 0, implicit $mode, implicit $exec + %12:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + %13:vgpr_32 = nsw V_MUL_LO_U32_e64 %12, %12, implicit $exec + GLOBAL_STORE_DWORD_SADDR %1, %13, %0, 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; 2 VMEM + SCHED_GROUP_BARRIER 16, 2, 0 + ; 1 VALU+SALU + SCHED_GROUP_BARRIER 8, 1, 0 + ; 1 MFMA + SCHED_GROUP_BARRIER 6, 1, 0 + ; 1 VALU+SALU + SCHED_GROUP_BARRIER 8, 1, 0 + ; 1 MFMA + SCHED_GROUP_BARRIER 6, 1, 0 + ; 1 VALU+SALU + SCHED_GROUP_BARRIER 8, 1, 0 + ; 1 MFMA + SCHED_GROUP_BARRIER 6, 1, 0 + ; 1 VALU+SALU + SCHED_GROUP_BARRIER 8, 1, 0 + ; 1 MFMA + SCHED_GROUP_BARRIER 6, 1, 0 + ; 1 VALU+SALU + SCHED_GROUP_BARRIER 8, 1, 0 + ; 1 MFMA + SCHED_GROUP_BARRIER 6, 1, 0 + ; 2 VMEM_WRITE + SCHED_GROUP_BARRIER 64, 2, 0 + S_ENDPGM 0, implicit %5, implicit %6, implicit %11 +... 
+ +--- +name: sched_group_barrier_2_separate_pipes +tracksRegLiveness: true +body: | + bb.0: + ; GREEDY-LABEL: name: sched_group_barrier_2_separate_pipes + ; GREEDY: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF + ; GREEDY-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF + ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; GREEDY-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 2, 0 + ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; GREEDY-NEXT: S_NOP 0 + ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_1]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 16, 2, 0 + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 5, 0 + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 2, 2 + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 64, 2, 2 + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 2, 2 + ; GREEDY-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MUL_LO_U32_e64_3]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]] + ; EXACT-LABEL: name: sched_group_barrier_2_separate_pipes + ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF + ; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF + ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; EXACT-NEXT: 
[[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF + ; EXACT-NEXT: S_NOP 0 + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 16, 2, 0 + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 5, 0 + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 2, 0 + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 2, 2 + ; EXACT-NEXT: SCHED_GROUP_BARRIER 64, 2, 2 + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 2, 2 + ; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]] + %0:sreg_64 = IMPLICIT_DEF + %1:vgpr_32 = IMPLICIT_DEF + %2:areg_128 = IMPLICIT_DEF + %3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + %4:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %3, implicit $exec + GLOBAL_STORE_DWORD_SADDR %1, %4, %0, 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + %5:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit $exec + %6:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit $exec + S_NOP 0 + %7:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %2, 0, 0, 0, implicit $mode, implicit $exec + %8:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %7, 0, 0, 0, implicit $mode, implicit $exec + %9:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %8, 0, 0, 0, implicit $mode, implicit $exec + %10:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %9, 0, 0, 0, implicit $mode, implicit $exec + %11:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %10, 0, 0, 0, implicit $mode, implicit $exec + %12:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + %13:vgpr_32 = nsw V_MUL_LO_U32_e64 %12, %12, implicit $exec + GLOBAL_STORE_DWORD_SADDR %1, %13, %0, 512, 0, implicit 
$exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; 2 VMEM + SCHED_GROUP_BARRIER 16, 2, 0 + ; 5 ALU + SCHED_GROUP_BARRIER 6, 5, 0 + ; 2 MFMA + SCHED_GROUP_BARRIER 8, 2, 0 + ; 2 MFMA + SCHED_GROUP_BARRIER 8, 2, 2 + ; 2 VMEM_WRITE + SCHED_GROUP_BARRIER 64, 2, 2 + ; 2 MFMA + SCHED_GROUP_BARRIER 8, 2, 2 + S_ENDPGM 0, implicit %5, implicit %6, implicit %11 +... + +--- +name: sched_group_barrier_3_separate_pipes +tracksRegLiveness: true +body: | + bb.0: + ; GREEDY-LABEL: name: sched_group_barrier_3_separate_pipes + ; GREEDY: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF + ; GREEDY-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF + ; GREEDY-NEXT: S_NOP 0 + ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; GREEDY-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 4, 1, 1 + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 2, 0 + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 2, 2 + ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec + ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 16, 2, 0 + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 5, 0 + ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_2]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 64, 2, 2 + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 2, 2 + ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 2, 1, 1 + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 1, 1 + ; GREEDY-NEXT: SCHED_GROUP_BARRIER 16, 1, 1 + ; GREEDY-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_3]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]] + ; 
EXACT-LABEL: name: sched_group_barrier_3_separate_pipes + ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF + ; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF + ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + ; EXACT-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF + ; EXACT-NEXT: S_NOP 0 + ; EXACT-NEXT: SCHED_GROUP_BARRIER 16, 2, 0 + ; EXACT-NEXT: SCHED_GROUP_BARRIER 4, 1, 1 + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec + ; EXACT-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 5, 0 + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 2, 0 + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 2, 2 + ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 64, 2, 2 + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 2, 2 + ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec + ; EXACT-NEXT: SCHED_GROUP_BARRIER 2, 1, 1 + ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 1 + ; EXACT-NEXT: SCHED_GROUP_BARRIER 16, 1, 1 + ; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]] + %0:sreg_64 = IMPLICIT_DEF + %1:vgpr_32 = IMPLICIT_DEF + %2:areg_128 = IMPLICIT_DEF + %3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1) + %4:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %3, implicit $exec + GLOBAL_STORE_DWORD_SADDR %1, %4, %0, 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1) + %5:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit $exec + %6:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit 
$exec
+    S_NOP 0
+    %7:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %2, 0, 0, 0, implicit $mode, implicit $exec
+    %8:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %7, 0, 0, 0, implicit $mode, implicit $exec
+    %9:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %8, 0, 0, 0, implicit $mode, implicit $exec
+    %10:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %9, 0, 0, 0, implicit $mode, implicit $exec
+    %11:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %10, 0, 0, 0, implicit $mode, implicit $exec
+    %12:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    %13:vgpr_32 = nsw V_MUL_LO_U32_e64 %12, %12, implicit $exec
+    GLOBAL_STORE_DWORD_SADDR %1, %13, %0, 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; 2 VMEM
+    SCHED_GROUP_BARRIER 16, 2, 0
+    ; 5 ALU
+    SCHED_GROUP_BARRIER 6, 5, 0
+    ; 2 MFMA
+    SCHED_GROUP_BARRIER 8, 2, 0
+    ; 2 MFMA
+    SCHED_GROUP_BARRIER 8, 2, 2
+    ; 2 VMEM_WRITE
+    SCHED_GROUP_BARRIER 64, 2, 2
+    ; 2 MFMA
+    SCHED_GROUP_BARRIER 8, 2, 2
+    ; 1 SALU
+    SCHED_GROUP_BARRIER 4, 1, 1
+    ; 1 VALU
+    SCHED_GROUP_BARRIER 2, 1, 1
+    ; 1 MFMA
+    SCHED_GROUP_BARRIER 8, 1, 1
+    ; 1 VMEM
+    SCHED_GROUP_BARRIER 16, 1, 1
+    S_ENDPGM 0, implicit %5, implicit %6, implicit %11
+...
+## NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+# EXACT: {{.*}}
+# GREEDY: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir
--- a/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir
+++ b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir
@@ -65,26 +65,26 @@
     ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
     ; CHECK-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 32, 1, 0
     ; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 2, 1, 0
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 5, 0
     ; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
     ; CHECK-NEXT: S_NOP 0
     ; CHECK-NEXT: SCHED_GROUP_BARRIER 32, 1, 0
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 2, 1, 0
     ; CHECK-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
-    ; CHECK-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
-    ; CHECK-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 2, 3, 0
+    ; CHECK-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 5, 0
+    ; CHECK-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 32, 1, 0
     ; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
-    ; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_2]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 2, 3, 0
+    ; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
     ; CHECK-NEXT: SCHED_GROUP_BARRIER 64, 2, 0
-    ; CHECK-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_3]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
+    ; CHECK-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
     %0:sreg_64 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:areg_128 = IMPLICIT_DEF
@@ -128,23 +128,23 @@
     ; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
     ; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
     ; CHECK-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 16, 2, 0
     ; CHECK-NEXT: S_NOP 0
-    ; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
-    ; CHECK-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
-    ; CHECK-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
-    ; CHECK-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 1, 10, 0
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 16, 2, 0
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec
+    ; CHECK-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec
+    ; CHECK-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; CHECK-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 5, 0
     ; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
-    ; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_2]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 1, 10, 0
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 5, 0
     ; CHECK-NEXT: SCHED_GROUP_BARRIER 64, 2, 0
-    ; CHECK-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
+    ; CHECK-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_3]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
     %0:sreg_64 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:areg_128 = IMPLICIT_DEF
@@ -186,26 +186,26 @@
     ; CHECK-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
     ; CHECK-NEXT: SCHED_GROUP_BARRIER 16, 2, 0
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
     ; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
     ; CHECK-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
     ; CHECK-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
     ; CHECK-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
     ; CHECK-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
     ; CHECK-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
     ; CHECK-NEXT: S_NOP 0
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
     ; CHECK-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
     ; CHECK-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
     ; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
     ; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
     ; CHECK-NEXT: SCHED_GROUP_BARRIER 64, 2, 0
     ; CHECK-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
@@ -252,3 +252,5 @@
     SCHED_GROUP_BARRIER 64, 2, 0
     S_ENDPGM 0, implicit %5, implicit %6, implicit %11
 ...
+## NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+# CHECK: {{.*}}
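
Reviewer note, not part of the patch: the SCHED_GROUP_BARRIER pseudo in the tests above takes (mask, size, syncid), where the mask bits mirror the SchedGroupMask enum in AMDGPUIGroupLP.cpp (1 = ALU, 2 = VALU, 4 = SALU, 8 = MFMA, 16 = VMEM, 32 = VMEM_READ, 64 = VMEM_WRITE). As a minimal sketch of where these pseudos typically come from, assuming an AMDGPU device compile, the first pipeline in the new solver test corresponds to source-level hints written with the Clang builtin __builtin_amdgcn_sched_group_barrier; the function name below is hypothetical:

// Hypothetical illustration only (compiles for an amdgcn target): each call
// lowers to one SCHED_GROUP_BARRIER pseudo. Arguments must be integer
// constant expressions: (mask, number of instructions, sync group id).
void pipeline_hint(void) {
  __builtin_amdgcn_sched_group_barrier(16, 2, 0); // 2 VMEM, sync group 0
  __builtin_amdgcn_sched_group_barrier(6, 5, 0);  // 5 ALU (VALU | SALU)
  __builtin_amdgcn_sched_group_barrier(8, 2, 0);  // 2 MFMA
  __builtin_amdgcn_sched_group_barrier(8, 2, 2);  // 2 MFMA, sync group 2
  __builtin_amdgcn_sched_group_barrier(64, 2, 2); // 2 VMEM_WRITE, sync group 2
}

Groups with the same syncid form one pipeline that the greedy or exact solver fits; groups with different syncids are solved independently.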