diff --git a/llvm/lib/Target/AMDGPU/AMDGPUExportClustering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUExportClustering.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUExportClustering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUExportClustering.cpp
@@ -32,21 +32,64 @@
          MI->getOpcode() == AMDGPU::EXP_DONE;
 }
 
+static void sortChain(SmallVector<SUnit *, 8> &Chain, ScheduleDAGInstrs *DAG) {
+  const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(DAG->TII);
+
+  // Move position exports before other exports while preserving the
+  // relative order within each export type (pos or other).
+  unsigned InsertionPoint = 0;
+  for (unsigned Idx = 0, End = Chain.size(); Idx < End; ++Idx) {
+    SUnit *SU = Chain[Idx];
+    const MachineInstr *MI = SU->getInstr();
+    // Export targets 12-15 are the position exports pos0-pos3.
+    int Imm = TII->getNamedOperand(*MI, AMDGPU::OpName::tgt)->getImm();
+    if (Imm >= 12 && Imm <= 15) {
+      if (Idx != InsertionPoint) {
+        auto Begin = Chain.begin();
+        Chain.erase(Begin + Idx);
+        Chain.insert(Begin + InsertionPoint, SU);
+      }
+      InsertionPoint++;
+    }
+  }
+}
+
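Note: sortChain is a stable partition of the chain: position exports (tgt values 12-15, i.e. pos0-pos3) move in front of all other exports, and the relative order within each group is preserved. A minimal standalone sketch of the same reordering, using plain integer targets as stand-ins for the SUnit/tgt-operand plumbing (all names below are illustrative, not part of the patch):

#include <algorithm>
#include <cassert>
#include <vector>

// Export targets 12-15 are the position exports pos0-pos3.
static bool isPositionTarget(int Tgt) { return Tgt >= 12 && Tgt <= 15; }

int main() {
  // param0 (32), pos0 (12), param1 (33), pos1 (13)
  std::vector<int> Chain = {32, 12, 33, 13};

  // Stable partition: position exports first, order preserved within
  // each group -- the same effect as sortChain's erase/insert loop.
  std::stable_partition(Chain.begin(), Chain.end(), isPositionTarget);

  assert((Chain == std::vector<int>{12, 13, 32, 33}));
  return 0;
}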
 static void buildCluster(ArrayRef<SUnit *> Exports, ScheduleDAGInstrs *DAG) {
-  // Cluster a series of exports. Also copy all dependencies to the first
-  // export to avoid computation being inserted into the chain.
-  SUnit *ChainHead = Exports[0];
+  SUnit *ChainHead = Exports.front();
+
+  // First remove any existing edges between exports, as these would
+  // otherwise create cycles and prevent the chain from being reordered.
+  for (SUnit *SU : Exports) {
+    SmallVector<SDep, 2> ToRemove;
+    for (const SDep &Pred : SU->Preds) {
+      SUnit *PredSU = Pred.getSUnit();
+      if (isExport(*PredSU)) {
+        assert(Pred.isBarrier() &&
+               "only barrier edges between exports are expected");
+        ToRemove.push_back(Pred);
+      }
+    }
+    for (SDep Pred : ToRemove)
+      SU->removePred(Pred);
+  }
+
+  // Now construct the cluster from the chain by adding new edges.
+  // Copy all dependencies to the head of the chain to avoid any
+  // computation being inserted into the chain.
   for (unsigned Idx = 0, End = Exports.size() - 1; Idx < End; ++Idx) {
     SUnit *SUa = Exports[Idx];
     SUnit *SUb = Exports[Idx + 1];
-    if (DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
-      for (const SDep &Pred : SUb->Preds) {
-        SUnit *PredSU = Pred.getSUnit();
-        if (Pred.isWeak() || isExport(*PredSU))
-          continue;
+
+    // Copy the dependencies of SUb to the chain head.
+    for (const SDep &Pred : SUb->Preds) {
+      SUnit *PredSU = Pred.getSUnit();
+      if (!isExport(*PredSU) && !Pred.isWeak())
         DAG->addEdge(ChainHead, SDep(PredSU, SDep::Artificial));
-      }
     }
+
+    // Add a new barrier edge to enforce the export order.
+    DAG->addEdge(SUb, SDep(SUa, SDep::Barrier));
+    // Also add a cluster edge to keep the exports together.
+    DAG->addEdge(SUb, SDep(SUa, SDep::Cluster));
   }
 }
 
@@ -77,8 +120,10 @@
   }
 
   // Apply clustering
-  for (auto &Chain : ExportChains)
+  for (auto &Chain : ExportChains) {
+    sortChain(Chain, DAG);
     buildCluster(Chain, DAG);
+  }
 }
 
 } // end namespace
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.exp.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.exp.ll
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.exp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.exp.ll
@@ -557,5 +557,37 @@
   ret void
 }
 
+; GCN-LABEL: {{^}}test_export_pos_before_param:
+; GCN: exp pos0
+; GCN-NOT: s_waitcnt
+; GCN: exp param0
+define amdgpu_kernel void @test_export_pos_before_param(float %x, float %y) #0 {
+  %z0 = fadd float %x, %y
+  call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float 1.0, float 1.0, float 1.0, float %z0, i1 false, i1 false)
+  %z1 = fsub float %y, %x
+  call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float 0.0, float 0.0, float 0.0, float %z1, i1 true, i1 false)
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_export_pos_before_param_ordered:
+; GCN: exp pos0
+; GCN: exp pos1
+; GCN: exp pos2
+; GCN-NOT: s_waitcnt
+; GCN: exp param0
+; GCN: exp param1
+; GCN: exp param2
+define amdgpu_kernel void @test_export_pos_before_param_ordered(float %x, float %y) #0 {
+  %z0 = fadd float %x, %y
+  call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float 1.0, float 1.0, float 1.0, float %z0, i1 false, i1 false)
+  call void @llvm.amdgcn.exp.f32(i32 33, i32 15, float 1.0, float 1.0, float 1.0, float %z0, i1 false, i1 false)
+  call void @llvm.amdgcn.exp.f32(i32 34, i32 15, float 1.0, float 1.0, float 1.0, float %z0, i1 false, i1 false)
+  %z1 = fsub float %y, %x
+  call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float 0.0, float 0.0, float 0.0, float %z1, i1 false, i1 false)
+  call void @llvm.amdgcn.exp.f32(i32 13, i32 15, float 0.0, float 0.0, float 0.0, float %z1, i1 false, i1 false)
+  call void @llvm.amdgcn.exp.f32(i32 14, i32 15, float 0.0, float 0.0, float 0.0, float %z1, i1 true, i1 false)
+  ret void
+}
+
 attributes #0 = { nounwind }
 attributes #1 = { nounwind inaccessiblememonly }
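For context, this clustering runs as a ScheduleDAGMutation: mutations execute after the scheduling DAG is built and may add or remove edges, which is exactly what buildCluster does. A sketch of how such a mutation is attached to the machine scheduler, assuming the factory createAMDGPUExportClusteringDAGMutation declared in AMDGPUExportClustering.h; the surrounding createGCNScheduler function is a simplified stand-in, not the actual target code:

static ScheduleDAGInstrs *createGCNScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG = createGenericSchedLive(C);
  // DAG mutations run once the dependence graph exists, so they can
  // rewrite edges (remove barriers, add cluster/artificial edges).
  DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
  return DAG;
}

The new tests check the visible effect: although the param exports (tgt 32-34) precede the pos exports (tgt 12-14) in the IR, the scheduler emits the position exports first, and the cluster stays contiguous, so no s_waitcnt lands between the exports (enforced by the GCN-NOT: s_waitcnt lines).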