diff --git a/llvm/lib/Target/AMDGPU/AMDGPUExportClustering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUExportClustering.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPUExportClustering.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUExportClustering.cpp @@ -32,53 +32,87 @@ MI->getOpcode() == AMDGPU::EXP_DONE; } +static bool isPositionExport(const SIInstrInfo *TII, SUnit *SU) { + const MachineInstr *MI = SU->getInstr(); + int Imm = TII->getNamedOperand(*MI, AMDGPU::OpName::tgt)->getImm(); + return Imm >= 12 && Imm <= 15; +} + +static void sortChain(const SIInstrInfo *TII, SmallVector<SUnit *, 8> &Chain, + unsigned PosCount) { + if (!PosCount || PosCount == Chain.size()) + return; + + // Position exports should occur as soon as possible in the shader + // for optimal performance. This moves position exports before + // other exports while preserving the order within different export + // types (pos or other). + SmallVector<SUnit *, 8> Copy(Chain); + unsigned PosIdx = 0; + unsigned OtherIdx = PosCount; + for (SUnit *SU : Copy) { + if (isPositionExport(TII, SU)) + Chain[PosIdx++] = SU; + else + Chain[OtherIdx++] = SU; + } +} + static void buildCluster(ArrayRef<SUnit *> Exports, ScheduleDAGInstrs *DAG) { - // Cluster a series of exports. Also copy all dependencies to the first - // export to avoid computation being inserted into the chain. - SUnit *ChainHead = Exports[0]; + SUnit *ChainHead = Exports.front(); + + // Now construct cluster from chain by adding new edges. for (unsigned Idx = 0, End = Exports.size() - 1; Idx < End; ++Idx) { SUnit *SUa = Exports[Idx]; SUnit *SUb = Exports[Idx + 1]; - if (DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) { - for (const SDep &Pred : SUb->Preds) { - SUnit *PredSU = Pred.getSUnit(); - if (Pred.isWeak() || isExport(*PredSU)) - continue; + + // Copy all dependencies to the head of the chain to avoid any + // computation being inserted into the chain. 
+ for (const SDep &Pred : SUb->Preds) { + SUnit *PredSU = Pred.getSUnit(); + if (!isExport(*PredSU) && !Pred.isWeak()) DAG->addEdge(ChainHead, SDep(PredSU, SDep::Artificial)); - } } + + // New barrier edge ordering exports + DAG->addEdge(SUb, SDep(SUa, SDep::Barrier)); + // Also add cluster edge + DAG->addEdge(SUb, SDep(SUa, SDep::Cluster)); } } void ExportClustering::apply(ScheduleDAGInstrs *DAG) { - SmallVector<SmallVector<SUnit *, 8>, 4> ExportChains; - DenseMap<unsigned, unsigned> ChainMap; + const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(DAG->TII); + + SmallVector<SUnit *, 8> Chain; - // Build chains of exports + // Pass through DAG gathering a list of exports and removing barrier edges + // creating dependencies on exports. Freeing exports of successor edges + // allows more scheduling freedom, and nothing should be order dependent + // on exports. Edges will be added later to order the exports. + unsigned PosCount = 0; for (SUnit &SU : DAG->SUnits) { - if (!isExport(SU)) - continue; + if (isExport(SU)) { + Chain.push_back(&SU); + if (isPositionExport(TII, &SU)) + PosCount++; + } - unsigned ChainID = ExportChains.size(); + SmallVector<SDep, 2> ToRemove; for (const SDep &Pred : SU.Preds) { - const SUnit &PredSU = *Pred.getSUnit(); - if (isExport(PredSU) && !Pred.isArtificial()) { - ChainID = ChainMap.lookup(PredSU.NodeNum); - break; - } + SUnit *PredSU = Pred.getSUnit(); + if (Pred.isBarrier() && isExport(*PredSU)) + ToRemove.push_back(Pred); } - ChainMap[SU.NodeNum] = ChainID; - - if (ChainID == ExportChains.size()) - ExportChains.push_back(SmallVector<SUnit *, 8>()); - - auto &Chain = ExportChains[ChainID]; - Chain.push_back(&SU); + for (SDep Pred : ToRemove) + SU.removePred(Pred); } - // Apply clustering - for (auto &Chain : ExportChains) + // Apply clustering if there are multiple exports + if (Chain.size() > 1) { + sortChain(TII, Chain, PosCount); buildCluster(Chain, DAG); + } } } // end namespace diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.exp.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.exp.ll --- 
a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.exp.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.exp.ll @@ -3,6 +3,7 @@ declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #1 declare void @llvm.amdgcn.exp.i32(i32, i32, i32, i32, i32, i32, i1, i1) #1 +declare float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32>, i32, i32, i32) #2 ; GCN-LABEL: {{^}}test_export_zeroes_f32: ; GCN: exp mrt0 off, off, off, off{{$}} @@ -557,5 +558,50 @@ ret void } +; GCN-LABEL: {{^}}test_export_pos_before_param: +; GCN: exp pos0 +; GCN-NOT: s_waitcnt +; GCN: exp param0 +define amdgpu_kernel void @test_export_pos_before_param(float %x, float %y) #0 { + %z0 = fadd float %x, %y + call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float 1.0, float 1.0, float 1.0, float %z0, i1 false, i1 false) + %z1 = fsub float %y, %x + call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float 0.0, float 0.0, float 0.0, float %z1, i1 true, i1 false) + ret void +} + +; GCN-LABEL: {{^}}test_export_pos_before_param_ordered: +; GCN: exp pos0 +; GCN: exp pos1 +; GCN: exp pos2 +; GCN-NOT: s_waitcnt +; GCN: exp param0 +; GCN: exp param1 +; GCN: exp param2 +define amdgpu_kernel void @test_export_pos_before_param_ordered(float %x, float %y) #0 { + %z0 = fadd float %x, %y + call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float 1.0, float 1.0, float 1.0, float %z0, i1 false, i1 false) + call void @llvm.amdgcn.exp.f32(i32 33, i32 15, float 1.0, float 1.0, float 1.0, float %z0, i1 false, i1 false) + call void @llvm.amdgcn.exp.f32(i32 34, i32 15, float 1.0, float 1.0, float 1.0, float %z0, i1 false, i1 false) + %z1 = fsub float %y, %x + call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float 0.0, float 0.0, float 0.0, float %z1, i1 false, i1 false) + call void @llvm.amdgcn.exp.f32(i32 13, i32 15, float 0.0, float 0.0, float 0.0, float %z1, i1 false, i1 false) + call void @llvm.amdgcn.exp.f32(i32 14, i32 15, float 0.0, float 0.0, float 0.0, float %z1, i1 true, i1 false) + ret void +} + +; GCN-LABEL: 
{{^}}test_export_pos_before_param_across_load: +; GCN: exp pos0 +; GCN-NEXT: exp param0 +; GCN-NEXT: exp param1 +define amdgpu_kernel void @test_export_pos_before_param_across_load(i32 %idx) #0 { + call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float 1.0, float 1.0, float 1.0, float 1.0, i1 false, i1 false) + call void @llvm.amdgcn.exp.f32(i32 33, i32 15, float 1.0, float 1.0, float 1.0, float 0.5, i1 false, i1 false) + %load = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> undef, i32 %idx, i32 0, i32 0) + call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float 0.0, float 0.0, float 0.0, float %load, i1 true, i1 false) + ret void +} + attributes #0 = { nounwind } attributes #1 = { nounwind inaccessiblememonly } +attributes #2 = { nounwind readnone } diff --git a/llvm/test/CodeGen/AMDGPU/wait.ll b/llvm/test/CodeGen/AMDGPU/wait.ll --- a/llvm/test/CodeGen/AMDGPU/wait.ll +++ b/llvm/test/CodeGen/AMDGPU/wait.ll @@ -10,8 +10,8 @@ ; DEFAULT: s_waitcnt lgkmcnt(0) ; DEFAULT: buffer_load_format_xyzw ; DEFAULT: buffer_load_format_xyzw -; DEFAULT: s_waitcnt vmcnt(0) -; DEFAULT: exp +; DEFAULT-DAG: s_waitcnt vmcnt(0) +; DEFAULT-DAG: exp ; DEFAULT: exp ; DEFAULT-NEXT: s_endpgm define amdgpu_vs void @main(<16 x i8> addrspace(4)* inreg %arg, <16 x i8> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, <16 x i8> addrspace(4)* inreg %arg3, <16 x i8> addrspace(4)* inreg %arg4, i32 inreg %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9, float addrspace(4)* inreg %constptr) #0 {