Index: lib/Target/AMDGPU/AMDGPUUnifyDivergentExitNodes.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUUnifyDivergentExitNodes.cpp
+++ lib/Target/AMDGPU/AMDGPUUnifyDivergentExitNodes.cpp
@@ -169,6 +169,9 @@
   SmallVector<BasicBlock *, 4> ReturningBlocks;
   SmallVector<BasicBlock *, 4> UnreachableBlocks;
 
+  // Dummy return block for infinite loop.
+  BasicBlock *DummyReturnBB = nullptr;
+
   for (BasicBlock *BB : PDT.getRoots()) {
     if (isa<ReturnInst>(BB->getTerminator())) {
       if (!isUniformlyReached(DA, *BB))
@@ -176,6 +179,22 @@
     } else if (isa<UnreachableInst>(BB->getTerminator())) {
       if (!isUniformlyReached(DA, *BB))
         UnreachableBlocks.push_back(BB);
+    } else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {
+      if (BI->isUnconditional()) {
+        BasicBlock *LoopHeaderBB = BI->getSuccessor(0);
+        if (DummyReturnBB == nullptr) {
+          DummyReturnBB = BasicBlock::Create(F.getContext(), "DummyRet", &F);
+          Type *RetTy = F.getReturnType();
+          Value *RetVal = RetTy->isVoidTy() ? nullptr : UndefValue::get(RetTy);
+          ReturnInst::Create(F.getContext(), RetVal, DummyReturnBB);
+          ReturningBlocks.push_back(DummyReturnBB);
+        }
+        // Create a dummy edge from BB to the dummy return block.
+        ConstantInt *BoolTrue = ConstantInt::getTrue(F.getContext());
+        BB->getInstList().pop_back(); // Remove the unconditional branch.
+        // Add a new conditional branch with a dummy edge to the return block.
+        BranchInst::Create(LoopHeaderBB, DummyReturnBB, BoolTrue, BB);
+      }
     }
   }
 
Index: test/CodeGen/AMDGPU/branch-relaxation.ll
===================================================================
--- test/CodeGen/AMDGPU/branch-relaxation.ll
+++ test/CodeGen/AMDGPU/branch-relaxation.ll
@@ -431,11 +431,17 @@
 ; si_mask_branch
 
 ; GCN-LABEL: {{^}}analyze_mask_branch:
-; GCN: v_cmp_lt_f32_e32 vcc
-; GCN-NEXT: s_and_saveexec_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], vcc
+; GCN: v_cmp_nlt_f32_e32 vcc
+; GCN-NEXT: s_and_saveexec_b64 [[TEMP_MASK:s\[[0-9]+:[0-9]+\]]], vcc
+; GCN-NEXT: s_xor_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], exec, [[TEMP_MASK]]
+; GCN-NEXT: ; mask branch [[FLOW:BB[0-9]+_[0-9]+]]
+
+; GCN: [[FLOW]]: ; %Flow
+; GCN-NEXT: s_or_saveexec_b64 [[TEMP_MASK1:s\[[0-9]+:[0-9]+\]]], [[MASK]]
+; GCN-NEXT: s_xor_b64 exec, exec, [[TEMP_MASK1]]
 ; GCN-NEXT: ; mask branch [[RET:BB[0-9]+_[0-9]+]]
 
-; GCN-NEXT: [[LOOP_BODY:BB[0-9]+_[0-9]+]]: ; %loop_body
+; GCN: [[LOOP_BODY:BB[0-9]+_[0-9]+]]: ; %loop_body
 ; GCN: ;;#ASMSTART
 ; GCN: v_nop_e64
 ; GCN: v_nop_e64
@@ -444,6 +450,7 @@
 ; GCN: v_nop_e64
 ; GCN: v_nop_e64
 ; GCN: ;;#ASMEND
+; GCN: s_cbranch_vccz [[RET]]
 
 ; GCN-NEXT: [[LONGBB:BB[0-9]+_[0-9]+]]: ; %loop_body
 ; GCN-NEXT: ; in Loop: Header=[[LOOP_BODY]] Depth=1
@@ -452,9 +459,7 @@
 ; GCN-NEXT: s_subb_u32 vcc_hi, vcc_hi, 0
 ; GCN-NEXT: s_setpc_b64 vcc
 
-; GCN-NEXT: [[RET]]: ; %ret
-; GCN-NEXT: s_or_b64 exec, exec, [[MASK]]
-; GCN: buffer_store_dword
+; GCN-NEXT: [[RET]]: ; %UnifiedReturnBlock
 ; GCN-NEXT: s_endpgm
 define amdgpu_kernel void @analyze_mask_branch() #0 {
 entry:
Index: test/CodeGen/AMDGPU/cf-loop-on-constant.ll
===================================================================
--- test/CodeGen/AMDGPU/cf-loop-on-constant.ll
+++ test/CodeGen/AMDGPU/cf-loop-on-constant.ll
@@ -2,10 +2,11 @@
 ; RUN: llc -march=amdgcn -verify-machineinstrs -O0 < %s
 
 ; GCN-LABEL: {{^}}test_loop:
+; GCN: s_and_b64 vcc, exec, -1
 ; GCN: [[LABEL:BB[0-9+]_[0-9]+]]: ; %for.body{{$}}
 ; GCN: ds_read_b32
 ; GCN: ds_write_b32
-; GCN: s_branch [[LABEL]]
+; GCN: s_cbranch_vccnz [[LABEL]]
 ; GCN: s_endpgm
 define amdgpu_kernel void @test_loop(float addrspace(3)* %ptr, i32 %n) nounwind {
 entry:
Index: test/CodeGen/AMDGPU/infinite-loop.ll
===================================================================
--- test/CodeGen/AMDGPU/infinite-loop.ll
+++ test/CodeGen/AMDGPU/infinite-loop.ll
@@ -3,16 +3,75 @@
 
 ; SI-LABEL: {{^}}infinite_loop:
 ; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3e7
-; SI: BB0_1:
+; SI: [[LOOP:BB[0-9]+_[0-9]+]]: ; %loop.body
 ; SI: s_waitcnt lgkmcnt(0)
 ; SI: buffer_store_dword [[REG]]
-; SI: s_branch BB0_1
+; SI: s_branch [[LOOP]]
 define amdgpu_kernel void @infinite_loop(i32 addrspace(1)* %out) {
 entry:
-  br label %for.body
+  br label %loop.body
 
-for.body: ; preds = %entry, %for.body
+loop.body:
   store i32 999, i32 addrspace(1)* %out, align 4
-  br label %for.body
+  br label %loop.body
 }
 
+; SI-LABEL: {{^}}infinite_loop_ret:
+; SI: s_cbranch_scc1 [[RET:BB[0-9]+_[0-9]+]]
+
+; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3e7
+; SI: s_and_b64 vcc, exec, -1
+
+; SI: [[LOOP:BB[0-9]+_[0-9]+]]: ; %loop.body
+; SI: s_waitcnt lgkmcnt(0)
+; SI: buffer_store_dword [[REG]]
+; SI: s_cbranch_vccnz [[LOOP]]
+
+; SI: [[RET]]: ; %DummyRet
+; SI: s_endpgm
+define amdgpu_kernel void @infinite_loop_ret(i32 addrspace(1)* %out) {
+entry:
+  br i1 undef, label %loop.body, label %return
+
+loop.body:
+  store i32 999, i32 addrspace(1)* %out, align 4
+  br label %loop.body
+
+return:
+  ret void
+}
+
+
+; SI-LABEL: {{^}}infinite_loops:
+
+; SI: v_mov_b32_e32 [[REG1:v[0-9]+]], 0x3e7
+; SI: s_and_b64 vcc, exec, -1
+
+; SI: [[LOOP1:BB[0-9]+_[0-9]+]]: ; %loop1.body
+; SI: s_waitcnt lgkmcnt(0)
+; SI: buffer_store_dword [[REG1]]
+; SI: s_cbranch_vccnz [[LOOP1]]
+; SI: s_branch [[RET:BB[0-9]+_[0-9]+]]
+
+; SI: v_mov_b32_e32 [[REG2:v[0-9]+]], 0x378
+; SI: s_and_b64 vcc, exec, -1
+
+; SI: [[LOOP2:BB[0-9]+_[0-9]+]]: ; %loop2.body
+; SI: s_waitcnt lgkmcnt(0)
+; SI: buffer_store_dword [[REG2]]
+; SI: s_cbranch_vccnz [[LOOP2]]
+
+; SI: [[RET]]: ; %DummyRet
+; SI: s_endpgm
+define amdgpu_kernel void @infinite_loops(i32 addrspace(1)* %out) {
+entry:
+  br i1 undef, label %loop1.body, label %loop2.body
+
+loop1.body:
+  store i32 999, i32 addrspace(1)* %out, align 4
+  br label %loop1.body
+
+loop2.body:
+  store i32 888, i32 addrspace(1)* %out, align 4
+  br label %loop2.body
+}
Index: test/CodeGen/AMDGPU/nested-loop-conditions.ll
===================================================================
--- test/CodeGen/AMDGPU/nested-loop-conditions.ll
+++ test/CodeGen/AMDGPU/nested-loop-conditions.ll
@@ -70,7 +70,7 @@
 
 ; GCN: [[BB9:BB[0-9]+_[0-9]+]]: ; %bb9
 ; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-NEXT: s_branch [[BB9]]
+; GCN-NEXT: s_cbranch_vccnz [[BB9]]
 define amdgpu_kernel void @reduced_nested_loop_conditions(i64 addrspace(3)* nocapture %arg) #0 {
 bb:
   %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #1
Index: test/CodeGen/AMDGPU/si-annotate-cf.ll
===================================================================
--- test/CodeGen/AMDGPU/si-annotate-cf.ll
+++ test/CodeGen/AMDGPU/si-annotate-cf.ll
@@ -96,7 +96,7 @@
 ; SI-NEXT: s_cbranch_scc1 [[ENDPGM]]
 
 ; SI: [[INFLOOP:BB[0-9]+_[0-9]+]]
-; SI: s_branch [[INFLOOP]]
+; SI: s_cbranch_vccnz [[INFLOOP]]
 
 ; SI: [[ENDPGM]]:
 ; SI: s_endpgm
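
Illustration (not part of the patch): for a void kernel like @infinite_loop_ret above, the rewritten pass is expected to replace the loop's unconditional back edge with a conditional branch on a constant-true value and a dummy edge to the new "DummyRet" block, roughly:

  ; hypothetical IR after AMDGPUUnifyDivergentExitNodes (sketch only; the block name and
  ; true condition come from the C++ change above, the exact printed IR is an assumption)
  loop.body:
    store i32 999, i32 addrspace(1)* %out, align 4
    br i1 true, label %loop.body, label %DummyRet   ; dummy edge out of the infinite loop

  DummyRet:
    ret void                                        ; RetVal is null for a void return type

The dummy return gives such a kernel an exit block to add to ReturningBlocks, which is why the updated FileCheck patterns look for conditional branches (s_cbranch_vccnz / s_cbranch_vccz) where they previously matched an unconditional s_branch.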