Index: lib/Target/AMDGPU/SIInstructions.td
===================================================================
--- lib/Target/AMDGPU/SIInstructions.td
+++ lib/Target/AMDGPU/SIInstructions.td
@@ -1935,9 +1935,9 @@
 // replaced with exec mask operations.
 def SI_MASK_BRANCH : PseudoInstSI <
   (outs), (ins brtarget:$target, SReg_64:$dst)> {
-  let isBranch = 1;
+  let isBranch = 0;
   let isTerminator = 1;
-  let isBarrier = 1;
+  let isBarrier = 0;
   let SALU = 1;
 }
Index: lib/Target/AMDGPU/SILowerControlFlow.cpp
===================================================================
--- lib/Target/AMDGPU/SILowerControlFlow.cpp
+++ lib/Target/AMDGPU/SILowerControlFlow.cpp
@@ -80,7 +80,7 @@
   bool shouldSkip(MachineBasicBlock *From, MachineBasicBlock *To);
 
-  void Skip(MachineInstr &From, MachineOperand &To);
+  MachineInstr *Skip(MachineInstr &From, MachineOperand &To);
   bool skipIfDead(MachineInstr &MI, MachineBasicBlock &NextBB);
 
   void If(MachineInstr &MI);
@@ -182,14 +182,15 @@
   return false;
 }
 
-void SILowerControlFlow::Skip(MachineInstr &From, MachineOperand &To) {
-
+MachineInstr *SILowerControlFlow::Skip(MachineInstr &From, MachineOperand &To) {
   if (!shouldSkip(*From.getParent()->succ_begin(), To.getMBB()))
-    return;
+    return nullptr;
 
-  DebugLoc DL = From.getDebugLoc();
-  BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
+  const DebugLoc &DL = From.getDebugLoc();
+  MachineInstr *Skip =
+    BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
     .addOperand(To);
+  return Skip;
 }
 
 bool SILowerControlFlow::skipIfDead(MachineInstr &MI, MachineBasicBlock &NextBB) {
@@ -242,10 +243,13 @@
     .addReg(AMDGPU::EXEC)
     .addReg(Reg);
 
-  Skip(MI, MI.getOperand(2));
+  MachineInstr *SkipInst = Skip(MI, MI.getOperand(2));
+
+  // Insert before the new branch instruction.
+  MachineInstr *InsPt = SkipInst ? SkipInst : &MI;
 
   // Insert a pseudo terminator to help keep the verifier happy.
-  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::SI_MASK_BRANCH))
+  BuildMI(MBB, InsPt, DL, TII->get(AMDGPU::SI_MASK_BRANCH))
     .addOperand(MI.getOperand(2))
     .addReg(Reg);
 
@@ -275,10 +279,13 @@
     .addReg(AMDGPU::EXEC)
     .addReg(Dst);
 
-  Skip(MI, MI.getOperand(2));
+  MachineInstr *SkipInst = Skip(MI, MI.getOperand(2));
+
+  // Insert before the new branch instruction.
+  MachineInstr *InsPt = SkipInst ? SkipInst : &MI;
 
   // Insert a pseudo terminator to help keep the verifier happy.
-  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::SI_MASK_BRANCH))
+  BuildMI(MBB, InsPt, DL, TII->get(AMDGPU::SI_MASK_BRANCH))
     .addOperand(MI.getOperand(2))
     .addReg(Dst);
Index: test/CodeGen/AMDGPU/convergent-inlineasm.ll
===================================================================
--- test/CodeGen/AMDGPU/convergent-inlineasm.ll
+++ test/CodeGen/AMDGPU/convergent-inlineasm.ll
@@ -4,7 +4,8 @@
 ; GCN-LABEL: {{^}}convergent_inlineasm:
 ; GCN: BB#0:
 ; GCN: v_cmp_ne_i32_e64
-; GCN: BB#1:
+; GCN: ; mask branch
+; GCN: BB{{[0-9]+_[0-9]+}}:
 define void @convergent_inlineasm(i64 addrspace(1)* nocapture %arg) {
 bb:
   %tmp = call i32 @llvm.amdgcn.workitem.id.x()
@@ -22,9 +23,12 @@
 }
 
 ; GCN-LABEL: {{^}}nonconvergent_inlineasm:
-; GCN: BB#1:
+; GCN: ; mask branch
+
+; GCN: BB{{[0-9]+_[0-9]+}}:
 ; GCN: v_cmp_ne_i32_e64
-; GCN: BB1_2:
+
+; GCN: BB{{[0-9]+_[0-9]+}}:
 define void @nonconvergent_inlineasm(i64 addrspace(1)* nocapture %arg) {
 bb:
   %tmp = call i32 @llvm.amdgcn.workitem.id.x()
Index: test/CodeGen/AMDGPU/skip-if-dead.ll
===================================================================
--- test/CodeGen/AMDGPU/skip-if-dead.ll
+++ test/CodeGen/AMDGPU/skip-if-dead.ll
@@ -202,8 +202,11 @@
 ; CHECK: v_cmp_eq_i32_e32 vcc, 0, v0
 ; CHECK-NEXT: s_and_saveexec_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], vcc
 ; CHECK-NEXT: s_xor_b64 [[SAVEEXEC]], exec, [[SAVEEXEC]]
-; CHECK-NEXT: s_cbranch_execz [[EXIT:BB[0-9]+_[0-9]+]]
-; CHECK-NEXT: ; mask branch [[EXIT]]
+; CHECK-NEXT: ; mask branch [[EXIT:BB[0-9]+_[0-9]+]]
+; CHECK-NEXT: s_cbranch_execz [[EXIT]]
+
+; CHECK: {{BB[0-9]+_[0-9]+}}: ; %bb.preheader
+; CHECK: s_mov_b32
 
 ; CHECK: [[LOOP_BB:BB[0-9]+_[0-9]+]]:
@@ -354,7 +357,7 @@
 ; CHECK: mask branch [[END:BB[0-9]+_[0-9]+]]
 ; CHECK-NOT: branch
 
-; CHECK: ; BB#3: ; %bb8
+; CHECK: BB{{[0-9]+_[0-9]+}}: ; %bb8
 ; CHECK: buffer_store_dword
 
 ; CHECK: [[END]]:
@@ -387,4 +390,4 @@
 declare <4 x float> @llvm.SI.image.sample.c.v4i32(<4 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
 
 attributes #0 = { nounwind }
-attributes #1 = { nounwind readnone }
\ No newline at end of file
+attributes #1 = { nounwind readnone }
Index: test/CodeGen/AMDGPU/uniform-loop-inside-nonuniform.ll
===================================================================
--- test/CodeGen/AMDGPU/uniform-loop-inside-nonuniform.ll
+++ test/CodeGen/AMDGPU/uniform-loop-inside-nonuniform.ll
@@ -5,6 +5,11 @@
 ; CHECK-LABEL: {{^}}test1:
 ; CHECK: v_cmp_ne_i32_e32 vcc, 0
 ; CHECK: s_and_saveexec_b64
+; CHECK-NEXT: s_xor_b64
+; CHECK-NEXT: ; mask branch
+; CHECK-NEXT: s_cbranch_execz
+
+; CHECK-NEXT: BB{{[0-9]+_[0-9]+}}: ; %loop_body.preheader
 
 ; CHECK: [[LOOP_BODY_LABEL:BB[0-9]+_[0-9]+]]:
 ; CHECK: s_and_b64 vcc, exec, vcc
@@ -30,10 +35,11 @@
   ret void
 }
 
-;CHECK-LABEL: {{^}}test2:
-;CHECK: s_and_saveexec_b64
-;CHECK: s_xor_b64
-;CHECK-NEXT: s_cbranch_execz
+; CHECK-LABEL: {{^}}test2:
+; CHECK: s_and_saveexec_b64
+; CHECK-NEXT: s_xor_b64
+; CHECK-NEXT: ; mask branch
+; CHECK-NEXT: s_cbranch_execz
 define void @test2(i32 addrspace(1)* %out, i32 %a, i32 %b) {
 main_body:
   %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
Index: test/CodeGen/AMDGPU/valu-i1.ll
===================================================================
--- test/CodeGen/AMDGPU/valu-i1.ll
+++ test/CodeGen/AMDGPU/valu-i1.ll
@@ -47,7 +47,7 @@
 ; SI: s_and_saveexec_b64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], vcc
 ; SI: s_xor_b64 [[BR_SREG]], exec, [[BR_SREG]]
 
-; SI: ; BB#1
+; SI: BB{{[0-9]+_[0-9]+}}:
 ; SI: buffer_store_dword
 ; SI: s_endpgm
 
@@ -68,7 +68,7 @@
   ret void
 }
 
-; SI-LABEL: @simple_test_v_loop
+; SI-LABEL: {{^}}simple_test_v_loop:
 ; SI: v_cmp_ne_i32_e32 vcc, 0, v{{[0-9]+}}
 ; SI: s_and_saveexec_b64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], vcc
 ; SI: s_xor_b64 [[BR_SREG]], exec, [[BR_SREG]]
@@ -106,7 +106,7 @@
   ret void
 }
 
-; SI-LABEL: @multi_vcond_loop
+; SI-LABEL: {{^}}multi_vcond_loop:
 ; Load loop limit from buffer
 ; Branch to exit if uniformly not taken
@@ -118,7 +118,7 @@
 ; SI: s_cbranch_execz [[LABEL_EXIT:BB[0-9]+_[0-9]+]]
 
 ; Initialize inner condition to false
-; SI: ; BB#1:
+; SI: BB{{[0-9]+_[0-9]+}}: ; %bb10.preheader
 ; SI: s_mov_b64 [[ZERO:s\[[0-9]+:[0-9]+\]]], 0{{$}}
 ; SI: s_mov_b64 [[COND_STATE:s\[[0-9]+:[0-9]+\]]], [[ZERO]]
@@ -133,7 +133,7 @@
 ; SI: s_xor_b64 [[ORNEG2]], exec, [[ORNEG2]]
 ; SI: s_cbranch_execz [[LABEL_FLOW:BB[0-9]+_[0-9]+]]
 
-; SI: BB#3:
+; SI: BB{{[0-9]+_[0-9]+}}: ; %bb20
 ; SI: buffer_store_dword
 ; SI: v_cmp_ge_i64_e32 [[CMP:s\[[0-9]+:[0-9]+\]|vcc]]
 ; SI: s_or_b64 [[TMP:s\[[0-9]+:[0-9]+\]]], [[CMP]], [[COND_STATE]]
Index: test/CodeGen/AMDGPU/wqm.ll
===================================================================
--- test/CodeGen/AMDGPU/wqm.ll
+++ test/CodeGen/AMDGPU/wqm.ll
@@ -123,7 +123,7 @@
 ;CHECK-NEXT: s_and_b64 [[SAVED]], exec, [[SAVED]]
 ;CHECK-NEXT: s_xor_b64 exec, exec, [[SAVED]]
 ;CHECK-NEXT: mask branch [[END_BB:BB[0-9]+_[0-9]+]]
-;CHECK-NEXT: ; BB#3: ; %ELSE
+;CHECK-NEXT: BB{{[0-9]+_[0-9]+}}: ; %ELSE
 ;CHECK: store_dword
 ;CHECK: [[END_BB]]: ; %END
 ;CHECK: s_or_b64 exec, exec,