Index: lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -1674,6 +1674,16 @@
   unsigned CondReg = UseSCCBr ? AMDGPU::SCC : AMDGPU::VCC;
   SDLoc SL(N);
 
+  if (!UseSCCBr) {
+    // Clear unused bits of vcc. (For the case where we select S_CBRANCH_SCC1
+    // and it gets changed to S_CBRANCH_VCCNZ in SIFixSGPRCopies, the S_AND is
+    // inserted there instead.)
+    Cond = SDValue(CurDAG->getMachineNode(AMDGPU::S_AND_B64, SL, MVT::i1,
+                       CurDAG->getRegister(AMDGPU::EXEC, MVT::i1),
+                       Cond),
+                   0);
+  }
+
   SDValue VCC = CurDAG->getCopyToReg(N->getOperand(0), SL, CondReg, Cond);
   CurDAG->SelectNodeTo(N, BrOp, MVT::Other,
                        N->getOperand(2), // Basic Block
Index: test/CodeGen/AMDGPU/branch-relaxation.ll
===================================================================
--- test/CodeGen/AMDGPU/branch-relaxation.ll
+++ test/CodeGen/AMDGPU/branch-relaxation.ll
@@ -100,7 +100,8 @@
 ; GCN-LABEL: {{^}}uniform_conditional_min_long_forward_vcnd_branch:
 ; GCN: s_load_dword [[CND:s[0-9]+]]
 ; GCN-DAG: v_mov_b32_e32 [[V_CND:v[0-9]+]], [[CND]]
-; GCN-DAG: v_cmp_eq_f32_e64 vcc, [[CND]], 0
+; GCN-DAG: v_cmp_eq_f32_e64 [[UNMASKED:s\[[0-9]+:[0-9]+\]]], [[CND]], 0
+; GCN-DAG: s_and_b64 vcc, exec, [[UNMASKED]]
 ; GCN: s_cbranch_vccz [[LONGBB:BB[0-9]+_[0-9]+]]
 
 ; GCN-NEXT: [[LONG_JUMP:BB[0-9]+_[0-9]+]]: ; %bb0
@@ -500,8 +501,7 @@
 ; GCN: s_setpc_b64
 
 ; GCN: [[LONG_BR_DEST0]]
-; GCN: v_cmp_ne_u32_e32
-; GCN-NEXT: s_cbranch_vccz
+; GCN: s_cbranch_vccz
 ; GCN: s_setpc_b64
 
 ; GCN: s_endpgm
@@ -520,6 +520,11 @@
   br i1 %tmp12, label %bb19, label %bb14
 
 bb13: ; preds = %bb
+  call void asm sideeffect
+    "v_nop_e64
+    v_nop_e64
+    v_nop_e64
+    v_nop_e64", ""() #0
   br i1 %tmp6, label %bb19, label %bb14
 
 bb14: ; preds = %bb13, %bb9
Index: test/CodeGen/AMDGPU/cf-loop-on-constant.ll
===================================================================
--- test/CodeGen/AMDGPU/cf-loop-on-constant.ll
+++ test/CodeGen/AMDGPU/cf-loop-on-constant.ll
@@ -95,7 +95,7 @@
 ; GCN-LABEL: {{^}}loop_arg_0:
 ; GCN: v_and_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
-; GCN: v_cmp_eq_u32_e32 vcc, 1,
+; GCN: v_cmp_eq_u32{{[^,]*}}, 1,
 
 ; GCN: [[LOOPBB:BB[0-9]+_[0-9]+]]
 ; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80
Index: test/CodeGen/AMDGPU/nested-loop-conditions.ll
===================================================================
--- test/CodeGen/AMDGPU/nested-loop-conditions.ll
+++ test/CodeGen/AMDGPU/nested-loop-conditions.ll
@@ -63,8 +63,7 @@
 ; GCN-NEXT: s_cbranch_scc1
 
 ; FIXME: Should fold to unconditional branch?
-; GCN: s_mov_b64 vcc, -1
-; GCN-NEXT: ; implicit-def
+; GCN: ; implicit-def
 ; GCN: s_cbranch_vccz
 
 ; GCN: ds_read_b32
Index: test/CodeGen/AMDGPU/scalar-branch-missing-and-exec.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/scalar-branch-missing-and-exec.ll
@@ -0,0 +1,54 @@
+; RUN: llc -march=amdgcn -mcpu=gfx600 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=gfx800 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck %s
+
+; This checks for a bug where uniform control flow can result in multiple
+; v_cmp results being combined together with s_and_b64, s_or_b64 and s_xor_b64,
+; using the resulting mask in s_cbranch_vccnz
+; without ensuring that the resulting mask has bits clear for inactive lanes.
+;
+; The problematic case is s_xor_b64, as, unlike the other ops, it can actually
+; set bits for inactive lanes.
+;
+; The check for an s_xor_b64 is just to check that this test tests what it is
+; supposed to test. If the s_xor_b64 disappears due to some other case, it does
+; not necessarily mean that the bug has reappeared.
+;
+; The check for "s_and_b64 vcc, exec, something" checks that the bug is fixed.
+
+; CHECK: {{^}}main:
+; CHECK: s_xor_b64
+; CHECK: s_and_b64 vcc, exec,
+
+define amdgpu_cs void @main(i32 inreg %arg) {
+.entry:
+  %tmp44 = load volatile <2 x float>, <2 x float> addrspace(1)* undef
+  %tmp16 = load volatile float, float addrspace(1)* undef
+  %tmp22 = load volatile float, float addrspace(1)* undef
+  %tmp25 = load volatile float, float addrspace(1)* undef
+  %tmp31 = fcmp olt float %tmp16, 0x3FA99999A0000000
+  br i1 %tmp31, label %bb, label %.exit.thread
+
+bb: ; preds = %.entry
+  %tmp42 = fcmp olt float %tmp25, 0x3FA99999A0000000
+  br i1 %tmp42, label %bb43, label %.exit.thread
+
+bb43:
+  %tmp46 = fcmp olt <2 x float> %tmp44, <float 0x3FA99999A0000000, float 0x3FA99999A0000000>
+  %tmp47 = extractelement <2 x i1> %tmp46, i32 0
+  %tmp48 = extractelement <2 x i1> %tmp46, i32 1
+  %tmp49 = and i1 %tmp47, %tmp48
+  br i1 %tmp49, label %bb50, label %.exit.thread
+
+bb50:
+  %tmp53 = fcmp olt float %tmp22, 0x3FA99999A0000000
+  br i1 %tmp53, label %.exit3.i, label %.exit.thread
+
+.exit3.i:
+  store volatile i32 0, i32 addrspace(1)* undef
+  br label %.exit.thread
+
+.exit.thread:
+  ret void
+}
Index: test/CodeGen/AMDGPU/select-opt.ll
===================================================================
--- test/CodeGen/AMDGPU/select-opt.ll
+++ test/CodeGen/AMDGPU/select-opt.ll
@@ -134,8 +134,8 @@
 }
 
 ; GCN-LABEL: {{^}}regression:
-; GCN: v_cmp_neq_f32_e64 vcc
-; GCN: v_cmp_neq_f32_e64 vcc, s{{[0-9]+}}, 0
+; GCN: v_cmp_neq_f32_e64
+; GCN: v_cmp_neq_f32_e64 {{[^,]*}}, s{{[0-9]+}}, 0
 ; GCN: v_cmp_ne_u32_e32 vcc, 0, v{{[0-9]+}}
 
 define amdgpu_kernel void @regression(float addrspace(1)* %out, float %c0, float %c1) #0 {
Index: test/CodeGen/AMDGPU/skip-if-dead.ll
===================================================================
--- test/CodeGen/AMDGPU/skip-if-dead.ll
+++ test/CodeGen/AMDGPU/skip-if-dead.ll
@@ -267,7 +267,7 @@
 ; CHECK: [[PHIBB]]:
 ; CHECK: v_cmp_eq_f32_e32 vcc, 0, [[PHIREG]]
-; CHECK-NEXT: s_cbranch_vccz [[ENDBB:BB[0-9]+_[0-9]+]]
+; CHECK: s_cbranch_vccz [[ENDBB:BB[0-9]+_[0-9]+]]
 
 ; CHECK: ; %bb10
 ; CHECK: v_mov_b32_e32 v{{[0-9]+}}, 9
@@ -302,14 +302,14 @@
 ; CHECK-LABEL: {{^}}no_skip_no_successors:
 ; CHECK: v_cmp_nge_f32
-; CHECK-NEXT: s_cbranch_vccz [[SKIPKILL:BB[0-9]+_[0-9]+]]
+; CHECK: s_cbranch_vccz [[SKIPKILL:BB[0-9]+_[0-9]+]]
 
 ; CHECK: ; %bb6
 ; CHECK: s_mov_b64 exec, 0
 
 ; CHECK: [[SKIPKILL]]:
 ; CHECK: v_cmp_nge_f32_e32 vcc
-; CHECK-NEXT: %bb.3: ; %bb5
+; CHECK: %bb.3: ; %bb5
 ; CHECK-NEXT: .Lfunc_end{{[0-9]+}}
 define amdgpu_ps void @no_skip_no_successors(float inreg %arg, float inreg %arg1) #0 {
 bb:
Index: test/CodeGen/AMDGPU/smrd-vccz-bug.ll
===================================================================
--- test/CodeGen/AMDGPU/smrd-vccz-bug.ll
+++ test/CodeGen/AMDGPU/smrd-vccz-bug.ll
@@ -4,7 +4,7 @@
 ; GCN-FUNC: {{^}}vccz_workaround:
 ; GCN: s_load_dword s{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0x0
-; GCN: v_cmp_neq_f32_e64 vcc, s{{[0-9]+}}, 0{{$}}
+; GCN: v_cmp_neq_f32_e64 {{[^,]*}}, s{{[0-9]+}}, 0{{$}}
 ; VCCZ-BUG: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; VCCZ-BUG: s_mov_b64 vcc, vcc
 ; NOVCCZ-BUG-NOT: s_mov_b64 vcc, vcc
Index: test/CodeGen/AMDGPU/uniform-cfg.ll
===================================================================
--- test/CodeGen/AMDGPU/uniform-cfg.ll
+++ test/CodeGen/AMDGPU/uniform-cfg.ll
@@ -251,7 +251,7 @@
 ; GCN: s_load_dword [[COND:s[0-9]+]]
 ; GCN: s_cmp_lt_i32 [[COND]], 1
 ; GCN: s_cbranch_scc1 [[EXIT:[A-Za-z0-9_]+]]
-; GCN: v_cmp_gt_i32_e64 vcc, [[COND]], 0{{$}}
+; GCN: v_cmp_gt_i32_e64 {{[^,]*}}, [[COND]], 0{{$}}
 ; GCN: s_cbranch_vccz [[BODY:[A-Za-z0-9_]+]]
 ; GCN: {{^}}[[EXIT]]:
 ; GCN: s_endpgm
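
For context, here is a rough, hand-written sketch of the instruction shape this change
aims to produce for a uniform branch whose condition is a negated VALU compare. It is
illustrative only, not output from an actual compilation; the registers, the specific
compare, and the block label are made up:

    v_cmp_lt_f32_e64 s[4:5], v0, v1    ; VALU compare: bits for inactive lanes stay clear
    s_xor_b64 s[4:5], s[4:5], -1       ; scalar negation can set bits for inactive lanes
    s_and_b64 vcc, exec, s[4:5]        ; inserted by this patch: keep only active-lane bits
    s_cbranch_vccnz BB0_2              ; branch now depends only on active lanes

Without the s_and_b64, s_cbranch_vccnz tests a vcc value whose inactive-lane bits may be
set, so the branch can be taken even when every active lane's condition is false; that is
the failure mode exercised by scalar-branch-missing-and-exec.ll above.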