diff --git a/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll b/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
--- a/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
+++ b/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
@@ -1,76 +1,102 @@
-; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,SI %s
+; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,GFX10 %s
 
-; CHECK-LABEL: {{^}}test_kill_depth_0_imm_pos:
-; CHECK-NEXT: ; %bb.0:
-; CHECK-NEXT: ; %bb.1:
-; CHECK-NEXT: s_endpgm
 define amdgpu_ps void @test_kill_depth_0_imm_pos() #0 {
+; GCN-LABEL: test_kill_depth_0_imm_pos:
+; GCN: ; %bb.0:
+; GCN-NEXT: ; %bb.1:
+; GCN-NEXT: s_endpgm
   call void @llvm.amdgcn.kill(i1 true)
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_kill_depth_0_imm_neg:
-; CHECK-NEXT: ; %bb.0:
-; CHECK-NEXT: s_mov_b64 exec, 0
-; CHECK-NEXT: s_cbranch_execz BB1_2
-; CHECK-NEXT: ; %bb.1:
-; CHECK-NEXT: s_endpgm
-; CHECK-NEXT: BB1_2:
-; CHECK-NEXT: exp null off, off, off, off done vm
-; CHECK-NEXT: s_endpgm
 define amdgpu_ps void @test_kill_depth_0_imm_neg() #0 {
+; GCN-LABEL: test_kill_depth_0_imm_neg:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_mov_b64 exec, 0
+; GCN-NEXT: s_cbranch_execz BB1_2
+; GCN-NEXT: ; %bb.1:
+; GCN-NEXT: s_endpgm
+; GCN-NEXT: BB1_2:
+; GCN-NEXT: exp null off, off, off, off done vm
+; GCN-NEXT: s_endpgm
   call void @llvm.amdgcn.kill(i1 false)
   ret void
 }
 
-; FIXME: Ideally only one would be emitted
-; CHECK-LABEL: {{^}}test_kill_depth_0_imm_neg_x2:
-; CHECK-NEXT: ; %bb.0:
-; CHECK-NEXT: s_mov_b64 exec, 0
-; CHECK-NEXT: s_cbranch_execz BB2_3
-; CHECK-NEXT: ; %bb.1:
-; CHECK-NEXT: s_mov_b64 exec, 0
-; CHECK-NEXT: s_cbranch_execz BB2_3
-; CHECK-NEXT: ; %bb.2:
-; CHECK-NEXT: s_endpgm
-; CHECK-NEXT: BB2_3:
-; CHECK: exp null
-; CHECK-NEXT: s_endpgm
+; FIXME: Ideally only one early-exit would be emitted
 define amdgpu_ps void @test_kill_depth_0_imm_neg_x2() #0 {
+; GCN-LABEL: test_kill_depth_0_imm_neg_x2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_mov_b64 exec, 0
+; GCN-NEXT: s_cbranch_execz BB2_3
+; GCN-NEXT: ; %bb.1:
+; GCN-NEXT: s_mov_b64 exec, 0
+; GCN-NEXT: s_cbranch_execz BB2_3
+; GCN-NEXT: ; %bb.2:
+; GCN-NEXT: s_endpgm
+; GCN-NEXT: BB2_3:
+; GCN-NEXT: exp null off, off, off, off done vm
+; GCN-NEXT: s_endpgm
   call void @llvm.amdgcn.kill(i1 false)
   call void @llvm.amdgcn.kill(i1 false)
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_kill_depth_var:
-; CHECK-NEXT: ; %bb.0:
-; CHECK-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v0
-; CHECK-NEXT: s_cbranch_execz BB3_2
-; CHECK-NEXT: ; %bb.1:
-; CHECK-NEXT: s_endpgm
-; CHECK-NEXT: BB3_2:
-; CHECK: exp null
-; CHECK-NEXT: s_endpgm
 define amdgpu_ps void @test_kill_depth_var(float %x) #0 {
+; SI-LABEL: test_kill_depth_var:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v0
+; SI-NEXT: s_cbranch_execz BB3_2
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_endpgm
+; SI-NEXT: BB3_2:
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: test_kill_depth_var:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmpx_gt_f32_e32 0, v0
+; GFX10-NEXT: s_cbranch_execz BB3_2
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: BB3_2:
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
   %cmp = fcmp olt float %x, 0.0
   call void @llvm.amdgcn.kill(i1 %cmp)
   ret void
 }
 
-; FIXME: Ideally only one would be emitted
-; CHECK-LABEL: {{^}}test_kill_depth_var_x2_same:
-; CHECK-NEXT: ; %bb.0:
-; CHECK-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v0
-; CHECK-NEXT: s_cbranch_execz BB4_3
-; CHECK-NEXT: ; %bb.1:
-; CHECK-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v0
-; CHECK-NEXT: s_cbranch_execz BB4_3
-; CHECK-NEXT: ; %bb.2:
-; CHECK-NEXT: s_endpgm
-; CHECK-NEXT: BB4_3:
-; CHECK: exp null
-; CHECK-NEXT: s_endpgm
+; FIXME: Ideally only one early-exit would be emitted
 define amdgpu_ps void @test_kill_depth_var_x2_same(float %x) #0 {
+; SI-LABEL: test_kill_depth_var_x2_same:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v0
+; SI-NEXT: s_cbranch_execz BB4_3
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v0
+; SI-NEXT: s_cbranch_execz BB4_3
+; SI-NEXT: ; %bb.2:
+; SI-NEXT: s_endpgm
+; SI-NEXT: BB4_3:
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: test_kill_depth_var_x2_same:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmpx_gt_f32_e32 0, v0
+; GFX10-NEXT: s_cbranch_execz BB4_3
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_waitcnt_depctr 0xfffe
+; GFX10-NEXT: v_cmpx_gt_f32_e32 0, v0
+; GFX10-NEXT: s_cbranch_execz BB4_3
+; GFX10-NEXT: ; %bb.2:
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: BB4_3:
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
   %cmp = fcmp olt float %x, 0.0
   call void @llvm.amdgcn.kill(i1 %cmp)
   call void @llvm.amdgcn.kill(i1 %cmp)
@@ -78,19 +104,33 @@
 }
 
 ; FIXME: Ideally only one early-exit would be emitted
-; CHECK-LABEL: {{^}}test_kill_depth_var_x2:
-; CHECK-NEXT: ; %bb.0:
-; CHECK-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v0
-; CHECK-NEXT: s_cbranch_execz BB5_3
-; CHECK-NEXT: ; %bb.1
-; CHECK-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v1
-; CHECK-NEXT: s_cbranch_execz BB5_3
-; CHECK-NEXT: ; %bb.2
-; CHECK-NEXT: s_endpgm
-; CHECK-NEXT: BB5_3:
-; CHECK: exp null
-; CHECK-NEXT: s_endpgm
 define amdgpu_ps void @test_kill_depth_var_x2(float %x, float %y) #0 {
+; SI-LABEL: test_kill_depth_var_x2:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v0
+; SI-NEXT: s_cbranch_execz BB5_3
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v1
+; SI-NEXT: s_cbranch_execz BB5_3
+; SI-NEXT: ; %bb.2:
+; SI-NEXT: s_endpgm
+; SI-NEXT: BB5_3:
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: test_kill_depth_var_x2:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmpx_gt_f32_e32 0, v0
+; GFX10-NEXT: s_cbranch_execz BB5_3
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_waitcnt_depctr 0xfffe
+; GFX10-NEXT: v_cmpx_gt_f32_e32 0, v1
+; GFX10-NEXT: s_cbranch_execz BB5_3
+; GFX10-NEXT: ; %bb.2:
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: BB5_3:
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
   %cmp.x = fcmp olt float %x, 0.0
   call void @llvm.amdgcn.kill(i1 %cmp.x)
   %cmp.y = fcmp olt float %y, 0.0
@@ -98,20 +138,39 @@
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_kill_depth_var_x2_instructions:
-; CHECK-NEXT: ; %bb.0:
-; CHECK-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v0
-; CHECK-NEXT: s_cbranch_execz BB6_3
-; CHECK-NEXT: ; %bb.1:
-; CHECK: v_mov_b32_e64 v7, -1
-; CHECK: v_cmpx_gt_f32_e32 vcc, 0, v7
-; CHECK-NEXT: s_cbranch_execz BB6_3
-; CHECK-NEXT: ; %bb.2:
-; CHECK-NEXT: s_endpgm
-; CHECK-NEXT: BB6_3:
-; CHECK-NEXT: exp null
-; CHECK-NEXT: s_endpgm
 define amdgpu_ps void @test_kill_depth_var_x2_instructions(float %x) #0 {
+; SI-LABEL: test_kill_depth_var_x2_instructions:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v0
+; SI-NEXT: s_cbranch_execz BB6_3
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: ;;#ASMSTART
+; SI-NEXT: v_mov_b32_e64 v7, -1
+; SI-NEXT: ;;#ASMEND
+; SI-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v7
+; SI-NEXT: s_cbranch_execz BB6_3
+; SI-NEXT: ; %bb.2:
+; SI-NEXT: s_endpgm
+; SI-NEXT: BB6_3:
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: test_kill_depth_var_x2_instructions:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmpx_gt_f32_e32 0, v0
+; GFX10-NEXT: s_cbranch_execz BB6_3
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: ;;#ASMSTART
+; GFX10-NEXT: v_mov_b32_e64 v7, -1
+; GFX10-NEXT: ;;#ASMEND
+; GFX10-NEXT: s_waitcnt_depctr 0xfffe
+; GFX10-NEXT: v_cmpx_gt_f32_e32 0, v7
+; GFX10-NEXT: s_cbranch_execz BB6_3
+; GFX10-NEXT: ; %bb.2:
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: BB6_3:
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
   %cmp.x = fcmp olt float %x, 0.0
   call void @llvm.amdgcn.kill(i1 %cmp.x)
   %y = call float asm sideeffect "v_mov_b32_e64 v7, -1", "={v7}"()
@@ -121,38 +180,58 @@
 }
 
 ; FIXME: why does the skip depend on the asm length in the same block?
-
-; CHECK-LABEL: {{^}}test_kill_control_flow:
-; CHECK: s_cmp_lg_u32 s{{[0-9]+}}, 0
-; CHECK: s_cbranch_scc1 [[RETURN_BB:BB[0-9]+_[0-9]+]]
-
-; CHECK-NEXT: ; %bb.1:
-; CHECK: v_mov_b32_e64 v7, -1
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-
-; CHECK: v_cmpx_gt_f32_e32 vcc, 0, v7
-
-; TODO: We could do an early-exit here (the branch above is uniform!)
-; CHECK-NOT: exp null
-
-; CHECK: v_mov_b32_e32 v0, 1.0
 define amdgpu_ps float @test_kill_control_flow(i32 inreg %arg) #0 {
+; SI-LABEL: test_kill_control_flow:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_cmp_lg_u32 s0, 0
+; SI-NEXT: s_cbranch_scc1 BB7_2
+; SI-NEXT: ; %bb.1: ; %bb
+; SI-NEXT: ;;#ASMSTART
+; SI-NEXT: v_mov_b32_e64 v7, -1
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: ;;#ASMEND
+; SI-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v7
+; SI-NEXT: BB7_2: ; %exit
+; SI-NEXT: v_mov_b32_e32 v0, 1.0
+; SI-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: test_kill_control_flow:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_cmp_lg_u32 s0, 0
+; GFX10-NEXT: s_cbranch_scc1 BB7_2
+; GFX10-NEXT: ; %bb.1: ; %bb
+; GFX10-NEXT: ;;#ASMSTART
+; GFX10-NEXT: v_mov_b32_e64 v7, -1
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: ;;#ASMEND
+; GFX10-NEXT: v_cmpx_gt_f32_e32 0, v7
+; GFX10-NEXT: BB7_2: ; %exit
+; GFX10-NEXT: v_mov_b32_e32 v0, 1.0
+; GFX10-NEXT: ; return to shader part epilog
 entry:
   %cmp = icmp eq i32 %arg, 0
   br i1 %cmp, label %bb, label %exit
 
 bb:
-  %var = call float asm sideeffect "
-    v_mov_b32_e64 v7, -1
+  %var = call float asm sideeffect "v_mov_b32_e64 v7, -1
     v_nop_e64
     v_nop_e64
     v_nop_e64
@@ -164,6 +243,7 @@
     v_nop_e64
     v_nop_e64", "={v7}"()
   %cmp.var = fcmp olt float %var, 0.0
+  ; TODO: We could do an early-exit here (the branch above is uniform!)
   call void @llvm.amdgcn.kill(i1 %cmp.var)
   br label %exit
 
@@ -171,43 +251,91 @@
   ret float 1.0
 }
 
-; CHECK-LABEL: {{^}}test_kill_control_flow_remainder:
-; CHECK: s_cmp_lg_u32 s{{[0-9]+}}, 0
-; CHECK-NEXT: v_mov_b32_e32 v{{[0-9]+}}, 0
-; CHECK-NEXT: s_cbranch_scc1 [[RETURN_BB:BB[0-9]+_[0-9]+]]
-
-; CHECK-NEXT: ; %bb.1: ; %bb
-; CHECK: v_mov_b32_e64 v7, -1
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: ;;#ASMEND
-; CHECK: v_mov_b32_e64 v8, -1
-; CHECK: ;;#ASMEND
-; CHECK: v_cmpx_gt_f32_e32 vcc, 0, v7
-
-; TODO: We could do an early-exit here (the branch above is uniform!)
-; CHECK-NOT: exp null
-
-; CHECK: buffer_store_dword v8
-; CHECK: v_mov_b32_e64 v9, -2
-
-; CHECK: {{^}}BB{{[0-9]+_[0-9]+}}:
-; CHECK: buffer_store_dword v9
-; CHECK-NEXT: s_endpgm
 define amdgpu_ps void @test_kill_control_flow_remainder(i32 inreg %arg) #0 {
+; SI-LABEL: test_kill_control_flow_remainder:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_cmp_lg_u32 s0, 0
+; SI-NEXT: v_mov_b32_e32 v9, 0
+; SI-NEXT: s_cbranch_scc1 BB8_3
+; SI-NEXT: ; %bb.1: ; %bb
+; SI-NEXT: ;;#ASMSTART
+; SI-NEXT: v_mov_b32_e64 v7, -1
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: ;;#ASMEND
+; SI-NEXT: ;;#ASMSTART
+; SI-NEXT: v_mov_b32_e64 v8, -1
+; SI-NEXT: ;;#ASMEND
+; SI-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v7
+; SI-NEXT: ; %bb.2: ; %bb
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], 0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: ;;#ASMSTART
+; SI-NEXT: v_mov_b32_e64 v9, -2
+; SI-NEXT: ;;#ASMEND
+; SI-NEXT: BB8_3: ; %exit
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: test_kill_control_flow_remainder:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: v_mov_b32_e32 v9, 0
+; GFX10-NEXT: s_cmp_lg_u32 s0, 0
+; GFX10-NEXT: s_cbranch_scc0 BB8_2
+; GFX10-NEXT: ; %bb.1: ; %exit
+; GFX10-NEXT: global_store_dword v[0:1], v9, off
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: BB8_2: ; %bb
+; GFX10-NEXT: ;;#ASMSTART
+; GFX10-NEXT: v_mov_b32_e64 v7, -1
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: ;;#ASMEND
+; GFX10-NEXT: ;;#ASMSTART
+; GFX10-NEXT: v_mov_b32_e64 v8, -1
+; GFX10-NEXT: ;;#ASMEND
+; GFX10-NEXT: v_cmpx_gt_f32_e32 0, v7
+; GFX10-NEXT: s_cbranch_execz BB8_4
+; GFX10-NEXT: ; %bb.3: ; %bb
+; GFX10-NEXT: s_nop 3
+; GFX10-NEXT: global_store_dword v[0:1], v8, off
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: ;;#ASMSTART
+; GFX10-NEXT: v_mov_b32_e64 v9, -2
+; GFX10-NEXT: ;;#ASMEND
+; GFX10-NEXT: global_store_dword v[0:1], v9, off
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: BB8_4:
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
 entry:
   %cmp = icmp eq i32 %arg, 0
   br i1 %cmp, label %bb, label %exit
 
 bb:
-  %var = call float asm sideeffect "
-    v_mov_b32_e64 v7, -1
+  %var = call float asm sideeffect "v_mov_b32_e64 v7, -1
    v_nop_e64
    v_nop_e64
    v_nop_e64
@@ -221,6 +349,7 @@
    v_nop_e64", "={v7}"()
   %live.across = call float asm sideeffect "v_mov_b32_e64 v8, -1", "={v8}"()
   %cmp.var = fcmp olt float %var, 0.0
+  ; TODO: We could do an early-exit here (the branch above is uniform!)
   call void @llvm.amdgcn.kill(i1 %cmp.var)
   store volatile float %live.across, float addrspace(1)* undef
   %live.out = call float asm sideeffect "v_mov_b32_e64 v9, -2", "={v9}"()
@@ -232,36 +361,70 @@
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_kill_control_flow_return:
-
-; CHECK: v_cmp_eq_u32_e64 [[KILL_CC:s\[[0-9]+:[0-9]+\]]], s0, 1
-; CHECK: s_and_b64 exec, exec, s[2:3]
-; CHECK-NEXT: s_cbranch_execz [[EXIT_BB:BB[0-9]+_[0-9]+]]
-
-; CHECK: s_cmp_lg_u32 s{{[0-9]+}}, 0
-; CHECK: s_cbranch_scc0 [[COND_BB:BB[0-9]+_[0-9]+]]
-; CHECK: s_branch [[RETURN_BB:BB[0-9]+_[0-9]+]]
-
-; CHECK: [[COND_BB]]:
-; CHECK: v_mov_b32_e64 v7, -1
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: v_nop_e64
-; CHECK: v_mov_b32_e32 v0, v7
-
-; CHECK: [[EXIT_BB]]:
-; CHECK-NEXT: exp null
-; CHECK-NEXT: s_endpgm
-
-; CHECK: [[RETURN_BB]]:
 define amdgpu_ps float @test_kill_control_flow_return(i32 inreg %arg) #0 {
+; SI-LABEL: test_kill_control_flow_return:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: v_cmp_eq_u32_e64 s[2:3], s0, 1
+; SI-NEXT: s_and_b64 exec, exec, s[2:3]
+; SI-NEXT: s_cbranch_execz BB9_4
+; SI-NEXT: ; %bb.1: ; %entry
+; SI-NEXT: s_cmp_lg_u32 s0, 0
+; SI-NEXT: v_mov_b32_e32 v0, 0
+; SI-NEXT: s_cbranch_scc0 BB9_3
+; SI-NEXT: ; %bb.2: ; %exit
+; SI-NEXT: s_branch BB9_5
+; SI-NEXT: BB9_3: ; %bb
+; SI-NEXT: ;;#ASMSTART
+; SI-NEXT: v_mov_b32_e64 v7, -1
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: ;;#ASMEND
+; SI-NEXT: v_mov_b32_e32 v0, v7
+; SI-NEXT: s_branch BB9_5
+; SI-NEXT: BB9_4:
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+; SI-NEXT: BB9_5:
+;
+; GFX10-LABEL: test_kill_control_flow_return:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: v_cmp_eq_u32_e64 s[2:3], s0, 1
+; GFX10-NEXT: s_and_b64 exec, exec, s[2:3]
+; GFX10-NEXT: s_cbranch_execz BB9_4
+; GFX10-NEXT: ; %bb.1: ; %entry
+; GFX10-NEXT: v_mov_b32_e32 v0, 0
+; GFX10-NEXT: s_cmp_lg_u32 s0, 0
+; GFX10-NEXT: s_cbranch_scc0 BB9_3
+; GFX10-NEXT: ; %bb.2: ; %exit
+; GFX10-NEXT: s_branch BB9_5
+; GFX10-NEXT: BB9_3: ; %bb
+; GFX10-NEXT: ;;#ASMSTART
+; GFX10-NEXT: v_mov_b32_e64 v7, -1
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: ;;#ASMEND
+; GFX10-NEXT: v_mov_b32_e32 v0, v7
+; GFX10-NEXT: s_branch BB9_5
+; GFX10-NEXT: BB9_4:
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: BB9_5:
 entry:
   %kill = icmp eq i32 %arg, 1
   %cmp = icmp eq i32 %arg, 0
@@ -269,8 +432,7 @@
   br i1 %cmp, label %bb, label %exit
 
 bb:
-  %var = call float asm sideeffect "
-    v_mov_b32_e64 v7, -1
+  %var = call float asm sideeffect "v_mov_b32_e64 v7, -1
    v_nop_e64
    v_nop_e64
    v_nop_e64
@@ -288,39 +450,101 @@
   ret float %ret
 }
 
-; CHECK-LABEL: {{^}}test_kill_divergent_loop:
-; CHECK: v_cmp_eq_u32_e32 vcc, 0, v0
-; CHECK-NEXT: s_and_saveexec_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], vcc
-; CHECK-NEXT: s_xor_b64 [[SAVEEXEC]], exec, [[SAVEEXEC]]
-; CHECK-NEXT: s_cbranch_execz [[EXIT:BB[0-9]+_[0-9]+]]
-
-; CHECK: ; %bb.{{[0-9]+}}: ; %bb.preheader
-; CHECK: s_mov_b32
-
-; CHECK: [[LOOP_BB:BB[0-9]+_[0-9]+]]:
-
-; CHECK: v_mov_b32_e64 v7, -1
-; CHECK: v_nop_e64
-; CHECK: v_cmpx_gt_f32_e32 vcc, 0, v7
-
-; CHECK-NEXT: ; %bb.3:
-; CHECK: buffer_load_dword [[LOAD:v[0-9]+]]
-; CHECK: v_cmp_eq_u32_e32 vcc, 0, [[LOAD]]
-; CHECK-NEXT: s_and_b64 vcc, exec, vcc
-; CHECK-NEXT: s_cbranch_vccnz [[LOOP_BB]]
-
-; CHECK-NEXT: {{^}}[[EXIT]]:
-; CHECK: s_or_b64 exec, exec, [[SAVEEXEC]]
-; CHECK: buffer_store_dword
-; CHECK: s_endpgm
 define amdgpu_ps void @test_kill_divergent_loop(i32 %arg) #0 {
+; SI-LABEL: test_kill_divergent_loop:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; SI-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; SI-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
+; SI-NEXT: s_cbranch_execz BB10_4
+; SI-NEXT: ; %bb.1: ; %bb.preheader
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: BB10_2: ; %bb
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: ;;#ASMSTART
+; SI-NEXT: v_mov_b32_e64 v7, -1
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: v_nop_e64
+; SI-NEXT: ;;#ASMEND
+; SI-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v7
+; SI-NEXT: ; %bb.3: ; %bb
+; SI-NEXT: ; in Loop: Header=BB10_2 Depth=1
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], 0 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; SI-NEXT: s_and_b64 vcc, exec, vcc
+; SI-NEXT: s_cbranch_vccnz BB10_2
+; SI-NEXT: BB10_4: ; %Flow1
+; SI-NEXT: s_or_b64 exec, exec, s[0:1]
+; SI-NEXT: s_cbranch_execz BB10_6
+; SI-NEXT: ; %bb.5: ; %Flow1
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, 8
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+; SI-NEXT: BB10_6:
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: test_kill_divergent_loop:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX10-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX10-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execz BB10_3
+; GFX10-NEXT: BB10_1: ; %bb
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: ;;#ASMSTART
+; GFX10-NEXT: v_mov_b32_e64 v7, -1
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: v_nop_e64
+; GFX10-NEXT: ;;#ASMEND
+; GFX10-NEXT: s_waitcnt_depctr 0xfffe
+; GFX10-NEXT: v_cmpx_gt_f32_e32 0, v7
+; GFX10-NEXT: ; %bb.2: ; %bb
+; GFX10-NEXT: ; in Loop: Header=BB10_1 Depth=1
+; GFX10-NEXT: s_nop 4
+; GFX10-NEXT: global_load_dword v0, v[0:1], off glc dlc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX10-NEXT: s_and_b64 vcc, exec, vcc
+; GFX10-NEXT: s_cbranch_vccnz BB10_1
+; GFX10-NEXT: BB10_3: ; %Flow1
+; GFX10-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execz BB10_5
+; GFX10-NEXT: ; %bb.4: ; %Flow1
+; GFX10-NEXT: v_mov_b32_e32 v0, 8
+; GFX10-NEXT: global_store_dword v[0:1], v0, off
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: BB10_5:
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
 entry:
   %cmp = icmp eq i32 %arg, 0
   br i1 %cmp, label %bb, label %exit
 
 bb:
-  %var = call float asm sideeffect "
-    v_mov_b32_e64 v7, -1
+  %var = call float asm sideeffect "v_mov_b32_e64 v7, -1
    v_nop_e64
    v_nop_e64
    v_nop_e64
@@ -343,29 +567,67 @@
 }
 
 ; bug 28550
-; CHECK-LABEL: {{^}}phi_use_def_before_kill:
-; CHECK: v_cndmask_b32_e64 [[PHIREG:v[0-9]+]], 0, -1.0,
-; CHECK: v_cmpx_lt_f32_e32 vcc, 0,
-; CHECK-NEXT: s_cbranch_execz [[EXITBB:BB[0-9]+_[0-9]+]]
-
-; CHECK: ; %[[KILLBB:bb.[0-9]+]]:
-; CHECK-NEXT: s_cbranch_scc0 [[PHIBB:BB[0-9]+_[0-9]+]]
-
-; CHECK: [[PHIBB]]:
-; CHECK: v_cmp_eq_f32_e32 vcc, 0, [[PHIREG]]
-; CHECK: s_cbranch_vccz [[ENDBB:BB[0-9]+_[0-9]+]]
-
-; CHECK: ; %bb10
-; CHECK: v_mov_b32_e32 v{{[0-9]+}}, 9
-; CHECK: buffer_store_dword
-
-; CHECK: [[ENDBB]]:
-; CHECK-NEXT: s_endpgm
-
-; CHECK: [[EXITBB]]:
-; CHECK: exp null
-; CHECK-NEXT: s_endpgm
 define amdgpu_ps void @phi_use_def_before_kill(float inreg %x) #0 {
+; SI-LABEL: phi_use_def_before_kill:
+; SI: ; %bb.0: ; %bb
+; SI-NEXT: v_add_f32_e64 v1, s0, 1.0
+; SI-NEXT: v_cmp_lt_f32_e32 vcc, 0, v1
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc
+; SI-NEXT: v_cmpx_lt_f32_e32 vcc, 0, v1
+; SI-NEXT: s_cbranch_execz BB11_6
+; SI-NEXT: ; %bb.1: ; %bb
+; SI-NEXT: s_cbranch_scc0 BB11_3
+; SI-NEXT: ; %bb.2: ; %bb8
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, 8
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v0, 4.0
+; SI-NEXT: BB11_3: ; %phibb
+; SI-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0
+; SI-NEXT: s_and_b64 vcc, exec, vcc
+; SI-NEXT: s_cbranch_vccz BB11_5
+; SI-NEXT: ; %bb.4: ; %bb10
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, 9
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: BB11_5: ; %end
+; SI-NEXT: s_endpgm
+; SI-NEXT: BB11_6:
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: phi_use_def_before_kill:
+; GFX10: ; %bb.0: ; %bb
+; GFX10-NEXT: v_add_f32_e64 v1, s0, 1.0
+; GFX10-NEXT: v_cmp_lt_f32_e32 vcc, 0, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc
+; GFX10-NEXT: v_cmpx_lt_f32_e32 0, v1
+; GFX10-NEXT: s_cbranch_execz BB11_6
+; GFX10-NEXT: ; %bb.1: ; %bb
+; GFX10-NEXT: s_cbranch_scc0 BB11_3
+; GFX10-NEXT: ; %bb.2: ; %bb8
+; GFX10-NEXT: v_mov_b32_e32 v1, 8
+; GFX10-NEXT: v_mov_b32_e32 v0, 4.0
+; GFX10-NEXT: s_nop 0
+; GFX10-NEXT: global_store_dword v[0:1], v1, off
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: BB11_3: ; %phibb
+; GFX10-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_and_b64 vcc, exec, vcc
+; GFX10-NEXT: s_cbranch_vccz BB11_5
+; GFX10-NEXT: ; %bb.4: ; %bb10
+; GFX10-NEXT: v_mov_b32_e32 v0, 9
+; GFX10-NEXT: global_store_dword v[0:1], v0, off
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: BB11_5: ; %end
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: BB11_6:
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
 bb:
   %tmp = fadd float %x, 1.000000e+00
   %tmp1 = fcmp olt float 0.000000e+00, %tmp
@@ -391,18 +653,35 @@
   ret void
 }
 
-; CHECK-LABEL: {{^}}no_skip_no_successors:
-; CHECK: v_cmp_nge_f32
-; CHECK: s_cbranch_vccz [[SKIPKILL:BB[0-9]+_[0-9]+]]
-
-; CHECK: ; %bb6
-; CHECK: s_mov_b64 exec, 0
-
-; CHECK: [[SKIPKILL]]:
-; CHECK: v_cmp_nge_f32_e32 vcc
-; CHECK: %bb.3: ; %bb5
-; CHECK-NEXT: .Lfunc_end{{[0-9]+}}
 define amdgpu_ps void @no_skip_no_successors(float inreg %arg, float inreg %arg1) #0 {
+; SI-LABEL: no_skip_no_successors:
+; SI: ; %bb.0: ; %bb
+; SI-NEXT: v_cmp_nge_f32_e64 s[2:3], s1, 0
+; SI-NEXT: s_and_b64 vcc, exec, s[2:3]
+; SI-NEXT: s_cbranch_vccz BB12_2
+; SI-NEXT: ; %bb.1: ; %bb6
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+; SI-NEXT: BB12_2: ; %bb3
+; SI-NEXT: v_mov_b32_e32 v0, 0x3e7ae148
+; SI-NEXT: v_cmp_nge_f32_e32 vcc, s0, v0
+; SI-NEXT: s_and_b64 vcc, exec, vcc
+; SI-NEXT: ; %bb.3: ; %bb5
+;
+; GFX10-LABEL: no_skip_no_successors:
+; GFX10: ; %bb.0: ; %bb
+; GFX10-NEXT: v_cmp_nge_f32_e64 s[2:3], s1, 0
+; GFX10-NEXT: s_and_b64 vcc, exec, s[2:3]
+; GFX10-NEXT: s_cbranch_vccz BB12_2
+; GFX10-NEXT: ; %bb.1: ; %bb6
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: BB12_2: ; %bb3
+; GFX10-NEXT: v_cmp_nle_f32_e64 s[0:1], 0x3e7ae148, s0
+; GFX10-NEXT: s_and_b64 vcc, exec, s[0:1]
+; GFX10-NEXT: ; %bb.3: ; %bb5
 bb:
   %tmp = fcmp ult float %arg1, 0.000000e+00
   %tmp2 = fcmp ult float %arg, 0x3FCF5C2900000000
@@ -425,27 +704,81 @@
   ret void
 }
 
-; CHECK-LABEL: {{^}}if_after_kill_block:
-; CHECK: ; %bb.0:
-; CHECK: s_and_saveexec_b64
-; CHECK: s_xor_b64
-
-; CHECK: v_cmpx_gt_f32_e32 vcc, 0,
-; CHECK: BB{{[0-9]+_[0-9]+}}:
-; CHECK: s_or_b64 exec, exec
-; CHECK: image_sample_c
-
-; CHECK: v_cmp_neq_f32_e32 vcc, 0,
-; CHECK: s_and_saveexec_b64 s{{\[[0-9]+:[0-9]+\]}}, vcc
-; CHECK-NEXT: s_cbranch_execz [[END:BB[0-9]+_[0-9]+]]
-; CHECK-NOT: branch
-
-; CHECK: ; %bb.{{[0-9]+}}: ; %bb8
-; CHECK: buffer_store_dword
-
-; CHECK: [[END]]:
-; CHECK: s_endpgm
 define amdgpu_ps void @if_after_kill_block(float %arg, float %arg1, float %arg2, float %arg3) #0 {
+; SI-LABEL: if_after_kill_block:
+; SI: ; %bb.0: ; %bb
+; SI-NEXT: s_wqm_b64 exec, exec
+; SI-NEXT: s_mov_b32 s0, 0
+; SI-NEXT: v_cmp_nle_f32_e32 vcc, 0, v1
+; SI-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; SI-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
+; SI-NEXT: s_cbranch_execz BB13_2
+; SI-NEXT: ; %bb.1: ; %bb3
+; SI-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v0
+; SI-NEXT: BB13_2: ; %bb4
+; SI-NEXT: s_or_b64 exec, exec, s[2:3]
+; SI-NEXT: s_cbranch_execz BB13_6
+; SI-NEXT: ; %bb.3: ; %bb4
+; SI-NEXT: s_mov_b32 s1, s0
+; SI-NEXT: s_mov_b32 s2, s0
+; SI-NEXT: s_mov_b32 s3, s0
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s0
+; SI-NEXT: s_mov_b32 s6, s0
+; SI-NEXT: s_mov_b32 s7, s0
+; SI-NEXT: image_sample_c v0, v[2:3], s[0:7], s[0:3] dmask:0x10
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0
+; SI-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; SI-NEXT: s_cbranch_execz BB13_5
+; SI-NEXT: ; %bb.4: ; %bb8
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, 9
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: BB13_5: ; %UnifiedReturnBlock
+; SI-NEXT: s_endpgm
+; SI-NEXT: BB13_6:
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: if_after_kill_block:
+; GFX10: ; %bb.0: ; %bb
+; GFX10-NEXT: s_wqm_b64 exec, exec
+; GFX10-NEXT: v_cmp_nle_f32_e32 vcc, 0, v1
+; GFX10-NEXT: s_mov_b32 s0, 0
+; GFX10-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX10-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
+; GFX10-NEXT: s_cbranch_execz BB13_2
+; GFX10-NEXT: ; %bb.1: ; %bb3
+; GFX10-NEXT: s_waitcnt_depctr 0xfffe
+; GFX10-NEXT: v_cmpx_gt_f32_e32 0, v0
+; GFX10-NEXT: BB13_2: ; %bb4
+; GFX10-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX10-NEXT: s_cbranch_execz BB13_6
+; GFX10-NEXT: ; %bb.3: ; %bb4
+; GFX10-NEXT: s_mov_b32 s1, s0
+; GFX10-NEXT: s_mov_b32 s2, s0
+; GFX10-NEXT: s_mov_b32 s3, s0
+; GFX10-NEXT: s_mov_b32 s4, s0
+; GFX10-NEXT: s_mov_b32 s5, s0
+; GFX10-NEXT: s_mov_b32 s6, s0
+; GFX10-NEXT: s_mov_b32 s7, s0
+; GFX10-NEXT: image_sample_c v0, v[2:3], s[0:7], s[0:3] dmask:0x10 dim:SQ_RSRC_IMG_1D
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX10-NEXT: s_cbranch_execz BB13_5
+; GFX10-NEXT: ; %bb.4: ; %bb8
+; GFX10-NEXT: v_mov_b32_e32 v0, 9
+; GFX10-NEXT: global_store_dword v[0:1], v0, off
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: BB13_5: ; %UnifiedReturnBlock
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: BB13_6:
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
 bb:
   %tmp = fcmp ult float %arg1, 0.000000e+00
   br i1 %tmp, label %bb3, label %bb4
@@ -469,13 +802,108 @@
   ret void
 }
 
-; CHECK-LABEL: {{^}}cbranch_kill:
-; CHECK: ; %bb.{{[0-9]+}}: ; %export
-; CHECK-NEXT: s_or_b64
-; CHECK-NEXT: s_cbranch_execz [[EXIT:BB[0-9]+_[0-9]+]]
-; CHECK: [[EXIT]]:
-; CHECK-NEXT: exp null off, off, off, off done vm
 define amdgpu_ps void @cbranch_kill(i32 inreg %0, <2 x float> %1) {
+; SI-LABEL: cbranch_kill:
+; SI: ; %bb.0: ; %.entry
+; SI-NEXT: s_mov_b32 m0, s0
+; SI-NEXT: s_mov_b32 s4, 0
+; SI-NEXT: v_interp_p1_f32 v2, v0, attr1.x
+; SI-NEXT: v_mov_b32_e32 v3, v2
+; SI-NEXT: v_mov_b32_e32 v4, v2
+; SI-NEXT: s_mov_b32 s5, s4
+; SI-NEXT: s_mov_b32 s6, s4
+; SI-NEXT: s_mov_b32 s7, s4
+; SI-NEXT: s_mov_b32 s8, s4
+; SI-NEXT: s_mov_b32 s9, s4
+; SI-NEXT: s_mov_b32 s10, s4
+; SI-NEXT: s_mov_b32 s11, s4
+; SI-NEXT: image_sample_lz v2, v[2:4], s[4:11], s[0:3] dmask:0x1 da
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cmp_ge_f32_e32 vcc, 0, v2
+; SI-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; SI-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
+; SI-NEXT: s_cbranch_execz BB14_2
+; SI-NEXT: ; %bb.1: ; %kill
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: BB14_2: ; %Flow
+; SI-NEXT: s_or_saveexec_b64 s[2:3], s[2:3]
+; SI-NEXT: ; implicit-def: $vgpr3
+; SI-NEXT: ; implicit-def: $vgpr4
+; SI-NEXT: ; implicit-def: $vgpr5
+; SI-NEXT: ; implicit-def: $vgpr6
+; SI-NEXT: s_xor_b64 exec, exec, s[2:3]
+; SI-NEXT: ; %bb.3: ; %live
+; SI-NEXT: s_mov_b32 m0, s0
+; SI-NEXT: v_interp_p1_f32 v4, v0, attr0.x
+; SI-NEXT: v_interp_p1_f32 v0, v0, attr0.y
+; SI-NEXT: v_mul_f32_e32 v3, v4, v2
+; SI-NEXT: v_interp_p2_f32 v4, v1, attr0.x
+; SI-NEXT: v_mul_f32_e32 v5, v0, v2
+; SI-NEXT: v_interp_p2_f32 v0, v1, attr0.y
+; SI-NEXT: v_mul_f32_e32 v4, v4, v2
+; SI-NEXT: v_mul_f32_e32 v6, v0, v2
+; SI-NEXT: ; %bb.4: ; %export
+; SI-NEXT: s_or_b64 exec, exec, s[2:3]
+; SI-NEXT: s_cbranch_execz BB14_6
+; SI-NEXT: ; %bb.5: ; %export
+; SI-NEXT: v_cvt_pkrtz_f16_f32_e32 v0, v3, v4
+; SI-NEXT: v_cvt_pkrtz_f16_f32_e32 v1, v5, v6
+; SI-NEXT: exp mrt0 v0, v0, v1, v1 done compr vm
+; SI-NEXT: s_endpgm
+; SI-NEXT: BB14_6:
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: cbranch_kill:
+; GFX10: ; %bb.0: ; %.entry
+; GFX10-NEXT: s_mov_b32 m0, s0
+; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: v_interp_p1_f32_e32 v2, v0, attr1.x
+; GFX10-NEXT: s_mov_b32 s5, s4
+; GFX10-NEXT: s_mov_b32 s6, s4
+; GFX10-NEXT: s_mov_b32 s7, s4
+; GFX10-NEXT: s_mov_b32 s8, s4
+; GFX10-NEXT: s_mov_b32 s9, s4
+; GFX10-NEXT: s_mov_b32 s10, s4
+; GFX10-NEXT: s_mov_b32 s11, s4
+; GFX10-NEXT: image_sample_lz v2, [v2, v2, v2], s[4:11], s[0:3] dmask:0x1 dim:SQ_RSRC_IMG_2D_ARRAY
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_cmp_ge_f32_e32 vcc, 0, v2
+; GFX10-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX10-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
+; GFX10-NEXT: s_cbranch_execz BB14_2
+; GFX10-NEXT: ; %bb.1: ; %kill
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: BB14_2: ; %Flow
+; GFX10-NEXT: s_or_saveexec_b64 s[2:3], s[2:3]
+; GFX10-NEXT: ; implicit-def: $vgpr3
+; GFX10-NEXT: ; implicit-def: $vgpr5
+; GFX10-NEXT: ; implicit-def: $vgpr4
+; GFX10-NEXT: ; implicit-def: $vgpr6
+; GFX10-NEXT: s_xor_b64 exec, exec, s[2:3]
+; GFX10-NEXT: ; %bb.3: ; %live
+; GFX10-NEXT: s_mov_b32 m0, s0
+; GFX10-NEXT: v_interp_p1_f32_e32 v3, v0, attr0.x
+; GFX10-NEXT: v_interp_p1_f32_e32 v0, v0, attr0.y
+; GFX10-NEXT: v_mov_b32_e32 v7, v3
+; GFX10-NEXT: v_mov_b32_e32 v11, v0
+; GFX10-NEXT: v_mul_f32_e32 v3, v3, v2
+; GFX10-NEXT: v_mul_f32_e32 v4, v0, v2
+; GFX10-NEXT: v_interp_p2_f32_e32 v7, v1, attr0.x
+; GFX10-NEXT: v_interp_p2_f32_e32 v11, v1, attr0.y
+; GFX10-NEXT: v_mul_f32_e32 v5, v7, v2
+; GFX10-NEXT: v_mul_f32_e32 v6, v11, v2
+; GFX10-NEXT: ; %bb.4: ; %export
+; GFX10-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX10-NEXT: s_cbranch_execz BB14_6
+; GFX10-NEXT: ; %bb.5: ; %export
+; GFX10-NEXT: v_cvt_pkrtz_f16_f32_e32 v0, v3, v5
+; GFX10-NEXT: v_cvt_pkrtz_f16_f32_e32 v1, v4, v6
+; GFX10-NEXT: exp mrt0 v0, v0, v1, v1 done compr vm
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: BB14_6:
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
 .entry:
   %val0 = extractelement <2 x float> %1, i32 0
   %val1 = extractelement <2 x float> %1, i32 1
@@ -510,10 +938,74 @@
   ret void
 }
 
-; CHECK-LABEL: {{^}}complex_loop:
-; CHECK: s_mov_b64 exec, 0
-; CHECK-NOT: exp null
 define amdgpu_ps void @complex_loop(i32 inreg %cmpa, i32 %cmpb, i32 %cmpc) {
+; SI-LABEL: complex_loop:
+; SI: ; %bb.0: ; %.entry
+; SI-NEXT: s_cmp_lt_i32 s0, 1
+; SI-NEXT: v_mov_b32_e32 v2, -1
+; SI-NEXT: s_cbranch_scc1 BB15_6
+; SI-NEXT: ; %bb.1: ; %.lr.ph
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: s_mov_b64 s[0:1], 0
+; SI-NEXT: s_branch BB15_4
+; SI-NEXT: BB15_2: ; %kill
+; SI-NEXT: ; in Loop: Header=BB15_4 Depth=1
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: BB15_3: ; %latch
+; SI-NEXT: ; in Loop: Header=BB15_4 Depth=1
+; SI-NEXT: s_or_b64 exec, exec, s[4:5]
+; SI-NEXT: s_add_i32 s2, s2, 1
+; SI-NEXT: v_cmp_ge_i32_e32 vcc, s2, v1
+; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT: v_mov_b32_e32 v2, s2
+; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT: s_cbranch_execz BB15_5
+; SI-NEXT: BB15_4: ; %hdr
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_cmp_gt_u32_e32 vcc, s2, v0
+; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz BB15_2
+; SI-NEXT: s_branch BB15_3
+; SI-NEXT: BB15_5: ; %Flow
+; SI-NEXT: s_or_b64 exec, exec, s[0:1]
+; SI-NEXT: BB15_6: ; %._crit_edge
+; SI-NEXT: exp mrt0 v2, v2, v0, v0 done compr vm
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: complex_loop:
+; GFX10: ; %bb.0: ; %.entry
+; GFX10-NEXT: v_mov_b32_e32 v2, -1
+; GFX10-NEXT: s_cmp_lt_i32 s0, 1
+; GFX10-NEXT: s_cbranch_scc1 BB15_6
+; GFX10-NEXT: ; %bb.1: ; %.lr.ph
+; GFX10-NEXT: s_mov_b32 s2, 0
+; GFX10-NEXT: s_mov_b64 s[0:1], 0
+; GFX10-NEXT: s_branch BB15_4
+; GFX10-NEXT: BB15_2: ; %kill
+; GFX10-NEXT: ; in Loop: Header=BB15_4 Depth=1
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: BB15_3: ; %latch
+; GFX10-NEXT: ; in Loop: Header=BB15_4 Depth=1
+; GFX10-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX10-NEXT: s_add_i32 s2, s2, 1
+; GFX10-NEXT: v_cmp_ge_i32_e32 vcc, s2, v1
+; GFX10-NEXT: v_mov_b32_e32 v2, s2
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_execz BB15_5
+; GFX10-NEXT: BB15_4: ; %hdr
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: v_cmp_gt_u32_e32 vcc, s2, v0
+; GFX10-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX10-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; GFX10-NEXT: s_cbranch_execnz BB15_2
+; GFX10-NEXT: s_branch BB15_3
+; GFX10-NEXT: BB15_5: ; %Flow
+; GFX10-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX10-NEXT: BB15_6: ; %._crit_edge
+; GFX10-NEXT: exp mrt0 v2, v2, v0, v0 done compr vm
+; GFX10-NEXT: s_endpgm
 .entry:
   %flaga = icmp sgt i32 %cmpa, 0
   br i1 %flaga, label %.lr.ph, label %._crit_edge
@@ -542,12 +1034,31 @@
   ret void
 }
 
-; CHECK-LABEL: {{^}}skip_mode_switch:
-; CHECK: s_and_saveexec_b64
-; CHECK-NEXT: s_cbranch_execz
-; CHECK: s_setreg_imm32
-; CHECK: s_or_b64 exec, exec
 define void @skip_mode_switch(i32 %arg) {
+; SI-LABEL: skip_mode_switch:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SI-NEXT: s_cbranch_execz BB16_2
+; SI-NEXT: ; %bb.1: ; %bb.0
+; SI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 0, 2), 3
+; SI-NEXT: BB16_2: ; %bb.1
+; SI-NEXT: s_or_b64 exec, exec, s[4:5]
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: skip_mode_switch:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX10-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX10-NEXT: s_cbranch_execz BB16_2
+; GFX10-NEXT: ; %bb.1: ; %bb.0
+; GFX10-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 0, 2), 3
+; GFX10-NEXT: BB16_2: ; %bb.1
+; GFX10-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX10-NEXT: s_setpc_b64 s[30:31]
 entry:
   %cmp = icmp eq i32 %arg, 0
   br i1 %cmp, label %bb.0, label %bb.1