diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -866,7 +866,7 @@
     if (EnableStructurizerWorkarounds) {
       addPass(createUnifyLoopExitsPass());
     }
-    addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
+    addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions
   }
   addPass(createSinkingPass());
   addPass(createAMDGPUAnnotateUniformValues());
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll
@@ -136,9 +136,10 @@
 ; CHECK-NEXT: s_waitcnt lgkmcnt(0)
 ; CHECK-NEXT: s_cmp_lg_u32 s4, 0
 ; CHECK-NEXT: s_cselect_b32 s4, 1, 0
+; CHECK-NEXT: s_xor_b32 s4, s4, 1
 ; CHECK-NEXT: s_and_b32 s4, s4, 1
 ; CHECK-NEXT: s_cmp_lg_u32 s4, 0
-; CHECK-NEXT: s_cbranch_scc1 BB4_6
+; CHECK-NEXT: s_cbranch_scc0 BB4_6
 ; CHECK-NEXT: ; %bb.1: ; %bb2
 ; CHECK-NEXT: s_getpc_b64 s[6:7]
 ; CHECK-NEXT: s_add_u32 s6, s6, const.ptr@gotpcrel32@lo+4
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/localizer.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/localizer.ll
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/localizer.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/localizer.ll
@@ -7,38 +7,45 @@
 define amdgpu_kernel void @localize_constants(i1 %cond) {
 ; GFX9-LABEL: localize_constants:
 ; GFX9: ; %bb.0: ; %entry
-; GFX9-NEXT: s_load_dword s0, s[4:5], 0x0
+; GFX9-NEXT: s_load_dword s1, s[4:5], 0x0
+; GFX9-NEXT: s_mov_b32 s0, 1
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_and_b32 s0, s0, 1
-; GFX9-NEXT: s_cmp_lg_u32 s0, 0
+; GFX9-NEXT: s_xor_b32 s1, s1, 1
+; GFX9-NEXT: s_and_b32 s1, s1, 1
+; GFX9-NEXT: s_cmp_lg_u32 s1, 0
 ; GFX9-NEXT: s_cbranch_scc0 BB0_2
-; GFX9-NEXT: ; %bb.1: ; %bb0
-; GFX9-NEXT: v_mov_b32_e32 v0, 0x7b
-; GFX9-NEXT: global_store_dword v[0:1], v0, off
-; GFX9-NEXT: v_mov_b32_e32 v0, 0x1c8
+; GFX9-NEXT: ; %bb.1: ; %bb1
+; GFX9-NEXT: v_mov_b32_e32 v0, 0x5be6
 ; GFX9-NEXT: global_store_dword v[0:1], v0, off
-; GFX9-NEXT: v_mov_b32_e32 v0, 0x3e7
+; GFX9-NEXT: v_mov_b32_e32 v0, 0x1c7
 ; GFX9-NEXT: global_store_dword v[0:1], v0, off
 ; GFX9-NEXT: v_mov_b32_e32 v0, 0x3e8
 ; GFX9-NEXT: global_store_dword v[0:1], v0, off
-; GFX9-NEXT: v_mov_b32_e32 v0, 0x1c7
-; GFX9-NEXT: global_store_dword v[0:1], v0, off
-; GFX9-NEXT: v_mov_b32_e32 v0, 0x5be6
+; GFX9-NEXT: v_mov_b32_e32 v0, 0x1c8
 ; GFX9-NEXT: global_store_dword v[0:1], v0, off
-; GFX9-NEXT: s_endpgm
-; GFX9-NEXT: BB0_2: ; %bb1
-; GFX9-NEXT: v_mov_b32_e32 v0, 0x5be6
+; GFX9-NEXT: v_mov_b32_e32 v0, 0x3e7
 ; GFX9-NEXT: global_store_dword v[0:1], v0, off
-; GFX9-NEXT: v_mov_b32_e32 v0, 0x1c7
+; GFX9-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX9-NEXT: s_mov_b32 s0, 0
 ; GFX9-NEXT: global_store_dword v[0:1], v0, off
-; GFX9-NEXT: v_mov_b32_e32 v0, 0x3e8
+; GFX9-NEXT: BB0_2: ; %Flow
+; GFX9-NEXT: s_and_b32 s0, s0, 1
+; GFX9-NEXT: s_cmp_lg_u32 s0, 0
+; GFX9-NEXT: s_cbranch_scc0 BB0_4
+; GFX9-NEXT: ; %bb.3: ; %bb0
+; GFX9-NEXT: v_mov_b32_e32 v0, 0x7b
 ; GFX9-NEXT: global_store_dword v[0:1], v0, off
 ; GFX9-NEXT: v_mov_b32_e32 v0, 0x1c8
 ; GFX9-NEXT: global_store_dword v[0:1], v0, off
 ; GFX9-NEXT: v_mov_b32_e32 v0, 0x3e7
 ; GFX9-NEXT: global_store_dword v[0:1], v0, off
-; GFX9-NEXT: v_mov_b32_e32 v0, 0x7b
+; GFX9-NEXT: v_mov_b32_e32 v0, 0x3e8
 ; GFX9-NEXT: global_store_dword v[0:1], v0, off
+; GFX9-NEXT: v_mov_b32_e32 v0, 0x1c7
+; GFX9-NEXT: global_store_dword v[0:1], v0, off
+; GFX9-NEXT: v_mov_b32_e32 v0, 0x5be6
+; GFX9-NEXT: global_store_dword v[0:1], v0, off
+; GFX9-NEXT: BB0_4: ; %bb2
 ; GFX9-NEXT: s_endpgm
 entry:
 br i1 %cond, label %bb0, label %bb1
@@ -75,31 +82,46 @@
 define amdgpu_kernel void @localize_globals(i1 %cond) {
 ; GFX9-LABEL: localize_globals:
 ; GFX9: ; %bb.0: ; %entry
-; GFX9-NEXT: s_load_dword s0, s[4:5], 0x0
+; GFX9-NEXT: s_load_dword s1, s[4:5], 0x0
+; GFX9-NEXT: s_mov_b32 s0, 1
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_xor_b32 s1, s1, 1
+; GFX9-NEXT: s_and_b32 s1, s1, 1
+; GFX9-NEXT: s_cmp_lg_u32 s1, 0
+; GFX9-NEXT: s_cbranch_scc0 BB1_2
+; GFX9-NEXT: ; %bb.1: ; %bb1
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, gv2@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, gv2@gotpcrel32@hi+4
+; GFX9-NEXT: s_getpc_b64 s[4:5]
+; GFX9-NEXT: s_add_u32 s4, s4, gv3@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s5, s5, gv3@gotpcrel32@hi+4
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_mov_b32 s0, 0
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: global_store_dword v[0:1], v2, off
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: v_mov_b32_e32 v2, 1
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: global_store_dword v[0:1], v2, off
+; GFX9-NEXT: BB1_2: ; %Flow
 ; GFX9-NEXT: s_and_b32 s0, s0, 1
 ; GFX9-NEXT: s_cmp_lg_u32 s0, 0
-; GFX9-NEXT: s_cbranch_scc0 BB1_2
-; GFX9-NEXT: ; %bb.1: ; %bb0
+; GFX9-NEXT: s_cbranch_scc0 BB1_4
+; GFX9-NEXT: ; %bb.3: ; %bb0
 ; GFX9-NEXT: s_getpc_b64 s[0:1]
 ; GFX9-NEXT: s_add_u32 s0, s0, gv0@gotpcrel32@lo+4
 ; GFX9-NEXT: s_addc_u32 s1, s1, gv0@gotpcrel32@hi+4
-; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
 ; GFX9-NEXT: s_getpc_b64 s[2:3]
 ; GFX9-NEXT: s_add_u32 s2, s2, gv1@gotpcrel32@lo+4
 ; GFX9-NEXT: s_addc_u32 s3, s3, gv1@gotpcrel32@hi+4
-; GFX9-NEXT: s_branch BB1_3
-; GFX9-NEXT: BB1_2: ; %bb1
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, gv2@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, gv2@gotpcrel32@hi+4
-; GFX9-NEXT: v_mov_b32_e32 v2, 0
-; GFX9-NEXT: s_getpc_b64 s[2:3]
-; GFX9-NEXT: s_add_u32 s2, s2, gv3@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s3, s3, gv3@gotpcrel32@hi+4
-; GFX9-NEXT: BB1_3: ; %bb2
-; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
 ; GFX9-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
 ; GFX9-NEXT: v_mov_b32_e32 v3, 1
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v0, s0
@@ -108,6 +130,7 @@
 ; GFX9-NEXT: v_mov_b32_e32 v0, s2
 ; GFX9-NEXT: v_mov_b32_e32 v1, s3
 ; GFX9-NEXT: global_store_dword v[0:1], v3, off
+; GFX9-NEXT: BB1_4: ; %bb2
 ; GFX9-NEXT: s_endpgm
 entry:
 br i1 %cond, label %bb0, label %bb1
diff --git a/llvm/test/CodeGen/AMDGPU/branch-relaxation-debug-info.ll b/llvm/test/CodeGen/AMDGPU/branch-relaxation-debug-info.ll
--- a/llvm/test/CodeGen/AMDGPU/branch-relaxation-debug-info.ll
+++ b/llvm/test/CodeGen/AMDGPU/branch-relaxation-debug-info.ll
@@ -7,11 +7,11 @@
 define amdgpu_kernel void @long_branch_dbg_value(float addrspace(1)* nocapture %arg, float %arg1) #1 !dbg !5 {
 ; GCN-LABEL: long_branch_dbg_value:
-; GCN: BB0_4: ; %bb
+; GCN: BB0_5: ; %bb
 ; GCN-NEXT: ;DEBUG_VALUE: test_debug_value:globalptr_arg <- [DW_OP_plus_uconst 12, DW_OP_stack_value]
 ; GCN-NEXT: .loc 1 0 42 is_stmt 0 ; /tmp/test_debug_value.cl:0:42
 ; GCN-NEXT: s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
-; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], BB0_3-(BB0_4+4)
+; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], BB0_4-(BB0_5+4)
 ; GCN-NEXT: s_addc_u32 s[[PC_HI]], s[[PC_HI]], 0
 ; GCN-NEXT: s_setpc_b64
 bb:
diff --git a/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll b/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
--- a/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
+++ b/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
@@ -224,7 +224,7 @@
 ; GCN-LABEL: {{^}}uniform_unconditional_min_long_forward_branch:
 ; GCN: s_cmp_eq_u32
-; GCN-NEXT: s_cbranch_scc0 [[BB2:BB[0-9]+_[0-9]+]]
+; GCN: s_cbranch_scc{{[0-1]}} [[BB2:BB[0-9]+_[0-9]+]]
 
 ; GCN-NEXT: [[LONG_JUMP0:BB[0-9]+_[0-9]+]]: ; %bb0
 ; GCN-NEXT: s_getpc_b64 s{{\[}}[[PC0_LO:[0-9]+]]:[[PC0_HI:[0-9]+]]{{\]}}
@@ -232,24 +232,17 @@
 ; GCN-NEXT: s_addc_u32 s[[PC0_HI]], s[[PC0_HI]], 0{{$}}
 ; GCN-NEXT: s_setpc_b64 s{{\[}}[[PC0_LO]]:[[PC0_HI]]{{\]}}
 
-; GCN-NEXT: [[BB2]]: ; %bb2
-; GCN: v_mov_b32_e32 [[BB2_K:v[0-9]+]], 17
-; GCN: buffer_store_dword [[BB2_K]]
-
-; GCN-NEXT: [[LONG_JUMP1:BB[0-9]+_[0-9]+]]: ; %bb2
-; GCN-NEXT: s_getpc_b64 s{{\[}}[[PC1_LO:[0-9]+]]:[[PC1_HI:[0-9]+]]{{\]}}
-; GCN-NEXT: s_add_u32 s[[PC1_LO]], s[[PC1_LO]], [[BB4:BB[0-9]_[0-9]+]]-([[LONG_JUMP1]]+4)
-; GCN-NEXT: s_addc_u32 s[[PC1_HI]], s[[PC1_HI]], 0{{$}}
-; GCN-NEXT: s_setpc_b64 s{{\[}}[[PC1_LO]]:[[PC1_HI]]{{\]}}
-
-; GCN: [[BB3]]: ; %bb3
+; GCN: [[BB2]]: ; %bb3
 ; GCN: v_nop_e64
 ; GCN: v_nop_e64
 ; GCN: v_nop_e64
 ; GCN: v_nop_e64
 ; GCN: ;;#ASMEND
-; GCN-NEXT: [[BB4]]: ; %bb4
+; GCN: [[BB3]]:
+; GCN: v_mov_b32_e32 [[BB2_K:v[0-9]+]], 17
+; GCN: buffer_store_dword [[BB2_K]]
+
 ; GCN: v_mov_b32_e32 [[BB4_K:v[0-9]+]], 63
 ; GCN: buffer_store_dword [[BB4_K]]
 ; GCN-NEXT: s_endpgm
@@ -317,23 +310,15 @@
 ; GCN-LABEL: {{^}}expand_requires_expand:
 ; GCN-NEXT: ; %bb.0: ; %bb0
 ; GCN: s_load_dword
-; GCN: s_cmp_lt_i32 s{{[0-9]+}}, 0{{$}}
-; GCN-NEXT: s_cbranch_scc0 [[BB1:BB[0-9]+_[0-9]+]]
-
-; GCN-NEXT: [[LONGBB0:BB[0-9]+_[0-9]+]]: ; %bb0
+; GCN: {{s|v}}_cmp_lt_i32
+; GCN: s_cbranch
 
-; GCN-NEXT: s_getpc_b64 s{{\[}}[[PC0_LO:[0-9]+]]:[[PC0_HI:[0-9]+]]{{\]}}
-; GCN-NEXT: s_add_u32 s[[PC0_LO]], s[[PC0_LO]], [[BB2:BB[0-9]_[0-9]+]]-([[LONGBB0]]+4)
-; GCN-NEXT: s_addc_u32 s[[PC0_HI]], s[[PC0_HI]], 0{{$}}
-; GCN-NEXT: s_setpc_b64 s{{\[}}[[PC0_LO]]:[[PC0_HI]]{{\]}}
-
-; GCN-NEXT: [[BB1]]: ; %bb1
-; GCN-NEXT: s_load_dword
+; GCN: s_load_dword
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_cmp_eq_u32 s{{[0-9]+}}, 3{{$}}
-; GCN-NEXT: s_cbranch_scc0 [[BB2:BB[0-9]_[0-9]+]]
+; GCN-NEXT: v_cmp_{{eq|ne}}_u32_e64
+; GCN: s_cbranch_vccz [[BB2:BB[0-9]_[0-9]+]]
 
-; GCN-NEXT: [[LONGBB1:BB[0-9]+_[0-9]+]]: ; %bb1
+; GCN-NEXT: [[LONGBB1:BB[0-9]+_[0-9]+]]:
 ; GCN-NEXT: s_getpc_b64 s{{\[}}[[PC1_LO:[0-9]+]]:[[PC1_HI:[0-9]+]]{{\]}}
 ; GCN-NEXT: s_add_u32 s[[PC1_LO]], s[[PC1_LO]], [[BB3:BB[0-9]+_[0-9]+]]-([[LONGBB1]]+4)
 ; GCN-NEXT: s_addc_u32 s[[PC1_HI]], s[[PC1_HI]], 0{{$}}
@@ -451,7 +436,7 @@
 ; GCN: v_nop_e64
 ; GCN: v_nop_e64
 ; GCN: ;;#ASMEND
-; GCN: s_cbranch_vccz [[RET:BB[0-9]+_[0-9]+]]
+; GCN: s_cbranch_{{vccz|vccnz}} [[RET:BB[0-9]+_[0-9]+]]
 
 ; GCN-NEXT: [[LONGBB:BB[0-9]+_[0-9]+]]: ; %loop
 ; GCN-NEXT: ; in Loop: Header=[[LOOP_BODY]] Depth=1
@@ -491,7 +476,7 @@
 ; GCN-LABEL: {{^}}long_branch_hang:
 ; GCN: s_cmp_lt_i32 s{{[0-9]+}}, 6
-; GCN: s_cbranch_scc0 [[LONG_BR_0:BB[0-9]+_[0-9]+]]
+; GCN: s_cbranch_scc{{[0-1]}} [[LONG_BR_0:BB[0-9]+_[0-9]+]]
 ; GCN-NEXT: BB{{[0-9]+_[0-9]+}}:
 
 ; GCN: s_add_u32 s{{[0-9]+}}, s{{[0-9]+}}, [[LONG_BR_DEST0:BB[0-9]+_[0-9]+]]-(
@@ -499,14 +484,14 @@
 ; GCN-NEXT: s_setpc_b64
 
 ; GCN-NEXT: [[LONG_BR_0]]:
-; GCN-DAG: v_cmp_lt_i32
-; GCN-DAG: v_cmp_gt_i32
-; GCN: s_cbranch_vccnz
-
-; GCN: s_setpc_b64
 ; GCN: s_setpc_b64
 
 ; GCN: [[LONG_BR_DEST0]]
+
+; GCN: s_cbranch_vccnz
+; GCN-DAG: v_cmp_lt_i32
+; GCN-DAG: v_cmp_ge_i32
+
 ; GCN: s_cbranch_vccz
 ; GCN: s_setpc_b64
diff --git a/llvm/test/CodeGen/AMDGPU/branch-uniformity.ll b/llvm/test/CodeGen/AMDGPU/branch-uniformity.ll
--- a/llvm/test/CodeGen/AMDGPU/branch-uniformity.ll
+++ b/llvm/test/CodeGen/AMDGPU/branch-uniformity.ll
@@ -8,8 +8,8 @@
 ;
 ; CHECK-LABEL: {{^}}main:
 ; CHECK: ; %LOOP49
-; CHECK: s_cmp_lg_u32 s{{[0-9]+}}, 0
-; CHECK: s_cbranch_scc1
+; CHECK: s_cmp_{{lg|eq}}_u32 s{{[0-9]+}}, 0
+; CHECK: s_cbranch_scc{{[0-1]}}
 ; CHECK: ; %ENDIF53
 define amdgpu_vs float @main(i32 %in) {
 main_body:
diff --git a/llvm/test/CodeGen/AMDGPU/cf-loop-on-constant.ll b/llvm/test/CodeGen/AMDGPU/cf-loop-on-constant.ll
--- a/llvm/test/CodeGen/AMDGPU/cf-loop-on-constant.ll
+++ b/llvm/test/CodeGen/AMDGPU/cf-loop-on-constant.ll
@@ -102,7 +102,7 @@
 ; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80
 ; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, 4
-; GCN: s_cbranch_vccnz [[LOOPBB]]
+; GCN: s_cbranch_{{vccz|vccnz}} [[LOOPBB]]
 ; GCN-NEXT: ; %bb.2
 ; GCN-NEXT: s_endpgm
 define amdgpu_kernel void @loop_arg_0(float addrspace(3)* %ptr, i32 %n) nounwind {
diff --git a/llvm/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll b/llvm/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll
--- a/llvm/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll
+++ b/llvm/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll
@@ -27,13 +27,12 @@
 ; GCN-LABEL: {{^}}sink_ubfe_i32:
 ; GCN-NOT: lshr
-; GCN: s_cbranch_scc1
+; GCN: s_cbranch_scc{{[0-1]}}
 
-; GCN: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80008
-; GCN: BB0_2:
 ; GCN: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x70008
-
 ; GCN: BB0_3:
+; GCN: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80008
+
 ; GCN: buffer_store_dword
 ; GCN: s_endpgm
 define amdgpu_kernel void @sink_ubfe_i32(i32 addrspace(1)* %out, i32 %arg1) #0 {
@@ -122,16 +121,15 @@
 ; GCN-NOT: lshr
 ; VI: s_load_dword [[ARG:s[0-9]+]], s[0:1], 0x2c
 ; VI: s_bfe_u32 [[BFE:s[0-9]+]], [[ARG]], 0xc0004
-; GCN: s_cbranch_scc1
+; GCN: s_cbranch_scc{{[0-1]}}
 
-; SI: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80004
-; VI: v_mov_b32_e32 v{{[0-9]+}}, 0xff
-
-; GCN: BB2_2:
 ; SI: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x70004
 ; VI: v_mov_b32_e32 v{{[0-9]+}}, 0x7f
 
 ; GCN: BB2_3:
+; SI: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80004
+; VI: v_mov_b32_e32 v{{[0-9]+}}, 0xff
+
 ; GCN: buffer_store_short
 ; GCN: s_endpgm
 define amdgpu_kernel void @sink_ubfe_i16(i16 addrspace(1)* %out, i16 %arg1) #0 {
@@ -177,14 +175,13 @@
 ; GCN-LABEL: {{^}}sink_ubfe_i64_span_midpoint:
 
+; GCN: s_cbranch_scc{{[0-1]}} BB3_2
 ; GCN: v_alignbit_b32 v[[LO:[0-9]+]], s{{[0-9]+}}, v{{[0-9]+}}, 30
-; GCN: s_cbranch_scc1 BB3_2
-; GCN: v_and_b32_e32 v{{[0-9]+}}, 0xff, v[[LO]]
-
-; GCN: BB3_2:
 ; GCN: v_and_b32_e32 v{{[0-9]+}}, 0x7f, v[[LO]]
 
 ; GCN: BB3_3:
+; GCN: v_and_b32_e32 v{{[0-9]+}}, 0xff, v[[LO]]
+
 ; GCN: buffer_store_dwordx2
 define amdgpu_kernel void @sink_ubfe_i64_span_midpoint(i64 addrspace(1)* %out, i64 %arg1) #0 {
 entry:
@@ -226,14 +223,13 @@
 ; GCN-LABEL: {{^}}sink_ubfe_i64_low32:
 
-; GCN: s_cbranch_scc1 BB4_2
+; GCN: s_cbranch_scc{{[0-1]}} BB4_2
 
-; GCN: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x8000f
-
-; GCN: BB4_2:
 ; GCN: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x7000f
 
 ; GCN: BB4_3:
+; GCN: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x8000f
+
 ; GCN: buffer_store_dwordx2
 define amdgpu_kernel void @sink_ubfe_i64_low32(i64 addrspace(1)* %out, i64 %arg1) #0 {
 entry:
@@ -274,13 +270,12 @@
 ; OPT: ret
 
 ; GCN-LABEL: {{^}}sink_ubfe_i64_high32:
-; GCN: s_cbranch_scc1 BB5_2
-; GCN: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80003
-
-; GCN: BB5_2:
+; GCN: s_cbranch_scc{{[0-1]}} BB5_2
 ; GCN: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x70003
 
 ; GCN: BB5_3:
+; GCN: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80003
+
 ; GCN: buffer_store_dwordx2
 define amdgpu_kernel void @sink_ubfe_i64_high32(i64 addrspace(1)* %out, i64 %arg1) #0 {
 entry:
diff --git a/llvm/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll b/llvm/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll
--- a/llvm/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll
+++ b/llvm/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll
@@ -89,7 +89,7 @@
 }
 
 ; GCN-LABEL: {{^}}divergent_loop:
-; VGPR: workitem_private_segment_byte_size = 12{{$}}
+; VGPR: workitem_private_segment_byte_size = 16{{$}}
 
 ; GCN: {{^}}; %bb.0:
@@ -123,9 +123,10 @@
 ; GCN: buffer_load_dword v[[VAL_LOOP_RELOAD:[0-9]+]], off, s[0:3], 0 offset:[[LOAD0_OFFSET]] ; 4-byte Folded Reload
 ; GCN: v_subrev_i32_e32 [[VAL_LOOP:v[0-9]+]], vcc, v{{[0-9]+}}, v[[VAL_LOOP_RELOAD]]
 ; GCN: s_cmp_lg_u32
-; GCN: buffer_store_dword [[VAL_LOOP]], off, s[0:3], 0 offset:[[VAL_SUB_OFFSET:[0-9]+]] ; 4-byte Folded Spill
+; GCN: buffer_store_dword [[VAL_LOOP]], off, s[0:3], 0 offset:{{[0-9]+}} ; 4-byte Folded Spill
 ; GCN-NEXT: s_cbranch_scc1 [[LOOP]]
+; GCN: buffer_store_dword [[VAL_LOOP]], off, s[0:3], 0 offset:[[VAL_SUB_OFFSET:[0-9]+]] ; 4-byte Folded Spill
 
 ; GCN: [[END]]:
 ; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_LO_LANE]]
diff --git a/llvm/test/CodeGen/AMDGPU/control-flow-optnone.ll b/llvm/test/CodeGen/AMDGPU/control-flow-optnone.ll
--- a/llvm/test/CodeGen/AMDGPU/control-flow-optnone.ll
+++ b/llvm/test/CodeGen/AMDGPU/control-flow-optnone.ll
@@ -15,8 +15,8 @@
 ; GCN: s_mov_b64 exec
 
 ; GCN: s_or_b64 exec, exec
-; GCN: s_cmp_eq_u32
-; GCN: s_cbranch_scc1
+; GCN: {{[s|v]}}_cmp_eq_u32
+; GCN: s_cbranch
 ; GCN-NEXT: s_branch
 define amdgpu_kernel void @copytoreg_divergent_brcond(i32 %arg, i32 %arg1, i32 %arg2) #0 {
 bb:
diff --git a/llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll b/llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll
--- a/llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll
+++ b/llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll
@@ -178,18 +178,18 @@
 ; GCN-NEXT: v_cmp_eq_u32_e64 s[4:5], s4, 1
 ; GCN-NEXT: s_and_b64 vcc, exec, s[4:5]
 ; GCN-NEXT: s_mov_b32 s32, 0
-; GCN-NEXT: s_cbranch_vccz BB4_2
-; GCN-NEXT: ; %bb.1:
-; GCN-NEXT: s_mov_b32 s4, 0
-; GCN-NEXT: s_mov_b32 s5, s4
-; GCN-NEXT: v_mov_b32_e32 v0, s4
-; GCN-NEXT: v_mov_b32_e32 v1, s5
-; GCN-NEXT: s_branch BB4_3
-; GCN-NEXT: BB4_2: ; %if.else
+; GCN-NEXT: s_cbranch_vccnz BB4_2
+; GCN-NEXT: ; %bb.1: ; %if.else
 ; GCN-NEXT: s_getpc_b64 s[4:5]
 ; GCN-NEXT: s_add_u32 s4, s4, func_v3i16@rel32@lo+4
 ; GCN-NEXT: s_addc_u32 s5, s5, func_v3i16@rel32@hi+4
 ; GCN-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GCN-NEXT: s_branch BB4_3
+; GCN-NEXT: BB4_2:
+; GCN-NEXT: s_mov_b32 s4, 0
+; GCN-NEXT: s_mov_b32 s5, s4
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: v_mov_b32_e32 v1, s5
 ; GCN-NEXT: BB4_3: ; %if.end
 ; GCN-NEXT: global_store_short v[0:1], v1, off
 ; GCN-NEXT: global_store_dword v[0:1], v0, off
@@ -223,18 +223,18 @@
 ; GCN-NEXT: v_cmp_eq_u32_e64 s[4:5], s4, 1
 ; GCN-NEXT: s_and_b64 vcc, exec, s[4:5]
 ; GCN-NEXT: s_mov_b32 s32, 0
-; GCN-NEXT: s_cbranch_vccz BB5_2
-; GCN-NEXT: ; %bb.1:
-; GCN-NEXT: s_mov_b32 s4, 0
-; GCN-NEXT: s_mov_b32 s5, s4
-; GCN-NEXT: v_mov_b32_e32 v0, s4
-; GCN-NEXT: v_mov_b32_e32 v1, s5
-; GCN-NEXT: s_branch BB5_3
-; GCN-NEXT: BB5_2: ; %if.else
+; GCN-NEXT: s_cbranch_vccnz BB5_2
+; GCN-NEXT: ; %bb.1: ; %if.else
 ; GCN-NEXT: s_getpc_b64 s[4:5]
 ; GCN-NEXT: s_add_u32 s4, s4, func_v3f16@rel32@lo+4
 ; GCN-NEXT: s_addc_u32 s5, s5, func_v3f16@rel32@hi+4
 ; GCN-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GCN-NEXT: s_branch BB5_3
+; GCN-NEXT: BB5_2:
+; GCN-NEXT: s_mov_b32 s4, 0
+; GCN-NEXT: s_mov_b32 s5, s4
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: v_mov_b32_e32 v1, s5
 ; GCN-NEXT: BB5_3: ; %if.end
 ; GCN-NEXT: global_store_short v[0:1], v1, off
 ; GCN-NEXT: global_store_dword v[0:1], v0, off
diff --git a/llvm/test/CodeGen/AMDGPU/early-if-convert.ll b/llvm/test/CodeGen/AMDGPU/early-if-convert.ll
--- a/llvm/test/CodeGen/AMDGPU/early-if-convert.ll
+++ b/llvm/test/CodeGen/AMDGPU/early-if-convert.ll
@@ -30,7 +30,6 @@
 ; GCN: v_cmp_neq_f32_e32 vcc, 1.0, [[VAL]]
 ; GCN-DAG: v_add_f32_e32 [[ADD:v[0-9]+]], [[VAL]], [[VAL]]
 ; GCN-DAG: v_mul_f32_e32 [[MUL:v[0-9]+]], [[VAL]], [[VAL]]
-; GCN: v_cndmask_b32_e32 [[RESULT:v[0-9]+]], [[ADD]], [[MUL]], vcc
 ; GCN: buffer_store_dword [[RESULT]]
 define amdgpu_kernel void @test_vccnz_ifcvt_diamond(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
 entry:
diff --git a/llvm/test/CodeGen/AMDGPU/i1-copy-phi-uniform-branch.ll b/llvm/test/CodeGen/AMDGPU/i1-copy-phi-uniform-branch.ll
--- a/llvm/test/CodeGen/AMDGPU/i1-copy-phi-uniform-branch.ll
+++ b/llvm/test/CodeGen/AMDGPU/i1-copy-phi-uniform-branch.ll
@@ -4,11 +4,11 @@
 ; GCN: ; %entry
 ; GCN: s_cmp_eq_u32 s0, 0
-; GCN: s_cbranch_scc1 [[PREEXIT:BB[0-9_]+]]
+; GCN: s_cbranch_scc1 [[EXIT:BB[0-9_]+]]
 
 ; GCN: ; %blocka
 ; GCN: s_cmp_eq_u32 s1, 0
-; GCN: s_cbranch_scc1 [[EXIT:BB[0-9_]+]]
+; GCN: s_cbranch_scc1 [[PREEXIT:BB[0-9_]+]]
 
 ; GCN: [[PREEXIT]]:
 ; GCN: [[EXIT]]:
diff --git a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
--- a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
+++ b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
@@ -630,6 +630,7 @@
 ; GCN-LABEL: {{^}}broken_phi_bb:
 ; GCN: v_mov_b32_e32 [[PHIREG:v[0-9]+]], 8
 
+; GCN: {{BB[0-9]+_[0-9]+}}:
 ; GCN: [[BB2:BB[0-9]+_[0-9]+]]:
 ; GCN: v_cmp_le_i32_e32 vcc, s{{[0-9]+}}, [[PHIREG]]
 ; GCN: buffer_load_dword
@@ -645,7 +646,7 @@
 ; GCN: {{^; %bb.[0-9]}}:
 ; GCN: s_mov_b64 exec,
-; GCN: s_branch [[BB2]]
+; GCN: s_cbranch_vccnz [[BB2]]
 define amdgpu_kernel void @broken_phi_bb(i32 %arg, i32 %arg1) #0 {
 bb:
diff --git a/llvm/test/CodeGen/AMDGPU/infinite-loop.ll b/llvm/test/CodeGen/AMDGPU/infinite-loop.ll
--- a/llvm/test/CodeGen/AMDGPU/infinite-loop.ll
+++ b/llvm/test/CodeGen/AMDGPU/infinite-loop.ll
@@ -77,28 +77,36 @@
 ; SI-LABEL: infinite_loops:
 ; SI: ; %bb.0: ; %entry
 ; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT: s_mov_b64 s[2:3], -1
+; SI-NEXT: s_cbranch_scc1 BB2_4
+; SI-NEXT: ; %bb.1:
 ; SI-NEXT: s_mov_b32 s3, 0xf000
 ; SI-NEXT: s_mov_b32 s2, -1
-; SI-NEXT: s_cbranch_scc0 BB2_3
-; SI-NEXT: ; %bb.1: ; %loop1.preheader
-; SI-NEXT: v_mov_b32_e32 v0, 0x3e7
+; SI-NEXT: v_mov_b32_e32 v0, 0x378
 ; SI-NEXT: s_and_b64 vcc, exec, -1
-; SI-NEXT: BB2_2: ; %loop1
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: BB2_2:
+; SI: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; SI-NEXT: s_cbranch_vccnz BB2_2
-; SI-NEXT: s_branch BB2_5
-; SI-NEXT: BB2_3:
-; SI-NEXT: v_mov_b32_e32 v0, 0x378
-; SI-NEXT: s_and_b64 vcc, exec, -1
-; SI-NEXT: BB2_4: ; %loop2
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: ; %bb.3:
+; SI-NEXT: s_mov_b64 s[2:3], 0
+; SI-NEXT: BB2_4:
+; SI-NEXT: s_and_b64 vcc, exec, s[2:3]
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
-; SI-NEXT: s_cbranch_vccnz BB2_4
-; SI-NEXT: BB2_5: ; %DummyReturnBlock
+; SI-NEXT: s_mov_b64 vcc, vcc
+; SI-NEXT: s_cbranch_vccz BB2_7
+; SI-NEXT: ; %bb.5:
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v0, 0x3e7
+; SI-NEXT: s_and_b64 vcc, exec, 0
+; SI-NEXT: BB2_6:
+; SI: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_cbranch_vccz BB2_6
+; SI-NEXT: BB2_7:
 ; SI-NEXT: s_endpgm
+
 ; IR-LABEL: @infinite_loops(
 ; IR-NEXT: entry:
 ; IR-NEXT: br i1 undef, label [[LOOP1:%.*]], label [[LOOP2:%.*]]
diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
@@ -1334,10 +1334,19 @@
 ; SI-NEXT: s_cbranch_scc0 BB26_2
 ; SI-NEXT: ; %bb.1: ; %else
 ; SI-NEXT: s_load_dword s1, s[6:7], 0x1
-; SI-NEXT: s_branch BB26_3
-; SI-NEXT: BB26_2: ; %if
+; SI-NEXT: s_mov_b64 s[2:3], 0
+; SI-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b64 vcc, vcc
+; SI-NEXT: s_cbranch_vccz BB26_3
+; SI-NEXT: s_branch BB26_4
+; SI-NEXT: BB26_2:
+; SI-NEXT: s_mov_b64 s[2:3], -1
+; SI-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; SI-NEXT: s_cbranch_vccnz BB26_4
+; SI-NEXT: BB26_3: ; %if
 ; SI-NEXT: s_load_dword s1, s[6:7], 0x0
-; SI-NEXT: BB26_3: ; %endif
+; SI-NEXT: BB26_4: ; %endif
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: v_mov_b32_e32 v0, s0
 ; SI-NEXT: s_mov_b32 s7, 0x100f000
@@ -1353,12 +1362,20 @@
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: s_cmp_lg_u32 s0, 0
 ; VI-NEXT: s_cbranch_scc0 BB26_2
-; VI-NEXT: ; %bb.1: ; %else
+; VI-NEXT: ; %bb.1: ; %else
 ; VI-NEXT: s_load_dword s1, s[6:7], 0x4
-; VI-NEXT: s_branch BB26_3
-; VI-NEXT: BB26_2: ; %if
+; VI-NEXT: s_mov_b64 s[2:3], 0
+; VI-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; VI-NEXT: s_cbranch_vccz BB26_3
+; VI-NEXT: s_branch BB26_4
+; VI-NEXT: BB26_2:
+; VI-NEXT: s_mov_b64 s[2:3], -1
+; VI-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; VI-NEXT: s_cbranch_vccnz BB26_4
+; VI-NEXT: BB26_3: ; %if
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: s_load_dword s1, s[6:7], 0x0
-; VI-NEXT: BB26_3: ; %endif
+; VI-NEXT: BB26_4: ; %endif
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: v_mov_b32_e32 v0, s0
 ; VI-NEXT: s_mov_b32 s7, 0x1100f000
diff --git a/llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll b/llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll
--- a/llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll
+++ b/llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll
@@ -31,10 +31,10 @@
 ; GCN-NEXT: BB0_3: ; %bb8
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN-NEXT: ds_read_b32 v0, v0
-; GCN-NEXT: s_and_b64 vcc, exec, -1
+; GCN-NEXT: s_and_b64 vcc, exec, 0
 ; GCN-NEXT: BB0_4: ; %bb9
 ; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-NEXT: s_cbranch_vccnz BB0_4
+; GCN-NEXT: s_cbranch_vccz BB0_4
 ; GCN-NEXT: BB0_5: ; %DummyReturnBlock
 ; GCN-NEXT: s_endpgm
 ; IR-LABEL: @reduced_nested_loop_conditions(
@@ -144,33 +144,39 @@
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 ; GCN-NEXT: v_cmp_lt_i32_e32 vcc, 8, v0
 ; GCN-NEXT: s_and_b64 vcc, exec, vcc
-; GCN-NEXT: s_cbranch_vccnz BB1_5
+; GCN-NEXT: s_cbranch_vccnz BB1_6
+
 ; GCN-NEXT: ; %bb.1: ; %bb14.lr.ph
 ; GCN-NEXT: buffer_load_dword v0, off, s[0:3], 0
-; GCN-NEXT: BB1_2: ; %bb14
+; GCN-NEXT: s_branch BB1_3
+; GCN-NEXT: BB1_2: ; in Loop: Header=BB1_3 Depth=1
+; GCN-NEXT: s_mov_b64 s[0:1], -1
+; GCN-NEXT: ; implicit-def: $vgpr0
+; GCN-NEXT: s_cbranch_execnz BB1_6
+; GCN-NEXT: BB1_3: ; %bb14
 ; GCN-NEXT: ; =>This Loop Header: Depth=1
-; GCN-NEXT: ; Child Loop BB1_3 Depth 2
+; GCN-NEXT: ; Child Loop BB1_4 Depth 2
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 ; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 1, v0
 ; GCN-NEXT: s_and_b64 vcc, exec, vcc
-; GCN-NEXT: s_cbranch_vccnz BB1_5
-; GCN-NEXT: BB1_3: ; %bb18
-; GCN-NEXT: ; Parent Loop BB1_2 Depth=1
-; GCN-NEXT: ; => This Inner Loop Header: Depth=2
+; GCN-NEXT: s_cbranch_vccnz BB1_2
+; GCN-NEXT: BB1_4: ; %bb18
+; GCN-NEXT: ; Parent Loop BB1_3 Depth=1
+; GCN-NEXT: ; => This Inner Loop Header: Depth=2
 ; GCN-NEXT: buffer_load_dword v0, off, s[0:3], 0
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 ; GCN-NEXT: v_cmp_lt_i32_e32 vcc, 8, v0
 ; GCN-NEXT: s_and_b64 vcc, exec, vcc
-; GCN-NEXT: s_cbranch_vccnz BB1_3
-; GCN-NEXT: ; %bb.4: ; %bb21
-; GCN-NEXT: ; in Loop: Header=BB1_2 Depth=1
+; GCN-NEXT: s_cbranch_vccnz BB1_4
+; GCN-NEXT: ; %bb.5: ; %bb21
+; GCN-NEXT: ; in Loop: Header=BB1_3 Depth=1
 ; GCN-NEXT: buffer_load_dword v0, off, s[0:3], 0
 ; GCN-NEXT: buffer_load_dword v1, off, s[0:3], 0
 ; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_cmp_gt_i32_e32 vcc, 9, v1
-; GCN-NEXT: s_and_b64 vcc, exec, vcc
-; GCN-NEXT: s_cbranch_vccnz BB1_2
-; GCN-NEXT: BB1_5: ; %bb31
+; GCN-NEXT: v_cmp_lt_i32_e64 s[0:1], 8, v1
+; GCN-NEXT: s_and_b64 vcc, exec, s[0:1]
+; GCN-NEXT: s_cbranch_vccz BB1_3
+; GCN-NEXT: BB1_6: ; %bb31
 ; GCN-NEXT: v_mov_b32_e32 v0, 0
 ; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; GCN-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll b/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll
--- a/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll
+++ b/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll
@@ -35,12 +35,19 @@
 }
 
 ; GCN-LABEL: {{^}}negated_cond_dominated_blocks:
-; GCN: v_cmp_eq_u32_e64 [[CC:[^,]+]],
-; GCN: %bb4
+; GCN: v_cmp_ne_u32_e64 [[CC1:[^,]+]],
+; GCN: s_branch [[BB1:BB[0-9]+_[0-9]+]]
+; GCN: [[BB0:BB[0-9]+_[0-9]+]]
 ; GCN-NOT: v_cndmask_b32
 ; GCN-NOT: v_cmp
-; GCN: s_andn2_b64 vcc, exec, [[CC]]
-; GCN: s_cbranch_vccnz BB1_1
+; GCN: [[BB1]]:
+; GCN: s_mov_b64 [[CC2:[^,]+]], -1
+; GCN: s_mov_b64 vcc, [[CC1]]
+; GCN: s_cbranch_vccz [[BB2:BB[0-9]+_[0-9]+]]
+; GCN: s_mov_b64 [[CC2]], 0
+; GCN: [[BB2]]:
+; GCN: s_andn2_b64 vcc, exec, [[CC2]]
+; GCN: s_cbranch_vccnz [[BB0]]
 define amdgpu_kernel void @negated_cond_dominated_blocks(i32 addrspace(1)* %arg1) {
 bb:
 br label %bb2
diff --git a/llvm/test/CodeGen/AMDGPU/salu-to-valu.ll b/llvm/test/CodeGen/AMDGPU/salu-to-valu.ll
--- a/llvm/test/CodeGen/AMDGPU/salu-to-valu.ll
+++ b/llvm/test/CodeGen/AMDGPU/salu-to-valu.ll
@@ -55,10 +55,10 @@
 ; GCN-LABEL: {{^}}smrd_valu:
 ; SI: s_movk_i32 [[OFFSET:s[0-9]+]], 0x2ee0
-; SI: s_mov_b32
 ; GCN: v_readfirstlane_b32 s[[PTR_LO:[0-9]+]], v{{[0-9]+}}
 ; GCN: v_readfirstlane_b32 s[[PTR_HI:[0-9]+]], v{{[0-9]+}}
-; SI: s_nop 3
+; SI: s_mov_b32
+; SI: s_nop 1
 ; SI: s_load_dword [[OUT:s[0-9]+]], s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, [[OFFSET]]
 
 ; CI: s_load_dword [[OUT:s[0-9]+]], s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0xbb8
diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -144,72 +144,63 @@
 ; GCN-IR-LABEL: s_test_sdiv:
 ; GCN-IR: ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-IR-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GCN-IR-NEXT: s_load_dwordx2 s[10:11], s[0:1], 0xd
 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN-IR-NEXT: s_ashr_i32 s2, s7, 31
 ; GCN-IR-NEXT: s_mov_b32 s3, s2
-; GCN-IR-NEXT: s_ashr_i32 s8, s1, 31
-; GCN-IR-NEXT: s_xor_b64 s[6:7], s[2:3], s[6:7]
-; GCN-IR-NEXT: s_sub_u32 s10, s6, s2
+; GCN-IR-NEXT: s_ashr_i32 s8, s11, 31
+; GCN-IR-NEXT: s_xor_b64 s[0:1], s[2:3], s[6:7]
+; GCN-IR-NEXT: s_sub_u32 s0, s0, s2
 ; GCN-IR-NEXT: s_mov_b32 s9, s8
-; GCN-IR-NEXT: s_subb_u32 s11, s7, s2
-; GCN-IR-NEXT: s_xor_b64 s[0:1], s[8:9], s[0:1]
-; GCN-IR-NEXT: s_sub_u32 s6, s0, s8
-; GCN-IR-NEXT: s_subb_u32 s7, s1, s8
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[10:11], 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[6:7], 0
+; GCN-IR-NEXT: s_subb_u32 s1, s1, s2
+; GCN-IR-NEXT: s_xor_b64 s[6:7], s[8:9], s[10:11]
+; GCN-IR-NEXT: s_sub_u32 s6, s6, s8
+; GCN-IR-NEXT: s_flbit_i32_b32 s14, s6
+; GCN-IR-NEXT: s_subb_u32 s7, s7, s8
+; GCN-IR-NEXT: s_add_i32 s14, s14, 32
+; GCN-IR-NEXT: s_flbit_i32_b32 s15, s7
+; GCN-IR-NEXT: v_mov_b32_e32 v1, s14
+; GCN-IR-NEXT: s_flbit_i32_b32 s14, s0
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s15
 ; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0
-; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[0:1]
-; GCN-IR-NEXT: s_flbit_i32_b32 s0, s6
-; GCN-IR-NEXT: s_add_i32 s0, s0, 32
-; GCN-IR-NEXT: s_flbit_i32_b32 s1, s7
-; GCN-IR-NEXT: v_mov_b32_e32 v1, s0
-; GCN-IR-NEXT: s_flbit_i32_b32 s0, s10
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s1
-; GCN-IR-NEXT: s_add_i32 s0, s0, 32
-; GCN-IR-NEXT: s_flbit_i32_b32 s1, s11
+; GCN-IR-NEXT: s_add_i32 s14, s14, 32
+; GCN-IR-NEXT: s_flbit_i32_b32 s15, s1
 ; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s1
-; GCN-IR-NEXT: v_mov_b32_e32 v1, s0
-; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s11, 0
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s15
+; GCN-IR-NEXT: v_mov_b32_e32 v1, s14
+; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s1, 0
 ; GCN-IR-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT: v_subb_u32_e64 v1, s[0:1], 0, 0, vcc
-; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], 63, v[0:1]
-; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], vcc
-; GCN-IR-NEXT: s_or_b64 s[0:1], s[12:13], s[0:1]
-; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT: s_cbranch_vccz BB0_2
-; GCN-IR-NEXT: ; %bb.1:
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s11
-; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[12:13]
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s10
-; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[12:13]
-; GCN-IR-NEXT: s_branch BB0_7
-; GCN-IR-NEXT: BB0_2: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 1, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1]
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0
-; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[10:11], v0
-; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v2, v3
+; GCN-IR-NEXT: v_subb_u32_e64 v5, s[14:15], 0, 0, vcc
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[6:7], 0
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[0:1], 0
+; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[4:5]
+; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[12:13]
+; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], vcc
+; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[12:13], s[10:11], -1
+; GCN-IR-NEXT: s_and_b64 s[12:13], s[12:13], vcc
+; GCN-IR-NEXT: s_and_b64 vcc, exec, s[12:13]
 ; GCN-IR-NEXT: s_cbranch_vccz BB0_4
-; GCN-IR-NEXT: ; %bb.3:
-; GCN-IR-NEXT: v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT: v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT: s_branch BB0_6
-; GCN-IR-NEXT: BB0_4: ; %udiv-preheader
+; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v4
+; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v5, vcc
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v4
+; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[4:5]
+; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[0:1], v0
+; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc
+; GCN-IR-NEXT: s_cbranch_vccz BB0_5
+; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT: v_not_b32_e32 v2, v2
-; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[10:11], v4
 ; GCN-IR-NEXT: s_add_u32 s10, s6, -1
 ; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, v2, v3
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[0:1], v6
 ; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
 ; GCN-IR-NEXT: s_addc_u32 s11, s7, -1
 ; GCN-IR-NEXT: v_addc_u32_e64 v5, s[0:1], -1, 0, vcc
 ; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT: v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT: BB0_5: ; %udiv-do-while
+; GCN-IR-NEXT: BB0_3: ; %udiv-do-while
 ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1
@@ -234,20 +225,30 @@
 ; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
 ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc
 ; GCN-IR-NEXT: v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT: s_cbranch_vccz BB0_5
-; GCN-IR-NEXT: BB0_6: ; %udiv-loop-exit
+; GCN-IR-NEXT: s_cbranch_vccz BB0_3
+; GCN-IR-NEXT: s_branch BB0_6
+; GCN-IR-NEXT: BB0_4:
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s1
+; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[10:11]
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s0
+; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[10:11]
+; GCN-IR-NEXT: s_branch BB0_7
+; GCN-IR-NEXT: BB0_5:
+; GCN-IR-NEXT: v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT: v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT: BB0_6: ; %Flow6
 ; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1
-; GCN-IR-NEXT: BB0_7: ; %udiv-end
+; GCN-IR-NEXT: BB0_7: ; %Flow7
 ; GCN-IR-NEXT: s_xor_b64 s[0:1], s[8:9], s[2:3]
 ; GCN-IR-NEXT: v_xor_b32_e32 v0, s0, v0
 ; GCN-IR-NEXT: v_xor_b32_e32 v1, s1, v1
 ; GCN-IR-NEXT: v_mov_b32_e32 v2, s1
 ; GCN-IR-NEXT: v_subrev_i32_e32 v0, vcc, s0, v0
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc
 ; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
 ; GCN-IR-NEXT: s_mov_b32 s6, -1
-; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc
 ; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT: s_endpgm
 %result = sdiv i64 %x, %y
@@ -1006,82 +1007,71 @@
 ;
 ; GCN-IR-LABEL: s_test_sdiv24_48:
 ; GCN-IR: ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
 ; GCN-IR-NEXT: s_load_dword s2, s[0:1], 0xb
 ; GCN-IR-NEXT: s_load_dword s3, s[0:1], 0xc
-; GCN-IR-NEXT: s_load_dword s4, s[0:1], 0xd
-; GCN-IR-NEXT: s_load_dword s5, s[0:1], 0xe
+; GCN-IR-NEXT: s_load_dword s6, s[0:1], 0xd
+; GCN-IR-NEXT: s_load_dword s0, s[0:1], 0xe
 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN-IR-NEXT: s_sext_i32_i16 s3, s3
-; GCN-IR-NEXT: s_ashr_i64 s[8:9], s[2:3], 24
+; GCN-IR-NEXT: s_sext_i32_i16 s7, s0
+; GCN-IR-NEXT: s_ashr_i64 s[0:1], s[2:3], 24
 ; GCN-IR-NEXT: s_ashr_i32 s2, s3, 31
-; GCN-IR-NEXT: s_sext_i32_i16 s5, s5
 ; GCN-IR-NEXT: s_mov_b32 s3, s2
-; GCN-IR-NEXT: s_ashr_i32 s6, s5, 31
-; GCN-IR-NEXT: s_ashr_i64 s[12:13], s[4:5], 24
-; GCN-IR-NEXT: s_xor_b64 s[4:5], s[2:3], s[8:9]
-; GCN-IR-NEXT: s_sub_u32 s10, s4, s2
+; GCN-IR-NEXT: s_ashr_i64 s[8:9], s[6:7], 24
+; GCN-IR-NEXT: s_ashr_i32 s6, s7, 31
+; GCN-IR-NEXT: s_xor_b64 s[0:1], s[2:3], s[0:1]
+; GCN-IR-NEXT: s_sub_u32 s0, s0, s2
 ; GCN-IR-NEXT: s_mov_b32 s7, s6
-; GCN-IR-NEXT: s_subb_u32 s11, s5, s2
-; GCN-IR-NEXT: s_xor_b64 s[4:5], s[6:7], s[12:13]
-; GCN-IR-NEXT: s_sub_u32 s8, s4, s6
-; GCN-IR-NEXT: s_subb_u32 s9, s5, s6
-; GCN-IR-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
-; GCN-IR-NEXT: s_flbit_i32_b32 s0, s8
-; GCN-IR-NEXT: s_add_i32 s0, s0, 32
-; GCN-IR-NEXT: s_flbit_i32_b32 s1, s9
-; GCN-IR-NEXT: v_mov_b32_e32 v1, s0
-; GCN-IR-NEXT: s_flbit_i32_b32 s0, s10
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s1
+; GCN-IR-NEXT: s_subb_u32 s1, s1, s2
+; GCN-IR-NEXT: s_xor_b64 s[8:9], s[6:7], s[8:9]
+; GCN-IR-NEXT: s_sub_u32 s8, s8, s6
+; GCN-IR-NEXT: s_flbit_i32_b32 s14, s8
+; GCN-IR-NEXT: s_subb_u32 s9, s9, s6
+; GCN-IR-NEXT: s_add_i32 s14, s14, 32
+; GCN-IR-NEXT: s_flbit_i32_b32 s15, s9
+; GCN-IR-NEXT: v_mov_b32_e32 v1, s14
+; GCN-IR-NEXT: s_flbit_i32_b32 s14, s0
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s15
 ; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s9, 0
-; GCN-IR-NEXT: s_add_i32 s0, s0, 32
-; GCN-IR-NEXT: s_flbit_i32_b32 s1, s11
+; GCN-IR-NEXT: s_add_i32 s14, s14, 32
+; GCN-IR-NEXT: s_flbit_i32_b32 s15, s1
 ; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s1
-; GCN-IR-NEXT: v_mov_b32_e32 v1, s0
-; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s11, 0
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s15
+; GCN-IR-NEXT: v_mov_b32_e32 v1, s14
+; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s1, 0
 ; GCN-IR-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT: v_subb_u32_e64 v1, s[0:1], 0, 0, vcc
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[8:9], 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[14:15], s[10:11], 0
-; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[14:15]
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], 63, v[0:1]
-; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], vcc
-; GCN-IR-NEXT: s_or_b64 s[0:1], s[12:13], s[0:1]
-; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT: s_mov_b64 vcc, vcc
-; GCN-IR-NEXT: s_cbranch_vccz BB9_2
-; GCN-IR-NEXT: ; %bb.1:
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s11
-; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[12:13]
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s10
-; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[12:13]
-; GCN-IR-NEXT: s_branch BB9_7
-; GCN-IR-NEXT: BB9_2: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 1, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1]
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0
-; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[10:11], v0
-; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v2, v3
+; GCN-IR-NEXT: v_subb_u32_e64 v5, s[14:15], 0, 0, vcc
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[8:9], 0
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[0:1], 0
+; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[4:5]
+; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[12:13]
+; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], vcc
+; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[12:13], s[10:11], -1
+; GCN-IR-NEXT: s_and_b64 s[12:13], s[12:13], vcc
+; GCN-IR-NEXT: s_and_b64 vcc, exec, s[12:13]
 ; GCN-IR-NEXT: s_cbranch_vccz BB9_4
-; GCN-IR-NEXT: ; %bb.3:
-; GCN-IR-NEXT: v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT: v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT: s_branch BB9_6
-; GCN-IR-NEXT: BB9_4: ; %udiv-preheader
+; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v4
+; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v5, vcc
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v4
+; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[4:5]
+; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[0:1], v0
+; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc
+; GCN-IR-NEXT: s_cbranch_vccz BB9_5
+; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT: v_not_b32_e32 v2, v2
-; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[10:11], v4
 ; GCN-IR-NEXT: s_add_u32 s10, s8, -1
 ; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, v2, v3
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[0:1], v6
 ; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
 ; GCN-IR-NEXT: s_addc_u32 s11, s9, -1
 ; GCN-IR-NEXT: v_addc_u32_e64 v5, s[0:1], -1, 0, vcc
 ; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT: v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT: BB9_5: ; %udiv-do-while
+; GCN-IR-NEXT: BB9_3: ; %udiv-do-while
 ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1
@@ -1106,12 +1096,22 @@
 ; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
 ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc
 ; GCN-IR-NEXT: v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT: s_cbranch_vccz BB9_5
-; GCN-IR-NEXT: BB9_6: ; %udiv-loop-exit
+; GCN-IR-NEXT: s_cbranch_vccz BB9_3
+; GCN-IR-NEXT: s_branch BB9_6
+; GCN-IR-NEXT: BB9_4:
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s1
+; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[10:11]
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s0
+; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[10:11]
+; GCN-IR-NEXT: s_branch BB9_7
+; GCN-IR-NEXT: BB9_5:
+; GCN-IR-NEXT: v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT: v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT: BB9_6: ; %Flow3
 ; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1
-; GCN-IR-NEXT: BB9_7: ; %udiv-end
+; GCN-IR-NEXT: BB9_7: ; %Flow4
 ; GCN-IR-NEXT: s_xor_b64 s[0:1], s[6:7], s[2:3]
 ; GCN-IR-NEXT: v_xor_b32_e32 v0, s0, v0
 ; GCN-IR-NEXT: v_xor_b32_e32 v1, s1, v1
@@ -1262,64 +1262,56 @@
 ; GCN-IR-NEXT: s_ashr_i32 s2, s7, 31
 ; GCN-IR-NEXT: s_mov_b32 s3, s2
 ; GCN-IR-NEXT: s_xor_b64 s[0:1], s[2:3], s[6:7]
-; GCN-IR-NEXT: s_sub_u32 s8, s0, s2
-; GCN-IR-NEXT: s_subb_u32 s9, s1, s2
-; GCN-IR-NEXT: s_flbit_i32_b32 s0, s8
-; GCN-IR-NEXT: s_add_i32 s0, s0, 32
-; GCN-IR-NEXT: s_flbit_i32_b32 s1, s9
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s1
-; GCN-IR-NEXT: v_mov_b32_e32 v1, s0
-; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s9, 0
+; GCN-IR-NEXT: s_sub_u32 s6, s0, s2
+; GCN-IR-NEXT: s_subb_u32 s7, s1, s2
+; GCN-IR-NEXT: s_flbit_i32_b32 s8, s6
+; GCN-IR-NEXT: s_add_i32 s8, s8, 32
+; GCN-IR-NEXT: s_flbit_i32_b32 s9, s7
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s9
+; GCN-IR-NEXT: v_mov_b32_e32 v1, s8
+; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 0xffffffc5, v2
-; GCN-IR-NEXT: v_addc_u32_e64 v1, s[0:1], 0, -1, vcc
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[8:9], 0
-; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], 63, v[0:1]
-; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], vcc
-; GCN-IR-NEXT: s_or_b64 s[0:1], s[10:11], s[0:1]
-; GCN-IR-NEXT: s_mov_b32 s6, -1
-; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT: s_cbranch_vccz BB10_2
-; GCN-IR-NEXT: ; %bb.1:
-; GCN-IR-NEXT: v_mov_b32_e32 v1, 0
-; GCN-IR-NEXT: v_cndmask_b32_e64 v0, 24, 0, s[10:11]
-; GCN-IR-NEXT: s_branch BB10_7
-; GCN-IR-NEXT: BB10_2: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, 1, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1]
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0
-; GCN-IR-NEXT: v_lshl_b64 v[0:1], 24, v0
-; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, 0xffffffc5, v2
+; GCN-IR-NEXT: v_addc_u32_e64 v4, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], 0
+; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[3:4]
+; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[3:4]
+; GCN-IR-NEXT: s_xor_b64 s[8:9], s[0:1], -1
+; GCN-IR-NEXT: s_and_b64 s[8:9], s[8:9], vcc
+; GCN-IR-NEXT: s_and_b64 vcc, exec, s[8:9]
 ; GCN-IR-NEXT: s_cbranch_vccz BB10_4
-; GCN-IR-NEXT: ; %bb.3:
-; GCN-IR-NEXT: v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT: v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT: s_branch BB10_6
-; GCN-IR-NEXT: BB10_4: ; %udiv-preheader
-; GCN-IR-NEXT: s_add_u32 s7, s8, -1
-; GCN-IR-NEXT: v_lshr_b64 v[6:7], 24, v3
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, 58, v2
+; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT: v_add_i32_e32 v5, vcc, 1, v3
+; GCN-IR-NEXT: v_addc_u32_e32 v6, vcc, 0, v4, vcc
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v3
+; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[5:6], v[3:4]
+; GCN-IR-NEXT: v_lshl_b64 v[0:1], 24, v0
+; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc
+; GCN-IR-NEXT: s_cbranch_vccz BB10_5
+; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
+; GCN-IR-NEXT: s_add_u32 s8, s6, -1
 ; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT: s_addc_u32 s10, s9, -1
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], 24, v5
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, 58, v2
+; GCN-IR-NEXT: s_addc_u32 s9, s7, -1
 ; GCN-IR-NEXT: v_subb_u32_e64 v5, s[0:1], 0, 0, vcc
 ; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT: v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT: BB10_5: ; %udiv-do-while
+; GCN-IR-NEXT: BB10_3: ; %udiv-do-while
 ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1
 ; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v2
 ; GCN-IR-NEXT: v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT: v_mov_b32_e32 v2, s10
-; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s7, v6
+; GCN-IR-NEXT: v_mov_b32_e32 v2, s9
+; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s8, v6
 ; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, v2, v7, vcc
 ; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT: v_and_b32_e32 v10, s8, v8
+; GCN-IR-NEXT: v_and_b32_e32 v10, s6, v8
 ; GCN-IR-NEXT: v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT: v_and_b32_e32 v11, s9, v8
+; GCN-IR-NEXT: v_and_b32_e32 v11, s7, v8
 ; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v4
 ; GCN-IR-NEXT: v_or_b32_e32 v1, v9, v1
 ; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v5, vcc
@@ -1331,8 +1323,16 @@
 ; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
 ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc
 ; GCN-IR-NEXT: v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT: s_cbranch_vccz BB10_5
-; GCN-IR-NEXT: BB10_6: ; %udiv-loop-exit
+; GCN-IR-NEXT: s_cbranch_vccz BB10_3
+; GCN-IR-NEXT: s_branch BB10_6
+; GCN-IR-NEXT: BB10_4:
+; GCN-IR-NEXT: v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT: v_cndmask_b32_e64 v0, 24, 0, s[0:1]
+; GCN-IR-NEXT: s_branch BB10_7
+; GCN-IR-NEXT: BB10_5:
+; GCN-IR-NEXT: v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT: v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT: BB10_6: ; %Flow5
 ; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1
@@ -1341,8 +1341,9 @@
 ; GCN-IR-NEXT: v_xor_b32_e32 v1, s3, v1
 ; GCN-IR-NEXT: v_mov_b32_e32 v2, s3
 ; GCN-IR-NEXT: v_subrev_i32_e32 v0, vcc, s2, v0
-; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc
+; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT: s_mov_b32 s6, -1
 ; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT: s_endpgm
 %result = sdiv i64 24, %x
diff --git a/llvm/test/CodeGen/AMDGPU/setcc.ll b/llvm/test/CodeGen/AMDGPU/setcc.ll
--- a/llvm/test/CodeGen/AMDGPU/setcc.ll
+++ b/llvm/test/CodeGen/AMDGPU/setcc.ll
@@ -397,9 +397,9 @@
 }
 
 ; FUNC-LABEL: setcc-i1-and-xor
-; GCN-DAG: v_cmp_nge_f32_e64 [[A:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 0{{$}}
-; GCN-DAG: v_cmp_nle_f32_e64 [[B:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 1.0
-; GCN: s_or_b64 s[2:3], [[A]], [[B]]
+; GCN-DAG: v_cmp_ge_f32_e64 [[A:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 0{{$}}
+; GCN-DAG: v_cmp_le_f32_e64 [[B:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 1.0
+; GCN: s_and_b64 s[2:3], [[A]], [[B]]
 define amdgpu_kernel void @setcc-i1-and-xor(i32 addrspace(1)* %out, float %cond) #0 {
 bb0:
 %tmp5 = fcmp oge float %cond, 0.000000e+00
diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll b/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
--- a/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
@@ -19,10 +19,18 @@
 ; SI-NEXT: s_cbranch_scc0 BB0_2
 ; SI-NEXT: ; %bb.1: ; %else
 ; SI-NEXT: s_add_i32 s0, s11, s0
-; SI-NEXT: s_branch BB0_3
-; SI-NEXT: BB0_2: ; %if
+; SI-NEXT: s_mov_b64 s[2:3], 0
+; SI-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; SI-NEXT: s_cbranch_vccz BB0_3
+; SI-NEXT: s_branch BB0_4
+; SI-NEXT: BB0_2:
+; SI-NEXT: s_mov_b64 s[2:3], -1
+; SI-NEXT: ; implicit-def: $sgpr0
+; SI-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; SI-NEXT: s_cbranch_vccnz BB0_4
+; SI-NEXT: BB0_3: ; %if
 ; SI-NEXT: s_sub_i32 s0, s9, s10
-; SI-NEXT: BB0_3: ; %endif
+; SI-NEXT: BB0_4: ; %endif
 ; SI-NEXT: s_add_i32 s0, s0, s8
 ; SI-NEXT: s_mov_b32 s7, 0xf000
 ; SI-NEXT: s_mov_b32 s6, -1
@@ -58,15 +66,25 @@
 ; SI-NEXT: s_cbranch_scc0 BB1_2
 ; SI-NEXT: ; %bb.1: ; %else
 ; SI-NEXT: s_load_dword s3, s[0:1], 0x2e
-; SI-NEXT: s_load_dword s0, s[0:1], 0x37
-; SI-NEXT: s_branch BB1_3
-; SI-NEXT: BB1_2: ; %if
+; SI-NEXT: s_load_dword s6, s[0:1], 0x37
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_add_i32 s3, s3, s6
+; SI-NEXT: s_mov_b64 s[6:7], 0
+; SI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; SI-NEXT: s_cbranch_vccz BB1_3
+; SI-NEXT: s_branch BB1_4
+; SI-NEXT: BB1_2:
+; SI-NEXT: s_mov_b64 s[6:7], -1
+; SI-NEXT: ; implicit-def: $sgpr3
+; SI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; SI-NEXT: s_cbranch_vccnz BB1_4
+; SI-NEXT: BB1_3: ; %if
 ; SI-NEXT: s_load_dword s3, s[0:1], 0x1c
 ; SI-NEXT: s_load_dword s0, s[0:1], 0x25
-; SI-NEXT: BB1_3: ; %endif
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_add_i32 s0, s3, s0
-; SI-NEXT: s_add_i32 s0, s0, s2
+; SI-NEXT: s_add_i32 s3, s3, s0
+; SI-NEXT: BB1_4: ; %endif
+; SI-NEXT: s_add_i32 s0, s3, s2
 ; SI-NEXT: s_mov_b32 s7, 0xf000
 ; SI-NEXT: s_mov_b32 s6, -1
 ; SI-NEXT: v_mov_b32_e32 v0, s0
diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-copy.ll b/llvm/test/CodeGen/AMDGPU/sgpr-copy.ll
--- a/llvm/test/CodeGen/AMDGPU/sgpr-copy.ll
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-copy.ll
@@ -211,14 +211,14 @@
 ; an assertion failure.
 
 ; CHECK-LABEL: {{^}}sample_v3:
-; CHECK: v_mov_b32_e32 v[[SAMPLE_LO:[0-9]+]], 11
-; CHECK: v_mov_b32_e32 v[[SAMPLE_HI:[0-9]+]], 13
+; CHECK: v_mov_b32_e32 v[[SAMPLE_LO:[0-9]+]], 5
+; CHECK: v_mov_b32_e32 v[[SAMPLE_HI:[0-9]+]], 7
 ; CHECK: s_branch
 
-; CHECK-DAG: v_mov_b32_e32 v[[SAMPLE_LO:[0-9]+]], 5
-; CHECK-DAG: v_mov_b32_e32 v[[SAMPLE_HI:[0-9]+]], 7
-
 ; CHECK: BB{{[0-9]+_[0-9]+}}:
+; CHECK-DAG: v_mov_b32_e32 v[[SAMPLE_LO:[0-9]+]], 11
+; CHECK-DAG: v_mov_b32_e32 v[[SAMPLE_HI:[0-9]+]], 13
+
 ; CHECK: image_sample v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[SAMPLE_LO]]:[[SAMPLE_HI]]{{\]}}
 ; CHECK: exp
 ; CHECK: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll b/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll
--- a/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll
+++ b/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll
@@ -200,10 +200,10 @@
 ; SI-NEXT: s_and_b64 vcc, exec, vcc
 ; SI-NEXT: s_cbranch_vccz BB3_8
 ; SI-NEXT: ; %bb.6: ; %for.body
-; SI-NEXT: s_and_b64 vcc, exec, -1
+; SI-NEXT: s_and_b64 vcc, exec, 0
 ; SI-NEXT: BB3_7: ; %self.loop
 ; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_cbranch_vccnz BB3_7
+; SI-NEXT: s_cbranch_vccz BB3_7
 ; SI-NEXT: BB3_8: ; %DummyReturnBlock
 ; SI-NEXT: s_endpgm
 ;
@@ -246,10 +246,10 @@
 ; FLAT-NEXT: s_and_b64 vcc, exec, vcc
 ; FLAT-NEXT: s_cbranch_vccz BB3_8
 ; FLAT-NEXT: ; %bb.6: ; %for.body
-; FLAT-NEXT: s_and_b64 vcc, exec, -1
+; FLAT-NEXT: s_and_b64 vcc, exec, 0
 ; FLAT-NEXT: BB3_7: ; %self.loop
 ; FLAT-NEXT: ; =>This Inner Loop Header: Depth=1
-; FLAT-NEXT: s_cbranch_vccnz BB3_7
+; FLAT-NEXT: s_cbranch_vccz BB3_7
 ; FLAT-NEXT: BB3_8: ; %DummyReturnBlock
 ; FLAT-NEXT: s_endpgm
 entry:
diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -130,59 +130,50 @@
 ; GCN-IR-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
 ; GCN-IR-NEXT: s_flbit_i32_b32 s10, s2
-; GCN-IR-NEXT: s_or_b64 s[8:9], s[0:1], s[8:9]
-; GCN-IR-NEXT: s_flbit_i32_b32 s0, s6
-; GCN-IR-NEXT: s_flbit_i32_b32 s11, s3
 ; GCN-IR-NEXT: s_add_i32 s10, s10, 32
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s11
+; GCN-IR-NEXT: s_flbit_i32_b32 s11, s3
 ; GCN-IR-NEXT: v_mov_b32_e32 v1, s10
+; GCN-IR-NEXT: s_flbit_i32_b32 s10, s6
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s11
 ; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s3, 0
-; GCN-IR-NEXT: s_add_i32 s0, s0, 32
-; GCN-IR-NEXT: s_flbit_i32_b32 s1, s7
+; GCN-IR-NEXT: s_add_i32 s10, s10, 32
+; GCN-IR-NEXT: s_flbit_i32_b32 s11, s7
 ; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s1
-; GCN-IR-NEXT: v_mov_b32_e32 v1, s0
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s11
+; GCN-IR-NEXT: v_mov_b32_e32 v1, s10
 ; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT: v_subb_u32_e64 v1, s[0:1], 0, 0, vcc
-; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], 63, v[0:1]
-; GCN-IR-NEXT: s_or_b64 s[8:9], s[8:9], vcc
-; GCN-IR-NEXT: s_or_b64 s[0:1], s[8:9], s[0:1]
-; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT: s_cbranch_vccz BB0_2
-; GCN-IR-NEXT: ; %bb.1:
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s7
-; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[8:9]
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s6
-; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[8:9]
-; GCN-IR-NEXT: s_branch BB0_7
-; GCN-IR-NEXT: BB0_2: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 1, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1]
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0
-; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[6:7], v0
-; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v2, v3
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
+; GCN-IR-NEXT: v_subb_u32_e64 v5, s[10:11], 0, 0, vcc
+; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[4:5]
+; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], s[8:9]
+; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[8:9], s[0:1], -1
+; GCN-IR-NEXT: s_and_b64 s[8:9], s[8:9], vcc
+; GCN-IR-NEXT: s_and_b64 vcc, exec, s[8:9]
 ; GCN-IR-NEXT: s_cbranch_vccz BB0_4
-; GCN-IR-NEXT: ; %bb.3:
-; GCN-IR-NEXT: v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT: v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT: s_branch BB0_6
-; GCN-IR-NEXT: BB0_4: ; %udiv-preheader
+; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v4
+; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v5, vcc
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v4
+; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[4:5]
+; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[6:7], v0
+; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc
+; GCN-IR-NEXT: s_cbranch_vccz BB0_5
+; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT: v_not_b32_e32 v2, v2
 ; GCN-IR-NEXT: s_add_u32 s8, s2, -1
-; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[6:7], v4
 ; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, v2, v3
 ; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[6:7], v6
 ; GCN-IR-NEXT: s_addc_u32 s9, s3, -1
 ; GCN-IR-NEXT: v_addc_u32_e64 v5, s[0:1], -1, 0, vcc
 ; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT: v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT: BB0_5: ; %udiv-do-while
+; GCN-IR-NEXT: BB0_3: ; %udiv-do-while
 ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1
@@ -207,8 +198,18 @@
 ; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
 ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc
 ; GCN-IR-NEXT: v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT: s_cbranch_vccz BB0_5
-; GCN-IR-NEXT: BB0_6: ; %udiv-loop-exit
+; GCN-IR-NEXT: s_cbranch_vccz BB0_3
+; GCN-IR-NEXT: s_branch BB0_6
+; GCN-IR-NEXT: BB0_4:
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s7
+; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; GCN-IR-NEXT: s_branch BB0_7
+; GCN-IR-NEXT: BB0_5:
+; GCN-IR-NEXT: v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT: v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT: BB0_6: ; %Flow6
 ; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1
@@ -217,16 +218,14 @@
 ; GCN-IR-NEXT: v_mul_hi_u32 v2, s2, v0
 ; GCN-IR-NEXT: v_mul_lo_u32 v3, s3, v0
 ; GCN-IR-NEXT: v_mul_lo_u32 v0, s2, v0
-; GCN-IR-NEXT: s_mov_b32 s11, 0xf000
 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v1, v3
 ; GCN-IR-NEXT: v_mov_b32_e32 v2, s7
 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, s6, v0
-; GCN-IR-NEXT: s_mov_b32 s10, -1
-; GCN-IR-NEXT: s_mov_b32 s8, s4
-; GCN-IR-NEXT: s_mov_b32 s9, s5
 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v2, v1, vcc
-; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT: s_mov_b32 s6, -1
+; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT: s_endpgm
 %result = urem i64 %x, %y
 store i64 %result, i64 addrspace(1)* %out
@@ -1027,82 +1026,73 @@
 ; GCN-IR-NEXT: s_ashr_i32 s2, s7, 31
 ; GCN-IR-NEXT: s_ashr_i64 s[10:11], s[0:1], 31
 ; GCN-IR-NEXT: s_ashr_i32 s0, s1, 31
-; GCN-IR-NEXT: s_mov_b32 s1, s0
 ; GCN-IR-NEXT: s_ashr_i64 s[8:9], s[6:7], 31
 ; GCN-IR-NEXT: s_mov_b32 s3, s2
+; GCN-IR-NEXT: s_mov_b32 s1, s0
 ; GCN-IR-NEXT: s_xor_b64 s[6:7], s[8:9], s[2:3]
-; GCN-IR-NEXT: s_xor_b64 s[10:11], s[10:11], s[0:1]
-; GCN-IR-NEXT: s_sub_u32 s8, s6, s2
-; GCN-IR-NEXT: s_subb_u32 s9, s7, s2
-; GCN-IR-NEXT: s_sub_u32 s10, s10, s0
-; GCN-IR-NEXT: s_subb_u32 s11, s11, s0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[10:11], 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[8:9], 0
-; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s11, 0
-; GCN-IR-NEXT: s_or_b64 s[6:7], s[0:1], s[6:7]
-; GCN-IR-NEXT: s_flbit_i32_b32 s0, s10
-; GCN-IR-NEXT: s_add_i32 s0, s0, 32
-; GCN-IR-NEXT: s_flbit_i32_b32 s1, s11
-; GCN-IR-NEXT: v_mov_b32_e32 v1, s0
-; GCN-IR-NEXT: s_flbit_i32_b32 s0, s8
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s1
-; GCN-IR-NEXT: s_add_i32 s0, s0, 32
-; GCN-IR-NEXT: s_flbit_i32_b32 s1, s9
-; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s1
-; GCN-IR-NEXT: v_mov_b32_e32 v1, s0
+; GCN-IR-NEXT: s_xor_b64 s[8:9], s[10:11], s[0:1]
+; GCN-IR-NEXT: s_sub_u32 s6, s6, s2
+; GCN-IR-NEXT: s_subb_u32 s7, s7, s2
+; GCN-IR-NEXT: s_sub_u32 s8, s8, s0
+; GCN-IR-NEXT: s_flbit_i32_b32 s12, s8
+; GCN-IR-NEXT: s_subb_u32 s9, s9, s0
+; GCN-IR-NEXT: s_add_i32 s12, s12, 32
+; GCN-IR-NEXT: s_flbit_i32_b32 s13, s9
+; GCN-IR-NEXT: v_mov_b32_e32 v1, s12
+; GCN-IR-NEXT: s_flbit_i32_b32 s12, s6
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s13
 ; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s9, 0
+; GCN-IR-NEXT: s_add_i32 s12, s12, 32
+; GCN-IR-NEXT: s_flbit_i32_b32 s13, s7
+; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s13
+; GCN-IR-NEXT: v_mov_b32_e32 v1, s12
+; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT: v_subb_u32_e64 v1, s[0:1], 0, 0, vcc
-; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], 63, v[0:1]
-; GCN-IR-NEXT: s_or_b64 s[6:7], s[6:7], vcc
-; GCN-IR-NEXT: s_or_b64 s[0:1], s[6:7], s[0:1]
-; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT: s_cbranch_vccz BB8_2
-; GCN-IR-NEXT: ; %bb.1:
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s9
-; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[6:7]
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s8
-; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[6:7]
-; GCN-IR-NEXT: s_branch BB8_7
-; GCN-IR-NEXT: BB8_2: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 1, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1]
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0
-; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[8:9], v0
-; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v2, v3
+; GCN-IR-NEXT: v_subb_u32_e64 v5, s[12:13], 0, 0, vcc
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[8:9], 0
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[6:7], 0
+; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[4:5]
+; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], s[10:11]
+; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[10:11], s[0:1], -1
+; GCN-IR-NEXT: s_and_b64 s[10:11], s[10:11], vcc
+; GCN-IR-NEXT: s_and_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT: s_cbranch_vccz BB8_4
-; GCN-IR-NEXT: ; %bb.3:
-; GCN-IR-NEXT: v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT: v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT: s_branch BB8_6
-; GCN-IR-NEXT: BB8_4: ; %udiv-preheader
+; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v4
+; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v5, vcc
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v4
+; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[4:5]
+; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[6:7], v0
+; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc
+; GCN-IR-NEXT: s_cbranch_vccz BB8_5
+; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT: v_not_b32_e32 v2, v2
-; GCN-IR-NEXT: s_add_u32 s6, s10, -1
-; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[8:9], v4
+; GCN-IR-NEXT: s_add_u32 s10, s8, -1
 ; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, v2, v3
 ; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT: s_addc_u32 s7, s11, -1
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[6:7], v6
+; GCN-IR-NEXT: s_addc_u32 s11, s9, -1
 ; GCN-IR-NEXT: v_addc_u32_e64 v5, s[0:1], -1, 0, vcc
 ; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT: v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT: BB8_5: ; %udiv-do-while
+; GCN-IR-NEXT: BB8_3: ; %udiv-do-while
 ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1
 ; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v2
 ; GCN-IR-NEXT: v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT: v_mov_b32_e32 v2, s7
-; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s6, v6
+; GCN-IR-NEXT: v_mov_b32_e32 v2, s11
+; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s10, v6
 ; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, v2, v7, vcc
 ; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT: v_and_b32_e32 v10, s10, v8
+; GCN-IR-NEXT: v_and_b32_e32 v10, s8, v8
 ; GCN-IR-NEXT: v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT: v_and_b32_e32 v11, s11, v8
+; GCN-IR-NEXT: v_and_b32_e32 v11, s9, v8
 ; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v4
 ; GCN-IR-NEXT: v_or_b32_e32 v1, v9, v1
 ; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v5, vcc
@@ -1114,28 +1104,38 @@
 ; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
 ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc
 ; GCN-IR-NEXT: v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT: s_cbranch_vccz BB8_5
-; GCN-IR-NEXT: BB8_6: ; %udiv-loop-exit
+; GCN-IR-NEXT: s_cbranch_vccz BB8_3
+; GCN-IR-NEXT: s_branch BB8_6
+; GCN-IR-NEXT: BB8_4:
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s7
+; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; GCN-IR-NEXT: s_branch BB8_7
+; GCN-IR-NEXT: BB8_5:
+; GCN-IR-NEXT: v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT: v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT: BB8_6: ; %Flow6
 ; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1
 ; GCN-IR-NEXT: BB8_7: ; %udiv-end
-; GCN-IR-NEXT: v_mul_lo_u32 v1, s10, v1
-; GCN-IR-NEXT: v_mul_hi_u32 v2, s10, v0
-; GCN-IR-NEXT: v_mul_lo_u32 v3, s11, v0
-; GCN-IR-NEXT: v_mul_lo_u32 v0, s10, v0
-; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT: v_mul_lo_u32 v1, s8, v1
+; GCN-IR-NEXT: v_mul_hi_u32 v2, s8, v0
+; GCN-IR-NEXT: v_mul_lo_u32 v3, s9, v0
+; GCN-IR-NEXT: v_mul_lo_u32 v0, s8, v0
 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v1, v3
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, s8, v0
-; GCN-IR-NEXT: v_mov_b32_e32 v2, s9
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, s6, v0
+; GCN-IR-NEXT: v_mov_b32_e32 v2, s7
 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v2, v1, vcc
 ; GCN-IR-NEXT: v_xor_b32_e32 v0, s2, v0
 ; GCN-IR-NEXT: v_xor_b32_e32 v1, s3, v1
 ; GCN-IR-NEXT: v_mov_b32_e32 v2, s3
 ; GCN-IR-NEXT: v_subrev_i32_e32 v0, vcc, s2,
v0 -; GCN-IR-NEXT: s_mov_b32 s6, -1 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc +; GCN-IR-NEXT: s_mov_b32 s7, 0xf000 +; GCN-IR-NEXT: s_mov_b32 s6, -1 ; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; GCN-IR-NEXT: s_endpgm %1 = ashr i64 %x, 31 @@ -1185,82 +1185,71 @@ ; ; GCN-IR-LABEL: s_test_srem24_48: ; GCN-IR: ; %bb.0: ; %_udiv-special-cases +; GCN-IR-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 ; GCN-IR-NEXT: s_load_dword s2, s[0:1], 0xb ; GCN-IR-NEXT: s_load_dword s3, s[0:1], 0xc -; GCN-IR-NEXT: s_load_dword s4, s[0:1], 0xd -; GCN-IR-NEXT: s_load_dword s5, s[0:1], 0xe +; GCN-IR-NEXT: s_load_dword s6, s[0:1], 0xd +; GCN-IR-NEXT: s_load_dword s0, s[0:1], 0xe ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) ; GCN-IR-NEXT: s_sext_i32_i16 s3, s3 -; GCN-IR-NEXT: s_ashr_i64 s[6:7], s[2:3], 24 -; GCN-IR-NEXT: s_sext_i32_i16 s5, s5 +; GCN-IR-NEXT: s_sext_i32_i16 s7, s0 +; GCN-IR-NEXT: s_ashr_i64 s[0:1], s[2:3], 24 ; GCN-IR-NEXT: s_ashr_i32 s2, s3, 31 -; GCN-IR-NEXT: s_ashr_i64 s[8:9], s[4:5], 24 -; GCN-IR-NEXT: s_ashr_i32 s4, s5, 31 +; GCN-IR-NEXT: s_ashr_i32 s10, s7, 31 ; GCN-IR-NEXT: s_mov_b32 s3, s2 -; GCN-IR-NEXT: s_mov_b32 s5, s4 -; GCN-IR-NEXT: s_xor_b64 s[6:7], s[6:7], s[2:3] -; GCN-IR-NEXT: s_xor_b64 s[8:9], s[8:9], s[4:5] -; GCN-IR-NEXT: s_sub_u32 s6, s6, s2 -; GCN-IR-NEXT: s_subb_u32 s7, s7, s2 -; GCN-IR-NEXT: s_sub_u32 s8, s8, s4 -; GCN-IR-NEXT: s_subb_u32 s9, s9, s4 -; GCN-IR-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 -; GCN-IR-NEXT: s_flbit_i32_b32 s0, s8 -; GCN-IR-NEXT: s_add_i32 s0, s0, 32 -; GCN-IR-NEXT: s_flbit_i32_b32 s1, s9 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s0 -; GCN-IR-NEXT: s_flbit_i32_b32 s0, s6 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s1 +; GCN-IR-NEXT: s_ashr_i64 s[8:9], s[6:7], 24 +; GCN-IR-NEXT: s_mov_b32 s11, s10 +; GCN-IR-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3] +; GCN-IR-NEXT: s_xor_b64 s[8:9], s[8:9], s[10:11] +; GCN-IR-NEXT: s_sub_u32 s6, s0, s2 +; GCN-IR-NEXT: s_subb_u32 s7, s1, s2 +; GCN-IR-NEXT: s_sub_u32 s8, s8, s10 +; GCN-IR-NEXT: s_flbit_i32_b32 s12, s8 +; GCN-IR-NEXT: s_subb_u32 s9, s9, s10 +; GCN-IR-NEXT: s_add_i32 s12, s12, 32 +; GCN-IR-NEXT: s_flbit_i32_b32 s13, s9 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s12 +; GCN-IR-NEXT: s_flbit_i32_b32 s12, s6 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s13 ; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s9, 0 -; GCN-IR-NEXT: s_add_i32 s0, s0, 32 -; GCN-IR-NEXT: s_flbit_i32_b32 s1, s7 +; GCN-IR-NEXT: s_add_i32 s12, s12, 32 +; GCN-IR-NEXT: s_flbit_i32_b32 s13, s7 ; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v0, s1 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s0 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s13 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s12 ; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0 ; GCN-IR-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v2, v3 -; GCN-IR-NEXT: v_subb_u32_e64 v1, s[0:1], 0, 0, vcc -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[8:9], 0 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[6:7], 0 -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[12:13] -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], vcc -; GCN-IR-NEXT: s_or_b64 s[0:1], s[10:11], s[0:1] -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1] -; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) -; GCN-IR-NEXT: s_mov_b64 vcc, vcc -; GCN-IR-NEXT: s_cbranch_vccz BB9_2 -; GCN-IR-NEXT: ; %bb.1: -; GCN-IR-NEXT: v_mov_b32_e32 v0, s7 -; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[10:11] -; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[10:11] -; 
GCN-IR-NEXT: s_branch BB9_7 -; GCN-IR-NEXT: BB9_2: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 1, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1] -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[6:7], v0 -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1] +; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v2, v3 +; GCN-IR-NEXT: v_subb_u32_e64 v5, s[12:13], 0, 0, vcc +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[8:9], 0 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[6:7], 0 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[4:5] +; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], s[10:11] +; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[4:5] +; GCN-IR-NEXT: s_xor_b64 s[10:11], s[0:1], -1 +; GCN-IR-NEXT: s_and_b64 s[10:11], s[10:11], vcc +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[10:11] ; GCN-IR-NEXT: s_cbranch_vccz BB9_4 -; GCN-IR-NEXT: ; %bb.3: -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: s_branch BB9_6 -; GCN-IR-NEXT: BB9_4: ; %udiv-preheader +; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 +; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v4 +; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v5, vcc +; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v4 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[4:5] +; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[6:7], v0 +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc +; GCN-IR-NEXT: s_cbranch_vccz BB9_5 +; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader ; GCN-IR-NEXT: v_not_b32_e32 v2, v2 ; GCN-IR-NEXT: s_add_u32 s10, s8, -1 -; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[6:7], v4 ; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, v2, v3 ; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 +; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[6:7], v6 ; GCN-IR-NEXT: s_addc_u32 s11, s9, -1 ; GCN-IR-NEXT: v_addc_u32_e64 v5, s[0:1], -1, 0, vcc ; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 ; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: BB9_5: ; %udiv-do-while +; GCN-IR-NEXT: BB9_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 ; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 @@ -1285,8 +1274,18 @@ ; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1] ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc ; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 -; GCN-IR-NEXT: s_cbranch_vccz BB9_5 -; GCN-IR-NEXT: BB9_6: ; %udiv-loop-exit +; GCN-IR-NEXT: s_cbranch_vccz BB9_3 +; GCN-IR-NEXT: s_branch BB9_6 +; GCN-IR-NEXT: BB9_4: +; GCN-IR-NEXT: v_mov_b32_e32 v0, s7 +; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[0:1] +; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 +; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[0:1] +; GCN-IR-NEXT: s_branch BB9_7 +; GCN-IR-NEXT: BB9_5: +; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 +; GCN-IR-NEXT: BB9_6: ; %Flow3 ; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 ; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0 ; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1 @@ -1445,57 +1444,49 @@ ; GCN-IR-NEXT: s_xor_b64 s[2:3], s[6:7], s[0:1] ; GCN-IR-NEXT: s_sub_u32 s2, s2, s0 ; GCN-IR-NEXT: s_subb_u32 s3, s3, s0 -; GCN-IR-NEXT: s_flbit_i32_b32 s0, s2 -; GCN-IR-NEXT: s_add_i32 s0, s0, 32 -; GCN-IR-NEXT: s_flbit_i32_b32 s1, s3 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s1 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s0 +; GCN-IR-NEXT: s_flbit_i32_b32 s6, s2 +; GCN-IR-NEXT: s_add_i32 s6, s6, 32 +; GCN-IR-NEXT: s_flbit_i32_b32 s7, s3 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s7 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s6 ; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s3, 0 ; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc -; GCN-IR-NEXT: v_add_i32_e32 
v0, vcc, 0xffffffc5, v2 -; GCN-IR-NEXT: v_addc_u32_e64 v1, s[0:1], 0, -1, vcc -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[2:3], 0 -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[8:9], s[8:9], vcc -; GCN-IR-NEXT: s_or_b64 s[0:1], s[8:9], s[0:1] -; GCN-IR-NEXT: s_mov_b32 s6, -1 -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1] -; GCN-IR-NEXT: s_cbranch_vccz BB10_2 -; GCN-IR-NEXT: ; %bb.1: -; GCN-IR-NEXT: v_mov_b32_e32 v1, 0 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, 24, 0, s[8:9] -; GCN-IR-NEXT: s_branch BB10_7 -; GCN-IR-NEXT: BB10_2: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, 1, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1] -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], 24, v0 -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1] +; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, 0xffffffc5, v2 +; GCN-IR-NEXT: v_addc_u32_e64 v4, s[6:7], 0, -1, vcc +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[2:3], 0 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[3:4] +; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[3:4] +; GCN-IR-NEXT: s_xor_b64 s[6:7], s[0:1], -1 +; GCN-IR-NEXT: s_and_b64 s[6:7], s[6:7], vcc +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[6:7] ; GCN-IR-NEXT: s_cbranch_vccz BB10_4 -; GCN-IR-NEXT: ; %bb.3: -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: s_branch BB10_6 -; GCN-IR-NEXT: BB10_4: ; %udiv-preheader -; GCN-IR-NEXT: s_add_u32 s7, s2, -1 -; GCN-IR-NEXT: v_lshr_b64 v[6:7], 24, v3 -; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, 58, v2 +; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 +; GCN-IR-NEXT: v_add_i32_e32 v5, vcc, 1, v3 +; GCN-IR-NEXT: v_addc_u32_e32 v6, vcc, 0, v4, vcc +; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v3 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[5:6], v[3:4] +; GCN-IR-NEXT: v_lshl_b64 v[0:1], 24, v0 +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc +; GCN-IR-NEXT: s_cbranch_vccz BB10_5 +; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader +; GCN-IR-NEXT: s_add_u32 s6, s2, -1 ; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 -; GCN-IR-NEXT: s_addc_u32 s8, s3, -1 +; GCN-IR-NEXT: v_lshr_b64 v[6:7], 24, v5 +; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, 58, v2 +; GCN-IR-NEXT: s_addc_u32 s7, s3, -1 ; GCN-IR-NEXT: v_subb_u32_e64 v5, s[0:1], 0, 0, vcc ; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 ; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: BB10_5: ; %udiv-do-while +; GCN-IR-NEXT: BB10_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 ; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 ; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 ; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v2 ; GCN-IR-NEXT: v_or_b32_e32 v0, v8, v0 -; GCN-IR-NEXT: v_mov_b32_e32 v2, s8 -; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s7, v6 +; GCN-IR-NEXT: v_mov_b32_e32 v2, s7 +; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s6, v6 ; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, v2, v7, vcc ; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v2 ; GCN-IR-NEXT: v_and_b32_e32 v10, s2, v8 @@ -1512,8 +1503,16 @@ ; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1] ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc ; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 -; GCN-IR-NEXT: s_cbranch_vccz BB10_5 -; GCN-IR-NEXT: BB10_6: ; %udiv-loop-exit +; GCN-IR-NEXT: s_cbranch_vccz BB10_3 +; GCN-IR-NEXT: s_branch BB10_6 +; GCN-IR-NEXT: BB10_4: +; GCN-IR-NEXT: v_mov_b32_e32 v1, 0 +; GCN-IR-NEXT: v_cndmask_b32_e64 v0, 24, 0, s[0:1] +; GCN-IR-NEXT: s_branch BB10_7 +; GCN-IR-NEXT: BB10_5: +; 
GCN-IR-NEXT: v_mov_b32_e32 v2, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 +; GCN-IR-NEXT: BB10_6: ; %Flow5 ; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 ; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0 ; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1 @@ -1527,6 +1526,7 @@ ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v1, v3 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 24, v0 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, 0, v1, vcc +; GCN-IR-NEXT: s_mov_b32 s6, -1 ; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; GCN-IR-NEXT: s_endpgm %result = srem i64 24, %x diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll --- a/llvm/test/CodeGen/AMDGPU/udiv64.ll +++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll @@ -131,59 +131,50 @@ ; GCN-IR-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[6:7], 0 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[2:3], 0 ; GCN-IR-NEXT: s_flbit_i32_b32 s10, s2 -; GCN-IR-NEXT: s_or_b64 s[8:9], s[0:1], s[8:9] -; GCN-IR-NEXT: s_flbit_i32_b32 s0, s6 -; GCN-IR-NEXT: s_flbit_i32_b32 s11, s3 ; GCN-IR-NEXT: s_add_i32 s10, s10, 32 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s11 +; GCN-IR-NEXT: s_flbit_i32_b32 s11, s3 ; GCN-IR-NEXT: v_mov_b32_e32 v1, s10 +; GCN-IR-NEXT: s_flbit_i32_b32 s10, s6 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s11 ; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s3, 0 -; GCN-IR-NEXT: s_add_i32 s0, s0, 32 -; GCN-IR-NEXT: s_flbit_i32_b32 s1, s7 +; GCN-IR-NEXT: s_add_i32 s10, s10, 32 +; GCN-IR-NEXT: s_flbit_i32_b32 s11, s7 ; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v0, s1 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s0 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s11 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s10 ; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0 ; GCN-IR-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v2, v3 -; GCN-IR-NEXT: v_subb_u32_e64 v1, s[0:1], 0, 0, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[8:9], s[8:9], vcc -; GCN-IR-NEXT: s_or_b64 s[0:1], s[8:9], s[0:1] -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1] -; GCN-IR-NEXT: s_cbranch_vccz BB0_2 -; GCN-IR-NEXT: ; %bb.1: -; GCN-IR-NEXT: v_mov_b32_e32 v0, s7 -; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[8:9] -; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[8:9] -; GCN-IR-NEXT: s_branch BB0_7 -; GCN-IR-NEXT: BB0_2: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 1, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1] -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[6:7], v0 -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1] +; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v2, v3 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[2:3], 0 +; GCN-IR-NEXT: v_subb_u32_e64 v5, s[10:11], 0, 0, vcc +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[4:5] +; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], s[8:9] +; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[4:5] +; GCN-IR-NEXT: s_xor_b64 s[8:9], s[0:1], -1 +; GCN-IR-NEXT: s_and_b64 s[8:9], s[8:9], vcc +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[8:9] ; GCN-IR-NEXT: s_cbranch_vccz BB0_4 -; GCN-IR-NEXT: ; %bb.3: -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: s_branch BB0_6 -; GCN-IR-NEXT: BB0_4: ; %udiv-preheader +; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 +; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v4 +; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v5, vcc +; GCN-IR-NEXT: 
v_sub_i32_e32 v0, vcc, 63, v4 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[4:5] +; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[6:7], v0 +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc +; GCN-IR-NEXT: s_cbranch_vccz BB0_5 +; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader ; GCN-IR-NEXT: v_not_b32_e32 v2, v2 -; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[6:7], v4 -; GCN-IR-NEXT: s_add_u32 s6, s2, -1 ; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, v2, v3 +; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[6:7], v6 +; GCN-IR-NEXT: s_add_u32 s6, s2, -1 ; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 ; GCN-IR-NEXT: s_addc_u32 s7, s3, -1 ; GCN-IR-NEXT: v_addc_u32_e64 v5, s[0:1], -1, 0, vcc ; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 ; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: BB0_5: ; %udiv-do-while +; GCN-IR-NEXT: BB0_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 ; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 @@ -208,8 +199,18 @@ ; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1] ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc ; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 -; GCN-IR-NEXT: s_cbranch_vccz BB0_5 -; GCN-IR-NEXT: BB0_6: ; %udiv-loop-exit +; GCN-IR-NEXT: s_cbranch_vccz BB0_3 +; GCN-IR-NEXT: s_branch BB0_6 +; GCN-IR-NEXT: BB0_4: +; GCN-IR-NEXT: v_mov_b32_e32 v0, s7 +; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[0:1] +; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 +; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[0:1] +; GCN-IR-NEXT: s_branch BB0_7 +; GCN-IR-NEXT: BB0_5: +; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 +; GCN-IR-NEXT: BB0_6: ; %Flow6 ; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 ; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0 ; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1 @@ -825,76 +826,65 @@ ; ; GCN-IR-LABEL: s_test_udiv24_i48: ; GCN-IR: ; %bb.0: ; %_udiv-special-cases +; GCN-IR-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 ; GCN-IR-NEXT: s_load_dword s2, s[0:1], 0xb ; GCN-IR-NEXT: s_load_dword s3, s[0:1], 0xc ; GCN-IR-NEXT: s_load_dword s6, s[0:1], 0xd -; GCN-IR-NEXT: s_load_dword s5, s[0:1], 0xe -; GCN-IR-NEXT: s_mov_b32 s4, 0xffff -; GCN-IR-NEXT: s_mov_b32 s7, 0xff000000 +; GCN-IR-NEXT: s_load_dword s7, s[0:1], 0xe +; GCN-IR-NEXT: s_mov_b32 s8, 0xffff +; GCN-IR-NEXT: s_mov_b32 s9, 0xff000000 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) -; GCN-IR-NEXT: s_and_b32 s3, s3, s4 -; GCN-IR-NEXT: s_and_b32 s2, s2, s7 -; GCN-IR-NEXT: s_and_b32 s5, s5, s4 -; GCN-IR-NEXT: s_and_b32 s4, s6, s7 -; GCN-IR-NEXT: s_lshr_b64 s[6:7], s[2:3], 24 -; GCN-IR-NEXT: s_lshr_b64 s[2:3], s[4:5], 24 -; GCN-IR-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 -; GCN-IR-NEXT: s_flbit_i32_b32 s0, s2 -; GCN-IR-NEXT: s_add_i32 s0, s0, 32 -; GCN-IR-NEXT: s_flbit_i32_b32 s1, s3 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s0 -; GCN-IR-NEXT: s_flbit_i32_b32 s0, s6 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s1 +; GCN-IR-NEXT: s_and_b32 s1, s3, s8 +; GCN-IR-NEXT: s_and_b32 s0, s2, s9 +; GCN-IR-NEXT: s_and_b32 s3, s7, s8 +; GCN-IR-NEXT: s_and_b32 s2, s6, s9 +; GCN-IR-NEXT: s_lshr_b64 s[2:3], s[2:3], 24 +; GCN-IR-NEXT: s_flbit_i32_b32 s10, s2 +; GCN-IR-NEXT: s_lshr_b64 s[0:1], s[0:1], 24 +; GCN-IR-NEXT: s_add_i32 s10, s10, 32 +; GCN-IR-NEXT: s_flbit_i32_b32 s11, s3 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s10 +; GCN-IR-NEXT: s_flbit_i32_b32 s10, s0 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s11 ; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s3, 0 -; GCN-IR-NEXT: s_add_i32 s0, s0, 32 -; GCN-IR-NEXT: s_flbit_i32_b32 s1, s7 +; GCN-IR-NEXT: s_add_i32 s10, s10, 32 +; GCN-IR-NEXT: s_flbit_i32_b32 s11, s1 ; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v0, s1 -; GCN-IR-NEXT: 
v_mov_b32_e32 v1, s0 -; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s11 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s10 +; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s1, 0 ; GCN-IR-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v2, v3 -; GCN-IR-NEXT: v_subb_u32_e64 v1, s[0:1], 0, 0, vcc -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[2:3], 0 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[6:7], 0 -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[8:9], s[8:9], s[10:11] -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[8:9], s[8:9], vcc -; GCN-IR-NEXT: s_or_b64 s[0:1], s[8:9], s[0:1] -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1] -; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) -; GCN-IR-NEXT: s_mov_b64 vcc, vcc -; GCN-IR-NEXT: s_cbranch_vccz BB7_2 -; GCN-IR-NEXT: ; %bb.1: -; GCN-IR-NEXT: v_mov_b32_e32 v0, s7 -; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[8:9] -; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[8:9] -; GCN-IR-NEXT: s_branch BB7_7 -; GCN-IR-NEXT: BB7_2: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 1, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1] -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[6:7], v0 -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1] +; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v2, v3 +; GCN-IR-NEXT: v_subb_u32_e64 v5, s[10:11], 0, 0, vcc +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[2:3], 0 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[0:1], 0 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[4:5] +; GCN-IR-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9] +; GCN-IR-NEXT: s_or_b64 s[6:7], s[6:7], vcc +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[4:5] +; GCN-IR-NEXT: s_xor_b64 s[8:9], s[6:7], -1 +; GCN-IR-NEXT: s_and_b64 s[8:9], s[8:9], vcc +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[8:9] ; GCN-IR-NEXT: s_cbranch_vccz BB7_4 -; GCN-IR-NEXT: ; %bb.3: -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: s_branch BB7_6 -; GCN-IR-NEXT: BB7_4: ; %udiv-preheader +; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 +; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v4 +; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v5, vcc +; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v4 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[4:5] +; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[0:1], v0 +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc +; GCN-IR-NEXT: s_cbranch_vccz BB7_5 +; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader ; GCN-IR-NEXT: v_not_b32_e32 v2, v2 -; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[6:7], v4 ; GCN-IR-NEXT: s_add_u32 s6, s2, -1 ; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, v2, v3 +; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[0:1], v6 ; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 ; GCN-IR-NEXT: s_addc_u32 s7, s3, -1 ; GCN-IR-NEXT: v_addc_u32_e64 v5, s[0:1], -1, 0, vcc ; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 ; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: BB7_5: ; %udiv-do-while +; GCN-IR-NEXT: BB7_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 ; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 @@ -919,8 +909,18 @@ ; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1] ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc ; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 -; GCN-IR-NEXT: s_cbranch_vccz BB7_5 -; GCN-IR-NEXT: BB7_6: ; %udiv-loop-exit +; GCN-IR-NEXT: s_cbranch_vccz BB7_3 +; GCN-IR-NEXT: s_branch BB7_6 +; GCN-IR-NEXT: BB7_4: +; GCN-IR-NEXT: v_mov_b32_e32 v0, s1 +; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, 
s[6:7] +; GCN-IR-NEXT: v_mov_b32_e32 v0, s0 +; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[6:7] +; GCN-IR-NEXT: s_branch BB7_7 +; GCN-IR-NEXT: BB7_5: +; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 +; GCN-IR-NEXT: BB7_6: ; %Flow3 ; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 ; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0 ; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1 @@ -1053,58 +1053,50 @@ ; GCN-IR-LABEL: s_test_udiv_k_num_i64: ; GCN-IR: ; %bb.0: ; %_udiv-special-cases ; GCN-IR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 -; GCN-IR-NEXT: s_mov_b32 s2, -1 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) -; GCN-IR-NEXT: s_flbit_i32_b32 s0, s6 -; GCN-IR-NEXT: s_flbit_i32_b32 s1, s7 -; GCN-IR-NEXT: s_add_i32 s0, s0, 32 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s1 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s0 +; GCN-IR-NEXT: s_flbit_i32_b32 s2, s6 +; GCN-IR-NEXT: s_flbit_i32_b32 s3, s7 +; GCN-IR-NEXT: s_add_i32 s2, s2, 32 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s3 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s2 ; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0 ; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc -; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 0xffffffc5, v2 -; GCN-IR-NEXT: v_addc_u32_e64 v1, s[0:1], 0, -1, vcc -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[6:7], 0 -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[8:9], s[8:9], vcc -; GCN-IR-NEXT: s_or_b64 s[0:1], s[8:9], s[0:1] -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1] -; GCN-IR-NEXT: s_cbranch_vccz BB8_2 -; GCN-IR-NEXT: ; %bb.1: -; GCN-IR-NEXT: v_mov_b32_e32 v1, 0 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, 24, 0, s[8:9] -; GCN-IR-NEXT: s_branch BB8_7 -; GCN-IR-NEXT: BB8_2: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, 1, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1] -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], 24, v0 -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1] +; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, 0xffffffc5, v2 +; GCN-IR-NEXT: v_addc_u32_e64 v4, s[2:3], 0, -1, vcc +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], 0 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[3:4] +; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[3:4] +; GCN-IR-NEXT: s_xor_b64 s[2:3], s[0:1], -1 +; GCN-IR-NEXT: s_and_b64 s[2:3], s[2:3], vcc +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[2:3] ; GCN-IR-NEXT: s_cbranch_vccz BB8_4 -; GCN-IR-NEXT: ; %bb.3: -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: s_branch BB8_6 -; GCN-IR-NEXT: BB8_4: ; %udiv-preheader -; GCN-IR-NEXT: s_add_u32 s3, s6, -1 -; GCN-IR-NEXT: v_lshr_b64 v[6:7], 24, v3 -; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, 58, v2 +; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 +; GCN-IR-NEXT: v_add_i32_e32 v5, vcc, 1, v3 +; GCN-IR-NEXT: v_addc_u32_e32 v6, vcc, 0, v4, vcc +; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v3 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[5:6], v[3:4] +; GCN-IR-NEXT: v_lshl_b64 v[0:1], 24, v0 +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc +; GCN-IR-NEXT: s_cbranch_vccz BB8_5 +; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader +; GCN-IR-NEXT: s_add_u32 s2, s6, -1 ; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 -; GCN-IR-NEXT: s_addc_u32 s8, s7, -1 +; GCN-IR-NEXT: v_lshr_b64 v[6:7], 24, v5 +; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, 58, v2 +; GCN-IR-NEXT: s_addc_u32 s3, s7, -1 ; GCN-IR-NEXT: v_subb_u32_e64 v5, s[0:1], 0, 0, vcc ; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 ; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: BB8_5: ; %udiv-do-while +; GCN-IR-NEXT: BB8_3: ; 
%udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 ; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 ; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 ; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v2 ; GCN-IR-NEXT: v_or_b32_e32 v0, v8, v0 -; GCN-IR-NEXT: v_mov_b32_e32 v2, s8 -; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s3, v6 +; GCN-IR-NEXT: v_mov_b32_e32 v2, s3 +; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s2, v6 ; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, v2, v7, vcc ; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v2 ; GCN-IR-NEXT: v_and_b32_e32 v10, s6, v8 @@ -1121,14 +1113,22 @@ ; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1] ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc ; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 -; GCN-IR-NEXT: s_cbranch_vccz BB8_5 -; GCN-IR-NEXT: BB8_6: ; %udiv-loop-exit +; GCN-IR-NEXT: s_cbranch_vccz BB8_3 +; GCN-IR-NEXT: s_branch BB8_6 +; GCN-IR-NEXT: BB8_4: +; GCN-IR-NEXT: v_mov_b32_e32 v1, 0 +; GCN-IR-NEXT: v_cndmask_b32_e64 v0, 24, 0, s[0:1] +; GCN-IR-NEXT: s_branch BB8_7 +; GCN-IR-NEXT: BB8_5: +; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 +; GCN-IR-NEXT: BB8_6: ; %Flow5 ; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 ; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0 ; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1 ; GCN-IR-NEXT: BB8_7: ; %udiv-end ; GCN-IR-NEXT: s_mov_b32 s7, 0xf000 -; GCN-IR-NEXT: s_mov_b32 s6, s2 +; GCN-IR-NEXT: s_mov_b32 s6, -1 ; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; GCN-IR-NEXT: s_endpgm %result = udiv i64 24, %x @@ -1534,48 +1534,39 @@ ; GCN-IR: ; %bb.0: ; %_udiv-special-cases ; GCN-IR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) -; GCN-IR-NEXT: s_flbit_i32_b32 s0, s6 -; GCN-IR-NEXT: s_flbit_i32_b32 s1, s7 -; GCN-IR-NEXT: s_add_i32 s0, s0, 32 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s1 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s0 +; GCN-IR-NEXT: s_flbit_i32_b32 s2, s6 +; GCN-IR-NEXT: s_flbit_i32_b32 s3, s7 +; GCN-IR-NEXT: s_add_i32 s2, s2, 32 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s3 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s2 ; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0 ; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 59, v2 -; GCN-IR-NEXT: v_subb_u32_e64 v1, s[0:1], 0, 0, vcc -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[2:3], s[6:7], 0 -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[2:3], s[2:3], vcc -; GCN-IR-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1] -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1] -; GCN-IR-NEXT: s_cbranch_vccz BB11_2 -; GCN-IR-NEXT: ; %bb.1: -; GCN-IR-NEXT: v_mov_b32_e32 v0, s7 -; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[2:3] -; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[2:3] -; GCN-IR-NEXT: s_branch BB11_7 -; GCN-IR-NEXT: BB11_2: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, 1, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1] -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[6:7], v0 -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1] +; GCN-IR-NEXT: v_sub_i32_e32 v3, vcc, 59, v2 +; GCN-IR-NEXT: v_subb_u32_e64 v4, s[2:3], 0, 0, vcc +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], 0 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[3:4] +; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[3:4] +; GCN-IR-NEXT: s_xor_b64 s[2:3], s[0:1], -1 +; GCN-IR-NEXT: s_and_b64 s[2:3], s[2:3], vcc +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[2:3] ; 
GCN-IR-NEXT: s_cbranch_vccz BB11_4 -; GCN-IR-NEXT: ; %bb.3: -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: s_branch BB11_6 -; GCN-IR-NEXT: BB11_4: ; %udiv-preheader -; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[6:7], v3 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 0xffffffc4, v2 +; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 +; GCN-IR-NEXT: v_add_i32_e32 v5, vcc, 1, v3 +; GCN-IR-NEXT: v_addc_u32_e32 v6, vcc, 0, v4, vcc +; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v3 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[5:6], v[3:4] +; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[6:7], v0 +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc +; GCN-IR-NEXT: s_cbranch_vccz BB11_5 +; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader ; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 +; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[6:7], v5 +; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 0xffffffc4, v2 ; GCN-IR-NEXT: v_addc_u32_e64 v5, s[0:1], 0, -1, vcc ; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 ; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: BB11_5: ; %udiv-do-while +; GCN-IR-NEXT: BB11_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 ; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 @@ -1598,8 +1589,18 @@ ; GCN-IR-NEXT: v_subbrev_u32_e64 v7, s[0:1], 0, v7, s[0:1] ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc ; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 -; GCN-IR-NEXT: s_cbranch_vccz BB11_5 -; GCN-IR-NEXT: BB11_6: ; %udiv-loop-exit +; GCN-IR-NEXT: s_cbranch_vccz BB11_3 +; GCN-IR-NEXT: s_branch BB11_6 +; GCN-IR-NEXT: BB11_4: +; GCN-IR-NEXT: v_mov_b32_e32 v0, s7 +; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[0:1] +; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 +; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[0:1] +; GCN-IR-NEXT: s_branch BB11_7 +; GCN-IR-NEXT: BB11_5: +; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 +; GCN-IR-NEXT: BB11_6: ; %Flow5 ; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 ; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0 ; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1 diff --git a/llvm/test/CodeGen/AMDGPU/unhandled-loop-condition-assertion.ll b/llvm/test/CodeGen/AMDGPU/unhandled-loop-condition-assertion.ll --- a/llvm/test/CodeGen/AMDGPU/unhandled-loop-condition-assertion.ll +++ b/llvm/test/CodeGen/AMDGPU/unhandled-loop-condition-assertion.ll @@ -39,7 +39,6 @@ } ; COMMON-LABEL: {{^}}branch_false: -; SI: s_cbranch_vccnz ; SI: s_cbranch_scc1 ; SI: s_endpgm define amdgpu_kernel void @branch_false(i8 addrspace(1)* nocapture %main, i32 %main_stride) #0 { diff --git a/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll b/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll --- a/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll +++ b/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll @@ -1,5 +1,5 @@ -; RUN: llc -march=amdgcn -mcpu=verde -amdgpu-early-ifcvt=0 -machine-sink-split-probability-threshold=0 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=SI %s -; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -amdgpu-early-ifcvt=0 -machine-sink-split-probability-threshold=0 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI %s +; RUN: llc -march=amdgcn -mcpu=verde -amdgpu-early-ifcvt=0 -machine-sink-split-probability-threshold=0 -structurizecfg-skip-uniform-regions -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=SI %s +; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -amdgpu-early-ifcvt=0 -machine-sink-split-probability-threshold=0 -structurizecfg-skip-uniform-regions -verify-machineinstrs < %s | FileCheck -enable-var-scope 
-check-prefix=GCN -check-prefix=VI %s ; GCN-LABEL: {{^}}uniform_if_scc: ; GCN-DAG: s_cmp_eq_u32 s{{[0-9]+}}, 0 diff --git a/llvm/test/CodeGen/AMDGPU/uniform-loop-inside-nonuniform.ll b/llvm/test/CodeGen/AMDGPU/uniform-loop-inside-nonuniform.ll --- a/llvm/test/CodeGen/AMDGPU/uniform-loop-inside-nonuniform.ll +++ b/llvm/test/CodeGen/AMDGPU/uniform-loop-inside-nonuniform.ll @@ -8,7 +8,7 @@ ; CHECK-NEXT: s_cbranch_execz BB{{[0-9]+_[0-9]+}} ; CHECK: [[LOOP_BODY_LABEL:BB[0-9]+_[0-9]+]]: ; %loop_body -; CHECK: s_cbranch_scc0 [[LOOP_BODY_LABEL]] +; CHECK: s_cbranch_scc1 [[LOOP_BODY_LABEL]] ; CHECK: s_endpgm define amdgpu_ps void @test1(<8 x i32> inreg %rsrc, <2 x i32> %addr.base, i32 %y, i32 %p) { diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll --- a/llvm/test/CodeGen/AMDGPU/urem64.ll +++ b/llvm/test/CodeGen/AMDGPU/urem64.ll @@ -130,59 +130,50 @@ ; GCN-IR-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) ; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[6:7], 0 -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[2:3], 0 ; GCN-IR-NEXT: s_flbit_i32_b32 s10, s2 -; GCN-IR-NEXT: s_or_b64 s[8:9], s[0:1], s[8:9] -; GCN-IR-NEXT: s_flbit_i32_b32 s0, s6 -; GCN-IR-NEXT: s_flbit_i32_b32 s11, s3 ; GCN-IR-NEXT: s_add_i32 s10, s10, 32 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s11 +; GCN-IR-NEXT: s_flbit_i32_b32 s11, s3 ; GCN-IR-NEXT: v_mov_b32_e32 v1, s10 +; GCN-IR-NEXT: s_flbit_i32_b32 s10, s6 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s11 ; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s3, 0 -; GCN-IR-NEXT: s_add_i32 s0, s0, 32 -; GCN-IR-NEXT: s_flbit_i32_b32 s1, s7 +; GCN-IR-NEXT: s_add_i32 s10, s10, 32 +; GCN-IR-NEXT: s_flbit_i32_b32 s11, s7 ; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc -; GCN-IR-NEXT: v_mov_b32_e32 v0, s1 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s0 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s11 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s10 ; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0 ; GCN-IR-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v2, v3 -; GCN-IR-NEXT: v_subb_u32_e64 v1, s[0:1], 0, 0, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[8:9], s[8:9], vcc -; GCN-IR-NEXT: s_or_b64 s[0:1], s[8:9], s[0:1] -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1] -; GCN-IR-NEXT: s_cbranch_vccz BB0_2 -; GCN-IR-NEXT: ; %bb.1: -; GCN-IR-NEXT: v_mov_b32_e32 v0, s7 -; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[8:9] -; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[8:9] -; GCN-IR-NEXT: s_branch BB0_7 -; GCN-IR-NEXT: BB0_2: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 1, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1] -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[6:7], v0 -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1] +; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v2, v3 +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[2:3], 0 +; GCN-IR-NEXT: v_subb_u32_e64 v5, s[10:11], 0, 0, vcc +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[4:5] +; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], s[8:9] +; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[4:5] +; GCN-IR-NEXT: s_xor_b64 s[8:9], s[0:1], -1 +; GCN-IR-NEXT: s_and_b64 s[8:9], s[8:9], vcc +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[8:9] ; GCN-IR-NEXT: s_cbranch_vccz BB0_4 -; GCN-IR-NEXT: ; %bb.3: -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: s_branch BB0_6 -; GCN-IR-NEXT: 
BB0_4: ; %udiv-preheader +; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 +; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v4 +; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v5, vcc +; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v4 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[4:5] +; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[6:7], v0 +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc +; GCN-IR-NEXT: s_cbranch_vccz BB0_5 +; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader ; GCN-IR-NEXT: v_not_b32_e32 v2, v2 ; GCN-IR-NEXT: s_add_u32 s8, s2, -1 -; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[6:7], v4 ; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, v2, v3 ; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 +; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[6:7], v6 ; GCN-IR-NEXT: s_addc_u32 s9, s3, -1 ; GCN-IR-NEXT: v_addc_u32_e64 v5, s[0:1], -1, 0, vcc ; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 ; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: BB0_5: ; %udiv-do-while +; GCN-IR-NEXT: BB0_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 ; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 @@ -207,8 +198,18 @@ ; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1] ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc ; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 -; GCN-IR-NEXT: s_cbranch_vccz BB0_5 -; GCN-IR-NEXT: BB0_6: ; %udiv-loop-exit +; GCN-IR-NEXT: s_cbranch_vccz BB0_3 +; GCN-IR-NEXT: s_branch BB0_6 +; GCN-IR-NEXT: BB0_4: +; GCN-IR-NEXT: v_mov_b32_e32 v0, s7 +; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[0:1] +; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 +; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[0:1] +; GCN-IR-NEXT: s_branch BB0_7 +; GCN-IR-NEXT: BB0_5: +; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 +; GCN-IR-NEXT: BB0_6: ; %Flow6 ; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 ; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0 ; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1 @@ -217,16 +218,14 @@ ; GCN-IR-NEXT: v_mul_hi_u32 v2, s2, v0 ; GCN-IR-NEXT: v_mul_lo_u32 v3, s3, v0 ; GCN-IR-NEXT: v_mul_lo_u32 v0, s2, v0 -; GCN-IR-NEXT: s_mov_b32 s11, 0xf000 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v2, v1 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v1, v3 ; GCN-IR-NEXT: v_mov_b32_e32 v2, s7 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, s6, v0 -; GCN-IR-NEXT: s_mov_b32 s10, -1 -; GCN-IR-NEXT: s_mov_b32 s8, s4 -; GCN-IR-NEXT: s_mov_b32 s9, s5 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v2, v1, vcc -; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0 +; GCN-IR-NEXT: s_mov_b32 s7, 0xf000 +; GCN-IR-NEXT: s_mov_b32 s6, -1 +; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; GCN-IR-NEXT: s_endpgm %result = urem i64 %x, %y store i64 %result, i64 addrspace(1)* %out @@ -861,58 +860,50 @@ ; GCN-IR-LABEL: s_test_urem_k_num_i64: ; GCN-IR: ; %bb.0: ; %_udiv-special-cases ; GCN-IR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 -; GCN-IR-NEXT: s_mov_b32 s2, -1 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) -; GCN-IR-NEXT: s_flbit_i32_b32 s0, s6 -; GCN-IR-NEXT: s_flbit_i32_b32 s1, s7 -; GCN-IR-NEXT: s_add_i32 s0, s0, 32 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s1 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s0 +; GCN-IR-NEXT: s_flbit_i32_b32 s2, s6 +; GCN-IR-NEXT: s_flbit_i32_b32 s3, s7 +; GCN-IR-NEXT: s_add_i32 s2, s2, 32 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s3 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s2 ; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0 ; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc -; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 0xffffffc5, v2 -; GCN-IR-NEXT: v_addc_u32_e64 v1, s[0:1], 0, -1, vcc -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[6:7], 0 -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], 
63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[8:9], s[8:9], vcc -; GCN-IR-NEXT: s_or_b64 s[0:1], s[8:9], s[0:1] -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1] -; GCN-IR-NEXT: s_cbranch_vccz BB6_2 -; GCN-IR-NEXT: ; %bb.1: -; GCN-IR-NEXT: v_mov_b32_e32 v1, 0 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, 24, 0, s[8:9] -; GCN-IR-NEXT: s_branch BB6_7 -; GCN-IR-NEXT: BB6_2: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, 1, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1] -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], 24, v0 -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1] +; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, 0xffffffc5, v2 +; GCN-IR-NEXT: v_addc_u32_e64 v4, s[2:3], 0, -1, vcc +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], 0 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[3:4] +; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[3:4] +; GCN-IR-NEXT: s_xor_b64 s[2:3], s[0:1], -1 +; GCN-IR-NEXT: s_and_b64 s[2:3], s[2:3], vcc +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[2:3] ; GCN-IR-NEXT: s_cbranch_vccz BB6_4 -; GCN-IR-NEXT: ; %bb.3: -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: s_branch BB6_6 -; GCN-IR-NEXT: BB6_4: ; %udiv-preheader -; GCN-IR-NEXT: s_add_u32 s3, s6, -1 -; GCN-IR-NEXT: v_lshr_b64 v[6:7], 24, v3 -; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, 58, v2 +; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 +; GCN-IR-NEXT: v_add_i32_e32 v5, vcc, 1, v3 +; GCN-IR-NEXT: v_addc_u32_e32 v6, vcc, 0, v4, vcc +; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v3 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[5:6], v[3:4] +; GCN-IR-NEXT: v_lshl_b64 v[0:1], 24, v0 +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc +; GCN-IR-NEXT: s_cbranch_vccz BB6_5 +; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader +; GCN-IR-NEXT: s_add_u32 s2, s6, -1 ; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 -; GCN-IR-NEXT: s_addc_u32 s8, s7, -1 +; GCN-IR-NEXT: v_lshr_b64 v[6:7], 24, v5 +; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, 58, v2 +; GCN-IR-NEXT: s_addc_u32 s3, s7, -1 ; GCN-IR-NEXT: v_subb_u32_e64 v5, s[0:1], 0, 0, vcc ; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 ; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: BB6_5: ; %udiv-do-while +; GCN-IR-NEXT: BB6_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 ; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 ; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 ; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v2 ; GCN-IR-NEXT: v_or_b32_e32 v0, v8, v0 -; GCN-IR-NEXT: v_mov_b32_e32 v2, s8 -; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s3, v6 +; GCN-IR-NEXT: v_mov_b32_e32 v2, s3 +; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, s2, v6 ; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, v2, v7, vcc ; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v2 ; GCN-IR-NEXT: v_and_b32_e32 v10, s6, v8 @@ -929,8 +920,16 @@ ; GCN-IR-NEXT: v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1] ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc ; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 -; GCN-IR-NEXT: s_cbranch_vccz BB6_5 -; GCN-IR-NEXT: BB6_6: ; %udiv-loop-exit +; GCN-IR-NEXT: s_cbranch_vccz BB6_3 +; GCN-IR-NEXT: s_branch BB6_6 +; GCN-IR-NEXT: BB6_4: +; GCN-IR-NEXT: v_mov_b32_e32 v1, 0 +; GCN-IR-NEXT: v_cndmask_b32_e64 v0, 24, 0, s[0:1] +; GCN-IR-NEXT: s_branch BB6_7 +; GCN-IR-NEXT: BB6_5: +; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 +; GCN-IR-NEXT: BB6_6: ; %Flow5 ; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 ; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0 ; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1 @@ -939,14 +938,13 @@ ; GCN-IR-NEXT: v_mul_hi_u32 
v2, s6, v0 ; GCN-IR-NEXT: v_mul_lo_u32 v3, s7, v0 ; GCN-IR-NEXT: v_mul_lo_u32 v0, s6, v0 -; GCN-IR-NEXT: s_mov_b32 s3, 0xf000 +; GCN-IR-NEXT: s_mov_b32 s7, 0xf000 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v2, v1 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v1, v3 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 24, v0 -; GCN-IR-NEXT: s_mov_b32 s0, s4 -; GCN-IR-NEXT: s_mov_b32 s1, s5 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, 0, v1, vcc -; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 +; GCN-IR-NEXT: s_mov_b32 s6, -1 +; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; GCN-IR-NEXT: s_endpgm %result = urem i64 24, %x store i64 %result, i64 addrspace(1)* %out @@ -1064,48 +1062,39 @@ ; GCN-IR: ; %bb.0: ; %_udiv-special-cases ; GCN-IR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) -; GCN-IR-NEXT: s_flbit_i32_b32 s0, s6 -; GCN-IR-NEXT: s_flbit_i32_b32 s1, s7 -; GCN-IR-NEXT: s_add_i32 s0, s0, 32 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s1 -; GCN-IR-NEXT: v_mov_b32_e32 v1, s0 +; GCN-IR-NEXT: s_flbit_i32_b32 s2, s6 +; GCN-IR-NEXT: s_flbit_i32_b32 s3, s7 +; GCN-IR-NEXT: s_add_i32 s2, s2, 32 +; GCN-IR-NEXT: v_mov_b32_e32 v0, s3 +; GCN-IR-NEXT: v_mov_b32_e32 v1, s2 ; GCN-IR-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0 ; GCN-IR-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 59, v2 -; GCN-IR-NEXT: v_subb_u32_e64 v1, s[0:1], 0, 0, vcc -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[2:3], s[6:7], 0 -; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[0:1] -; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], 63, v[0:1] -; GCN-IR-NEXT: s_or_b64 s[2:3], s[2:3], vcc -; GCN-IR-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1] -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1] -; GCN-IR-NEXT: s_cbranch_vccz BB7_2 -; GCN-IR-NEXT: ; %bb.1: -; GCN-IR-NEXT: v_mov_b32_e32 v0, s7 -; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[2:3] -; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 -; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[2:3] -; GCN-IR-NEXT: s_branch BB7_7 -; GCN-IR-NEXT: BB7_2: ; %udiv-bb1 -; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, 1, v0 -; GCN-IR-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc -; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1] -; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v0 -; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[6:7], v0 -; GCN-IR-NEXT: s_and_b64 vcc, exec, s[0:1] +; GCN-IR-NEXT: v_sub_i32_e32 v3, vcc, 59, v2 +; GCN-IR-NEXT: v_subb_u32_e64 v4, s[2:3], 0, 0, vcc +; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], 0 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[3:4] +; GCN-IR-NEXT: s_or_b64 s[0:1], s[0:1], vcc +; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[3:4] +; GCN-IR-NEXT: s_xor_b64 s[2:3], s[0:1], -1 +; GCN-IR-NEXT: s_and_b64 s[2:3], s[2:3], vcc +; GCN-IR-NEXT: s_and_b64 vcc, exec, s[2:3] ; GCN-IR-NEXT: s_cbranch_vccz BB7_4 -; GCN-IR-NEXT: ; %bb.3: -; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 -; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: s_branch BB7_6 -; GCN-IR-NEXT: BB7_4: ; %udiv-preheader -; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[6:7], v3 -; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 0xffffffc4, v2 +; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1 +; GCN-IR-NEXT: v_add_i32_e32 v5, vcc, 1, v3 +; GCN-IR-NEXT: v_addc_u32_e32 v6, vcc, 0, v4, vcc +; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, 63, v3 +; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, v[5:6], v[3:4] +; GCN-IR-NEXT: v_lshl_b64 v[0:1], s[6:7], v0 +; GCN-IR-NEXT: s_andn2_b64 vcc, exec, vcc +; GCN-IR-NEXT: s_cbranch_vccz BB7_5 +; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader ; GCN-IR-NEXT: v_mov_b32_e32 v8, 0 +; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[6:7], v5 +; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 0xffffffc4, v2 ; GCN-IR-NEXT: 
v_addc_u32_e64 v5, s[0:1], 0, -1, vcc ; GCN-IR-NEXT: v_mov_b32_e32 v9, 0 ; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 -; GCN-IR-NEXT: BB7_5: ; %udiv-do-while +; GCN-IR-NEXT: BB7_3: ; %udiv-do-while ; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1 ; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1 @@ -1128,8 +1117,18 @@ ; GCN-IR-NEXT: v_subbrev_u32_e64 v7, s[0:1], 0, v7, s[0:1] ; GCN-IR-NEXT: s_and_b64 vcc, exec, vcc ; GCN-IR-NEXT: v_mov_b32_e32 v8, v2 -; GCN-IR-NEXT: s_cbranch_vccz BB7_5 -; GCN-IR-NEXT: BB7_6: ; %udiv-loop-exit +; GCN-IR-NEXT: s_cbranch_vccz BB7_3 +; GCN-IR-NEXT: s_branch BB7_6 +; GCN-IR-NEXT: BB7_4: +; GCN-IR-NEXT: v_mov_b32_e32 v0, s7 +; GCN-IR-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[0:1] +; GCN-IR-NEXT: v_mov_b32_e32 v0, s6 +; GCN-IR-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[0:1] +; GCN-IR-NEXT: s_branch BB7_7 +; GCN-IR-NEXT: BB7_5: +; GCN-IR-NEXT: v_mov_b32_e32 v2, 0 +; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 +; GCN-IR-NEXT: BB7_6: ; %Flow5 ; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 ; GCN-IR-NEXT: v_or_b32_e32 v0, v2, v0 ; GCN-IR-NEXT: v_or_b32_e32 v1, v3, v1 @@ -1137,15 +1136,13 @@ ; GCN-IR-NEXT: v_mul_hi_u32 v2, v0, 24 ; GCN-IR-NEXT: v_mul_lo_u32 v1, v1, 24 ; GCN-IR-NEXT: v_mul_lo_u32 v0, v0, 24 -; GCN-IR-NEXT: s_mov_b32 s3, 0xf000 -; GCN-IR-NEXT: s_mov_b32 s2, -1 ; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v2, v1 ; GCN-IR-NEXT: v_mov_b32_e32 v2, s7 ; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, s6, v0 -; GCN-IR-NEXT: s_mov_b32 s0, s4 -; GCN-IR-NEXT: s_mov_b32 s1, s5 ; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v2, v1, vcc -; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 +; GCN-IR-NEXT: s_mov_b32 s7, 0xf000 +; GCN-IR-NEXT: s_mov_b32 s6, -1 +; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; GCN-IR-NEXT: s_endpgm %result = urem i64 %x, 24 store i64 %result, i64 addrspace(1)* %out diff --git a/llvm/test/CodeGen/AMDGPU/valu-i1.ll b/llvm/test/CodeGen/AMDGPU/valu-i1.ll --- a/llvm/test/CodeGen/AMDGPU/valu-i1.ll +++ b/llvm/test/CodeGen/AMDGPU/valu-i1.ll @@ -159,8 +159,8 @@ ; SI: [[LABEL_LOOP:BB[0-9]+_[0-9]+]]: ; SI: buffer_load_dword ; SI-DAG: buffer_store_dword -; SI-DAG: s_cmpk_eq_i32 s{{[0-9+]}}, 0x100 -; SI: s_cbranch_scc0 [[LABEL_LOOP]] +; SI-DAG: s_cmpk_lg_i32 s{{[0-9+]}}, 0x100 +; SI: s_cbranch_scc1 [[LABEL_LOOP]] ; SI: [[LABEL_EXIT]]: ; SI: s_endpgm diff --git a/llvm/test/CodeGen/AMDGPU/wqm.ll b/llvm/test/CodeGen/AMDGPU/wqm.ll --- a/llvm/test/CodeGen/AMDGPU/wqm.ll +++ b/llvm/test/CodeGen/AMDGPU/wqm.ll @@ -652,13 +652,13 @@ ; CHECK-DAG: v_mov_b32_e32 [[CTR:v[0-9]+]], 0 ; CHECK-DAG: s_mov_b32 [[SEVEN:s[0-9]+]], 0x40e00000 +; CHECK: ; %body +; CHECK: v_add_f32_e32 [[CTR]], 2.0, [[CTR]] ; CHECK: [[LOOPHDR:BB[0-9]+_[0-9]+]]: ; %loop ; CHECK: v_cmp_lt_f32_e32 vcc, [[SEVEN]], [[CTR]] -; CHECK: s_cbranch_vccnz +; CHECK: s_cbranch_vccz -; CHECK: ; %body -; CHECK: v_add_f32_e32 [[CTR]], 2.0, [[CTR]] -; CHECK: s_branch [[LOOPHDR]] +; CHECK: s_cbranch_vccnz [[LOOPHDR]] ; CHECK: ; %break ; CHECK: ; return @@ -769,13 +769,12 @@ ; CHECK: s_wqm_b64 exec, exec ; CHECK: s_cmp_ ; CHECK-NEXT: s_cbranch_scc -; CHECK: ; %if -; CHECK: s_and_b64 exec, exec, [[ORIG]] -; CHECK: image_sample ; CHECK: ; %else -; CHECK: s_and_b64 exec, exec, [[ORIG]] +; CHECK: image_sample +; CHECK: ; %if ; CHECK: image_sample ; CHECK: ; %end +; CHECK: s_and_b64 exec, exec, [[ORIG]] define amdgpu_ps <4 x float> @test_scc(i32 inreg %sel, i32 %idx) #1 { main_body: %cc = icmp sgt i32 %sel, 0