Index: llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
+++ llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
@@ -238,7 +238,7 @@
 
 // Special case divide FMA with scale and flags (src0 = Quotient,
 // src1 = Denominator, src2 = Numerator).
-def AMDGPUdiv_fmas : SDNode<"AMDGPUISD::DIV_FMAS", AMDGPUFmasOp,
+def AMDGPUdiv_fmas_impl : SDNode<"AMDGPUISD::DIV_FMAS", AMDGPUFmasOp,
                             [SDNPOptInGlue]>;
 
 // Single or double precision division fixup.
@@ -476,3 +476,7 @@
 def AMDGPUfdot2 : PatFrags<(ops node:$src0, node:$src1, node:$src2, node:$clamp),
   [(int_amdgcn_fdot2 node:$src0, node:$src1, node:$src2, node:$clamp),
    (AMDGPUfdot2_impl node:$src0, node:$src1, node:$src2, node:$clamp)]>;
+
+def AMDGPUdiv_fmas : PatFrags<(ops node:$src0, node:$src1, node:$src2, node:$vcc),
+  [(int_amdgcn_div_fmas node:$src0, node:$src1, node:$src2, node:$vcc),
+   (AMDGPUdiv_fmas_impl node:$src0, node:$src1, node:$src2, node:$vcc)]>;
Index: llvm/lib/Target/AMDGPU/VOP3Instructions.td
===================================================================
--- llvm/lib/Target/AMDGPU/VOP3Instructions.td
+++ llvm/lib/Target/AMDGPU/VOP3Instructions.td
@@ -718,9 +718,9 @@
 
 class DivFmasPat<ValueType vt, Instruction inst, Register CondReg> : GCNPat<
   (AMDGPUdiv_fmas (vt (VOP3Mods vt:$src0, i32:$src0_modifiers)),
-                  (VOP3Mods vt:$src1, i32:$src1_modifiers),
-                  (VOP3Mods vt:$src2, i32:$src2_modifiers),
-                  (i1 CondReg)),
+                  (vt (VOP3Mods vt:$src1, i32:$src1_modifiers)),
+                  (vt (VOP3Mods vt:$src2, i32:$src2_modifiers)),
+                  (i1 CondReg)),
   (inst $src0_modifiers, $src0, $src1_modifiers, $src1, $src2_modifiers, $src2)
 >;
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.div.fmas.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.div.fmas.ll
@@ -0,0 +1,1141 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=hawaii < %s | FileCheck -check-prefixes=GCN,GFX7 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=fiji < %s | FileCheck -check-prefixes=GCN,GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GCN,GFX10_W32 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -mattr=+wavefrontsize64 < %s | FileCheck -check-prefixes=GCN,GFX10_W64 %s
+
+define float @v_div_fmas_f32(float %a, float %b, float %c, i1 %d) {
+; GFX7-LABEL: v_div_fmas_f32:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v3, 1, v3
+; GFX7-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
+; GFX7-NEXT: s_nop 3
+; GFX7-NEXT: v_div_fmas_f32 v0, v0, v1, v2
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_div_fmas_f32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_and_b32_e32 v3, 1, v3
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
+; GFX8-NEXT: s_nop 3
+; GFX8-NEXT: v_div_fmas_f32 v0, v0, v1, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10_W32-LABEL: v_div_fmas_f32:
+; GFX10_W32: ; %bb.0:
+; GFX10_W32-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10_W32-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10_W32-NEXT: v_and_b32_e32 v3, 1, v3
+; GFX10_W32-NEXT: ; implicit-def: $vcc_hi
+; GFX10_W32-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v3
+; GFX10_W32-NEXT: v_div_fmas_f32 v0, v0, v1, v2
+; GFX10_W32-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10_W64-LABEL: v_div_fmas_f32:
+; GFX10_W64: ; %bb.0:
+; GFX10_W64-NEXT: s_waitcnt vmcnt(0)
expcnt(0) lgkmcnt(0) +; GFX10_W64-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10_W64-NEXT: v_and_b32_e32 v3, 1, v3 +; GFX10_W64-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3 +; GFX10_W64-NEXT: v_div_fmas_f32 v0, v0, v1, v2 +; GFX10_W64-NEXT: s_setpc_b64 s[30:31] + %result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 %d) + ret float %result +} + +define double @v_div_fmas_f64(double %a, double %b, double %c, i1 %d) { +; GFX7-LABEL: v_div_fmas_f64: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_and_b32_e32 v6, 1, v6 +; GFX7-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6 +; GFX7-NEXT: s_nop 3 +; GFX7-NEXT: v_div_fmas_f64 v[0:1], v[0:1], v[2:3], v[4:5] +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_div_fmas_f64: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_and_b32_e32 v6, 1, v6 +; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6 +; GFX8-NEXT: s_nop 3 +; GFX8-NEXT: v_div_fmas_f64 v[0:1], v[0:1], v[2:3], v[4:5] +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX10_W32-LABEL: v_div_fmas_f64: +; GFX10_W32: ; %bb.0: +; GFX10_W32-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10_W32-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10_W32-NEXT: v_and_b32_e32 v6, 1, v6 +; GFX10_W32-NEXT: ; implicit-def: $vcc_hi +; GFX10_W32-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6 +; GFX10_W32-NEXT: v_div_fmas_f64 v[0:1], v[0:1], v[2:3], v[4:5] +; GFX10_W32-NEXT: s_setpc_b64 s[30:31] +; +; GFX10_W64-LABEL: v_div_fmas_f64: +; GFX10_W64: ; %bb.0: +; GFX10_W64-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10_W64-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10_W64-NEXT: v_and_b32_e32 v6, 1, v6 +; GFX10_W64-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6 +; GFX10_W64-NEXT: v_div_fmas_f64 v[0:1], v[0:1], v[2:3], v[4:5] +; GFX10_W64-NEXT: s_setpc_b64 s[30:31] + %result = call double @llvm.amdgcn.div.fmas.f64(double %a, double %b, double %c, i1 %d) + ret double %result +} + +define amdgpu_ps float @s_div_fmas_f32(float inreg %a, float inreg %b, float inreg %c, i32 inreg %d) { +; GFX7-LABEL: s_div_fmas_f32: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_cmp_eq_u32 s3, 0 +; GFX7-NEXT: s_cselect_b32 s3, 1, 0 +; GFX7-NEXT: v_mov_b32_e32 v0, s0 +; GFX7-NEXT: s_and_b32 s0, 1, s3 +; GFX7-NEXT: v_mov_b32_e32 v1, s1 +; GFX7-NEXT: v_mov_b32_e32 v2, s2 +; GFX7-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GFX7-NEXT: s_nop 3 +; GFX7-NEXT: v_div_fmas_f32 v0, v0, v1, v2 +; GFX7-NEXT: ; return to shader part epilog +; +; GFX8-LABEL: s_div_fmas_f32: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_cmp_eq_u32 s3, 0 +; GFX8-NEXT: s_cselect_b32 s3, 1, 0 +; GFX8-NEXT: v_mov_b32_e32 v0, s0 +; GFX8-NEXT: s_and_b32 s0, 1, s3 +; GFX8-NEXT: v_mov_b32_e32 v1, s1 +; GFX8-NEXT: v_mov_b32_e32 v2, s2 +; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GFX8-NEXT: s_nop 3 +; GFX8-NEXT: v_div_fmas_f32 v0, v0, v1, v2 +; GFX8-NEXT: ; return to shader part epilog +; +; GFX10_W32-LABEL: s_div_fmas_f32: +; GFX10_W32: ; %bb.0: +; GFX10_W32-NEXT: s_cmp_eq_u32 s3, 0 +; GFX10_W32-NEXT: v_mov_b32_e32 v0, s1 +; GFX10_W32-NEXT: v_mov_b32_e32 v1, s2 +; GFX10_W32-NEXT: ; implicit-def: $vcc_hi +; GFX10_W32-NEXT: s_cselect_b32 s3, 1, 0 +; GFX10_W32-NEXT: s_and_b32 s3, 1, s3 +; GFX10_W32-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s3 +; GFX10_W32-NEXT: v_div_fmas_f32 v0, s0, v0, v1 +; GFX10_W32-NEXT: ; return to shader part epilog +; +; GFX10_W64-LABEL: s_div_fmas_f32: +; GFX10_W64: ; %bb.0: +; GFX10_W64-NEXT: s_cmp_eq_u32 s3, 0 +; GFX10_W64-NEXT: v_mov_b32_e32 v0, s1 +; GFX10_W64-NEXT: v_mov_b32_e32 v1, s2 +; GFX10_W64-NEXT: s_cselect_b32 s3, 1, 0 +; GFX10_W64-NEXT: s_and_b32 s3, 
1, s3 +; GFX10_W64-NEXT: v_cmp_ne_u32_e64 vcc, 0, s3 +; GFX10_W64-NEXT: v_div_fmas_f32 v0, s0, v0, v1 +; GFX10_W64-NEXT: ; return to shader part epilog + %vcc = icmp eq i32 %d, 0 + %result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 %vcc) + ret float %result +} + +define amdgpu_ps double @s_div_fmas_f64(double inreg %a, double inreg %b, double inreg %c, i32 inreg %d) { +; GFX7-LABEL: s_div_fmas_f64: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_cmp_eq_u32 s6, 0 +; GFX7-NEXT: v_mov_b32_e32 v0, s0 +; GFX7-NEXT: s_cselect_b32 s6, 1, 0 +; GFX7-NEXT: v_mov_b32_e32 v1, s1 +; GFX7-NEXT: v_mov_b32_e32 v2, s2 +; GFX7-NEXT: v_mov_b32_e32 v4, s4 +; GFX7-NEXT: s_and_b32 s0, 1, s6 +; GFX7-NEXT: v_mov_b32_e32 v3, s3 +; GFX7-NEXT: v_mov_b32_e32 v5, s5 +; GFX7-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GFX7-NEXT: s_nop 3 +; GFX7-NEXT: v_div_fmas_f64 v[0:1], v[0:1], v[2:3], v[4:5] +; GFX7-NEXT: v_readfirstlane_b32 s0, v0 +; GFX7-NEXT: v_readfirstlane_b32 s1, v1 +; GFX7-NEXT: ; return to shader part epilog +; +; GFX8-LABEL: s_div_fmas_f64: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_cmp_eq_u32 s6, 0 +; GFX8-NEXT: v_mov_b32_e32 v0, s0 +; GFX8-NEXT: s_cselect_b32 s6, 1, 0 +; GFX8-NEXT: v_mov_b32_e32 v1, s1 +; GFX8-NEXT: v_mov_b32_e32 v2, s2 +; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: s_and_b32 s0, 1, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s3 +; GFX8-NEXT: v_mov_b32_e32 v5, s5 +; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GFX8-NEXT: s_nop 3 +; GFX8-NEXT: v_div_fmas_f64 v[0:1], v[0:1], v[2:3], v[4:5] +; GFX8-NEXT: v_readfirstlane_b32 s0, v0 +; GFX8-NEXT: v_readfirstlane_b32 s1, v1 +; GFX8-NEXT: ; return to shader part epilog +; +; GFX10_W32-LABEL: s_div_fmas_f64: +; GFX10_W32: ; %bb.0: +; GFX10_W32-NEXT: s_cmp_eq_u32 s6, 0 +; GFX10_W32-NEXT: v_mov_b32_e32 v0, s2 +; GFX10_W32-NEXT: v_mov_b32_e32 v2, s4 +; GFX10_W32-NEXT: v_mov_b32_e32 v1, s3 +; GFX10_W32-NEXT: v_mov_b32_e32 v3, s5 +; GFX10_W32-NEXT: s_cselect_b32 s6, 1, 0 +; GFX10_W32-NEXT: ; implicit-def: $vcc_hi +; GFX10_W32-NEXT: s_and_b32 s6, 1, s6 +; GFX10_W32-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s6 +; GFX10_W32-NEXT: v_div_fmas_f64 v[0:1], s[0:1], v[0:1], v[2:3] +; GFX10_W32-NEXT: v_readfirstlane_b32 s0, v0 +; GFX10_W32-NEXT: v_readfirstlane_b32 s1, v1 +; GFX10_W32-NEXT: ; return to shader part epilog +; +; GFX10_W64-LABEL: s_div_fmas_f64: +; GFX10_W64: ; %bb.0: +; GFX10_W64-NEXT: s_cmp_eq_u32 s6, 0 +; GFX10_W64-NEXT: v_mov_b32_e32 v0, s2 +; GFX10_W64-NEXT: v_mov_b32_e32 v2, s4 +; GFX10_W64-NEXT: v_mov_b32_e32 v1, s3 +; GFX10_W64-NEXT: v_mov_b32_e32 v3, s5 +; GFX10_W64-NEXT: s_cselect_b32 s6, 1, 0 +; GFX10_W64-NEXT: s_and_b32 s6, 1, s6 +; GFX10_W64-NEXT: v_cmp_ne_u32_e64 vcc, 0, s6 +; GFX10_W64-NEXT: v_div_fmas_f64 v[0:1], s[0:1], v[0:1], v[2:3] +; GFX10_W64-NEXT: v_readfirstlane_b32 s0, v0 +; GFX10_W64-NEXT: v_readfirstlane_b32 s1, v1 +; GFX10_W64-NEXT: ; return to shader part epilog + %vcc = icmp eq i32 %d, 0 + %result = call double @llvm.amdgcn.div.fmas.f64(double %a, double %b, double %c, i1 %vcc) + ret double %result +} + +define amdgpu_kernel void @test_div_fmas_f32(float addrspace(1)* %out, [8 x i32], float %a, [8 x i32], float %b, [8 x i32], float %c, [8 x i32], i1 %d) { +; GFX7-LABEL: test_div_fmas_f32: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 +; GFX7-NEXT: s_load_dword s2, s[0:1], 0x13 +; GFX7-NEXT: s_load_dword s3, s[0:1], 0x1c +; GFX7-NEXT: s_load_dword s6, s[0:1], 0x25 +; GFX7-NEXT: s_load_dword s0, s[0:1], 0x2e +; GFX7-NEXT: s_mov_b32 s7, 0xf000 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_mov_b32_e32 v0, s2 +; 
GFX7-NEXT: v_mov_b32_e32 v1, s3 +; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: s_and_b32 s0, 1, s0 +; GFX7-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GFX7-NEXT: s_mov_b32 s6, -1 +; GFX7-NEXT: s_nop 2 +; GFX7-NEXT: v_div_fmas_f32 v0, v0, v1, v2 +; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX7-NEXT: s_endpgm +; +; GFX8-LABEL: test_div_fmas_f32: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_load_dword s2, s[0:1], 0x4c +; GFX8-NEXT: s_load_dword s3, s[0:1], 0x70 +; GFX8-NEXT: s_load_dword s4, s[0:1], 0x94 +; GFX8-NEXT: s_load_dword s5, s[0:1], 0xb8 +; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8-NEXT: s_waitcnt lgkmcnt(0) +; GFX8-NEXT: v_mov_b32_e32 v0, s2 +; GFX8-NEXT: v_mov_b32_e32 v1, s3 +; GFX8-NEXT: v_mov_b32_e32 v2, s4 +; GFX8-NEXT: s_and_b32 s2, 1, s5 +; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s2 +; GFX8-NEXT: s_nop 3 +; GFX8-NEXT: v_div_fmas_f32 v2, v0, v1, v2 +; GFX8-NEXT: v_mov_b32_e32 v0, s0 +; GFX8-NEXT: v_mov_b32_e32 v1, s1 +; GFX8-NEXT: flat_store_dword v[0:1], v2 +; GFX8-NEXT: s_endpgm +; +; GFX10_W32-LABEL: test_div_fmas_f32: +; GFX10_W32: ; %bb.0: +; GFX10_W32-NEXT: s_load_dword s2, s[0:1], 0xb8 +; GFX10_W32-NEXT: s_load_dword s3, s[0:1], 0x70 +; GFX10_W32-NEXT: s_load_dword s4, s[0:1], 0x94 +; GFX10_W32-NEXT: s_load_dword s5, s[0:1], 0x4c +; GFX10_W32-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX10_W32-NEXT: ; implicit-def: $vcc_hi +; GFX10_W32-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W32-NEXT: s_and_b32 s2, 1, s2 +; GFX10_W32-NEXT: v_mov_b32_e32 v0, s3 +; GFX10_W32-NEXT: v_mov_b32_e32 v1, s4 +; GFX10_W32-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s2 +; GFX10_W32-NEXT: v_div_fmas_f32 v2, s5, v0, v1 +; GFX10_W32-NEXT: v_mov_b32_e32 v0, s0 +; GFX10_W32-NEXT: v_mov_b32_e32 v1, s1 +; GFX10_W32-NEXT: global_store_dword v[0:1], v2, off +; GFX10_W32-NEXT: s_endpgm +; +; GFX10_W64-LABEL: test_div_fmas_f32: +; GFX10_W64: ; %bb.0: +; GFX10_W64-NEXT: s_load_dword s2, s[0:1], 0xb8 +; GFX10_W64-NEXT: s_load_dword s3, s[0:1], 0x70 +; GFX10_W64-NEXT: s_load_dword s4, s[0:1], 0x94 +; GFX10_W64-NEXT: s_load_dword s5, s[0:1], 0x4c +; GFX10_W64-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX10_W64-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W64-NEXT: s_and_b32 s2, 1, s2 +; GFX10_W64-NEXT: v_mov_b32_e32 v0, s3 +; GFX10_W64-NEXT: v_mov_b32_e32 v1, s4 +; GFX10_W64-NEXT: v_cmp_ne_u32_e64 vcc, 0, s2 +; GFX10_W64-NEXT: v_div_fmas_f32 v2, s5, v0, v1 +; GFX10_W64-NEXT: v_mov_b32_e32 v0, s0 +; GFX10_W64-NEXT: v_mov_b32_e32 v1, s1 +; GFX10_W64-NEXT: global_store_dword v[0:1], v2, off +; GFX10_W64-NEXT: s_endpgm + %result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 %d) + store float %result, float addrspace(1)* %out, align 4 + ret void +} + +define amdgpu_kernel void @test_div_fmas_f32_inline_imm_0(float addrspace(1)* %out, [8 x i32], float %a, [8 x i32], float %b, [8 x i32], float %c, [8 x i32], i1 %d) { +; GFX7-LABEL: test_div_fmas_f32_inline_imm_0: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 +; GFX7-NEXT: s_load_dword s2, s[0:1], 0x1c +; GFX7-NEXT: s_load_dword s3, s[0:1], 0x25 +; GFX7-NEXT: s_load_dword s0, s[0:1], 0x2e +; GFX7-NEXT: s_mov_b32 s6, -1 +; GFX7-NEXT: s_mov_b32 s7, 0xf000 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_mov_b32_e32 v0, s2 +; GFX7-NEXT: v_mov_b32_e32 v1, s3 +; GFX7-NEXT: s_and_b32 s0, 1, s0 +; GFX7-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GFX7-NEXT: s_nop 3 +; GFX7-NEXT: v_div_fmas_f32 v0, 1.0, v0, v1 +; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX7-NEXT: s_endpgm +; +; GFX8-LABEL: test_div_fmas_f32_inline_imm_0: +; GFX8: ; %bb.0: 
+; GFX8-NEXT: s_load_dword s2, s[0:1], 0x70 +; GFX8-NEXT: s_load_dword s3, s[0:1], 0x94 +; GFX8-NEXT: s_load_dword s4, s[0:1], 0xb8 +; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8-NEXT: s_waitcnt lgkmcnt(0) +; GFX8-NEXT: v_mov_b32_e32 v0, s2 +; GFX8-NEXT: v_mov_b32_e32 v1, s3 +; GFX8-NEXT: s_and_b32 s2, 1, s4 +; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s2 +; GFX8-NEXT: s_nop 3 +; GFX8-NEXT: v_div_fmas_f32 v2, 1.0, v0, v1 +; GFX8-NEXT: v_mov_b32_e32 v0, s0 +; GFX8-NEXT: v_mov_b32_e32 v1, s1 +; GFX8-NEXT: flat_store_dword v[0:1], v2 +; GFX8-NEXT: s_endpgm +; +; GFX10_W32-LABEL: test_div_fmas_f32_inline_imm_0: +; GFX10_W32: ; %bb.0: +; GFX10_W32-NEXT: s_load_dword s2, s[0:1], 0xb8 +; GFX10_W32-NEXT: s_load_dword s3, s[0:1], 0x94 +; GFX10_W32-NEXT: s_load_dword s4, s[0:1], 0x70 +; GFX10_W32-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX10_W32-NEXT: ; implicit-def: $vcc_hi +; GFX10_W32-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W32-NEXT: s_and_b32 s2, 1, s2 +; GFX10_W32-NEXT: v_mov_b32_e32 v0, s3 +; GFX10_W32-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s2 +; GFX10_W32-NEXT: v_div_fmas_f32 v2, 1.0, s4, v0 +; GFX10_W32-NEXT: v_mov_b32_e32 v0, s0 +; GFX10_W32-NEXT: v_mov_b32_e32 v1, s1 +; GFX10_W32-NEXT: global_store_dword v[0:1], v2, off +; GFX10_W32-NEXT: s_endpgm +; +; GFX10_W64-LABEL: test_div_fmas_f32_inline_imm_0: +; GFX10_W64: ; %bb.0: +; GFX10_W64-NEXT: s_load_dword s2, s[0:1], 0xb8 +; GFX10_W64-NEXT: s_load_dword s3, s[0:1], 0x94 +; GFX10_W64-NEXT: s_load_dword s4, s[0:1], 0x70 +; GFX10_W64-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX10_W64-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W64-NEXT: s_and_b32 s2, 1, s2 +; GFX10_W64-NEXT: v_mov_b32_e32 v0, s3 +; GFX10_W64-NEXT: v_cmp_ne_u32_e64 vcc, 0, s2 +; GFX10_W64-NEXT: v_div_fmas_f32 v2, 1.0, s4, v0 +; GFX10_W64-NEXT: v_mov_b32_e32 v0, s0 +; GFX10_W64-NEXT: v_mov_b32_e32 v1, s1 +; GFX10_W64-NEXT: global_store_dword v[0:1], v2, off +; GFX10_W64-NEXT: s_endpgm + %result = call float @llvm.amdgcn.div.fmas.f32(float 1.0, float %b, float %c, i1 %d) + store float %result, float addrspace(1)* %out, align 4 + ret void +} + +define amdgpu_kernel void @test_div_fmas_f32_inline_imm_1(float addrspace(1)* %out, float %a, float %b, float %c, [8 x i32], i1 %d) { +; GFX7-LABEL: test_div_fmas_f32_inline_imm_1: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 +; GFX7-NEXT: s_load_dword s2, s[0:1], 0xb +; GFX7-NEXT: s_load_dword s3, s[0:1], 0xd +; GFX7-NEXT: s_load_dword s0, s[0:1], 0x16 +; GFX7-NEXT: s_mov_b32 s6, -1 +; GFX7-NEXT: s_mov_b32 s7, 0xf000 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_mov_b32_e32 v0, s2 +; GFX7-NEXT: v_mov_b32_e32 v1, s3 +; GFX7-NEXT: s_and_b32 s0, 1, s0 +; GFX7-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GFX7-NEXT: s_nop 3 +; GFX7-NEXT: v_div_fmas_f32 v0, v0, 1.0, v1 +; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX7-NEXT: s_endpgm +; +; GFX8-LABEL: test_div_fmas_f32_inline_imm_1: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_load_dword s2, s[0:1], 0x2c +; GFX8-NEXT: s_load_dword s3, s[0:1], 0x34 +; GFX8-NEXT: s_load_dword s4, s[0:1], 0x58 +; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8-NEXT: s_waitcnt lgkmcnt(0) +; GFX8-NEXT: v_mov_b32_e32 v0, s2 +; GFX8-NEXT: v_mov_b32_e32 v1, s3 +; GFX8-NEXT: s_and_b32 s2, 1, s4 +; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s2 +; GFX8-NEXT: s_nop 3 +; GFX8-NEXT: v_div_fmas_f32 v2, v0, 1.0, v1 +; GFX8-NEXT: v_mov_b32_e32 v0, s0 +; GFX8-NEXT: v_mov_b32_e32 v1, s1 +; GFX8-NEXT: flat_store_dword v[0:1], v2 +; GFX8-NEXT: s_endpgm +; +; GFX10_W32-LABEL: test_div_fmas_f32_inline_imm_1: +; 
GFX10_W32: ; %bb.0: +; GFX10_W32-NEXT: s_load_dword s2, s[0:1], 0x58 +; GFX10_W32-NEXT: s_load_dword s3, s[0:1], 0x34 +; GFX10_W32-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX10_W32-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX10_W32-NEXT: ; implicit-def: $vcc_hi +; GFX10_W32-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W32-NEXT: s_and_b32 s2, 1, s2 +; GFX10_W32-NEXT: v_mov_b32_e32 v0, s3 +; GFX10_W32-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s2 +; GFX10_W32-NEXT: v_div_fmas_f32 v2, s4, 1.0, v0 +; GFX10_W32-NEXT: v_mov_b32_e32 v0, s0 +; GFX10_W32-NEXT: v_mov_b32_e32 v1, s1 +; GFX10_W32-NEXT: global_store_dword v[0:1], v2, off +; GFX10_W32-NEXT: s_endpgm +; +; GFX10_W64-LABEL: test_div_fmas_f32_inline_imm_1: +; GFX10_W64: ; %bb.0: +; GFX10_W64-NEXT: s_load_dword s2, s[0:1], 0x58 +; GFX10_W64-NEXT: s_load_dword s3, s[0:1], 0x34 +; GFX10_W64-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX10_W64-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX10_W64-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W64-NEXT: s_and_b32 s2, 1, s2 +; GFX10_W64-NEXT: v_mov_b32_e32 v0, s3 +; GFX10_W64-NEXT: v_cmp_ne_u32_e64 vcc, 0, s2 +; GFX10_W64-NEXT: v_div_fmas_f32 v2, s4, 1.0, v0 +; GFX10_W64-NEXT: v_mov_b32_e32 v0, s0 +; GFX10_W64-NEXT: v_mov_b32_e32 v1, s1 +; GFX10_W64-NEXT: global_store_dword v[0:1], v2, off +; GFX10_W64-NEXT: s_endpgm + %result = call float @llvm.amdgcn.div.fmas.f32(float %a, float 1.0, float %c, i1 %d) + store float %result, float addrspace(1)* %out, align 4 + ret void +} + +define amdgpu_kernel void @test_div_fmas_f32_inline_imm_2(float addrspace(1)* %out, [8 x i32], float %a, [8 x i32], float %b, [8 x i32], float %c, [8 x i32], i1 %d) { +; GFX7-LABEL: test_div_fmas_f32_inline_imm_2: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 +; GFX7-NEXT: s_load_dword s2, s[0:1], 0x13 +; GFX7-NEXT: s_load_dword s3, s[0:1], 0x1c +; GFX7-NEXT: s_load_dword s0, s[0:1], 0x2e +; GFX7-NEXT: s_mov_b32 s6, -1 +; GFX7-NEXT: s_mov_b32 s7, 0xf000 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_mov_b32_e32 v0, s2 +; GFX7-NEXT: v_mov_b32_e32 v1, s3 +; GFX7-NEXT: s_and_b32 s0, 1, s0 +; GFX7-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GFX7-NEXT: s_nop 3 +; GFX7-NEXT: v_div_fmas_f32 v0, v0, v1, 1.0 +; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX7-NEXT: s_endpgm +; +; GFX8-LABEL: test_div_fmas_f32_inline_imm_2: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_load_dword s2, s[0:1], 0x4c +; GFX8-NEXT: s_load_dword s3, s[0:1], 0x70 +; GFX8-NEXT: s_load_dword s4, s[0:1], 0xb8 +; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8-NEXT: s_waitcnt lgkmcnt(0) +; GFX8-NEXT: v_mov_b32_e32 v0, s2 +; GFX8-NEXT: v_mov_b32_e32 v1, s3 +; GFX8-NEXT: s_and_b32 s2, 1, s4 +; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s2 +; GFX8-NEXT: s_nop 3 +; GFX8-NEXT: v_div_fmas_f32 v2, v0, v1, 1.0 +; GFX8-NEXT: v_mov_b32_e32 v0, s0 +; GFX8-NEXT: v_mov_b32_e32 v1, s1 +; GFX8-NEXT: flat_store_dword v[0:1], v2 +; GFX8-NEXT: s_endpgm +; +; GFX10_W32-LABEL: test_div_fmas_f32_inline_imm_2: +; GFX10_W32: ; %bb.0: +; GFX10_W32-NEXT: s_load_dword s2, s[0:1], 0xb8 +; GFX10_W32-NEXT: s_load_dword s3, s[0:1], 0x70 +; GFX10_W32-NEXT: s_load_dword s4, s[0:1], 0x4c +; GFX10_W32-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX10_W32-NEXT: ; implicit-def: $vcc_hi +; GFX10_W32-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W32-NEXT: s_and_b32 s2, 1, s2 +; GFX10_W32-NEXT: v_mov_b32_e32 v0, s3 +; GFX10_W32-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s2 +; GFX10_W32-NEXT: v_div_fmas_f32 v2, s4, v0, 1.0 +; GFX10_W32-NEXT: v_mov_b32_e32 v0, s0 +; GFX10_W32-NEXT: v_mov_b32_e32 v1, s1 +; GFX10_W32-NEXT: 
global_store_dword v[0:1], v2, off +; GFX10_W32-NEXT: s_endpgm +; +; GFX10_W64-LABEL: test_div_fmas_f32_inline_imm_2: +; GFX10_W64: ; %bb.0: +; GFX10_W64-NEXT: s_load_dword s2, s[0:1], 0xb8 +; GFX10_W64-NEXT: s_load_dword s3, s[0:1], 0x70 +; GFX10_W64-NEXT: s_load_dword s4, s[0:1], 0x4c +; GFX10_W64-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX10_W64-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W64-NEXT: s_and_b32 s2, 1, s2 +; GFX10_W64-NEXT: v_mov_b32_e32 v0, s3 +; GFX10_W64-NEXT: v_cmp_ne_u32_e64 vcc, 0, s2 +; GFX10_W64-NEXT: v_div_fmas_f32 v2, s4, v0, 1.0 +; GFX10_W64-NEXT: v_mov_b32_e32 v0, s0 +; GFX10_W64-NEXT: v_mov_b32_e32 v1, s1 +; GFX10_W64-NEXT: global_store_dword v[0:1], v2, off +; GFX10_W64-NEXT: s_endpgm + %result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float 1.0, i1 %d) + store float %result, float addrspace(1)* %out, align 4 + ret void +} + +define amdgpu_kernel void @test_div_fmas_f64(double addrspace(1)* %out, double %a, double %b, double %c, i1 %d) { +; GFX7-LABEL: test_div_fmas_f64: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_load_dword s8, s[0:1], 0x11 +; GFX7-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_mov_b32_e32 v0, s2 +; GFX7-NEXT: v_mov_b32_e32 v1, s3 +; GFX7-NEXT: v_mov_b32_e32 v2, s4 +; GFX7-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-NEXT: s_and_b32 s2, 1, s8 +; GFX7-NEXT: v_mov_b32_e32 v3, s5 +; GFX7-NEXT: v_mov_b32_e32 v5, s7 +; GFX7-NEXT: v_cmp_ne_u32_e64 vcc, 0, s2 +; GFX7-NEXT: s_nop 3 +; GFX7-NEXT: v_div_fmas_f64 v[0:1], v[0:1], v[2:3], v[4:5] +; GFX7-NEXT: v_mov_b32_e32 v3, s1 +; GFX7-NEXT: v_mov_b32_e32 v2, s0 +; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1] +; GFX7-NEXT: s_endpgm +; +; GFX8-LABEL: test_div_fmas_f64: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_load_dword s8, s[0:1], 0x44 +; GFX8-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24 +; GFX8-NEXT: s_waitcnt lgkmcnt(0) +; GFX8-NEXT: v_mov_b32_e32 v0, s2 +; GFX8-NEXT: v_mov_b32_e32 v1, s3 +; GFX8-NEXT: v_mov_b32_e32 v2, s4 +; GFX8-NEXT: v_mov_b32_e32 v4, s6 +; GFX8-NEXT: s_and_b32 s2, 1, s8 +; GFX8-NEXT: v_mov_b32_e32 v3, s5 +; GFX8-NEXT: v_mov_b32_e32 v5, s7 +; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s2 +; GFX8-NEXT: s_nop 3 +; GFX8-NEXT: v_div_fmas_f64 v[0:1], v[0:1], v[2:3], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v3, s1 +; GFX8-NEXT: v_mov_b32_e32 v2, s0 +; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1] +; GFX8-NEXT: s_endpgm +; +; GFX10_W32-LABEL: test_div_fmas_f64: +; GFX10_W32: ; %bb.0: +; GFX10_W32-NEXT: s_load_dword s8, s[0:1], 0x44 +; GFX10_W32-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24 +; GFX10_W32-NEXT: ; implicit-def: $vcc_hi +; GFX10_W32-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W32-NEXT: s_and_b32 s8, 1, s8 +; GFX10_W32-NEXT: v_mov_b32_e32 v0, s4 +; GFX10_W32-NEXT: v_mov_b32_e32 v2, s6 +; GFX10_W32-NEXT: v_mov_b32_e32 v1, s5 +; GFX10_W32-NEXT: v_mov_b32_e32 v3, s7 +; GFX10_W32-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s8 +; GFX10_W32-NEXT: v_div_fmas_f64 v[0:1], s[2:3], v[0:1], v[2:3] +; GFX10_W32-NEXT: v_mov_b32_e32 v3, s1 +; GFX10_W32-NEXT: v_mov_b32_e32 v2, s0 +; GFX10_W32-NEXT: global_store_dwordx2 v[2:3], v[0:1], off +; GFX10_W32-NEXT: s_endpgm +; +; GFX10_W64-LABEL: test_div_fmas_f64: +; GFX10_W64: ; %bb.0: +; GFX10_W64-NEXT: s_load_dword s8, s[0:1], 0x44 +; GFX10_W64-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24 +; GFX10_W64-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W64-NEXT: s_and_b32 s8, 1, s8 +; GFX10_W64-NEXT: v_mov_b32_e32 v0, s4 +; GFX10_W64-NEXT: v_mov_b32_e32 v2, s6 +; GFX10_W64-NEXT: v_mov_b32_e32 v1, s5 +; GFX10_W64-NEXT: v_mov_b32_e32 v3, s7 +; GFX10_W64-NEXT: 
v_cmp_ne_u32_e64 vcc, 0, s8 +; GFX10_W64-NEXT: v_div_fmas_f64 v[0:1], s[2:3], v[0:1], v[2:3] +; GFX10_W64-NEXT: v_mov_b32_e32 v3, s1 +; GFX10_W64-NEXT: v_mov_b32_e32 v2, s0 +; GFX10_W64-NEXT: global_store_dwordx2 v[2:3], v[0:1], off +; GFX10_W64-NEXT: s_endpgm + %result = call double @llvm.amdgcn.div.fmas.f64(double %a, double %b, double %c, i1 %d) + store double %result, double addrspace(1)* %out, align 8 + ret void +} + +define amdgpu_kernel void @test_div_fmas_f32_cond_to_vcc(float addrspace(1)* %out, float %a, float %b, float %c, i32 %i) { +; GFX7-LABEL: test_div_fmas_f32_cond_to_vcc: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 +; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xb +; GFX7-NEXT: s_mov_b32 s6, -1 +; GFX7-NEXT: s_mov_b32 s7, 0xf000 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: s_cmp_eq_u32 s3, 0 +; GFX7-NEXT: s_cselect_b32 s3, 1, 0 +; GFX7-NEXT: v_mov_b32_e32 v0, s0 +; GFX7-NEXT: s_and_b32 s0, 1, s3 +; GFX7-NEXT: v_mov_b32_e32 v1, s1 +; GFX7-NEXT: v_mov_b32_e32 v2, s2 +; GFX7-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GFX7-NEXT: s_nop 3 +; GFX7-NEXT: v_div_fmas_f32 v0, v0, v1, v2 +; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX7-NEXT: s_endpgm +; +; GFX8-LABEL: test_div_fmas_f32_cond_to_vcc: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x2c +; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8-NEXT: s_waitcnt lgkmcnt(0) +; GFX8-NEXT: s_cmp_eq_u32 s7, 0 +; GFX8-NEXT: s_cselect_b32 s2, 1, 0 +; GFX8-NEXT: s_and_b32 s2, 1, s2 +; GFX8-NEXT: v_mov_b32_e32 v0, s4 +; GFX8-NEXT: v_mov_b32_e32 v1, s5 +; GFX8-NEXT: v_mov_b32_e32 v2, s6 +; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s2 +; GFX8-NEXT: s_nop 3 +; GFX8-NEXT: v_div_fmas_f32 v2, v0, v1, v2 +; GFX8-NEXT: v_mov_b32_e32 v0, s0 +; GFX8-NEXT: v_mov_b32_e32 v1, s1 +; GFX8-NEXT: flat_store_dword v[0:1], v2 +; GFX8-NEXT: s_endpgm +; +; GFX10_W32-LABEL: test_div_fmas_f32_cond_to_vcc: +; GFX10_W32: ; %bb.0: +; GFX10_W32-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x2c +; GFX10_W32-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX10_W32-NEXT: ; implicit-def: $vcc_hi +; GFX10_W32-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W32-NEXT: s_cmp_eq_u32 s7, 0 +; GFX10_W32-NEXT: v_mov_b32_e32 v0, s5 +; GFX10_W32-NEXT: v_mov_b32_e32 v1, s6 +; GFX10_W32-NEXT: s_cselect_b32 s2, 1, 0 +; GFX10_W32-NEXT: s_and_b32 s2, 1, s2 +; GFX10_W32-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s2 +; GFX10_W32-NEXT: v_div_fmas_f32 v2, s4, v0, v1 +; GFX10_W32-NEXT: v_mov_b32_e32 v0, s0 +; GFX10_W32-NEXT: v_mov_b32_e32 v1, s1 +; GFX10_W32-NEXT: global_store_dword v[0:1], v2, off +; GFX10_W32-NEXT: s_endpgm +; +; GFX10_W64-LABEL: test_div_fmas_f32_cond_to_vcc: +; GFX10_W64: ; %bb.0: +; GFX10_W64-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x2c +; GFX10_W64-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX10_W64-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W64-NEXT: s_cmp_eq_u32 s7, 0 +; GFX10_W64-NEXT: v_mov_b32_e32 v0, s5 +; GFX10_W64-NEXT: v_mov_b32_e32 v1, s6 +; GFX10_W64-NEXT: s_cselect_b32 s2, 1, 0 +; GFX10_W64-NEXT: s_and_b32 s2, 1, s2 +; GFX10_W64-NEXT: v_cmp_ne_u32_e64 vcc, 0, s2 +; GFX10_W64-NEXT: v_div_fmas_f32 v2, s4, v0, v1 +; GFX10_W64-NEXT: v_mov_b32_e32 v0, s0 +; GFX10_W64-NEXT: v_mov_b32_e32 v1, s1 +; GFX10_W64-NEXT: global_store_dword v[0:1], v2, off +; GFX10_W64-NEXT: s_endpgm + %cmp = icmp eq i32 %i, 0 + %result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 %cmp) + store float %result, float addrspace(1)* %out, align 4 + ret void +} + +define amdgpu_kernel void @test_div_fmas_f32_imm_false_cond_to_vcc(float addrspace(1)* %out, 
[8 x i32], float %a, [8 x i32], float %b, [8 x i32], float %c) { +; GFX7-LABEL: test_div_fmas_f32_imm_false_cond_to_vcc: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 +; GFX7-NEXT: s_load_dword s2, s[0:1], 0x13 +; GFX7-NEXT: s_load_dword s3, s[0:1], 0x1c +; GFX7-NEXT: s_load_dword s0, s[0:1], 0x25 +; GFX7-NEXT: v_cmp_ne_u32_e64 vcc, 0, 0 +; GFX7-NEXT: s_mov_b32 s6, -1 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_mov_b32_e32 v0, s2 +; GFX7-NEXT: v_mov_b32_e32 v1, s3 +; GFX7-NEXT: v_mov_b32_e32 v2, s0 +; GFX7-NEXT: v_div_fmas_f32 v0, v0, v1, v2 +; GFX7-NEXT: s_mov_b32 s7, 0xf000 +; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX7-NEXT: s_endpgm +; +; GFX8-LABEL: test_div_fmas_f32_imm_false_cond_to_vcc: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_load_dword s2, s[0:1], 0x4c +; GFX8-NEXT: s_load_dword s3, s[0:1], 0x70 +; GFX8-NEXT: s_load_dword s4, s[0:1], 0x94 +; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, 0 +; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8-NEXT: s_waitcnt lgkmcnt(0) +; GFX8-NEXT: v_mov_b32_e32 v0, s2 +; GFX8-NEXT: v_mov_b32_e32 v1, s3 +; GFX8-NEXT: v_mov_b32_e32 v2, s4 +; GFX8-NEXT: v_div_fmas_f32 v2, v0, v1, v2 +; GFX8-NEXT: v_mov_b32_e32 v0, s0 +; GFX8-NEXT: v_mov_b32_e32 v1, s1 +; GFX8-NEXT: flat_store_dword v[0:1], v2 +; GFX8-NEXT: s_endpgm +; +; GFX10_W32-LABEL: test_div_fmas_f32_imm_false_cond_to_vcc: +; GFX10_W32: ; %bb.0: +; GFX10_W32-NEXT: s_load_dword s2, s[0:1], 0x70 +; GFX10_W32-NEXT: s_load_dword s3, s[0:1], 0x94 +; GFX10_W32-NEXT: s_load_dword s4, s[0:1], 0x4c +; GFX10_W32-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX10_W32-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, 0 +; GFX10_W32-NEXT: ; implicit-def: $vcc_hi +; GFX10_W32-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W32-NEXT: v_mov_b32_e32 v0, s2 +; GFX10_W32-NEXT: v_mov_b32_e32 v1, s3 +; GFX10_W32-NEXT: v_div_fmas_f32 v2, s4, v0, v1 +; GFX10_W32-NEXT: v_mov_b32_e32 v0, s0 +; GFX10_W32-NEXT: v_mov_b32_e32 v1, s1 +; GFX10_W32-NEXT: global_store_dword v[0:1], v2, off +; GFX10_W32-NEXT: s_endpgm +; +; GFX10_W64-LABEL: test_div_fmas_f32_imm_false_cond_to_vcc: +; GFX10_W64: ; %bb.0: +; GFX10_W64-NEXT: s_load_dword s2, s[0:1], 0x70 +; GFX10_W64-NEXT: s_load_dword s3, s[0:1], 0x94 +; GFX10_W64-NEXT: s_load_dword s4, s[0:1], 0x4c +; GFX10_W64-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX10_W64-NEXT: v_cmp_ne_u32_e64 vcc, 0, 0 +; GFX10_W64-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W64-NEXT: v_mov_b32_e32 v0, s2 +; GFX10_W64-NEXT: v_mov_b32_e32 v1, s3 +; GFX10_W64-NEXT: v_div_fmas_f32 v2, s4, v0, v1 +; GFX10_W64-NEXT: v_mov_b32_e32 v0, s0 +; GFX10_W64-NEXT: v_mov_b32_e32 v1, s1 +; GFX10_W64-NEXT: global_store_dword v[0:1], v2, off +; GFX10_W64-NEXT: s_endpgm + %result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 false) + store float %result, float addrspace(1)* %out, align 4 + ret void +} + +define amdgpu_kernel void @test_div_fmas_f32_imm_true_cond_to_vcc(float addrspace(1)* %out, [8 x i32], float %a, [8 x i32], float %b, [8 x i32], float %c) { +; GFX7-LABEL: test_div_fmas_f32_imm_true_cond_to_vcc: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 +; GFX7-NEXT: s_load_dword s2, s[0:1], 0x13 +; GFX7-NEXT: s_load_dword s3, s[0:1], 0x1c +; GFX7-NEXT: s_load_dword s0, s[0:1], 0x25 +; GFX7-NEXT: v_cmp_ne_u32_e64 vcc, 0, 1 +; GFX7-NEXT: s_mov_b32 s6, -1 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_mov_b32_e32 v0, s2 +; GFX7-NEXT: v_mov_b32_e32 v1, s3 +; GFX7-NEXT: v_mov_b32_e32 v2, s0 +; GFX7-NEXT: v_div_fmas_f32 v0, v0, v1, v2 +; GFX7-NEXT: s_mov_b32 s7, 0xf000 +; 
GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GFX7-NEXT: s_endpgm +; +; GFX8-LABEL: test_div_fmas_f32_imm_true_cond_to_vcc: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_load_dword s2, s[0:1], 0x4c +; GFX8-NEXT: s_load_dword s3, s[0:1], 0x70 +; GFX8-NEXT: s_load_dword s4, s[0:1], 0x94 +; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, 1 +; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8-NEXT: s_waitcnt lgkmcnt(0) +; GFX8-NEXT: v_mov_b32_e32 v0, s2 +; GFX8-NEXT: v_mov_b32_e32 v1, s3 +; GFX8-NEXT: v_mov_b32_e32 v2, s4 +; GFX8-NEXT: v_div_fmas_f32 v2, v0, v1, v2 +; GFX8-NEXT: v_mov_b32_e32 v0, s0 +; GFX8-NEXT: v_mov_b32_e32 v1, s1 +; GFX8-NEXT: flat_store_dword v[0:1], v2 +; GFX8-NEXT: s_endpgm +; +; GFX10_W32-LABEL: test_div_fmas_f32_imm_true_cond_to_vcc: +; GFX10_W32: ; %bb.0: +; GFX10_W32-NEXT: s_load_dword s2, s[0:1], 0x70 +; GFX10_W32-NEXT: s_load_dword s3, s[0:1], 0x94 +; GFX10_W32-NEXT: s_load_dword s4, s[0:1], 0x4c +; GFX10_W32-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX10_W32-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, 1 +; GFX10_W32-NEXT: ; implicit-def: $vcc_hi +; GFX10_W32-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W32-NEXT: v_mov_b32_e32 v0, s2 +; GFX10_W32-NEXT: v_mov_b32_e32 v1, s3 +; GFX10_W32-NEXT: v_div_fmas_f32 v2, s4, v0, v1 +; GFX10_W32-NEXT: v_mov_b32_e32 v0, s0 +; GFX10_W32-NEXT: v_mov_b32_e32 v1, s1 +; GFX10_W32-NEXT: global_store_dword v[0:1], v2, off +; GFX10_W32-NEXT: s_endpgm +; +; GFX10_W64-LABEL: test_div_fmas_f32_imm_true_cond_to_vcc: +; GFX10_W64: ; %bb.0: +; GFX10_W64-NEXT: s_load_dword s2, s[0:1], 0x70 +; GFX10_W64-NEXT: s_load_dword s3, s[0:1], 0x94 +; GFX10_W64-NEXT: s_load_dword s4, s[0:1], 0x4c +; GFX10_W64-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX10_W64-NEXT: v_cmp_ne_u32_e64 vcc, 0, 1 +; GFX10_W64-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W64-NEXT: v_mov_b32_e32 v0, s2 +; GFX10_W64-NEXT: v_mov_b32_e32 v1, s3 +; GFX10_W64-NEXT: v_div_fmas_f32 v2, s4, v0, v1 +; GFX10_W64-NEXT: v_mov_b32_e32 v0, s0 +; GFX10_W64-NEXT: v_mov_b32_e32 v1, s1 +; GFX10_W64-NEXT: global_store_dword v[0:1], v2, off +; GFX10_W64-NEXT: s_endpgm + %result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 true) + store float %result, float addrspace(1)* %out, align 4 + ret void +} + +define amdgpu_kernel void @test_div_fmas_f32_logical_cond_to_vcc(float addrspace(1)* %out, float addrspace(1)* %in, [8 x i32], i32 %d) { +; GFX7-LABEL: test_div_fmas_f32_logical_cond_to_vcc: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; GFX7-NEXT: s_load_dword s8, s[0:1], 0x15 +; GFX7-NEXT: v_ashrrev_i32_e32 v1, 31, v0 +; GFX7-NEXT: v_lshl_b64 v[1:2], v[0:1], 2 +; GFX7-NEXT: s_mov_b32 s2, 0 +; GFX7-NEXT: s_mov_b32 s3, 0xf000 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: s_mov_b64 s[0:1], s[6:7] +; GFX7-NEXT: buffer_load_dword v3, v[1:2], s[0:3], 0 addr64 +; GFX7-NEXT: buffer_load_dword v4, v[1:2], s[0:3], 0 addr64 offset:4 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GFX7-NEXT: buffer_load_dword v0, v[1:2], s[0:3], 0 addr64 offset:8 +; GFX7-NEXT: s_cmp_lg_u32 s8, 0 +; GFX7-NEXT: s_cselect_b32 s6, 1, 0 +; GFX7-NEXT: s_and_b32 s0, 1, s6 +; GFX7-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, s0 +; GFX7-NEXT: s_mov_b32 s2, -1 +; GFX7-NEXT: s_and_b64 vcc, vcc, s[0:1] +; GFX7-NEXT: s_mov_b64 s[6:7], s[2:3] +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_div_fmas_f32 v0, v3, v4, v0 +; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0 offset:8 +; GFX7-NEXT: s_endpgm +; +; GFX8-LABEL: test_div_fmas_f32_logical_cond_to_vcc: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; 
GFX8-NEXT: s_load_dword s2, s[0:1], 0x54 +; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v0 +; GFX8-NEXT: v_lshlrev_b64 v[1:2], 2, v[0:1] +; GFX8-NEXT: s_waitcnt lgkmcnt(0) +; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v4, s7 +; GFX8-NEXT: v_add_u32_e32 v1, vcc, v3, v1 +; GFX8-NEXT: v_addc_u32_e32 v2, vcc, v4, v2, vcc +; GFX8-NEXT: v_add_u32_e32 v3, vcc, 4, v1 +; GFX8-NEXT: v_addc_u32_e32 v4, vcc, 0, v2, vcc +; GFX8-NEXT: v_add_u32_e32 v5, vcc, 8, v1 +; GFX8-NEXT: v_addc_u32_e32 v6, vcc, 0, v2, vcc +; GFX8-NEXT: flat_load_dword v1, v[1:2] +; GFX8-NEXT: flat_load_dword v2, v[3:4] +; GFX8-NEXT: flat_load_dword v3, v[5:6] +; GFX8-NEXT: s_add_u32 s0, s4, 8 +; GFX8-NEXT: s_addc_u32 s1, s5, 0 +; GFX8-NEXT: s_cmp_lg_u32 s2, 0 +; GFX8-NEXT: s_cselect_b32 s2, 1, 0 +; GFX8-NEXT: s_and_b32 s2, 1, s2 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GFX8-NEXT: v_cmp_ne_u32_e64 s[2:3], 0, s2 +; GFX8-NEXT: s_and_b64 vcc, vcc, s[2:3] +; GFX8-NEXT: s_nop 1 +; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_div_fmas_f32 v2, v1, v2, v3 +; GFX8-NEXT: v_mov_b32_e32 v0, s0 +; GFX8-NEXT: v_mov_b32_e32 v1, s1 +; GFX8-NEXT: flat_store_dword v[0:1], v2 +; GFX8-NEXT: s_endpgm +; +; GFX10_W32-LABEL: test_div_fmas_f32_logical_cond_to_vcc: +; GFX10_W32: ; %bb.0: +; GFX10_W32-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GFX10_W32-NEXT: v_ashrrev_i32_e32 v1, 31, v0 +; GFX10_W32-NEXT: s_load_dword s2, s[0:1], 0x54 +; GFX10_W32-NEXT: ; implicit-def: $vcc_hi +; GFX10_W32-NEXT: v_lshlrev_b64 v[1:2], 2, v[0:1] +; GFX10_W32-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W32-NEXT: v_mov_b32_e32 v3, s6 +; GFX10_W32-NEXT: v_mov_b32_e32 v4, s7 +; GFX10_W32-NEXT: s_add_u32 s0, s4, 8 +; GFX10_W32-NEXT: s_addc_u32 s1, s5, 0 +; GFX10_W32-NEXT: s_cmp_lg_u32 s2, 0 +; GFX10_W32-NEXT: v_add_co_u32_e64 v1, vcc_lo, v3, v1 +; GFX10_W32-NEXT: s_cselect_b32 s2, 1, 0 +; GFX10_W32-NEXT: v_add_co_ci_u32_e32 v2, vcc_lo, v4, v2, vcc_lo +; GFX10_W32-NEXT: v_add_co_u32_e64 v3, vcc_lo, v1, 8 +; GFX10_W32-NEXT: s_and_b32 s2, 1, s2 +; GFX10_W32-NEXT: v_add_co_ci_u32_e32 v4, vcc_lo, 0, v2, vcc_lo +; GFX10_W32-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX10_W32-NEXT: v_cmp_ne_u32_e64 s2, 0, s2 +; GFX10_W32-NEXT: global_load_dword v1, v[1:2], off +; GFX10_W32-NEXT: global_load_dword v2, v[3:4], off offset:-4 +; GFX10_W32-NEXT: global_load_dword v3, v[3:4], off +; GFX10_W32-NEXT: s_and_b32 vcc_lo, vcc_lo, s2 +; GFX10_W32-NEXT: s_waitcnt vmcnt(0) +; GFX10_W32-NEXT: v_div_fmas_f32 v2, v1, v2, v3 +; GFX10_W32-NEXT: v_mov_b32_e32 v0, s0 +; GFX10_W32-NEXT: v_mov_b32_e32 v1, s1 +; GFX10_W32-NEXT: global_store_dword v[0:1], v2, off +; GFX10_W32-NEXT: s_endpgm +; +; GFX10_W64-LABEL: test_div_fmas_f32_logical_cond_to_vcc: +; GFX10_W64: ; %bb.0: +; GFX10_W64-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GFX10_W64-NEXT: v_ashrrev_i32_e32 v1, 31, v0 +; GFX10_W64-NEXT: s_load_dword s2, s[0:1], 0x54 +; GFX10_W64-NEXT: v_lshlrev_b64 v[1:2], 2, v[0:1] +; GFX10_W64-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W64-NEXT: v_mov_b32_e32 v3, s6 +; GFX10_W64-NEXT: v_mov_b32_e32 v4, s7 +; GFX10_W64-NEXT: s_add_u32 s0, s4, 8 +; GFX10_W64-NEXT: s_addc_u32 s1, s5, 0 +; GFX10_W64-NEXT: s_cmp_lg_u32 s2, 0 +; GFX10_W64-NEXT: v_add_co_u32_e64 v1, vcc, v3, v1 +; GFX10_W64-NEXT: s_cselect_b32 s2, 1, 0 +; GFX10_W64-NEXT: v_add_co_ci_u32_e32 v2, vcc, v4, v2, vcc +; GFX10_W64-NEXT: v_add_co_u32_e64 v3, vcc, v1, 8 +; GFX10_W64-NEXT: s_and_b32 s2, 1, s2 +; GFX10_W64-NEXT: v_add_co_ci_u32_e32 v4, vcc, 0, v2, vcc +; GFX10_W64-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GFX10_W64-NEXT: v_cmp_ne_u32_e64 s[2:3], 0, 
s2 +; GFX10_W64-NEXT: global_load_dword v1, v[1:2], off +; GFX10_W64-NEXT: global_load_dword v2, v[3:4], off offset:-4 +; GFX10_W64-NEXT: global_load_dword v3, v[3:4], off +; GFX10_W64-NEXT: s_and_b64 vcc, vcc, s[2:3] +; GFX10_W64-NEXT: s_waitcnt vmcnt(0) +; GFX10_W64-NEXT: v_div_fmas_f32 v2, v1, v2, v3 +; GFX10_W64-NEXT: v_mov_b32_e32 v0, s0 +; GFX10_W64-NEXT: v_mov_b32_e32 v1, s1 +; GFX10_W64-NEXT: global_store_dword v[0:1], v2, off +; GFX10_W64-NEXT: s_endpgm + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %gep.a = getelementptr float, float addrspace(1)* %in, i32 %tid + %gep.b = getelementptr float, float addrspace(1)* %gep.a, i32 1 + %gep.c = getelementptr float, float addrspace(1)* %gep.a, i32 2 + %gep.out = getelementptr float, float addrspace(1)* %out, i32 2 + + %a = load volatile float, float addrspace(1)* %gep.a + %b = load volatile float, float addrspace(1)* %gep.b + %c = load volatile float, float addrspace(1)* %gep.c + + %cmp0 = icmp eq i32 %tid, 0 + %cmp1 = icmp ne i32 %d, 0 + %and = and i1 %cmp0, %cmp1 + + %result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 %and) + store float %result, float addrspace(1)* %gep.out, align 4 + ret void +} + +define amdgpu_kernel void @test_div_fmas_f32_i1_phi_vcc(float addrspace(1)* %out, [8 x i32], float addrspace(1)* %in, [8 x i32], i32 addrspace(1)* %dummy) { +; GFX7-LABEL: test_div_fmas_f32_i1_phi_vcc: +; GFX7: ; %bb.0: ; %entry +; GFX7-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 +; GFX7-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x13 +; GFX7-NEXT: v_ashrrev_i32_e32 v1, 31, v0 +; GFX7-NEXT: v_lshl_b64 v[1:2], v[0:1], 2 +; GFX7-NEXT: s_mov_b32 s10, 0 +; GFX7-NEXT: s_mov_b32 s11, 0xf000 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: buffer_load_dwordx3 v[1:3], v[1:2], s[8:11], 0 addr64 +; GFX7-NEXT: s_mov_b32 s2, 0 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GFX7-NEXT: s_and_saveexec_b64 s[6:7], vcc +; GFX7-NEXT: s_cbranch_execz BB13_2 +; GFX7-NEXT: ; %bb.1: ; %bb +; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x1d +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: s_load_dword s0, s[0:1], 0x0 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: s_cmp_lg_u32 s0, 0 +; GFX7-NEXT: s_cselect_b32 s2, 1, 0 +; GFX7-NEXT: BB13_2: ; %exit +; GFX7-NEXT: s_or_b64 exec, exec, s[6:7] +; GFX7-NEXT: s_and_b32 s0, 1, s2 +; GFX7-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GFX7-NEXT: s_mov_b32 s10, -1 +; GFX7-NEXT: s_mov_b64 s[6:7], s[10:11] +; GFX7-NEXT: s_nop 1 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_div_fmas_f32 v0, v1, v2, v3 +; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0 offset:8 +; GFX7-NEXT: s_endpgm +; +; GFX8-LABEL: test_div_fmas_f32_i1_phi_vcc: +; GFX8: ; %bb.0: ; %entry +; GFX8-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24 +; GFX8-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x4c +; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v0 +; GFX8-NEXT: v_lshlrev_b64 v[1:2], 2, v[0:1] +; GFX8-NEXT: s_mov_b32 s2, 0 +; GFX8-NEXT: s_waitcnt lgkmcnt(0) +; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v4, s7 +; GFX8-NEXT: v_add_u32_e32 v1, vcc, v3, v1 +; GFX8-NEXT: v_addc_u32_e32 v2, vcc, v4, v2, vcc +; GFX8-NEXT: flat_load_dwordx3 v[1:3], v[1:2] +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GFX8-NEXT: s_and_saveexec_b64 s[6:7], vcc +; GFX8-NEXT: s_cbranch_execz BB13_2 +; GFX8-NEXT: ; %bb.1: ; %bb +; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x74 +; GFX8-NEXT: s_waitcnt lgkmcnt(0) +; GFX8-NEXT: s_load_dword s0, s[0:1], 0x0 +; GFX8-NEXT: s_waitcnt lgkmcnt(0) +; GFX8-NEXT: s_cmp_lg_u32 s0, 0 +; GFX8-NEXT: s_cselect_b32 s2, 1, 0 +; GFX8-NEXT: BB13_2: 
; %exit +; GFX8-NEXT: s_or_b64 exec, exec, s[6:7] +; GFX8-NEXT: s_add_u32 s0, s4, 8 +; GFX8-NEXT: s_addc_u32 s1, s5, 0 +; GFX8-NEXT: s_and_b32 s2, 1, s2 +; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s2 +; GFX8-NEXT: s_nop 3 +; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_div_fmas_f32 v2, v1, v2, v3 +; GFX8-NEXT: v_mov_b32_e32 v0, s0 +; GFX8-NEXT: v_mov_b32_e32 v1, s1 +; GFX8-NEXT: flat_store_dword v[0:1], v2 +; GFX8-NEXT: s_endpgm +; +; GFX10_W32-LABEL: test_div_fmas_f32_i1_phi_vcc: +; GFX10_W32: ; %bb.0: ; %entry +; GFX10_W32-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x4c +; GFX10_W32-NEXT: v_ashrrev_i32_e32 v1, 31, v0 +; GFX10_W32-NEXT: s_mov_b32 s4, 0 +; GFX10_W32-NEXT: ; implicit-def: $vcc_hi +; GFX10_W32-NEXT: v_lshlrev_b64 v[1:2], 2, v[0:1] +; GFX10_W32-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W32-NEXT: v_mov_b32_e32 v4, s3 +; GFX10_W32-NEXT: v_mov_b32_e32 v3, s2 +; GFX10_W32-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GFX10_W32-NEXT: v_add_co_u32_e64 v1, vcc_lo, v3, v1 +; GFX10_W32-NEXT: v_add_co_ci_u32_e32 v2, vcc_lo, v4, v2, vcc_lo +; GFX10_W32-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX10_W32-NEXT: global_load_dwordx3 v[1:3], v[1:2], off +; GFX10_W32-NEXT: s_and_saveexec_b32 s5, vcc_lo +; GFX10_W32-NEXT: s_cbranch_execz BB13_2 +; GFX10_W32-NEXT: ; %bb.1: ; %bb +; GFX10_W32-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x74 +; GFX10_W32-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W32-NEXT: s_load_dword s0, s[0:1], 0x0 +; GFX10_W32-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W32-NEXT: s_cmp_lg_u32 s0, 0 +; GFX10_W32-NEXT: s_cselect_b32 s4, 1, 0 +; GFX10_W32-NEXT: BB13_2: ; %exit +; GFX10_W32-NEXT: v_nop +; GFX10_W32-NEXT: s_or_b32 exec_lo, exec_lo, s5 +; GFX10_W32-NEXT: s_and_b32 s0, 1, s4 +; GFX10_W32-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0 +; GFX10_W32-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W32-NEXT: s_add_u32 s0, s2, 8 +; GFX10_W32-NEXT: s_addc_u32 s1, s3, 0 +; GFX10_W32-NEXT: s_waitcnt vmcnt(0) +; GFX10_W32-NEXT: v_div_fmas_f32 v2, v1, v2, v3 +; GFX10_W32-NEXT: v_mov_b32_e32 v0, s0 +; GFX10_W32-NEXT: v_mov_b32_e32 v1, s1 +; GFX10_W32-NEXT: global_store_dword v[0:1], v2, off +; GFX10_W32-NEXT: s_endpgm +; +; GFX10_W64-LABEL: test_div_fmas_f32_i1_phi_vcc: +; GFX10_W64: ; %bb.0: ; %entry +; GFX10_W64-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x4c +; GFX10_W64-NEXT: v_ashrrev_i32_e32 v1, 31, v0 +; GFX10_W64-NEXT: s_mov_b32 s4, 0 +; GFX10_W64-NEXT: v_lshlrev_b64 v[1:2], 2, v[0:1] +; GFX10_W64-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W64-NEXT: v_mov_b32_e32 v4, s3 +; GFX10_W64-NEXT: v_mov_b32_e32 v3, s2 +; GFX10_W64-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GFX10_W64-NEXT: v_add_co_u32_e64 v1, vcc, v3, v1 +; GFX10_W64-NEXT: v_add_co_ci_u32_e32 v2, vcc, v4, v2, vcc +; GFX10_W64-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; GFX10_W64-NEXT: global_load_dwordx3 v[1:3], v[1:2], off +; GFX10_W64-NEXT: s_and_saveexec_b64 s[6:7], vcc +; GFX10_W64-NEXT: s_cbranch_execz BB13_2 +; GFX10_W64-NEXT: ; %bb.1: ; %bb +; GFX10_W64-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x74 +; GFX10_W64-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W64-NEXT: s_load_dword s0, s[0:1], 0x0 +; GFX10_W64-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W64-NEXT: s_cmp_lg_u32 s0, 0 +; GFX10_W64-NEXT: s_cselect_b32 s4, 1, 0 +; GFX10_W64-NEXT: BB13_2: ; %exit +; GFX10_W64-NEXT: v_nop +; GFX10_W64-NEXT: s_or_b64 exec, exec, s[6:7] +; GFX10_W64-NEXT: s_and_b32 s0, 1, s4 +; GFX10_W64-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0 +; GFX10_W64-NEXT: s_waitcnt lgkmcnt(0) +; GFX10_W64-NEXT: s_add_u32 s0, s2, 8 +; GFX10_W64-NEXT: s_addc_u32 s1, s3, 0 +; GFX10_W64-NEXT: s_waitcnt vmcnt(0) +; GFX10_W64-NEXT: 
v_div_fmas_f32 v2, v1, v2, v3 +; GFX10_W64-NEXT: v_mov_b32_e32 v0, s0 +; GFX10_W64-NEXT: v_mov_b32_e32 v1, s1 +; GFX10_W64-NEXT: global_store_dword v[0:1], v2, off +; GFX10_W64-NEXT: s_endpgm +entry: + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %gep.out = getelementptr float, float addrspace(1)* %out, i32 2 + %gep.a = getelementptr float, float addrspace(1)* %in, i32 %tid + %gep.b = getelementptr float, float addrspace(1)* %gep.a, i32 1 + %gep.c = getelementptr float, float addrspace(1)* %gep.a, i32 2 + + %a = load float, float addrspace(1)* %gep.a + %b = load float, float addrspace(1)* %gep.b + %c = load float, float addrspace(1)* %gep.c + + %cmp0 = icmp eq i32 %tid, 0 + br i1 %cmp0, label %bb, label %exit + +bb: + %val = load i32, i32 addrspace(1)* %dummy + %cmp1 = icmp ne i32 %val, 0 + br label %exit + +exit: + %cond = phi i1 [false, %entry], [%cmp1, %bb] + %result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 %cond) + store float %result, float addrspace(1)* %gep.out, align 4 + ret void +} + +declare i32 @llvm.amdgcn.workitem.id.x() #0 +declare float @llvm.amdgcn.div.fmas.f32(float, float, float, i1) #0 +declare double @llvm.amdgcn.div.fmas.f64(double, double, double, i1) #0 + +attributes #0 = { nounwind readnone speculatable }