diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.intersect_ray.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.intersect_ray.ll
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.intersect_ray.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.intersect_ray.ll
@@ -1,5 +1,6 @@
-; RUN: llc -march=amdgcn -mcpu=gfx1030 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=gfx1013 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mcpu=gfx1013 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX1013 %s
+; RUN: llc -march=amdgcn -mcpu=gfx1030 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX1030 %s
 ; RUN: not --crash llc -march=amdgcn -mcpu=gfx1012 -verify-machineinstrs < %s 2>&1 | FileCheck -check-prefix=ERR %s
 
 ; uint4 llvm.amdgcn.image.bvh.intersect.ray.i32.v4f32(uint node_ptr, float ray_extent, float4 ray_origin, float4 ray_dir, float4 ray_inv_dir, uint4 texture_descr)
@@ -12,12 +13,15 @@
 declare <4 x i32> @llvm.amdgcn.image.bvh.intersect.ray.i64.v4f32(i64, float, <4 x float>, <4 x float>, <4 x float>, <4 x i32>)
 declare <4 x i32> @llvm.amdgcn.image.bvh.intersect.ray.i64.v4f16(i64, float, <4 x float>, <4 x half>, <4 x half>, <4 x i32>)
 
-; GCN-LABEL: {{^}}image_bvh_intersect_ray:
-; GCN: image_bvh_intersect_ray v[0:3], v[0:15], s[0:3]{{$}}
 ; ERR: in function image_bvh_intersect_ray{{.*}}intrinsic not supported on subtarget
 ; Arguments are flattened to represent the actual VGPR_A layout, so we have no
 ; extra moves in the generated kernel.
 define amdgpu_ps <4 x float> @image_bvh_intersect_ray(i32 %node_ptr, float %ray_extent, float %ray_origin_x, float %ray_origin_y, float %ray_origin_z, float %ray_dir_x, float %ray_dir_y, float %ray_dir_z, float %ray_inv_dir_x, float %ray_inv_dir_y, float %ray_inv_dir_z, <4 x i32> inreg %tdescr) {
+; GCN-LABEL: image_bvh_intersect_ray:
+; GCN: ; %bb.0: ; %main_body
+; GCN-NEXT: image_bvh_intersect_ray v[0:3], v[0:15], s[0:3]
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: ; return to shader part epilog
 main_body:
   %ray_origin0 = insertelement <4 x float> undef, float %ray_origin_x, i32 0
   %ray_origin1 = insertelement <4 x float> %ray_origin0, float %ray_origin_y, i32 1
@@ -33,20 +37,41 @@
   ret <4 x float> %r
 }
 
-; GCN-LABEL: {{^}}image_bvh_intersect_ray_a16:
-; GCN: image_bvh_intersect_ray v[0:3], v[{{[0-9:]+}}], s[{{[0-9:]+}}] a16{{$}}
 define amdgpu_ps <4 x float> @image_bvh_intersect_ray_a16(i32 inreg %node_ptr, float inreg %ray_extent, <4 x float> inreg %ray_origin, <4 x half> inreg %ray_dir, <4 x half> inreg %ray_inv_dir, <4 x i32> inreg %tdescr) {
+; GCN-LABEL: image_bvh_intersect_ray_a16:
+; GCN: ; %bb.0: ; %main_body
+; GCN-NEXT: s_lshr_b32 s5, s8, 16
+; GCN-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GCN-NEXT: s_pack_ll_b32_b16 s5, s5, s9
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_mov_b32_e32 v1, s1
+; GCN-NEXT: v_mov_b32_e32 v2, s2
+; GCN-NEXT: v_mov_b32_e32 v3, s3
+; GCN-NEXT: v_mov_b32_e32 v4, s4
+; GCN-NEXT: v_mov_b32_e32 v5, s6
+; GCN-NEXT: v_mov_b32_e32 v6, s7
+; GCN-NEXT: v_mov_b32_e32 v7, s5
+; GCN-NEXT: s_mov_b32 s15, s13
+; GCN-NEXT: s_mov_b32 s14, s12
+; GCN-NEXT: s_mov_b32 s13, s11
+; GCN-NEXT: s_mov_b32 s12, s10
+; GCN-NEXT: image_bvh_intersect_ray v[0:3], v[0:7], s[12:15] a16
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: ; return to shader part epilog
 main_body:
   %v = call <4 x i32> @llvm.amdgcn.image.bvh.intersect.ray.i32.v4f16(i32 %node_ptr, float %ray_extent, <4 x float> %ray_origin, <4 x half> %ray_dir, <4 x half> %ray_inv_dir, <4 x i32> %tdescr)
   %r = bitcast <4 x i32> %v to <4 x float>
   ret <4 x float> %r
 }
 
-; GCN-LABEL: {{^}}image_bvh64_intersect_ray:
-; GCN: image_bvh64_intersect_ray v[0:3], v[0:15], s[0:3]{{$}}
 ; Arguments are flattened to represent the actual VGPR_A layout, so we have no
 ; extra moves in the generated kernel.
 define amdgpu_ps <4 x float> @image_bvh64_intersect_ray(<2 x i32> %node_ptr_vec, float %ray_extent, float %ray_origin_x, float %ray_origin_y, float %ray_origin_z, float %ray_dir_x, float %ray_dir_y, float %ray_dir_z, float %ray_inv_dir_x, float %ray_inv_dir_y, float %ray_inv_dir_z, <4 x i32> inreg %tdescr) {
+; GCN-LABEL: image_bvh64_intersect_ray:
+; GCN: ; %bb.0: ; %main_body
+; GCN-NEXT: image_bvh64_intersect_ray v[0:3], v[0:15], s[0:3]
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: ; return to shader part epilog
 main_body:
   %node_ptr = bitcast <2 x i32> %node_ptr_vec to i64
   %ray_origin0 = insertelement <4 x float> undef, float %ray_origin_x, i32 0
@@ -63,9 +88,28 @@
   ret <4 x float> %r
 }
 
-; GCN-LABEL: {{^}}image_bvh64_intersect_ray_a16:
-; GCN: image_bvh64_intersect_ray v[0:3], v[{{[0-9:]+}}], s[{{[0-9:]+}}] a16{{$}}
 define amdgpu_ps <4 x float> @image_bvh64_intersect_ray_a16(i64 inreg %node_ptr, float inreg %ray_extent, <4 x float> inreg %ray_origin, <4 x half> inreg %ray_dir, <4 x half> inreg %ray_inv_dir, <4 x i32> inreg %tdescr) {
+; GCN-LABEL: image_bvh64_intersect_ray_a16:
+; GCN: ; %bb.0: ; %main_body
+; GCN-NEXT: s_lshr_b32 s6, s9, 16
+; GCN-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GCN-NEXT: s_pack_ll_b32_b16 s6, s6, s10
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_mov_b32_e32 v1, s1
+; GCN-NEXT: v_mov_b32_e32 v2, s2
+; GCN-NEXT: v_mov_b32_e32 v3, s3
+; GCN-NEXT: v_mov_b32_e32 v4, s4
+; GCN-NEXT: v_mov_b32_e32 v5, s5
+; GCN-NEXT: v_mov_b32_e32 v6, s7
+; GCN-NEXT: v_mov_b32_e32 v7, s8
+; GCN-NEXT: v_mov_b32_e32 v8, s6
+; GCN-NEXT: s_mov_b32 s15, s14
+; GCN-NEXT: s_mov_b32 s14, s13
+; GCN-NEXT: s_mov_b32 s13, s12
+; GCN-NEXT: s_mov_b32 s12, s11
+; GCN-NEXT: image_bvh64_intersect_ray v[0:3], v[0:15], s[12:15] a16
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: ; return to shader part epilog
 main_body:
   %v = call <4 x i32> @llvm.amdgcn.image.bvh.intersect.ray.i64.v4f16(i64 %node_ptr, float %ray_extent, <4 x float> %ray_origin, <4 x half> %ray_dir, <4 x half> %ray_inv_dir, <4 x i32> %tdescr)
   %r = bitcast <4 x i32> %v to <4 x float>
@@ -74,9 +118,60 @@
 
 ; TODO: NSA reassign is very limited and cannot work with VGPR tuples and subregs.
 
-; GCN-LABEL: {{^}}image_bvh_intersect_ray_nsa_reassign:
-; GCN: image_bvh_intersect_ray v[{{[0-9:]+}}], v[{{[0-9:]+}}], s[{{[0-9:]+}}]{{$}}
 define amdgpu_kernel void @image_bvh_intersect_ray_nsa_reassign(i32* %p_node_ptr, float* %p_ray, <4 x i32> inreg %tdescr) {
+; GFX1013-LABEL: image_bvh_intersect_ray_nsa_reassign:
+; GFX1013: ; %bb.0: ; %main_body
+; GFX1013-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX1013-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1013-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX1013-NEXT: v_mov_b32_e32 v6, 4.0
+; GFX1013-NEXT: v_mov_b32_e32 v7, 0x40a00000
+; GFX1013-NEXT: v_mov_b32_e32 v8, 0x40c00000
+; GFX1013-NEXT: v_mov_b32_e32 v9, 0x40e00000
+; GFX1013-NEXT: v_mov_b32_e32 v10, 0x41000000
+; GFX1013-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1013-NEXT: v_add_co_u32 v2, s4, s4, v0
+; GFX1013-NEXT: v_add_co_ci_u32_e64 v3, s4, s5, 0, s4
+; GFX1013-NEXT: v_add_co_u32 v4, s4, s6, v0
+; GFX1013-NEXT: v_add_co_ci_u32_e64 v5, s4, s7, 0, s4
+; GFX1013-NEXT: flat_load_dword v0, v[2:3]
+; GFX1013-NEXT: flat_load_dword v1, v[4:5]
+; GFX1013-NEXT: v_mov_b32_e32 v2, 0
+; GFX1013-NEXT: v_mov_b32_e32 v3, 1.0
+; GFX1013-NEXT: v_mov_b32_e32 v4, 2.0
+; GFX1013-NEXT: v_mov_b32_e32 v5, 0x40400000
+; GFX1013-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX1013-NEXT: image_bvh_intersect_ray v[0:3], v[0:15], s[0:3]
+; GFX1013-NEXT: s_waitcnt vmcnt(0)
+; GFX1013-NEXT: flat_store_dwordx4 v[0:1], v[0:3]
+; GFX1013-NEXT: s_endpgm
+;
+; GFX1030-LABEL: image_bvh_intersect_ray_nsa_reassign:
+; GFX1030: ; %bb.0: ; %main_body
+; GFX1030-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX1030-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; GFX1030-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX1030-NEXT: v_mov_b32_e32 v10, 0x41000000
+; GFX1030-NEXT: v_mov_b32_e32 v9, 0x40e00000
+; GFX1030-NEXT: v_mov_b32_e32 v8, 0x40c00000
+; GFX1030-NEXT: v_mov_b32_e32 v7, 0x40a00000
+; GFX1030-NEXT: v_mov_b32_e32 v6, 4.0
+; GFX1030-NEXT: v_mov_b32_e32 v5, 0x40400000
+; GFX1030-NEXT: v_mov_b32_e32 v4, 2.0
+; GFX1030-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1030-NEXT: v_add_co_u32 v0, s4, s4, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e64 v1, s4, s5, 0, s4
+; GFX1030-NEXT: v_add_co_u32 v2, s4, s6, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e64 v3, s4, s7, 0, s4
+; GFX1030-NEXT: flat_load_dword v0, v[0:1]
+; GFX1030-NEXT: flat_load_dword v1, v[2:3]
+; GFX1030-NEXT: v_mov_b32_e32 v2, 0
+; GFX1030-NEXT: v_mov_b32_e32 v3, 1.0
+; GFX1030-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: image_bvh_intersect_ray v[0:3], v[0:15], s[0:3]
+; GFX1030-NEXT: s_waitcnt vmcnt(0)
+; GFX1030-NEXT: flat_store_dwordx4 v[0:1], v[0:3]
+; GFX1030-NEXT: s_endpgm
 main_body:
   %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
   %gep_node_ptr = getelementptr inbounds i32, i32* %p_node_ptr, i32 %lid
@@ -97,9 +192,54 @@
   ret void
 }
 
-; GCN-LABEL: {{^}}image_bvh_intersect_ray_a16_nsa_reassign:
-; GCN: image_bvh_intersect_ray v[{{[0-9:]+}}], v[{{[0-9:]+}}], s[{{[0-9:]+}}] a16{{$}}
 define amdgpu_kernel void @image_bvh_intersect_ray_a16_nsa_reassign(i32* %p_node_ptr, float* %p_ray, <4 x i32> inreg %tdescr) {
+; GFX1013-LABEL: image_bvh_intersect_ray_a16_nsa_reassign:
+; GFX1013: ; %bb.0: ; %main_body
+; GFX1013-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX1013-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1013-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX1013-NEXT: v_mov_b32_e32 v6, 0x46004500
+; GFX1013-NEXT: v_mov_b32_e32 v7, 0x48004700
+; GFX1013-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1013-NEXT: v_add_co_u32 v2, s4, s4, v0
+; GFX1013-NEXT: v_add_co_ci_u32_e64 v3, s4, s5, 0, s4
+; GFX1013-NEXT: v_add_co_u32 v4, s4, s6, v0
+; GFX1013-NEXT: v_add_co_ci_u32_e64 v5, s4, s7, 0, s4
+; GFX1013-NEXT: flat_load_dword v0, v[2:3]
+; GFX1013-NEXT: flat_load_dword v1, v[4:5]
+; GFX1013-NEXT: v_mov_b32_e32 v2, 0
+; GFX1013-NEXT: v_mov_b32_e32 v3, 1.0
+; GFX1013-NEXT: v_mov_b32_e32 v4, 2.0
+; GFX1013-NEXT: v_mov_b32_e32 v5, 0x44004200
+; GFX1013-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX1013-NEXT: image_bvh_intersect_ray v[0:3], v[0:7], s[0:3] a16
+; GFX1013-NEXT: s_waitcnt vmcnt(0)
+; GFX1013-NEXT: flat_store_dwordx4 v[0:1], v[0:3]
+; GFX1013-NEXT: s_endpgm
+;
+; GFX1030-LABEL: image_bvh_intersect_ray_a16_nsa_reassign:
+; GFX1030: ; %bb.0: ; %main_body
+; GFX1030-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX1030-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; GFX1030-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX1030-NEXT: v_mov_b32_e32 v4, 2.0
+; GFX1030-NEXT: v_mov_b32_e32 v5, 0x44004200
+; GFX1030-NEXT: v_mov_b32_e32 v6, 0x46004500
+; GFX1030-NEXT: v_mov_b32_e32 v7, 0x48004700
+; GFX1030-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1030-NEXT: v_add_co_u32 v0, s4, s4, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e64 v1, s4, s5, 0, s4
+; GFX1030-NEXT: v_add_co_u32 v2, s4, s6, v2
+; GFX1030-NEXT: v_add_co_ci_u32_e64 v3, s4, s7, 0, s4
+; GFX1030-NEXT: flat_load_dword v0, v[0:1]
+; GFX1030-NEXT: flat_load_dword v1, v[2:3]
+; GFX1030-NEXT: v_mov_b32_e32 v2, 0
+; GFX1030-NEXT: v_mov_b32_e32 v3, 1.0
+; GFX1030-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: image_bvh_intersect_ray v[0:3], v[0:7], s[0:3] a16
+; GFX1030-NEXT: s_waitcnt vmcnt(0)
+; GFX1030-NEXT: flat_store_dwordx4 v[0:1], v[0:3]
+; GFX1030-NEXT: s_endpgm
 main_body:
   %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
   %gep_node_ptr = getelementptr inbounds i32, i32* %p_node_ptr, i32 %lid
@@ -120,9 +260,58 @@
   ret void
 }
 
-; GCN-LABEL: {{^}}image_bvh64_intersect_ray_nsa_reassign:
-; GCN: image_bvh64_intersect_ray v[{{[0-9:]+}}], v[{{[0-9:]+}}], s[{{[0-9:]+}}]{{$}}
 define amdgpu_kernel void @image_bvh64_intersect_ray_nsa_reassign(float* %p_ray, <4 x i32> inreg %tdescr) {
+; GFX1013-LABEL: image_bvh64_intersect_ray_nsa_reassign:
+; GFX1013: ; %bb.0: ; %main_body
+; GFX1013-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
+; GFX1013-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1013-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX1013-NEXT: v_mov_b32_e32 v3, 0
+; GFX1013-NEXT: v_mov_b32_e32 v4, 1.0
+; GFX1013-NEXT: v_mov_b32_e32 v5, 2.0
+; GFX1013-NEXT: v_mov_b32_e32 v6, 0x40400000
+; GFX1013-NEXT: v_mov_b32_e32 v7, 4.0
+; GFX1013-NEXT: v_mov_b32_e32 v8, 0x40a00000
+; GFX1013-NEXT: v_mov_b32_e32 v9, 0x40c00000
+; GFX1013-NEXT: v_mov_b32_e32 v10, 0x40e00000
+; GFX1013-NEXT: v_mov_b32_e32 v11, 0x41000000
+; GFX1013-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1013-NEXT: v_add_co_u32 v0, s4, s4, v0
+; GFX1013-NEXT: v_add_co_ci_u32_e64 v1, s4, s5, 0, s4
+; GFX1013-NEXT: flat_load_dword v2, v[0:1]
+; GFX1013-NEXT: v_mov_b32_e32 v0, 0xb36211c7
+; GFX1013-NEXT: v_mov_b32_e32 v1, 0x102
+; GFX1013-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX1013-NEXT: image_bvh64_intersect_ray v[0:3], v[0:15], s[0:3]
+; GFX1013-NEXT: s_waitcnt vmcnt(0)
+; GFX1013-NEXT: flat_store_dwordx4 v[0:1], v[0:3]
+; GFX1013-NEXT: s_endpgm
+;
+; GFX1030-LABEL: image_bvh64_intersect_ray_nsa_reassign:
+; GFX1030: ; %bb.0: ; %main_body
+; GFX1030-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
+; GFX1030-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1030-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX1030-NEXT: v_mov_b32_e32 v3, 0
+; GFX1030-NEXT: v_mov_b32_e32 v11, 0x41000000
+; GFX1030-NEXT: v_mov_b32_e32 v10, 0x40e00000
+; GFX1030-NEXT: v_mov_b32_e32 v9, 0x40c00000
+; GFX1030-NEXT: v_mov_b32_e32 v8, 0x40a00000
+; GFX1030-NEXT: v_mov_b32_e32 v7, 4.0
+; GFX1030-NEXT: v_mov_b32_e32 v6, 0x40400000
+; GFX1030-NEXT: v_mov_b32_e32 v5, 2.0
+; GFX1030-NEXT: v_mov_b32_e32 v4, 1.0
+; GFX1030-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1030-NEXT: v_add_co_u32 v0, s4, s4, v0
+; GFX1030-NEXT: v_add_co_ci_u32_e64 v1, s4, s5, 0, s4
+; GFX1030-NEXT: flat_load_dword v2, v[0:1]
+; GFX1030-NEXT: v_mov_b32_e32 v1, 0x102
+; GFX1030-NEXT: v_mov_b32_e32 v0, 0xb36211c7
+; GFX1030-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: image_bvh64_intersect_ray v[0:3], v[0:15], s[0:3]
+; GFX1030-NEXT: s_waitcnt vmcnt(0)
+; GFX1030-NEXT: flat_store_dwordx4 v[0:1], v[0:3]
+; GFX1030-NEXT: s_endpgm
 main_body:
   %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
   %gep_ray = getelementptr inbounds float, float* %p_ray, i32 %lid
@@ -141,9 +330,52 @@
   ret void
 }
 
-; GCN-LABEL: {{^}}image_bvh64_intersect_ray_a16_nsa_reassign:
-; GCN: image_bvh64_intersect_ray v[{{[0-9:]+}}], v[{{[0-9:]+}}], s[{{[0-9:]+}}] a16{{$}}
 define amdgpu_kernel void @image_bvh64_intersect_ray_a16_nsa_reassign(float* %p_ray, <4 x i32> inreg %tdescr) {
+; GFX1013-LABEL: image_bvh64_intersect_ray_a16_nsa_reassign:
+; GFX1013: ; %bb.0: ; %main_body
+; GFX1013-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
+; GFX1013-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1013-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX1013-NEXT: v_mov_b32_e32 v3, 0
+; GFX1013-NEXT: v_mov_b32_e32 v4, 1.0
+; GFX1013-NEXT: v_mov_b32_e32 v5, 2.0
+; GFX1013-NEXT: v_mov_b32_e32 v6, 0x44004200
+; GFX1013-NEXT: v_mov_b32_e32 v7, 0x46004500
+; GFX1013-NEXT: v_mov_b32_e32 v8, 0x48004700
+; GFX1013-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1013-NEXT: v_add_co_u32 v0, s4, s4, v0
+; GFX1013-NEXT: v_add_co_ci_u32_e64 v1, s4, s5, 0, s4
+; GFX1013-NEXT: flat_load_dword v2, v[0:1]
+; GFX1013-NEXT: v_mov_b32_e32 v0, 0xb36211c6
+; GFX1013-NEXT: v_mov_b32_e32 v1, 0x102
+; GFX1013-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX1013-NEXT: image_bvh64_intersect_ray v[0:3], v[0:15], s[0:3] a16
+; GFX1013-NEXT: s_waitcnt vmcnt(0)
+; GFX1013-NEXT: flat_store_dwordx4 v[0:1], v[0:3]
+; GFX1013-NEXT: s_endpgm
+;
+; GFX1030-LABEL: image_bvh64_intersect_ray_a16_nsa_reassign:
+; GFX1030: ; %bb.0: ; %main_body
+; GFX1030-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
+; GFX1030-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1030-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX1030-NEXT: v_mov_b32_e32 v3, 0
+; GFX1030-NEXT: v_mov_b32_e32 v5, 2.0
+; GFX1030-NEXT: v_mov_b32_e32 v4, 1.0
+; GFX1030-NEXT: v_mov_b32_e32 v6, 0x44004200
+; GFX1030-NEXT: v_mov_b32_e32 v7, 0x46004500
+; GFX1030-NEXT: v_mov_b32_e32 v8, 0x48004700
+; GFX1030-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1030-NEXT: v_add_co_u32 v0, s4, s4, v0
+; GFX1030-NEXT: v_add_co_ci_u32_e64 v1, s4, s5, 0, s4
+; GFX1030-NEXT: flat_load_dword v2, v[0:1]
+; GFX1030-NEXT: v_mov_b32_e32 v1, 0x102
+; GFX1030-NEXT: v_mov_b32_e32 v0, 0xb36211c6
+; GFX1030-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: image_bvh64_intersect_ray v[0:3], v[0:15], s[0:3] a16
+; GFX1030-NEXT: s_waitcnt vmcnt(0)
+; GFX1030-NEXT: flat_store_dwordx4 v[0:1], v[0:3]
+; GFX1030-NEXT: s_endpgm
 main_body:
   %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
   %gep_ray = getelementptr inbounds float, float* %p_ray, i32 %lid