diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -1571,12 +1571,12 @@
                                                  SelectionDAG &DAG) const {
   LoadSDNode *Load = cast<LoadSDNode>(Op);
   EVT VT = Op.getValueType();
-  assert(VT.getVectorNumElements() == 3);
+  unsigned BaseAlign = Load->getAlignment();
+  assert(VT.getVectorNumElements() == 3 && BaseAlign >= 16);
   SDValue BasePtr = Load->getBasePtr();
   EVT MemVT = Load->getMemoryVT();
   SDLoc SL(Op);
   const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
-  unsigned BaseAlign = Load->getAlignment();
 
   EVT WideVT =
       EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4);
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -8058,7 +8058,7 @@
     if (!Op->isDivergent() && Alignment >= 4 && NumElements < 32) {
       if (MemVT.isPow2VectorType())
         return SDValue();
-      if (NumElements == 3)
+      if (NumElements == 3 && Alignment >= 16)
        return WidenVectorLoad(Op, DAG);
      return SplitVectorLoad(Op, DAG);
    }
@@ -8076,7 +8076,7 @@
        Alignment >= 4 && NumElements < 32) {
      if (MemVT.isPow2VectorType())
        return SDValue();
-      if (NumElements == 3)
+      if (NumElements == 3 && Alignment >= 16)
        return WidenVectorLoad(Op, DAG);
      return SplitVectorLoad(Op, DAG);
    }
@@ -8093,7 +8093,9 @@
      return SplitVectorLoad(Op, DAG);
    // v3 loads not supported on SI.
    if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
-      return WidenVectorLoad(Op, DAG);
+      // Widen v3 loads when the alignment is 16 bytes or higher.
+      return Alignment < 16 ? SplitVectorLoad(Op, DAG)
+                            : WidenVectorLoad(Op, DAG);
    // v3 and v4 loads are supported for private and global memory.
    return SDValue();
  }
@@ -8117,7 +8119,9 @@
      return SplitVectorLoad(Op, DAG);
    // v3 loads not supported on SI.
    if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
-      return WidenVectorLoad(Op, DAG);
+      // Widen v3 loads when the alignment is 16 bytes or higher.
+      return Alignment < 16 ? SplitVectorLoad(Op, DAG)
+                            : WidenVectorLoad(Op, DAG);
    return SDValue();
  default:
    llvm_unreachable("unsupported private_element_size");
diff --git a/llvm/test/CodeGen/AMDGPU/bfi_int.ll b/llvm/test/CodeGen/AMDGPU/bfi_int.ll
--- a/llvm/test/CodeGen/AMDGPU/bfi_int.ll
+++ b/llvm/test/CodeGen/AMDGPU/bfi_int.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope -check-prefixes=GCN,FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope -check-prefixes=GCN,FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope -check-prefixes=GCN,SI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope -check-prefixes=GCN,VI,FUNC %s
 ; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope -check-prefixes=R600,FUNC %s
 
 ; BFI_INT Definition pattern from ISA docs
@@ -8,9 +8,13 @@
 ; FUNC-LABEL: {{^}}bfi_def:
 ; R600: BFI_INT
-; GCN: s_andn2_b32
-; GCN: s_and_b32
-; GCN: s_or_b32
+; SI: s_andn2_b32
+; SI: s_and_b32
+; SI: s_or_b32
+
+; VI: s_and_b32
+; VI: s_andn2_b32
+; VI: s_or_b32
 
 define amdgpu_kernel void @bfi_def(i32 addrspace(1)* %out, i32 %x, i32 %y, i32 %z) {
 entry:
   %0 = xor i32 %x, -1
diff --git a/llvm/test/CodeGen/AMDGPU/fshl.ll b/llvm/test/CodeGen/AMDGPU/fshl.ll
--- a/llvm/test/CodeGen/AMDGPU/fshl.ll
+++ b/llvm/test/CodeGen/AMDGPU/fshl.ll
@@ -12,14 +12,15 @@
 ; SI-LABEL: fshl_i32:
 ; SI: ; %bb.0: ; %entry
 ; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xb
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xb
+; SI-NEXT: s_load_dword s0, s[0:1], 0xd
 ; SI-NEXT: s_mov_b32 s7, 0xf000
 ; SI-NEXT: s_mov_b32 s6, -1
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, s1
-; SI-NEXT: s_lshr_b32 s1, s0, 1
-; SI-NEXT: v_alignbit_b32 v0, s0, v0, 1
-; SI-NEXT: s_not_b32 s0, s2
+; SI-NEXT: v_mov_b32_e32 v0, s3
+; SI-NEXT: s_not_b32 s0, s0
+; SI-NEXT: v_alignbit_b32 v0, s2, v0, 1
+; SI-NEXT: s_lshr_b32 s1, s2, 1
 ; SI-NEXT: v_mov_b32_e32 v1, s0
 ; SI-NEXT: v_alignbit_b32 v0, s1, v0, v1
 ; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
@@ -27,33 +28,35 @@
 ;
 ; VI-LABEL: fshl_i32:
 ; VI: ; %bb.0: ; %entry
-; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
-; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x2c
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x2c
+; VI-NEXT: s_load_dword s0, s[0:1], 0x34
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v0, s1
-; VI-NEXT: s_not_b32 s2, s2
-; VI-NEXT: s_lshr_b32 s1, s0, 1
-; VI-NEXT: v_alignbit_b32 v0, s0, v0, 1
-; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: v_mov_b32_e32 v0, s5
+; VI-NEXT: s_not_b32 s0, s0
+; VI-NEXT: s_lshr_b32 s1, s4, 1
+; VI-NEXT: v_alignbit_b32 v0, s4, v0, 1
+; VI-NEXT: v_mov_b32_e32 v1, s0
 ; VI-NEXT: v_alignbit_b32 v2, s1, v0, v1
-; VI-NEXT: v_mov_b32_e32 v0, s4
-; VI-NEXT: v_mov_b32_e32 v1, s5
+; VI-NEXT: v_mov_b32_e32 v0, s2
+; VI-NEXT: v_mov_b32_e32 v1, s3
 ; VI-NEXT: flat_store_dword v[0:1], v2
 ; VI-NEXT: s_endpgm
 ;
 ; GFX9-LABEL: fshl_i32:
 ; GFX9: ; %bb.0: ; %entry
-; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
-; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x2c
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x2c
+; GFX9-NEXT: s_load_dword s0, s[0:1], 0x34
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v0, s1
-; GFX9-NEXT: s_not_b32 s2, s2
-; GFX9-NEXT: s_lshr_b32 s1, s0, 1
-; GFX9-NEXT: v_alignbit_b32 v0, s0, v0, 1
-; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: s_not_b32 s0, s0
+; GFX9-NEXT: s_lshr_b32 s1, s4, 1
+; GFX9-NEXT: v_alignbit_b32 v0, s4, v0, 1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
 ; GFX9-NEXT: v_alignbit_b32 v2, s1, v0, v1
-; GFX9-NEXT: v_mov_b32_e32 v0, s4
-; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
 ; GFX9-NEXT: global_store_dword v[0:1], v2, off
 ; GFX9-NEXT: s_endpgm
 ;
diff --git a/llvm/test/CodeGen/AMDGPU/fshr.ll b/llvm/test/CodeGen/AMDGPU/fshr.ll
--- a/llvm/test/CodeGen/AMDGPU/fshr.ll
+++ b/llvm/test/CodeGen/AMDGPU/fshr.ll
@@ -21,39 +21,42 @@
 ; SI-LABEL: fshr_i32:
 ; SI: ; %bb.0: ; %entry
 ; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xb
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xb
+; SI-NEXT: s_load_dword s0, s[0:1], 0xd
 ; SI-NEXT: s_mov_b32 s7, 0xf000
 ; SI-NEXT: s_mov_b32 s6, -1
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, s1
-; SI-NEXT: v_mov_b32_e32 v1, s2
-; SI-NEXT: v_alignbit_b32 v0, s0, v0, v1
+; SI-NEXT: v_mov_b32_e32 v0, s3
+; SI-NEXT: v_mov_b32_e32 v1, s0
+; SI-NEXT: v_alignbit_b32 v0, s2, v0, v1
 ; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
 ; SI-NEXT: s_endpgm
 ;
 ; VI-LABEL: fshr_i32:
 ; VI: ; %bb.0: ; %entry
-; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
-; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x2c
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x2c
+; VI-NEXT: s_load_dword s0, s[0:1], 0x34
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v0, s1
-; VI-NEXT: v_mov_b32_e32 v1, s2
-; VI-NEXT: v_alignbit_b32 v2, s0, v0, v1
-; VI-NEXT: v_mov_b32_e32 v0, s4
-; VI-NEXT: v_mov_b32_e32 v1, s5
+; VI-NEXT: v_mov_b32_e32 v0, s5
+; VI-NEXT: v_mov_b32_e32 v1, s0
+; VI-NEXT: v_alignbit_b32 v2, s4, v0, v1
+; VI-NEXT: v_mov_b32_e32 v0, s2
+; VI-NEXT: v_mov_b32_e32 v1, s3
 ; VI-NEXT: flat_store_dword v[0:1], v2
 ; VI-NEXT: s_endpgm
 ;
 ; GFX9-LABEL: fshr_i32:
 ; GFX9: ; %bb.0: ; %entry
-; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
-; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x2c
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x2c
+; GFX9-NEXT: s_load_dword s0, s[0:1], 0x34
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v0, s1
-; GFX9-NEXT: v_mov_b32_e32 v1, s2
-; GFX9-NEXT: v_alignbit_b32 v2, s0, v0, v1
-; GFX9-NEXT: v_mov_b32_e32 v0, s4
-; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_alignbit_b32 v2, s4, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
 ; GFX9-NEXT: global_store_dword v[0:1], v2, off
 ; GFX9-NEXT: s_endpgm
 ;
diff --git a/llvm/test/CodeGen/AMDGPU/kernel-args.ll b/llvm/test/CodeGen/AMDGPU/kernel-args.ll
--- a/llvm/test/CodeGen/AMDGPU/kernel-args.ll
+++ b/llvm/test/CodeGen/AMDGPU/kernel-args.ll
@@ -276,8 +276,10 @@
 ; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Y
 ; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Z
 ; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
-; SI: s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0xd
-; MESA-VI: s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x34
+; SI: s_load_dword s{{[0-9]+}}, s[0:1], 0xf
+; SI: s_load_dwordx2 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0xd
+; MESA-VI: s_load_dwordx2 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x34
+; MESA-VI: s_load_dword s{{[0-9]+}}, s[0:1], 0x3c
 ; HSA-GFX9: s_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x10
 define amdgpu_kernel void @v3i32_arg(<3 x i32> addrspace(1)* nocapture %out, <3 x i32> %in) nounwind {
 entry:
@@ -291,8 +293,10 @@
 ; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Y
 ; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Z
 ; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
-; SI: s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0xd
-; MESA-VI: s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x34
+; SI: s_load_dword s{{[0-9]+}}, s[0:1], 0xf
+; SI: s_load_dwordx2 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0xd
+; MESA-VI: s_load_dwordx2 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x34
+; MESA-VI: s_load_dword s{{[0-9]+}}, s[0:1], 0x3c
 ; HSA-GFX9: s_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x10
 define amdgpu_kernel void @v3f32_arg(<3 x float> addrspace(1)* nocapture %out, <3 x float> %in) nounwind {
 entry:
diff --git a/llvm/test/CodeGen/AMDGPU/merge-stores.ll b/llvm/test/CodeGen/AMDGPU/merge-stores.ll
--- a/llvm/test/CodeGen/AMDGPU/merge-stores.ll
+++ b/llvm/test/CodeGen/AMDGPU/merge-stores.ll
@@ -275,7 +275,8 @@
 }
 
 ; GCN-LABEL: {{^}}merge_global_store_3_adjacent_loads_i32:
-; SI-DAG: buffer_load_dwordx4
+; SI-DAG: buffer_load_dwordx2
+; SI-DAG: buffer_load_dword
 ; CI-DAG: buffer_load_dwordx3
 ; GCN: s_waitcnt
 ; SI-DAG: buffer_store_dwordx2
@@ -613,7 +614,8 @@
 
 ; GCN-LABEL: {{^}}copy_v3i32_align4:
 ; GCN-NOT: SCRATCH_RSRC_DWORD
-; SI-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; SI-DAG: buffer_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; SI-DAG: buffer_load_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:8
 ; CI-DAG: buffer_load_dwordx3 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
 ; GCN-NOT: offen
 ; GCN: s_waitcnt vmcnt
@@ -647,7 +649,8 @@
 
 ; GCN-LABEL: {{^}}copy_v3f32_align4:
 ; GCN-NOT: SCRATCH_RSRC_DWORD
-; SI-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; SI-DAG: buffer_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; SI-DAG: buffer_load_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:8
 ; CI-DAG: buffer_load_dwordx3 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
 ; GCN-NOT: offen
 ; GCN: s_waitcnt vmcnt
diff --git a/llvm/test/CodeGen/AMDGPU/non-entry-alloca.ll b/llvm/test/CodeGen/AMDGPU/non-entry-alloca.ll
--- a/llvm/test/CodeGen/AMDGPU/non-entry-alloca.ll
+++ b/llvm/test/CodeGen/AMDGPU/non-entry-alloca.ll
@@ -17,30 +17,32 @@
 ; MUBUF: ; %bb.0: ; %entry
 ; MUBUF-NEXT: s_add_u32 flat_scratch_lo, s6, s9
 ; MUBUF-NEXT: s_addc_u32 flat_scratch_hi, s7, 0
+; MUBUF-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x8
 ; MUBUF-NEXT: s_add_u32 s0, s0, s9
-; MUBUF-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x8
 ; MUBUF-NEXT: s_addc_u32 s1, s1, 0
 ; MUBUF-NEXT: s_movk_i32 s32, 0x400
 ; MUBUF-NEXT: s_mov_b32 s33, 0
 ; MUBUF-NEXT: s_waitcnt lgkmcnt(0)
-; MUBUF-NEXT: s_cmp_lg_u32 s8, 0
+; MUBUF-NEXT: s_cmp_lg_u32 s6, 0
 ; MUBUF-NEXT: s_cbranch_scc1 BB0_3
 ; MUBUF-NEXT: ; %bb.1: ; %bb.0
-; MUBUF-NEXT: s_cmp_lg_u32 s9, 0
+; MUBUF-NEXT: s_load_dword s6, s[4:5], 0x10
+; MUBUF-NEXT: s_cmp_lg_u32 s7, 0
 ; MUBUF-NEXT: s_cbranch_scc1 BB0_3
 ; MUBUF-NEXT: ; %bb.2: ; %bb.1
-; MUBUF-NEXT: s_add_i32 s6, s32, 0x1000
+; MUBUF-NEXT: s_add_i32 s7, s32, 0x1000
+; MUBUF-NEXT: s_waitcnt lgkmcnt(0)
+; MUBUF-NEXT: s_lshl_b32 s6, s6, 2
 ; MUBUF-NEXT: v_mov_b32_e32 v1, 0
-; MUBUF-NEXT: v_mov_b32_e32 v2, s6
-; MUBUF-NEXT: s_lshl_b32 s7, s10, 2
-; MUBUF-NEXT: s_mov_b32 s32, s6
+; MUBUF-NEXT: v_mov_b32_e32 v2, s7
 ; MUBUF-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
 ; MUBUF-NEXT: v_mov_b32_e32 v1, 1
-; MUBUF-NEXT: s_add_i32 s6, s6, s7
+; MUBUF-NEXT: s_add_i32 s6, s7, s6
 ; MUBUF-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen offset:4
 ; MUBUF-NEXT: v_mov_b32_e32 v1, s6
 ; MUBUF-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen
 ; MUBUF-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
+; MUBUF-NEXT: s_mov_b32 s32, s7
 ; MUBUF-NEXT: s_waitcnt vmcnt(0)
 ; MUBUF-NEXT: v_add_u32_e32 v2, v1, v0
 ; MUBUF-NEXT: s_waitcnt lgkmcnt(0)
@@ -55,29 +57,31 @@
 ; FLATSCR-LABEL: kernel_non_entry_block_static_alloca_uniformly_reached_align4:
 ; FLATSCR: ; %bb.0: ; %entry
 ; FLATSCR-NEXT: s_add_u32 flat_scratch_lo, s6, s9
-; FLATSCR-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x8
 ; FLATSCR-NEXT: s_addc_u32 flat_scratch_hi, s7, 0
+; FLATSCR-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x8
 ; FLATSCR-NEXT: s_mov_b32 s32, 16
 ; FLATSCR-NEXT: s_mov_b32 s33, 0
 ; FLATSCR-NEXT: s_waitcnt lgkmcnt(0)
-; FLATSCR-NEXT: s_cmp_lg_u32 s8, 0
+; FLATSCR-NEXT: s_cmp_lg_u32 s6, 0
 ; FLATSCR-NEXT: s_cbranch_scc1 BB0_3
 ; FLATSCR-NEXT: ; %bb.1: ; %bb.0
-; FLATSCR-NEXT: s_cmp_lg_u32 s9, 0
+; FLATSCR-NEXT: s_load_dword s6, s[4:5], 0x10
+; FLATSCR-NEXT: s_cmp_lg_u32 s7, 0
 ; FLATSCR-NEXT: s_cbranch_scc1 BB0_3
 ; FLATSCR-NEXT: ; %bb.2: ; %bb.1
-; FLATSCR-NEXT: s_mov_b32 s6, s32
-; FLATSCR-NEXT: s_movk_i32 s7, 0x1000
-; FLATSCR-NEXT: s_add_i32 s8, s6, s7
-; FLATSCR-NEXT: s_add_u32 s6, s6, s7
+; FLATSCR-NEXT: s_mov_b32 s7, s32
+; FLATSCR-NEXT: s_movk_i32 s8, 0x1000
+; FLATSCR-NEXT: s_add_i32 s9, s7, s8
+; FLATSCR-NEXT: s_add_u32 s7, s7, s8
 ; FLATSCR-NEXT: v_mov_b32_e32 v1, 0
-; FLATSCR-NEXT: scratch_store_dword off, v1, s6
+; FLATSCR-NEXT: scratch_store_dword off, v1, s7
 ; FLATSCR-NEXT: v_mov_b32_e32 v1, 1
-; FLATSCR-NEXT: s_lshl_b32 s6, s10, 2
-; FLATSCR-NEXT: s_mov_b32 s32, s8
-; FLATSCR-NEXT: scratch_store_dword off, v1, s8 offset:4
-; FLATSCR-NEXT: s_add_i32 s8, s8, s6
-; FLATSCR-NEXT: scratch_load_dword v1, off, s8
+; FLATSCR-NEXT: s_waitcnt lgkmcnt(0)
+; FLATSCR-NEXT: s_lshl_b32 s6, s6, 2
+; FLATSCR-NEXT: s_mov_b32 s32, s9
+; FLATSCR-NEXT: scratch_store_dword off, v1, s9 offset:4
+; FLATSCR-NEXT: s_add_i32 s9, s9, s6
+; FLATSCR-NEXT: scratch_load_dword v1, off, s9
 ; FLATSCR-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
 ; FLATSCR-NEXT: s_waitcnt vmcnt(0)
 ; FLATSCR-NEXT: v_add_u32_e32 v2, v1, v0
diff --git a/llvm/test/CodeGen/AMDGPU/promote-vect3-load.ll b/llvm/test/CodeGen/AMDGPU/promote-vect3-load.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/promote-vect3-load.ll
@@ -0,0 +1,73 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN %s
+
+; The type promotion of the vector loads v3i32/v3f32 into v4i32/v4f32 is
+; enabled only when the alignment is 16 bytes or higher; otherwise the
+; load is split into two separate loads (dwordx2 + dword). Promoting a
+; load with a smaller alignment can cause a page fault, since the widened
+; load accesses one extra dword beyond the end of the buffer.
+
+define protected amdgpu_kernel void @load_v3i32_align4(<3 x i32> addrspace(1)* %arg) #0 {
+; GCN-LABEL: load_v3i32_align4:
+; GCN: ; %bb.0:
+; GCN: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_load_dwordx2 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x0
+; GCN-NEXT: s_load_dword s{{[0-9]+}}, s[0:1], 0x8
+  %vec = load <3 x i32>, <3 x i32> addrspace(1)* %arg, align 4
+  store <3 x i32> %vec, <3 x i32> addrspace(1)* undef, align 4
+  ret void
+}
+
+define protected amdgpu_kernel void @load_v3i32_align8(<3 x i32> addrspace(1)* %arg) #0 {
+; GCN-LABEL: load_v3i32_align8:
+; GCN: ; %bb.0:
+; GCN: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_load_dwordx2 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x0
+; GCN-NEXT: s_load_dword s{{[0-9]+}}, s[0:1], 0x8
+  %vec = load <3 x i32>, <3 x i32> addrspace(1)* %arg, align 8
+  store <3 x i32> %vec, <3 x i32> addrspace(1)* undef, align 8
+  ret void
+}
+
+define protected amdgpu_kernel void @load_v3i32_align16(<3 x i32> addrspace(1)* %arg) #0 {
+; GCN-LABEL: load_v3i32_align16:
+; GCN: ; %bb.0:
+; GCN: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x0
+  %vec = load <3 x i32>, <3 x i32> addrspace(1)* %arg, align 16
+  store <3 x i32> %vec, <3 x i32> addrspace(1)* undef, align 16
+  ret void
+}
+
+define protected amdgpu_kernel void @load_v3f32_align4(<3 x float> addrspace(1)* %arg) #0 {
+; GCN-LABEL: load_v3f32_align4:
+; GCN: ; %bb.0:
+; GCN: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_load_dwordx2 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x0
+; GCN-NEXT: s_load_dword s{{[0-9]+}}, s[0:1], 0x8
+  %vec = load <3 x float>, <3 x float> addrspace(1)* %arg, align 4
+  store <3 x float> %vec, <3 x float> addrspace(1)* undef, align 4
+  ret void
+}
+
+define protected amdgpu_kernel void @load_v3f32_align8(<3 x float> addrspace(1)* %arg) #0 {
+; GCN-LABEL: load_v3f32_align8:
+; GCN: ; %bb.0:
+; GCN: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_load_dwordx2 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x0
+; GCN-NEXT: s_load_dword s{{[0-9]+}}, s[0:1], 0x8
+  %vec = load <3 x float>, <3 x float> addrspace(1)* %arg, align 8
+  store <3 x float> %vec, <3 x float> addrspace(1)* undef, align 8
+  ret void
+}
+
+define protected amdgpu_kernel void @load_v3f32_align16(<3 x float> addrspace(1)* %arg) #0 {
+; GCN-LABEL: load_v3f32_align16:
+; GCN: ; %bb.0:
+; GCN: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x0
+  %vec = load <3 x float>, <3 x float> addrspace(1)* %arg, align 16
+  store <3 x float> %vec, <3 x float> addrspace(1)* undef, align 16
+  ret void
+}
+
+attributes #0 = { nounwind noinline }
diff --git a/llvm/test/CodeGen/AMDGPU/sign_extend.ll b/llvm/test/CodeGen/AMDGPU/sign_extend.ll
--- a/llvm/test/CodeGen/AMDGPU/sign_extend.ll
+++ b/llvm/test/CodeGen/AMDGPU/sign_extend.ll
@@ -38,30 +38,32 @@
 ; SI-LABEL: test_s_sext_i32_to_i64:
 ; SI: ; %bb.0: ; %entry
 ; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xb
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xb
+; SI-NEXT: s_load_dword s0, s[0:1], 0xd
 ; SI-NEXT: s_mov_b32 s7, 0xf000
 ; SI-NEXT: s_mov_b32 s6, -1
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mul_i32 s0, s0, s1
-; SI-NEXT: s_add_i32 s0, s0, s2
-; SI-NEXT: s_ashr_i32 s1, s0, 31
-; SI-NEXT: v_mov_b32_e32 v0, s0
-; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: s_mul_i32 s1, s2, s3
+; SI-NEXT: s_add_i32 s1, s1, s0
+; SI-NEXT: s_ashr_i32 s0, s1, 31
+; SI-NEXT: v_mov_b32_e32 v0, s1
+; SI-NEXT: v_mov_b32_e32 v1, s0
 ; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT: s_endpgm
 ;
 ; VI-LABEL: test_s_sext_i32_to_i64:
 ; VI: ; %bb.0: ; %entry
 ; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
-; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x2c
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x2c
+; VI-NEXT: s_load_dword s0, s[0:1], 0x34
 ; VI-NEXT: s_mov_b32 s7, 0xf000
 ; VI-NEXT: s_mov_b32 s6, -1
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_mul_i32 s0, s0, s1
-; VI-NEXT: s_add_i32 s0, s0, s2
-; VI-NEXT: s_ashr_i32 s1, s0, 31
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: s_mul_i32 s1, s2, s3
+; VI-NEXT: s_add_i32 s1, s1, s0
+; VI-NEXT: s_ashr_i32 s0, s1, 31
+; VI-NEXT: v_mov_b32_e32 v0, s1
+; VI-NEXT: v_mov_b32_e32 v1, s0
 ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; VI-NEXT: s_endpgm
 entry:
@@ -290,13 +292,14 @@
 ; SI-LABEL: v_sext_i1_to_i16_with_and:
 ; SI: ; %bb.0:
 ; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xb
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xb
+; SI-NEXT: s_load_dword s0, s[0:1], 0xd
 ; SI-NEXT: s_mov_b32 s7, 0xf000
 ; SI-NEXT: s_mov_b32 s6, -1
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, s0, v0
-; SI-NEXT: v_mov_b32_e32 v0, s2
-; SI-NEXT: v_cmp_eq_u32_e64 s[0:1], s1, v0
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, s2, v0
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_cmp_eq_u32_e64 s[0:1], s3, v0
 ; SI-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
 ; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
 ; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
@@ -305,13 +308,14 @@
 ; VI-LABEL: v_sext_i1_to_i16_with_and:
 ; VI: ; %bb.0:
 ; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
-; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x2c
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x2c
+; VI-NEXT: s_load_dword s0, s[0:1], 0x34
 ; VI-NEXT: s_mov_b32 s7, 0xf000
 ; VI-NEXT: s_mov_b32 s6, -1
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, s0, v0
-; VI-NEXT: v_mov_b32_e32 v0, s2
-; VI-NEXT: v_cmp_eq_u32_e64 s[0:1], s1, v0
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, s2, v0
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_cmp_eq_u32_e64 s[0:1], s3, v0
 ; VI-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
 ; VI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
 ; VI-NEXT: buffer_store_short v0, off, s[4:7], 0
diff --git a/llvm/test/CodeGen/AMDGPU/store-local.96.ll b/llvm/test/CodeGen/AMDGPU/store-local.96.ll
--- a/llvm/test/CodeGen/AMDGPU/store-local.96.ll
+++ b/llvm/test/CodeGen/AMDGPU/store-local.96.ll
@@ -7,39 +7,42 @@
 ; GFX9-LABEL: store_lds_v3i32:
 ; GFX9: ; %bb.0:
 ; GFX9-NEXT: s_load_dword s4, s[0:1], 0x24
-; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dword s0, s[0:1], 0x3c
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v3, s4
-; GFX9-NEXT: v_mov_b32_e32 v0, s0
-; GFX9-NEXT: v_mov_b32_e32 v1, s1
-; GFX9-NEXT: v_mov_b32_e32 v2, s2
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: v_mov_b32_e32 v2, s0
 ; GFX9-NEXT: ds_write_b96 v3, v[0:2]
 ; GFX9-NEXT: s_endpgm
 ;
 ; GFX7-LABEL: store_lds_v3i32:
 ; GFX7: ; %bb.0:
 ; GFX7-NEXT: s_load_dword s4, s[0:1], 0x9
-; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX7-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GFX7-NEXT: s_load_dword s0, s[0:1], 0xf
 ; GFX7-NEXT: s_mov_b32 m0, -1
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX7-NEXT: v_mov_b32_e32 v3, s4
-; GFX7-NEXT: v_mov_b32_e32 v0, s0
-; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: v_mov_b32_e32 v2, s0
 ; GFX7-NEXT: ds_write_b96 v3, v[0:2]
 ; GFX7-NEXT: s_endpgm
 ;
 ; GFX6-LABEL: store_lds_v3i32:
 ; GFX6: ; %bb.0:
 ; GFX6-NEXT: s_load_dword s4, s[0:1], 0x9
-; GFX6-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX6-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GFX6-NEXT: s_load_dword s0, s[0:1], 0xf
 ; GFX6-NEXT: s_mov_b32 m0, -1
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX6-NEXT: v_mov_b32_e32 v2, s4
-; GFX6-NEXT: v_mov_b32_e32 v1, s2
-; GFX6-NEXT: ds_write_b32 v2, v1 offset:8
 ; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: v_mov_b32_e32 v1, s1
+; GFX6-NEXT: ds_write_b32 v2, v0 offset:8
+; GFX6-NEXT: v_mov_b32_e32 v0, s2
+; GFX6-NEXT: v_mov_b32_e32 v1, s3
 ; GFX6-NEXT: ds_write_b64 v2, v[0:1]
 ; GFX6-NEXT: s_endpgm
   store <3 x i32> %x, <3 x i32> addrspace(3)* %out
@@ -50,120 +53,123 @@
 ; GFX9-LABEL: store_lds_v3i32_align1:
 ; GFX9: ; %bb.0:
 ; GFX9-NEXT: s_load_dword s4, s[0:1], 0x24
-; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dword s0, s[0:1], 0x3c
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v0, s4
-; GFX9-NEXT: v_mov_b32_e32 v1, s2
-; GFX9-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-NEXT: ds_write_b8 v0, v1 offset:8
-; GFX9-NEXT: ds_write_b8_d16_hi v0, v1 offset:10
-; GFX9-NEXT: ds_write_b8 v0, v2 offset:4
-; GFX9-NEXT: ds_write_b8_d16_hi v0, v2 offset:6
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
-; GFX9-NEXT: s_lshr_b32 s3, s2, 8
-; GFX9-NEXT: ds_write_b8 v0, v1
-; GFX9-NEXT: ds_write_b8_d16_hi v0, v1 offset:2
-; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: s_lshr_b32 s2, s2, 24
+; GFX9-NEXT: s_lshr_b32 s1, s0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, s1
+; GFX9-NEXT: s_lshr_b32 s4, s0, 24
 ; GFX9-NEXT: ds_write_b8 v0, v1 offset:9
-; GFX9-NEXT: v_mov_b32_e32 v1, s2
-; GFX9-NEXT: s_lshr_b32 s2, s1, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
 ; GFX9-NEXT: ds_write_b8 v0, v1 offset:11
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: ds_write_b8 v0, v1 offset:4
+; GFX9-NEXT: ds_write_b8_d16_hi v0, v1 offset:6
 ; GFX9-NEXT: v_mov_b32_e32 v1, s2
-; GFX9-NEXT: s_lshr_b32 s1, s1, 24
+; GFX9-NEXT: s_lshr_b32 s1, s3, 8
+; GFX9-NEXT: ds_write_b8 v0, v1
+; GFX9-NEXT: ds_write_b8_d16_hi v0, v1 offset:2
+; GFX9-NEXT: v_mov_b32_e32 v1, s1
+; GFX9-NEXT: s_lshr_b32 s1, s3, 24
 ; GFX9-NEXT: ds_write_b8 v0, v1 offset:5
 ; GFX9-NEXT: v_mov_b32_e32 v1, s1
-; GFX9-NEXT: s_lshr_b32 s1, s0, 8
+; GFX9-NEXT: s_lshr_b32 s1, s2, 8
 ; GFX9-NEXT: ds_write_b8 v0, v1 offset:7
 ; GFX9-NEXT: v_mov_b32_e32 v1, s1
-; GFX9-NEXT: s_lshr_b32 s0, s0, 24
+; GFX9-NEXT: s_lshr_b32 s1, s2, 24
 ; GFX9-NEXT: ds_write_b8 v0, v1 offset:1
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_mov_b32_e32 v1, s1
 ; GFX9-NEXT: ds_write_b8 v0, v1 offset:3
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: ds_write_b8 v0, v1 offset:8
+; GFX9-NEXT: ds_write_b8_d16_hi v0, v1 offset:10
 ; GFX9-NEXT: s_endpgm
 ;
 ; GFX7-LABEL: store_lds_v3i32_align1:
 ; GFX7: ; %bb.0:
 ; GFX7-NEXT: s_load_dword s4, s[0:1], 0x9
-; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX7-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GFX7-NEXT: s_load_dword s0, s[0:1], 0xf
 ; GFX7-NEXT: s_mov_b32 m0, -1
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX7-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-NEXT: v_mov_b32_e32 v1, s2
-; GFX7-NEXT: v_mov_b32_e32 v2, s1
-; GFX7-NEXT: ds_write_b8 v0, v1 offset:8
-; GFX7-NEXT: ds_write_b8 v0, v2 offset:4
-; GFX7-NEXT: v_mov_b32_e32 v1, s0
-; GFX7-NEXT: s_lshr_b32 s3, s2, 8
-; GFX7-NEXT: ds_write_b8 v0, v1
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: s_lshr_b32 s3, s2, 24
+; GFX7-NEXT: s_lshr_b32 s1, s0, 8
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: s_lshr_b32 s1, s0, 24
 ; GFX7-NEXT: ds_write_b8 v0, v1 offset:9
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: s_lshr_b32 s2, s2, 16
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: s_lshr_b32 s1, s0, 16
 ; GFX7-NEXT: ds_write_b8 v0, v1 offset:11
-; GFX7-NEXT: v_mov_b32_e32 v1, s2
-; GFX7-NEXT: s_lshr_b32 s2, s1, 8
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
 ; GFX7-NEXT: ds_write_b8 v0, v1 offset:10
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: ds_write_b8 v0, v1 offset:4
 ; GFX7-NEXT: v_mov_b32_e32 v1, s2
-; GFX7-NEXT: s_lshr_b32 s2, s1, 24
+; GFX7-NEXT: s_lshr_b32 s1, s3, 8
+; GFX7-NEXT: ds_write_b8 v0, v1
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: s_lshr_b32 s1, s3, 24
 ; GFX7-NEXT: ds_write_b8 v0, v1 offset:5
-; GFX7-NEXT: v_mov_b32_e32 v1, s2
-; GFX7-NEXT: s_lshr_b32 s1, s1, 16
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: s_lshr_b32 s1, s3, 16
 ; GFX7-NEXT: ds_write_b8 v0, v1 offset:7
 ; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: s_lshr_b32 s1, s0, 8
+; GFX7-NEXT: s_lshr_b32 s1, s2, 8
 ; GFX7-NEXT: ds_write_b8 v0, v1 offset:6
 ; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: s_lshr_b32 s1, s0, 24
+; GFX7-NEXT: s_lshr_b32 s1, s2, 24
 ; GFX7-NEXT: ds_write_b8 v0, v1 offset:1
 ; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: s_lshr_b32 s0, s0, 16
+; GFX7-NEXT: s_lshr_b32 s1, s2, 16
 ; GFX7-NEXT: ds_write_b8 v0, v1 offset:3
-; GFX7-NEXT: v_mov_b32_e32 v1, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
 ; GFX7-NEXT: ds_write_b8 v0, v1 offset:2
+; GFX7-NEXT: v_mov_b32_e32 v1, s0
+; GFX7-NEXT: ds_write_b8 v0, v1 offset:8
 ; GFX7-NEXT: s_endpgm
 ;
 ; GFX6-LABEL: store_lds_v3i32_align1:
 ; GFX6: ; %bb.0:
 ; GFX6-NEXT: s_load_dword s4, s[0:1], 0x9
-; GFX6-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX6-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GFX6-NEXT: s_load_dword s0, s[0:1], 0xf
 ; GFX6-NEXT: s_mov_b32 m0, -1
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX6-NEXT: v_mov_b32_e32 v0, s4
-; GFX6-NEXT: v_mov_b32_e32 v1, s2
-; GFX6-NEXT: v_mov_b32_e32 v2, s1
-; GFX6-NEXT: ds_write_b8 v0, v1 offset:8
-; GFX6-NEXT: ds_write_b8 v0, v2 offset:4
-; GFX6-NEXT: v_mov_b32_e32 v1, s0
-; GFX6-NEXT: s_lshr_b32 s3, s2, 8
-; GFX6-NEXT: ds_write_b8 v0, v1
-; GFX6-NEXT: v_mov_b32_e32 v1, s3
-; GFX6-NEXT: s_lshr_b32 s3, s2, 24
+; GFX6-NEXT: s_lshr_b32 s1, s0, 8
+; GFX6-NEXT: v_mov_b32_e32 v1, s1
+; GFX6-NEXT: s_lshr_b32 s1, s0, 24
 ; GFX6-NEXT: ds_write_b8 v0, v1 offset:9
-; GFX6-NEXT: v_mov_b32_e32 v1, s3
-; GFX6-NEXT: s_lshr_b32 s2, s2, 16
+; GFX6-NEXT: v_mov_b32_e32 v1, s1
+; GFX6-NEXT: s_lshr_b32 s1, s0, 16
 ; GFX6-NEXT: ds_write_b8 v0, v1 offset:11
-; GFX6-NEXT: v_mov_b32_e32 v1, s2
-; GFX6-NEXT: s_lshr_b32 s2, s1, 8
+; GFX6-NEXT: v_mov_b32_e32 v1, s1
 ; GFX6-NEXT: ds_write_b8 v0, v1 offset:10
+; GFX6-NEXT: v_mov_b32_e32 v1, s3
+; GFX6-NEXT: ds_write_b8 v0, v1 offset:4
 ; GFX6-NEXT: v_mov_b32_e32 v1, s2
-; GFX6-NEXT: s_lshr_b32 s2, s1, 24
+; GFX6-NEXT: s_lshr_b32 s1, s3, 8
+; GFX6-NEXT: ds_write_b8 v0, v1
+; GFX6-NEXT: v_mov_b32_e32 v1, s1
+; GFX6-NEXT: s_lshr_b32 s1, s3, 24
 ; GFX6-NEXT: ds_write_b8 v0, v1 offset:5
-; GFX6-NEXT: v_mov_b32_e32 v1, s2
-; GFX6-NEXT: s_lshr_b32 s1, s1, 16
+; GFX6-NEXT: v_mov_b32_e32 v1, s1
+; GFX6-NEXT: s_lshr_b32 s1, s3, 16
 ; GFX6-NEXT: ds_write_b8 v0, v1 offset:7
 ; GFX6-NEXT: v_mov_b32_e32 v1, s1
-; GFX6-NEXT: s_lshr_b32 s1, s0, 8
+; GFX6-NEXT: s_lshr_b32 s1, s2, 8
 ; GFX6-NEXT: ds_write_b8 v0, v1 offset:6
 ; GFX6-NEXT: v_mov_b32_e32 v1, s1
-; GFX6-NEXT: s_lshr_b32 s1, s0, 24
+; GFX6-NEXT: s_lshr_b32 s1, s2, 24
 ; GFX6-NEXT: ds_write_b8 v0, v1 offset:1
 ; GFX6-NEXT: v_mov_b32_e32 v1, s1
-; GFX6-NEXT: s_lshr_b32 s0, s0, 16
+; GFX6-NEXT: s_lshr_b32 s1, s2, 16
 ; GFX6-NEXT: ds_write_b8 v0, v1 offset:3
-; GFX6-NEXT: v_mov_b32_e32 v1, s0
+; GFX6-NEXT: v_mov_b32_e32 v1, s1
 ; GFX6-NEXT: ds_write_b8 v0, v1 offset:2
+; GFX6-NEXT: v_mov_b32_e32 v1, s0
+; GFX6-NEXT: ds_write_b8 v0, v1 offset:8
 ; GFX6-NEXT: s_endpgm
   store <3 x i32> %x, <3 x i32> addrspace(3)* %out, align 1
   ret void
@@ -173,66 +179,69 @@
 ; GFX9-LABEL: store_lds_v3i32_align2:
 ; GFX9: ; %bb.0:
 ; GFX9-NEXT: s_load_dword s4, s[0:1], 0x24
-; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dword s0, s[0:1], 0x3c
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: ds_write_b16 v0, v1 offset:4
+; GFX9-NEXT: ds_write_b16_d16_hi v0, v1 offset:6
 ; GFX9-NEXT: v_mov_b32_e32 v1, s2
-; GFX9-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-NEXT: ds_write_b16 v0, v1 offset:8
-; GFX9-NEXT: ds_write_b16_d16_hi v0, v1 offset:10
-; GFX9-NEXT: ds_write_b16 v0, v2 offset:4
-; GFX9-NEXT: ds_write_b16_d16_hi v0, v2 offset:6
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
 ; GFX9-NEXT: ds_write_b16 v0, v1
 ; GFX9-NEXT: ds_write_b16_d16_hi v0, v1 offset:2
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: ds_write_b16 v0, v1 offset:8
+; GFX9-NEXT: ds_write_b16_d16_hi v0, v1 offset:10
 ; GFX9-NEXT: s_endpgm
 ;
 ; GFX7-LABEL: store_lds_v3i32_align2:
 ; GFX7: ; %bb.0:
 ; GFX7-NEXT: s_load_dword s4, s[0:1], 0x9
-; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX7-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GFX7-NEXT: s_load_dword s0, s[0:1], 0xf
 ; GFX7-NEXT: s_mov_b32 m0, -1
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: s_lshr_b32 s1, s0, 16
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: ds_write_b16 v0, v1 offset:10
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: ds_write_b16 v0, v1 offset:4
 ; GFX7-NEXT: v_mov_b32_e32 v1, s2
-; GFX7-NEXT: v_mov_b32_e32 v2, s1
-; GFX7-NEXT: ds_write_b16 v0, v1 offset:8
-; GFX7-NEXT: ds_write_b16 v0, v2 offset:4
-; GFX7-NEXT: v_mov_b32_e32 v1, s0
-; GFX7-NEXT: s_lshr_b32 s2, s2, 16
+; GFX7-NEXT: s_lshr_b32 s1, s3, 16
 ; GFX7-NEXT: ds_write_b16 v0, v1
-; GFX7-NEXT: v_mov_b32_e32 v1, s2
-; GFX7-NEXT: s_lshr_b32 s1, s1, 16
-; GFX7-NEXT: ds_write_b16 v0, v1 offset:10
 ; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: s_lshr_b32 s0, s0, 16
+; GFX7-NEXT: s_lshr_b32 s1, s2, 16
 ; GFX7-NEXT: ds_write_b16 v0, v1 offset:6
-; GFX7-NEXT: v_mov_b32_e32 v1, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
 ; GFX7-NEXT: ds_write_b16 v0, v1 offset:2
+; GFX7-NEXT: v_mov_b32_e32 v1, s0
+; GFX7-NEXT: ds_write_b16 v0, v1 offset:8
 ; GFX7-NEXT: s_endpgm
 ;
 ; GFX6-LABEL: store_lds_v3i32_align2:
 ; GFX6: ; %bb.0:
 ; GFX6-NEXT: s_load_dword s4, s[0:1], 0x9
-; GFX6-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX6-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GFX6-NEXT: s_load_dword s0, s[0:1], 0xf
 ; GFX6-NEXT: s_mov_b32 m0, -1
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: s_lshr_b32 s1, s0, 16
+; GFX6-NEXT: v_mov_b32_e32 v1, s1
+; GFX6-NEXT: ds_write_b16 v0, v1 offset:10
+; GFX6-NEXT: v_mov_b32_e32 v1, s3
+; GFX6-NEXT: ds_write_b16 v0, v1 offset:4
 ; GFX6-NEXT: v_mov_b32_e32 v1, s2
-; GFX6-NEXT: v_mov_b32_e32 v2, s1
-; GFX6-NEXT: ds_write_b16 v0, v1 offset:8
-; GFX6-NEXT: ds_write_b16 v0, v2 offset:4
-; GFX6-NEXT: v_mov_b32_e32 v1, s0
-; GFX6-NEXT: s_lshr_b32 s2, s2, 16
+; GFX6-NEXT: s_lshr_b32 s1, s3, 16
 ; GFX6-NEXT: ds_write_b16 v0, v1
-; GFX6-NEXT: v_mov_b32_e32 v1, s2
-; GFX6-NEXT: s_lshr_b32 s1, s1, 16
-; GFX6-NEXT: ds_write_b16 v0, v1 offset:10
 ; GFX6-NEXT: v_mov_b32_e32 v1, s1
-; GFX6-NEXT: s_lshr_b32 s0, s0, 16
+; GFX6-NEXT: s_lshr_b32 s1, s2, 16
 ; GFX6-NEXT: ds_write_b16 v0, v1 offset:6
-; GFX6-NEXT: v_mov_b32_e32 v1, s0
+; GFX6-NEXT: v_mov_b32_e32 v1, s1
 ; GFX6-NEXT: ds_write_b16 v0, v1 offset:2
+; GFX6-NEXT: v_mov_b32_e32 v1, s0
+; GFX6-NEXT: ds_write_b16 v0, v1 offset:8
 ; GFX6-NEXT: s_endpgm
   store <3 x i32> %x, <3 x i32> addrspace(3)* %out, align 2
   ret void
@@ -242,41 +251,44 @@
 ; GFX9-LABEL: store_lds_v3i32_align4:
 ; GFX9: ; %bb.0:
 ; GFX9-NEXT: s_load_dword s4, s[0:1], 0x24
-; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dword s0, s[0:1], 0x3c
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v0, s4
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
-; GFX9-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-NEXT: v_mov_b32_e32 v3, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: v_mov_b32_e32 v2, s3
 ; GFX9-NEXT: ds_write2_b32 v0, v1, v2 offset1:1
-; GFX9-NEXT: ds_write_b32 v0, v3 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: ds_write_b32 v0, v1 offset:8
 ; GFX9-NEXT: s_endpgm
 ;
 ; GFX7-LABEL: store_lds_v3i32_align4:
 ; GFX7: ; %bb.0:
 ; GFX7-NEXT: s_load_dword s4, s[0:1], 0x9
-; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX7-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GFX7-NEXT: s_load_dword s0, s[0:1], 0xf
 ; GFX7-NEXT: s_mov_b32 m0, -1
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX7-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-NEXT: v_mov_b32_e32 v1, s0
-; GFX7-NEXT: v_mov_b32_e32 v2, s1
-; GFX7-NEXT: ds_write2_b32 v0, v1, v2 offset1:1
 ; GFX7-NEXT: v_mov_b32_e32 v1, s2
+; GFX7-NEXT: v_mov_b32_e32 v2, s3
+; GFX7-NEXT: ds_write2_b32 v0, v1, v2 offset1:1
+; GFX7-NEXT: v_mov_b32_e32 v1, s0
 ; GFX7-NEXT: ds_write_b32 v0, v1 offset:8
 ; GFX7-NEXT: s_endpgm
 ;
 ; GFX6-LABEL: store_lds_v3i32_align4:
 ; GFX6: ; %bb.0:
 ; GFX6-NEXT: s_load_dword s4, s[0:1], 0x9
-; GFX6-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX6-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GFX6-NEXT: s_load_dword s0, s[0:1], 0xf
 ; GFX6-NEXT: s_mov_b32 m0, -1
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX6-NEXT: v_mov_b32_e32 v0, s4
-; GFX6-NEXT: v_mov_b32_e32 v1, s1
-; GFX6-NEXT: v_mov_b32_e32 v2, s0
+; GFX6-NEXT: v_mov_b32_e32 v1, s3
+; GFX6-NEXT: v_mov_b32_e32 v2, s2
 ; GFX6-NEXT: ds_write2_b32 v0, v2, v1 offset1:1
-; GFX6-NEXT: v_mov_b32_e32 v1, s2
+; GFX6-NEXT: v_mov_b32_e32 v1, s0
 ; GFX6-NEXT: ds_write_b32 v0, v1 offset:8
 ; GFX6-NEXT: s_endpgm
   store <3 x i32> %x, <3 x i32> addrspace(3)* %out, align 4
@@ -287,42 +299,45 @@
 ; GFX9-LABEL: store_lds_v3i32_align8:
 ; GFX9: ; %bb.0:
 ; GFX9-NEXT: s_load_dword s4, s[0:1], 0x24
-; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dword s0, s[0:1], 0x3c
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v2, s4
-; GFX9-NEXT: v_mov_b32_e32 v3, s2
-; GFX9-NEXT: v_mov_b32_e32 v0, s0
-; GFX9-NEXT: v_mov_b32_e32 v1, s1
-; GFX9-NEXT: ds_write_b32 v2, v3 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
 ; GFX9-NEXT: ds_write_b64 v2, v[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: ds_write_b32 v2, v0 offset:8
 ; GFX9-NEXT: s_endpgm
 ;
 ; GFX7-LABEL: store_lds_v3i32_align8:
 ; GFX7: ; %bb.0:
 ; GFX7-NEXT: s_load_dword s4, s[0:1], 0x9
-; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX7-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GFX7-NEXT: s_load_dword s0, s[0:1], 0xf
 ; GFX7-NEXT: s_mov_b32 m0, -1
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX7-NEXT: v_mov_b32_e32 v2, s4
-; GFX7-NEXT: v_mov_b32_e32 v1, s2
-; GFX7-NEXT: ds_write_b32 v2, v1 offset:8
-; GFX7-NEXT: v_mov_b32_e32 v0, s0
-; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
 ; GFX7-NEXT: ds_write_b64 v2, v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: ds_write_b32 v2, v0 offset:8
 ; GFX7-NEXT: s_endpgm
 ;
 ; GFX6-LABEL: store_lds_v3i32_align8:
 ; GFX6: ; %bb.0:
 ; GFX6-NEXT: s_load_dword s4, s[0:1], 0x9
-; GFX6-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX6-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GFX6-NEXT: s_load_dword s0, s[0:1], 0xf
 ; GFX6-NEXT: s_mov_b32 m0, -1
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX6-NEXT: v_mov_b32_e32 v2, s4
-; GFX6-NEXT: v_mov_b32_e32 v1, s2
-; GFX6-NEXT: ds_write_b32 v2, v1 offset:8
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: v_mov_b32_e32 v1, s1
+; GFX6-NEXT: v_mov_b32_e32 v0, s2
+; GFX6-NEXT: v_mov_b32_e32 v1, s3
 ; GFX6-NEXT: ds_write_b64 v2, v[0:1]
+; GFX6-NEXT: v_mov_b32_e32 v0, s0
+; GFX6-NEXT: ds_write_b32 v2, v0 offset:8
 ; GFX6-NEXT: s_endpgm
   store <3 x i32> %x, <3 x i32> addrspace(3)* %out, align 8
   ret void
@@ -332,39 +347,42 @@
 ; GFX9-LABEL: store_lds_v3i32_align16:
 ; GFX9: ; %bb.0:
 ; GFX9-NEXT: s_load_dword s4, s[0:1], 0x24
-; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dword s0, s[0:1], 0x3c
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v3, s4
-; GFX9-NEXT: v_mov_b32_e32 v0, s0
-; GFX9-NEXT: v_mov_b32_e32 v1, s1
-; GFX9-NEXT: v_mov_b32_e32 v2, s2
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: v_mov_b32_e32 v2, s0
 ; GFX9-NEXT: ds_write_b96 v3, v[0:2]
 ; GFX9-NEXT: s_endpgm
 ;
 ; GFX7-LABEL: store_lds_v3i32_align16:
 ; GFX7: ; %bb.0:
 ; GFX7-NEXT: s_load_dword s4, s[0:1], 0x9
-; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX7-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GFX7-NEXT: s_load_dword s0, s[0:1], 0xf
 ; GFX7-NEXT: s_mov_b32 m0, -1
 ; GFX7-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX7-NEXT: v_mov_b32_e32 v3, s4
-; GFX7-NEXT: v_mov_b32_e32 v0, s0
-; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: v_mov_b32_e32 v2, s0
 ; GFX7-NEXT: ds_write_b96 v3, v[0:2]
 ; GFX7-NEXT: s_endpgm
 ;
 ; GFX6-LABEL: store_lds_v3i32_align16:
 ; GFX6: ; %bb.0:
 ; GFX6-NEXT: s_load_dword s4, s[0:1], 0x9
-; GFX6-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX6-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GFX6-NEXT: s_load_dword s0, s[0:1], 0xf
 ; GFX6-NEXT: s_mov_b32 m0, -1
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX6-NEXT: v_mov_b32_e32 v2, s4
-; GFX6-NEXT: v_mov_b32_e32 v1, s2
-; GFX6-NEXT: ds_write_b32 v2, v1 offset:8
 ; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: v_mov_b32_e32 v1, s1
+; GFX6-NEXT: ds_write_b32 v2, v0 offset:8
+; GFX6-NEXT: v_mov_b32_e32 v0, s2
+; GFX6-NEXT: v_mov_b32_e32 v1, s3
 ; GFX6-NEXT: ds_write_b64 v2, v[0:1]
 ; GFX6-NEXT: s_endpgm
   store <3 x i32> %x, <3 x i32> addrspace(3)* %out, align 16
diff --git a/llvm/test/CodeGen/AMDGPU/vector-extract-insert.ll b/llvm/test/CodeGen/AMDGPU/vector-extract-insert.ll
--- a/llvm/test/CodeGen/AMDGPU/vector-extract-insert.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-extract-insert.ll
@@ -34,35 +34,37 @@
 define amdgpu_kernel void @extract_insert_different_dynelt_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %val, i32 %idx0, i32 %idx1) #1 {
 ; GCN-LABEL: extract_insert_different_dynelt_v4i32:
 ; GCN: ; %bb.0:
-; GCN-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-NEXT: s_load_dwordx4 s[8:11], s[0:1], 0xd
+; GCN-NEXT: s_load_dwordx4 s[8:11], s[0:1], 0x9
+; GCN-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
 ; GCN-NEXT: v_mov_b32_e32 v5, 0
-; GCN-NEXT: s_mov_b32 s3, 0xf000
-; GCN-NEXT: s_mov_b32 s2, 0
+; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_mov_b32 s6, 0
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_mov_b64 s[0:1], s[6:7]
+; GCN-NEXT: s_mov_b64 s[4:5], s[10:11]
 ; GCN-NEXT: v_lshlrev_b32_e32 v4, 4, v0
-; GCN-NEXT: buffer_load_dwordx4 v[1:4], v[4:5], s[0:3], 0 addr64
+; GCN-NEXT: buffer_load_dwordx4 v[1:4], v[4:5], s[4:7], 0 addr64
+; GCN-NEXT: s_load_dword s0, s[0:1], 0xf
 ; GCN-NEXT: v_lshlrev_b32_e32 v6, 2, v0
-; GCN-NEXT: v_mov_b32_e32 v0, s8
-; GCN-NEXT: v_cmp_eq_u32_e64 vcc, s9, 3
+; GCN-NEXT: v_mov_b32_e32 v0, s2
+; GCN-NEXT: v_cmp_eq_u32_e64 vcc, s3, 3
 ; GCN-NEXT: v_mov_b32_e32 v7, v5
-; GCN-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GCN-NEXT: s_mov_b64 s[10:11], s[6:7]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 ; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v0, vcc
-; GCN-NEXT: v_cmp_eq_u32_e64 vcc, s9, 2
+; GCN-NEXT: v_cmp_eq_u32_e64 vcc, s3, 2
 ; GCN-NEXT: v_cndmask_b32_e32 v3, v3, v0, vcc
-; GCN-NEXT: v_cmp_eq_u32_e64 vcc, s9, 1
+; GCN-NEXT: v_cmp_eq_u32_e64 vcc, s3, 1
 ; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v0, vcc
-; GCN-NEXT: v_cmp_eq_u32_e64 vcc, s9, 0
+; GCN-NEXT: v_cmp_eq_u32_e64 vcc, s3, 0
 ; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; GCN-NEXT: v_cmp_eq_u32_e64 vcc, s10, 1
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e64 vcc, s0, 1
 ; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT: v_cmp_eq_u32_e64 vcc, s10, 2
+; GCN-NEXT: v_cmp_eq_u32_e64 vcc, s0, 2
 ; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
-; GCN-NEXT: v_cmp_eq_u32_e64 vcc, s10, 3
+; GCN-NEXT: v_cmp_eq_u32_e64 vcc, s0, 3
 ; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
-; GCN-NEXT: buffer_store_dword v0, v[6:7], s[4:7], 0 addr64
+; GCN-NEXT: buffer_store_dword v0, v[6:7], s[8:11], 0 addr64
 ; GCN-NEXT: s_endpgm
   %id = call i32 @llvm.amdgcn.workitem.id.x()
   %id.ext = sext i32 %id to i64
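
Reviewer note (not part of the patch): the sketch below is a minimal, self-contained way to observe the new behavior locally. It mirrors the promote-vect3-load.ll test added above; the kernel name and the choice of align 8 vs. align 16 are illustrative, and it assumes a local llc build with the AMDGPU target (as in the test's RUN line).

; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s
define amdgpu_kernel void @widen_only_when_aligned(<3 x i32> addrspace(1)* %lo, <3 x i32> addrspace(1)* %hi) {
  ; align 8 (< 16): the v3i32 load must stay split (s_load_dwordx2 + s_load_dword),
  ; so it never reads past the 12 bytes the buffer actually owns.
  %a = load <3 x i32>, <3 x i32> addrspace(1)* %lo, align 8
  ; align 16: the load may be promoted to v4i32 and fetched as one s_load_dwordx4.
  %b = load <3 x i32>, <3 x i32> addrspace(1)* %hi, align 16
  %c = add <3 x i32> %a, %b
  store <3 x i32> %c, <3 x i32> addrspace(1)* undef, align 16
  ret void
}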