Index: lib/Target/AMDGPU/AMDGPU.td
===================================================================
--- lib/Target/AMDGPU/AMDGPU.td
+++ lib/Target/AMDGPU/AMDGPU.td
@@ -61,6 +61,12 @@
   "Support flat address space"
 >;
 
+def FeatureUnalignedBufferAccess : SubtargetFeature<"unaligned-buffer-access",
+  "UnalignedBufferAccess",
+  "true",
+  "Support unaligned global loads and stores"
+>;
+
 def FeatureXNACK : SubtargetFeature<"xnack",
   "EnableXNACK",
   "true",
Index: lib/Target/AMDGPU/AMDGPUSubtarget.h
===================================================================
--- lib/Target/AMDGPU/AMDGPUSubtarget.h
+++ lib/Target/AMDGPU/AMDGPUSubtarget.h
@@ -73,6 +73,7 @@
   bool CaymanISA;
   bool FlatAddressSpace;
   bool FlatForGlobal;
+  bool UnalignedBufferAccess;
   bool EnableIRStructurizer;
   bool EnablePromoteAlloca;
   bool EnableIfCvt;
@@ -179,6 +180,10 @@
     return FlatAddressSpace;
   }
 
+  bool hasUnalignedBufferAccess() const {
+    return UnalignedBufferAccess;
+  }
+
   bool hasSMemRealTime() const {
     return HasSMemRealTime;
   }
Index: lib/Target/AMDGPU/AMDGPUSubtarget.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUSubtarget.cpp
+++ lib/Target/AMDGPU/AMDGPUSubtarget.cpp
@@ -58,7 +58,7 @@
   SmallString<256> FullFS("+promote-alloca,+fp64-denormals,");
 
   if (isAmdHsaOS()) // Turn on FlatForGlobal for HSA.
-    FullFS += "+flat-for-global,";
+    FullFS += "+flat-for-global,+unaligned-buffer-access,";
   FullFS += FS;
 
   ParseSubtargetFeatures(GPU, FullFS);
@@ -85,7 +85,9 @@
     TexVTXClauseSize(0), Gen(AMDGPUSubtarget::R600), FP64(false),
     FP64Denormals(false), FP32Denormals(false), FPExceptions(false),
     FastFMAF32(false), HalfRate64Ops(false), CaymanISA(false),
-    FlatAddressSpace(false), FlatForGlobal(false), EnableIRStructurizer(true),
+    FlatAddressSpace(false), FlatForGlobal(false),
+    UnalignedBufferAccess(false),
+    EnableIRStructurizer(true),
     EnablePromoteAlloca(false), EnableIfCvt(true),
     EnableLoadStoreOpt(false), EnableUnsafeDSOffsetFolding(false),
Index: lib/Target/AMDGPU/SIISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.cpp
+++ lib/Target/AMDGPU/SIISelLowering.cpp
@@ -428,24 +428,30 @@
   if (!VT.isSimple() || VT == MVT::Other)
     return false;
 
-  // TODO - CI+ supports unaligned memory accesses, but this requires driver
-  // support.
-
-  // XXX - The only mention I see of this in the ISA manual is for LDS direct
-  // reads the "byte address and must be dword aligned". Is it also true for the
-  // normal loads and stores?
-  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS) {
+  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
+      AddrSpace == AMDGPUAS::REGION_ADDRESS) {
     // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
     // aligned, 8 byte access in a single operation using ds_read2/write2_b32
     // with adjacent offsets.
     bool AlignedBy4 = (Align % 4 == 0);
     if (IsFast)
       *IsFast = AlignedBy4;
+
     return AlignedBy4;
   }
 
+  if (Subtarget->hasUnalignedBufferAccess()) {
+    // If we have a uniform constant load, it still requires using a slow
+    // buffer instruction if unaligned.
+    if (IsFast) {
+      *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS) ?
+        (Align % 4 == 0) : true;
+    }
+
+    return true;
+  }
+
   // Smaller than dword value must be aligned.
-  // FIXME: This should be allowed on CI+
   if (VT.bitsLT(MVT::i32))
     return false;
Index: lib/Target/AMDGPU/SIInstrInfo.td
===================================================================
--- lib/Target/AMDGPU/SIInstrInfo.td
+++ lib/Target/AMDGPU/SIInstrInfo.td
@@ -179,8 +179,10 @@
 
 def smrd_load : PatFrag <(ops node:$ptr), (load node:$ptr), [{
-  return isConstantLoad(cast<LoadSDNode>(N), -1) &&
-    static_cast<const SITargetLowering *>(getTargetLowering())->isMemOpUniform(N);
+  auto Ld = cast<LoadSDNode>(N);
+  return Ld->getAlignment() >= 4 &&
+    isConstantLoad(Ld, -1) &&
+    static_cast<const SITargetLowering *>(getTargetLowering())->isMemOpUniform(N);
 }]>;
 
 //===----------------------------------------------------------------------===//
Index: test/CodeGen/AMDGPU/amdgpu.private-memory.ll
===================================================================
--- test/CodeGen/AMDGPU/amdgpu.private-memory.ll
+++ test/CodeGen/AMDGPU/amdgpu.private-memory.ll
@@ -1,9 +1,9 @@
 ; RUN: llc -show-mc-encoding -mattr=+promote-alloca -verify-machineinstrs -march=amdgcn < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC
-; RUN: llc -show-mc-encoding -mattr=+promote-alloca -verify-machineinstrs -mtriple=amdgcn--amdhsa -mcpu=kaveri < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-PROMOTE
+; RUN: llc -show-mc-encoding -mattr=+promote-alloca -verify-machineinstrs -mtriple=amdgcn--amdhsa -mcpu=kaveri -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-PROMOTE
 ; RUN: llc -show-mc-encoding -mattr=-promote-alloca -verify-machineinstrs -march=amdgcn < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
-; RUN: llc -show-mc-encoding -mattr=-promote-alloca -verify-machineinstrs -mtriple=amdgcn-amdhsa -mcpu=kaveri < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-ALLOCA
-; RUN: llc -show-mc-encoding -mattr=+promote-alloca -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC
-; RUN: llc -show-mc-encoding -mattr=-promote-alloca -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
+; RUN: llc -show-mc-encoding -mattr=-promote-alloca -verify-machineinstrs -mtriple=amdgcn-amdhsa -mcpu=kaveri -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-ALLOCA
+; RUN: llc -show-mc-encoding -mattr=+promote-alloca -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC
+; RUN: llc -show-mc-encoding -mattr=-promote-alloca -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
 ; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri -amdgpu-promote-alloca < %s | FileCheck -check-prefix=HSAOPT -check-prefix=OPT %s
 ; RUN: opt -S -mtriple=amdgcn-unknown-unknown -mcpu=kaveri -amdgpu-promote-alloca < %s | FileCheck -check-prefix=NOHSAOPT -check-prefix=OPT %s
Index: test/CodeGen/AMDGPU/load.ll
===================================================================
--- test/CodeGen/AMDGPU/load.ll
+++ test/CodeGen/AMDGPU/load.ll
@@ -128,10 +128,7 @@
 ; SI-NOHSA: buffer_load_ubyte
 ; SI-NOHSA: buffer_load_ubyte
-; CI-HSA: flat_load_ubyte
-; CI-HSA: flat_load_ubyte
-; CI-HSA: flat_load_ubyte
-; CI-HSA: flat_load_ubyte
+; CI-HSA: flat_load_dword
 define void @load_v4i8_align1(<4 x i32> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) {
 entry:
   %0 = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 1
Index: test/CodeGen/AMDGPU/unaligned-load-store.ll
===================================================================
--- test/CodeGen/AMDGPU/unaligned-load-store.ll
+++ test/CodeGen/AMDGPU/unaligned-load-store.ll
@@ -1,5 +1,6 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=ALIGNED %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -mattr=+unaligned-buffer-access -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=UNALIGNED %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=ALIGNED %s
 
 ; SI-LABEL: {{^}}unaligned_load_store_i16_local:
 ; SI: ds_read_u8
@@ -14,10 +15,13 @@
 }
 
 ; SI-LABEL: {{^}}unaligned_load_store_i16_global:
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+
+; UNALIGNED: buffer_load_ushort
+; UNALIGNED: buffer_store_short
 ; SI: s_endpgm
 define void @unaligned_load_store_i16_global(i16 addrspace(1)* %p, i16 addrspace(1)* %r) nounwind {
   %v = load i16, i16 addrspace(1)* %p, align 1
@@ -44,14 +48,17 @@
 }
 
 ; SI-LABEL: {{^}}unaligned_load_store_i32_global:
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+
+; UNALIGNED: buffer_load_dword
+; UNALIGNED: buffer_store_dword
 define void @unaligned_load_store_i32_global(i32 addrspace(1)* %p, i32 addrspace(1)* %r) nounwind {
   %v = load i32, i32 addrspace(1)* %p, align 1
   store i32 %v, i32 addrspace(1)* %r, align 1
@@ -59,10 +66,13 @@
 }
 
 ; SI-LABEL: {{^}}align2_load_store_i32_global:
-; SI: buffer_load_ushort
-; SI: buffer_load_ushort
-; SI: buffer_store_short
-; SI: buffer_store_short
+; ALIGNED: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
+; ALIGNED: buffer_store_short
+; ALIGNED: buffer_store_short
+
+; UNALIGNED: buffer_load_dword
+; UNALIGNED: buffer_store_dword
 define void @align2_load_store_i32_global(i32 addrspace(1)* %p, i32 addrspace(1)* %r) nounwind {
   %v = load i32, i32 addrspace(1)* %p, align 2
   store i32 %v, i32 addrspace(1)* %r, align 2
@@ -175,26 +185,29 @@
 }
 
 ; SI-LABEL: {{^}}align2_load_store_i64_global:
-; SI: buffer_load_ushort
-; SI: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
 
-; SI-NOT: v_or_
-; SI-NOT: v_lshl
+; ALIGNED-NOT: v_or_
+; ALIGNED-NOT: v_lshl
 
-; SI: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
 
-; SI-NOT: v_or_
-; SI-NOT: v_lshl
+; ALIGNED-NOT: v_or_
+; ALIGNED-NOT: v_lshl
 
-; SI: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
 
-; SI-NOT: v_or_
-; SI-NOT: v_lshl
+; ALIGNED-NOT: v_or_
+; ALIGNED-NOT: v_lshl
+
+; ALIGNED: buffer_store_short
+; ALIGNED: buffer_store_short
+; ALIGNED: buffer_store_short
+; ALIGNED: buffer_store_short
 
-; SI: buffer_store_short
-; SI: buffer_store_short
-; SI: buffer_store_short
-; SI: buffer_store_short
+; UNALIGNED: buffer_load_dwordx2
+; UNALIGNED: buffer_store_dwordx2
 define void @align2_load_store_i64_global(i64 addrspace(1)* %p, i64 addrspace(1)* %r) {
   %v = load i64, i64 addrspace(1)* %p, align 2
   store i64 %v, i64 addrspace(1)* %r, align 2
@@ -202,26 +215,29 @@
 }
 
 ; SI-LABEL: {{^}}unaligned_load_store_i64_global:
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-
-; SI-NOT: v_or_
-; SI-NOT: v_lshl
-
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+
+; ALIGNED-NOT: v_or_
+; ALIGNED-NOT: v_lshl
+
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+
+; UNALIGNED: buffer_load_dwordx2
+; UNALIGNED: buffer_store_dwordx2
 define void @unaligned_load_store_i64_global(i64 addrspace(1)* %p, i64 addrspace(1)* %r) {
   %v = load i64, i64 addrspace(1)* %p, align 1
   store i64 %v, i64 addrspace(1)* %r, align 1
@@ -276,39 +292,42 @@
 }
 
 ; SI-LABEL: {{^}}unaligned_load_store_v4i32_global
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+
+; UNALIGNED: buffer_load_dwordx4
+; UNALIGNED: buffer_store_dwordx4
 define void @unaligned_load_store_v4i32_global(<4 x i32> addrspace(1)* %p, <4 x i32> addrspace(1)* %r) nounwind {
   %v = load <4 x i32>, <4 x i32> addrspace(1)* %p, align 1
   store <4 x i32> %v, <4 x i32> addrspace(1)* %r, align 1
@@ -393,3 +412,146 @@
   store i64 0, i64 addrspace(3)* %out, align 4
   ret void
 }
+
+; SI-LABEL: {{^}}unaligned_load_i32_constant:
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+
+; UNALIGNED: s_load_dword
+
+; SI: buffer_store_dword
+define void @unaligned_load_i32_constant(i32 addrspace(2)* %p, i32 addrspace(1)* %r) nounwind {
+  %v = load i32, i32 addrspace(2)* %p, align 1
+  store i32 %v, i32 addrspace(1)* %r, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}align2_load_i32_constant:
+; ALIGNED: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
+
+; UNALIGNED: s_load_dword
+; UNALIGNED: buffer_store_dword
+define void @align2_load_i32_constant(i32 addrspace(2)* %p, i32 addrspace(1)* %r) nounwind {
+  %v = load i32, i32 addrspace(2)* %p, align 2
+  store i32 %v, i32 addrspace(1)* %r, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}align2_load_i64_constant:
+; ALIGNED: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
+
+; UNALIGNED: s_load_dwordx2
+; UNALIGNED: buffer_store_dwordx2
+define void @align2_load_i64_constant(i64 addrspace(2)* %p, i64 addrspace(1)* %r) nounwind {
+  %v = load i64, i64 addrspace(2)* %p, align 2
+  store i64 %v, i64 addrspace(1)* %r, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}align4_load_i64_constant:
+; SI: s_load_dwordx2
+; SI: buffer_store_dwordx2
+define void @align4_load_i64_constant(i64 addrspace(2)* %p, i64 addrspace(1)* %r) nounwind {
+  %v = load i64, i64 addrspace(2)* %p, align 4
+  store i64 %v, i64 addrspace(1)* %r, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}align4_load_v4i32_constant:
+; SI: s_load_dwordx4
+; SI: buffer_store_dwordx4
+define void @align4_load_v4i32_constant(<4 x i32> addrspace(2)* %p, <4 x i32> addrspace(1)* %r) nounwind {
+  %v = load <4 x i32>, <4 x i32> addrspace(2)* %p, align 4
+  store <4 x i32> %v, <4 x i32> addrspace(1)* %r, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}unaligned_load_v2i32_constant:
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+
+; UNALIGNED: buffer_load_dwordx2
+
+; SI: buffer_store_dwordx2
+define void @unaligned_load_v2i32_constant(<2 x i32> addrspace(2)* %p, <2 x i32> addrspace(1)* %r) nounwind {
+  %v = load <2 x i32>, <2 x i32> addrspace(2)* %p, align 1
+  store <2 x i32> %v, <2 x i32> addrspace(1)* %r, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}unaligned_load_v4i32_constant:
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+
+; UNALIGNED: buffer_load_dwordx4
+
+; SI: buffer_store_dwordx4
+define void @unaligned_load_v4i32_constant(<4 x i32> addrspace(2)* %p, <4 x i32> addrspace(1)* %r) nounwind {
+  %v = load <4 x i32>, <4 x i32> addrspace(2)* %p, align 1
+  store <4 x i32> %v, <4 x i32> addrspace(1)* %r, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}align4_load_i8_constant:
+; SI: buffer_load_ubyte
+; SI: buffer_store_byte
+define void @align4_load_i8_constant(i8 addrspace(2)* %p, i8 addrspace(1)* %r) nounwind {
+  %v = load i8, i8 addrspace(2)* %p, align 4
+  store i8 %v, i8 addrspace(1)* %r, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}align2_load_i8_constant:
+; SI: buffer_load_ubyte
+; SI: buffer_store_byte
+define void @align2_load_i8_constant(i8 addrspace(2)* %p, i8 addrspace(1)* %r) nounwind {
+  %v = load i8, i8 addrspace(2)* %p, align 2
+  store i8 %v, i8 addrspace(1)* %r, align 2
+  ret void
+}
+
+; SI-LABEL: {{^}}align4_merge_load_2_i32_constant:
+; SI: s_load_dwordx2 s{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x0{{$}}
+; SI-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], s[[LO]]
+; SI-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], s[[HI]]
+; SI: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}}
+define void @align4_merge_load_2_i32_constant(i32 addrspace(2)* %p, i32 addrspace(1)* %r) nounwind {
+  %gep0 = getelementptr i32, i32 addrspace(2)* %p, i64 1
+  %v0 = load i32, i32 addrspace(2)* %p, align 4
+  %v1 = load i32, i32 addrspace(2)* %gep0, align 4
+
+  %gep1 = getelementptr i32, i32 addrspace(1)* %r, i64 1
+  store i32 %v0, i32 addrspace(1)* %r, align 4
+  store i32 %v1, i32 addrspace(1)* %gep1, align 4
+  ret void
+}
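For reference, here is a minimal standalone sketch (not part of the patch) of how the new feature bit is expected to behave; the RUN lines, CHECK prefixes, and function name are illustrative and simply mirror the unaligned_load_store_i32_global checks from test/CodeGen/AMDGPU/unaligned-load-store.ll above.

; Hypothetical example, assuming a CI part (bonaire) and the feature toggled
; explicitly via -mattr; expected selections follow the patch's own tests.
; RUN: llc -march=amdgcn -mcpu=bonaire -mattr=+unaligned-buffer-access -verify-machineinstrs < %s | FileCheck -check-prefix=UNALIGNED %s
; RUN: llc -march=amdgcn -mcpu=bonaire -mattr=-unaligned-buffer-access -verify-machineinstrs < %s | FileCheck -check-prefix=ALIGNED %s

; With the feature enabled, the align-1 global i32 access stays a single dword
; load/store; with it disabled, the access is split into per-byte operations.
; UNALIGNED: buffer_load_dword
; UNALIGNED: buffer_store_dword
; ALIGNED: buffer_load_ubyte
; ALIGNED: buffer_store_byte
define void @example_unaligned_i32_global(i32 addrspace(1)* %p, i32 addrspace(1)* %r) nounwind {
  %v = load i32, i32 addrspace(1)* %p, align 1
  store i32 %v, i32 addrspace(1)* %r, align 1
  ret void
}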