Index: lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -325,7 +325,7 @@
   case Instruction::Load: {
     LoadInst *LI = cast<LoadInst>(Inst);
     // Currently only handle the case where the Pointer Operand is a GEP so check for that case.
-    return isa<GetElementPtrInst>(LI->getPointerOperand()) && !LI->isVolatile();
+    return isa<GetElementPtrInst>(LI->getPointerOperand()) && LI->isSimple();
   }
   case Instruction::BitCast:
   case Instruction::AddrSpaceCast:
@@ -334,7 +334,7 @@
     // Must be the stored pointer operand, not a stored value, plus
     // since it should be canonical form, the User should be a GEP.
     StoreInst *SI = cast<StoreInst>(Inst);
-    return (SI->getPointerOperand() == User) && isa<GetElementPtrInst>(User) && !SI->isVolatile();
+    return (SI->getPointerOperand() == User) && isa<GetElementPtrInst>(User) && SI->isSimple();
   }
   default:
     return false;
Index: test/CodeGen/AMDGPU/vector-alloca-atomic.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/vector-alloca-atomic.ll
@@ -0,0 +1,65 @@
+; RUN: opt -S -mtriple=amdgcn-- -data-layout=A5 -amdgpu-promote-alloca -sroa -instcombine < %s | FileCheck -check-prefix=OPT %s
+
+; Show what the alloca promotion pass does for non-atomic load/store.
+
+; OPT-LABEL: @vector_alloca_not_atomic(
+;
+; OPT: extractelement <3 x i32> <i32 0, i32 1, i32 2>, i64 %index
+define amdgpu_kernel void @vector_alloca_not_atomic(i32 addrspace(1)* %out, i64 %index) {
+entry:
+  %alloca = alloca [3 x i32], addrspace(5)
+  %a0 = getelementptr [3 x i32], [3 x i32] addrspace(5)* %alloca, i32 0, i32 0
+  %a1 = getelementptr [3 x i32], [3 x i32] addrspace(5)* %alloca, i32 0, i32 1
+  %a2 = getelementptr [3 x i32], [3 x i32] addrspace(5)* %alloca, i32 0, i32 2
+  store i32 0, i32 addrspace(5)* %a0
+  store i32 1, i32 addrspace(5)* %a1
+  store i32 2, i32 addrspace(5)* %a2
+  %tmp = getelementptr [3 x i32], [3 x i32] addrspace(5)* %alloca, i64 0, i64 %index
+  %data = load i32, i32 addrspace(5)* %tmp
+  store i32 %data, i32 addrspace(1)* %out
+  ret void
+}
+
+; OPT-LABEL: @vector_alloca_atomic_read(
+;
+; OPT: alloca [3 x i32]
+; OPT: store i32 0
+; OPT: store i32 1
+; OPT: store i32 2
+; OPT: load atomic i32
+define amdgpu_kernel void @vector_alloca_atomic_read(i32 addrspace(1)* %out, i64 %index) {
+entry:
+  %alloca = alloca [3 x i32], addrspace(5)
+  %a0 = getelementptr [3 x i32], [3 x i32] addrspace(5)* %alloca, i32 0, i32 0
+  %a1 = getelementptr [3 x i32], [3 x i32] addrspace(5)* %alloca, i32 0, i32 1
+  %a2 = getelementptr [3 x i32], [3 x i32] addrspace(5)* %alloca, i32 0, i32 2
+  store i32 0, i32 addrspace(5)* %a0
+  store i32 1, i32 addrspace(5)* %a1
+  store i32 2, i32 addrspace(5)* %a2
+  %tmp = getelementptr [3 x i32], [3 x i32] addrspace(5)* %alloca, i64 0, i64 %index
+  %data = load atomic i32, i32 addrspace(5)* %tmp acquire, align 4
+  store i32 %data, i32 addrspace(1)* %out
+  ret void
+}
+
+; OPT-LABEL: @vector_alloca_atomic_write(
+;
+; OPT: alloca [3 x i32]
+; OPT: store atomic i32 0
+; OPT: store atomic i32 1
+; OPT: store atomic i32 2
+; OPT: load i32
+define amdgpu_kernel void @vector_alloca_atomic_write(i32 addrspace(1)* %out, i64 %index) {
+entry:
+  %alloca = alloca [3 x i32], addrspace(5)
+  %a0 = getelementptr [3 x i32], [3 x i32] addrspace(5)* %alloca, i32 0, i32 0
+  %a1 = getelementptr [3 x i32], [3 x i32] addrspace(5)* %alloca, i32 0, i32 1
+  %a2 = getelementptr [3 x i32], [3 x i32] addrspace(5)* %alloca, i32 0, i32 2
+  store atomic i32 0, i32 addrspace(5)* %a0 release, align 4
+  store atomic i32 1, i32 addrspace(5)* %a1 release, align 4
+  store atomic i32 2, i32 addrspace(5)* %a2 release, align 4
+  %tmp = getelementptr [3 x i32], [3 x i32] addrspace(5)* %alloca, i64 0, i64 %index
+  %data = load i32, i32 addrspace(5)* %tmp
+  store i32 %data, i32 addrspace(1)* %out
+  ret void
+}