Index: lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -444,9 +444,22 @@
       return false;
 
     if (StoreInst *SI = dyn_cast_or_null<StoreInst>(UseInst)) {
+      // Volatile accesses must not be rewritten into LDS/vector form.
+      if (SI->isVolatile())
+        return false;
+
       // Reject if the stored value is not the pointer operand.
       if (SI->getPointerOperand() != Val)
         return false;
+    } else if (LoadInst *LI = dyn_cast_or_null<LoadInst>(UseInst)) {
+      if (LI->isVolatile())
+        return false;
+    } else if (AtomicRMWInst *RMW = dyn_cast_or_null<AtomicRMWInst>(UseInst)) {
+      if (RMW->isVolatile())
+        return false;
+    } else if (AtomicCmpXchgInst *CAS
+               = dyn_cast_or_null<AtomicCmpXchgInst>(UseInst)) {
+      if (CAS->isVolatile())
+        return false;
     }
 
     if (!User->getType()->isPointerTy())
Index: test/CodeGen/AMDGPU/promote-alloca-volatile.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/promote-alloca-volatile.ll
@@ -0,0 +1,26 @@
+; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -amdgpu-promote-alloca < %s | FileCheck %s
+
+; CHECK-LABEL: @volatile_load(
+; CHECK: alloca [5 x i32]
+; CHECK: load volatile i32, i32*
+define void @volatile_load(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) {
+entry:
+  %stack = alloca [5 x i32], align 4
+  %tmp = load i32, i32 addrspace(1)* %in, align 4
+  %arrayidx1 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %tmp
+  %load = load volatile i32, i32* %arrayidx1
+  store i32 %load, i32 addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @volatile_store(
+; CHECK: alloca [5 x i32]
+; CHECK: store volatile i32 %tmp, i32*
+define void @volatile_store(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) {
+entry:
+  %stack = alloca [5 x i32], align 4
+  %tmp = load i32, i32 addrspace(1)* %in, align 4
+  %arrayidx1 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %tmp
+  store volatile i32 %tmp, i32* %arrayidx1
+  ret void
+}