Index: llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -186,6 +186,13 @@
   if (const LoadInst *Load = dyn_cast<LoadInst>(V))
     return Load->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;
 
+  // Atomics are divergent because they are executed sequentially: when an
+  // atomic operation refers to the same address in each thread, then each
+  // thread after the first sees the value written by the previous thread as
+  // the original value.
+  if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
+    return true;
+
   if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
     const TargetMachine &TM = getTLI()->getTargetMachine();
     return isIntrinsicSourceOfDivergence(TM.getIntrinsicInfo(), Intrinsic);
Index: llvm/trunk/test/Analysis/DivergenceAnalysis/AMDGPU/atomics.ll
===================================================================
--- llvm/trunk/test/Analysis/DivergenceAnalysis/AMDGPU/atomics.ll
+++ llvm/trunk/test/Analysis/DivergenceAnalysis/AMDGPU/atomics.ll
@@ -0,0 +1,15 @@
+; RUN: opt -mtriple=amdgcn-- -analyze -divergence %s | FileCheck %s
+
+; CHECK: DIVERGENT: %orig = atomicrmw xchg i32* %ptr, i32 %val seq_cst
+define i32 @test1(i32* %ptr, i32 %val) #0 {
+  %orig = atomicrmw xchg i32* %ptr, i32 %val seq_cst
+  ret i32 %orig
+}
+
+; CHECK: DIVERGENT: %orig = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
+define {i32, i1} @test2(i32* %ptr, i32 %cmp, i32 %new) {
+  %orig = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
+  ret {i32, i1} %orig
+}
+
+attributes #0 = { "ShaderType"="0" }
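
Illustrative note (not part of the patch): the reasoning in the new comment is that the hardware serializes the atomic updates across the lanes of a wavefront, so the returned "original value" differs per lane even when every operand is uniform. A minimal sketch in LLVM IR, using a hypothetical function name:

; Even with a uniform pointer and a uniform operand, lane 0 may observe 0,
; lane 1 may observe 1, and so on, so %orig must be treated as divergent.
define i32 @uniform_operands_still_divergent(i32* %ptr) {
  %orig = atomicrmw add i32* %ptr, i32 1 seq_cst
  ret i32 %orig
}

This is why the check returns true for any AtomicRMWInst or AtomicCmpXchgInst, without inspecting whether the operands themselves are uniform.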