Index: lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -545,14 +545,16 @@
   if (const Argument *A = dyn_cast<Argument>(V))
     return !isArgPassedInSGPR(A);
 
-  // Loads from the private address space are divergent, because threads
-  // can execute the load instruction with the same inputs and get different
-  // results.
+  // Loads from the private and flat address spaces are divergent, because
+  // threads can execute the load instruction with the same inputs and get
+  // different results.
   //
   // All other loads are not divergent, because if threads issue loads with the
   // same arguments, they will always get the same result.
   if (const LoadInst *Load = dyn_cast<LoadInst>(V))
-    return Load->getPointerAddressSpace() == ST->getAMDGPUAS().PRIVATE_ADDRESS;
+    return Load->getPointerAddressSpace() ==
+               ST->getAMDGPUAS().PRIVATE_ADDRESS ||
+           Load->getPointerAddressSpace() == ST->getAMDGPUAS().FLAT_ADDRESS;
 
   // Atomics are divergent because they are executed sequentially: when an
   // atomic operation refers to the same address in each thread, then each
Index: test/Analysis/DivergenceAnalysis/AMDGPU/divergent-flat.ll
===================================================================
--- /dev/null
+++ test/Analysis/DivergenceAnalysis/AMDGPU/divergent-flat.ll
@@ -0,0 +1,14 @@
+; RUN: opt -mtriple=amdgcn-- -analyze -divergence %s | FileCheck %s
+
+; Test that we do not consider loads from flat addrspace to be uniform.
+
+; CHECK: DIVERGENT: %val = load i32, i32* %flat, align 4
+define amdgpu_kernel void @spam(i32 addrspace(5)* %priv) {
+  %flat = addrspacecast i32 addrspace(5)* %priv to i32*
+  %idx = call i32 @llvm.amdgcn.workitem.id.x()
+  store i32 %idx, i32* %flat, align 4
+  %val = load i32, i32* %flat, align 4
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x()
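
Note: the C++ change above amounts to extending the load-divergence predicate from one address space to two. The following is a minimal standalone sketch of that rule, not part of the patch; the AddressSpace enum and the helper name isDivergentLoad are illustrative only, whereas the real code compares against the numeric values returned by ST->getAMDGPUAS().

    // Simplified model of the AMDGPU address spaces relevant here; the actual
    // code uses the numeric address-space values provided by the subtarget.
    enum class AddressSpace { Global, Constant, Local, Private, Flat };

    // A load is treated as divergent when it reads from the private
    // (per-thread scratch) address space or from the flat address space,
    // because a flat pointer may resolve to private memory at runtime and
    // therefore yield different values in different threads.
    static bool isDivergentLoad(AddressSpace AS) {
      return AS == AddressSpace::Private || AS == AddressSpace::Flat;
    }

The new test exercises exactly this case: a private pointer is addrspacecast to a flat pointer, so the load through %flat must be reported as DIVERGENT.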