diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp @@ -123,6 +123,14 @@ } }; +unsigned getMaxVGPRs(const TargetMachine &TM, const Function &F) { + if (!TM.getTargetTriple().isAMDGCN()) + return 128; + + const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F); + return ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first); +} + } // end anonymous namespace char AMDGPUPromoteAlloca::ID = 0; @@ -175,16 +183,7 @@ if (!ST.isPromoteAllocaEnabled()) return false; - if (IsAMDGCN) { - const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F); - MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first); - // A non-entry function has only 32 caller preserved registers. - // Do not promote alloca which will force spilling. - if (!AMDGPU::isEntryFunctionCC(F.getCallingConv())) - MaxVGPRs = std::min(MaxVGPRs, 32u); - } else { - MaxVGPRs = 128; - } + MaxVGPRs = getMaxVGPRs(TM, F); bool SufficientLDS = hasSufficientLocalMem(F); bool Changed = false; @@ -400,11 +399,12 @@ VectorTy = arrayTypeToVecType(ArrayTy); } - // Use up to 1/4 of available register budget for vectorization. + // Use up to 1/2 of available register budget for vectorization if we have + // >=64 MaxVGPRs, otherwise use 1/4. unsigned Limit = PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8 : (MaxVGPRs * 32); - - if (DL.getTypeSizeInBits(AllocaTy) * 4 > Limit) { + const unsigned SizeFactor = (MaxVGPRs >= 64 ? 
2 : 4); + if (DL.getTypeSizeInBits(AllocaTy) * SizeFactor > Limit) { LLVM_DEBUG(dbgs() << " Alloca too big for vectorization with " << MaxVGPRs << " registers available\n"); return false; @@ -1175,17 +1175,7 @@ if (!ST.isPromoteAllocaEnabled()) return false; - unsigned MaxVGPRs; - if (TM.getTargetTriple().getArch() == Triple::amdgcn) { - const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F); - MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first); - // A non-entry function has only 32 caller preserved registers. - // Do not promote alloca which will force spilling. - if (!AMDGPU::isEntryFunctionCC(F.getCallingConv())) - MaxVGPRs = std::min(MaxVGPRs, 32u); - } else { - MaxVGPRs = 128; - } + const unsigned MaxVGPRs = getMaxVGPRs(TM, F); bool Changed = false; BasicBlock &EntryBB = *F.begin(); diff --git a/llvm/test/CodeGen/AMDGPU/vector-alloca-limits.ll b/llvm/test/CodeGen/AMDGPU/vector-alloca-limits.ll --- a/llvm/test/CodeGen/AMDGPU/vector-alloca-limits.ll +++ b/llvm/test/CodeGen/AMDGPU/vector-alloca-limits.ll @@ -3,42 +3,15 @@ target datalayout = "A5" -; OPT-LABEL: @alloca_8xi64_max1024( -; OPT-NOT: alloca -; OPT: <8 x i64> -; LIMIT32: alloca -; LIMIT32-NOT: <8 x i64> -define amdgpu_kernel void @alloca_8xi64_max1024(ptr addrspace(1) %out, i32 %index) #0 { -entry: - %tmp = alloca [8 x i64], addrspace(5) - store i64 0, ptr addrspace(5) %tmp - %tmp1 = getelementptr [8 x i64], ptr addrspace(5) %tmp, i32 0, i32 %index - %tmp2 = load i64, ptr addrspace(5) %tmp1 - store i64 %tmp2, ptr addrspace(1) %out - ret void -} +; >= 64 VGPRs -> limit is 1/2 of available VGPRs. +; < 64 VGPRs -> limit is 1/4 of available VGPRs. 
-; OPT-LABEL: @alloca_9xi64_max1024( -; OPT: alloca [9 x i64] -; OPT-NOT: <9 x i64> -; LIMIT32: alloca -; LIMIT32-NOT: <9 x i64> -define amdgpu_kernel void @alloca_9xi64_max1024(ptr addrspace(1) %out, i32 %index) #0 { -entry: - %tmp = alloca [9 x i64], addrspace(5) - store i64 0, ptr addrspace(5) %tmp - %tmp1 = getelementptr [9 x i64], ptr addrspace(5) %tmp, i32 0, i32 %index - %tmp2 = load i64, ptr addrspace(5) %tmp1 - store i64 %tmp2, ptr addrspace(1) %out - ret void -} - -; OPT-LABEL: @alloca_16xi64_max512( +; OPT-LABEL: @alloca_16xi64_v128( ; OPT-NOT: alloca ; OPT: <16 x i64> ; LIMIT32: alloca ; LIMIT32-NOT: <16 x i64> -define amdgpu_kernel void @alloca_16xi64_max512(ptr addrspace(1) %out, i32 %index) #1 { +define amdgpu_kernel void @alloca_16xi64_v128(ptr addrspace(1) %out, i32 %index) #0 { entry: %tmp = alloca [16 x i64], addrspace(5) store i64 0, ptr addrspace(5) %tmp @@ -48,12 +21,12 @@ ret void } -; OPT-LABEL: @alloca_17xi64_max512( +; OPT-LABEL: @alloca_17xi64_v128( ; OPT: alloca [17 x i64] ; OPT-NOT: <17 x i64> ; LIMIT32: alloca ; LIMIT32-NOT: <17 x i64> -define amdgpu_kernel void @alloca_17xi64_max512(ptr addrspace(1) %out, i32 %index) #1 { +define amdgpu_kernel void @alloca_17xi64_v128(ptr addrspace(1) %out, i32 %index) #0 { entry: %tmp = alloca [17 x i64], addrspace(5) store i64 0, ptr addrspace(5) %tmp @@ -63,27 +36,27 @@ ret void } -; OPT-LABEL: @alloca_9xi128_max512( -; OPT: alloca [9 x i128] -; OPT-NOT: <9 x i128> +; OPT-LABEL: @alloca_8xi128_v128( +; OPT-NOT: alloca +; OPT: <8 x i128> ; LIMIT32: alloca -; LIMIT32-NOT: <9 x i128> -define amdgpu_kernel void @alloca_9xi128_max512(ptr addrspace(1) %out, i32 %index) #1 { +; LIMIT32-NOT: <8 x i128> +define amdgpu_kernel void @alloca_8xi128_v128(ptr addrspace(1) %out, i32 %index) #0 { entry: - %tmp = alloca [9 x i128], addrspace(5) + %tmp = alloca [8 x i128], addrspace(5) store i128 0, ptr addrspace(5) %tmp - %tmp1 = getelementptr [9 x i128], ptr addrspace(5) %tmp, i32 0, i32 %index + %tmp1 = 
getelementptr [8 x i128], ptr addrspace(5) %tmp, i32 0, i32 %index %tmp2 = load i128, ptr addrspace(5) %tmp1 store i128 %tmp2, ptr addrspace(1) %out ret void } -; OPT-LABEL: @alloca_9xi128_max256( +; OPT-LABEL: @alloca_9xi128_v128( ; OPT-NOT: alloca ; OPT: <9 x i128> ; LIMIT32: alloca ; LIMIT32-NOT: <9 x i128> -define amdgpu_kernel void @alloca_9xi128_max256(ptr addrspace(1) %out, i32 %index) #2 { +define amdgpu_kernel void @alloca_9xi128_v128(ptr addrspace(1) %out, i32 %index) #0 { entry: %tmp = alloca [9 x i128], addrspace(5) store i128 0, ptr addrspace(5) %tmp @@ -93,66 +66,66 @@ ret void } -; OPT-LABEL: @alloca_16xi128_max256( +; OPT-LABEL: @alloca_16xi64_v64( ; OPT-NOT: alloca -; OPT: <16 x i128> +; OPT: <16 x i64> ; LIMIT32: alloca -; LIMIT32-NOT: <16 x i128> -define amdgpu_kernel void @alloca_16xi128_max256(ptr addrspace(1) %out, i32 %index) #2 { +; LIMIT32-NOT: <16 x i64> +define amdgpu_kernel void @alloca_16xi64_v64(ptr addrspace(1) %out, i32 %index) #1 { entry: - %tmp = alloca [16 x i128], addrspace(5) - store i128 0, ptr addrspace(5) %tmp - %tmp1 = getelementptr [16 x i128], ptr addrspace(5) %tmp, i32 0, i32 %index - %tmp2 = load i128, ptr addrspace(5) %tmp1 - store i128 %tmp2, ptr addrspace(1) %out + %tmp = alloca [16 x i64], addrspace(5) + store i64 0, ptr addrspace(5) %tmp + %tmp1 = getelementptr [16 x i64], ptr addrspace(5) %tmp, i32 0, i32 %index + %tmp2 = load i64, ptr addrspace(5) %tmp1 + store i64 %tmp2, ptr addrspace(1) %out ret void } -; OPT-LABEL: @alloca_9xi256_max256( -; OPT: alloca [9 x i256] -; OPT-NOT: <9 x i256> +; OPT-LABEL: @alloca_17xi64_v64( +; OPT: alloca [17 x i64] +; OPT-NOT: <17 x i64> ; LIMIT32: alloca -; LIMIT32-NOT: <9 x i256> -define amdgpu_kernel void @alloca_9xi256_max256(ptr addrspace(1) %out, i32 %index) #2 { +; LIMIT32-NOT: <17 x i64> +define amdgpu_kernel void @alloca_17xi64_v64(ptr addrspace(1) %out, i32 %index) #1 { entry: - %tmp = alloca [9 x i256], addrspace(5) - store i256 0, ptr addrspace(5) %tmp - %tmp1 = 
getelementptr [9 x i256], ptr addrspace(5) %tmp, i32 0, i32 %index - %tmp2 = load i256, ptr addrspace(5) %tmp1 - store i256 %tmp2, ptr addrspace(1) %out + %tmp = alloca [17 x i64], addrspace(5) + store i64 0, ptr addrspace(5) %tmp + %tmp1 = getelementptr [17 x i64], ptr addrspace(5) %tmp, i32 0, i32 %index + %tmp2 = load i64, ptr addrspace(5) %tmp1 + store i64 %tmp2, ptr addrspace(1) %out ret void } -; OPT-LABEL: @alloca_9xi64_max256( +; OPT-LABEL: @alloca_4xi64_v32( ; OPT-NOT: alloca -; OPT: <9 x i64> +; OPT: <4 x i64> ; LIMIT32: alloca -; LIMIT32-NOT: <9 x i64> -define amdgpu_kernel void @alloca_9xi64_max256(ptr addrspace(1) %out, i32 %index) #2 { +; LIMIT32-NOT: <4 x i64> +define amdgpu_kernel void @alloca_4xi64_v32(ptr addrspace(1) %out, i32 %index) #2 { entry: - %tmp = alloca [9 x i64], addrspace(5) + %tmp = alloca [4 x i64], addrspace(5) store i64 0, ptr addrspace(5) %tmp - %tmp1 = getelementptr [9 x i64], ptr addrspace(5) %tmp, i32 0, i32 %index + %tmp1 = getelementptr [4 x i64], ptr addrspace(5) %tmp, i32 0, i32 %index %tmp2 = load i64, ptr addrspace(5) %tmp1 store i64 %tmp2, ptr addrspace(1) %out ret void } -; OPT-LABEL: @func_alloca_9xi64_max256( -; OPT: alloca -; OPT-NOT: <9 x i64> +; OPT-LABEL: @alloca_5xi64_v32( +; OPT: alloca [5 x i64] +; OPT-NOT: <5 x i64> ; LIMIT32: alloca -; LIMIT32-NOT: <9 x i64> -define void @func_alloca_9xi64_max256(ptr addrspace(1) %out, i32 %index) #2 { +; LIMIT32-NOT: <5 x i64> +define amdgpu_kernel void @alloca_5xi64_v32(ptr addrspace(1) %out, i32 %index) #2 { entry: - %tmp = alloca [9 x i64], addrspace(5) + %tmp = alloca [5 x i64], addrspace(5) store i64 0, ptr addrspace(5) %tmp - %tmp1 = getelementptr [9 x i64], ptr addrspace(5) %tmp, i32 0, i32 %index + %tmp1 = getelementptr [5 x i64], ptr addrspace(5) %tmp, i32 0, i32 %index %tmp2 = load i64, ptr addrspace(5) %tmp1 store i64 %tmp2, ptr addrspace(1) %out ret void } -attributes #0 = { "amdgpu-flat-work-group-size"="1,1024" } -attributes #1 = { 
"amdgpu-flat-work-group-size"="1,512" } -attributes #2 = { "amdgpu-flat-work-group-size"="1,256" } +attributes #0 = { "amdgpu-flat-work-group-size"="1,512" } +attributes #1 = { "amdgpu-flat-work-group-size"="1,1024" } +attributes #2 = { "amdgpu-flat-work-group-size"="1,1024" "amdgpu-waves-per-eu"="8,8" }