Index: lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -483,7 +483,9 @@
 }
 
 void AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I) {
-  if (!I.isStaticAlloca())
+  // Array allocations are probably not worth handling, since an allocation of
+  // the array type is the canonical form.
+  if (!I.isStaticAlloca() || I.isArrayAllocation())
     return;
 
   IRBuilder<> Builder(&I);
Index: test/CodeGen/AMDGPU/array-ptr-calc-i32.ll
===================================================================
--- test/CodeGen/AMDGPU/array-ptr-calc-i32.ll
+++ test/CodeGen/AMDGPU/array-ptr-calc-i32.ll
@@ -17,19 +17,19 @@
 ; with the appropriate offset. We should fold this into the store.
 
 ; SI-ALLOCA: v_add_i32_e32 [[PTRREG:v[0-9]+]], vcc, 0, v{{[0-9]+}}
-; SI-ALLOCA: buffer_store_dword {{v[0-9]+}}, [[PTRREG]], s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen offset:16
+; SI-ALLOCA: buffer_store_dword {{v[0-9]+}}, [[PTRREG]], s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen offset:64
 ; SI-ALLOCA: s_barrier
-; SI-ALLOCA: buffer_load_dword {{v[0-9]+}}, [[PTRREG]], s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen offset:16
+; SI-ALLOCA: buffer_load_dword {{v[0-9]+}}, [[PTRREG]], s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen offset:64
 ;
 ; FIXME: The AMDGPUPromoteAlloca pass should be able to convert this
 ; alloca to a vector. It currently fails because it does not know how
 ; to interpret:
-; getelementptr inbounds [4 x i32], [4 x i32]* %alloca, i32 1, i32 %b
+; getelementptr inbounds [16 x i32], [16 x i32]* %alloca, i32 1, i32 %b
 
-; SI-PROMOTE: v_add_i32_e32 [[PTRREG:v[0-9]+]], vcc, 16
+; SI-PROMOTE: v_add_i32_e32 [[PTRREG:v[0-9]+]], vcc, 64
 ; SI-PROMOTE: ds_write_b32 [[PTRREG]]
 define void @test_private_array_ptr_calc(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %inA, i32 addrspace(1)* noalias %inB) #0 {
-  %alloca = alloca [4 x i32], i32 4, align 16
+  %alloca = alloca [16 x i32], align 16
   %mbcnt.lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0);
   %tid = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %mbcnt.lo)
   %a_ptr = getelementptr inbounds i32, i32 addrspace(1)* %inA, i32 %tid
@@ -37,7 +37,7 @@
   %a = load i32, i32 addrspace(1)* %a_ptr
   %b = load i32, i32 addrspace(1)* %b_ptr
   %result = add i32 %a, %b
-  %alloca_ptr = getelementptr inbounds [4 x i32], [4 x i32]* %alloca, i32 1, i32 %b
+  %alloca_ptr = getelementptr inbounds [16 x i32], [16 x i32]* %alloca, i32 1, i32 %b
   store i32 %result, i32* %alloca_ptr, align 4
   ; Dummy call
   call void @llvm.amdgcn.s.barrier()
Index: test/CodeGen/AMDGPU/indirect-private-64.ll
===================================================================
--- test/CodeGen/AMDGPU/indirect-private-64.ll
+++ test/CodeGen/AMDGPU/indirect-private-64.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -mcpu=SI -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-ALLOCA -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=SI -mattr=+promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-PROMOTE -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-ALLOCA -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mattr=+promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-PROMOTE -check-prefix=SI %s
 ; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-ALLOCA -check-prefix=SI %s
 ; RUN: llc -march=amdgcn -mcpu=tonga -mattr=+promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-PROMOTE -check-prefix=SI %s
 
@@ -14,8 +14,8 @@
 ; SI-PROMOTE: ds_read_b64
 define void @private_access_f64_alloca(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in, i32 %b) #1 {
   %val = load double, double addrspace(1)* %in, align 8
-  %array = alloca double, i32 16, align 8
-  %ptr = getelementptr inbounds double, double* %array, i32 %b
+  %array = alloca [16 x double], align 8
+  %ptr = getelementptr inbounds [16 x double], [16 x double]* %array, i32 0, i32 %b
   store double %val, double* %ptr, align 8
   call void @llvm.amdgcn.s.barrier()
   %result = load double, double* %ptr, align 8
@@ -34,8 +34,8 @@
 ; SI-PROMOTE: ds_read_b64
 define void @private_access_v2f64_alloca(<2 x double> addrspace(1)* noalias %out, <2 x double> addrspace(1)* noalias %in, i32 %b) #1 {
   %val = load <2 x double>, <2 x double> addrspace(1)* %in, align 16
-  %array = alloca <2 x double>, i32 16, align 16
-  %ptr = getelementptr inbounds <2 x double>, <2 x double>* %array, i32 %b
+  %array = alloca [8 x <2 x double>], align 16
+  %ptr = getelementptr inbounds [8 x <2 x double>], [8 x <2 x double>]* %array, i32 0, i32 %b
   store <2 x double> %val, <2 x double>* %ptr, align 16
   call void @llvm.amdgcn.s.barrier()
   %result = load <2 x double>, <2 x double>* %ptr, align 16
@@ -52,8 +52,8 @@
 ; SI-PROMOTE: ds_read_b64
 define void @private_access_i64_alloca(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i32 %b) #1 {
   %val = load i64, i64 addrspace(1)* %in, align 8
-  %array = alloca i64, i32 16, align 8
-  %ptr = getelementptr inbounds i64, i64* %array, i32 %b
+  %array = alloca [8 x i64], align 8
+  %ptr = getelementptr inbounds [8 x i64], [8 x i64]* %array, i32 0, i32 %b
   store i64 %val, i64* %ptr, align 8
   call void @llvm.amdgcn.s.barrier()
   %result = load i64, i64* %ptr, align 8
@@ -72,8 +72,8 @@
 ; SI-PROMOTE: ds_read_b64
 define void @private_access_v2i64_alloca(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %in, i32 %b) #1 {
   %val = load <2 x i64>, <2 x i64> addrspace(1)* %in, align 16
-  %array = alloca <2 x i64>, i32 16, align 16
-  %ptr = getelementptr inbounds <2 x i64>, <2 x i64>* %array, i32 %b
+  %array = alloca [8 x <2 x i64>], align 16
+  %ptr = getelementptr inbounds [8 x <2 x i64>], [8 x <2 x i64>]* %array, i32 0, i32 %b
   store <2 x i64> %val, <2 x i64>* %ptr, align 16
   call void @llvm.amdgcn.s.barrier()
   %result = load <2 x i64>, <2 x i64>* %ptr, align 16
Index: test/CodeGen/AMDGPU/promote-alloca-array-allocation.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/promote-alloca-array-allocation.ll
@@ -0,0 +1,50 @@
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -amdgpu-promote-alloca < %s | FileCheck %s
+
+; Make sure this allocates the correct size if the alloca has a non-0
+; number of elements.
+
+; CHECK-LABEL: @array_alloca(
+; CHECK: %stack = alloca i32, i32 5, align 4
+define void @array_alloca(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
+entry:
+  %stack = alloca i32, i32 5, align 4
+  %ld0 = load i32, i32 addrspace(1)* %in, align 4
+  %arrayidx1 = getelementptr inbounds i32, i32* %stack, i32 %ld0
+  store i32 4, i32* %arrayidx1, align 4
+  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
+  %ld1 = load i32, i32 addrspace(1)* %arrayidx2, align 4
+  %arrayidx3 = getelementptr inbounds i32, i32* %stack, i32 %ld1
+  store i32 5, i32* %arrayidx3, align 4
+  %arrayidx10 = getelementptr inbounds i32, i32* %stack, i32 0
+  %ld2 = load i32, i32* %arrayidx10, align 4
+  store i32 %ld2, i32 addrspace(1)* %out, align 4
+  %arrayidx12 = getelementptr inbounds i32, i32* %stack, i32 1
+  %ld3 = load i32, i32* %arrayidx12
+  %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 1
+  store i32 %ld3, i32 addrspace(1)* %arrayidx13
+  ret void
+}
+
+; CHECK-LABEL: @array_alloca_dynamic(
+; CHECK: %stack = alloca i32, i32 %size, align 4
+define void @array_alloca_dynamic(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in, i32 %size) #0 {
+entry:
+  %stack = alloca i32, i32 %size, align 4
+  %ld0 = load i32, i32 addrspace(1)* %in, align 4
+  %arrayidx1 = getelementptr inbounds i32, i32* %stack, i32 %ld0
+  store i32 4, i32* %arrayidx1, align 4
+  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
+  %ld1 = load i32, i32 addrspace(1)* %arrayidx2, align 4
+  %arrayidx3 = getelementptr inbounds i32, i32* %stack, i32 %ld1
+  store i32 5, i32* %arrayidx3, align 4
+  %arrayidx10 = getelementptr inbounds i32, i32* %stack, i32 0
+  %ld2 = load i32, i32* %arrayidx10, align 4
+  store i32 %ld2, i32 addrspace(1)* %out, align 4
+  %arrayidx12 = getelementptr inbounds i32, i32* %stack, i32 1
+  %ld3 = load i32, i32* %arrayidx12
+  %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 1
+  store i32 %ld3, i32 addrspace(1)* %arrayidx13
+  ret void
+}
+
+attributes #0 = { nounwind }
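
Note (illustration only, not part of the patch): a minimal sketch of the two alloca forms
handleAlloca() now distinguishes. AllocaInst::isArrayAllocation() returns true whenever the
element-count operand is not the constant 1, so after this change only the first alloca below
is still considered for promotion. The function and value names here are made up for the
example.

  define void @alloca_forms() {
    ; Canonical form: the array type itself is allocated (implicit count of 1),
    ; so isArrayAllocation() is false and the pass may still promote it.
    %canonical = alloca [5 x i32], align 4

    ; "Array allocation": scalar element type with an explicit count operand,
    ; like %stack in the new promote-alloca-array-allocation.ll test.
    ; isArrayAllocation() is true, so handleAlloca() now returns early and the
    ; alloca stays in private memory.
    %array_form = alloca i32, i32 5, align 4
    ret void
  }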