Index: lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
===================================================================
--- lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -67,6 +67,22 @@
 
   unsigned getAddressSpaceOperand(Value *I);
 
+  unsigned getAlignment(LoadInst *LI) const {
+    unsigned Align = LI->getAlignment();
+    if (Align != 0)
+      return Align;
+
+    return DL.getABITypeAlignment(LI->getType());
+  }
+
+  unsigned getAlignment(StoreInst *SI) const {
+    unsigned Align = SI->getAlignment();
+    if (Align != 0)
+      return Align;
+
+    return DL.getABITypeAlignment(SI->getValueOperand()->getType());
+  }
+
   /// Checks if it is a consecutive access.
   bool isConsecutiveAccess(Value *A, Value *B);
 
@@ -655,7 +671,7 @@
   });
 
   // Check alignment restrictions.
-  unsigned Alignment = S0->getAlignment();
+  unsigned Alignment = getAlignment(S0);
 
   // If the store is going to be misaligned, don't vectorize it.
   // TODO: Check TLI.allowsMisalignedMemoryAccess
@@ -768,7 +784,7 @@
   }
 
   // Check alignment restrictions.
-  unsigned Alignment = L0->getAlignment();
+  unsigned Alignment = getAlignment(L0);
 
   // If the load is going to be misaligned, don't vectorize it.
   // TODO: Check TLI.allowsMisalignedMemoryAccess
Index: test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll
===================================================================
--- test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll
+++ test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll
@@ -2,6 +2,8 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mattr=+max-private-element-size-8 -load-store-vectorizer -S -o - %s | FileCheck -check-prefix=ELT8 -check-prefix=ALL %s
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mattr=+max-private-element-size-16 -load-store-vectorizer -S -o - %s | FileCheck -check-prefix=ELT16 -check-prefix=ALL %s
 
+target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+
 ; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v4i32
 ; ELT4: store i32
 ; ELT4: store i32
@@ -31,7 +33,7 @@
   %out.gep.2 = getelementptr i8, i8* %out, i32 2
   %out.gep.3 = getelementptr i8, i8* %out, i32 3
 
-  store i8 9, i8* %out
+  store i8 9, i8* %out, align 4
   store i8 1, i8* %out.gep.1
   store i8 23, i8* %out.gep.2
   store i8 19, i8* %out.gep.3
@@ -43,7 +45,7 @@
 define void @merge_private_store_4_vector_elts_loads_v2i16(i16* %out) #0 {
   %out.gep.1 = getelementptr i16, i16* %out, i32 1
 
-  store i16 9, i16* %out
+  store i16 9, i16* %out, align 4
   store i16 12, i16* %out.gep.1
   ret void
 }
Index: test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
===================================================================
--- test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
+++ test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
@@ -1,6 +1,8 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -load-store-vectorizer -S -o - %s | FileCheck %s
 
 ; Copy of test/CodeGen/AMDGPU/merge-stores.ll with some additions
+target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+
 ; TODO: Vector element tests
 ; TODO: Non-zero base offset for load and store combinations
 ; TODO: Same base addrspacecasted
@@ -17,7 +19,8 @@
 }
 
 ; CHECK-LABEL: @merge_global_store_2_constants_i8_natural_align
-; CHECK: store <2 x i8> , <2 x i8> addrspace(1)* %{{[0-9]+$}}
+; CHECK: store i8
+; CHECK: store i8
 define void @merge_global_store_2_constants_i8_natural_align(i8 addrspace(1)* %out) #0 {
   %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
 
@@ -47,7 +50,8 @@
 }
 
 ; CHECK-LABEL: @merge_global_store_2_constants_i16_natural_align
-; CHECK: store <2 x i16> , <2 x i16> addrspace(1)* %{{[0-9]+$}}
+; CHECK: store i16
+; CHECK: store i16
 define void @merge_global_store_2_constants_i16_natural_align(i16 addrspace(1)* %out) #0 {
   %out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
 
@@ -57,7 +61,8 @@
 }
 
 ; CHECK-LABEL: @merge_global_store_2_constants_half_natural_align
-; CHECK: store <2 x half> , <2 x half> addrspace(1)* %{{[0-9]+$}}
+; CHECK: store half
+; CHECK: store half
 define void @merge_global_store_2_constants_half_natural_align(half addrspace(1)* %out) #0 {
   %out.gep.1 = getelementptr half, half addrspace(1)* %out, i32 1
 
@@ -67,7 +72,7 @@
 }
 
 ; CHECK-LABEL: @merge_global_store_2_constants_i32
-; CHECK: store <2 x i32> , <2 x i32> addrspace(1)* %{{[0-9]+$}}
+; CHECK: store <2 x i32> , <2 x i32> addrspace(1)* %{{[0-9]+}}, align 4
 define void @merge_global_store_2_constants_i32(i32 addrspace(1)* %out) #0 {
   %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
 
@@ -77,7 +82,7 @@
 }
 
 ; CHECK-LABEL: @merge_global_store_2_constants_i32_f32
-; CHECK: store <2 x i32> , <2 x i32> addrspace(1)* %{{[0-9]+$}}
+; CHECK: store <2 x i32> , <2 x i32> addrspace(1)* %{{[0-9]+}}, align 4
 define void @merge_global_store_2_constants_i32_f32(i32 addrspace(1)* %out) #0 {
   %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
   %out.gep.1.bc = bitcast i32 addrspace(1)* %out.gep.1 to float addrspace(1)*
@@ -97,7 +102,7 @@
 }
 
 ; CHECK-LABEL: @merge_global_store_4_constants_i32
-; CHECK: store <4 x i32> , <4 x i32> addrspace(1)* %{{[0-9]+$}}
+; CHECK: store <4 x i32> , <4 x i32> addrspace(1)* %{{[0-9]+}}, align 4
 define void @merge_global_store_4_constants_i32(i32 addrspace(1)* %out) #0 {
   %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
   %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
@@ -126,7 +131,7 @@
 
 ; First store is out of order.
 ; CHECK-LABEL: @merge_global_store_4_constants_f32
-; CHECK: store <4 x float> , <4 x float> addrspace(1)* %{{[0-9]+$}}
+; CHECK: store <4 x float> , <4 x float> addrspace(1)* %{{[0-9]+}}, align 4
 define void @merge_global_store_4_constants_f32(float addrspace(1)* %out) #0 {
   %out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
   %out.gep.2 = getelementptr float, float addrspace(1)* %out, i32 2
@@ -140,7 +145,7 @@
 }
 
 ; CHECK-LABEL: @merge_global_store_4_constants_mixed_i32_f32
-; CHECK: store <4 x float> , <4 x float> addrspace(1)* %{{[0-9]+}}
+; CHECK: store <4 x float> , <4 x float> addrspace(1)* %{{[0-9]+}}, align 4
 define void @merge_global_store_4_constants_mixed_i32_f32(float addrspace(1)* %out) #0 {
   %out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
   %out.gep.2 = getelementptr float, float addrspace(1)* %out, i32 2
@@ -157,7 +162,7 @@
 }
 
 ; CHECK-LABEL: @merge_global_store_3_constants_i32
-; CHECK: store <3 x i32> , <3 x i32> addrspace(1)* %{{[0-9]+$}}
+; CHECK: store <3 x i32> , <3 x i32> addrspace(1)* %{{[0-9]+}}, align 4
 define void @merge_global_store_3_constants_i32(i32 addrspace(1)* %out) #0 {
   %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
   %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
@@ -169,7 +174,7 @@
 }
 
 ; CHECK-LABEL: @merge_global_store_2_constants_i64
-; CHECK: store <2 x i64> , <2 x i64> addrspace(1)* %{{[0-9]+$}}
+; CHECK: store <2 x i64> , <2 x i64> addrspace(1)* %{{[0-9]+}}, align 8
 define void @merge_global_store_2_constants_i64(i64 addrspace(1)* %out) #0 {
   %out.gep.1 = getelementptr i64, i64 addrspace(1)* %out, i64 1
 
@@ -179,8 +184,8 @@
 }
 
 ; CHECK-LABEL: @merge_global_store_4_constants_i64
-; CHECK: store <2 x i64> , <2 x i64> addrspace(1)* %{{[0-9]+$}}
-; CHECK: store <2 x i64> , <2 x i64> addrspace(1)* %{{[0-9]+$}}
+; CHECK: store <2 x i64> , <2 x i64> addrspace(1)* %{{[0-9]+}}, align 8
+; CHECK: store <2 x i64> , <2 x i64> addrspace(1)* %{{[0-9]+}}, align 8
 define void @merge_global_store_4_constants_i64(i64 addrspace(1)* %out) #0 {
   %out.gep.1 = getelementptr i64, i64 addrspace(1)* %out, i64 1
   %out.gep.2 = getelementptr i64, i64 addrspace(1)* %out, i64 2
@@ -427,8 +432,14 @@
 }
 
 ; CHECK-LABEL: @merge_global_store_4_adjacent_loads_i8_natural_align
-; CHECK: load <4 x i8>
-; CHECK: store <4 x i8>
+; CHECK: load i8
+; CHECK: load i8
+; CHECK: load i8
+; CHECK: load i8
+; CHECK: store i8
+; CHECK: store i8
+; CHECK: store i8
+; CHECK: store i8
 define void @merge_global_store_4_adjacent_loads_i8_natural_align(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
   %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i8 1
   %out.gep.2 = getelementptr i8, i8 addrspace(1)* %out, i8 2
@@ -481,7 +492,7 @@
 }
 
 ; CHECK-LABEL: @merge_local_store_2_constants_i32
-; CHECK: store <2 x i32> , <2 x i32> addrspace(3)* %{{[0-9]+$}}
+; CHECK: store <2 x i32> , <2 x i32> addrspace(3)* %{{[0-9]+}}, align 4
 define void @merge_local_store_2_constants_i32(i32 addrspace(3)* %out) #0 {
   %out.gep.1 = getelementptr i32, i32 addrspace(3)* %out, i32 1
 
Index: test/Transforms/LoadStoreVectorizer/AMDGPU/missing-alignment.ll
===================================================================
--- /dev/null
+++ test/Transforms/LoadStoreVectorizer/AMDGPU/missing-alignment.ll
@@ -0,0 +1,30 @@
+; RUN: opt -mtriple=amdgcn-- -load-store-vectorizer -S -o - %s | FileCheck %s
+
+@lds = internal addrspace(3) global [512 x float] undef, align 4
+
+; The original load has an implicit alignment of 4, and should not
+; increase to an align 8 load.
+
+; CHECK-LABEL: @load_keep_base_alignment_missing_align(
+; CHECK: load <2 x float>, <2 x float> addrspace(3)* %{{[0-9]+}}, align 4
+define void @load_keep_base_alignment_missing_align(float addrspace(1)* %out) {
+  %ptr0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 11
+  %val0 = load float, float addrspace(3)* %ptr0
+
+  %ptr1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 12
+  %val1 = load float, float addrspace(3)* %ptr1
+  %add = fadd float %val0, %val1
+  store float %add, float addrspace(1)* %out
+  ret void
+}
+
+
+; CHECK-LABEL: @store_keep_base_alignment_missing_align(
+; CHECK: store <2 x float> zeroinitializer, <2 x float> addrspace(3)* %{{[0-9]+}}, align 4
+define void @store_keep_base_alignment_missing_align() {
+  %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 1
+  %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 2
+  store float 0.0, float addrspace(3)* %arrayidx0
+  store float 0.0, float addrspace(3)* %arrayidx1
+  ret void
+}
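
The getAlignment() helpers added above fall back to DataLayout::getABITypeAlignment() whenever a load or store carries no explicit 'align'. As a rough illustration of what that fallback yields for the datalayout string used in the AMDGPU tests, here is a small standalone sketch (not part of the patch; it assumes the LLVM C++ API contemporary with this change, where getABITypeAlignment() returns an unsigned):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  // Datalayout string from the tests above.
  DataLayout DL("e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-"
                "p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-"
                "v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64");

  // float has ABI alignment 4, so an unannotated 'load float' is treated as
  // align 4 and a merged <2 x float> access must not be promoted to align 8.
  outs() << "float: " << DL.getABITypeAlignment(Type::getFloatTy(Ctx)) << "\n";

  // i8/i16 fall back to ABI alignment 1/2, which is why the *_natural_align
  // tests above no longer expect a merged vector access.
  outs() << "i8:    " << DL.getABITypeAlignment(Type::getInt8Ty(Ctx)) << "\n";
  outs() << "i16:   " << DL.getABITypeAlignment(Type::getInt16Ty(Ctx)) << "\n";
  return 0;
}

Linked against LLVM, this should print 4, 1, and 2: the alignments the vectorizer now assumes when the IR omits them, instead of the previous behavior of treating a missing alignment as unconstrained and then writing back a larger one.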