Index: lib/Transforms/Vectorize/LoadStoreVectorizer.cpp =================================================================== --- lib/Transforms/Vectorize/LoadStoreVectorizer.cpp +++ lib/Transforms/Vectorize/LoadStoreVectorizer.cpp @@ -115,6 +115,11 @@ /// Collects load and store instructions to vectorize. std::pair<InstrListMap, InstrListMap> collectInstructions(BasicBlock *BB); + /// See if pointer \p V is based on an alloca that can have its alignment + /// increased. \returns the alloca which can be modified, otherwise null. + AllocaInst *canAdjustAllocaAlignment(Value *V, unsigned EltSize, + unsigned Align); + /// Processes the collected instructions, the \p Map. The values of \p Map /// should be all loads or all stores. bool vectorizeChains(InstrListMap &Map); @@ -694,6 +699,31 @@ return Changed; } +AllocaInst *Vectorizer::canAdjustAllocaAlignment(Value *BasePtr, + unsigned EltSize, + unsigned Align) { + // If we're storing to an object on the stack, we control its alignment, so we + // can cheat and change it! + if (EltSize % StackAdjustedAlignment == 0) { + // If the element size is a multiple of the stack element alignment, we can + // always change the alignment. + + Value *V = GetUnderlyingObject(BasePtr, DL); + return dyn_cast<AllocaInst>(V); + } + + // If the element size is smaller than the natural stack alignment, we need to + // make sure an odd dynamic offset isn't being added. + int64_t Offset = 0; + Value *V = GetPointerBaseWithConstantOffset(BasePtr, Offset, DL); + if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) { + if (Offset % StackAdjustedAlignment == 0) + return AI; + } + + return nullptr; +} + bool Vectorizer::vectorizeStoreChain( ArrayRef<Instruction *> Chain, SmallPtrSet<Instruction *, 16> *InstructionsProcessed) { @@ -742,7 +772,8 @@ // Store size should be 1B, 2B or multiple of 4B. // TODO: Target hook for size constraint? 
- unsigned SzInBytes = (Sz / 8) * ChainSize; + unsigned EltSzInBytes = Sz / 8; + unsigned SzInBytes = EltSzInBytes * ChainSize; if (SzInBytes > 2 && SzInBytes % 4 != 0) { DEBUG(dbgs() << "LSV: Size should be 1B, 2B " "or multiple of 4B. Splitting.\n"); @@ -790,15 +821,13 @@ if (S0->getPointerAddressSpace() != 0) return false; - // If we're storing to an object on the stack, we control its alignment, - // so we can cheat and change it! - Value *V = GetUnderlyingObject(S0->getPointerOperand(), DL); - if (AllocaInst *AI = dyn_cast_or_null<AllocaInst>(V)) { - AI->setAlignment(StackAdjustedAlignment); + if (AllocaInst *AI = canAdjustAllocaAlignment(S0->getPointerOperand(), + EltSzInBytes, Alignment)) { Alignment = StackAdjustedAlignment; - } else { + if (AI->getAlignment() < Alignment) + AI->setAlignment(Alignment); + } else return false; - } } BasicBlock::iterator First, Last; @@ -899,7 +928,8 @@ // Load size should be 1B, 2B or multiple of 4B. // TODO: Should size constraint be a target hook? - unsigned SzInBytes = (Sz / 8) * ChainSize; + unsigned EltSzInBytes = Sz / 8; + unsigned SzInBytes = EltSzInBytes * ChainSize; if (SzInBytes > 2 && SzInBytes % 4 != 0) { DEBUG(dbgs() << "LSV: Size should be 1B, 2B " "or multiple of 4B. Splitting.\n"); @@ -940,15 +970,13 @@ if (L0->getPointerAddressSpace() != 0) return false; - // If we're loading from an object on the stack, we control its alignment, - // so we can cheat and change it! 
- Value *V = GetUnderlyingObject(L0->getPointerOperand(), DL); - if (AllocaInst *AI = dyn_cast_or_null<AllocaInst>(V)) { - AI->setAlignment(StackAdjustedAlignment); + if (AllocaInst *AI = canAdjustAllocaAlignment(L0->getPointerOperand(), + EltSzInBytes, Alignment)) { Alignment = StackAdjustedAlignment; - } else { + if (AI->getAlignment() < Alignment) + AI->setAlignment(Alignment); + } else return false; - } } DEBUG({ @@ -1025,8 +1053,9 @@ bool Vectorizer::accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace, unsigned Alignment) { - if (Alignment % SzInBytes == 0) + if (Alignment >= SzInBytes) return false; + bool Fast = false; bool Allows = TTI.allowsMisalignedMemoryAccesses(F.getParent()->getContext(), SzInBytes * 8, AddressSpace, Index: test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll =================================================================== --- /dev/null +++ test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll @@ -0,0 +1,124 @@ +; RUN: opt -S -load-store-vectorizer -mattr=-unaligned-buffer-access,+max-private-element-size-16 < %s | FileCheck -check-prefix=ALIGNED -check-prefix=ALL %s +; RUN: opt -S -load-store-vectorizer -mattr=+unaligned-buffer-access,+max-private-element-size-16 < %s | FileCheck -check-prefix=UNALIGNED -check-prefix=ALL %s + +target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64" +target triple = "amdgcn--" + +; ALL-LABEL: @load_unknown_offset_align1_i8( +; ALL: alloca [128 x i8], align 1 +; UNALIGNED: load <2 x i8>, <2 x i8>* %{{[0-9]+}}, align 1{{$}} + +; ALIGNED: load i8, i8* %ptr0, align 1{{$}} +; ALIGNED: load i8, i8* %ptr1, align 1{{$}} +define void @load_unknown_offset_align1_i8(i8 addrspace(1)* noalias %out, i32 %offset) #0 { + %alloca = alloca [128 x i8], align 1 + %ptr0 = getelementptr inbounds [128 x i8], [128 x i8]* %alloca, i32 0, i32 %offset + %val0 = load i8, i8* 
%ptr0, align 1 + %ptr1 = getelementptr inbounds i8, i8* %ptr0, i32 1 + %val1 = load i8, i8* %ptr1, align 1 + %add = add i8 %val0, %val1 + store i8 %add, i8 addrspace(1)* %out + ret void +} + +; ALL-LABEL: @load_unknown_offset_align1_i16( +; ALL: alloca [128 x i16], align 1{{$}} +; UNALIGNED: load <2 x i16>, <2 x i16>* %{{[0-9]+}}, align 1{{$}} + +; ALIGNED: load i16, i16* %ptr0, align 1{{$}} +; ALIGNED: load i16, i16* %ptr1, align 1{{$}} +define void @load_unknown_offset_align1_i16(i16 addrspace(1)* noalias %out, i32 %offset) #0 { + %alloca = alloca [128 x i16], align 1 + %ptr0 = getelementptr inbounds [128 x i16], [128 x i16]* %alloca, i32 0, i32 %offset + %val0 = load i16, i16* %ptr0, align 1 + %ptr1 = getelementptr inbounds i16, i16* %ptr0, i32 1 + %val1 = load i16, i16* %ptr1, align 1 + %add = add i16 %val0, %val1 + store i16 %add, i16 addrspace(1)* %out + ret void +} + +; Although the offset is unknown here, we know it is a multiple of the element size. +; ALL-LABEL: @load_unknown_offset_align1_i32( +; UNALIGNED: alloca [128 x i32], align 1 +; UNALIGNED: load <2 x i32>, <2 x i32>* %{{[0-9]+}}, align 1{{$}} + +; ALIGNED: alloca [128 x i32], align 4 +; ALIGNED: load <2 x i32>, <2 x i32>* %{{[0-9]+}}, align 4{{$}} +define void @load_unknown_offset_align1_i32(i32 addrspace(1)* noalias %out, i32 %offset) #0 { + %alloca = alloca [128 x i32], align 1 + %ptr0 = getelementptr inbounds [128 x i32], [128 x i32]* %alloca, i32 0, i32 %offset + %val0 = load i32, i32* %ptr0, align 1 + %ptr1 = getelementptr inbounds i32, i32* %ptr0, i32 1 + %val1 = load i32, i32* %ptr1, align 1 + %add = add i32 %val0, %val1 + store i32 %add, i32 addrspace(1)* %out + ret void +} + +; FIXME: Should always increase alignment of the load +; Make sure alloca alignment isn't decreased +; ALL-LABEL: @load_alloca16_unknown_offset_align1_i32( +; ALL: alloca [128 x i32], align 16 + +; UNALIGNED: load <2 x i32>, <2 x i32>* %{{[0-9]+}}, align 1{{$}} +; ALIGNED: load <2 x i32>, <2 x i32>* %{{[0-9]+}}, 
align 4{{$}} +define void @load_alloca16_unknown_offset_align1_i32(i32 addrspace(1)* noalias %out, i32 %offset) #0 { + %alloca = alloca [128 x i32], align 16 + %ptr0 = getelementptr inbounds [128 x i32], [128 x i32]* %alloca, i32 0, i32 %offset + %val0 = load i32, i32* %ptr0, align 1 + %ptr1 = getelementptr inbounds i32, i32* %ptr0, i32 1 + %val1 = load i32, i32* %ptr1, align 1 + %add = add i32 %val0, %val1 + store i32 %add, i32 addrspace(1)* %out + ret void +} + +; ALL-LABEL: @store_unknown_offset_align1_i8( +; ALL: alloca [128 x i8], align 1 +; UNALIGNED: store <2 x i8> <i8 9, i8 10>, <2 x i8>* %{{[0-9]+}}, align 1{{$}} + +; ALIGNED: store i8 9, i8* %ptr0, align 1{{$}} +; ALIGNED: store i8 10, i8* %ptr1, align 1{{$}} +define void @store_unknown_offset_align1_i8(i8 addrspace(1)* noalias %out, i32 %offset) #0 { + %alloca = alloca [128 x i8], align 1 + %ptr0 = getelementptr inbounds [128 x i8], [128 x i8]* %alloca, i32 0, i32 %offset + store i8 9, i8* %ptr0, align 1 + %ptr1 = getelementptr inbounds i8, i8* %ptr0, i32 1 + store i8 10, i8* %ptr1, align 1 + ret void +} + +; ALL-LABEL: @store_unknown_offset_align1_i16( +; ALL: alloca [128 x i16], align 1 +; UNALIGNED: store <2 x i16> <i16 9, i16 10>, <2 x i16>* %{{[0-9]+}}, align 1{{$}} + +; ALIGNED: store i16 9, i16* %ptr0, align 1{{$}} +; ALIGNED: store i16 10, i16* %ptr1, align 1{{$}} +define void @store_unknown_offset_align1_i16(i16 addrspace(1)* noalias %out, i32 %offset) #0 { + %alloca = alloca [128 x i16], align 1 + %ptr0 = getelementptr inbounds [128 x i16], [128 x i16]* %alloca, i32 0, i32 %offset + store i16 9, i16* %ptr0, align 1 + %ptr1 = getelementptr inbounds i16, i16* %ptr0, i32 1 + store i16 10, i16* %ptr1, align 1 + ret void +} + +; Although the offset is unknown here, we know it is a multiple of the element size. 
+; ALL-LABEL: @store_unknown_offset_align1_i32( +; UNALIGNED: alloca [128 x i32], align 1 +; UNALIGNED: store <2 x i32> <i32 9, i32 10>, <2 x i32>* %{{[0-9]+}}, align 1{{$}} + +; ALIGNED: alloca [128 x i32], align 4 +; ALIGNED: store <2 x i32> <i32 9, i32 10>, <2 x i32>* %{{[0-9]+}}, align 4{{$}} +define void @store_unknown_offset_align1_i32(i32 addrspace(1)* noalias %out, i32 %offset) #0 { + %alloca = alloca [128 x i32], align 1 + %ptr0 = getelementptr inbounds [128 x i32], [128 x i32]* %alloca, i32 0, i32 %offset + store i32 9, i32* %ptr0, align 1 + %ptr1 = getelementptr inbounds i32, i32* %ptr0, i32 1 + store i32 10, i32* %ptr1, align 1 + ret void +} + +attributes #0 = { nounwind } +