Index: include/llvm/Target/TargetLowering.h
===================================================================
--- include/llvm/Target/TargetLowering.h
+++ include/llvm/Target/TargetLowering.h
@@ -976,6 +976,15 @@
     return false;
   }
 
+  /// Return true if the pointer arguments to CI should be aligned by aligning
+  /// the object whose address is being passed. If so, then MinSize is set to
+  /// the minimum size the object must be for its alignment to be increased,
+  /// and PrefAlign is set to the preferred alignment.
+  virtual bool shouldAlignPointerArgs(CallInst */*CI*/, unsigned &/*MinSize*/,
+                                      unsigned &/*PrefAlign*/) const {
+    return false;
+  }
+
   //===--------------------------------------------------------------------===//
   /// \name Helpers for TargetTransformInfo implementations
   /// @{
Index: lib/CodeGen/CodeGenPrepare.cpp
===================================================================
--- lib/CodeGen/CodeGenPrepare.cpp
+++ lib/CodeGen/CodeGenPrepare.cpp
@@ -1228,6 +1228,42 @@
     return true;
   }
 
+  const DataLayout *TD = TLI ? TLI->getDataLayout() : nullptr;
+
+  // Align the pointer arguments to this call if the target thinks it's a good
+  // idea.
+  unsigned MinSize, PrefAlign;
+  if (TLI && TD && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
+    for (auto &Arg : CI->arg_operands()) {
+      // We want to align both objects whose address is used directly and
+      // objects whose address is used in casts and GEPs, though it only makes
+      // sense for GEPs if the offset is a multiple of the desired alignment and
+      // if size - offset meets the size threshold.
+      if (!Arg->getType()->isPointerTy())
+        continue;
+      APInt Offset(TD->getPointerSizeInBits(
+                     cast<PointerType>(Arg->getType())->getAddressSpace()), 0);
+      Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*TD, Offset);
+      uint64_t Offset2 = Offset.getLimitedValue();
+      AllocaInst *AI;
+      if ((Offset2 & (PrefAlign-1)) == 0 &&
+          (AI = dyn_cast<AllocaInst>(Val)) &&
+          AI->getAlignment() < PrefAlign &&
+          TD->getTypeAllocSize(AI->getAllocatedType()) - Offset2 >= MinSize)
+        AI->setAlignment(PrefAlign);
+      // TODO: Also align GlobalVariables
+    }
+    // If this is a memcpy (or similar) then we may be able to improve the
+    // alignment.
+    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
+      unsigned Align = getKnownAlignment(MI->getDest(), *TD);
+      if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
+        Align = std::min(Align, getKnownAlignment(MTI->getSource(), *TD));
+      if (Align > MI->getAlignment())
+        MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), Align));
+    }
+  }
+
   IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
   if (II) {
     switch (II->getIntrinsicID()) {
Index: lib/Target/ARM/ARMISelLowering.h
===================================================================
--- lib/Target/ARM/ARMISelLowering.h
+++ lib/Target/ARM/ARMISelLowering.h
@@ -362,6 +362,9 @@
       return true;
     }
 
+    bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
+                                unsigned &PrefAlign) const override;
+
     /// createFastISel - This method returns a target specific FastISel object,
     /// or null if the target does not support "fast" ISel.
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
Index: lib/Target/ARM/ARMISelLowering.cpp
===================================================================
--- lib/Target/ARM/ARMISelLowering.cpp
+++ lib/Target/ARM/ARMISelLowering.cpp
@@ -42,6 +42,7 @@
 #include "llvm/IR/Instruction.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Type.h"
 #include "llvm/MC/MCSectionMachO.h"
 #include "llvm/Support/CommandLine.h"
@@ -1163,6 +1164,18 @@
   return TargetLowering::getRegClassFor(VT);
 }
 
+// memcpy, and other memory intrinsics, typically try to use LDM/STM if the
+// source/dest is aligned and the copy size is large enough. We therefore want
+// to align such objects passed to memory intrinsics.
+bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
+                                               unsigned &PrefAlign) const {
+  if (!isa<MemIntrinsic>(CI))
+    return false;
+  MinSize = 8;
+  PrefAlign = 4;
+  return true;
+}
+
 // Create a fast isel object.
 FastISel *
 ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
Index: test/CodeGen/ARM/memfunc.ll
===================================================================
--- test/CodeGen/ARM/memfunc.ll
+++ test/CodeGen/ARM/memfunc.ll
@@ -1,31 +1,104 @@
-; RUN: llc < %s -mtriple=armv7-apple-ios -o - | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv7m-none-macho -o - | FileCheck %s --check-prefix=DARWIN
-; RUN: llc < %s -mtriple=arm-none-eabi -o - | FileCheck --check-prefix=EABI %s
-; RUN: llc < %s -mtriple=arm-none-eabihf -o - | FileCheck --check-prefix=EABI %s
+; RUN: llc < %s -mtriple=armv7-apple-ios -o - | FileCheck %s --check-prefix=CHECK-IOS --check-prefix=CHECK
+; RUN: llc < %s -mtriple=thumbv7m-none-macho -o - | FileCheck %s --check-prefix=CHECK-DARWIN --check-prefix=CHECK
+; RUN: llc < %s -mtriple=arm-none-eabi -o - | FileCheck %s --check-prefix=CHECK-EABI --check-prefix=CHECK
+; RUN: llc < %s -mtriple=arm-none-eabihf -o - | FileCheck %s --check-prefix=CHECK-EABI --check-prefix=CHECK
 
 @from = common global [500 x i32] zeroinitializer, align 4
 @to = common global [500 x i32] zeroinitializer, align 4
 
-define void @f() {
+define void @f1() {
 entry:
+  ; CHECK-LABEL: f1
 
-  ; CHECK: memmove
-  ; EABI: __aeabi_memmove
-  call void @llvm.memmove.p0i8.p0i8.i32(i8* bitcast ([500 x i32]* @from to i8*), i8* bitcast ([500 x i32]* @to to i8*), i32 500, i32 0, i1 false)
-
-  ; CHECK: memcpy
-  ; EABI: __aeabi_memcpy
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* bitcast ([500 x i32]* @from to i8*), i8* bitcast ([500 x i32]* @to to i8*), i32 500, i32 0, i1 false)
-
-  ; EABI memset swaps arguments
-  ; CHECK: mov r1, #0
-  ; CHECK: memset
-  ; DARWIN: movs r1, #0
-  ; DARWIN: memset
-  ; EABI: mov r2, #0
-  ; EABI: __aeabi_memset
-  call void @llvm.memset.p0i8.i32(i8* bitcast ([500 x i32]* @from to i8*), i8 0, i32 500, i32 0, i1 false)
-  unreachable
+  ; CHECK-IOS: memmove
+  ; CHECK-DARWIN: memmove
+  ; CHECK-EABI: __aeabi_memmove
+  call void @llvm.memmove.p0i8.p0i8.i32(i8* bitcast ([500 x i32]* @from to i8*), i8* bitcast ([500 x i32]* @to to i8*), i32 500, i32 0, i1 false)
+
+  ; CHECK-IOS: memcpy
+  ; CHECK-DARWIN: memcpy
+  ; CHECK-EABI: __aeabi_memcpy
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* bitcast ([500 x i32]* @from to i8*), i8* bitcast ([500 x i32]* @to to i8*), i32 500, i32 0, i1 false)
+
+  ; EABI memset swaps arguments
+  ; CHECK-IOS: mov r1, #0
+  ; CHECK-IOS: memset
+  ; CHECK-DARWIN: movs r1, #0
+  ; CHECK-DARWIN: memset
+  ; CHECK-EABI: mov r2, #0
+  ; CHECK-EABI: __aeabi_memset
+  call void @llvm.memset.p0i8.i32(i8* bitcast ([500 x i32]* @from to i8*), i8 0, i32 500, i32 0, i1 false)
+  unreachable
+}
+
+; Check that alloca arguments to memory intrinsics are automatically aligned if at least 8 bytes in size
+define void @f2(i8* %dest, i32 %n) {
+entry:
+  ; CHECK-LABEL: f2
+
+  ; CHECK: {{add r1, sp, #28|sub r1, r7, #20}}
+  ; CHECK-IOS: memmove
+  ; CHECK-DARWIN: memmove
+  ; CHECK-EABI: __aeabi_memmove
+  %arr0 = alloca [9 x i8], align 1
+  %0 = bitcast [9 x i8]* %arr0 to i8*
+  call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %0, i32 %n, i32 0, i1 false)
+
+  ; CHECK: {{add r1, sp, #(16|12)}}
+  ; CHECK-IOS: memcpy
+  ; CHECK-DARWIN: memcpy
+  ; CHECK-EABI: __aeabi_memcpy
+  %arr1 = alloca [9 x i8], align 1
+  %1 = bitcast [9 x i8]* %arr1 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %1, i32 %n, i32 0, i1 false)
+
+  ; CHECK: {{add r0, sp, #4|mov r0, sp}}
+  ; CHECK-IOS: mov r1, #0
+  ; CHECK-IOS: memset
+  ; CHECK-DARWIN: movs r1, #0
+  ; CHECK-DARWIN: memset
+  ; CHECK-EABI: mov r2, #0
+  ; CHECK-EABI: __aeabi_memset
+  %arr2 = alloca [9 x i8], align 1
+  %2 = bitcast [9 x i8]* %arr2 to i8*
+  call void @llvm.memset.p0i8.i32(i8* %2, i8 0, i32 %n, i32 0, i1 false)
+
+  unreachable
+}
+
+; Check that alloca arguments are not aligned if less than 8 bytes in size
+define void @f3(i8* %dest, i32 %n) {
+entry:
+  ; CHECK-LABEL: f3
+
+  ; CHECK: {{add(.w)? r1, sp, #17|sub(.w)? r1, r7, #15}}
+  ; CHECK-IOS: memmove
+  ; CHECK-DARWIN: memmove
+  ; CHECK-EABI: __aeabi_memmove
+  %arr0 = alloca [7 x i8], align 1
+  %0 = bitcast [7 x i8]* %arr0 to i8*
+  call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %0, i32 %n, i32 0, i1 false)
+
+  ; CHECK: {{add(.w)? r1, sp, #10}}
+  ; CHECK-IOS: memcpy
+  ; CHECK-DARWIN: memcpy
+  ; CHECK-EABI: __aeabi_memcpy
+  %arr1 = alloca [7 x i8], align 1
+  %1 = bitcast [7 x i8]* %arr1 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %1, i32 %n, i32 0, i1 false)
+
+  ; CHECK: {{add(.w)? r0, sp, #3}}
+  ; CHECK-IOS: mov r1, #0
+  ; CHECK-IOS: memset
+  ; CHECK-DARWIN: movs r1, #0
+  ; CHECK-DARWIN: memset
+  ; CHECK-EABI: mov r2, #0
+  ; CHECK-EABI: __aeabi_memset
+  %arr2 = alloca [7 x i8], align 1
+  %2 = bitcast [7 x i8]* %arr2 to i8*
+  call void @llvm.memset.p0i8.i32(i8* %2, i8 0, i32 %n, i32 0, i1 false)
+
+  unreachable
 }
 
 declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
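
Illustrative note (not part of the patch itself): a minimal IR sketch of the intended
effect of the CodeGenPrepare change, assuming an ARM triple so that
shouldAlignPointerArgs returns MinSize = 8 and PrefAlign = 4. The function and
value names below are hypothetical.

  ; Before CodeGenPrepare: a 16-byte alloca with only 1-byte alignment is
  ; passed (via a bitcast) to a memory intrinsic.
  define void @copy_example(i8* %dest, i32 %n) {
  entry:
    %buf = alloca [16 x i8], align 1
    %src = bitcast [16 x i8]* %buf to i8*
    call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 %n, i32 0, i1 false)
    ret void
  }

  declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind

Because the alloca is at least MinSize (8) bytes and its alignment is below
PrefAlign (4), the pass rewrites it to "alloca [16 x i8], align 4", which is
what lets the ARM backend consider LDM/STM when lowering the copy. The memcpy's
alignment operand itself is only raised when both pointers have a known
alignment, which is not the case for %dest here.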
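
Illustrative note (not part of the patch itself): the CodeGenPrepare change also
strips casts and in-bounds constant GEPs, so an offset into an alloca can still
trigger the realignment provided the offset is a multiple of PrefAlign and the
remaining size is at least MinSize. A hypothetical sketch, again assuming an ARM
triple (MinSize = 8, PrefAlign = 4):

  define void @clear_tail(i32 %n) {
  entry:
    ; The memset starts at offset 4 into a 12-byte alloca: the offset is a
    ; multiple of PrefAlign and 12 - 4 = 8 >= MinSize, so the alloca's
    ; alignment would be raised from 1 to 4, and the memset's alignment
    ; operand from 0 to 4.
    %buf = alloca [12 x i8], align 1
    %p = getelementptr inbounds [12 x i8]* %buf, i32 0, i32 4
    call void @llvm.memset.p0i8.i32(i8* %p, i8 0, i32 %n, i32 0, i1 false)
    ret void
  }

  declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind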