Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -606,6 +606,10 @@
 
   MaxStoresPerMemmoveOptSize = MaxStoresPerMemmove = 4;
 
+  MaxLoadsPerMemcmpOptSize = 4;
+  MaxLoadsPerMemcmp = Subtarget->requiresStrictAlign()
+                      ? MaxLoadsPerMemcmpOptSize : 8;
+
   setStackPointerRegisterToSaveRestore(AArch64::SP);
 
   setSchedulingPreference(Sched::Hybrid);
Index: llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
===================================================================
--- llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -130,6 +130,9 @@
   int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                          const Instruction *I = nullptr);
 
+  TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
+                                                    bool IsZeroCmp) const;
+
   int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                       unsigned AddressSpace,
                       const Instruction *I = nullptr);
Index: llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -618,6 +618,19 @@
   return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
 }
 
+AArch64TTIImpl::TTI::MemCmpExpansionOptions
+AArch64TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
+  TTI::MemCmpExpansionOptions Options;
+  Options.AllowOverlappingLoads = !ST->requiresStrictAlign();
+  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
+  Options.NumLoadsPerBlock = Options.MaxNumLoads;
+  // TODO: Though vector loads usually perform well on AArch64, in some targets
+  // they may wake up the FP unit, which raises the power consumption. Perhaps
+  // they could be used with no holds barred (-O3).
+  Options.LoadSizes = {8, 4, 2, 1};
+  return Options;
+}
+
 int AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
                                     unsigned Alignment, unsigned AddressSpace,
                                     const Instruction *I) {
Index: llvm/test/CodeGen/AArch64/bcmp-inline-small.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/bcmp-inline-small.ll
@@ -0,0 +1,44 @@
+; RUN: llc -O2 < %s -mtriple=aarch64-linux-gnu                     | FileCheck %s --check-prefixes=CHECK,CHECKN
+; RUN: llc -O2 < %s -mtriple=aarch64-linux-gnu -mattr=strict-align | FileCheck %s --check-prefixes=CHECK,CHECKS
+
+declare i32 @bcmp(i8*, i8*, i64) nounwind readonly
+declare i32 @memcmp(i8*, i8*, i64) nounwind readonly
+
+define i1 @bcmp_b2(i8* %s1, i8* %s2) {
+entry:
+  %bcmp = call i32 @bcmp(i8* %s1, i8* %s2, i64 15)
+  %ret = icmp eq i32 %bcmp, 0
+  ret i1 %ret
+
+; CHECK-LABEL: bcmp_b2:
+; CHECK-NOT:   bl bcmp
+; CHECKN:      ldr  x
+; CHECKN-NEXT: ldr  x
+; CHECKN-NEXT: ldur x
+; CHECKN-NEXT: ldur x
+; CHECKS:      ldr  x
+; CHECKS-NEXT: ldr  x
+; CHECKS-NEXT: ldr  w
+; CHECKS-NEXT: ldr  w
+; CHECKS-NEXT: ldrh w
+; CHECKS-NEXT: ldrh w
+; CHECKS-NEXT: ldrb w
+; CHECKS-NEXT: ldrb w
+}
+
+define i1 @bcmp_bs(i8* %s1, i8* %s2) optsize {
+entry:
+  %memcmp = call i32 @memcmp(i8* %s1, i8* %s2, i64 31)
+  %ret = icmp eq i32 %memcmp, 0
+  ret i1 %ret
+
+; CHECK-LABEL: bcmp_bs:
+; CHECKN-NOT:  bl memcmp
+; CHECKN:      ldp x
+; CHECKN-NEXT: ldp x
+; CHECKN-NEXT: ldr x
+; CHECKN-NEXT: ldr x
+; CHECKN-NEXT: ldur x
+; CHECKN-NEXT: ldur x
+; CHECKS:      bl memcmp
+}
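
For illustration, a minimal C++ sketch (not LLVM code; the load64 helper is made up) of what the expanded bcmp_b2 computes when AllowOverlappingLoads is set: a 15-byte compare is covered by 8-byte loads at offsets 0 and 7, so the odd-sized tail needs no narrow-load ladder. Four loads total, matching the ldr/ldr/ldur/ldur sequence the CHECKN prefixes expect.

    #include <cstdint>
    #include <cstring>

    // Hypothetical helper standing in for the 8-byte loads (ldr/ldur) the
    // expansion emits; memcpy keeps the unaligned access well-defined in C++.
    static uint64_t load64(const char *P) {
      uint64_t V;
      std::memcpy(&V, P, sizeof(V));
      return V;
    }

    // bcmp(s1, s2, 15) == 0 with overlapping loads allowed.
    bool bcmp15_eq(const char *S1, const char *S2) {
      uint64_t D0 = load64(S1) ^ load64(S2);         // bytes [0,8)
      uint64_t D1 = load64(S1 + 7) ^ load64(S2 + 7); // bytes [7,15), overlaps [7,8)
      return (D0 | D1) == 0;
    }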
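Under strict alignment overlapping loads are disabled, so the same 15-byte compare instead decomposes greedily over LoadSizes {8, 4, 2, 1}: 15 = 8 + 4 + 2 + 1, exactly MaxLoadsPerMemcmpOptSize (4) loads per source, matching the CHECKS sequence in bcmp_b2. A sketch under the same assumptions (the loadT helper is hypothetical):

    #include <cstdint>
    #include <cstring>

    // Hypothetical sized-load helper standing in for ldr x / ldr w / ldrh / ldrb.
    template <typename T> static T loadT(const char *P) {
      T V;
      std::memcpy(&V, P, sizeof(V));
      return V;
    }

    // bcmp(s1, s2, 15) == 0 without overlapping loads: 8 + 4 + 2 + 1 bytes.
    bool bcmp15_eq_strict(const char *S1, const char *S2) {
      uint64_t D = loadT<uint64_t>(S1) ^ loadT<uint64_t>(S2);       // bytes [0,8)
      D |= loadT<uint32_t>(S1 + 8) ^ loadT<uint32_t>(S2 + 8);       // bytes [8,12)
      D |= loadT<uint16_t>(S1 + 12) ^ loadT<uint16_t>(S2 + 12);     // bytes [12,14)
      D |= loadT<uint8_t>(S1 + 14) ^ loadT<uint8_t>(S2 + 14);       // byte 14
      return D == 0;
    }

The same arithmetic explains the final CHECKS line in bcmp_bs: at 31 bytes the greedy decomposition 8 + 8 + 8 + 4 + 2 + 1 takes six loads per source, which exceeds the strict-align/optsize limit of four, so the expansion is rejected and the call to memcmp remains.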