Index: llvm/trunk/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ llvm/trunk/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -467,7 +467,8 @@
                                     unsigned Alignment, unsigned AddressSpace) {
   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
 
-  if (Opcode == Instruction::Store && Src->isVectorTy() && Alignment != 16 &&
+  if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
+      Src->isVectorTy() && Alignment != 16 &&
       Src->getVectorElementType()->isIntegerTy(64)) {
     // Unaligned stores are extremely inefficient. We don't split
     // unaligned v2i64 stores because the negative impact that has shown in
Index: llvm/trunk/test/Analysis/CostModel/AArch64/store.ll
===================================================================
--- llvm/trunk/test/Analysis/CostModel/AArch64/store.ll
+++ llvm/trunk/test/Analysis/CostModel/AArch64/store.ll
@@ -1,10 +1,16 @@
-; RUN: opt < %s -cost-model -analyze -mtriple=arm64-apple-ios -mcpu=cyclone | FileCheck %s
+; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-apple-ios | FileCheck %s
+; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-apple-ios -mattr=slow-misaligned-128store | FileCheck %s --check-prefix=SLOW_MISALIGNED_128_STORE
+
 target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
-; CHECK-LABEL: store
-define void @store() {
-  ; Stores of <2 x i64> should be expensive because we don't split them and
-  ; and unaligned 16b stores have bad performance.
-  ; CHECK: cost of 12 {{.*}} store
+; CHECK-LABEL: getMemoryOpCost
+; SLOW_MISALIGNED_128_STORE-LABEL: getMemoryOpCost
+define void @getMemoryOpCost() {
+  ; If FeatureSlowMisaligned128Store is set, we penalize <2 x i64> stores. On
+  ; Cyclone, for example, such stores should be expensive because we don't
+  ; split them and misaligned 16b stores have bad performance.
+  ;
+  ; CHECK: cost of 1 {{.*}} store
+  ; SLOW_MISALIGNED_128_STORE: cost of 12 {{.*}} store
   store <2 x i64> undef, <2 x i64> * undef
 
   ; We scalarize the loads/stores because there is no vector register name for
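
Note (not part of the patch): a minimal sketch of how the gated branch above can
produce the "cost of 12" the test expects. Only the if-condition is taken
verbatim from the hunk; the surrounding function body, in particular the
AmortizationCost constant, is an assumption chosen so that LT.first (1 for a
legal <2 x i64> store) * 2 * 6 reproduces the expected cost, and the fallback
path is simplified.

int AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                    unsigned Alignment, unsigned AddressSpace) {
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);

  // Condition as in the patch: penalize misaligned v2i64 stores only when the
  // subtarget reports them as slow (e.g. via -mattr=slow-misaligned-128store).
  if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
      Src->isVectorTy() && Alignment != 16 &&
      Src->getVectorElementType()->isIntegerTy(64)) {
    // Assumed constant: makes the store expensive enough that vectorization
    // only pays off when enough other work amortizes the slow store.
    const int AmortizationCost = 6; // 1 (LT.first) * 2 * 6 == 12
    return LT.first * 2 * AmortizationCost;
  }

  // Simplified fallback (sketch only; the real function handles more cases):
  // charge the legalization count for an ordinary load/store.
  return LT.first;
}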