Index: llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -27,8 +27,11 @@
 #include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/TargetSchedule.h"
 #include "llvm/IR/DebugLoc.h"
+#include "llvm/MC/MCInstrDesc.h"
 #include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSchedule.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
@@ -96,11 +99,19 @@
   AliasAnalysis *AA;
   const AArch64InstrInfo *TII;
   const TargetRegisterInfo *TRI;
+  const TargetSubtargetInfo *STI;
   const AArch64Subtarget *Subtarget;

   // Track which registers have been modified and used.
   BitVector ModifiedRegs, UsedRegs;

+  // Target has a cost model.
+  bool HasCostModel;
+  TargetSchedModel TSM;
+
+  // Function is being optimized for code size.
+  bool OptForSize;
+
   void getAnalysisUsage(AnalysisUsage &AU) const override {
     AU.addRequired<AAResultsWrapperPass>();
     MachineFunctionPass::getAnalysisUsage(AU);
@@ -154,6 +165,10 @@
   bool isMatchingUpdateInsn(MachineInstr &MemMI, MachineInstr &MI,
                             unsigned BaseReg, int Offset);

+  // Evaluate if the new instruction is a better choice than the old ones.
+  bool isProfitableMergeUpdate(unsigned NewOp,
+                               MachineInstr &MIA, MachineInstr &MIB);
+
   // Merge a pre- or post-index base register update into a ld/st instruction.
   MachineBasicBlock::iterator
   mergeUpdateInsn(MachineBasicBlock::iterator I,
@@ -650,6 +665,45 @@
   }
 }

+bool AArch64LoadStoreOpt::isProfitableMergeUpdate(unsigned NewOp,
+                                                  MachineInstr &MIA,
+                                                  MachineInstr &MIB) {
+  // Default to profitable if optimizing for size or
+  // in the absence of a cost model.
+  if (OptForSize || !HasCostModel)
+    return true;
+
+  const MCSchedClassDesc
+      *NewSD = TSM.getMCSchedModel()->getSchedClassDesc(
+                   TII->get(NewOp).getSchedClass()),
+      *SCA = TSM.resolveSchedClass(&MIA),
+      *SCB = TSM.resolveSchedClass(&MIB);
+  // Default to profitable if the scheduling class is variant or invalid.
+  if (NewSD->isVariant() || !NewSD->isValid())
+    return true;
+
+  long NewLat = TSM.computeInstrLatency(NewOp),
+       ALat = TSM.computeInstrLatency(&MIA),
+       BLat = TSM.computeInstrLatency(&MIB);
+  long NewUops = NewSD->NumMicroOps,
+       AUops = TSM.getNumMicroOps(&MIA, SCA),
+       BUops = TSM.getNumMicroOps(&MIB, SCB);
+  // It is profitable if the new instruction is faster than both old ones.
+  if (NewLat < (ALat + BLat))
+    return true;
+  // It is profitable if the new instruction is as fast as both old ones, and...
+  else if (NewLat == (ALat + BLat)) {
+    // ... the new instruction is not more complex than both old ones.
+    if (AUops > 1 || BUops > 1)
+      return NewUops <= (AUops + BUops);
+    // ... the new instruction is not complex.
+    else
+      return NewUops < (AUops + BUops);
+  }
+
+  // It is not profitable.
+  return false;
+}
+
 MachineBasicBlock::iterator
 AArch64LoadStoreOpt::mergeNarrowZeroStores(MachineBasicBlock::iterator I,
                                            MachineBasicBlock::iterator MergeMI,
@@ -1344,6 +1398,11 @@
   unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
                              : getPostIndexedOpcode(I->getOpcode());
+
+  // Evaluate if the new instruction is a better choice than both old ones.
+  if (!isProfitableMergeUpdate(NewOpc, *I, *Update))
+    return NextI;
+
   MachineInstrBuilder MIB;
   if (!isPairedLdSt(*I)) {
     // Non-paired instruction.
@@ -1767,6 +1826,16 @@
   TRI = Subtarget->getRegisterInfo();
   AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

+  OptForSize = Fn.getFunction()->optForSize();
+
+  const TargetSubtargetInfo &STI = Fn.getSubtarget();
+  TSM.init(STI.getSchedModel(), &STI, STI.getInstrInfo());
+  // TODO: For now, only support targets with a scheduling model.  To support
+  // a target that has itineraries instead, isProfitableMergeUpdate() has to
+  // be modified to calculate the latency and the number of uops.
+  HasCostModel = TSM.hasInstrSchedModel();
+
   // Resize the modified and used register bitfield trackers. We do this once
   // per function and then clear the bitfield each time we optimize a load or
   // store.
Index: llvm/test/CodeGen/AArch64/ldst-opt.ll
===================================================================
--- llvm/test/CodeGen/AArch64/ldst-opt.ll
+++ llvm/test/CodeGen/AArch64/ldst-opt.ll
@@ -1,5 +1,6 @@
-; RUN: llc -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -disable-lsr -verify-machineinstrs -o - %s | FileCheck --check-prefix=CHECK --check-prefix=NOSTRICTALIGN %s
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+strict-align -aarch64-enable-atomic-cfg-tidy=0 -disable-lsr -verify-machineinstrs -o - %s | FileCheck --check-prefix=CHECK --check-prefix=STRICTALIGN %s
+; RUN: llc -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -disable-lsr -verify-machineinstrs -o - %s | FileCheck %s --check-prefixes=CHECK,GENERIC,NOSTRICTALIGN
+; RUN: llc -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -disable-lsr -verify-machineinstrs -mattr=+strict-align -o - %s | FileCheck %s --check-prefixes=CHECK,GENERIC,STRICTALIGN
+; RUN: llc -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -disable-lsr -verify-machineinstrs -mcpu=exynos-m1 -o - %s | FileCheck %s --check-prefixes=CHECK,EXYNOS

 ; This file contains tests for the AArch64 load/store optimizer.

@@ -7,8 +8,8 @@
 %s.byte = type { i8, i8 }
 %s.halfword = type { i16, i16 }
 %s.word = type { i32, i32 }
-%s.doubleword = type { i64, i32 }
-%s.quadword = type { fp128, i32 }
+%s.doubleword = type { i64, i64 }
+%s.quadword = type { fp128, fp128 }
 %s.float = type { float, i32 }
 %s.double = type { double, i32 }
 %struct.byte = type { %padding, %s.byte }
@@ -267,6 +268,39 @@
   ret void
 }

+define void @load-pair-pre-indexed-doubleword(%struct.doubleword* %ptr) nounwind {
+; CHECK-LABEL: load-pair-pre-indexed-doubleword
+; CHECK: ldp x{{[0-9]+}}, x{{[0-9]+}}, [x0, #32]!
+; CHECK-NOT: add x0, x0, #32
+entry:
+  %a = getelementptr inbounds %struct.doubleword, %struct.doubleword* %ptr, i64 0, i32 1, i32 0
+  %a1 = load i64, i64* %a, align 8
+  %b = getelementptr inbounds %struct.doubleword, %struct.doubleword* %ptr, i64 0, i32 1, i32 1
+  %b1 = load i64, i64* %b, align 8
+  %add = add i64 %a1, %b1
+  br label %bar
+bar:
+  %c = getelementptr inbounds %struct.doubleword, %struct.doubleword* %ptr, i64 0, i32 1
+  tail call void @bar_doubleword(%s.doubleword* %c, i64 %add)
+  ret void
+}
+
+define void @store-pair-pre-indexed-doubleword(%struct.doubleword* %ptr, i64 %val) nounwind {
+; CHECK-LABEL: store-pair-pre-indexed-doubleword
+; CHECK: stp x{{[0-9]+}}, x{{[0-9]+}}, [x0, #32]!
+; CHECK-NOT: add x0, x0, #32
+entry:
+  %a = getelementptr inbounds %struct.doubleword, %struct.doubleword* %ptr, i64 0, i32 1, i32 0
+  store i64 %val, i64* %a, align 8
+  %b = getelementptr inbounds %struct.doubleword, %struct.doubleword* %ptr, i64 0, i32 1, i32 1
+  store i64 %val, i64* %b, align 8
+  br label %bar
+bar:
+  %c = getelementptr inbounds %struct.doubleword, %struct.doubleword* %ptr, i64 0, i32 1
+  tail call void @bar_doubleword(%s.doubleword* %c, i64 %val)
+  ret void
+}
+
 ; Check the following transform:
 ;
 ; add x8, x8, #16
@@ -1031,7 +1065,6 @@
 define void @store-pair-post-indexed-word() nounwind {
 ; CHECK-LABEL: store-pair-post-indexed-word
 ; CHECK: stp w{{[0-9]+}}, w{{[0-9]+}}, [sp], #16
-; CHECK: ret
   %src = alloca { i32, i32 }, align 8
   %dst = alloca { i32, i32 }, align 8
@@ -1050,7 +1083,6 @@
 define void @store-pair-post-indexed-doubleword() nounwind {
 ; CHECK-LABEL: store-pair-post-indexed-doubleword
 ; CHECK: stp x{{[0-9]+}}, x{{[0-9]+}}, [sp], #32
-; CHECK: ret
   %src = alloca { i64, i64 }, align 8
   %dst = alloca { i64, i64 }, align 8
@@ -1069,7 +1101,6 @@
 define void @store-pair-post-indexed-float() nounwind {
 ; CHECK-LABEL: store-pair-post-indexed-float
 ; CHECK: stp s{{[0-9]+}}, s{{[0-9]+}}, [sp], #16
-; CHECK: ret
   %src = alloca { float, float }, align 8
   %dst = alloca { float, float }, align 8
@@ -1088,7 +1119,6 @@
 define void @store-pair-post-indexed-double() nounwind {
 ; CHECK-LABEL: store-pair-post-indexed-double
 ; CHECK: stp d{{[0-9]+}}, d{{[0-9]+}}, [sp], #32
-; CHECK: ret
   %src = alloca { double, double }, align 8
   %dst = alloca { double, double }, align 8
@@ -1104,6 +1134,27 @@
   ret void
 }

+define void @store-pair-post-indexed-quadword() nounwind {
+; CHECK-LABEL: store-pair-post-indexed-quadword
+; GENERIC: stp q{{[0-9]+}}, q{{[0-9]+}}, [sp], #64
+; EXYNOS: str q{{[0-9]+}}, [sp]
+; EXYNOS-NEXT: str q{{[0-9]+}}, [sp, #16]
+; EXYNOS-NEXT: add sp, sp, #64
+  %src = alloca { fp128, fp128 }, align 8
+  %dst = alloca { fp128, fp128 }, align 8
+
+  %src.realp = getelementptr inbounds { fp128, fp128 }, { fp128, fp128 }* %src, i32 0, i32 0
+  %src.real = load fp128, fp128* %src.realp
+  %src.imagp = getelementptr inbounds { fp128, fp128 }, { fp128, fp128 }* %src, i32 0, i32 1
+  %src.imag = load fp128, fp128* %src.imagp
+
+  %dst.realp = getelementptr inbounds { fp128, fp128 }, { fp128, fp128 }* %dst, i32 0, i32 0
+  %dst.imagp = getelementptr inbounds { fp128, fp128 }, { fp128, fp128 }* %dst, i32 0, i32 1
+  store fp128 %src.real, fp128* %dst.realp
+  store fp128 %src.imag, fp128* %dst.imagp
+  ret void
+}
+
 ; Check the following transform:
 ;
 ; (ldr|str) X, [x20]
@@ -1340,9 +1391,9 @@
 ; scalar stores which should get merged by AArch64LoadStoreOptimizer.
 define void @merge_zr32(i32* %p) {
 ; CHECK-LABEL: merge_zr32:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: str xzr, [x{{[0-9]+}}]
-; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}]
+; NOSTRICTALIGN: str xzr, [x{{[0-9]+}}]
+; STRICTALIGN: stp wzr, wzr, [x{{[0-9]+}}]
+; EXYNOS: str xzr, [x{{[0-9]+}}]
 ; CHECK-NEXT: ret
 entry:
   store i32 0, i32* %p
@@ -1354,10 +1405,10 @@
 ; Same as merge_zr32 but the merged stores should also get paired.
 define void @merge_zr32_2(i32* %p) {
 ; CHECK-LABEL: merge_zr32_2:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: stp xzr, xzr, [x{{[0-9]+}}]
-; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}]
+; NOSTRICTALIGN: stp xzr, xzr, [x{{[0-9]+}}]
+; STRICTALIGN: stp wzr, wzr, [x{{[0-9]+}}]
 ; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}, #8]
+; EXYNOS: stp xzr, xzr, [x{{[0-9]+}}]
 ; CHECK-NEXT: ret
 entry:
   store i32 0, i32* %p
@@ -1373,12 +1424,12 @@
 ; Like merge_zr32_2, but checking the largest allowed stp immediate offset.
 define void @merge_zr32_2_offset(i32* %p) {
 ; CHECK-LABEL: merge_zr32_2_offset:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: stp xzr, xzr, [x{{[0-9]+}}, #504]
-; STRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}, #504]
+; NOSTRICTALIGN: stp xzr, xzr, [x{{[0-9]+}}, #504]
+; STRICTALIGN: str wzr, [x{{[0-9]+}}, #504]
 ; STRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}, #508]
 ; STRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}, #512]
 ; STRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}, #516]
+; EXYNOS: stp xzr, xzr, [x{{[0-9]+}}, #504]
 ; CHECK-NEXT: ret
 entry:
   %p0 = getelementptr i32, i32* %p, i32 126
@@ -1397,13 +1448,14 @@
 ; instruction.
 define void @no_merge_zr32_2_offset(i32* %p) {
 ; CHECK-LABEL: no_merge_zr32_2_offset:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: movi v[[REG:[0-9]]].2d, #0000000000000000
+; NOSTRICTALIGN: movi v[[REG:[0-9]]].2d, #0000000000000000
 ; NOSTRICTALIGN-NEXT: str q[[REG]], [x{{[0-9]+}}, #4096]
-; STRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}, #4096]
+; STRICTALIGN: str wzr, [x{{[0-9]+}}, #4096]
 ; STRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}, #4100]
 ; STRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}, #4104]
 ; STRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}, #4108]
+; EXYNOS: movi v[[REG:[0-9]]].2d, #0000000000000000
+; EXYNOS-NEXT: str q[[REG]], [x{{[0-9]+}}, #4096]
 ; CHECK-NEXT: ret
 entry:
   %p0 = getelementptr i32, i32* %p, i32 1024
@@ -1422,13 +1474,15 @@
 ; err on the side that allows for stp q instruction generation.
 define void @merge_zr32_3(i32* %p) {
 ; CHECK-LABEL: merge_zr32_3:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: movi v[[REG:[0-9]]].2d, #0000000000000000
+; NOSTRICTALIGN: movi v[[REG:[0-9]]].2d, #0000000000000000
 ; NOSTRICTALIGN-NEXT: stp q[[REG]], q[[REG]], [x{{[0-9]+}}]
-; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}]
+; STRICTALIGN: stp wzr, wzr, [x{{[0-9]+}}]
 ; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}, #8]
 ; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}, #16]
 ; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}, #24]
+; EXYNOS: movi v[[REG:[0-9]]].2d, #0000000000000000
+; EXYNOS-NEXT: str q[[REG]], [x{{[0-9]+}}]
+; EXYNOS-NEXT: str q[[REG]], [x{{[0-9]+}}, #16]
 ; CHECK-NEXT: ret
 entry:
   store i32 0, i32* %p
@@ -1452,9 +1506,9 @@
 ; Like merge_zr32, but with 2-vector type.
 define void @merge_zr32_2vec(<2 x i32>* %p) {
 ; CHECK-LABEL: merge_zr32_2vec:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: str xzr, [x{{[0-9]+}}]
-; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}]
+; NOSTRICTALIGN: str xzr, [x{{[0-9]+}}]
+; STRICTALIGN: stp wzr, wzr, [x{{[0-9]+}}]
+; EXYNOS: str xzr, [x{{[0-9]+}}]
 ; CHECK-NEXT: ret
 entry:
   store <2 x i32> zeroinitializer, <2 x i32>* %p
@@ -1464,11 +1518,12 @@
 ; Like merge_zr32, but with 3-vector type.
 define void @merge_zr32_3vec(<3 x i32>* %p) {
 ; CHECK-LABEL: merge_zr32_3vec:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: str xzr, [x{{[0-9]+}}]
+; NOSTRICTALIGN: str xzr, [x{{[0-9]+}}]
 ; NOSTRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}, #8]
-; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}]
+; STRICTALIGN: stp wzr, wzr, [x{{[0-9]+}}]
 ; STRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}, #8]
+; EXYNOS: str xzr, [x{{[0-9]+}}]
+; EXYNOS-NEXT: str wzr, [x{{[0-9]+}}, #8]
 ; CHECK-NEXT: ret
 entry:
   store <3 x i32> zeroinitializer, <3 x i32>* %p
@@ -1478,10 +1533,10 @@
 ; Like merge_zr32, but with 4-vector type.
 define void @merge_zr32_4vec(<4 x i32>* %p) {
 ; CHECK-LABEL: merge_zr32_4vec:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: stp xzr, xzr, [x{{[0-9]+}}]
-; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}]
+; NOSTRICTALIGN: stp xzr, xzr, [x{{[0-9]+}}]
+; STRICTALIGN: stp wzr, wzr, [x{{[0-9]+}}]
 ; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}, #8]
+; EXYNOS: stp xzr, xzr, [x{{[0-9]+}}]
 ; CHECK-NEXT: ret
 entry:
   store <4 x i32> zeroinitializer, <4 x i32>* %p
@@ -1491,9 +1546,9 @@
 ; Like merge_zr32, but with 2-vector float type.
 define void @merge_zr32_2vecf(<2 x float>* %p) {
 ; CHECK-LABEL: merge_zr32_2vecf:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: str xzr, [x{{[0-9]+}}]
-; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}]
+; NOSTRICTALIGN: str xzr, [x{{[0-9]+}}]
+; STRICTALIGN: stp wzr, wzr, [x{{[0-9]+}}]
+; EXYNOS: str xzr, [x{{[0-9]+}}]
 ; CHECK-NEXT: ret
 entry:
   store <2 x float> zeroinitializer, <2 x float>* %p
@@ -1503,10 +1558,10 @@
 ; Like merge_zr32, but with 4-vector float type.
 define void @merge_zr32_4vecf(<4 x float>* %p) {
 ; CHECK-LABEL: merge_zr32_4vecf:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: stp xzr, xzr, [x{{[0-9]+}}]
-; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}]
+; NOSTRICTALIGN: stp xzr, xzr, [x{{[0-9]+}}]
+; STRICTALIGN: stp wzr, wzr, [x{{[0-9]+}}]
 ; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}, #8]
+; EXYNOS: stp xzr, xzr, [x{{[0-9]+}}]
 ; CHECK-NEXT: ret
 entry:
   store <4 x float> zeroinitializer, <4 x float>* %p
@@ -1516,8 +1571,7 @@
 ; Similar to merge_zr32, but for 64-bit values.
 define void @merge_zr64(i64* %p) {
 ; CHECK-LABEL: merge_zr64:
-; CHECK: // %entry
-; CHECK-NEXT: stp xzr, xzr, [x{{[0-9]+}}]
+; CHECK: stp xzr, xzr, [x{{[0-9]+}}]
 ; CHECK-NEXT: ret
 entry:
   store i64 0, i64* %p
@@ -1529,8 +1583,7 @@
 ; Similar to merge_zr32, but for 64-bit values and with unaligned stores.
 define void @merge_zr64_unalign(<2 x i64>* %p) {
 ; CHECK-LABEL: merge_zr64_unalign:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: stp xzr, xzr, [x{{[0-9]+}}]
+; NOSTRICTALIGN: stp xzr, xzr, [x{{[0-9]+}}]
 ; STRICTALIGN: strb
 ; STRICTALIGN: strb
 ; STRICTALIGN: strb
@@ -1547,6 +1600,7 @@
 ; STRICTALIGN: strb
 ; STRICTALIGN: strb
 ; STRICTALIGN: strb
+; EXYNOS: stp xzr, xzr, [x{{[0-9]+}}]
 ; CHECK-NEXT: ret
 entry:
   store <2 x i64> zeroinitializer, <2 x i64>* %p, align 1
@@ -1557,11 +1611,13 @@
 ; vector store since the zero constant vector has multiple uses.
 define void @merge_zr64_2(i64* %p) {
 ; CHECK-LABEL: merge_zr64_2:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: movi v[[REG:[0-9]]].2d, #0000000000000000
+; NOSTRICTALIGN: movi v[[REG:[0-9]]].2d, #0000000000000000
 ; NOSTRICTALIGN-NEXT: stp q[[REG]], q[[REG]], [x{{[0-9]+}}]
-; STRICTALIGN-NEXT: stp xzr, xzr, [x{{[0-9]+}}]
+; STRICTALIGN: stp xzr, xzr, [x{{[0-9]+}}]
 ; STRICTALIGN-NEXT: stp xzr, xzr, [x{{[0-9]+}}, #16]
+; EXYNOS: movi v[[REG:[0-9]]].2d, #0000000000000000
+; EXYNOS-NEXT: str q[[REG]], [x{{[0-9]+}}]
+; EXYNOS-NEXT: str q[[REG]], [x{{[0-9]+}}, #16]
 ; CHECK-NEXT: ret
 entry:
   store i64 0, i64* %p
@@ -1577,8 +1633,7 @@
 ; Like merge_zr64, but with 2-vector double type.
 define void @merge_zr64_2vecd(<2 x double>* %p) {
 ; CHECK-LABEL: merge_zr64_2vecd:
-; CHECK: // %entry
-; CHECK-NEXT: stp xzr, xzr, [x{{[0-9]+}}]
+; CHECK: stp xzr, xzr, [x{{[0-9]+}}]
 ; CHECK-NEXT: ret
 entry:
   store <2 x double> zeroinitializer, <2 x double>* %p
@@ -1588,8 +1643,7 @@
 ; Like merge_zr64, but with 3-vector i64 type.
 define void @merge_zr64_3vec(<3 x i64>* %p) {
 ; CHECK-LABEL: merge_zr64_3vec:
-; CHECK: // %entry
-; CHECK-NEXT: stp xzr, xzr, [x{{[0-9]+}}]
+; CHECK: stp xzr, xzr, [x{{[0-9]+}}]
 ; CHECK-NEXT: str xzr, [x{{[0-9]+}}, #16]
 ; CHECK-NEXT: ret
 entry:
@@ -1600,10 +1654,10 @@
 ; Like merge_zr64_2, but with 4-vector double type.
 define void @merge_zr64_4vecd(<4 x double>* %p) {
 ; CHECK-LABEL: merge_zr64_4vecd:
-; CHECK: // %entry
-; CHECK-NEXT: movi v[[REG:[0-9]]].2d, #0000000000000000
-; CHECK-NEXT: stp q[[REG]], q[[REG]], [x{{[0-9]+}}]
-; CHECK-NEXT: ret
+; CHECK: movi v[[REG:[0-9]]].2d, #0000000000000000
+; GENERIC-NEXT: stp q[[REG]], q[[REG]], [x{{[0-9]+}}]
+; EXYNOS-NEXT: str q[[REG]], [x{{[0-9]+}}, #16]
+; EXYNOS-NEXT: str q[[REG]], [x{{[0-9]+}}]
 entry:
   store <4 x double> zeroinitializer, <4 x double>* %p
   ret void
@@ -1612,14 +1666,17 @@
 ; Verify that non-consecutive merges do not generate q0
 define void @merge_multiple_128bit_stores(i64* %p) {
 ; CHECK-LABEL: merge_multiple_128bit_stores
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: movi v[[REG:[0-9]]].2d, #0000000000000000
+; NOSTRICTALIGN: movi v[[REG:[0-9]]].2d, #0000000000000000
 ; NOSTRICTALIGN-NEXT: str q0, [x0]
 ; NOSTRICTALIGN-NEXT: stur q0, [x0, #24]
 ; NOSTRICTALIGN-NEXT: str q0, [x0, #48]
-; STRICTALIGN-NEXT: stp xzr, xzr, [x0]
+; STRICTALIGN: stp xzr, xzr, [x0]
 ; STRICTALIGN-NEXT: stp xzr, xzr, [x0, #24]
 ; STRICTALIGN-NEXT: stp xzr, xzr, [x0, #48]
+; EXYNOS: movi v[[REG:[0-9]]].2d, #0000000000000000
+; EXYNOS-NEXT: str q0, [x0]
+; EXYNOS-NEXT: stur q0, [x0, #24]
+; EXYNOS-NEXT: str q0, [x0, #48]
 ; CHECK-NEXT: ret
 entry:
   store i64 0, i64* %p
@@ -1639,14 +1696,18 @@
 ; Verify that large stores generate stp q
 define void @merge_multiple_128bit_stores_consec(i64* %p) {
 ; CHECK-LABEL: merge_multiple_128bit_stores_consec
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: movi v[[REG:[0-9]]].2d, #0000000000000000
+; NOSTRICTALIGN: movi v[[REG:[0-9]]].2d, #0000000000000000
 ; NOSTRICTALIGN-NEXT: stp q[[REG]], q[[REG]], [x{{[0-9]+}}]
 ; NOSTRICTALIGN-NEXT: stp q[[REG]], q[[REG]], [x{{[0-9]+}}, #32]
-; STRICTALIGN-NEXT: stp xzr, xzr, [x0]
+; STRICTALIGN: stp xzr, xzr, [x0]
 ; STRICTALIGN-NEXT: stp xzr, xzr, [x0, #16]
 ; STRICTALIGN-NEXT: stp xzr, xzr, [x0, #32]
 ; STRICTALIGN-NEXT: stp xzr, xzr, [x0, #48]
+; EXYNOS: movi v[[REG:[0-9]]].2d, #0000000000000000
+; EXYNOS-NEXT: str q[[REG]], [x{{[0-9]+}}]
+; EXYNOS-NEXT: str q[[REG]], [x{{[0-9]+}}, #16]
+; EXYNOS-NEXT: str q[[REG]], [x{{[0-9]+}}, #32]
+; EXYNOS-NEXT: str q[[REG]], [x{{[0-9]+}}, #48]
 ; CHECK-NEXT: ret
 entry:
   store i64 0, i64* %p
@@ -1669,8 +1730,7 @@

 ; Check for bug 34674 where invalid add of xzr was being generated.
 ; CHECK-LABEL: bug34674:
-; CHECK: // %entry
-; CHECK-NEXT: mov [[ZREG:x[0-9]+]], xzr
+; CHECK: mov [[ZREG:x[0-9]+]], {{#0|xzr}}
 ; CHECK-DAG: stp [[ZREG]], [[ZREG]], [x0]
 ; CHECK-DAG: add x{{[0-9]+}}, [[ZREG]], #1
 define i64 @bug34674(<2 x i64>* %p) {
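
For reference only, and not part of the patch itself: a minimal standalone C++ sketch of the profitability rule that isProfitableMergeUpdate() implements above. The latency and micro-op numbers used in main() are hypothetical stand-ins for values a target scheduling model would report.

#include <cassert>

// Per-instruction cost as a scheduling model would describe it.
struct Cost {
  unsigned Latency;  // cycles
  unsigned MicroOps; // decoded micro-operations
};

// Mirrors the comparison in isProfitableMergeUpdate(): the merged instruction
// must be faster than the two instructions it replaces, or as fast without
// being more complex in terms of micro-ops.
static bool isProfitable(Cost New, Cost A, Cost B) {
  if (New.Latency < A.Latency + B.Latency)
    return true; // Strictly faster.
  if (New.Latency == A.Latency + B.Latency) {
    if (A.MicroOps > 1 || B.MicroOps > 1)
      return New.MicroOps <= A.MicroOps + B.MicroOps; // Not more complex.
    return New.MicroOps < A.MicroOps + B.MicroOps;    // Strictly simpler.
  }
  return false; // Slower: keep the separate instructions.
}

int main() {
  // Hypothetical numbers for a pre-indexed store pair vs. a separate
  // address update plus a store pair.
  assert(isProfitable({4, 2}, {1, 1}, {4, 2}));  // Faster overall: merge.
  assert(!isProfitable({8, 4}, {1, 1}, {4, 2})); // Slower: do not merge.
  return 0;
}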