Index: llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -28,8 +28,11 @@
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineOperand.h"
 #include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/CodeGen/TargetSchedule.h"
 #include "llvm/IR/DebugLoc.h"
+#include "llvm/MC/MCInstrDesc.h"
 #include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSchedule.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
@@ -96,11 +99,19 @@
   AliasAnalysis *AA;
   const AArch64InstrInfo *TII;
   const TargetRegisterInfo *TRI;
+  const TargetSubtargetInfo *STI;
   const AArch64Subtarget *Subtarget;
 
   // Track which registers have been modified and used.
   BitVector ModifiedRegs, UsedRegs;
 
+  // Target has a cost model.
+  bool HasCostModel;
+  TargetSchedModel TSM;
+
+  // Function is being optimized for code size.
+  bool OptForMinSize;
+
   void getAnalysisUsage(AnalysisUsage &AU) const override {
     AU.addRequired<AAResultsWrapperPass>();
     MachineFunctionPass::getAnalysisUsage(AU);
@@ -154,6 +165,9 @@
   bool isMatchingUpdateInsn(MachineInstr &MemMI, MachineInstr &MI,
                             unsigned BaseReg, int Offset);
 
+  // Evaluate if the new instruction is a better choice than the old ones.
+  bool isReplacementProfitable(unsigned New, unsigned OpA, unsigned OpB);
+
   // Merge a pre- or post-index base register update into a ld/st instruction.
   MachineBasicBlock::iterator
   mergeUpdateInsn(MachineBasicBlock::iterator I,
@@ -650,6 +664,18 @@
   }
 }
 
+bool AArch64LoadStoreOpt::isReplacementProfitable(unsigned New, unsigned OpA,
+                                                  unsigned OpB) {
+  // Default to profitable when optimizing for size or
+  // in the absence of a cost model.
+  if (OptForMinSize || !HasCostModel) {
+    DEBUG(dbgs() << "Evaluating instructions: replacement by default.\n");
+    return true;
+  }
+
+  return Subtarget->isReplacementProfitable(TSM, New, OpA, OpB);
+}
+
 MachineBasicBlock::iterator
 AArch64LoadStoreOpt::mergeNarrowZeroStores(MachineBasicBlock::iterator I,
                                            MachineBasicBlock::iterator MergeMI,
@@ -1344,6 +1370,11 @@
 
   unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
                              : getPostIndexedOpcode(I->getOpcode());
+
+  // Evaluate if the new instruction is a better choice than both old ones.
+  if (!isReplacementProfitable(NewOpc, I->getOpcode(), Update->getOpcode()))
+    return NextI;
+
   MachineInstrBuilder MIB;
   if (!isPairedLdSt(*I)) {
     // Non-paired instruction.
@@ -1373,7 +1404,7 @@
     ++NumPostFolded;
     DEBUG(dbgs() << "Creating post-indexed load/store.");
   }
-  DEBUG(dbgs() << " Replacing instructions:\n    ");
+  DEBUG(dbgs() << "    Replacing instructions:\n    ");
   DEBUG(I->print(dbgs()));
   DEBUG(dbgs() << "    ");
   DEBUG(Update->print(dbgs()));
@@ -1767,6 +1798,16 @@
   TRI = Subtarget->getRegisterInfo();
   AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
 
+  OptForMinSize = Fn.getFunction().optForMinSize();
+
+  const TargetSubtargetInfo &STI = Fn.getSubtarget();
+  TSM.init(STI.getSchedModel(), &STI, STI.getInstrInfo());
+  // TODO: For now, only support targets with a scheduling model. In order to
+  // support a target that has itineraries instead,
+  // isReplacementProfitable() has to be modified to calculate the latency
+  // and the number of uops.
+  HasCostModel = TSM.hasInstrSchedModel();
+
   // Resize the modified and used register bitfield trackers. We do this once
   // per function and then clear the bitfield each time we optimize a load or
   // store.
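Reviewer note: the core of the change above is the micro-op comparison behind isReplacementProfitable(). Below is a minimal standalone C++ sketch of that heuristic, with the micro-op counts passed in as plain integers instead of being looked up in the scheduling model; the name isProfitable and every count in main() are illustrative placeholders, not real Exynos numbers.

#include <iostream>

// Returns true when one instruction taking UopN micro-ops is a good
// replacement for two instructions taking UopA and UopB micro-ops.
static bool isProfitable(long UopN, long UopA, long UopB) {
  // The replacement is profitable if it is strictly simpler.
  if (UopN < UopA + UopB)
    return true;
  // It is not profitable if it is strictly more complex.
  if (UopN > UopA + UopB)
    return false;
  // At the same total, it is profitable only if either original instruction
  // was already multi-uop: merging two single-uop instructions into one
  // multi-uop instruction is a pessimization.
  return UopA > 1 || UopB > 1;
}

int main() {
  std::cout << isProfitable(1, 1, 1) << '\n'; // 1: fewer uops overall.
  std::cout << isProfitable(2, 1, 1) << '\n'; // 0: fuses two cheap ops into one 2-uop op.
  std::cout << isProfitable(3, 2, 1) << '\n'; // 1: same total, one input already multi-uop.
  std::cout << isProfitable(4, 2, 1) << '\n'; // 0: strictly more uops.
}

On this reading, Exynos M1/M3 reject a pre/post-indexed merge whenever their scheduling model says the combined form is no cheaper than the plain load/store plus the add, which is what the EXYNOS prefixes added to ldst-opt.ll below check.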
Index: llvm/lib/Target/AArch64/AArch64Subtarget.h
===================================================================
--- llvm/lib/Target/AArch64/AArch64Subtarget.h
+++ llvm/lib/Target/AArch64/AArch64Subtarget.h
@@ -127,6 +127,9 @@
   unsigned MaxJumpTableSize = 0;
   unsigned WideningBaseCost = 0;
 
+  bool (*IsReplacementProfitable)(const TargetSchedModel &,
+                                  unsigned, unsigned, unsigned) = nullptr;
+
   // ReserveX18 - X18 is not available as a general purpose register.
   bool ReserveX18;
 
@@ -304,6 +307,13 @@
     }
   }
 
+  bool isReplacementProfitable(const TargetSchedModel &TSM,
+                               unsigned New, unsigned OpA, unsigned OpB) const {
+    if (IsReplacementProfitable)
+      return IsReplacementProfitable(TSM, New, OpA, OpB);
+
+    return true;
+  }
   /// ParseSubtargetFeatures - Parses features string setting specified
   /// subtarget options.  Definition of function is auto generated by tblgen.
   void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
Index: llvm/lib/Target/AArch64/AArch64Subtarget.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64Subtarget.cpp
+++ llvm/lib/Target/AArch64/AArch64Subtarget.cpp
@@ -47,6 +47,33 @@
     cl::desc("Call nonlazybind functions via direct GOT load"),
     cl::init(false), cl::Hidden);
 
+static bool IsExynosReplacementProfitable(const TargetSchedModel &TSM,
+                                          unsigned New,
+                                          unsigned OpA, unsigned OpB) {
+  auto *TII = static_cast<const AArch64InstrInfo *>(
+      TSM.getSubtargetInfo()->getInstrInfo());
+  auto *SM = TSM.getMCSchedModel();
+  auto *SCN = SM->getSchedClassDesc(TII->get(New).getSchedClass()),
+       *SCA = SM->getSchedClassDesc(TII->get(OpA).getSchedClass()),
+       *SCB = SM->getSchedClassDesc(TII->get(OpB).getSchedClass());
+  long UopN = SCN->NumMicroOps,
+       UopA = SCA->NumMicroOps,
+       UopB = SCB->NumMicroOps;
+
+  // The replacement instruction is profitable if it is simpler.
+  if (UopN < UopA + UopB)
+    return true;
+  if (UopN > UopA + UopB)
+    return false;
+
+  // The replacement instruction is profitable if it is as complex.
+  if (UopA > 1 || UopB > 1)
+    return true;
+
+  // The replacement instruction is not profitable.
+  return false;
+}
+
 AArch64Subtarget &
 AArch64Subtarget::initializeSubtargetDependencies(StringRef FS,
                                                   StringRef CPUString) {
@@ -77,12 +104,14 @@
     PrefFunctionAlignment = 4;
     break;
   case ExynosM1:
+    IsReplacementProfitable = IsExynosReplacementProfitable;
     MaxInterleaveFactor = 4;
     MaxJumpTableSize = 8;
     PrefFunctionAlignment = 4;
     PrefLoopAlignment = 3;
     break;
   case ExynosM3:
+    IsReplacementProfitable = IsExynosReplacementProfitable;
     MaxInterleaveFactor = 4;
     MaxJumpTableSize = 20;
     PrefFunctionAlignment = 5;
Index: llvm/test/CodeGen/AArch64/ldst-opt.ll
===================================================================
--- llvm/test/CodeGen/AArch64/ldst-opt.ll
+++ llvm/test/CodeGen/AArch64/ldst-opt.ll
@@ -1,5 +1,7 @@
-; RUN: llc -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -disable-lsr -verify-machineinstrs -o - %s | FileCheck --check-prefix=CHECK --check-prefix=NOSTRICTALIGN %s
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+strict-align -aarch64-enable-atomic-cfg-tidy=0 -disable-lsr -verify-machineinstrs -o - %s | FileCheck --check-prefix=CHECK --check-prefix=STRICTALIGN %s
+; RUN: llc -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -disable-lsr -verify-machineinstrs -o - %s | FileCheck %s --check-prefixes=CHECK,GENERIC,NOSTRICTALIGN
+; RUN: llc -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -disable-lsr -verify-machineinstrs -mattr=+strict-align -o - %s | FileCheck %s --check-prefixes=CHECK,GENERIC,STRICTALIGN
+; RUN: llc -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -disable-lsr -verify-machineinstrs -mcpu=exynos-m1 -o - %s | FileCheck %s --check-prefixes=CHECK,EXYNOS,EXYNOSM1
+; RUN: llc -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -disable-lsr -verify-machineinstrs -mcpu=exynos-m3 -o - %s | FileCheck %s --check-prefixes=CHECK,EXYNOS,EXYNOSM3,NOSTRICTALIGN
 
 ; This file contains tests for the AArch64 load/store optimizer.
 
@@ -7,8 +9,8 @@
 %s.byte = type { i8, i8 }
 %s.halfword = type { i16, i16 }
 %s.word = type { i32, i32 }
-%s.doubleword = type { i64, i32 }
-%s.quadword = type { fp128, i32 }
+%s.doubleword = type { i64, i64 }
+%s.quadword = type { fp128, fp128 }
 %s.float = type { float, i32 }
 %s.double = type { double, i32 }
 %struct.byte = type { %padding, %s.byte }
@@ -236,8 +238,10 @@
 
 define void @load-pair-pre-indexed-word(%struct.word* %ptr) nounwind {
 ; CHECK-LABEL: load-pair-pre-indexed-word
-; CHECK: ldp w{{[0-9]+}}, w{{[0-9]+}}, [x0, #32]!
-; CHECK-NOT: add x0, x0, #32
+; GENERIC: ldp w{{[0-9]+}}, w{{[0-9]+}}, [x0, #32]!
+; GENERIC-NOT: add x0, x0, #32
+; EXYNOS: ldp w{{[0-9]+}}, w{{[0-9]+}}, [x0, #32]{{$}}
+; EXYNOS: add x0, x0, #32
 entry:
   %a = getelementptr inbounds %struct.word, %struct.word* %ptr, i64 0, i32 1, i32 0
   %a1 = load i32, i32* %a, align 4
@@ -267,6 +271,41 @@
   ret void
 }
 
+define void @load-pair-pre-indexed-doubleword(%struct.doubleword* %ptr) nounwind {
+; CHECK-LABEL: load-pair-pre-indexed-doubleword
+; GENERIC: ldp x{{[0-9]+}}, x{{[0-9]+}}, [x0, #32]!
+; GENERIC-NOT: add x0, x0, #32
+; EXYNOS: ldp x{{[0-9]+}}, x{{[0-9]+}}, [x0, #32]{{$}}
+; EXYNOS: add x0, x0, #32
+entry:
+  %a = getelementptr inbounds %struct.doubleword, %struct.doubleword* %ptr, i64 0, i32 1, i32 0
+  %a1 = load i64, i64* %a, align 8
+  %b = getelementptr inbounds %struct.doubleword, %struct.doubleword* %ptr, i64 0, i32 1, i32 1
+  %b1 = load i64, i64* %b, align 8
+  %add = add i64 %a1, %b1
+  br label %bar
+bar:
+  %c = getelementptr inbounds %struct.doubleword, %struct.doubleword* %ptr, i64 0, i32 1
+  tail call void @bar_doubleword(%s.doubleword* %c, i64 %add)
+  ret void
+}
+
+define void @store-pair-pre-indexed-doubleword(%struct.doubleword* %ptr, i64 %val) nounwind {
+; CHECK-LABEL: store-pair-pre-indexed-doubleword
+; CHECK: stp x{{[0-9]+}}, x{{[0-9]+}}, [x0, #32]!
+; CHECK-NOT: add x0, x0, #32
+entry:
+  %a = getelementptr inbounds %struct.doubleword, %struct.doubleword* %ptr, i64 0, i32 1, i32 0
+  store i64 %val, i64* %a, align 8
+  %b = getelementptr inbounds %struct.doubleword, %struct.doubleword* %ptr, i64 0, i32 1, i32 1
+  store i64 %val, i64* %b, align 8
+  br label %bar
+bar:
+  %c = getelementptr inbounds %struct.doubleword, %struct.doubleword* %ptr, i64 0, i32 1
+  tail call void @bar_doubleword(%s.doubleword* %c, i64 %val)
+  ret void
+}
+
 ; Check the following transform:
 ;
 ; add x8, x8, #16
@@ -1031,7 +1070,6 @@
 define void @store-pair-post-indexed-word() nounwind {
 ; CHECK-LABEL: store-pair-post-indexed-word
 ; CHECK: stp w{{[0-9]+}}, w{{[0-9]+}}, [sp], #16
-; CHECK: ret
   %src = alloca { i32, i32 }, align 8
   %dst = alloca { i32, i32 }, align 8
 
@@ -1050,7 +1088,6 @@
 define void @store-pair-post-indexed-doubleword() nounwind {
 ; CHECK-LABEL: store-pair-post-indexed-doubleword
 ; CHECK: stp x{{[0-9]+}}, x{{[0-9]+}}, [sp], #32
-; CHECK: ret
   %src = alloca { i64, i64 }, align 8
   %dst = alloca { i64, i64 }, align 8
 
@@ -1069,7 +1106,6 @@
 define void @store-pair-post-indexed-float() nounwind {
 ; CHECK-LABEL: store-pair-post-indexed-float
 ; CHECK: stp s{{[0-9]+}}, s{{[0-9]+}}, [sp], #16
-; CHECK: ret
   %src = alloca { float, float }, align 8
   %dst = alloca { float, float }, align 8
 
@@ -1088,7 +1124,6 @@
 define void @store-pair-post-indexed-double() nounwind {
 ; CHECK-LABEL: store-pair-post-indexed-double
 ; CHECK: stp d{{[0-9]+}}, d{{[0-9]+}}, [sp], #32
-; CHECK: ret
   %src = alloca { double, double }, align 8
   %dst = alloca { double, double }, align 8
 
@@ -1104,6 +1139,27 @@
   ret void
 }
 
+define void @store-pair-post-indexed-quadword() nounwind {
+; CHECK-LABEL: store-pair-post-indexed-quadword
+; GENERIC: stp q{{[0-9]+}}, q{{[0-9]+}}, [sp], #64
+; EXYNOSM1: str q{{[0-9]+}}, [sp]
+; EXYNOSM1: str q{{[0-9]+}}, [sp, #16]
+; EXYNOSM3: stp q{{[0-9]+}}, q{{[0-9]+}}, [sp]{{$}}
+  %src = alloca { fp128, fp128 }, align 8
+  %dst = alloca { fp128, fp128 }, align 8
+
+  %src.realp = getelementptr inbounds { fp128, fp128 }, { fp128, fp128 }* %src, i32 0, i32 0
+  %src.real = load fp128, fp128* %src.realp
+  %src.imagp = getelementptr inbounds { fp128, fp128 }, { fp128, fp128 }* %src, i32 0, i32 1
+  %src.imag = load fp128, fp128* %src.imagp
+
+  %dst.realp = getelementptr inbounds { fp128, fp128 }, { fp128, fp128 }* %dst, i32 0, i32 0
+  %dst.imagp = getelementptr inbounds { fp128, fp128 }, { fp128, fp128 }* %dst, i32 0, i32 1
+  store fp128 %src.real, fp128* %dst.realp
+  store fp128 %src.imag, fp128* %dst.imagp
+  ret void
+}
+
 ; Check the following transform:
 ;
 ; (ldr|str) X, [x20]
@@ -1287,7 +1343,8 @@
 
 define void @post-indexed-paired-min-offset(i64* %a, i64* %b, i64 %count) nounwind {
 ; CHECK-LABEL: post-indexed-paired-min-offset
-; CHECK: ldp x{{[0-9]+}}, x{{[0-9]+}}, [x{{[0-9]+}}], #-512
+; GENERIC: ldp x{{[0-9]+}}, x{{[0-9]+}}, [x{{[0-9]+}}], #-512
+; EXYNOS: ldp x{{[0-9]+}}, x{{[0-9]+}}, [x{{[0-9]+}}]{{$}}
 ; CHECK: stp x{{[0-9]+}}, x{{[0-9]+}}, [x{{[0-9]+}}], #-512
 br label %for.body
 for.body:
@@ -1340,10 +1397,8 @@
 ; scalar stores which should get merged by AArch64LoadStoreOptimizer.
 define void @merge_zr32(i32* %p) {
 ; CHECK-LABEL: merge_zr32:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: str xzr, [x{{[0-9]+}}]
-; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}]
-; CHECK-NEXT: ret
+; NOSTRICTALIGN: str xzr, [x{{[0-9]+}}]
+; STRICTALIGN: stp wzr, wzr, [x{{[0-9]+}}]
 entry:
   store i32 0, i32* %p
   %p1 = getelementptr i32, i32* %p, i32 1
@@ -1354,11 +1409,9 @@
 ; Same as merge_zr32 but the merged stores should also get paired.
 define void @merge_zr32_2(i32* %p) {
 ; CHECK-LABEL: merge_zr32_2:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: stp xzr, xzr, [x{{[0-9]+}}]
-; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}]
+; NOSTRICTALIGN: stp xzr, xzr, [x{{[0-9]+}}]
+; STRICTALIGN: stp wzr, wzr, [x{{[0-9]+}}]
 ; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}, #8]
-; CHECK-NEXT: ret
 entry:
   store i32 0, i32* %p
   %p1 = getelementptr i32, i32* %p, i32 1
@@ -1373,13 +1426,11 @@
 ; Like merge_zr32_2, but checking the largest allowed stp immediate offset.
 define void @merge_zr32_2_offset(i32* %p) {
 ; CHECK-LABEL: merge_zr32_2_offset:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: stp xzr, xzr, [x{{[0-9]+}}, #504]
-; STRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}, #504]
+; NOSTRICTALIGN: stp xzr, xzr, [x{{[0-9]+}}, #504]
+; STRICTALIGN: str wzr, [x{{[0-9]+}}, #504]
 ; STRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}, #508]
 ; STRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}, #512]
 ; STRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}, #516]
-; CHECK-NEXT: ret
 entry:
   %p0 = getelementptr i32, i32* %p, i32 126
   store i32 0, i32* %p0
@@ -1397,14 +1448,12 @@
 ; instruction.
 define void @no_merge_zr32_2_offset(i32* %p) {
 ; CHECK-LABEL: no_merge_zr32_2_offset:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: movi v[[REG:[0-9]]].2d, #0000000000000000
+; NOSTRICTALIGN: movi v[[REG:[0-9]]].2d, #0000000000000000
 ; NOSTRICTALIGN-NEXT: str q[[REG]], [x{{[0-9]+}}, #4096]
-; STRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}, #4096]
+; STRICTALIGN: str wzr, [x{{[0-9]+}}, #4096]
 ; STRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}, #4100]
 ; STRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}, #4104]
 ; STRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}, #4108]
-; CHECK-NEXT: ret
 entry:
   %p0 = getelementptr i32, i32* %p, i32 1024
   store i32 0, i32* %p0
@@ -1422,14 +1471,12 @@
 ; err on the side that allows for stp q instruction generation.
 define void @merge_zr32_3(i32* %p) {
 ; CHECK-LABEL: merge_zr32_3:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: movi v[[REG:[0-9]]].2d, #0000000000000000
+; NOSTRICTALIGN: movi v[[REG:[0-9]]].2d, #0000000000000000
 ; NOSTRICTALIGN-NEXT: stp q[[REG]], q[[REG]], [x{{[0-9]+}}]
-; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}]
+; STRICTALIGN: stp wzr, wzr, [x{{[0-9]+}}]
 ; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}, #8]
 ; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}, #16]
 ; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}, #24]
-; CHECK-NEXT: ret
 entry:
   store i32 0, i32* %p
   %p1 = getelementptr i32, i32* %p, i32 1
@@ -1452,10 +1499,8 @@
 ; Like merge_zr32, but with 2-vector type.
 define void @merge_zr32_2vec(<2 x i32>* %p) {
 ; CHECK-LABEL: merge_zr32_2vec:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: str xzr, [x{{[0-9]+}}]
-; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}]
-; CHECK-NEXT: ret
+; NOSTRICTALIGN: str xzr, [x{{[0-9]+}}]
+; STRICTALIGN: stp wzr, wzr, [x{{[0-9]+}}]
 entry:
   store <2 x i32> zeroinitializer, <2 x i32>* %p
   ret void
@@ -1464,12 +1509,10 @@
 ; Like merge_zr32, but with 3-vector type.
 define void @merge_zr32_3vec(<3 x i32>* %p) {
 ; CHECK-LABEL: merge_zr32_3vec:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: str xzr, [x{{[0-9]+}}]
+; NOSTRICTALIGN: str xzr, [x{{[0-9]+}}]
 ; NOSTRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}, #8]
-; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}]
+; STRICTALIGN: stp wzr, wzr, [x{{[0-9]+}}]
 ; STRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}, #8]
-; CHECK-NEXT: ret
 entry:
   store <3 x i32> zeroinitializer, <3 x i32>* %p
   ret void
@@ -1478,11 +1521,9 @@
 ; Like merge_zr32, but with 4-vector type.
 define void @merge_zr32_4vec(<4 x i32>* %p) {
 ; CHECK-LABEL: merge_zr32_4vec:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: stp xzr, xzr, [x{{[0-9]+}}]
-; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}]
+; NOSTRICTALIGN: stp xzr, xzr, [x{{[0-9]+}}]
+; STRICTALIGN: stp wzr, wzr, [x{{[0-9]+}}]
 ; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}, #8]
-; CHECK-NEXT: ret
 entry:
   store <4 x i32> zeroinitializer, <4 x i32>* %p
   ret void
@@ -1491,10 +1532,8 @@
 ; Like merge_zr32, but with 2-vector float type.
 define void @merge_zr32_2vecf(<2 x float>* %p) {
 ; CHECK-LABEL: merge_zr32_2vecf:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: str xzr, [x{{[0-9]+}}]
-; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}]
-; CHECK-NEXT: ret
+; NOSTRICTALIGN: str xzr, [x{{[0-9]+}}]
+; STRICTALIGN: stp wzr, wzr, [x{{[0-9]+}}]
 entry:
   store <2 x float> zeroinitializer, <2 x float>* %p
   ret void
@@ -1503,11 +1542,9 @@
 ; Like merge_zr32, but with 4-vector float type.
 define void @merge_zr32_4vecf(<4 x float>* %p) {
 ; CHECK-LABEL: merge_zr32_4vecf:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: stp xzr, xzr, [x{{[0-9]+}}]
-; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}]
+; NOSTRICTALIGN: stp xzr, xzr, [x{{[0-9]+}}]
+; STRICTALIGN: stp wzr, wzr, [x{{[0-9]+}}]
 ; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}, #8]
-; CHECK-NEXT: ret
 entry:
   store <4 x float> zeroinitializer, <4 x float>* %p
   ret void
@@ -1516,8 +1553,7 @@
 ; Similar to merge_zr32, but for 64-bit values.
 define void @merge_zr64(i64* %p) {
 ; CHECK-LABEL: merge_zr64:
-; CHECK: // %entry
-; CHECK-NEXT: stp xzr, xzr, [x{{[0-9]+}}]
+; CHECK: stp xzr, xzr, [x{{[0-9]+}}]
 ; CHECK-NEXT: ret
 entry:
   store i64 0, i64* %p
@@ -1529,8 +1565,7 @@
 ; Similar to merge_zr32, but for 64-bit values and with unaligned stores.
 define void @merge_zr64_unalign(<2 x i64>* %p) {
 ; CHECK-LABEL: merge_zr64_unalign:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: stp xzr, xzr, [x{{[0-9]+}}]
+; NOSTRICTALIGN: stp xzr, xzr, [x{{[0-9]+}}]
 ; STRICTALIGN: strb
 ; STRICTALIGN: strb
 ; STRICTALIGN: strb
@@ -1547,7 +1582,6 @@
 ; STRICTALIGN: strb
 ; STRICTALIGN: strb
 ; STRICTALIGN: strb
-; CHECK-NEXT: ret
 entry:
   store <2 x i64> zeroinitializer, <2 x i64>* %p, align 1
   ret void
@@ -1557,12 +1591,10 @@
 ; vector store since the zero constant vector has multiple uses.
 define void @merge_zr64_2(i64* %p) {
 ; CHECK-LABEL: merge_zr64_2:
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: movi v[[REG:[0-9]]].2d, #0000000000000000
+; NOSTRICTALIGN: movi v[[REG:[0-9]]].2d, #0000000000000000
 ; NOSTRICTALIGN-NEXT: stp q[[REG]], q[[REG]], [x{{[0-9]+}}]
-; STRICTALIGN-NEXT: stp xzr, xzr, [x{{[0-9]+}}]
+; STRICTALIGN: stp xzr, xzr, [x{{[0-9]+}}]
 ; STRICTALIGN-NEXT: stp xzr, xzr, [x{{[0-9]+}}, #16]
-; CHECK-NEXT: ret
 entry:
   store i64 0, i64* %p
   %p1 = getelementptr i64, i64* %p, i64 1
@@ -1577,9 +1609,7 @@
 ; Like merge_zr64, but with 2-vector double type.
 define void @merge_zr64_2vecd(<2 x double>* %p) {
 ; CHECK-LABEL: merge_zr64_2vecd:
-; CHECK: // %entry
-; CHECK-NEXT: stp xzr, xzr, [x{{[0-9]+}}]
-; CHECK-NEXT: ret
+; CHECK: stp xzr, xzr, [x{{[0-9]+}}]
 entry:
   store <2 x double> zeroinitializer, <2 x double>* %p
   ret void
@@ -1588,10 +1618,8 @@
 ; Like merge_zr64, but with 3-vector i64 type.
 define void @merge_zr64_3vec(<3 x i64>* %p) {
 ; CHECK-LABEL: merge_zr64_3vec:
-; CHECK: // %entry
-; CHECK-NEXT: stp xzr, xzr, [x{{[0-9]+}}]
+; CHECK: stp xzr, xzr, [x{{[0-9]+}}]
 ; CHECK-NEXT: str xzr, [x{{[0-9]+}}, #16]
-; CHECK-NEXT: ret
 entry:
   store <3 x i64> zeroinitializer, <3 x i64>* %p
   ret void
@@ -1600,10 +1628,11 @@
 ; Like merge_zr64_2, but with 4-vector double type.
 define void @merge_zr64_4vecd(<4 x double>* %p) {
 ; CHECK-LABEL: merge_zr64_4vecd:
-; CHECK: // %entry
-; CHECK-NEXT: movi v[[REG:[0-9]]].2d, #0000000000000000
-; CHECK-NEXT: stp q[[REG]], q[[REG]], [x{{[0-9]+}}]
-; CHECK-NEXT: ret
+; CHECK: movi v[[REG:[0-9]]].2d, #0000000000000000
+; GENERIC-NEXT: stp q[[REG]], q[[REG]], [x{{[0-9]+}}]
+; EXYNOSM1-NEXT: str q[[REG]], [x{{[0-9]+}}, #16]
+; EXYNOSM1-NEXT: str q[[REG]], [x{{[0-9]+}}]
+; EXYNOSM3-NEXT: stp q[[REG]], q[[REG]], [x{{[0-9]+}}]
 entry:
   store <4 x double> zeroinitializer, <4 x double>* %p
   ret void
@@ -1612,15 +1641,13 @@
 ; Verify that non-consecutive merges do not generate q0
 define void @merge_multiple_128bit_stores(i64* %p) {
 ; CHECK-LABEL: merge_multiple_128bit_stores
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: movi v[[REG:[0-9]]].2d, #0000000000000000
+; NOSTRICTALIGN: movi v[[REG:[0-9]]].2d, #0000000000000000
 ; NOSTRICTALIGN-NEXT: str q0, [x0]
 ; NOSTRICTALIGN-NEXT: stur q0, [x0, #24]
 ; NOSTRICTALIGN-NEXT: str q0, [x0, #48]
-; STRICTALIGN-NEXT: stp xzr, xzr, [x0]
+; STRICTALIGN: stp xzr, xzr, [x0]
 ; STRICTALIGN-NEXT: stp xzr, xzr, [x0, #24]
 ; STRICTALIGN-NEXT: stp xzr, xzr, [x0, #48]
-; CHECK-NEXT: ret
 entry:
   store i64 0, i64* %p
   %p1 = getelementptr i64, i64* %p, i64 1
@@ -1639,15 +1666,13 @@
 ; Verify that large stores generate stp q
 define void @merge_multiple_128bit_stores_consec(i64* %p) {
 ; CHECK-LABEL: merge_multiple_128bit_stores_consec
-; CHECK: // %entry
-; NOSTRICTALIGN-NEXT: movi v[[REG:[0-9]]].2d, #0000000000000000
+; NOSTRICTALIGN: movi v[[REG:[0-9]]].2d, #0000000000000000
 ; NOSTRICTALIGN-NEXT: stp q[[REG]], q[[REG]], [x{{[0-9]+}}]
 ; NOSTRICTALIGN-NEXT: stp q[[REG]], q[[REG]], [x{{[0-9]+}}, #32]
-; STRICTALIGN-NEXT: stp xzr, xzr, [x0]
+; STRICTALIGN: stp xzr, xzr, [x0]
 ; STRICTALIGN-NEXT: stp xzr, xzr, [x0, #16]
 ; STRICTALIGN-NEXT: stp xzr, xzr, [x0, #32]
 ; STRICTALIGN-NEXT: stp xzr, xzr, [x0, #48]
-; CHECK-NEXT: ret
 entry:
   store i64 0, i64* %p
   %p1 = getelementptr i64, i64* %p, i64 1
@@ -1669,8 +1694,7 @@
 
 ; Check for bug 34674 where invalid add of xzr was being generated.
 ; CHECK-LABEL: bug34674:
-; CHECK: // %entry
-; CHECK-NEXT: mov [[ZREG:x[0-9]+]], xzr
+; CHECK: mov [[ZREG:x[0-9]+]], {{#0|xzr}}
 ; CHECK-DAG: stp xzr, xzr, [x0]
 ; CHECK-DAG: add x{{[0-9]+}}, [[ZREG]], #1
 define i64 @bug34674(<2 x i64>* %p) {
Index: llvm/test/CodeGen/AArch64/machine-outliner-remarks.ll
===================================================================
--- llvm/test/CodeGen/AArch64/machine-outliner-remarks.ll
+++ llvm/test/CodeGen/AArch64/machine-outliner-remarks.ll
@@ -95,7 +95,7 @@
   ret void
 }
 
-attributes #0 = { noredzone nounwind ssp uwtable "no-frame-pointer-elim"="false" "target-cpu"="cyclone" }
+attributes #0 = { optsize minsize noredzone nounwind ssp uwtable "no-frame-pointer-elim"="false" "target-cpu"="cyclone" }
 
 !llvm.dbg.cu = !{!0}
 !llvm.module.flags = !{!3, !4, !5, !6}
Index: llvm/test/CodeGen/AArch64/machine-outliner.ll
===================================================================
--- llvm/test/CodeGen/AArch64/machine-outliner.ll
+++ llvm/test/CodeGen/AArch64/machine-outliner.ll
@@ -61,4 +61,4 @@
 ; CHECK-NEXT: str w8, [sp], #16
 ; CHECK-NEXT: ret
 
-attributes #0 = { noredzone nounwind ssp uwtable "no-frame-pointer-elim"="false" "target-cpu"="cyclone" }
+attributes #0 = { optsize minsize noredzone nounwind ssp uwtable "no-frame-pointer-elim"="false" "target-cpu"="cyclone" }
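Reviewer note: the AArch64Subtarget side of the patch wires the policy through a per-CPU function pointer that stays nullptr unless subtarget initialization installs a hook. Below is a minimal C++ sketch of that pattern; the TargetSchedModel parameter is dropped for brevity and exynosLikePolicy is a placeholder policy for illustration only, not the real IsExynosReplacementProfitable.

#include <iostream>

struct Subtarget {
  // Installed per CPU during subtarget initialization; nullptr means
  // "no cost model", i.e. keep the existing always-merge behavior.
  bool (*IsReplacementProfitable)(unsigned, unsigned, unsigned) = nullptr;

  bool isReplacementProfitable(unsigned New, unsigned OpA, unsigned OpB) const {
    if (IsReplacementProfitable)
      return IsReplacementProfitable(New, OpA, OpB);
    return true; // Default: the replacement is always profitable.
  }
};

// Placeholder policy standing in for a CPU-specific cost check.
static bool exynosLikePolicy(unsigned New, unsigned OpA, unsigned OpB) {
  return New < OpA + OpB;
}

int main() {
  Subtarget Generic, Exynos;
  Exynos.IsReplacementProfitable = exynosLikePolicy;
  std::cout << Generic.isReplacementProfitable(2, 1, 1) << '\n'; // 1: no hook, default.
  std::cout << Exynos.isReplacementProfitable(2, 1, 1) << '\n';  // 0: 2 is not < 1 + 1.
}

Presumably the raw function pointer was preferred over a virtual method here so that CPUs without a custom policy keep the inline always-true default and the generic AArch64Subtarget interface gains no new vtable entry.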