Index: lib/Target/AArch64/AArch64.h =================================================================== --- lib/Target/AArch64/AArch64.h +++ lib/Target/AArch64/AArch64.h @@ -46,8 +46,21 @@ FunctionPass *createAArch64CollectLOHPass(); +void initializeAArch64A53Fix835769Pass(PassRegistry&); +void initializeAArch64A57FPLoadBalancingPass(PassRegistry&); +void initializeAArch64AddressTypePromotionPass(PassRegistry&); +void initializeAArch64AdvSIMDScalarPass(PassRegistry&); +void initializeAArch64BranchRelaxationPass(PassRegistry&); +void initializeAArch64CollectLOHPass(PassRegistry&); +void initializeAArch64ConditionalComparesPass(PassRegistry&); +void initializeAArch64ConditionOptimizerPass(PassRegistry&); +void initializeAArch64DeadRegisterDefinitionsPass(PassRegistry&); void initializeAArch64ExpandPseudoPass(PassRegistry&); void initializeAArch64LoadStoreOptPass(PassRegistry&); +void initializeAArch64PromoteConstantPass(PassRegistry&); +void initializeAArch64RedundantCopyEliminationPass(PassRegistry&); +void initializeAArch64StorePairSuppressPass(PassRegistry&); +void initializeLDTLSCleanupPass(PassRegistry&); } // end namespace llvm #endif Index: lib/Target/AArch64/AArch64A53Fix835769.cpp =================================================================== --- lib/Target/AArch64/AArch64A53Fix835769.cpp +++ lib/Target/AArch64/AArch64A53Fix835769.cpp @@ -82,7 +82,9 @@ public: static char ID; - explicit AArch64A53Fix835769() : MachineFunctionPass(ID) {} + explicit AArch64A53Fix835769() : MachineFunctionPass(ID) { + initializeAArch64A53Fix835769Pass(*PassRegistry::getPassRegistry()); + } bool runOnMachineFunction(MachineFunction &F) override; @@ -107,6 +109,9 @@ } // end anonymous namespace +INITIALIZE_PASS(AArch64A53Fix835769, "aarch64-fix-cortex-a53-835769-pass", + "AArch64 fix for A53 erratum 835769", false, false) + //===----------------------------------------------------------------------===// bool Index: lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp 
=================================================================== --- lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp +++ lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp @@ -95,10 +95,6 @@ } } -namespace llvm { -static void initializeAArch64A57FPLoadBalancingPass(PassRegistry &); -} - //===----------------------------------------------------------------------===// namespace { Index: lib/Target/AArch64/AArch64AddressTypePromotion.cpp =================================================================== --- lib/Target/AArch64/AArch64AddressTypePromotion.cpp +++ lib/Target/AArch64/AArch64AddressTypePromotion.cpp @@ -47,10 +47,6 @@ #define DEBUG_TYPE "aarch64-type-promotion" static cl::opt -EnableAddressTypePromotion("aarch64-type-promotion", cl::Hidden, - cl::desc("Enable the type promotion pass"), - cl::init(true)); -static cl::opt EnableMerge("aarch64-type-promotion-merge", cl::Hidden, cl::desc("Enable merging of redundant sexts when one is dominating" " the other."), @@ -62,10 +58,6 @@ // AArch64AddressTypePromotion //===----------------------------------------------------------------------===// -namespace llvm { -void initializeAArch64AddressTypePromotionPass(PassRegistry &); -} - namespace { class AArch64AddressTypePromotion : public FunctionPass { @@ -481,7 +473,7 @@ if (skipFunction(F)) return false; - if (!EnableAddressTypePromotion || F.isDeclaration()) + if (F.isDeclaration()) return false; Func = &F; ConsideredSExtType = Type::getInt64Ty(Func->getContext()); Index: lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp =================================================================== --- lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp +++ lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp @@ -61,10 +61,6 @@ STATISTIC(NumCopiesDeleted, "Number of cross-class copies deleted"); STATISTIC(NumCopiesInserted, "Number of cross-class copies inserted"); -namespace llvm { -void initializeAArch64AdvSIMDScalarPass(PassRegistry &); -} - #define AARCH64_ADVSIMD_NAME "AdvSIMD 
Scalar Operation Optimization" namespace { Index: lib/Target/AArch64/AArch64BranchRelaxation.cpp =================================================================== --- lib/Target/AArch64/AArch64BranchRelaxation.cpp +++ lib/Target/AArch64/AArch64BranchRelaxation.cpp @@ -26,10 +26,6 @@ #define DEBUG_TYPE "aarch64-branch-relax" -static cl::opt -BranchRelaxation("aarch64-branch-relax", cl::Hidden, cl::init(true), - cl::desc("Relax out of range conditional branches")); - static cl::opt TBZDisplacementBits("aarch64-tbz-offset-bits", cl::Hidden, cl::init(14), cl::desc("Restrict range of TB[N]Z instructions (DEBUG)")); @@ -480,10 +476,6 @@ bool AArch64BranchRelaxation::runOnMachineFunction(MachineFunction &mf) { MF = &mf; - // If the pass is disabled, just bail early. - if (!BranchRelaxation) - return false; - DEBUG(dbgs() << "***** AArch64BranchRelaxation *****\n"); TII = (const AArch64InstrInfo *)MF->getSubtarget().getInstrInfo(); Index: lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp =================================================================== --- lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp +++ lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp @@ -33,10 +33,14 @@ #include "llvm/CodeGen/MachineRegisterInfo.h" using namespace llvm; +#define TLSCLEANUP_PASS_NAME "AArch64 Local Dynamic TLS Access Clean-up" + namespace { struct LDTLSCleanup : public MachineFunctionPass { static char ID; - LDTLSCleanup() : MachineFunctionPass(ID) {} + LDTLSCleanup() : MachineFunctionPass(ID) { + initializeLDTLSCleanupPass(*PassRegistry::getPassRegistry()); + } bool runOnMachineFunction(MachineFunction &MF) override { if (skipFunction(*MF.getFunction())) @@ -128,9 +132,7 @@ return Copy; } - const char *getPassName() const override { - return "Local Dynamic TLS Access Clean-up"; - } + const char *getPassName() const override { return TLSCLEANUP_PASS_NAME; } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesCFG(); @@ -140,6 +142,9 @@ }; 
} +INITIALIZE_PASS(LDTLSCleanup, "aarch64-local-dynamic-tls-cleanup", + TLSCLEANUP_PASS_NAME, false, false) + char LDTLSCleanup::ID = 0; FunctionPass *llvm::createAArch64CleanupLocalDynamicTLSPass() { return new LDTLSCleanup(); Index: lib/Target/AArch64/AArch64CollectLOH.cpp =================================================================== --- lib/Target/AArch64/AArch64CollectLOH.cpp +++ lib/Target/AArch64/AArch64CollectLOH.cpp @@ -164,10 +164,6 @@ STATISTIC(NumADRSimpleCandidate, "Number of simplifiable ADRP + ADD"); STATISTIC(NumADRComplexCandidate, "Number of too complex ADRP + ADD"); -namespace llvm { -void initializeAArch64CollectLOHPass(PassRegistry &); -} - #define AARCH64_COLLECT_LOH_NAME "AArch64 Collect Linker Optimization Hint (LOH)" namespace { Index: lib/Target/AArch64/AArch64ConditionOptimizer.cpp =================================================================== --- lib/Target/AArch64/AArch64ConditionOptimizer.cpp +++ lib/Target/AArch64/AArch64ConditionOptimizer.cpp @@ -95,7 +95,9 @@ typedef std::tuple CmpInfo; static char ID; - AArch64ConditionOptimizer() : MachineFunctionPass(ID) {} + AArch64ConditionOptimizer() : MachineFunctionPass(ID) { + initializeAArch64ConditionOptimizerPass(*PassRegistry::getPassRegistry()); + } void getAnalysisUsage(AnalysisUsage &AU) const override; MachineInstr *findSuitableCompare(MachineBasicBlock *MBB); CmpInfo adjustCmp(MachineInstr *CmpMI, AArch64CC::CondCode Cmp); @@ -111,10 +113,6 @@ char AArch64ConditionOptimizer::ID = 0; -namespace llvm { -void initializeAArch64ConditionOptimizerPass(PassRegistry &); -} - INITIALIZE_PASS_BEGIN(AArch64ConditionOptimizer, "aarch64-condopt", "AArch64 CondOpt Pass", false, false) INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree) Index: lib/Target/AArch64/AArch64ConditionalCompares.cpp =================================================================== --- lib/Target/AArch64/AArch64ConditionalCompares.cpp +++ lib/Target/AArch64/AArch64ConditionalCompares.cpp @@ -732,7 +732,9 @@ 
public: static char ID; - AArch64ConditionalCompares() : MachineFunctionPass(ID) {} + AArch64ConditionalCompares() : MachineFunctionPass(ID) { + initializeAArch64ConditionalComparesPass(*PassRegistry::getPassRegistry()); + } void getAnalysisUsage(AnalysisUsage &AU) const override; bool runOnMachineFunction(MachineFunction &MF) override; const char *getPassName() const override { @@ -750,10 +752,6 @@ char AArch64ConditionalCompares::ID = 0; -namespace llvm { -void initializeAArch64ConditionalComparesPass(PassRegistry &); -} - INITIALIZE_PASS_BEGIN(AArch64ConditionalCompares, "aarch64-ccmp", "AArch64 CCMP Pass", false, false) INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree) Index: lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp =================================================================== --- lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp +++ lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp @@ -26,10 +26,6 @@ STATISTIC(NumDeadDefsReplaced, "Number of dead definitions replaced"); -namespace llvm { -void initializeAArch64DeadRegisterDefinitionsPass(PassRegistry &); -} - #define AARCH64_DEAD_REG_DEF_NAME "AArch64 Dead register definitions" namespace { Index: lib/Target/AArch64/AArch64PromoteConstant.cpp =================================================================== --- lib/Target/AArch64/AArch64PromoteConstant.cpp +++ lib/Target/AArch64/AArch64PromoteConstant.cpp @@ -101,7 +101,9 @@ }; static char ID; - AArch64PromoteConstant() : ModulePass(ID) {} + AArch64PromoteConstant() : ModulePass(ID) { + initializeAArch64PromoteConstantPass(*PassRegistry::getPassRegistry()); + } const char *getPassName() const override { return "AArch64 Promote Constant"; } @@ -214,10 +216,6 @@ char AArch64PromoteConstant::ID = 0; -namespace llvm { -void initializeAArch64PromoteConstantPass(PassRegistry &); -} - INITIALIZE_PASS_BEGIN(AArch64PromoteConstant, "aarch64-promote-const", "AArch64 Promote Constant Pass", false, false) 
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) Index: lib/Target/AArch64/AArch64RedundantCopyElimination.cpp =================================================================== --- lib/Target/AArch64/AArch64RedundantCopyElimination.cpp +++ lib/Target/AArch64/AArch64RedundantCopyElimination.cpp @@ -39,10 +39,6 @@ STATISTIC(NumCopiesRemoved, "Number of copies removed."); -namespace llvm { -void initializeAArch64RedundantCopyEliminationPass(PassRegistry &); -} - namespace { class AArch64RedundantCopyElimination : public MachineFunctionPass { const MachineRegisterInfo *MRI; @@ -50,7 +46,10 @@ public: static char ID; - AArch64RedundantCopyElimination() : MachineFunctionPass(ID) {} + AArch64RedundantCopyElimination() : MachineFunctionPass(ID) { + initializeAArch64RedundantCopyEliminationPass( + *PassRegistry::getPassRegistry()); + } bool optimizeCopy(MachineBasicBlock *MBB); bool runOnMachineFunction(MachineFunction &MF) override; MachineFunctionProperties getRequiredProperties() const override { Index: lib/Target/AArch64/AArch64StorePairSuppress.cpp =================================================================== --- lib/Target/AArch64/AArch64StorePairSuppress.cpp +++ lib/Target/AArch64/AArch64StorePairSuppress.cpp @@ -25,6 +25,8 @@ #define DEBUG_TYPE "aarch64-stp-suppress" +#define STPSUPPRESS_PASS_NAME "AArch64 Store Pair Suppression" + namespace { class AArch64StorePairSuppress : public MachineFunctionPass { const AArch64InstrInfo *TII; @@ -36,12 +38,12 @@ public: static char ID; - AArch64StorePairSuppress() : MachineFunctionPass(ID) {} - - const char *getPassName() const override { - return "AArch64 Store Pair Suppression"; + AArch64StorePairSuppress() : MachineFunctionPass(ID) { + initializeAArch64StorePairSuppressPass(*PassRegistry::getPassRegistry()); } + const char *getPassName() const override { return STPSUPPRESS_PASS_NAME; } + bool runOnMachineFunction(MachineFunction &F) override; private: @@ -59,6 +61,9 @@ char AArch64StorePairSuppress::ID = 0; } 
// anonymous +INITIALIZE_PASS(AArch64StorePairSuppress, "aarch64-stp-suppress", + STPSUPPRESS_PASS_NAME, false, false) + FunctionPass *llvm::createAArch64StorePairSuppressPass() { return new AArch64StorePairSuppress(); } Index: lib/Target/AArch64/AArch64TargetMachine.cpp =================================================================== --- lib/Target/AArch64/AArch64TargetMachine.cpp +++ lib/Target/AArch64/AArch64TargetMachine.cpp @@ -34,53 +34,56 @@ #include "llvm/Transforms/Scalar.h" using namespace llvm; -static cl::opt -EnableCCMP("aarch64-ccmp", cl::desc("Enable the CCMP formation pass"), - cl::init(true), cl::Hidden); +static cl::opt EnableCCMP("aarch64-enable-ccmp", + cl::desc("Enable the CCMP formation pass"), + cl::init(true), cl::Hidden); -static cl::opt EnableMCR("aarch64-mcr", +static cl::opt EnableMCR("aarch64-enable-mcr", cl::desc("Enable the machine combiner pass"), cl::init(true), cl::Hidden); -static cl::opt -EnableStPairSuppress("aarch64-stp-suppress", cl::desc("Suppress STP for AArch64"), - cl::init(true), cl::Hidden); - -static cl::opt -EnableAdvSIMDScalar("aarch64-simd-scalar", cl::desc("Enable use of AdvSIMD scalar" - " integer instructions"), cl::init(false), cl::Hidden); - -static cl::opt -EnablePromoteConstant("aarch64-promote-const", cl::desc("Enable the promote " - "constant pass"), cl::init(true), cl::Hidden); +static cl::opt EnableStPairSuppress("aarch64-enable-stp-suppress", + cl::desc("Suppress STP for AArch64"), + cl::init(true), cl::Hidden); -static cl::opt -EnableCollectLOH("aarch64-collect-loh", cl::desc("Enable the pass that emits the" - " linker optimization hints (LOH)"), cl::init(true), - cl::Hidden); +static cl::opt EnableAdvSIMDScalar( + "aarch64-enable-simd-scalar", + cl::desc("Enable use of AdvSIMD scalar integer instructions"), + cl::init(false), cl::Hidden); static cl::opt -EnableDeadRegisterElimination("aarch64-dead-def-elimination", cl::Hidden, - cl::desc("Enable the pass that removes dead" - " definitons and replaces 
stores to" - " them with stores to the zero" - " register"), - cl::init(true)); + EnablePromoteConstant("aarch64-enable-promote-const", + cl::desc("Enable the promote constant pass"), + cl::init(true), cl::Hidden); -static cl::opt -EnableRedundantCopyElimination("aarch64-redundant-copy-elim", - cl::desc("Enable the redundant copy elimination pass"), - cl::init(true), cl::Hidden); +static cl::opt EnableCollectLOH( + "aarch64-enable-collect-loh", + cl::desc("Enable the pass that emits the linker optimization hints (LOH)"), + cl::init(true), cl::Hidden); static cl::opt -EnableLoadStoreOpt("aarch64-load-store-opt", cl::desc("Enable the load/store pair" - " optimization pass"), cl::init(true), cl::Hidden); - -static cl::opt -EnableAtomicTidy("aarch64-atomic-cfg-tidy", cl::Hidden, - cl::desc("Run SimplifyCFG after expanding atomic operations" - " to make use of cmpxchg flow-based information"), - cl::init(true)); + EnableDeadRegisterElimination("aarch64-enable-dead-defs", cl::Hidden, + cl::desc("Enable the pass that removes dead" + " definitons and replaces stores to" + " them with stores to the zero" + " register"), + cl::init(true)); + +static cl::opt EnableRedundantCopyElimination( + "aarch64-enable-copyelim", + cl::desc("Enable the redundant copy elimination pass"), cl::init(true), + cl::Hidden); + +static cl::opt EnableLoadStoreOpt("aarch64-enable-ldst-opt", + cl::desc("Enable the load/store pair" + " optimization pass"), + cl::init(true), cl::Hidden); + +static cl::opt EnableAtomicTidy( + "aarch64-enable-atomic-cfg-tidy", cl::Hidden, + cl::desc("Run SimplifyCFG after expanding atomic operations" + " to make use of cmpxchg flow-based information"), + cl::init(true)); static cl::opt EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden, @@ -88,9 +91,9 @@ cl::init(true)); static cl::opt -EnableCondOpt("aarch64-condopt", - cl::desc("Enable the condition optimizer pass"), - cl::init(true), cl::Hidden); + EnableCondOpt("aarch64-enable-condopt", + 
cl::desc("Enable the condition optimizer pass"), + cl::init(true), cl::Hidden); static cl::opt EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden, @@ -98,17 +101,26 @@ cl::init(false)); static cl::opt -EnableGEPOpt("aarch64-gep-opt", cl::Hidden, - cl::desc("Enable optimizations on complex GEPs"), - cl::init(false)); + EnableAddressTypePromotion("aarch64-enable-type-promotion", cl::Hidden, + cl::desc("Enable the type promotion pass"), + cl::init(true)); + +static cl::opt + EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden, + cl::desc("Enable optimizations on complex GEPs"), + cl::init(false)); + +static cl::opt + BranchRelaxation("aarch64-enable-branch-relax", cl::Hidden, cl::init(true), + cl::desc("Relax out of range conditional branches")); // FIXME: Unify control over GlobalMerge. static cl::opt -EnableGlobalMerge("aarch64-global-merge", cl::Hidden, - cl::desc("Enable the global merge pass")); + EnableGlobalMerge("aarch64-enable-global-merge", cl::Hidden, + cl::desc("Enable the global merge pass")); static cl::opt - EnableLoopDataPrefetch("aarch64-loop-data-prefetch", cl::Hidden, + EnableLoopDataPrefetch("aarch64-enable-loop-data-prefetch", cl::Hidden, cl::desc("Enable the loop data prefetch pass"), cl::init(true)); @@ -119,8 +131,21 @@ RegisterTargetMachine Z(TheARM64Target); auto PR = PassRegistry::getPassRegistry(); initializeGlobalISel(*PR); + initializeAArch64A53Fix835769Pass(*PR); + initializeAArch64A57FPLoadBalancingPass(*PR); + initializeAArch64AddressTypePromotionPass(*PR); + initializeAArch64AdvSIMDScalarPass(*PR); + initializeAArch64BranchRelaxationPass(*PR); + initializeAArch64CollectLOHPass(*PR); + initializeAArch64ConditionalComparesPass(*PR); + initializeAArch64ConditionOptimizerPass(*PR); + initializeAArch64DeadRegisterDefinitionsPass(*PR); initializeAArch64ExpandPseudoPass(*PR); initializeAArch64LoadStoreOptPass(*PR); + initializeAArch64PromoteConstantPass(*PR); + initializeAArch64RedundantCopyEliminationPass(*PR); + 
initializeAArch64StorePairSuppressPass(*PR); + initializeLDTLSCleanupPass(*PR); } //===----------------------------------------------------------------------===// @@ -374,7 +399,7 @@ addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize)); } - if (TM->getOptLevel() != CodeGenOpt::None) + if (TM->getOptLevel() != CodeGenOpt::None && EnableAddressTypePromotion) addPass(createAArch64AddressTypePromotionPass()); return false; @@ -461,7 +486,8 @@ addPass(createAArch64A53Fix835769()); // Relax conditional branch instructions if they're otherwise out of // range of their destination. - addPass(createAArch64BranchRelaxation()); + if (BranchRelaxation) + addPass(createAArch64BranchRelaxation()); if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH && TM->getTargetTriple().isOSBinFormatMachO()) addPass(createAArch64CollectLOHPass()); Index: test/CodeGen/AArch64/aarch64-gep-opt.ll =================================================================== --- test/CodeGen/AArch64/aarch64-gep-opt.ll +++ test/CodeGen/AArch64/aarch64-gep-opt.ll @@ -1,8 +1,8 @@ -; RUN: llc -O3 -aarch64-gep-opt=true -verify-machineinstrs %s -o - | FileCheck %s -; RUN: llc -O3 -aarch64-gep-opt=true -mattr=-use-aa -print-after=codegenprepare < %s >%t 2>&1 && FileCheck --check-prefix=CHECK-NoAA <%t %s -; RUN: llc -O3 -aarch64-gep-opt=true -mattr=+use-aa -print-after=codegenprepare < %s >%t 2>&1 && FileCheck --check-prefix=CHECK-UseAA <%t %s -; RUN: llc -O3 -aarch64-gep-opt=true -print-after=codegenprepare -mcpu=cyclone < %s >%t 2>&1 && FileCheck --check-prefix=CHECK-NoAA <%t %s -; RUN: llc -O3 -aarch64-gep-opt=true -print-after=codegenprepare -mcpu=cortex-a53 < %s >%t 2>&1 && FileCheck --check-prefix=CHECK-UseAA <%t %s +; RUN: llc -O3 -aarch64-enable-gep-opt=true -verify-machineinstrs %s -o - | FileCheck %s +; RUN: llc -O3 -aarch64-enable-gep-opt=true -mattr=-use-aa -print-after=codegenprepare < %s >%t 2>&1 && FileCheck --check-prefix=CHECK-NoAA <%t %s +; RUN: llc -O3 
-aarch64-enable-gep-opt=true -mattr=+use-aa -print-after=codegenprepare < %s >%t 2>&1 && FileCheck --check-prefix=CHECK-UseAA <%t %s +; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare -mcpu=cyclone < %s >%t 2>&1 && FileCheck --check-prefix=CHECK-NoAA <%t %s +; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare -mcpu=cortex-a53 < %s >%t 2>&1 && FileCheck --check-prefix=CHECK-UseAA <%t %s target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128" target triple = "aarch64-linux-gnueabi" Index: test/CodeGen/AArch64/aarch64-loop-gep-opt.ll =================================================================== --- test/CodeGen/AArch64/aarch64-loop-gep-opt.ll +++ test/CodeGen/AArch64/aarch64-loop-gep-opt.ll @@ -1,4 +1,4 @@ -; RUN: llc -O3 -aarch64-gep-opt=true -print-after=codegenprepare -mcpu=cortex-a53 < %s >%t 2>&1 && FileCheck <%t %s +; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare -mcpu=cortex-a53 < %s >%t 2>&1 && FileCheck <%t %s ; REQUIRES: asserts target triple = "aarch64--linux-android" Index: test/CodeGen/AArch64/aarch64-stp-cluster.ll =================================================================== --- test/CodeGen/AArch64/aarch64-stp-cluster.ll +++ test/CodeGen/AArch64/aarch64-stp-cluster.ll @@ -1,5 +1,5 @@ ; REQUIRES: asserts -; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a57 -verify-misched -debug-only=misched -aarch64-stp-suppress=false -o - 2>&1 > /dev/null | FileCheck %s +; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a57 -verify-misched -debug-only=misched -aarch64-enable-stp-suppress=false -o - 2>&1 > /dev/null | FileCheck %s ; CHECK: ********** MI Scheduling ********** ; CHECK-LABEL: stp_i64_scale:BB#0 Index: test/CodeGen/AArch64/addsub_ext.ll =================================================================== --- test/CodeGen/AArch64/addsub_ext.ll +++ test/CodeGen/AArch64/addsub_ext.ll @@ -1,4 +1,4 @@ -; RUN: llc -verify-machineinstrs %s -o - -mtriple=aarch64-linux-gnu 
-aarch64-atomic-cfg-tidy=0 | FileCheck %s +; RUN: llc -verify-machineinstrs %s -o - -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s @var8 = global i8 0 @var16 = global i16 0 Index: test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll =================================================================== --- test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll +++ test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll @@ -1,7 +1,7 @@ -; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=apple -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=true | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-NOOPT -; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=apple -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=false | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-OPT -; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=generic -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=true | FileCheck %s -check-prefix=GENERIC -check-prefix=GENERIC-NOOPT -; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=generic -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=false | FileCheck %s -check-prefix=GENERIC -check-prefix=GENERIC-OPT +; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=apple -aarch64-enable-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=true | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-NOOPT +; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=apple -aarch64-enable-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=false | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-OPT +; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=generic -aarch64-enable-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=true | FileCheck %s -check-prefix=GENERIC 
-check-prefix=GENERIC-NOOPT +; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=generic -aarch64-enable-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=false | FileCheck %s -check-prefix=GENERIC -check-prefix=GENERIC-OPT define <2 x i64> @bar(<2 x i64> %a, <2 x i64> %b) nounwind readnone { ; CHECK-LABEL: bar: Index: test/CodeGen/AArch64/arm64-addr-mode-folding.ll =================================================================== --- test/CodeGen/AArch64/arm64-addr-mode-folding.ll +++ test/CodeGen/AArch64/arm64-addr-mode-folding.ll @@ -1,4 +1,4 @@ -; RUN: llc -O3 -mtriple arm64-apple-ios3 -aarch64-gep-opt=false %s -o - | FileCheck %s +; RUN: llc -O3 -mtriple arm64-apple-ios3 -aarch64-enable-gep-opt=false %s -o - | FileCheck %s ; @block = common global i8* null, align 8 Index: test/CodeGen/AArch64/arm64-addr-type-promotion.ll =================================================================== --- test/CodeGen/AArch64/arm64-addr-type-promotion.ll +++ test/CodeGen/AArch64/arm64-addr-type-promotion.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mtriple=arm64-apple-ios3.0.0 -aarch64-collect-loh=false | FileCheck %s +; RUN: llc < %s -mtriple=arm64-apple-ios3.0.0 -aarch64-enable-collect-loh=false | FileCheck %s ; rdar://13452552 ; Disable the collecting of LOH so that the labels do not get in the ; way of the NEXT patterns. Index: test/CodeGen/AArch64/arm64-ands-bad-peephole.ll =================================================================== --- test/CodeGen/AArch64/arm64-ands-bad-peephole.ll +++ test/CodeGen/AArch64/arm64-ands-bad-peephole.ll @@ -1,4 +1,4 @@ -; RUN: llc %s -o - -aarch64-atomic-cfg-tidy=0 | FileCheck %s +; RUN: llc %s -o - -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s ; Check that ANDS (tst) is not merged with ADD when the immediate ; is not 0. 
; Index: test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll =================================================================== --- test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll +++ test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=arm64-apple-ios7.0.0 -aarch64-dead-def-elimination=false < %s | FileCheck %s +; RUN: llc -mtriple=arm64-apple-ios7.0.0 -aarch64-enable-dead-defs=false < %s | FileCheck %s target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" Index: test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll =================================================================== --- test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll +++ test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll @@ -1,5 +1,5 @@ -; RUN: llc -mtriple aarch64_be < %s -aarch64-load-store-opt=false -O1 -o - | FileCheck %s -; RUN: llc -mtriple aarch64_be < %s -aarch64-load-store-opt=false -O0 -fast-isel=true -o - | FileCheck %s +; RUN: llc -mtriple aarch64_be < %s -aarch64-enable-ldst-opt=false -O1 -o - | FileCheck %s +; RUN: llc -mtriple aarch64_be < %s -aarch64-enable-ldst-opt=false -O0 -fast-isel=true -o - | FileCheck %s ; CHECK-LABEL: test_i64_f64: define void @test_i64_f64(double* %p, i64* %q) { Index: test/CodeGen/AArch64/arm64-big-endian-vector-callee.ll =================================================================== --- test/CodeGen/AArch64/arm64-big-endian-vector-callee.ll +++ test/CodeGen/AArch64/arm64-big-endian-vector-callee.ll @@ -1,5 +1,5 @@ -; RUN: llc -mtriple aarch64_be < %s -aarch64-load-store-opt=false -o - | FileCheck %s -; RUN: llc -mtriple aarch64_be < %s -fast-isel=true -aarch64-load-store-opt=false -o - | FileCheck %s +; RUN: llc -mtriple aarch64_be < %s -aarch64-enable-ldst-opt=false -o - | FileCheck %s +; RUN: llc -mtriple aarch64_be < %s -fast-isel=true -aarch64-enable-ldst-opt=false -o - | FileCheck %s ; CHECK-LABEL: test_i64_f64: define i64 @test_i64_f64(double %p) { Index: 
test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll =================================================================== --- test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll +++ test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll @@ -1,5 +1,5 @@ -; RUN: llc -mtriple aarch64_be < %s -aarch64-load-store-opt=false -o - | FileCheck %s -; RUN: llc -mtriple aarch64_be < %s -aarch64-load-store-opt=false -fast-isel=true -O0 -o - | FileCheck %s +; RUN: llc -mtriple aarch64_be < %s -aarch64-enable-ldst-opt=false -o - | FileCheck %s +; RUN: llc -mtriple aarch64_be < %s -aarch64-enable-ldst-opt=false -fast-isel=true -O0 -o - | FileCheck %s ; Note, we split the functions in to multiple BBs below to isolate the call ; instruction we want to test, from fast-isel failing to select instructions Index: test/CodeGen/AArch64/arm64-ccmp-heuristics.ll =================================================================== --- test/CodeGen/AArch64/arm64-ccmp-heuristics.ll +++ test/CodeGen/AArch64/arm64-ccmp-heuristics.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mcpu=cyclone -verify-machineinstrs -aarch64-ccmp | FileCheck %s +; RUN: llc < %s -mcpu=cyclone -verify-machineinstrs -aarch64-enable-ccmp | FileCheck %s target triple = "arm64-apple-ios7.0.0" @channelColumns = external global i64 Index: test/CodeGen/AArch64/arm64-ccmp.ll =================================================================== --- test/CodeGen/AArch64/arm64-ccmp.ll +++ test/CodeGen/AArch64/arm64-ccmp.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mcpu=cyclone -verify-machineinstrs -aarch64-ccmp -aarch64-stress-ccmp | FileCheck %s +; RUN: llc < %s -mcpu=cyclone -verify-machineinstrs -aarch64-enable-ccmp -aarch64-stress-ccmp | FileCheck %s target triple = "arm64-apple-ios" ; CHECK: single_same Index: test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll =================================================================== --- test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll +++ 
test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=arm64-apple-ios -O3 -aarch64-collect-loh -aarch64-collect-loh-bb-only=true -aarch64-collect-loh-pre-collect-register=false < %s -o - | FileCheck %s +; RUN: llc -mtriple=arm64-apple-ios -O3 -aarch64-enable-collect-loh -aarch64-collect-loh-bb-only=true -aarch64-collect-loh-pre-collect-register=false < %s -o - | FileCheck %s ; Check that the LOH analysis does not crash when the analysed chained ; contains instructions that are filtered out. ; Index: test/CodeGen/AArch64/arm64-collect-loh-str.ll =================================================================== --- test/CodeGen/AArch64/arm64-collect-loh-str.ll +++ test/CodeGen/AArch64/arm64-collect-loh-str.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=arm64-apple-ios -O2 -aarch64-collect-loh -aarch64-collect-loh-bb-only=false < %s -o - | FileCheck %s +; RUN: llc -mtriple=arm64-apple-ios -O2 -aarch64-enable-collect-loh -aarch64-collect-loh-bb-only=false < %s -o - | FileCheck %s ; Test case for . ; AdrpAddStr cannot be used when the store uses same ; register as address and value. 
Indeed, the related Index: test/CodeGen/AArch64/arm64-collect-loh.ll =================================================================== --- test/CodeGen/AArch64/arm64-collect-loh.ll +++ test/CodeGen/AArch64/arm64-collect-loh.ll @@ -1,5 +1,5 @@ -; RUN: llc -mtriple=arm64-apple-ios -O2 -aarch64-collect-loh -aarch64-collect-loh-bb-only=false < %s -o - | FileCheck %s -; RUN: llc -mtriple=arm64-linux-gnu -O2 -aarch64-collect-loh -aarch64-collect-loh-bb-only=false < %s -o - | FileCheck %s --check-prefix=CHECK-ELF +; RUN: llc -mtriple=arm64-apple-ios -O2 -aarch64-enable-collect-loh -aarch64-collect-loh-bb-only=false < %s -o - | FileCheck %s +; RUN: llc -mtriple=arm64-linux-gnu -O2 -aarch64-enable-collect-loh -aarch64-collect-loh-bb-only=false < %s -o - | FileCheck %s --check-prefix=CHECK-ELF ; CHECK-ELF-NOT: .loh ; CHECK-ELF-NOT: AdrpAdrp Index: test/CodeGen/AArch64/arm64-cse.ll =================================================================== --- test/CodeGen/AArch64/arm64-cse.ll +++ test/CodeGen/AArch64/arm64-cse.ll @@ -1,4 +1,4 @@ -; RUN: llc -O3 < %s -aarch64-atomic-cfg-tidy=0 -aarch64-gep-opt=false -verify-machineinstrs | FileCheck %s +; RUN: llc -O3 < %s -aarch64-enable-atomic-cfg-tidy=0 -aarch64-enable-gep-opt=false -verify-machineinstrs | FileCheck %s target triple = "arm64-apple-ios" ; rdar://12462006 Index: test/CodeGen/AArch64/arm64-early-ifcvt.ll =================================================================== --- test/CodeGen/AArch64/arm64-early-ifcvt.ll +++ test/CodeGen/AArch64/arm64-early-ifcvt.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -stress-early-ifcvt -aarch64-atomic-cfg-tidy=0 | FileCheck %s +; RUN: llc < %s -stress-early-ifcvt -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s target triple = "arm64-apple-macosx" ; CHECK: mm2 Index: test/CodeGen/AArch64/arm64-fp128.ll =================================================================== --- test/CodeGen/AArch64/arm64-fp128.ll +++ test/CodeGen/AArch64/arm64-fp128.ll @@ -1,4 +1,4 @@ -; RUN: llc 
-mtriple=arm64-linux-gnu -verify-machineinstrs -mcpu=cyclone -aarch64-atomic-cfg-tidy=0 < %s | FileCheck %s +; RUN: llc -mtriple=arm64-linux-gnu -verify-machineinstrs -mcpu=cyclone -aarch64-enable-atomic-cfg-tidy=0 < %s | FileCheck %s @lhs = global fp128 zeroinitializer, align 16 @rhs = global fp128 zeroinitializer, align 16 Index: test/CodeGen/AArch64/arm64-frame-index.ll =================================================================== --- test/CodeGen/AArch64/arm64-frame-index.ll +++ test/CodeGen/AArch64/arm64-frame-index.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mtriple=arm64-apple-ios -aarch64-atomic-cfg-tidy=0 | FileCheck %s +; RUN: llc < %s -mtriple=arm64-apple-ios -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s ; rdar://11935841 define void @t1() nounwind ssp { Index: test/CodeGen/AArch64/arm64-neon-add-sub.ll =================================================================== --- test/CodeGen/AArch64/arm64-neon-add-sub.ll +++ test/CodeGen/AArch64/arm64-neon-add-sub.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -aarch64-simd-scalar| FileCheck %s +; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -aarch64-enable-simd-scalar| FileCheck %s define <8 x i8> @add8xi8(<8 x i8> %A, <8 x i8> %B) { ;CHECK: add {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b Index: test/CodeGen/AArch64/arm64-promote-const.ll =================================================================== --- test/CodeGen/AArch64/arm64-promote-const.ll +++ test/CodeGen/AArch64/arm64-promote-const.ll @@ -3,7 +3,7 @@ ; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -disable-machine-cse -aarch64-stress-promote-const -mcpu=cyclone | FileCheck -check-prefix=PROMOTED %s ; The REGULAR run just checks that the inputs passed to promote const expose ; the appropriate patterns. 
-; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -disable-machine-cse -aarch64-promote-const=false -mcpu=cyclone | FileCheck -check-prefix=REGULAR %s +; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -disable-machine-cse -aarch64-enable-promote-const=false -mcpu=cyclone | FileCheck -check-prefix=REGULAR %s %struct.uint8x16x4_t = type { [4 x <16 x i8>] } Index: test/CodeGen/AArch64/arm64-stp-aa.ll =================================================================== --- test/CodeGen/AArch64/arm64-stp-aa.ll +++ test/CodeGen/AArch64/arm64-stp-aa.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mtriple=arm64-eabi -enable-misched=false -aarch64-stp-suppress=false -verify-machineinstrs | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -enable-misched=false -aarch64-enable-stp-suppress=false -verify-machineinstrs | FileCheck %s ; The next set of tests makes sure we can combine the second instruction into ; the first. Index: test/CodeGen/AArch64/arm64-stp.ll =================================================================== --- test/CodeGen/AArch64/arm64-stp.ll +++ test/CodeGen/AArch64/arm64-stp.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mtriple=arm64-eabi -aarch64-stp-suppress=false -verify-machineinstrs -mcpu=cyclone | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-enable-stp-suppress=false -verify-machineinstrs -mcpu=cyclone | FileCheck %s ; CHECK-LABEL: stp_int ; CHECK: stp w0, w1, [x2] Index: test/CodeGen/AArch64/arm64-xaluo.ll =================================================================== --- test/CodeGen/AArch64/arm64-xaluo.ll +++ test/CodeGen/AArch64/arm64-xaluo.ll @@ -1,5 +1,5 @@ -; RUN: llc < %s -mtriple=arm64-eabi -aarch64-atomic-cfg-tidy=0 -disable-post-ra -verify-machineinstrs | FileCheck %s -; RUN: llc < %s -mtriple=arm64-eabi -aarch64-atomic-cfg-tidy=0 -fast-isel -fast-isel-abort=1 -disable-post-ra -verify-machineinstrs | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-enable-atomic-cfg-tidy=0 -disable-post-ra -verify-machineinstrs | FileCheck %s +; RUN: 
llc < %s -mtriple=arm64-eabi -aarch64-enable-atomic-cfg-tidy=0 -fast-isel -fast-isel-abort=1 -disable-post-ra -verify-machineinstrs | FileCheck %s ; ; Get the actual value of the overflow bit. Index: test/CodeGen/AArch64/blockaddress.ll =================================================================== --- test/CodeGen/AArch64/blockaddress.ll +++ test/CodeGen/AArch64/blockaddress.ll @@ -1,5 +1,5 @@ -; RUN: llc -mtriple=aarch64-none-linux-gnu -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs < %s | FileCheck %s -; RUN: llc -code-model=large -mtriple=aarch64-none-linux-gnu -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-LARGE %s +; RUN: llc -mtriple=aarch64-none-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -code-model=large -mtriple=aarch64-none-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-LARGE %s @addr = global i8* null Index: test/CodeGen/AArch64/breg.ll =================================================================== --- test/CodeGen/AArch64/breg.ll +++ test/CodeGen/AArch64/breg.ll @@ -1,4 +1,4 @@ -; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-linux-gnu -aarch64-atomic-cfg-tidy=0 | FileCheck %s +; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s @stored_label = global i8* null Index: test/CodeGen/AArch64/cmp-const-max.ll =================================================================== --- test/CodeGen/AArch64/cmp-const-max.ll +++ test/CodeGen/AArch64/cmp-const-max.ll @@ -1,4 +1,4 @@ -; RUN: llc -verify-machineinstrs -aarch64-atomic-cfg-tidy=0 < %s -mtriple=aarch64-none-eabihf -fast-isel=false | FileCheck %s +; RUN: llc -verify-machineinstrs -aarch64-enable-atomic-cfg-tidy=0 < %s -mtriple=aarch64-none-eabihf -fast-isel=false | FileCheck %s define i32 @ule_64_max(i64 %p) { Index: test/CodeGen/AArch64/directcond.ll 
=================================================================== --- test/CodeGen/AArch64/directcond.ll +++ test/CodeGen/AArch64/directcond.ll @@ -1,5 +1,5 @@ -; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 -aarch64-atomic-cfg-tidy=0 | FileCheck %s -; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 -aarch64-atomic-cfg-tidy=0 | FileCheck --check-prefix=CHECK-NOFP %s +; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s +; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 -aarch64-enable-atomic-cfg-tidy=0 | FileCheck --check-prefix=CHECK-NOFP %s define i32 @test_select_i32(i1 %bit, i32 %a, i32 %b) { ; CHECK-LABEL: test_select_i32: Index: test/CodeGen/AArch64/fast-isel-branch_weights.ll =================================================================== --- test/CodeGen/AArch64/fast-isel-branch_weights.ll +++ test/CodeGen/AArch64/fast-isel-branch_weights.ll @@ -1,5 +1,5 @@ -; RUN: llc -mtriple=arm64-apple-darwin -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs < %s | FileCheck %s -; RUN: llc -mtriple=arm64-apple-darwin -aarch64-atomic-cfg-tidy=0 -fast-isel -fast-isel-abort=1 -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=arm64-apple-darwin -aarch64-enable-atomic-cfg-tidy=0 -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=arm64-apple-darwin -aarch64-enable-atomic-cfg-tidy=0 -fast-isel -fast-isel-abort=1 -verify-machineinstrs < %s | FileCheck %s ; Test if the BBs are reordred according to their branch weights. 
define i64 @branch_weights_test(i64 %a, i64 %b) { Index: test/CodeGen/AArch64/fast-isel-cbz.ll =================================================================== --- test/CodeGen/AArch64/fast-isel-cbz.ll +++ test/CodeGen/AArch64/fast-isel-cbz.ll @@ -1,4 +1,4 @@ -; RUN: llc -fast-isel -fast-isel-abort=1 -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs -mtriple=aarch64-apple-darwin < %s | FileCheck %s +; RUN: llc -fast-isel -fast-isel-abort=1 -aarch64-enable-atomic-cfg-tidy=0 -verify-machineinstrs -mtriple=aarch64-apple-darwin < %s | FileCheck %s define i32 @icmp_eq_i1(i1 %a) { ; CHECK-LABEL: icmp_eq_i1 Index: test/CodeGen/AArch64/fast-isel-cmp-branch.ll =================================================================== --- test/CodeGen/AArch64/fast-isel-cmp-branch.ll +++ test/CodeGen/AArch64/fast-isel-cmp-branch.ll @@ -1,5 +1,5 @@ -; RUN: llc -aarch64-atomic-cfg-tidy=0 -mtriple=aarch64-apple-darwin < %s | FileCheck %s -; RUN: llc -fast-isel -fast-isel-abort=1 -aarch64-atomic-cfg-tidy=0 -mtriple=aarch64-apple-darwin < %s | FileCheck %s +; RUN: llc -aarch64-enable-atomic-cfg-tidy=0 -mtriple=aarch64-apple-darwin < %s | FileCheck %s +; RUN: llc -fast-isel -fast-isel-abort=1 -aarch64-enable-atomic-cfg-tidy=0 -mtriple=aarch64-apple-darwin < %s | FileCheck %s define i32 @fcmp_oeq(float %x, float %y) { ; CHECK-LABEL: fcmp_oeq Index: test/CodeGen/AArch64/fast-isel-cmp-vec.ll =================================================================== --- test/CodeGen/AArch64/fast-isel-cmp-vec.ll +++ test/CodeGen/AArch64/fast-isel-cmp-vec.ll @@ -1,5 +1,5 @@ ; RUN: llc -mtriple=aarch64-apple-darwin -fast-isel -verify-machineinstrs \ -; RUN: -aarch64-atomic-cfg-tidy=0 -disable-cgp -disable-branch-fold \ +; RUN: -aarch64-enable-atomic-cfg-tidy=0 -disable-cgp -disable-branch-fold \ ; RUN: < %s | FileCheck %s ; Index: test/CodeGen/AArch64/fast-isel-int-ext2.ll =================================================================== --- test/CodeGen/AArch64/fast-isel-int-ext2.ll +++ 
test/CodeGen/AArch64/fast-isel-int-ext2.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=aarch64-apple-darwin -fast-isel -fast-isel-abort=1 -aarch64-atomic-cfg-tidy=false -disable-cgp-branch-opts -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=aarch64-apple-darwin -fast-isel -fast-isel-abort=1 -aarch64-enable-atomic-cfg-tidy=false -disable-cgp-branch-opts -verify-machineinstrs < %s | FileCheck %s ; ; Test folding of the sign-/zero-extend into the load instruction. Index: test/CodeGen/AArch64/fast-isel-tbz.ll =================================================================== --- test/CodeGen/AArch64/fast-isel-tbz.ll +++ test/CodeGen/AArch64/fast-isel-tbz.ll @@ -1,5 +1,5 @@ -; RUN: llc -disable-peephole -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs -mtriple=aarch64-apple-darwin < %s | FileCheck %s -; RUN: llc -disable-peephole -fast-isel -fast-isel-abort=1 -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs -mtriple=aarch64-apple-darwin < %s | FileCheck --check-prefix=CHECK --check-prefix=FAST %s +; RUN: llc -disable-peephole -aarch64-enable-atomic-cfg-tidy=0 -verify-machineinstrs -mtriple=aarch64-apple-darwin < %s | FileCheck %s +; RUN: llc -disable-peephole -fast-isel -fast-isel-abort=1 -aarch64-enable-atomic-cfg-tidy=0 -verify-machineinstrs -mtriple=aarch64-apple-darwin < %s | FileCheck --check-prefix=CHECK --check-prefix=FAST %s define i32 @icmp_eq_i8(i8 zeroext %a) { ; CHECK-LABEL: icmp_eq_i8 Index: test/CodeGen/AArch64/flags-multiuse.ll =================================================================== --- test/CodeGen/AArch64/flags-multiuse.ll +++ test/CodeGen/AArch64/flags-multiuse.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=aarch64-none-linux-gnu -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs -o - %s | FileCheck %s +; RUN: llc -mtriple=aarch64-none-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -verify-machineinstrs -o - %s | FileCheck %s ; LLVM should be able to cope with multiple uses of the same flag-setting ; instruction at different points of a 
routine. Either by rematerializing the Index: test/CodeGen/AArch64/gep-nullptr.ll =================================================================== --- test/CodeGen/AArch64/gep-nullptr.ll +++ test/CodeGen/AArch64/gep-nullptr.ll @@ -1,4 +1,4 @@ -; RUN: llc -O3 -aarch64-gep-opt=true < %s |FileCheck %s +; RUN: llc -O3 -aarch64-enable-gep-opt=true < %s |FileCheck %s target datalayout = "e-m:e-i64:64-i128:128-n8:16:32:64-S128" target triple = "aarch64--linux-gnu" Index: test/CodeGen/AArch64/global-merge-1.ll =================================================================== --- test/CodeGen/AArch64/global-merge-1.ll +++ test/CodeGen/AArch64/global-merge-1.ll @@ -1,11 +1,11 @@ -; RUN: llc %s -mtriple=aarch64-none-linux-gnu -aarch64-global-merge -o - | FileCheck %s -; RUN: llc %s -mtriple=aarch64-none-linux-gnu -aarch64-global-merge -global-merge-on-external -o - | FileCheck %s +; RUN: llc %s -mtriple=aarch64-none-linux-gnu -aarch64-enable-global-merge -o - | FileCheck %s +; RUN: llc %s -mtriple=aarch64-none-linux-gnu -aarch64-enable-global-merge -global-merge-on-external -o - | FileCheck %s -; RUN: llc %s -mtriple=aarch64-linux-gnuabi -aarch64-global-merge -o - | FileCheck %s -; RUN: llc %s -mtriple=aarch64-linux-gnuabi -aarch64-global-merge -global-merge-on-external -o - | FileCheck %s +; RUN: llc %s -mtriple=aarch64-linux-gnuabi -aarch64-enable-global-merge -o - | FileCheck %s +; RUN: llc %s -mtriple=aarch64-linux-gnuabi -aarch64-enable-global-merge -global-merge-on-external -o - | FileCheck %s -; RUN: llc %s -mtriple=aarch64-apple-ios -aarch64-global-merge -o - | FileCheck %s --check-prefix=CHECK-APPLE-IOS -; RUN: llc %s -mtriple=aarch64-apple-ios -aarch64-global-merge -global-merge-on-external -o - | FileCheck %s --check-prefix=CHECK-APPLE-IOS +; RUN: llc %s -mtriple=aarch64-apple-ios -aarch64-enable-global-merge -o - | FileCheck %s --check-prefix=CHECK-APPLE-IOS +; RUN: llc %s -mtriple=aarch64-apple-ios -aarch64-enable-global-merge -global-merge-on-external -o - 
| FileCheck %s --check-prefix=CHECK-APPLE-IOS @m = internal global i32 0, align 4 @n = internal global i32 0, align 4 Index: test/CodeGen/AArch64/global-merge-2.ll =================================================================== --- test/CodeGen/AArch64/global-merge-2.ll +++ test/CodeGen/AArch64/global-merge-2.ll @@ -1,6 +1,6 @@ -; RUN: llc %s -mtriple=aarch64-none-linux-gnu -aarch64-global-merge -global-merge-on-external -o - | FileCheck %s -; RUN: llc %s -mtriple=aarch64-linux-gnuabi -aarch64-global-merge -global-merge-on-external -o - | FileCheck %s -; RUN: llc %s -mtriple=aarch64-apple-ios -aarch64-global-merge -global-merge-on-external -o - | FileCheck %s --check-prefix=CHECK-APPLE-IOS +; RUN: llc %s -mtriple=aarch64-none-linux-gnu -aarch64-enable-global-merge -global-merge-on-external -o - | FileCheck %s +; RUN: llc %s -mtriple=aarch64-linux-gnuabi -aarch64-enable-global-merge -global-merge-on-external -o - | FileCheck %s +; RUN: llc %s -mtriple=aarch64-apple-ios -aarch64-enable-global-merge -global-merge-on-external -o - | FileCheck %s --check-prefix=CHECK-APPLE-IOS @x = global i32 0, align 4 @y = global i32 0, align 4 Index: test/CodeGen/AArch64/global-merge-3.ll =================================================================== --- test/CodeGen/AArch64/global-merge-3.ll +++ test/CodeGen/AArch64/global-merge-3.ll @@ -1,6 +1,6 @@ -; RUN: llc %s -mtriple=aarch64-none-linux-gnu -aarch64-global-merge -global-merge-on-external -disable-post-ra -o - | FileCheck %s -; RUN: llc %s -mtriple=aarch64-linux-gnuabi -aarch64-global-merge -global-merge-on-external -disable-post-ra -o - | FileCheck %s -; RUN: llc %s -mtriple=aarch64-apple-ios -aarch64-global-merge -global-merge-on-external -disable-post-ra -o - | FileCheck %s --check-prefix=CHECK-APPLE-IOS +; RUN: llc %s -mtriple=aarch64-none-linux-gnu -aarch64-enable-global-merge -global-merge-on-external -disable-post-ra -o - | FileCheck %s +; RUN: llc %s -mtriple=aarch64-linux-gnuabi -aarch64-enable-global-merge 
-global-merge-on-external -disable-post-ra -o - | FileCheck %s +; RUN: llc %s -mtriple=aarch64-apple-ios -aarch64-enable-global-merge -global-merge-on-external -disable-post-ra -o - | FileCheck %s --check-prefix=CHECK-APPLE-IOS @x = global [1000 x i32] zeroinitializer, align 1 @y = global [1000 x i32] zeroinitializer, align 1 Index: test/CodeGen/AArch64/global-merge-4.ll =================================================================== --- test/CodeGen/AArch64/global-merge-4.ll +++ test/CodeGen/AArch64/global-merge-4.ll @@ -1,4 +1,4 @@ -; RUN: llc %s -mtriple=aarch64-linux-gnuabi -aarch64-global-merge -o - | FileCheck %s +; RUN: llc %s -mtriple=aarch64-linux-gnuabi -aarch64-enable-global-merge -o - | FileCheck %s target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128" target triple = "arm64-apple-ios7.0.0" Index: test/CodeGen/AArch64/global-merge-group-by-use.ll =================================================================== --- test/CodeGen/AArch64/global-merge-group-by-use.ll +++ test/CodeGen/AArch64/global-merge-group-by-use.ll @@ -1,6 +1,7 @@ -; RUN: llc -mtriple=aarch64-apple-ios -asm-verbose=false -aarch64-collect-loh=false \ -; RUN: -aarch64-global-merge -global-merge-group-by-use -global-merge-ignore-single-use=false \ -; RUN: %s -o - | FileCheck %s +; RUN: llc -mtriple=aarch64-apple-ios -asm-verbose=false \ +; RUN: -aarch64-enable-collect-loh=false -aarch64-enable-global-merge \ +; RUN: -global-merge-group-by-use -global-merge-ignore-single-use=false %s \ +; RUN: -o - | FileCheck %s ; We assume that globals of the same size aren't reordered inside a set. 
Index: test/CodeGen/AArch64/global-merge-ignore-single-use-minsize.ll =================================================================== --- test/CodeGen/AArch64/global-merge-ignore-single-use-minsize.ll +++ test/CodeGen/AArch64/global-merge-ignore-single-use-minsize.ll @@ -1,6 +1,6 @@ -; RUN: llc -mtriple=aarch64-apple-ios -asm-verbose=false -aarch64-collect-loh=false \ -; RUN: -O1 -global-merge-group-by-use -global-merge-ignore-single-use \ -; RUN: %s -o - | FileCheck %s +; RUN: llc -mtriple=aarch64-apple-ios -asm-verbose=false \ +; RUN: -aarch64-enable-collect-loh=false -O1 -global-merge-group-by-use \ +; RUN: -global-merge-ignore-single-use %s -o - | FileCheck %s ; Check that, at -O1, we only merge globals used in minsize functions. ; We assume that globals of the same size aren't reordered inside a set. Index: test/CodeGen/AArch64/global-merge-ignore-single-use.ll =================================================================== --- test/CodeGen/AArch64/global-merge-ignore-single-use.ll +++ test/CodeGen/AArch64/global-merge-ignore-single-use.ll @@ -1,6 +1,7 @@ -; RUN: llc -mtriple=aarch64-apple-ios -asm-verbose=false -aarch64-collect-loh=false \ -; RUN: -aarch64-global-merge -global-merge-group-by-use -global-merge-ignore-single-use \ -; RUN: %s -o - | FileCheck %s +; RUN: llc -mtriple=aarch64-apple-ios -asm-verbose=false \ +; RUN: -aarch64-enable-collect-loh=false -aarch64-enable-global-merge \ +; RUN: -global-merge-group-by-use -global-merge-ignore-single-use %s -o - \ +; RUN: | FileCheck %s ; We assume that globals of the same size aren't reordered inside a set. 
Index: test/CodeGen/AArch64/jump-table.ll =================================================================== --- test/CodeGen/AArch64/jump-table.ll +++ test/CodeGen/AArch64/jump-table.ll @@ -1,6 +1,6 @@ -; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu -aarch64-atomic-cfg-tidy=0 | FileCheck %s -; RUN: llc -code-model=large -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu -aarch64-atomic-cfg-tidy=0 | FileCheck --check-prefix=CHECK-LARGE %s -; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -relocation-model=pic -aarch64-atomic-cfg-tidy=0 -o - %s | FileCheck --check-prefix=CHECK-PIC %s +; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s +; RUN: llc -code-model=large -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 | FileCheck --check-prefix=CHECK-LARGE %s +; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -relocation-model=pic -aarch64-enable-atomic-cfg-tidy=0 -o - %s | FileCheck --check-prefix=CHECK-PIC %s define i32 @test_jumptable(i32 %in) { ; CHECK: test_jumptable Index: test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll =================================================================== --- test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll +++ test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mtriple=aarch64-eabi -aarch64-neon-syntax=apple -aarch64-stp-suppress=false -verify-machineinstrs -asm-verbose=false | FileCheck %s +; RUN: llc < %s -mtriple=aarch64-eabi -aarch64-neon-syntax=apple -aarch64-enable-stp-suppress=false -verify-machineinstrs -asm-verbose=false | FileCheck %s ; CHECK-LABEL: test_strd_sturd: ; CHECK-NEXT: stp d0, d1, [x0, #-8] Index: test/CodeGen/AArch64/ldst-opt.ll =================================================================== --- test/CodeGen/AArch64/ldst-opt.ll +++ test/CodeGen/AArch64/ldst-opt.ll @@ -1,4 +1,4
@@ -; RUN: llc -mtriple=aarch64-linux-gnu -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs -o - %s | FileCheck %s +; RUN: llc -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -verify-machineinstrs -o - %s | FileCheck %s ; This file contains tests for the AArch64 load/store optimizer. Index: test/CodeGen/AArch64/sibling-call.ll =================================================================== --- test/CodeGen/AArch64/sibling-call.ll +++ test/CodeGen/AArch64/sibling-call.ll @@ -1,4 +1,4 @@ -; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -aarch64-load-store-opt=0 | FileCheck %s +; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -aarch64-enable-ldst-opt=0 | FileCheck %s declare void @callee_stack0() declare void @callee_stack8([8 x i32], i64) Index: test/CodeGen/AArch64/tailcall-explicit-sret.ll =================================================================== --- test/CodeGen/AArch64/tailcall-explicit-sret.ll +++ test/CodeGen/AArch64/tailcall-explicit-sret.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mtriple arm64-apple-darwin -aarch64-load-store-opt=false -asm-verbose=false -disable-post-ra | FileCheck %s +; RUN: llc < %s -mtriple arm64-apple-darwin -aarch64-enable-ldst-opt=false -asm-verbose=false -disable-post-ra | FileCheck %s ; Disable the load/store optimizer to avoid having LDP/STPs and simplify checks. 
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" Index: test/CodeGen/AArch64/tailcall-implicit-sret.ll =================================================================== --- test/CodeGen/AArch64/tailcall-implicit-sret.ll +++ test/CodeGen/AArch64/tailcall-implicit-sret.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mtriple arm64-apple-darwin -aarch64-load-store-opt=false -disable-post-ra -asm-verbose=false | FileCheck %s +; RUN: llc < %s -mtriple arm64-apple-darwin -aarch64-enable-ldst-opt=false -disable-post-ra -asm-verbose=false | FileCheck %s ; Disable the load/store optimizer to avoid having LDP/STPs and simplify checks. target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" Index: test/CodeGen/AArch64/tst-br.ll =================================================================== --- test/CodeGen/AArch64/tst-br.ll +++ test/CodeGen/AArch64/tst-br.ll @@ -1,4 +1,4 @@ -; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 -aarch64-atomic-cfg-tidy=0 | FileCheck %s +; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s ; We've got the usual issues with LLVM reordering blocks here. The ; tests are correct for the current order, but who knows when that