Index: llvm/trunk/test/CodeGen/AArch64/Redundantstore.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/Redundantstore.ll
+++ llvm/trunk/test/CodeGen/AArch64/Redundantstore.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O3 -march=aarch64 < %s | FileCheck %s
+; RUN: llc < %s -O3 -mtriple=aarch64-eabi | FileCheck %s
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 @end_of_array = common global i8* null, align 8
Index: llvm/trunk/test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll
+++ llvm/trunk/test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll
@@ -1,8 +1,7 @@
-; RUN: llc < %s -march=arm64
+; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu
 ; Make sure we are not crashing on this test.
 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
-target triple = "aarch64-unknown-linux-gnu"
 declare void @extern(i8*)
Index: llvm/trunk/test/CodeGen/AArch64/aarch64-addv.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/aarch64-addv.ll
+++ llvm/trunk/test/CodeGen/AArch64/aarch64-addv.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=aarch64 -aarch64-neon-syntax=generic < %s | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-eabi -aarch64-neon-syntax=generic | FileCheck %s
 define i8 @add_B(<16 x i8>* %arr) {
 ; CHECK-LABEL: add_B
Index: llvm/trunk/test/CodeGen/AArch64/aarch64-minmaxv.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/aarch64-minmaxv.ll
+++ llvm/trunk/test/CodeGen/AArch64/aarch64-minmaxv.ll
@@ -1,7 +1,6 @@
-; RUN: llc -march=aarch64 -aarch64-neon-syntax=generic < %s | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-linux-gnu -aarch64-neon-syntax=generic | FileCheck %s
 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
-target triple = "aarch64-linu--gnu"
 ; CHECK-LABEL: smax_B
 ; CHECK: smaxv {{b[0-9]+}}, {{v[0-9]+}}.16b
Index: llvm/trunk/test/CodeGen/AArch64/arm64-2011-03-21-Unaligned-Frame-Index.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-2011-03-21-Unaligned-Frame-Index.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-2011-03-21-Unaligned-Frame-Index.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 define void @foo(i64 %val) {
 ; CHECK: foo
 ; The stack frame store is not 64-bit aligned. Make sure we use an
Index: llvm/trunk/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64
+; RUN: llc < %s -mtriple=arm64-eabi
 ; The target lowering for integer comparisons was replacing some DAG nodes
 ; during operation legalization, which resulted in dangling pointers,
Index: llvm/trunk/test/CodeGen/AArch64/arm64-2012-05-07-DAGCombineVectorExtract.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-2012-05-07-DAGCombineVectorExtract.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-2012-05-07-DAGCombineVectorExtract.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 define i32 @foo(<4 x i32> %a, i32 %n) nounwind {
 ; CHECK-LABEL: foo:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march arm64 -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -mcpu=cyclone | FileCheck %s
 ;
 @b = private unnamed_addr constant [3 x i32] [i32 1768775988, i32 1685481784, i32 1836253201], align 4
Index: llvm/trunk/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=arm64 -O0 -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -march=arm64 -O3 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -O0 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -O3 -verify-machineinstrs | FileCheck %s
 @.str = private unnamed_addr constant [9 x i8] c"%lf %lu\0A\00", align 1
 @.str1 = private unnamed_addr constant [8 x i8] c"%lf %u\0A\00", align 1
Index: llvm/trunk/test/CodeGen/AArch64/arm64-2013-01-13-ffast-fcmp.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-2013-01-13-ffast-fcmp.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-2013-01-13-ffast-fcmp.ll
@@ -1,8 +1,7 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -fp-contract=fast | FileCheck %s --check-prefix=FAST
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0.0 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0.0 -aarch64-neon-syntax=apple -fp-contract=fast | FileCheck %s --check-prefix=FAST
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128"
-target triple = "arm64-apple-ios7.0.0"
 ;FAST-LABEL: _Z9example25v:
 ;FAST: fcmgt.4s
Index: llvm/trunk/test/CodeGen/AArch64/arm64-2013-01-23-frem-crash.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-2013-01-23-frem-crash.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-2013-01-23-frem-crash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64
+; RUN: llc < %s -mtriple=arm64-eabi
 ; Make sure we are not crashing on this test.
 define void @autogen_SD13158() {
Index: llvm/trunk/test/CodeGen/AArch64/arm64-2013-01-23-sext-crash.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-2013-01-23-sext-crash.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-2013-01-23-sext-crash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64
+; RUN: llc < %s -mtriple=arm64-eabi
 ; Make sure we are not crashing on this test.
Index: llvm/trunk/test/CodeGen/AArch64/arm64-2013-02-12-shufv8i8.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-2013-02-12-shufv8i8.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-2013-02-12-shufv8i8.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple
 ;CHECK-LABEL: Shuff:
 ;CHECK: tbl.8b
Index: llvm/trunk/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll
@@ -1,7 +1,7 @@
-; RUN: llc < %s -verify-machineinstrs -march=arm64 -aarch64-neon-syntax=apple -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=true | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-NOOPT
-; RUN: llc < %s -verify-machineinstrs -march=arm64 -aarch64-neon-syntax=apple -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=false | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-OPT
-; RUN: llc < %s -verify-machineinstrs -march=arm64 -aarch64-neon-syntax=generic -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=true | FileCheck %s -check-prefix=GENERIC -check-prefix=GENERIC-NOOPT
-; RUN: llc < %s -verify-machineinstrs -march=arm64 -aarch64-neon-syntax=generic -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=false | FileCheck %s -check-prefix=GENERIC -check-prefix=GENERIC-OPT
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=apple -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=true | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-NOOPT
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=apple -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=false | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-OPT
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=generic -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=true | FileCheck %s -check-prefix=GENERIC -check-prefix=GENERIC-NOOPT
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=generic -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=false | FileCheck %s -check-prefix=GENERIC -check-prefix=GENERIC-OPT
 define <2 x i64> @bar(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
 ; CHECK-LABEL: bar:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-AnInfiniteLoopInDAGCombine.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-AnInfiniteLoopInDAGCombine.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-AnInfiniteLoopInDAGCombine.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64
+; RUN: llc < %s -mtriple=arm64-eabi
 ; This test case tests an infinite loop bug in DAG combiner.
 ; It just tries to do the following replacing endlessly:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-EXT-undef-mask.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-EXT-undef-mask.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-EXT-undef-mask.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -march=arm64 -aarch64-neon-syntax=apple -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -O0 -mtriple=arm64-eabi -aarch64-neon-syntax=apple -verify-machineinstrs < %s | FileCheck %s
 ; The following 2 test cases test shufflevector with beginning UNDEF mask.
 define <8 x i16> @test_vext_undef_traverse(<8 x i16> %in) {
Index: llvm/trunk/test/CodeGen/AArch64/arm64-abi-varargs.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-abi-varargs.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-abi-varargs.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=arm64 -mcpu=cyclone -enable-misched=false | FileCheck %s
-target triple = "arm64-apple-ios7.0.0"
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0.0 -mcpu=cyclone -enable-misched=false | FileCheck %s
 ; rdar://13625505
 ; Here we have 9 fixed integer arguments the 9th argument in on stack, the
Index: llvm/trunk/test/CodeGen/AArch64/arm64-abi_align.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-abi_align.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-abi_align.ll
@@ -1,6 +1,5 @@
-; RUN: llc < %s -march=arm64 -mcpu=cyclone -enable-misched=false -disable-fp-elim | FileCheck %s
-; RUN: llc < %s -O0 -disable-fp-elim | FileCheck -check-prefix=FAST %s
-target triple = "arm64-apple-darwin"
+; RUN: llc < %s -mtriple=arm64-apple-darwin -mcpu=cyclone -enable-misched=false -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-darwin -O0 -disable-fp-elim | FileCheck -check-prefix=FAST %s
 ; rdar://12648441
 ; Generated from arm64-arguments.c with -O2.
Index: llvm/trunk/test/CodeGen/AArch64/arm64-addp.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-addp.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-addp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -mcpu=cyclone | FileCheck %s
 define double @foo(<2 x double> %a) nounwind {
 ; CHECK-LABEL: foo:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-addr-type-promotion.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-addr-type-promotion.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-addr-type-promotion.ll
@@ -1,9 +1,8 @@
-; RUN: llc -march arm64 < %s -aarch64-collect-loh=false | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-ios3.0.0 -aarch64-collect-loh=false | FileCheck %s
 ; rdar://13452552
 ; Disable the collecting of LOH so that the labels do not get in the
 ; way of the NEXT patterns.
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128"
-target triple = "arm64-apple-ios3.0.0"
 @block = common global i8* null, align 8
Index: llvm/trunk/test/CodeGen/AArch64/arm64-addrmode.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-addrmode.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-addrmode.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-eabi < %s | FileCheck %s
 ; rdar://10232252
 @object = external hidden global i64, section "__DATA, __objc_ivar", align 8
Index: llvm/trunk/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -mcpu=cyclone < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-eabi -mcpu=cyclone < %s | FileCheck %s
 ; CHECK: foo
 ; CHECK: str w[[REG0:[0-9]+]], [x19, #264]
Index: llvm/trunk/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll
@@ -1,7 +1,6 @@
-; RUN: llc -O1 -march=arm64 -enable-andcmp-sinking=true < %s | FileCheck %s
+; RUN: llc -O1 -mtriple=arm64-apple-ios7.0.0 -enable-andcmp-sinking=true < %s | FileCheck %s
 ; ModuleID = 'and-cbz-extr-mr.bc'
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128"
-target triple = "arm64-apple-ios7.0.0"
 define zeroext i1 @foo(i1 %IsEditable, i1 %isTextField, i8* %str1, i8* %str2, i8* %str3, i8* %str4, i8* %str5, i8* %str6, i8* %str7, i8* %str8, i8* %str9, i8* %str10, i8* %str11, i8* %str12, i8* %str13, i32 %int1, i8* %str14) unnamed_addr #0 align 2 {
 ; CHECK: _foo:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-arith-saturating.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-arith-saturating.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-arith-saturating.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -mcpu=cyclone | FileCheck %s
 define i32 @qadds(<4 x i32> %b, <4 x i32> %c) nounwind readnone optsize ssp {
 ; CHECK-LABEL: qadds:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-arith.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-arith.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-arith.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -asm-verbose=false | FileCheck %s
 define i32 @t1(i32 %a, i32 %b) nounwind readnone ssp {
 entry:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll
@@ -1,7 +1,6 @@
-; RUN: llc -march=arm64 -aarch64-dead-def-elimination=false < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-apple-ios7.0.0 -aarch64-dead-def-elimination=false < %s | FileCheck %s
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
-target triple = "arm64-apple-ios7.0.0"
 ; Function Attrs: nounwind ssp uwtable
 define i32 @test1() #0 {
Index: llvm/trunk/test/CodeGen/AArch64/arm64-atomic-128.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-atomic-128.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-atomic-128.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -mtriple=arm64-linux-gnu -verify-machineinstrs -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-linux-gnu -verify-machineinstrs -mcpu=cyclone | FileCheck %s
 @var = global i128 0
Index: llvm/trunk/test/CodeGen/AArch64/arm64-atomic.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-atomic.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-atomic.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -asm-verbose=false -verify-machineinstrs -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -asm-verbose=false -verify-machineinstrs -mcpu=cyclone | FileCheck %s
 define i32 @val_compare_and_swap(i32* %p, i32 %cmp, i32 %new) #0 {
 ; CHECK-LABEL: val_compare_and_swap:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-big-imm-offsets.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-big-imm-offsets.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-big-imm-offsets.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 < %s
+; RUN: llc -mtriple=arm64-eabi < %s
 ; Make sure large offsets aren't mistaken for valid immediate offsets.
Index: llvm/trunk/test/CodeGen/AArch64/arm64-bitfield-extract.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-bitfield-extract.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-bitfield-extract.ll
@@ -1,5 +1,5 @@
 ; RUN: opt -codegenprepare -mtriple=arm64-apple-ios -S -o - %s | FileCheck --check-prefix=OPT %s
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 %struct.X = type { i8, i8, [2 x i8] }
 %struct.Y = type { i32, i8 }
 %struct.Z = type { i8, i8, [2 x i8], i16 }
Index: llvm/trunk/test/CodeGen/AArch64/arm64-build-vector.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-build-vector.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-build-vector.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 ; Check that building up a vector w/ only one non-zero lane initializes
 ; intelligently.
Index: llvm/trunk/test/CodeGen/AArch64/arm64-builtins-linux.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-builtins-linux.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-builtins-linux.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=aarch64 -mtriple=aarch64-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s
 ; Function Attrs: nounwind readnone
 declare i8* @llvm.thread.pointer() #1
Index: llvm/trunk/test/CodeGen/AArch64/arm64-cast-opt.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-cast-opt.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-cast-opt.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O3 -march=arm64 -mtriple arm64-apple-ios5.0.0 < %s | FileCheck %s
+; RUN: llc -O3 -mtriple arm64-apple-ios5.0.0 < %s | FileCheck %s
 ;
 ; Zero truncation is not necessary when the values are extended properly
 ; already.
Index: llvm/trunk/test/CodeGen/AArch64/arm64-clrsb.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-clrsb.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-clrsb.ll
@@ -1,7 +1,6 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0.0 | FileCheck %s
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
-target triple = "arm64-apple-ios7.0.0"
 ; Function Attrs: nounwind readnone
 declare i32 @llvm.ctlz.i32(i32, i1) #0
Index: llvm/trunk/test/CodeGen/AArch64/arm64-coalesce-ext.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-coalesce-ext.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-coalesce-ext.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -mtriple=arm64-apple-darwin < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-apple-darwin < %s | FileCheck %s
 ; Check that the peephole optimizer knows about sext and zext instructions.
 ; CHECK: test1sext
 define i32 @test1sext(i64 %A, i64 %B, i32* %P, i64 *%P2) nounwind {
Index: llvm/trunk/test/CodeGen/AArch64/arm64-complex-ret.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-complex-ret.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-complex-ret.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -o - %s | FileCheck %s
+; RUN: llc -mtriple=arm64-eabi -o - %s | FileCheck %s
 define { i192, i192, i21, i192 } @foo(i192) {
 ; CHECK-LABEL: foo:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-convert-v4f64.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-convert-v4f64.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-convert-v4f64.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 define <4 x i16> @fptosi_v4f64_to_v4i16(<4 x double>* %ptr) {
Index: llvm/trunk/test/CodeGen/AArch64/arm64-crc32.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-crc32.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-crc32.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -mattr=+crc -o - %s | FileCheck %s
+; RUN: llc -mtriple=arm64-eabi -mattr=+crc -o - %s | FileCheck %s
 define i32 @test_crc32b(i32 %cur, i8 %next) {
 ; CHECK-LABEL: test_crc32b:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-crypto.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-crypto.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-crypto.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -mattr=crypto -aarch64-neon-syntax=apple -o - %s | FileCheck %s
+; RUN: llc -mtriple=arm64-eabi -mattr=crypto -aarch64-neon-syntax=apple -o - %s | FileCheck %s
 declare <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %data, <16 x i8> %key)
 declare <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %data, <16 x i8> %key)
Index: llvm/trunk/test/CodeGen/AArch64/arm64-cvt.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-cvt.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-cvt.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 ;
 ; Floating-point scalar convert to signed integer (to nearest with ties to away)
Index: llvm/trunk/test/CodeGen/AArch64/arm64-dead-def-frame-index.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-dead-def-frame-index.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-dead-def-frame-index.ll
@@ -1,7 +1,6 @@
-; RUN: llc -march=arm64 < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0.0 | FileCheck %s
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
-target triple = "arm64-apple-ios7.0.0"
 ; Function Attrs: nounwind ssp uwtable
 define i32 @test1() #0 {
Index: llvm/trunk/test/CodeGen/AArch64/arm64-dup.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-dup.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-dup.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
 define <8 x i8> @v_dup8(i8 %A) nounwind {
 ;CHECK-LABEL: v_dup8:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-ext.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-ext.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-ext.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: test_vextd:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-extend-int-to-fp.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-extend-int-to-fp.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-extend-int-to-fp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 define <4 x float> @foo(<4 x i16> %a) nounwind {
 ; CHECK-LABEL: foo:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-extload-knownzero.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-extload-knownzero.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-extload-knownzero.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 ; rdar://12771555
 define void @foo(i16* %ptr, i32 %a) nounwind {
Index: llvm/trunk/test/CodeGen/AArch64/arm64-extract.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-extract.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-extract.ll
@@ -1,5 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s \
-; RUN:   -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -verify-machineinstrs | FileCheck %s
 define i64 @ror_i64(i64 %in) {
 ; CHECK-LABEL: ror_i64:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-extract_subvector.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-extract_subvector.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-extract_subvector.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 ; Extract of an upper half of a vector is an "ext.16b v0, v0, v0, #8" insn.
Index: llvm/trunk/test/CodeGen/AArch64/arm64-fastcc-tailcall.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fastcc-tailcall.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fastcc-tailcall.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 define void @caller(i32* nocapture %p, i32 %a, i32 %b) nounwind optsize ssp {
 ; CHECK-NOT: stp
Index: llvm/trunk/test/CodeGen/AArch64/arm64-fcmp-opt.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fcmp-opt.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fcmp-opt.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -mcpu=cyclone -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -mcpu=cyclone -aarch64-neon-syntax=apple | FileCheck %s
 ; rdar://10263824
 define i1 @fcmp_float1(float %a) nounwind ssp {
Index: llvm/trunk/test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 ; DAGCombine to transform a conversion of an extract_vector_elt to an
 ; extract_vector_elt of a conversion, which saves a round trip of copies
Index: llvm/trunk/test/CodeGen/AArch64/arm64-fmadd.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fmadd.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fmadd.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-eabi < %s | FileCheck %s
 define float @fma32(float %a, float %b, float %c) nounwind readnone ssp {
 entry:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-fmax-safe.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fmax-safe.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fmax-safe.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 define double @test_direct(float %in) {
 ; CHECK-LABEL: test_direct:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-fmax.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fmax.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fmax.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -enable-no-nans-fp-math < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -enable-no-nans-fp-math | FileCheck %s
 define double @test_direct(float %in) {
 ; CHECK-LABEL: test_direct:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-fmuladd.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fmuladd.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fmuladd.ll
@@ -1,4 +1,4 @@
-; RUN: llc -asm-verbose=false < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 define float @test_f32(float* %A, float* %B, float* %C) nounwind {
 ;CHECK-LABEL: test_f32:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-fold-lsl.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fold-lsl.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fold-lsl.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 ;
 ;
Index: llvm/trunk/test/CodeGen/AArch64/arm64-fp.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fp.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 define float @t1(i1 %a, float %b, float %c) nounwind {
 ; CHECK: t1
Index: llvm/trunk/test/CodeGen/AArch64/arm64-fp128-folding.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fp128-folding.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fp128-folding.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -verify-machineinstrs | FileCheck %s
 declare void @bar(i8*, i8*, i32*)
 ; SelectionDAG used to try to fold some fp128 operations using the ppc128 type,
Index: llvm/trunk/test/CodeGen/AArch64/arm64-frame-index.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-frame-index.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-frame-index.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -mtriple=arm64-apple-ios -aarch64-atomic-cfg-tidy=0 < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-ios -aarch64-atomic-cfg-tidy=0 | FileCheck %s
 ; rdar://11935841
 define void @t1() nounwind ssp {
Index: llvm/trunk/test/CodeGen/AArch64/arm64-i16-subreg-extract.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-i16-subreg-extract.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-i16-subreg-extract.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 define i32 @foo(<4 x i16>* %__a) nounwind {
 ; CHECK-LABEL: foo:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-icmp-opt.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-icmp-opt.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-icmp-opt.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 ; Optimize (x > -1) to (x >= 0) etc.
 ; Optimize (cmp (add / sub), 0): eliminate the subs used to update flag
Index: llvm/trunk/test/CodeGen/AArch64/arm64-indexed-memory.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-indexed-memory.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-indexed-memory.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-redzone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-redzone | FileCheck %s
 define void @store64(i64** nocapture %out, i64 %index, i64 %spacing) nounwind noinline ssp {
 ; CHECK-LABEL: store64:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm-error-I.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm-error-I.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm-error-I.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -march=arm64 < %s 2> %t
+; RUN: not llc -mtriple=arm64-eabi < %s 2> %t
 ; RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s
 ; Check for at least one invalid constant.
Index: llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm-error-J.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm-error-J.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm-error-J.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -march=arm64 < %s 2> %t
+; RUN: not llc -mtriple=arm64-eabi < %s 2> %t
 ; RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s
 ; Check for at least one invalid constant.
Index: llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm-error-K.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm-error-K.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm-error-K.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -march=arm64 < %s 2> %t
+; RUN: not llc -mtriple=arm64-eabi < %s 2> %t
 ; RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s
 ; Check for at least one invalid constant.
Index: llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm-error-L.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm-error-L.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm-error-L.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -march=arm64 < %s 2> %t
+; RUN: not llc -mtriple=arm64-eabi < %s 2> %t
 ; RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s
 ; Check for at least one invalid constant.
Index: llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm-error-M.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm-error-M.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm-error-M.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -march=arm64 < %s 2> %t
+; RUN: not llc -mtriple=arm64-eabi < %s 2> %t
 ; RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s
 ; Check for at least one invalid constant.
Index: llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm-error-N.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm-error-N.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm-error-N.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -march=arm64 < %s 2> %t
+; RUN: not llc -mtriple=arm64-eabi < %s 2> %t
 ; RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s
 ; Check for at least one invalid constant.
Index: llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm-zero-reg-error.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm-zero-reg-error.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm-zero-reg-error.ll
@@ -1,4 +1,4 @@
-; RUN: not llc < %s -march=arm64 2>&1 | FileCheck %s
+; RUN: not llc < %s -mtriple=arm64-eabi 2>&1 | FileCheck %s
 ; The 'z' constraint allocates either xzr or wzr, but obviously an input of 1 is
Index: llvm/trunk/test/CodeGen/AArch64/arm64-ld1.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-ld1.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-ld1.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -verify-machineinstrs -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -verify-machineinstrs -asm-verbose=false | FileCheck %s
 %struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }
 %struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> }
Index: llvm/trunk/test/CodeGen/AArch64/arm64-ldp-aa.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-ldp-aa.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-ldp-aa.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -enable-misched=false -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -enable-misched=false -verify-machineinstrs | FileCheck %s
 ; The next set of tests makes sure we can combine the second instruction into
 ; the first.
Index: llvm/trunk/test/CodeGen/AArch64/arm64-ldp.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-ldp.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-ldp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -verify-machineinstrs | FileCheck %s
 ; CHECK-LABEL: ldp_int
 ; CHECK: ldp
Index: llvm/trunk/test/CodeGen/AArch64/arm64-ldur.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-ldur.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-ldur.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 define i64 @_f0(i64* %p) {
 ; CHECK: f0:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-leaf.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-leaf.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-leaf.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -mtriple=arm64-apple-ios < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-ios | FileCheck %s
 ; rdar://12829704
 define void @t8() nounwind ssp {
Index: llvm/trunk/test/CodeGen/AArch64/arm64-long-shift.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-long-shift.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-long-shift.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -mcpu=cyclone | FileCheck %s
 define i128 @shl(i128 %r, i128 %s) nounwind readnone {
 ; CHECK-LABEL: shl:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-memcpy-inline.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-memcpy-inline.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-memcpy-inline.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -mcpu=cyclone | FileCheck %s
 %struct.x = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
Index: llvm/trunk/test/CodeGen/AArch64/arm64-memset-inline.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-memset-inline.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-memset-inline.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 define void @t1(i8* nocapture %c) nounwind optsize {
 entry:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-movi.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-movi.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-movi.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 ;==--------------------------------------------------------------------------==
 ; Tests for MOV-immediate implemented with ORR-immediate.
Index: llvm/trunk/test/CodeGen/AArch64/arm64-mul.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-mul.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-mul.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 ; rdar://9296808
 ; rdar://9349137
Index: llvm/trunk/test/CodeGen/AArch64/arm64-neon-v8.1a.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-neon-v8.1a.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-neon-v8.1a.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -verify-machineinstrs -march=arm64 -aarch64-neon-syntax=generic | FileCheck %s --check-prefix=CHECK-V8a
-; RUN: llc < %s -verify-machineinstrs -march=arm64 -mattr=+v8.1a -aarch64-neon-syntax=generic | FileCheck %s --check-prefix=CHECK-V81a
-; RUN: llc < %s -verify-machineinstrs -march=arm64 -mattr=+v8.1a -aarch64-neon-syntax=apple | FileCheck %s --check-prefix=CHECK-V81a-apple
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=generic | FileCheck %s --check-prefix=CHECK-V8a
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -mattr=+v8.1a -aarch64-neon-syntax=generic | FileCheck %s --check-prefix=CHECK-V81a
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -mattr=+v8.1a -aarch64-neon-syntax=apple | FileCheck %s --check-prefix=CHECK-V81a-apple
 declare <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16>, <4 x i16>)
 declare <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16>, <8 x i16>)
Index: llvm/trunk/test/CodeGen/AArch64/arm64-popcnt.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-popcnt.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-popcnt.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
-; RUN: llc < %s -march=aarch64 -mattr -neon -aarch64-neon-syntax=apple | FileCheck -check-prefix=CHECK-NONEON %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-eabi -mattr -neon -aarch64-neon-syntax=apple | FileCheck -check-prefix=CHECK-NONEON %s
 define i32 @cnt32_advsimd(i32 %x) nounwind readnone {
   %cnt = tail call i32 @llvm.ctpop.i32(i32 %x)
Index: llvm/trunk/test/CodeGen/AArch64/arm64-prefetch.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-prefetch.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-prefetch.ll
@@ -1,4 +1,4 @@
-; RUN: llc %s -march arm64 -o - | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 @a = common global i32* null, align 8
Index: llvm/trunk/test/CodeGen/AArch64/arm64-redzone.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-redzone.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-redzone.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-redzone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-redzone | FileCheck %s
 define i32 @foo(i32 %a, i32 %b) nounwind ssp {
 ; CHECK-LABEL: foo:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-regress-f128csel-flags.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-regress-f128csel-flags.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-regress-f128csel-flags.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -verify-machineinstrs | FileCheck %s
 ; We used to not mark NZCV as being used in the continuation basic-block
 ; when lowering a 128-bit "select" to branches. This meant a subsequent use
Index: llvm/trunk/test/CodeGen/AArch64/arm64-regress-interphase-shift.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-regress-interphase-shift.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-regress-interphase-shift.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -o - %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 ; This is mostly a "don't assert" test. The type of the RHS of a shift depended
 ; on the phase of legalization, which led to the creation of an unexpected and
Index: llvm/trunk/test/CodeGen/AArch64/arm64-return-vector.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-return-vector.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-return-vector.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 ; 2x64 vector should be returned in Q0.
Index: llvm/trunk/test/CodeGen/AArch64/arm64-returnaddr.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-returnaddr.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-returnaddr.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 define i8* @rt0(i32 %x) nounwind readnone {
 entry:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-rev.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-rev.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-rev.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 define i32 @test_rev_w(i32 %a) nounwind {
 entry:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-scvt.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-scvt.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-scvt.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=arm64 -mcpu=cyclone -aarch64-neon-syntax=apple | FileCheck %s
-; RUN: llc < %s -march=arm64 -mcpu=cortex-a57 | FileCheck --check-prefix=CHECK-A57 %s
+; RUN: llc < %s -mtriple=arm64-eabi -mcpu=cyclone -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -mcpu=cortex-a57 | FileCheck --check-prefix=CHECK-A57 %s
 ; rdar://13082402
 define float @t1(i32* nocapture %src) nounwind ssp {
Index: llvm/trunk/test/CodeGen/AArch64/arm64-shifted-sext.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-shifted-sext.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-shifted-sext.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -mtriple=arm64-apple-ios < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-ios | FileCheck %s
 ;
 ;
Index: llvm/trunk/test/CodeGen/AArch64/arm64-shrink-v1i64.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-shrink-v1i64.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-shrink-v1i64.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 < %s
+; RUN: llc < %s -mtriple=arm64-eabi
 ; The DAGCombiner tries to do following shrink:
 ; Convert x+y to (VT)((SmallVT)x+(SmallVT)y)
Index: llvm/trunk/test/CodeGen/AArch64/arm64-simd-scalar-to-vector.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-simd-scalar-to-vector.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-simd-scalar-to-vector.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -mcpu=cyclone | FileCheck %s
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -O0 -mcpu=cyclone | FileCheck %s --check-prefix=CHECK-FAST
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -O0 -mcpu=cyclone | FileCheck %s --check-prefix=CHECK-FAST
 define <16 x i8> @foo(<16 x i8> %a) nounwind optsize readnone ssp {
 ; CHECK: uaddlv.16b h0, v0
Index: llvm/trunk/test/CodeGen/AArch64/arm64-sitofp-combine-chains.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-sitofp-combine-chains.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-sitofp-combine-chains.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -o - %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 ; ARM64ISelLowering.cpp was creating a new (floating-point) load for efficiency
 ; but not updating chain-successors of the old one. As a result, the two memory
Index: llvm/trunk/test/CodeGen/AArch64/arm64-sli-sri-opt.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-sli-sri-opt.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-sli-sri-opt.ll
@@ -1,4 +1,4 @@
-; RUN: llc -aarch64-shift-insert-generation=true -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s
+; RUN: llc < %s -aarch64-shift-insert-generation=true -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 define void @testLeftGood(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %dest) nounwind {
 ; CHECK-LABEL: testLeftGood:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-smaxv.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-smaxv.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-smaxv.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
 define signext i8 @test_vmaxv_s8(<8 x i8> %a1) {
 ; CHECK: test_vmaxv_s8
Index: llvm/trunk/test/CodeGen/AArch64/arm64-sminv.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-sminv.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-sminv.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
 define signext i8 @test_vminv_s8(<8 x i8> %a1) {
 ; CHECK: test_vminv_s8
Index: llvm/trunk/test/CodeGen/AArch64/arm64-sqshl-uqshl-i64Contant.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-sqshl-uqshl-i64Contant.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-sqshl-uqshl-i64Contant.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -verify-machineinstrs -march=arm64 | FileCheck %s
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi | FileCheck %s
 ; Check if sqshl/uqshl with constant shift amount can be selected.
 define i64 @test_vqshld_s64_i(i64 %a) {
Index: llvm/trunk/test/CodeGen/AArch64/arm64-st1.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-st1.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-st1.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -verify-machineinstrs | FileCheck %s
 define void @st1lane_16b(<16 x i8> %A, i8* %D) {
 ; CHECK-LABEL: st1lane_16b
Index: llvm/trunk/test/CodeGen/AArch64/arm64-stp-aa.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-stp-aa.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-stp-aa.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -enable-misched=false -aarch64-stp-suppress=false -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -enable-misched=false -aarch64-stp-suppress=false -verify-machineinstrs | FileCheck %s
 ; The next set of tests makes sure we can combine the second instruction into
 ; the first.
Index: llvm/trunk/test/CodeGen/AArch64/arm64-stp.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-stp.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-stp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-stp-suppress=false -verify-machineinstrs -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-stp-suppress=false -verify-machineinstrs -mcpu=cyclone | FileCheck %s
 ; CHECK-LABEL: stp_int
 ; CHECK: stp w0, w1, [x2]
Index: llvm/trunk/test/CodeGen/AArch64/arm64-stur.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-stur.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-stur.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -mcpu=cyclone | FileCheck %s
 %struct.X = type <{ i32, i64, i64 }>
 define void @foo1(i32* %p, i64 %val) nounwind {
Index: llvm/trunk/test/CodeGen/AArch64/arm64-subvector-extend.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-subvector-extend.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-subvector-extend.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
 ; Test efficient codegen of vector extends up from legal type to 128 bit
 ; and 256 bit vector types.
Index: llvm/trunk/test/CodeGen/AArch64/arm64-tbl.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-tbl.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-tbl.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 define <8 x i8> @tbl1_8b(<16 x i8> %A, <8 x i8> %B) nounwind {
 ; CHECK: tbl1_8b
Index: llvm/trunk/test/CodeGen/AArch64/arm64-this-return.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-this-return.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-this-return.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-this-return-forwarding | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-this-return-forwarding | FileCheck %s
 %struct.A = type { i8 }
 %struct.B = type { i32 }
Index: llvm/trunk/test/CodeGen/AArch64/arm64-trap.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-trap.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-trap.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 define void @foo() nounwind {
 ; CHECK: foo
 ; CHECK: brk #0x1
Index: llvm/trunk/test/CodeGen/AArch64/arm64-trn.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-trn.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-trn.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 define <8 x i8> @vtrni8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vtrni8:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-umaxv.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-umaxv.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-umaxv.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
 define i32 @vmax_u8x8(<8 x i8> %a) nounwind ssp {
 ; CHECK-LABEL: vmax_u8x8:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-uminv.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-uminv.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-uminv.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
 define i32 @vmin_u8x8(<8 x i8> %a) nounwind ssp {
 ; CHECK-LABEL: vmin_u8x8:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-umov.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-umov.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-umov.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 define zeroext i8 @f1(<16 x i8> %a) {
 ; CHECK-LABEL: f1:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-unaligned_ldst.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-unaligned_ldst.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-unaligned_ldst.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 ; rdar://r11231896
 define void @t1(i8* nocapture %a, i8* nocapture %b) nounwind {
Index: llvm/trunk/test/CodeGen/AArch64/arm64-uzp.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-uzp.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-uzp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 define <8 x i8> @vuzpi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vuzpi8:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-vaargs.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vaargs.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vaargs.ll
@@ -1,6 +1,5 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-darwin11.0.0 | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64"
-target triple = "arm64-apple-darwin11.0.0"
 define float @t1(i8* nocapture %fmt, ...) nounwind ssp {
 entry:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-vabs.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vabs.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vabs.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 define <8 x i16> @sabdl8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
Index: llvm/trunk/test/CodeGen/AArch64/arm64-vadd.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vadd.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vadd.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
 define <8 x i8> @addhn8b(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: addhn8b:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-vaddlv.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vaddlv.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vaddlv.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 define i64 @test_vaddlv_s32(<2 x i32> %a1) nounwind readnone {
 ; CHECK: test_vaddlv_s32
Index: llvm/trunk/test/CodeGen/AArch64/arm64-vaddv.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vaddv.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vaddv.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s -asm-verbose=false -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false -mcpu=cyclone | FileCheck %s
 define signext i8 @test_vaddv_s8(<8 x i8> %a1) {
 ; CHECK-LABEL: test_vaddv_s8:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-vbitwise.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vbitwise.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vbitwise.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 define <8 x i8> @rbit_8b(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: rbit_8b:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-vclz.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vclz.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vclz.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 define <8 x i8> @test_vclz_u8(<8 x i8> %a) nounwind readnone ssp {
 ; CHECK-LABEL: test_vclz_u8:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-vcmp.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vcmp.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vcmp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 define void @fcmltz_4s(<4 x float> %a, <4 x i16>* %p) nounwind {
Index: llvm/trunk/test/CodeGen/AArch64/arm64-vcnt.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vcnt.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vcnt.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 define <8 x i8> @cls_8b(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: cls_8b:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-vcombine.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vcombine.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vcombine.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 ; LowerCONCAT_VECTORS() was reversing the order of two parts.
 ; rdar://11558157
Index: llvm/trunk/test/CodeGen/AArch64/arm64-vcvt.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vcvt.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vcvt.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 define <2 x i32> @fcvtas_2s(<2 x float> %A) nounwind {
 ;CHECK-LABEL: fcvtas_2s:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-vcvt_f.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vcvt_f.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vcvt_f.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
-; RUN: llc < %s -O0 -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -O0 -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 define <2 x double> @test_vcvt_f64_f32(<2 x float> %x) nounwind readnone ssp {
 ; CHECK-LABEL: test_vcvt_f64_f32:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-vcvt_f32_su32.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vcvt_f32_su32.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vcvt_f32_su32.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 define <2 x float> @ucvt(<2 x i32> %a) nounwind readnone ssp {
 ; CHECK-LABEL: ucvt:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-vcvt_n.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vcvt_n.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vcvt_n.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 define <2 x float> @cvtf32fxpu(<2 x i32> %a) nounwind readnone ssp {
 ; CHECK-LABEL: cvtf32fxpu:
Index: llvm/trunk/test/CodeGen/AArch64/arm64-vcvt_su32_f32.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vcvt_su32_f32.ll
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vcvt_su32_f32.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 define <2 x i32> @c1(<2 x float> %a) nounwind readnone ssp {
 ; CHECK: c1
Index: llvm/trunk/test/CodeGen/AArch64/arm64-vcvtxd_f32_f64.ll
=================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vcvtxd_f32_f64.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vcvtxd_f32_f64.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s define float @fcvtxn(double %a) { ; CHECK-LABEL: fcvtxn: Index: llvm/trunk/test/CodeGen/AArch64/arm64-vecCmpBr.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vecCmpBr.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vecCmpBr.ll @@ -1,7 +1,6 @@ -; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s -mcpu=cyclone | FileCheck %s +; RUN: llc < %s -mtriple=arm64-apple-ios3.0.0 -aarch64-neon-syntax=apple -mcpu=cyclone | FileCheck %s ; ModuleID = 'arm64_vecCmpBr.c' target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128" -target triple = "arm64-apple-ios3.0.0" define i32 @anyZero64(<4 x i16> %a) #0 { Index: llvm/trunk/test/CodeGen/AArch64/arm64-vecFold.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vecFold.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vecFold.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=arm64 -aarch64-neon-syntax=apple -o - %s| FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <16 x i8> @foov16i8(<8 x i16> %a0, <8 x i16> %b0) nounwind readnone ssp { ; CHECK-LABEL: foov16i8: Index: llvm/trunk/test/CodeGen/AArch64/arm64-vector-ext.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vector-ext.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vector-ext.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s ;CHECK: @func30 ;CHECK: movi.4h v1, #1 Index: llvm/trunk/test/CodeGen/AArch64/arm64-vector-imm.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vector-imm.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vector-imm.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i8> @v_orrimm(<8 x i8>* %A) nounwind { ; CHECK-LABEL: v_orrimm: Index: llvm/trunk/test/CodeGen/AArch64/arm64-vector-insertion.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vector-insertion.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vector-insertion.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=arm64 -mcpu=generic -aarch64-neon-syntax=apple < %s | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -mcpu=generic -aarch64-neon-syntax=apple | FileCheck %s define void @test0f(float* nocapture %x, float %a) #0 { entry: Index: llvm/trunk/test/CodeGen/AArch64/arm64-vector-ldst.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vector-ldst.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vector-ldst.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -verify-machineinstrs | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -verify-machineinstrs | FileCheck %s ; rdar://9428579 Index: llvm/trunk/test/CodeGen/AArch64/arm64-vext.ll 
=================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vext.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vext.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define void @test_vext_s8() nounwind ssp { ; CHECK-LABEL: test_vext_s8: Index: llvm/trunk/test/CodeGen/AArch64/arm64-vfloatintrinsics.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vfloatintrinsics.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vfloatintrinsics.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s ;;; Float vectors Index: llvm/trunk/test/CodeGen/AArch64/arm64-vhadd.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vhadd.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vhadd.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i8> @shadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK-LABEL: shadd8b: Index: llvm/trunk/test/CodeGen/AArch64/arm64-vhsub.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vhsub.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vhsub.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i8> @shsub8b(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK-LABEL: shsub8b: Index: llvm/trunk/test/CodeGen/AArch64/arm64-vmax.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vmax.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vmax.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i8> @smax_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK-LABEL: smax_8b: @@ -244,7 +244,7 @@ declare <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32>, <2 x i32>) nounwind readnone declare <4 x i32> @llvm.aarch64.neon.umin.v4i32(<4 x i32>, <4 x i32>) nounwind readnone -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i8> @smaxp_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK-LABEL: smaxp_8b: @@ -368,7 +368,7 @@ declare <2 x i32> @llvm.aarch64.neon.umaxp.v2i32(<2 x i32>, <2 x i32>) nounwind readnone declare <4 x i32> @llvm.aarch64.neon.umaxp.v4i32(<4 x i32>, <4 x i32>) nounwind readnone -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i8> @sminp_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK-LABEL: sminp_8b: Index: llvm/trunk/test/CodeGen/AArch64/arm64-vminmaxnm.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vminmaxnm.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vminmaxnm.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck 
%s define <2 x float> @f1(<2 x float> %a, <2 x float> %b) nounwind readnone ssp { ; CHECK: fmaxnm.2s v0, v0, v1 Index: llvm/trunk/test/CodeGen/AArch64/arm64-vmovn.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vmovn.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vmovn.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i8> @xtn8b(<8 x i16> %A) nounwind { ;CHECK-LABEL: xtn8b: Index: llvm/trunk/test/CodeGen/AArch64/arm64-vmul.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vmul.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vmul.ll @@ -1,4 +1,4 @@ -; RUN: llc -asm-verbose=false < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -asm-verbose=false -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i16> @smull8h(<8 x i8>* %A, <8 x i8>* %B) nounwind { Index: llvm/trunk/test/CodeGen/AArch64/arm64-volatile.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-volatile.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-volatile.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s define i64 @normal_load(i64* nocapture %bar) nounwind readonly { ; CHECK: normal_load ; CHECK: ldp Index: llvm/trunk/test/CodeGen/AArch64/arm64-vpopcnt.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vpopcnt.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vpopcnt.ll @@ -1,5 +1,4 @@ -; RUN: llc < %s -march=arm64 -mcpu=cyclone | FileCheck %s -target triple = "arm64-apple-ios" +; RUN: llc < %s -mtriple=arm64-apple-ios -mcpu=cyclone | FileCheck %s ; The non-byte ones used to fail with "Cannot select" Index: llvm/trunk/test/CodeGen/AArch64/arm64-vqadd.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vqadd.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vqadd.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i8> @sqadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK-LABEL: sqadd8b: Index: llvm/trunk/test/CodeGen/AArch64/arm64-vqsub.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vqsub.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vqsub.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i8> @sqsub8b(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK-LABEL: sqsub8b: Index: llvm/trunk/test/CodeGen/AArch64/arm64-vselect.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vselect.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vselect.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s ;CHECK: @func63 ;CHECK: cmeq.4h v0, v0, v1 Index: llvm/trunk/test/CodeGen/AArch64/arm64-vsetcc_fp.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vsetcc_fp.ll +++ 
llvm/trunk/test/CodeGen/AArch64/arm64-vsetcc_fp.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s define <2 x i32> @fcmp_one(<2 x float> %x, <2 x float> %y) nounwind optsize readnone { ; CHECK-LABEL: fcmp_one: ; CHECK-NEXT: fcmgt.2s [[REG:v[0-9]+]], v0, v1 Index: llvm/trunk/test/CodeGen/AArch64/arm64-vshift.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vshift.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vshift.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -enable-misched=false | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -enable-misched=false | FileCheck %s define <8 x i8> @sqshl8b(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK-LABEL: sqshl8b: Index: llvm/trunk/test/CodeGen/AArch64/arm64-vshr.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vshr.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vshr.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s -mcpu=cyclone | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -mcpu=cyclone | FileCheck %s define <8 x i16> @testShiftRightArith_v8i16(<8 x i16> %a, <8 x i16> %b) #0 { ; CHECK-LABEL: testShiftRightArith_v8i16: Index: llvm/trunk/test/CodeGen/AArch64/arm64-vsqrt.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vsqrt.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vsqrt.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <2 x float> @frecps_2s(<2 x float>* %A, <2 x float>* %B) nounwind { ;CHECK-LABEL: frecps_2s: Index: llvm/trunk/test/CodeGen/AArch64/arm64-vsra.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vsra.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vsra.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i8> @vsras8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK-LABEL: vsras8: Index: llvm/trunk/test/CodeGen/AArch64/arm64-vsub.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-vsub.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-vsub.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i8> @subhn8b(<8 x i16>* %A, <8 x i16>* %B) nounwind { ;CHECK-LABEL: subhn8b: Index: llvm/trunk/test/CodeGen/AArch64/arm64-xaluo.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-xaluo.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-xaluo.ll @@ -1,5 +1,5 @@ -; RUN: llc -march=arm64 -aarch64-atomic-cfg-tidy=0 -disable-post-ra -verify-machineinstrs < %s | FileCheck %s -; RUN: llc -march=arm64 -aarch64-atomic-cfg-tidy=0 -fast-isel -fast-isel-abort=1 -disable-post-ra -verify-machineinstrs < %s | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-atomic-cfg-tidy=0 -disable-post-ra -verify-machineinstrs | FileCheck %s +; RUN: llc < %s 
-mtriple=arm64-eabi -aarch64-atomic-cfg-tidy=0 -fast-isel -fast-isel-abort=1 -disable-post-ra -verify-machineinstrs | FileCheck %s ; ; Get the actual value of the overflow bit. Index: llvm/trunk/test/CodeGen/AArch64/arm64-zext.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-zext.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-zext.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s define i64 @foo(i32 %a, i32 %b) nounwind readnone ssp { entry: Index: llvm/trunk/test/CodeGen/AArch64/arm64-zextload-unscaled.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-zextload-unscaled.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-zextload-unscaled.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=arm64 < %s | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s @var32 = global i32 0 Index: llvm/trunk/test/CodeGen/AArch64/arm64-zip.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/arm64-zip.ll +++ llvm/trunk/test/CodeGen/AArch64/arm64-zip.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i8> @vzipi8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK-LABEL: vzipi8: Index: llvm/trunk/test/CodeGen/AArch64/asm-large-immediate.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/asm-large-immediate.ll +++ llvm/trunk/test/CodeGen/AArch64/asm-large-immediate.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=aarch64 -no-integrated-as < %s | FileCheck %s +; RUN: llc < %s -mtriple=aarch64-eabi -no-integrated-as | FileCheck %s define void @test() { entry: Index: llvm/trunk/test/CodeGen/AArch64/branch-folder-merge-mmos.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/branch-folder-merge-mmos.ll +++ llvm/trunk/test/CodeGen/AArch64/branch-folder-merge-mmos.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=aarch64 -mtriple=aarch64-none-linux-gnu -stop-after branch-folder -o - < %s | FileCheck %s +; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -stop-after branch-folder | FileCheck %s target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128" ; Function Attrs: norecurse nounwind Index: llvm/trunk/test/CodeGen/AArch64/cmpwithshort.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/cmpwithshort.ll +++ llvm/trunk/test/CodeGen/AArch64/cmpwithshort.ll @@ -1,4 +1,4 @@ -; RUN: llc -O3 -march=aarch64 < %s | FileCheck %s +; RUN: llc < %s -O3 -mtriple=aarch64-eabi | FileCheck %s define i16 @test_1cmp_signed_1(i16* %ptr1) { ; CHECK-LABLE: @test_1cmp_signed_1 Index: llvm/trunk/test/CodeGen/AArch64/combine-comparisons-by-cse.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/combine-comparisons-by-cse.ll +++ llvm/trunk/test/CodeGen/AArch64/combine-comparisons-by-cse.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=aarch64 -mtriple=aarch64-linux-gnu | FileCheck %s +; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s ; marked as external to prevent possible optimizations @a = external global i32 Index: llvm/trunk/test/CodeGen/AArch64/complex-fp-to-int.ll =================================================================== --- 
llvm/trunk/test/CodeGen/AArch64/complex-fp-to-int.ll +++ llvm/trunk/test/CodeGen/AArch64/complex-fp-to-int.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <2 x i64> @test_v2f32_to_signed_v2i64(<2 x float> %in) { ; CHECK-LABEL: test_v2f32_to_signed_v2i64: Index: llvm/trunk/test/CodeGen/AArch64/complex-int-to-fp.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/complex-int-to-fp.ll +++ llvm/trunk/test/CodeGen/AArch64/complex-int-to-fp.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s ; CHECK: autogen_SD19655 ; CHECK: scvtf Index: llvm/trunk/test/CodeGen/AArch64/div_minsize.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/div_minsize.ll +++ llvm/trunk/test/CodeGen/AArch64/div_minsize.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=aarch64 -mtriple=aarch64-linux-gnu | FileCheck %s +; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s define i32 @testsize1(i32 %x) minsize nounwind { entry: Index: llvm/trunk/test/CodeGen/AArch64/large_shift.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/large_shift.ll +++ llvm/trunk/test/CodeGen/AArch64/large_shift.ll @@ -1,5 +1,4 @@ -; RUN: llc -march=aarch64 -o - %s -target triple = "arm64-unknown-unknown" +; RUN: llc -mtriple=arm64-unknown-unknown -o - %s ; Make sure we don't run into an assert in the aarch64 code selection when ; DAGCombining fails. Index: llvm/trunk/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll +++ llvm/trunk/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=aarch64 -aarch64-neon-syntax=apple -aarch64-stp-suppress=false -verify-machineinstrs -asm-verbose=false | FileCheck %s +; RUN: llc < %s -mtriple=aarch64-eabi -aarch64-neon-syntax=apple -aarch64-stp-suppress=false -verify-machineinstrs -asm-verbose=false | FileCheck %s ; CHECK-LABEL: test_strd_sturd: ; CHECK-NEXT: stp d0, d1, [x0, #-8] Index: llvm/trunk/test/CodeGen/AArch64/legalize-bug-bogus-cpu.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/legalize-bug-bogus-cpu.ll +++ llvm/trunk/test/CodeGen/AArch64/legalize-bug-bogus-cpu.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=aarch64 -mcpu=bogus -o - %s +; RUN: llc < %s -mtriple=aarch64-eabi -mcpu=bogus ; Fix the bug in PR20557. Set mcpu to a bogus name, llc will crash in type ; legalization. Index: llvm/trunk/test/CodeGen/AArch64/lit.local.cfg =================================================================== --- llvm/trunk/test/CodeGen/AArch64/lit.local.cfg +++ llvm/trunk/test/CodeGen/AArch64/lit.local.cfg @@ -2,7 +2,3 @@ if not 'AArch64' in config.root.targets: config.unsupported = True - -# For now we don't test arm64-win32. 
-if re.search(r'cygwin|mingw32|win32|windows-gnu|windows-msvc', config.target_triple): - config.unsupported = True Index: llvm/trunk/test/CodeGen/AArch64/lower-range-metadata-func-call.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/lower-range-metadata-func-call.ll +++ llvm/trunk/test/CodeGen/AArch64/lower-range-metadata-func-call.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=aarch64 -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s +; RUN: llc < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s ; and can be eliminated ; CHECK-LABEL: {{^}}test_call_known_max_range: Index: llvm/trunk/test/CodeGen/AArch64/memcpy-f128.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/memcpy-f128.ll +++ llvm/trunk/test/CodeGen/AArch64/memcpy-f128.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=aarch64 -mtriple=aarch64-linux-gnu | FileCheck %s +; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s %structA = type { i128 } @stubA = internal unnamed_addr constant %structA zeroinitializer, align 8 Index: llvm/trunk/test/CodeGen/AArch64/merge-store-dependency.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/merge-store-dependency.ll +++ llvm/trunk/test/CodeGen/AArch64/merge-store-dependency.ll @@ -1,4 +1,4 @@ -; RUN: llc -mcpu cortex-a53 -march aarch64 %s -o - | FileCheck %s --check-prefix=A53 +; RUN: llc < %s -mcpu cortex-a53 -mtriple=aarch64-eabi | FileCheck %s --check-prefix=A53 ; PR26827 - Merge stores causes wrong dependency. %struct1 = type { %struct1*, %struct1*, i32, i32, i16, i16, void (i32, i32, i8*)*, i8* } Index: llvm/trunk/test/CodeGen/AArch64/merge-store.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/merge-store.ll +++ llvm/trunk/test/CodeGen/AArch64/merge-store.ll @@ -1,5 +1,5 @@ -; RUN: llc -mtriple=aarch64-unknown-unknown %s -mcpu=cyclone -o - | FileCheck %s --check-prefix=CYCLONE --check-prefix=CHECK -; RUN: llc -march aarch64 %s -mattr=-slow-misaligned-128store -o - | FileCheck %s --check-prefix=MISALIGNED --check-prefix=CHECK +; RUN: llc < %s -mtriple=aarch64-unknown-unknown -mcpu=cyclone | FileCheck %s --check-prefix=CYCLONE --check-prefix=CHECK +; RUN: llc < %s -mtriple=aarch64-eabi -mattr=-slow-misaligned-128store | FileCheck %s --check-prefix=MISALIGNED --check-prefix=CHECK @g0 = external global <3 x float>, align 16 @g1 = external global <3 x float>, align 4 Index: llvm/trunk/test/CodeGen/AArch64/mul_pow2.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/mul_pow2.ll +++ llvm/trunk/test/CodeGen/AArch64/mul_pow2.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=aarch64 | FileCheck %s +; RUN: llc < %s -mtriple=aarch64-eabi | FileCheck %s ; Convert mul x, pow2 to shift. ; Convert mul x, pow2 +/- 1 to shift + add/sub. 
Index: llvm/trunk/test/CodeGen/AArch64/no-quad-ldp-stp.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/no-quad-ldp-stp.ll +++ llvm/trunk/test/CodeGen/AArch64/no-quad-ldp-stp.ll @@ -1,5 +1,5 @@ -; RUN: llc < %s -march=aarch64 -mattr=+no-quad-ldst-pairs -verify-machineinstrs -asm-verbose=false | FileCheck %s -; RUN: llc < %s -march=aarch64 -mcpu=exynos-m1 -verify-machineinstrs -asm-verbose=false | FileCheck %s +; RUN: llc < %s -mtriple=aarch64-eabi -mattr=+no-quad-ldst-pairs -verify-machineinstrs -asm-verbose=false | FileCheck %s +; RUN: llc < %s -mtriple=aarch64-eabi -mcpu=exynos-m1 -verify-machineinstrs -asm-verbose=false | FileCheck %s ; CHECK-LABEL: test_nopair_st ; CHECK: str Index: llvm/trunk/test/CodeGen/AArch64/nzcv-save.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/nzcv-save.ll +++ llvm/trunk/test/CodeGen/AArch64/nzcv-save.ll @@ -1,4 +1,4 @@ -; RUN: llc -verify-machineinstrs -march=aarch64 < %s | FileCheck %s +; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-eabi | FileCheck %s ; CHECK: mrs [[NZCV_SAVE:x[0-9]+]], NZCV ; CHECK: msr NZCV, [[NZCV_SAVE]] Index: llvm/trunk/test/CodeGen/AArch64/postra-mi-sched.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/postra-mi-sched.ll +++ llvm/trunk/test/CodeGen/AArch64/postra-mi-sched.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -O3 -march=aarch64 -mcpu=cortex-a53 | FileCheck %s +; RUN: llc < %s -O3 -mtriple=aarch64-eabi -mcpu=cortex-a53 | FileCheck %s ; With cortex-a53, each of fmul and fcvt have latency of 6 cycles. After the ; pre-RA MI scheduler, fmul, fcvt and fdiv will be consecutive. The top-down Index: llvm/trunk/test/CodeGen/AArch64/rem_crash.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/rem_crash.ll +++ llvm/trunk/test/CodeGen/AArch64/rem_crash.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=aarch64 +; RUN: llc < %s -mtriple=aarch64-eabi define i8 @test_minsize_uu8(i8 %x) minsize optsize { entry: Index: llvm/trunk/test/CodeGen/AArch64/tailmerging_in_mbp.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/tailmerging_in_mbp.ll +++ llvm/trunk/test/CodeGen/AArch64/tailmerging_in_mbp.ll @@ -1,4 +1,4 @@ -; RUN: llc <%s -march=aarch64 -verify-machine-dom-info | FileCheck %s +; RUN: llc <%s -mtriple=aarch64-eabi -verify-machine-dom-info | FileCheck %s ; CHECK-LABEL: test: ; CHECK: LBB0_7: Index: llvm/trunk/test/CodeGen/AArch64/tbz-tbnz.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/tbz-tbnz.ll +++ llvm/trunk/test/CodeGen/AArch64/tbz-tbnz.ll @@ -1,4 +1,4 @@ -; RUN: llc -O1 -march=aarch64 < %s | FileCheck %s +; RUN: llc < %s -O1 -mtriple=aarch64-eabi | FileCheck %s declare void @t()