Index: test/CodeGen/AArch64/Redundantstore.ll
===================================================================
--- test/CodeGen/AArch64/Redundantstore.ll
+++ test/CodeGen/AArch64/Redundantstore.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O3 -march=aarch64 < %s | FileCheck %s
+; RUN: llc < %s -O3 -mtriple=aarch64-eabi | FileCheck %s
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

 @end_of_array = common global i8* null, align 8
Index: test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll
===================================================================
--- test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll
+++ test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll
@@ -1,8 +1,7 @@
-; RUN: llc < %s -march=arm64
+; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu
 ; Make sure we are not crashing on this test.

 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
-target triple = "aarch64-unknown-linux-gnu"

 declare void @extern(i8*)

Index: test/CodeGen/AArch64/aarch64-addv.ll
===================================================================
--- test/CodeGen/AArch64/aarch64-addv.ll
+++ test/CodeGen/AArch64/aarch64-addv.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=aarch64 -aarch64-neon-syntax=generic < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-eabi -aarch64-neon-syntax=generic < %s | FileCheck %s

 define i8 @add_B(<16 x i8>* %arr) {
 ; CHECK-LABEL: add_B
Index: test/CodeGen/AArch64/aarch64-minmaxv.ll
===================================================================
--- test/CodeGen/AArch64/aarch64-minmaxv.ll
+++ test/CodeGen/AArch64/aarch64-minmaxv.ll
@@ -1,7 +1,6 @@
-; RUN: llc -march=aarch64 -aarch64-neon-syntax=generic < %s | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-linu--gnu -aarch64-neon-syntax=generic | FileCheck %s

 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
-target triple = "aarch64-linu--gnu"

 ; CHECK-LABEL: smax_B
 ; CHECK: smaxv {{b[0-9]+}}, {{v[0-9]+}}.16b
Index: test/CodeGen/AArch64/arm64-2011-03-21-Unaligned-Frame-Index.ll
===================================================================
--- test/CodeGen/AArch64/arm64-2011-03-21-Unaligned-Frame-Index.ll
+++ test/CodeGen/AArch64/arm64-2011-03-21-Unaligned-Frame-Index.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 define void @foo(i64 %val) {
 ; CHECK: foo
 ; The stack frame store is not 64-bit aligned. Make sure we use an
Index: test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll
===================================================================
--- test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll
+++ test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64
+; RUN: llc < %s -mtriple=arm64-eabi
 ; The target lowering for integer comparisons was replacing some DAG nodes
 ; during operation legalization, which resulted in dangling pointers,
Index: test/CodeGen/AArch64/arm64-2012-05-07-DAGCombineVectorExtract.ll
===================================================================
--- test/CodeGen/AArch64/arm64-2012-05-07-DAGCombineVectorExtract.ll
+++ test/CodeGen/AArch64/arm64-2012-05-07-DAGCombineVectorExtract.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s

 define i32 @foo(<4 x i32> %a, i32 %n) nounwind {
 ; CHECK-LABEL: foo:
Index: test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
===================================================================
--- test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
+++ test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march arm64 -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -mcpu=cyclone | FileCheck %s
 ;

 @b = private unnamed_addr constant [3 x i32] [i32 1768775988, i32 1685481784, i32 1836253201], align 4
Index: test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll
===================================================================
--- test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll
+++ test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=arm64 -O0 -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -march=arm64 -O3 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-eabi -O0 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-eabi -O3 -verify-machineinstrs < %s | FileCheck %s

 @.str = private unnamed_addr constant [9 x i8] c"%lf %lu\0A\00", align 1
 @.str1 = private unnamed_addr constant [8 x i8] c"%lf %u\0A\00", align 1
Index: test/CodeGen/AArch64/arm64-2013-01-13-ffast-fcmp.ll
===================================================================
--- test/CodeGen/AArch64/arm64-2013-01-13-ffast-fcmp.ll
+++ test/CodeGen/AArch64/arm64-2013-01-13-ffast-fcmp.ll
@@ -1,8 +1,7 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -fp-contract=fast | FileCheck %s --check-prefix=FAST
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0.0 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0.0 -aarch64-neon-syntax=apple -fp-contract=fast | FileCheck %s --check-prefix=FAST

 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128"
-target triple = "arm64-apple-ios7.0.0"

 ;FAST-LABEL: _Z9example25v:
 ;FAST: fcmgt.4s
Index: test/CodeGen/AArch64/arm64-2013-01-23-frem-crash.ll
===================================================================
--- test/CodeGen/AArch64/arm64-2013-01-23-frem-crash.ll
+++ test/CodeGen/AArch64/arm64-2013-01-23-frem-crash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64
+; RUN: llc < %s -mtriple=arm64-eabi
 ; Make sure we are not crashing on this test.

 define void @autogen_SD13158() {
Index: test/CodeGen/AArch64/arm64-2013-01-23-sext-crash.ll
===================================================================
--- test/CodeGen/AArch64/arm64-2013-01-23-sext-crash.ll
+++ test/CodeGen/AArch64/arm64-2013-01-23-sext-crash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64
+; RUN: llc < %s -mtriple=arm64-eabi
 ; Make sure we are not crashing on this test.

Index: test/CodeGen/AArch64/arm64-2013-02-12-shufv8i8.ll
===================================================================
--- test/CodeGen/AArch64/arm64-2013-02-12-shufv8i8.ll
+++ test/CodeGen/AArch64/arm64-2013-02-12-shufv8i8.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple

 ;CHECK-LABEL: Shuff:
 ;CHECK: tbl.8b
Index: test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll
===================================================================
--- test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll
+++ test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll
@@ -1,7 +1,7 @@
-; RUN: llc < %s -verify-machineinstrs -march=arm64 -aarch64-neon-syntax=apple -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=true | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-NOOPT
-; RUN: llc < %s -verify-machineinstrs -march=arm64 -aarch64-neon-syntax=apple -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=false | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-OPT
-; RUN: llc < %s -verify-machineinstrs -march=arm64 -aarch64-neon-syntax=generic -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=true | FileCheck %s -check-prefix=GENERIC -check-prefix=GENERIC-NOOPT
-; RUN: llc < %s -verify-machineinstrs -march=arm64 -aarch64-neon-syntax=generic -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=false | FileCheck %s -check-prefix=GENERIC -check-prefix=GENERIC-OPT
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=apple -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=true | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-NOOPT
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=apple -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=false | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-OPT
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=generic -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=true | FileCheck %s -check-prefix=GENERIC -check-prefix=GENERIC-NOOPT
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=generic -aarch64-simd-scalar=true -asm-verbose=false -disable-adv-copy-opt=false | FileCheck %s -check-prefix=GENERIC -check-prefix=GENERIC-OPT

 define <2 x i64> @bar(<2 x i64> %a, <2 x i64> %b) nounwind readnone {
 ; CHECK-LABEL: bar:
Index: test/CodeGen/AArch64/arm64-AnInfiniteLoopInDAGCombine.ll
===================================================================
--- test/CodeGen/AArch64/arm64-AnInfiniteLoopInDAGCombine.ll
+++ test/CodeGen/AArch64/arm64-AnInfiniteLoopInDAGCombine.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64
+; RUN: llc < %s -mtriple=arm64-eabi

 ; This test case tests an infinite loop bug in DAG combiner.
 ; It just tries to do the following replacing endlessly:
Index: test/CodeGen/AArch64/arm64-EXT-undef-mask.ll
===================================================================
--- test/CodeGen/AArch64/arm64-EXT-undef-mask.ll
+++ test/CodeGen/AArch64/arm64-EXT-undef-mask.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -march=arm64 -aarch64-neon-syntax=apple -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -O0 -mtriple=arm64-eabi -aarch64-neon-syntax=apple -verify-machineinstrs < %s | FileCheck %s

 ; The following 2 test cases test shufflevector with beginning UNDEF mask.
 define <8 x i16> @test_vext_undef_traverse(<8 x i16> %in) {
Index: test/CodeGen/AArch64/arm64-abi-varargs.ll
===================================================================
--- test/CodeGen/AArch64/arm64-abi-varargs.ll
+++ test/CodeGen/AArch64/arm64-abi-varargs.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=arm64 -mcpu=cyclone -enable-misched=false | FileCheck %s
-target triple = "arm64-apple-ios7.0.0"
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0.0 -mcpu=cyclone -enable-misched=false | FileCheck %s

 ; rdar://13625505
 ; Here we have 9 fixed integer arguments the 9th argument in on stack, the
Index: test/CodeGen/AArch64/arm64-abi_align.ll
===================================================================
--- test/CodeGen/AArch64/arm64-abi_align.ll
+++ test/CodeGen/AArch64/arm64-abi_align.ll
@@ -1,6 +1,5 @@
-; RUN: llc < %s -march=arm64 -mcpu=cyclone -enable-misched=false -disable-fp-elim | FileCheck %s
-; RUN: llc < %s -O0 -disable-fp-elim | FileCheck -check-prefix=FAST %s
-target triple = "arm64-apple-darwin"
+; RUN: llc < %s -mtriple=arm64-apple-darwin -mcpu=cyclone -enable-misched=false -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-darwin -O0 -disable-fp-elim | FileCheck -check-prefix=FAST %s

 ; rdar://12648441
 ; Generated from arm64-arguments.c with -O2.
Index: test/CodeGen/AArch64/arm64-addp.ll
===================================================================
--- test/CodeGen/AArch64/arm64-addp.ll
+++ test/CodeGen/AArch64/arm64-addp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -mcpu=cyclone | FileCheck %s

 define double @foo(<2 x double> %a) nounwind {
 ; CHECK-LABEL: foo:
Index: test/CodeGen/AArch64/arm64-addr-type-promotion.ll
===================================================================
--- test/CodeGen/AArch64/arm64-addr-type-promotion.ll
+++ test/CodeGen/AArch64/arm64-addr-type-promotion.ll
@@ -1,9 +1,8 @@
-; RUN: llc -march arm64 < %s -aarch64-collect-loh=false | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-ios3.0.0 -aarch64-collect-loh=false | FileCheck %s
 ; rdar://13452552
 ; Disable the collecting of LOH so that the labels do not get in the
 ; way of the NEXT patterns.

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128" -target triple = "arm64-apple-ios3.0.0" @block = common global i8* null, align 8 Index: test/CodeGen/AArch64/arm64-addrmode.ll =================================================================== --- test/CodeGen/AArch64/arm64-addrmode.ll +++ test/CodeGen/AArch64/arm64-addrmode.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=arm64 < %s | FileCheck %s +; RUN: llc -mtriple=arm64-eabi < %s | FileCheck %s ; rdar://10232252 @object = external hidden global i64, section "__DATA, __objc_ivar", align 8 Index: test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll =================================================================== --- test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll +++ test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=arm64 -mcpu=cyclone < %s | FileCheck %s +; RUN: llc -mtriple=arm64-eabi -mcpu=cyclone < %s | FileCheck %s ; CHECK: foo ; CHECK: str w[[REG0:[0-9]+]], [x19, #264] Index: test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll =================================================================== --- test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll +++ test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll @@ -1,7 +1,6 @@ -; RUN: llc -O1 -march=arm64 -enable-andcmp-sinking=true < %s | FileCheck %s +; RUN: llc -O1 -mtriple=arm64-apple-ios7.0.0 -enable-andcmp-sinking=true < %s | FileCheck %s ; ModuleID = 'and-cbz-extr-mr.bc' target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128" -target triple = "arm64-apple-ios7.0.0" define zeroext i1 @foo(i1 %IsEditable, i1 %isTextField, i8* %str1, i8* %str2, i8* %str3, i8* %str4, i8* %str5, i8* %str6, i8* %str7, i8* %str8, i8* %str9, i8* %str10, i8* %str11, i8* %str12, i8* %str13, i32 %int1, i8* %str14) unnamed_addr #0 align 2 { ; CHECK: _foo: Index: test/CodeGen/AArch64/arm64-arith-saturating.ll =================================================================== --- test/CodeGen/AArch64/arm64-arith-saturating.ll +++ test/CodeGen/AArch64/arm64-arith-saturating.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -mcpu=cyclone | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -mcpu=cyclone | FileCheck %s define i32 @qadds(<4 x i32> %b, <4 x i32> %c) nounwind readnone optsize ssp { ; CHECK-LABEL: qadds: Index: test/CodeGen/AArch64/arm64-arith.ll =================================================================== --- test/CodeGen/AArch64/arm64-arith.ll +++ test/CodeGen/AArch64/arm64-arith.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -asm-verbose=false | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -asm-verbose=false | FileCheck %s define i32 @t1(i32 %a, i32 %b) nounwind readnone ssp { entry: Index: test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll =================================================================== --- test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll +++ test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll @@ -1,7 +1,6 @@ -; RUN: llc -march=arm64 -aarch64-dead-def-elimination=false < %s | FileCheck %s +; RUN: llc -mtriple=arm64-apple-ios7.0.0 -aarch64-dead-def-elimination=false < %s | FileCheck %s target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" -target triple = "arm64-apple-ios7.0.0" ; Function Attrs: nounwind ssp uwtable define i32 @test1() #0 { Index: test/CodeGen/AArch64/arm64-atomic-128.ll 
===================================================================
--- test/CodeGen/AArch64/arm64-atomic-128.ll
+++ test/CodeGen/AArch64/arm64-atomic-128.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -mtriple=arm64-linux-gnu -verify-machineinstrs -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-linux-gnu -verify-machineinstrs -mcpu=cyclone | FileCheck %s

 @var = global i128 0

Index: test/CodeGen/AArch64/arm64-atomic.ll
===================================================================
--- test/CodeGen/AArch64/arm64-atomic.ll
+++ test/CodeGen/AArch64/arm64-atomic.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -asm-verbose=false -verify-machineinstrs -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -asm-verbose=false -verify-machineinstrs -mcpu=cyclone | FileCheck %s

 define i32 @val_compare_and_swap(i32* %p, i32 %cmp, i32 %new) #0 {
 ; CHECK-LABEL: val_compare_and_swap:
Index: test/CodeGen/AArch64/arm64-big-imm-offsets.ll
===================================================================
--- test/CodeGen/AArch64/arm64-big-imm-offsets.ll
+++ test/CodeGen/AArch64/arm64-big-imm-offsets.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 < %s
+; RUN: llc -mtriple=arm64-eabi < %s


 ; Make sure large offsets aren't mistaken for valid immediate offsets.
Index: test/CodeGen/AArch64/arm64-bitfield-extract.ll
===================================================================
--- test/CodeGen/AArch64/arm64-bitfield-extract.ll
+++ test/CodeGen/AArch64/arm64-bitfield-extract.ll
@@ -1,5 +1,5 @@
 ; RUN: opt -codegenprepare -mtriple=arm64-apple=ios -S -o - %s | FileCheck --check-prefix=OPT %s
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 %struct.X = type { i8, i8, [2 x i8] }
 %struct.Y = type { i32, i8 }
 %struct.Z = type { i8, i8, [2 x i8], i16 }
Index: test/CodeGen/AArch64/arm64-build-vector.ll
===================================================================
--- test/CodeGen/AArch64/arm64-build-vector.ll
+++ test/CodeGen/AArch64/arm64-build-vector.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 ; Check that building up a vector w/ only one non-zero lane initializes
 ; intelligently.
Index: test/CodeGen/AArch64/arm64-builtins-linux.ll
===================================================================
--- test/CodeGen/AArch64/arm64-builtins-linux.ll
+++ test/CodeGen/AArch64/arm64-builtins-linux.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=aarch64 -mtriple=aarch64-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s

 ; Function Attrs: nounwind readnone
 declare i8* @llvm.thread.pointer() #1
Index: test/CodeGen/AArch64/arm64-cast-opt.ll
===================================================================
--- test/CodeGen/AArch64/arm64-cast-opt.ll
+++ test/CodeGen/AArch64/arm64-cast-opt.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O3 -march=arm64 -mtriple arm64-apple-ios5.0.0 < %s | FileCheck %s
+; RUN: llc -O3 -mtriple arm64-apple-ios5.0.0 < %s | FileCheck %s
 ;
 ; Zero truncation is not necessary when the values are extended properly
 ; already.
Index: test/CodeGen/AArch64/arm64-clrsb.ll
===================================================================
--- test/CodeGen/AArch64/arm64-clrsb.ll
+++ test/CodeGen/AArch64/arm64-clrsb.ll
@@ -1,7 +1,6 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0.0 | FileCheck %s

 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
-target triple = "arm64-apple-ios7.0.0"

 ; Function Attrs: nounwind readnone
 declare i32 @llvm.ctlz.i32(i32, i1) #0
Index: test/CodeGen/AArch64/arm64-coalesce-ext.ll
===================================================================
--- test/CodeGen/AArch64/arm64-coalesce-ext.ll
+++ test/CodeGen/AArch64/arm64-coalesce-ext.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -mtriple=arm64-apple-darwin < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-apple-darwin < %s | FileCheck %s
 ; Check that the peephole optimizer knows about sext and zext instructions.
 ; CHECK: test1sext
 define i32 @test1sext(i64 %A, i64 %B, i32* %P, i64 *%P2) nounwind {
Index: test/CodeGen/AArch64/arm64-complex-ret.ll
===================================================================
--- test/CodeGen/AArch64/arm64-complex-ret.ll
+++ test/CodeGen/AArch64/arm64-complex-ret.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -o - %s | FileCheck %s
+; RUN: llc -mtriple=arm64-eabi -o - %s | FileCheck %s

 define { i192, i192, i21, i192 } @foo(i192) {
 ; CHECK-LABEL: foo:
Index: test/CodeGen/AArch64/arm64-convert-v4f64.ll
===================================================================
--- test/CodeGen/AArch64/arm64-convert-v4f64.ll
+++ test/CodeGen/AArch64/arm64-convert-v4f64.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s


 define <4 x i16> @fptosi_v4f64_to_v4i16(<4 x double>* %ptr) {
Index: test/CodeGen/AArch64/arm64-crc32.ll
===================================================================
--- test/CodeGen/AArch64/arm64-crc32.ll
+++ test/CodeGen/AArch64/arm64-crc32.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -mattr=+crc -o - %s | FileCheck %s
+; RUN: llc -mtriple=arm64-eabi -mattr=+crc -o - %s | FileCheck %s

 define i32 @test_crc32b(i32 %cur, i8 %next) {
 ; CHECK-LABEL: test_crc32b:
Index: test/CodeGen/AArch64/arm64-crypto.ll
===================================================================
--- test/CodeGen/AArch64/arm64-crypto.ll
+++ test/CodeGen/AArch64/arm64-crypto.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -mattr=crypto -aarch64-neon-syntax=apple -o - %s | FileCheck %s
+; RUN: llc -mtriple=arm64-eabi -mattr=crypto -aarch64-neon-syntax=apple -o - %s | FileCheck %s

 declare <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %data, <16 x i8> %key)
 declare <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %data, <16 x i8> %key)
Index: test/CodeGen/AArch64/arm64-cvt.ll
===================================================================
--- test/CodeGen/AArch64/arm64-cvt.ll
+++ test/CodeGen/AArch64/arm64-cvt.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 ;
 ; Floating-point scalar convert to signed integer (to nearest with ties to away)

Index: test/CodeGen/AArch64/arm64-dead-def-frame-index.ll
===================================================================
--- test/CodeGen/AArch64/arm64-dead-def-frame-index.ll
+++ test/CodeGen/AArch64/arm64-dead-def-frame-index.ll
@@ -1,7 +1,6 @@
-; RUN: llc -march=arm64 < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0.0 | FileCheck %s

 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
-target triple = "arm64-apple-ios7.0.0"

 ; Function Attrs: nounwind ssp uwtable
 define i32 @test1() #0 {
Index: test/CodeGen/AArch64/arm64-dup.ll
===================================================================
--- test/CodeGen/AArch64/arm64-dup.ll
+++ test/CodeGen/AArch64/arm64-dup.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s

 define <8 x i8> @v_dup8(i8 %A) nounwind {
 ;CHECK-LABEL: v_dup8:
Index: test/CodeGen/AArch64/arm64-ext.ll
===================================================================
--- test/CodeGen/AArch64/arm64-ext.ll
+++ test/CodeGen/AArch64/arm64-ext.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: test_vextd:
Index: test/CodeGen/AArch64/arm64-extend-int-to-fp.ll
===================================================================
--- test/CodeGen/AArch64/arm64-extend-int-to-fp.ll
+++ test/CodeGen/AArch64/arm64-extend-int-to-fp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 define <4 x float> @foo(<4 x i16> %a) nounwind {
 ; CHECK-LABEL: foo:
Index: test/CodeGen/AArch64/arm64-extload-knownzero.ll
===================================================================
--- test/CodeGen/AArch64/arm64-extload-knownzero.ll
+++ test/CodeGen/AArch64/arm64-extload-knownzero.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 ; rdar://12771555

 define void @foo(i16* %ptr, i32 %a) nounwind {
Index: test/CodeGen/AArch64/arm64-extract.ll
===================================================================
--- test/CodeGen/AArch64/arm64-extract.ll
+++ test/CodeGen/AArch64/arm64-extract.ll
@@ -1,5 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s \
-; RUN: -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -verify-machineinstrs | FileCheck %s

 define i64 @ror_i64(i64 %in) {
 ; CHECK-LABEL: ror_i64:
Index: test/CodeGen/AArch64/arm64-extract_subvector.ll
===================================================================
--- test/CodeGen/AArch64/arm64-extract_subvector.ll
+++ test/CodeGen/AArch64/arm64-extract_subvector.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 ; Extract of an upper half of a vector is an "ext.16b v0, v0, v0, #8" insn.

Index: test/CodeGen/AArch64/arm64-fastcc-tailcall.ll
===================================================================
--- test/CodeGen/AArch64/arm64-fastcc-tailcall.ll
+++ test/CodeGen/AArch64/arm64-fastcc-tailcall.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s

 define void @caller(i32* nocapture %p, i32 %a, i32 %b) nounwind optsize ssp {
 ; CHECK-NOT: stp
Index: test/CodeGen/AArch64/arm64-fcmp-opt.ll
===================================================================
--- test/CodeGen/AArch64/arm64-fcmp-opt.ll
+++ test/CodeGen/AArch64/arm64-fcmp-opt.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -mcpu=cyclone -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -mcpu=cyclone -aarch64-neon-syntax=apple | FileCheck %s
 ; rdar://10263824

 define i1 @fcmp_float1(float %a) nounwind ssp {
Index: test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll
===================================================================
--- test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll
+++ test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 ; DAGCombine to transform a conversion of an extract_vector_elt to an
 ; extract_vector_elt of a conversion, which saves a round trip of copies
Index: test/CodeGen/AArch64/arm64-fmadd.ll
===================================================================
--- test/CodeGen/AArch64/arm64-fmadd.ll
+++ test/CodeGen/AArch64/arm64-fmadd.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-eabi < %s | FileCheck %s

 define float @fma32(float %a, float %b, float %c) nounwind readnone ssp {
 entry:
Index: test/CodeGen/AArch64/arm64-fmax-safe.ll
===================================================================
--- test/CodeGen/AArch64/arm64-fmax-safe.ll
+++ test/CodeGen/AArch64/arm64-fmax-safe.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s

 define double @test_direct(float %in) {
 ; CHECK-LABEL: test_direct:
Index: test/CodeGen/AArch64/arm64-fmax.ll
===================================================================
--- test/CodeGen/AArch64/arm64-fmax.ll
+++ test/CodeGen/AArch64/arm64-fmax.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -enable-no-nans-fp-math < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -enable-no-nans-fp-math | FileCheck %s

 define double @test_direct(float %in) {
 ; CHECK-LABEL: test_direct:
Index: test/CodeGen/AArch64/arm64-fmuladd.ll
===================================================================
--- test/CodeGen/AArch64/arm64-fmuladd.ll
+++ test/CodeGen/AArch64/arm64-fmuladd.ll
@@ -1,4 +1,4 @@
-; RUN: llc -asm-verbose=false < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 define float @test_f32(float* %A, float* %B, float* %C) nounwind {
 ;CHECK-LABEL: test_f32:
Index: test/CodeGen/AArch64/arm64-fold-lsl.ll
===================================================================
--- test/CodeGen/AArch64/arm64-fold-lsl.ll
+++ test/CodeGen/AArch64/arm64-fold-lsl.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 ;
 ;
Index: test/CodeGen/AArch64/arm64-fp.ll
===================================================================
--- test/CodeGen/AArch64/arm64-fp.ll
+++ test/CodeGen/AArch64/arm64-fp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s

 define float @t1(i1 %a, float %b, float %c) nounwind {
 ; CHECK: t1
Index: test/CodeGen/AArch64/arm64-fp128-folding.ll
===================================================================
--- test/CodeGen/AArch64/arm64-fp128-folding.ll
+++ test/CodeGen/AArch64/arm64-fp128-folding.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -verify-machineinstrs | FileCheck %s
 declare void @bar(i8*, i8*, i32*)

 ; SelectionDAG used to try to fold some fp128 operations using the ppc128 type,
Index: test/CodeGen/AArch64/arm64-frame-index.ll
===================================================================
--- test/CodeGen/AArch64/arm64-frame-index.ll
+++ test/CodeGen/AArch64/arm64-frame-index.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -mtriple=arm64-apple-ios -aarch64-atomic-cfg-tidy=0 < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-ios -aarch64-atomic-cfg-tidy=0 | FileCheck %s
 ; rdar://11935841

 define void @t1() nounwind ssp {
Index: test/CodeGen/AArch64/arm64-i16-subreg-extract.ll
===================================================================
--- test/CodeGen/AArch64/arm64-i16-subreg-extract.ll
+++ test/CodeGen/AArch64/arm64-i16-subreg-extract.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 define i32 @foo(<4 x i16>* %__a) nounwind {
 ; CHECK-LABEL: foo:
Index: test/CodeGen/AArch64/arm64-icmp-opt.ll
===================================================================
--- test/CodeGen/AArch64/arm64-icmp-opt.ll
+++ test/CodeGen/AArch64/arm64-icmp-opt.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s

 ; Optimize (x > -1) to (x >= 0) etc.
 ; Optimize (cmp (add / sub), 0): eliminate the subs used to update flag
Index: test/CodeGen/AArch64/arm64-indexed-memory.ll
===================================================================
--- test/CodeGen/AArch64/arm64-indexed-memory.ll
+++ test/CodeGen/AArch64/arm64-indexed-memory.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-redzone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-redzone | FileCheck %s

 define void @store64(i64** nocapture %out, i64 %index, i64 %spacing) nounwind noinline ssp {
 ; CHECK-LABEL: store64:
Index: test/CodeGen/AArch64/arm64-inline-asm-error-I.ll
===================================================================
--- test/CodeGen/AArch64/arm64-inline-asm-error-I.ll
+++ test/CodeGen/AArch64/arm64-inline-asm-error-I.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -march=arm64 < %s 2> %t
+; RUN: not llc -mtriple=arm64-eabi < %s 2> %t
 ; RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s

 ; Check for at least one invalid constant.
Index: test/CodeGen/AArch64/arm64-inline-asm-error-J.ll
===================================================================
--- test/CodeGen/AArch64/arm64-inline-asm-error-J.ll
+++ test/CodeGen/AArch64/arm64-inline-asm-error-J.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -march=arm64 < %s 2> %t
+; RUN: not llc -mtriple=arm64-eabi < %s 2> %t
 ; RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s

 ; Check for at least one invalid constant.
Index: test/CodeGen/AArch64/arm64-inline-asm-error-K.ll
===================================================================
--- test/CodeGen/AArch64/arm64-inline-asm-error-K.ll
+++ test/CodeGen/AArch64/arm64-inline-asm-error-K.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -march=arm64 < %s 2> %t
+; RUN: not llc -mtriple=arm64-eabi < %s 2> %t
 ; RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s

 ; Check for at least one invalid constant.
Index: test/CodeGen/AArch64/arm64-inline-asm-error-L.ll
===================================================================
--- test/CodeGen/AArch64/arm64-inline-asm-error-L.ll
+++ test/CodeGen/AArch64/arm64-inline-asm-error-L.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -march=arm64 < %s 2> %t
+; RUN: not llc -mtriple=arm64-eabi < %s 2> %t
 ; RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s

 ; Check for at least one invalid constant.
Index: test/CodeGen/AArch64/arm64-inline-asm-error-M.ll
===================================================================
--- test/CodeGen/AArch64/arm64-inline-asm-error-M.ll
+++ test/CodeGen/AArch64/arm64-inline-asm-error-M.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -march=arm64 < %s 2> %t
+; RUN: not llc -mtriple=arm64-eabi < %s 2> %t
 ; RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s

 ; Check for at least one invalid constant.
Index: test/CodeGen/AArch64/arm64-inline-asm-error-N.ll
===================================================================
--- test/CodeGen/AArch64/arm64-inline-asm-error-N.ll
+++ test/CodeGen/AArch64/arm64-inline-asm-error-N.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -march=arm64 < %s 2> %t
+; RUN: not llc -mtriple=arm64-eabi < %s 2> %t
 ; RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s

 ; Check for at least one invalid constant.
Index: test/CodeGen/AArch64/arm64-inline-asm-zero-reg-error.ll
===================================================================
--- test/CodeGen/AArch64/arm64-inline-asm-zero-reg-error.ll
+++ test/CodeGen/AArch64/arm64-inline-asm-zero-reg-error.ll
@@ -1,4 +1,4 @@
-; RUN: not llc < %s -march=arm64 2>&1 | FileCheck %s
+; RUN: not llc < %s -mtriple=arm64-eabi 2>&1 | FileCheck %s


 ; The 'z' constraint allocates either xzr or wzr, but obviously an input of 1 is
Index: test/CodeGen/AArch64/arm64-ld1.ll
===================================================================
--- test/CodeGen/AArch64/arm64-ld1.ll
+++ test/CodeGen/AArch64/arm64-ld1.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -verify-machineinstrs -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -verify-machineinstrs -asm-verbose=false | FileCheck %s

 %struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }
 %struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> }
Index: test/CodeGen/AArch64/arm64-ldp-aa.ll
===================================================================
--- test/CodeGen/AArch64/arm64-ldp-aa.ll
+++ test/CodeGen/AArch64/arm64-ldp-aa.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -enable-misched=false -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -enable-misched=false -verify-machineinstrs | FileCheck %s

 ; The next set of tests makes sure we can combine the second instruction into
 ; the first.
Index: test/CodeGen/AArch64/arm64-ldp.ll
===================================================================
--- test/CodeGen/AArch64/arm64-ldp.ll
+++ test/CodeGen/AArch64/arm64-ldp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -verify-machineinstrs | FileCheck %s

 ; CHECK-LABEL: ldp_int
 ; CHECK: ldp
Index: test/CodeGen/AArch64/arm64-ldur.ll
===================================================================
--- test/CodeGen/AArch64/arm64-ldur.ll
+++ test/CodeGen/AArch64/arm64-ldur.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s

 define i64 @_f0(i64* %p) {
 ; CHECK: f0:
Index: test/CodeGen/AArch64/arm64-leaf.ll
===================================================================
--- test/CodeGen/AArch64/arm64-leaf.ll
+++ test/CodeGen/AArch64/arm64-leaf.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -mtriple=arm64-apple-ios < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-ios | FileCheck %s
 ; rdar://12829704

 define void @t8() nounwind ssp {
Index: test/CodeGen/AArch64/arm64-long-shift.ll
===================================================================
--- test/CodeGen/AArch64/arm64-long-shift.ll
+++ test/CodeGen/AArch64/arm64-long-shift.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -mcpu=cyclone | FileCheck %s

 define i128 @shl(i128 %r, i128 %s) nounwind readnone {
 ; CHECK-LABEL: shl:
Index: test/CodeGen/AArch64/arm64-memcpy-inline.ll
===================================================================
--- test/CodeGen/AArch64/arm64-memcpy-inline.ll
+++ test/CodeGen/AArch64/arm64-memcpy-inline.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -mcpu=cyclone | FileCheck %s

 %struct.x = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }

Index: test/CodeGen/AArch64/arm64-memset-inline.ll
===================================================================
--- test/CodeGen/AArch64/arm64-memset-inline.ll
+++ test/CodeGen/AArch64/arm64-memset-inline.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s

 define void @t1(i8* nocapture %c) nounwind optsize {
 entry:
Index: test/CodeGen/AArch64/arm64-movi.ll
===================================================================
--- test/CodeGen/AArch64/arm64-movi.ll
+++ test/CodeGen/AArch64/arm64-movi.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s

 ;==--------------------------------------------------------------------------==
 ; Tests for MOV-immediate implemented with ORR-immediate.
Index: test/CodeGen/AArch64/arm64-mul.ll
===================================================================
--- test/CodeGen/AArch64/arm64-mul.ll
+++ test/CodeGen/AArch64/arm64-mul.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s

 ; rdar://9296808
 ; rdar://9349137
Index: test/CodeGen/AArch64/arm64-neon-v8.1a.ll
===================================================================
--- test/CodeGen/AArch64/arm64-neon-v8.1a.ll
+++ test/CodeGen/AArch64/arm64-neon-v8.1a.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -verify-machineinstrs -march=arm64 -aarch64-neon-syntax=generic | FileCheck %s --check-prefix=CHECK-V8a
-; RUN: llc < %s -verify-machineinstrs -march=arm64 -mattr=+v8.1a -aarch64-neon-syntax=generic | FileCheck %s --check-prefix=CHECK-V81a
-; RUN: llc < %s -verify-machineinstrs -march=arm64 -mattr=+v8.1a -aarch64-neon-syntax=apple | FileCheck %s --check-prefix=CHECK-V81a-apple
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -aarch64-neon-syntax=generic | FileCheck %s --check-prefix=CHECK-V8a
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -mattr=+v8.1a -aarch64-neon-syntax=generic | FileCheck %s --check-prefix=CHECK-V81a
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi -mattr=+v8.1a -aarch64-neon-syntax=apple | FileCheck %s --check-prefix=CHECK-V81a-apple

 declare <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16>, <4 x i16>)
 declare <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16>, <8 x i16>)
Index: test/CodeGen/AArch64/arm64-popcnt.ll
===================================================================
--- test/CodeGen/AArch64/arm64-popcnt.ll
+++ test/CodeGen/AArch64/arm64-popcnt.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
-; RUN: llc < %s -march=aarch64 -mattr -neon -aarch64-neon-syntax=apple | FileCheck -check-prefix=CHECK-NONEON %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-eabi -mattr -neon -aarch64-neon-syntax=apple | FileCheck -check-prefix=CHECK-NONEON %s

 define i32 @cnt32_advsimd(i32 %x) nounwind readnone {
   %cnt = tail call i32 @llvm.ctpop.i32(i32 %x)
Index: test/CodeGen/AArch64/arm64-prefetch.ll
===================================================================
--- test/CodeGen/AArch64/arm64-prefetch.ll
+++ test/CodeGen/AArch64/arm64-prefetch.ll
@@ -1,4 +1,4 @@
-; RUN: llc %s -march arm64 -o - | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s

 @a = common global i32* null, align 8

Index: test/CodeGen/AArch64/arm64-redzone.ll
===================================================================
--- test/CodeGen/AArch64/arm64-redzone.ll
+++ test/CodeGen/AArch64/arm64-redzone.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-redzone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-redzone | FileCheck %s

 define i32 @foo(i32 %a, i32 %b) nounwind ssp {
 ; CHECK-LABEL: foo:
Index: test/CodeGen/AArch64/arm64-regress-f128csel-flags.ll
===================================================================
--- test/CodeGen/AArch64/arm64-regress-f128csel-flags.ll
+++ test/CodeGen/AArch64/arm64-regress-f128csel-flags.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -verify-machineinstrs | FileCheck %s

 ; We used to not mark NZCV as being used in the continuation basic-block
 ; when lowering a 128-bit "select" to branches. This meant a subsequent use
Index: test/CodeGen/AArch64/arm64-regress-interphase-shift.ll
===================================================================
--- test/CodeGen/AArch64/arm64-regress-interphase-shift.ll
+++ test/CodeGen/AArch64/arm64-regress-interphase-shift.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -o - %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s

 ; This is mostly a "don't assert" test. The type of the RHS of a shift depended
 ; on the phase of legalization, which led to the creation of an unexpected and
Index: test/CodeGen/AArch64/arm64-return-vector.ll
===================================================================
--- test/CodeGen/AArch64/arm64-return-vector.ll
+++ test/CodeGen/AArch64/arm64-return-vector.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s

 ; 2x64 vector should be returned in Q0.

Index: test/CodeGen/AArch64/arm64-returnaddr.ll
===================================================================
--- test/CodeGen/AArch64/arm64-returnaddr.ll
+++ test/CodeGen/AArch64/arm64-returnaddr.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s

 define i8* @rt0(i32 %x) nounwind readnone {
 entry:
Index: test/CodeGen/AArch64/arm64-rev.ll
===================================================================
--- test/CodeGen/AArch64/arm64-rev.ll
+++ test/CodeGen/AArch64/arm64-rev.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 define i32 @test_rev_w(i32 %a) nounwind {
 entry:
Index: test/CodeGen/AArch64/arm64-scvt.ll
===================================================================
--- test/CodeGen/AArch64/arm64-scvt.ll
+++ test/CodeGen/AArch64/arm64-scvt.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=arm64 -mcpu=cyclone -aarch64-neon-syntax=apple | FileCheck %s
-; RUN: llc < %s -march=arm64 -mcpu=cortex-a57 | FileCheck --check-prefix=CHECK-A57 %s
+; RUN: llc < %s -mtriple=arm64-eabi -mcpu=cyclone -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -mcpu=cortex-a57 | FileCheck --check-prefix=CHECK-A57 %s
 ; rdar://13082402

 define float @t1(i32* nocapture %src) nounwind ssp {
Index: test/CodeGen/AArch64/arm64-shifted-sext.ll
===================================================================
--- test/CodeGen/AArch64/arm64-shifted-sext.ll
+++ test/CodeGen/AArch64/arm64-shifted-sext.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -mtriple=arm64-apple-ios < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-ios | FileCheck %s
 ;
 ;
Index: test/CodeGen/AArch64/arm64-shrink-v1i64.ll
===================================================================
--- test/CodeGen/AArch64/arm64-shrink-v1i64.ll
+++ test/CodeGen/AArch64/arm64-shrink-v1i64.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 < %s
+; RUN: llc < %s -mtriple=arm64-eabi

 ; The DAGCombiner tries to do following shrink:
 ; Convert x+y to (VT)((SmallVT)x+(SmallVT)y)
Index: test/CodeGen/AArch64/arm64-simd-scalar-to-vector.ll
===================================================================
--- test/CodeGen/AArch64/arm64-simd-scalar-to-vector.ll
+++ test/CodeGen/AArch64/arm64-simd-scalar-to-vector.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -mcpu=cyclone | FileCheck %s
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -O0 -mcpu=cyclone | FileCheck %s --check-prefix=CHECK-FAST
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -O0 -mcpu=cyclone | FileCheck %s --check-prefix=CHECK-FAST

 define <16 x i8> @foo(<16 x i8> %a) nounwind optsize readnone ssp {
 ; CHECK: uaddlv.16b h0, v0
Index: test/CodeGen/AArch64/arm64-sitofp-combine-chains.ll
===================================================================
--- test/CodeGen/AArch64/arm64-sitofp-combine-chains.ll
+++ test/CodeGen/AArch64/arm64-sitofp-combine-chains.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -o - %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s

 ; ARM64ISelLowering.cpp was creating a new (floating-point) load for efficiency
 ; but not updating chain-successors of the old one. As a result, the two memory
Index: test/CodeGen/AArch64/arm64-sli-sri-opt.ll
===================================================================
--- test/CodeGen/AArch64/arm64-sli-sri-opt.ll
+++ test/CodeGen/AArch64/arm64-sli-sri-opt.ll
@@ -1,4 +1,4 @@
-; RUN: llc -aarch64-shift-insert-generation=true -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s
+; RUN: llc < %s -aarch64-shift-insert-generation=true -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 define void @testLeftGood(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %dest) nounwind {
 ; CHECK-LABEL: testLeftGood:
Index: test/CodeGen/AArch64/arm64-smaxv.ll
===================================================================
--- test/CodeGen/AArch64/arm64-smaxv.ll
+++ test/CodeGen/AArch64/arm64-smaxv.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s

 define signext i8 @test_vmaxv_s8(<8 x i8> %a1) {
 ; CHECK: test_vmaxv_s8
Index: test/CodeGen/AArch64/arm64-sminv.ll
===================================================================
--- test/CodeGen/AArch64/arm64-sminv.ll
+++ test/CodeGen/AArch64/arm64-sminv.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s

 define signext i8 @test_vminv_s8(<8 x i8> %a1) {
 ; CHECK: test_vminv_s8
Index: test/CodeGen/AArch64/arm64-sqshl-uqshl-i64Contant.ll
===================================================================
--- test/CodeGen/AArch64/arm64-sqshl-uqshl-i64Contant.ll
+++ test/CodeGen/AArch64/arm64-sqshl-uqshl-i64Contant.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -verify-machineinstrs -march=arm64 | FileCheck %s
+; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-eabi | FileCheck %s

 ; Check if sqshl/uqshl with constant shift amout can be selected.
 define i64 @test_vqshld_s64_i(i64 %a) {
Index: test/CodeGen/AArch64/arm64-st1.ll
===================================================================
--- test/CodeGen/AArch64/arm64-st1.ll
+++ test/CodeGen/AArch64/arm64-st1.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -verify-machineinstrs | FileCheck %s

 define void @st1lane_16b(<16 x i8> %A, i8* %D) {
 ; CHECK-LABEL: st1lane_16b
Index: test/CodeGen/AArch64/arm64-stp-aa.ll
===================================================================
--- test/CodeGen/AArch64/arm64-stp-aa.ll
+++ test/CodeGen/AArch64/arm64-stp-aa.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -enable-misched=false -aarch64-stp-suppress=false -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -enable-misched=false -aarch64-stp-suppress=false -verify-machineinstrs | FileCheck %s

 ; The next set of tests makes sure we can combine the second instruction into
 ; the first.
Index: test/CodeGen/AArch64/arm64-stp.ll
===================================================================
--- test/CodeGen/AArch64/arm64-stp.ll
+++ test/CodeGen/AArch64/arm64-stp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-stp-suppress=false -verify-machineinstrs -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-stp-suppress=false -verify-machineinstrs -mcpu=cyclone | FileCheck %s

 ; CHECK-LABEL: stp_int
 ; CHECK: stp w0, w1, [x2]
Index: test/CodeGen/AArch64/arm64-stur.ll
===================================================================
--- test/CodeGen/AArch64/arm64-stur.ll
+++ test/CodeGen/AArch64/arm64-stur.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -mcpu=cyclone | FileCheck %s
 %struct.X = type <{ i32, i64, i64 }>

 define void @foo1(i32* %p, i64 %val) nounwind {
Index: test/CodeGen/AArch64/arm64-subvector-extend.ll
===================================================================
--- test/CodeGen/AArch64/arm64-subvector-extend.ll
+++ test/CodeGen/AArch64/arm64-subvector-extend.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s

 ; Test efficient codegen of vector extends up from legal type to 128 bit
 ; and 256 bit vector types.
Index: test/CodeGen/AArch64/arm64-tbl.ll
===================================================================
--- test/CodeGen/AArch64/arm64-tbl.ll
+++ test/CodeGen/AArch64/arm64-tbl.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 define <8 x i8> @tbl1_8b(<16 x i8> %A, <8 x i8> %B) nounwind {
 ; CHECK: tbl1_8b
Index: test/CodeGen/AArch64/arm64-this-return.ll
===================================================================
--- test/CodeGen/AArch64/arm64-this-return.ll
+++ test/CodeGen/AArch64/arm64-this-return.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s

 %struct.A = type { i8 }
 %struct.B = type { i32 }
Index: test/CodeGen/AArch64/arm64-trap.ll
===================================================================
--- test/CodeGen/AArch64/arm64-trap.ll
+++ test/CodeGen/AArch64/arm64-trap.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 define void @foo() nounwind {
 ; CHECK: foo
 ; CHECK: brk #0x1
Index: test/CodeGen/AArch64/arm64-trn.ll
===================================================================
--- test/CodeGen/AArch64/arm64-trn.ll
+++ test/CodeGen/AArch64/arm64-trn.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 define <8 x i8> @vtrni8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vtrni8:
Index: test/CodeGen/AArch64/arm64-umaxv.ll
===================================================================
--- test/CodeGen/AArch64/arm64-umaxv.ll
+++ test/CodeGen/AArch64/arm64-umaxv.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s

 define i32 @vmax_u8x8(<8 x i8> %a) nounwind ssp {
 ; CHECK-LABEL: vmax_u8x8:
Index: test/CodeGen/AArch64/arm64-uminv.ll
===================================================================
--- test/CodeGen/AArch64/arm64-uminv.ll
+++ test/CodeGen/AArch64/arm64-uminv.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s

 define i32 @vmin_u8x8(<8 x i8> %a) nounwind ssp {
 ; CHECK-LABEL: vmin_u8x8:
Index: test/CodeGen/AArch64/arm64-umov.ll
===================================================================
--- test/CodeGen/AArch64/arm64-umov.ll
+++ test/CodeGen/AArch64/arm64-umov.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 define zeroext i8 @f1(<16 x i8> %a) {
 ; CHECK-LABEL: f1:
Index: test/CodeGen/AArch64/arm64-unaligned_ldst.ll
===================================================================
--- test/CodeGen/AArch64/arm64-unaligned_ldst.ll
+++ test/CodeGen/AArch64/arm64-unaligned_ldst.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 ; rdar://r11231896

 define void @t1(i8* nocapture %a, i8* nocapture %b) nounwind {
Index: test/CodeGen/AArch64/arm64-uzp.ll
===================================================================
--- test/CodeGen/AArch64/arm64-uzp.ll
+++ test/CodeGen/AArch64/arm64-uzp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 define <8 x i8> @vuzpi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vuzpi8:
Index: test/CodeGen/AArch64/arm64-vaargs.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vaargs.ll
+++ test/CodeGen/AArch64/arm64-vaargs.ll
@@ -1,6 +1,5 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-darwin11.0.0 | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64"
-target triple = "arm64-apple-darwin11.0.0"

 define float @t1(i8* nocapture %fmt, ...) nounwind ssp {
 entry:
Index: test/CodeGen/AArch64/arm64-vabs.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vabs.ll
+++ test/CodeGen/AArch64/arm64-vabs.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s


 define <8 x i16> @sabdl8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
Index: test/CodeGen/AArch64/arm64-vadd.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vadd.ll
+++ test/CodeGen/AArch64/arm64-vadd.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s

 define <8 x i8> @addhn8b(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK-LABEL: addhn8b:
Index: test/CodeGen/AArch64/arm64-vaddlv.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vaddlv.ll
+++ test/CodeGen/AArch64/arm64-vaddlv.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 define i64 @test_vaddlv_s32(<2 x i32> %a1) nounwind readnone {
 ; CHECK: test_vaddlv_s32
Index: test/CodeGen/AArch64/arm64-vaddv.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vaddv.ll
+++ test/CodeGen/AArch64/arm64-vaddv.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s -asm-verbose=false -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false -mcpu=cyclone | FileCheck %s

 define signext i8 @test_vaddv_s8(<8 x i8> %a1) {
 ; CHECK-LABEL: test_vaddv_s8:
Index: test/CodeGen/AArch64/arm64-vbitwise.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vbitwise.ll
+++ test/CodeGen/AArch64/arm64-vbitwise.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 define <8 x i8> @rbit_8b(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: rbit_8b:
Index: test/CodeGen/AArch64/arm64-vclz.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vclz.ll
+++ test/CodeGen/AArch64/arm64-vclz.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 define <8 x i8> @test_vclz_u8(<8 x i8> %a) nounwind readnone ssp {
 ; CHECK-LABEL: test_vclz_u8:
Index: test/CodeGen/AArch64/arm64-vcmp.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vcmp.ll
+++ test/CodeGen/AArch64/arm64-vcmp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s


 define void @fcmltz_4s(<4 x float> %a, <4 x i16>* %p) nounwind {
Index: test/CodeGen/AArch64/arm64-vcnt.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vcnt.ll
+++ test/CodeGen/AArch64/arm64-vcnt.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 define <8 x i8> @cls_8b(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: cls_8b:
Index: test/CodeGen/AArch64/arm64-vcombine.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vcombine.ll
+++ test/CodeGen/AArch64/arm64-vcombine.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 ; LowerCONCAT_VECTORS() was reversing the order of two parts.
 ; rdar://11558157
Index: test/CodeGen/AArch64/arm64-vcvt.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vcvt.ll
+++ test/CodeGen/AArch64/arm64-vcvt.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 define <2 x i32> @fcvtas_2s(<2 x float> %A) nounwind {
 ;CHECK-LABEL: fcvtas_2s:
Index: test/CodeGen/AArch64/arm64-vcvt_f.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vcvt_f.ll
+++ test/CodeGen/AArch64/arm64-vcvt_f.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
-; RUN: llc < %s -O0 -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -O0 -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 define <2 x double> @test_vcvt_f64_f32(<2 x float> %x) nounwind readnone ssp {
 ; CHECK-LABEL: test_vcvt_f64_f32:
Index: test/CodeGen/AArch64/arm64-vcvt_f32_su32.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vcvt_f32_su32.ll
+++ test/CodeGen/AArch64/arm64-vcvt_f32_su32.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 define <2 x float> @ucvt(<2 x i32> %a) nounwind readnone ssp {
 ; CHECK-LABEL: ucvt:
Index: test/CodeGen/AArch64/arm64-vcvt_n.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vcvt_n.ll
+++ test/CodeGen/AArch64/arm64-vcvt_n.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 define <2 x float> @cvtf32fxpu(<2 x i32> %a) nounwind readnone ssp {
 ; CHECK-LABEL: cvtf32fxpu:
Index: test/CodeGen/AArch64/arm64-vcvt_su32_f32.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vcvt_su32_f32.ll
+++ test/CodeGen/AArch64/arm64-vcvt_su32_f32.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 define <2 x i32> @c1(<2 x float> %a) nounwind readnone ssp {
 ; CHECK: c1
Index: test/CodeGen/AArch64/arm64-vcvtxd_f32_f64.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vcvtxd_f32_f64.ll
+++ test/CodeGen/AArch64/arm64-vcvtxd_f32_f64.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s

 define float @fcvtxn(double %a) {
 ; CHECK-LABEL: fcvtxn:
Index: test/CodeGen/AArch64/arm64-vecCmpBr.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vecCmpBr.ll
+++ test/CodeGen/AArch64/arm64-vecCmpBr.ll
@@ -1,7 +1,6 @@
-; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-apple-ios3.0.0 -aarch64-neon-syntax=apple -mcpu=cyclone | FileCheck %s
 ; ModuleID = 'arm64_vecCmpBr.c'
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128"
-target triple = "arm64-apple-ios3.0.0"


 define i32 @anyZero64(<4 x i16> %a) #0 {
Index: test/CodeGen/AArch64/arm64-vecFold.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vecFold.ll
+++ test/CodeGen/AArch64/arm64-vecFold.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -aarch64-neon-syntax=apple -o - %s| FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 define <16 x i8> @foov16i8(<8 x i16> %a0, <8 x i16> %b0) nounwind readnone ssp {
 ; CHECK-LABEL: foov16i8:
Index: test/CodeGen/AArch64/arm64-vector-ext.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vector-ext.ll
+++ test/CodeGen/AArch64/arm64-vector-ext.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 ;CHECK: @func30
 ;CHECK: movi.4h v1, #1
Index: test/CodeGen/AArch64/arm64-vector-imm.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vector-imm.ll
+++ test/CodeGen/AArch64/arm64-vector-imm.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 define <8 x i8> @v_orrimm(<8 x i8>* %A) nounwind {
 ; CHECK-LABEL: v_orrimm:
Index: test/CodeGen/AArch64/arm64-vector-insertion.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vector-insertion.ll
+++ test/CodeGen/AArch64/arm64-vector-insertion.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 -mcpu=generic -aarch64-neon-syntax=apple < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -mcpu=generic -aarch64-neon-syntax=apple | FileCheck %s

 define void @test0f(float* nocapture %x, float %a) #0 {
 entry:
Index: test/CodeGen/AArch64/arm64-vector-ldst.ll
===================================================================
--- test/CodeGen/AArch64/arm64-vector-ldst.ll
+++ test/CodeGen/AArch64/arm64-vector-ldst.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -verify-machineinstrs | FileCheck %s

 ; rdar://9428579

Index:
test/CodeGen/AArch64/arm64-vext.ll =================================================================== --- test/CodeGen/AArch64/arm64-vext.ll +++ test/CodeGen/AArch64/arm64-vext.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define void @test_vext_s8() nounwind ssp { ; CHECK-LABEL: test_vext_s8: Index: test/CodeGen/AArch64/arm64-vfloatintrinsics.ll =================================================================== --- test/CodeGen/AArch64/arm64-vfloatintrinsics.ll +++ test/CodeGen/AArch64/arm64-vfloatintrinsics.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s ;;; Float vectors Index: test/CodeGen/AArch64/arm64-vhadd.ll =================================================================== --- test/CodeGen/AArch64/arm64-vhadd.ll +++ test/CodeGen/AArch64/arm64-vhadd.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i8> @shadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK-LABEL: shadd8b: Index: test/CodeGen/AArch64/arm64-vhsub.ll =================================================================== --- test/CodeGen/AArch64/arm64-vhsub.ll +++ test/CodeGen/AArch64/arm64-vhsub.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i8> @shsub8b(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK-LABEL: shsub8b: Index: test/CodeGen/AArch64/arm64-vmax.ll =================================================================== --- test/CodeGen/AArch64/arm64-vmax.ll +++ test/CodeGen/AArch64/arm64-vmax.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i8> @smax_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK-LABEL: smax_8b: @@ -244,7 +244,7 @@ declare <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32>, <2 x i32>) nounwind readnone declare <4 x i32> @llvm.aarch64.neon.umin.v4i32(<4 x i32>, <4 x i32>) nounwind readnone -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i8> @smaxp_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK-LABEL: smaxp_8b: @@ -368,7 +368,7 @@ declare <2 x i32> @llvm.aarch64.neon.umaxp.v2i32(<2 x i32>, <2 x i32>) nounwind readnone declare <4 x i32> @llvm.aarch64.neon.umaxp.v4i32(<4 x i32>, <4 x i32>) nounwind readnone -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i8> @sminp_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK-LABEL: sminp_8b: Index: test/CodeGen/AArch64/arm64-vminmaxnm.ll =================================================================== --- test/CodeGen/AArch64/arm64-vminmaxnm.ll +++ test/CodeGen/AArch64/arm64-vminmaxnm.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <2 x float> @f1(<2 x float> %a, <2 x float> %b) nounwind readnone ssp { ; CHECK: fmaxnm.2s v0, v0, v1 Index: 
test/CodeGen/AArch64/arm64-vmovn.ll =================================================================== --- test/CodeGen/AArch64/arm64-vmovn.ll +++ test/CodeGen/AArch64/arm64-vmovn.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i8> @xtn8b(<8 x i16> %A) nounwind { ;CHECK-LABEL: xtn8b: Index: test/CodeGen/AArch64/arm64-vmul.ll =================================================================== --- test/CodeGen/AArch64/arm64-vmul.ll +++ test/CodeGen/AArch64/arm64-vmul.ll @@ -1,4 +1,4 @@ -; RUN: llc -asm-verbose=false < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -asm-verbose=false -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i16> @smull8h(<8 x i8>* %A, <8 x i8>* %B) nounwind { Index: test/CodeGen/AArch64/arm64-volatile.ll =================================================================== --- test/CodeGen/AArch64/arm64-volatile.ll +++ test/CodeGen/AArch64/arm64-volatile.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s define i64 @normal_load(i64* nocapture %bar) nounwind readonly { ; CHECK: normal_load ; CHECK: ldp Index: test/CodeGen/AArch64/arm64-vpopcnt.ll =================================================================== --- test/CodeGen/AArch64/arm64-vpopcnt.ll +++ test/CodeGen/AArch64/arm64-vpopcnt.ll @@ -1,5 +1,4 @@ -; RUN: llc < %s -march=arm64 -mcpu=cyclone | FileCheck %s -target triple = "arm64-apple-ios" +; RUN: llc < %s -mtriple=arm64-apple-ios -mcpu=cyclone | FileCheck %s ; The non-byte ones used to fail with "Cannot select" Index: test/CodeGen/AArch64/arm64-vqadd.ll =================================================================== --- test/CodeGen/AArch64/arm64-vqadd.ll +++ test/CodeGen/AArch64/arm64-vqadd.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i8> @sqadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK-LABEL: sqadd8b: Index: test/CodeGen/AArch64/arm64-vqsub.ll =================================================================== --- test/CodeGen/AArch64/arm64-vqsub.ll +++ test/CodeGen/AArch64/arm64-vqsub.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i8> @sqsub8b(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK-LABEL: sqsub8b: Index: test/CodeGen/AArch64/arm64-vselect.ll =================================================================== --- test/CodeGen/AArch64/arm64-vselect.ll +++ test/CodeGen/AArch64/arm64-vselect.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s ;CHECK: @func63 ;CHECK: cmeq.4h v0, v0, v1 Index: test/CodeGen/AArch64/arm64-vsetcc_fp.ll =================================================================== --- test/CodeGen/AArch64/arm64-vsetcc_fp.ll +++ test/CodeGen/AArch64/arm64-vsetcc_fp.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s define <2 x i32> @fcmp_one(<2 x float> %x, <2 x float> %y) nounwind optsize readnone { ; CHECK-LABEL: fcmp_one: ; CHECK-NEXT: 
fcmgt.2s [[REG:v[0-9]+]], v0, v1 Index: test/CodeGen/AArch64/arm64-vshift.ll =================================================================== --- test/CodeGen/AArch64/arm64-vshift.ll +++ test/CodeGen/AArch64/arm64-vshift.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -enable-misched=false | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -enable-misched=false | FileCheck %s define <8 x i8> @sqshl8b(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK-LABEL: sqshl8b: Index: test/CodeGen/AArch64/arm64-vshr.ll =================================================================== --- test/CodeGen/AArch64/arm64-vshr.ll +++ test/CodeGen/AArch64/arm64-vshr.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=arm64 -aarch64-neon-syntax=apple < %s -mcpu=cyclone | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -mcpu=cyclone | FileCheck %s define <8 x i16> @testShiftRightArith_v8i16(<8 x i16> %a, <8 x i16> %b) #0 { ; CHECK-LABEL: testShiftRightArith_v8i16: Index: test/CodeGen/AArch64/arm64-vsqrt.ll =================================================================== --- test/CodeGen/AArch64/arm64-vsqrt.ll +++ test/CodeGen/AArch64/arm64-vsqrt.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <2 x float> @frecps_2s(<2 x float>* %A, <2 x float>* %B) nounwind { ;CHECK-LABEL: frecps_2s: Index: test/CodeGen/AArch64/arm64-vsra.ll =================================================================== --- test/CodeGen/AArch64/arm64-vsra.ll +++ test/CodeGen/AArch64/arm64-vsra.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i8> @vsras8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK-LABEL: vsras8: Index: test/CodeGen/AArch64/arm64-vsub.ll =================================================================== --- test/CodeGen/AArch64/arm64-vsub.ll +++ test/CodeGen/AArch64/arm64-vsub.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s define <8 x i8> @subhn8b(<8 x i16>* %A, <8 x i16>* %B) nounwind { ;CHECK-LABEL: subhn8b: Index: test/CodeGen/AArch64/arm64-xaluo.ll =================================================================== --- test/CodeGen/AArch64/arm64-xaluo.ll +++ test/CodeGen/AArch64/arm64-xaluo.ll @@ -1,5 +1,5 @@ -; RUN: llc -march=arm64 -aarch64-atomic-cfg-tidy=0 -disable-post-ra -verify-machineinstrs < %s | FileCheck %s -; RUN: llc -march=arm64 -aarch64-atomic-cfg-tidy=0 -fast-isel -fast-isel-abort=1 -disable-post-ra -verify-machineinstrs < %s | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-atomic-cfg-tidy=0 -disable-post-ra -verify-machineinstrs | FileCheck %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-atomic-cfg-tidy=0 -fast-isel -fast-isel-abort=1 -disable-post-ra -verify-machineinstrs | FileCheck %s ; ; Get the actual value of the overflow bit. 
Index: test/CodeGen/AArch64/arm64-zext.ll
===================================================================
--- test/CodeGen/AArch64/arm64-zext.ll
+++ test/CodeGen/AArch64/arm64-zext.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s

 define i64 @foo(i32 %a, i32 %b) nounwind readnone ssp {
 entry:
Index: test/CodeGen/AArch64/arm64-zextload-unscaled.ll
===================================================================
--- test/CodeGen/AArch64/arm64-zextload-unscaled.ll
+++ test/CodeGen/AArch64/arm64-zextload-unscaled.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=arm64 < %s | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s

 @var32 = global i32 0
Index: test/CodeGen/AArch64/arm64-zip.ll
===================================================================
--- test/CodeGen/AArch64/arm64-zip.ll
+++ test/CodeGen/AArch64/arm64-zip.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 define <8 x i8> @vzipi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK-LABEL: vzipi8:
Index: test/CodeGen/AArch64/asm-large-immediate.ll
===================================================================
--- test/CodeGen/AArch64/asm-large-immediate.ll
+++ test/CodeGen/AArch64/asm-large-immediate.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=aarch64 -no-integrated-as < %s | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-eabi -no-integrated-as | FileCheck %s

 define void @test() {
 entry:
Index: test/CodeGen/AArch64/branch-folder-merge-mmos.ll
===================================================================
--- test/CodeGen/AArch64/branch-folder-merge-mmos.ll
+++ test/CodeGen/AArch64/branch-folder-merge-mmos.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=aarch64 -mtriple=aarch64-none-linux-gnu -stop-after branch-folder -o /dev/null < %s 2>&1 | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -stop-after branch-folder -o /dev/null < %s 2>&1 | FileCheck %s
 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"

 ; Function Attrs: norecurse nounwind
Index: test/CodeGen/AArch64/cmpwithshort.ll
===================================================================
--- test/CodeGen/AArch64/cmpwithshort.ll
+++ test/CodeGen/AArch64/cmpwithshort.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O3 -march=aarch64 < %s | FileCheck %s
+; RUN: llc < %s -O3 -mtriple=aarch64-eabi | FileCheck %s

 define i16 @test_1cmp_signed_1(i16* %ptr1) {
 ; CHECK-LABLE: @test_1cmp_signed_1
Index: test/CodeGen/AArch64/combine-comparisons-by-cse.ll
===================================================================
--- test/CodeGen/AArch64/combine-comparisons-by-cse.ll
+++ test/CodeGen/AArch64/combine-comparisons-by-cse.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=aarch64 -mtriple=aarch64-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s

 ; marked as external to prevent possible optimizations
 @a = external global i32
Index: test/CodeGen/AArch64/complex-fp-to-int.ll
===================================================================
--- test/CodeGen/AArch64/complex-fp-to-int.ll
+++ test/CodeGen/AArch64/complex-fp-to-int.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 define <2 x i64> @test_v2f32_to_signed_v2i64(<2 x float> %in) {
 ; CHECK-LABEL: test_v2f32_to_signed_v2i64:
Index: test/CodeGen/AArch64/complex-int-to-fp.ll
===================================================================
--- test/CodeGen/AArch64/complex-int-to-fp.ll
+++ test/CodeGen/AArch64/complex-int-to-fp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

 ; CHECK: autogen_SD19655
 ; CHECK: scvtf
Index: test/CodeGen/AArch64/div_minsize.ll
===================================================================
--- test/CodeGen/AArch64/div_minsize.ll
+++ test/CodeGen/AArch64/div_minsize.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=aarch64 -mtriple=aarch64-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s

 define i32 @testsize1(i32 %x) minsize nounwind {
 entry:
Index: test/CodeGen/AArch64/large_shift.ll
===================================================================
--- test/CodeGen/AArch64/large_shift.ll
+++ test/CodeGen/AArch64/large_shift.ll
@@ -1,5 +1,4 @@
-; RUN: llc -march=aarch64 -o - %s
-target triple = "arm64-unknown-unknown"
+; RUN: llc -mtriple=arm64-unknown-unknown -o - %s

 ; Make sure we don't run into an assert in the aarch64 code selection when
 ; DAGCombining fails.
Index: test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll
===================================================================
--- test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll
+++ test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=aarch64 -aarch64-neon-syntax=apple -aarch64-stp-suppress=false -verify-machineinstrs -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-eabi -aarch64-neon-syntax=apple -aarch64-stp-suppress=false -verify-machineinstrs -asm-verbose=false | FileCheck %s

 ; CHECK-LABEL: test_strd_sturd:
 ; CHECK-NEXT: stp d0, d1, [x0, #-8]
Index: test/CodeGen/AArch64/legalize-bug-bogus-cpu.ll
===================================================================
--- test/CodeGen/AArch64/legalize-bug-bogus-cpu.ll
+++ test/CodeGen/AArch64/legalize-bug-bogus-cpu.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=aarch64 -mcpu=bogus -o - %s
+; RUN: llc < %s -mtriple=aarch64-eabi -mcpu=bogus

 ; Fix the bug in PR20557. Set mcpu to a bogus name, llc will crash in type
 ; legalization.
Index: test/CodeGen/AArch64/lit.local.cfg
===================================================================
--- test/CodeGen/AArch64/lit.local.cfg
+++ test/CodeGen/AArch64/lit.local.cfg
@@ -2,7 +2,3 @@
 if not 'AArch64' in config.root.targets:
     config.unsupported = True
-
-# For now we don't test arm64-win32.
-if re.search(r'cygwin|mingw32|win32|windows-gnu|windows-msvc', config.target_triple):
-    config.unsupported = True
Index: test/CodeGen/AArch64/lower-range-metadata-func-call.ll
===================================================================
--- test/CodeGen/AArch64/lower-range-metadata-func-call.ll
+++ test/CodeGen/AArch64/lower-range-metadata-func-call.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=aarch64 -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s

 ; and can be eliminated
 ; CHECK-LABEL: {{^}}test_call_known_max_range:
Index: test/CodeGen/AArch64/memcpy-f128.ll
===================================================================
--- test/CodeGen/AArch64/memcpy-f128.ll
+++ test/CodeGen/AArch64/memcpy-f128.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=aarch64 -mtriple=aarch64-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s

 %structA = type { i128 }
 @stubA = internal unnamed_addr constant %structA zeroinitializer, align 8
Index: test/CodeGen/AArch64/merge-store-dependency.ll
===================================================================
--- test/CodeGen/AArch64/merge-store-dependency.ll
+++ test/CodeGen/AArch64/merge-store-dependency.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mcpu cortex-a53 -march aarch64 %s -o - | FileCheck %s --check-prefix=A53
+; RUN: llc < %s -mcpu cortex-a53 -mtriple=aarch64-eabi | FileCheck %s --check-prefix=A53

 ; PR26827 - Merge stores causes wrong dependency.
 %struct1 = type { %struct1*, %struct1*, i32, i32, i16, i16, void (i32, i32, i8*)*, i8* }
Index: test/CodeGen/AArch64/merge-store.ll
===================================================================
--- test/CodeGen/AArch64/merge-store.ll
+++ test/CodeGen/AArch64/merge-store.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=aarch64-unknown-unknown %s -mcpu=cyclone -o - | FileCheck %s --check-prefix=CYCLONE --check-prefix=CHECK
-; RUN: llc -march aarch64 %s -mattr=-slow-misaligned-128store -o - | FileCheck %s --check-prefix=MISALIGNED --check-prefix=CHECK
+; RUN: llc < %s -mtriple=aarch64-unknown-unknown -mcpu=cyclone | FileCheck %s --check-prefix=CYCLONE --check-prefix=CHECK
+; RUN: llc < %s -mtriple=aarch64-eabi -mattr=-slow-misaligned-128store | FileCheck %s --check-prefix=MISALIGNED --check-prefix=CHECK

 @g0 = external global <3 x float>, align 16
 @g1 = external global <3 x float>, align 4
Index: test/CodeGen/AArch64/mul_pow2.ll
===================================================================
--- test/CodeGen/AArch64/mul_pow2.ll
+++ test/CodeGen/AArch64/mul_pow2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=aarch64 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-eabi | FileCheck %s

 ; Convert mul x, pow2 to shift.
 ; Convert mul x, pow2 +/- 1 to shift + add/sub.
Index: test/CodeGen/AArch64/no-quad-ldp-stp.ll
===================================================================
--- test/CodeGen/AArch64/no-quad-ldp-stp.ll
+++ test/CodeGen/AArch64/no-quad-ldp-stp.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=aarch64 -mattr=+no-quad-ldst-pairs -verify-machineinstrs -asm-verbose=false | FileCheck %s
-; RUN: llc < %s -march=aarch64 -mcpu=exynos-m1 -verify-machineinstrs -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-eabi -mattr=+no-quad-ldst-pairs -verify-machineinstrs -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-eabi -mcpu=exynos-m1 -verify-machineinstrs -asm-verbose=false | FileCheck %s

 ; CHECK-LABEL: test_nopair_st
 ; CHECK: str
Index: test/CodeGen/AArch64/nzcv-save.ll
===================================================================
--- test/CodeGen/AArch64/nzcv-save.ll
+++ test/CodeGen/AArch64/nzcv-save.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs -march=aarch64 < %s | FileCheck %s
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-eabi | FileCheck %s

 ; CHECK: mrs [[NZCV_SAVE:x[0-9]+]], NZCV
 ; CHECK: msr NZCV, [[NZCV_SAVE]]
Index: test/CodeGen/AArch64/postra-mi-sched.ll
===================================================================
--- test/CodeGen/AArch64/postra-mi-sched.ll
+++ test/CodeGen/AArch64/postra-mi-sched.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O3 -march=aarch64 -mcpu=cortex-a53 | FileCheck %s
+; RUN: llc < %s -O3 -mtriple=aarch64-eabi -mcpu=cortex-a53 | FileCheck %s

 ; With cortex-a53, each of fmul and fcvt have latency of 6 cycles. After the
 ; pre-RA MI scheduler, fmul, fcvt and fdiv will be consecutive. The top-down
Index: test/CodeGen/AArch64/rem_crash.ll
===================================================================
--- test/CodeGen/AArch64/rem_crash.ll
+++ test/CodeGen/AArch64/rem_crash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=aarch64
+; RUN: llc < %s -mtriple=aarch64-eabi

 define i8 @test_minsize_uu8(i8 %x) minsize optsize {
 entry:
Index: test/CodeGen/AArch64/tailmerging_in_mbp.ll
===================================================================
--- test/CodeGen/AArch64/tailmerging_in_mbp.ll
+++ test/CodeGen/AArch64/tailmerging_in_mbp.ll
@@ -1,4 +1,4 @@
-; RUN: llc <%s -march=aarch64 -verify-machine-dom-info | FileCheck %s
+; RUN: llc <%s -mtriple=aarch64-eabi -verify-machine-dom-info | FileCheck %s

 ; CHECK-LABEL: test:
 ; CHECK: LBB0_7:
Index: test/CodeGen/AArch64/tbz-tbnz.ll
===================================================================
--- test/CodeGen/AArch64/tbz-tbnz.ll
+++ test/CodeGen/AArch64/tbz-tbnz.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O1 -march=aarch64 < %s | FileCheck %s
+; RUN: llc < %s -O1 -mtriple=aarch64-eabi | FileCheck %s

 declare void @t()