diff --git a/llvm/test/Analysis/MemorySSA/invariant-groups.ll b/llvm/test/Analysis/MemorySSA/invariant-groups.ll --- a/llvm/test/Analysis/MemorySSA/invariant-groups.ll +++ b/llvm/test/Analysis/MemorySSA/invariant-groups.ll @@ -342,7 +342,7 @@ ; CHECK-NEXT: call void @clobber8(i8* %ptr) call void @clobber8(i8* %ptr) ; 6 = MemoryDef(5) -; CHECK-NEXT call void @use(i8* %ptr2) +; CHECK-NEXT: call void @use(i8* %ptr2) call void @use(i8* %ptr2) ; CHECK: 7 = MemoryDef(6) ; CHECK-NEXT: call void @use(i8* %ptr3) diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll --- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll @@ -136,7 +136,7 @@ br label %block } -; FALLBACK-WITH-REPORT-ERR remark: :0:0: unable to legalize instruction: G_STORE %3, %4 :: (store 12 into `i96* undef`, align 16) (in function: nonpow2_add_narrowing) +; FALLBACK-WITH-REPORT-ERR: remark: :0:0: unable to legalize instruction: G_STORE %3, %4 :: (store 12 into `i96* undef`, align 16) (in function: nonpow2_add_narrowing) ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_add_narrowing ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_add_narrowing: define void @nonpow2_add_narrowing() { @@ -147,7 +147,7 @@ ret void } -; FALLBACK-WITH-REPORT-ERR remark: :0:0: unable to legalize instruction: G_STORE %3, %4 :: (store 12 into `i96* undef`, align 16) (in function: nonpow2_add_narrowing) +; FALLBACK-WITH-REPORT-ERR: remark: :0:0: unable to legalize instruction: G_STORE %3, %4 :: (store 12 into `i96* undef`, align 16) (in function: nonpow2_add_narrowing) ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_or_narrowing ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_or_narrowing: define void @nonpow2_or_narrowing() { @@ -160,7 +160,7 @@ ret void } -; FALLBACK-WITH-REPORT-ERR remark: :0:0: unable to legalize 
instruction: G_STORE %0, %1 :: (store 12 into `i96* undef`, align 16) (in function: nonpow2_load_narrowing) +; FALLBACK-WITH-REPORT-ERR: remark: :0:0: unable to legalize instruction: G_STORE %0, %1 :: (store 12 into `i96* undef`, align 16) (in function: nonpow2_load_narrowing) ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_load_narrowing ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_load_narrowing: define void @nonpow2_load_narrowing() { diff --git a/llvm/test/CodeGen/AArch64/swap-compare-operands.ll b/llvm/test/CodeGen/AArch64/swap-compare-operands.ll --- a/llvm/test/CodeGen/AArch64/swap-compare-operands.ll +++ b/llvm/test/CodeGen/AArch64/swap-compare-operands.ll @@ -1,7 +1,7 @@ ; RUN: llc < %s -mtriple=arm64 | FileCheck %s define i1 @testSwapCmpWithLSL64_1(i64 %a, i64 %b) { -; CHECK-LABEL testSwapCmpWithLSL64_1: +; CHECK-LABEL: testSwapCmpWithLSL64_1: ; CHECK: cmp x1, x0, lsl #1 ; CHECK-NEXT: cset w0, gt entry: @@ -11,7 +11,7 @@ } define i1 @testSwapCmpWithLSL64_63(i64 %a, i64 %b) { -; CHECK-LABEL testSwapCmpWithLSL64_63: +; CHECK-LABEL: testSwapCmpWithLSL64_63: ; CHECK: cmp x1, x0, lsl #63 ; CHECK-NEXT: cset w0, gt entry: @@ -21,7 +21,7 @@ } define i1 @testSwapCmpWithLSL32_1(i32 %a, i32 %b) { -; CHECK-LABEL testSwapCmpWithLSL32_1: +; CHECK-LABEL: testSwapCmpWithLSL32_1: ; CHECK: cmp w1, w0, lsl #1 ; CHECK-NEXT: cset w0, gt entry: @@ -31,7 +31,7 @@ } define i1 @testSwapCmpWithLSL32_31(i32 %a, i32 %b) { -; CHECK-LABEL testSwapCmpWithLSL32_31: +; CHECK-LABEL: testSwapCmpWithLSL32_31: ; CHECK: cmp w1, w0, lsl #31 ; CHECK-NEXT: cset w0, gt entry: @@ -41,7 +41,7 @@ } define i1 @testSwapCmpWithLSR64_1(i64 %a, i64 %b) { -; CHECK-LABEL testSwapCmpWithLSR64_1: +; CHECK-LABEL: testSwapCmpWithLSR64_1: ; CHECK: cmp x1, x0, lsr #1 ; CHECK-NEXT: cset w0, gt entry: @@ -51,7 +51,7 @@ } define i1 @testSwapCmpWithLSR64_63(i64 %a, i64 %b) { -; CHECK-LABEL testSwapCmpWithLSR64_63: +; CHECK-LABEL: testSwapCmpWithLSR64_63: ; CHECK: cmp x1, x0, 
lsr #63 ; CHECK-NEXT: cset w0, gt entry: @@ -61,7 +61,7 @@ } define i1 @testSwapCmpWithLSR32_1(i32 %a, i32 %b) { -; CHECK-LABEL testSwapCmpWithLSR32_1: +; CHECK-LABEL: testSwapCmpWithLSR32_1: ; CHECK: cmp w1, w0, lsr #1 ; CHECK-NEXT: cset w0, gt entry: @@ -71,7 +71,7 @@ } define i1 @testSwapCmpWithLSR32_31(i32 %a, i32 %b) { -; CHECK-LABEL testSwapCmpWithLSR32_31: +; CHECK-LABEL: testSwapCmpWithLSR32_31: ; CHECK: cmp w1, w0, lsr #31 ; CHECK-NEXT: cset w0, gt entry: @@ -81,7 +81,7 @@ } define i1 @testSwapCmpWithASR64_1(i64 %a, i64 %b) { -; CHECK-LABEL testSwapCmpWithASR64_1: +; CHECK-LABEL: testSwapCmpWithASR64_1: ; CHECK: cmp x1, x0, asr #1 ; CHECK-NEXT: cset w0, gt entry: @@ -91,7 +91,7 @@ } define i1 @testSwapCmpWithASR64_63(i64 %a, i64 %b) { -; CHECK-LABEL testSwapCmpWithASR64_63: +; CHECK-LABEL: testSwapCmpWithASR64_63: ; CHECK: cmp x1, x0, asr #63 ; CHECK-NEXT: cset w0, gt entry: @@ -101,7 +101,7 @@ } define i1 @testSwapCmpWithASR32_1(i32 %a, i32 %b) { -; CHECK-LABEL testSwapCmpWithASR32_1: +; CHECK-LABEL: testSwapCmpWithASR32_1: ; CHECK: cmp w1, w0, asr #1 ; CHECK-NEXT: cset w0, gt entry: @@ -111,7 +111,7 @@ } define i1 @testSwapCmpWithASR32_31(i32 %a, i32 %b) { -; CHECK-LABEL testSwapCmpWithASR32_31: +; CHECK-LABEL: testSwapCmpWithASR32_31: ; CHECK: cmp w1, w0, asr #31 ; CHECK-NEXT: cset w0, gt entry: @@ -121,7 +121,7 @@ } define i1 @testSwapCmpWithShiftedZeroExtend32_64(i32 %a, i64 %b) { -; CHECK-LABEL testSwapCmpWithShiftedZeroExtend32_64 +; CHECK-LABEL: testSwapCmpWithShiftedZeroExtend32_64 ; CHECK: cmp x1, w0, uxtw #2 ; CHECK-NEXT: cset w0, lo entry: @@ -132,7 +132,7 @@ } define i1 @testSwapCmpWithShiftedZeroExtend16_64(i16 %a, i64 %b) { -; CHECK-LABEL testSwapCmpWithShiftedZeroExtend16_64 +; CHECK-LABEL: testSwapCmpWithShiftedZeroExtend16_64 ; CHECK: cmp x1, w0, uxth #2 ; CHECK-NEXT: cset w0, lo entry: @@ -143,7 +143,7 @@ } define i1 @testSwapCmpWithShiftedZeroExtend8_64(i8 %a, i64 %b) { -; CHECK-LABEL testSwapCmpWithShiftedZeroExtend8_64 +; CHECK-LABEL: 
testSwapCmpWithShiftedZeroExtend8_64 ; CHECK: cmp x1, w0, uxtb #4 ; CHECK-NEXT: cset w0, lo entry: @@ -154,7 +154,7 @@ } define i1 @testSwapCmpWithShiftedZeroExtend16_32(i16 %a, i32 %b) { -; CHECK-LABEL testSwapCmpWithShiftedZeroExtend8_64 +; CHECK-LABEL: testSwapCmpWithShiftedZeroExtend8_64 ; CHECK: cmp w1, w0, uxth #3 ; CHECK-NEXT: cset w0, lo entry: @@ -165,7 +165,7 @@ } define i1 @testSwapCmpWithShiftedZeroExtend8_32(i8 %a, i32 %b) { -; CHECK-LABEL testSwapCmpWithShiftedZeroExtend8_64 +; CHECK-LABEL: testSwapCmpWithShiftedZeroExtend8_64 ; CHECK: cmp w1, w0, uxtb #4 ; CHECK-NEXT: cset w0, lo entry: @@ -176,7 +176,7 @@ } define i1 @testSwapCmpWithTooLargeShiftedZeroExtend8_32(i8 %a, i32 %b) { -; CHECK-LABEL testSwapCmpWithTooLargeShiftedZeroExtend8_64 +; CHECK-LABEL: testSwapCmpWithTooLargeShiftedZeroExtend8_64 ; CHECK: and [[REG:w[0-9]+]], w0, #0xff ; CHECK: cmp w1, [[REG]], lsl #5 ; CHECK-NEXT: cset w0, lo @@ -188,7 +188,7 @@ } define i1 @testSwapCmpWithZeroExtend8_32(i8 %a, i32 %b) { -; CHECK-LABEL testSwapCmpWithZeroExtend8_64 +; CHECK-LABEL: testSwapCmpWithZeroExtend8_64 ; CHECK: cmp w1, w0, uxtb ; CHECK-NEXT: cset w0, lo entry: @@ -198,7 +198,7 @@ } define i1 @testSwapCmpWithShiftedSignExtend32_64(i32 %a, i64 %b) { -; CHECK-LABEL testSwapCmpWithShiftedZeroExtend32_64 +; CHECK-LABEL: testSwapCmpWithShiftedZeroExtend32_64 ; CHECK: cmp x1, w0, sxtw #2 ; CHECK-NEXT: cset w0, lo entry: @@ -209,7 +209,7 @@ } define i1 @testSwapCmpWithShiftedSignExtend16_64(i16 %a, i64 %b) { -; CHECK-LABEL testSwapCmpWithShiftedZeroExtend16_64 +; CHECK-LABEL: testSwapCmpWithShiftedZeroExtend16_64 ; CHECK: cmp x1, w0, sxth #2 ; CHECK-NEXT: cset w0, lo entry: @@ -220,7 +220,7 @@ } define i1 @testSwapCmpWithShiftedSignExtend8_64(i8 %a, i64 %b) { -; CHECK-LABEL testSwapCmpWithShiftedSignExtend8_64 +; CHECK-LABEL: testSwapCmpWithShiftedSignExtend8_64 ; CHECK: cmp x1, w0, sxtb #4 ; CHECK-NEXT: cset w0, lo entry: @@ -231,7 +231,7 @@ } define i1 @testSwapCmpWithShiftedSignExtend16_32(i16 
%a, i32 %b) { -; CHECK-LABEL testSwapCmpWithShiftedSignExtend8_64 +; CHECK-LABEL: testSwapCmpWithShiftedSignExtend8_64 ; CHECK: cmp w1, w0, sxth #3 ; CHECK-NEXT: cset w0, lo entry: @@ -242,7 +242,7 @@ } define i1 @testSwapCmpWithShiftedSignExtend8_32(i8 %a, i32 %b) { -; CHECK-LABEL testSwapCmpWithShiftedSignExtend8_64 +; CHECK-LABEL: testSwapCmpWithShiftedSignExtend8_64 ; CHECK: cmp w1, w0, sxtb #4 ; CHECK-NEXT: cset w0, lo entry: @@ -253,7 +253,7 @@ } define i1 @testSwapCmpWithTooLargeShiftedSignExtend8_32(i8 %a, i32 %b) { -; CHECK-LABEL testSwapCmpWithTooLargeShiftedSignExtend8_64 +; CHECK-LABEL: testSwapCmpWithTooLargeShiftedSignExtend8_64 ; CHECK: sxtb [[REG:w[0-9]+]], w0 ; CHECK-NEXT: cmp w1, [[REG]], lsl #5 ; CHECK-NEXT: cset w0, lo @@ -265,7 +265,7 @@ } define i1 @testSwapCmpWithSignExtend8_32(i8 %a, i32 %b) { -; CHECK-LABEL testSwapCmpWithSignExtend8_64 +; CHECK-LABEL: testSwapCmpWithSignExtend8_64 ; CHECK: cmp w1, w0, sxtb ; CHECK-NEXT: cset w0, lo entry: @@ -275,7 +275,7 @@ } define i1 @testSwapCmnWithLSL64_1(i64 %a, i64 %b) { -; CHECK-LABEL testSwapCmnWithLSL64_1: +; CHECK-LABEL: testSwapCmnWithLSL64_1: ; CHECK: cmn x1, x0, lsl #1 ; CHECK-NEXT: cset w0, ne entry: @@ -287,7 +287,7 @@ ; Note: testing with a 62 bits shift as 63 has another optimization kicking in. define i1 @testSwapCmnWithLSL64_62(i64 %a, i64 %b) { -; CHECK-LABEL testSwapCmnWithLSL64_62: +; CHECK-LABEL: testSwapCmnWithLSL64_62: ; CHECK: cmn x1, x0, lsl #62 ; CHECK-NEXT: cset w0, ne entry: @@ -302,7 +302,7 @@ ; so that this test can be adapted should the optimization be done with the ; operand swap. 
define i1 @testSwapCmnWithLSL64_63(i64 %a, i64 %b) { -; CHECK-LABEL testSwapCmnWithLSL64_63: +; CHECK-LABEL: testSwapCmnWithLSL64_63: ; CHECK: cmp x1, x0, lsl #63 ; CHECK-NEXT: cset w0, ne entry: @@ -313,7 +313,7 @@ } define i1 @testSwapCmnWithLSL32_1(i32 %a, i32 %b) { -; CHECK-LABEL testSwapCmnWithLSL32_1: +; CHECK-LABEL: testSwapCmnWithLSL32_1: ; CHECK: cmn w1, w0, lsl #1 ; CHECK-NEXT: cset w0, ne entry: @@ -325,7 +325,7 @@ ; Note: testing with a 30 bits shift as 30 has another optimization kicking in. define i1 @testSwapCmnWithLSL32_30(i32 %a, i32 %b) { -; CHECK-LABEL testSwapCmnWithLSL32_30: +; CHECK-LABEL: testSwapCmnWithLSL32_30: ; CHECK: cmn w1, w0, lsl #30 ; CHECK-NEXT: cset w0, ne entry: @@ -340,7 +340,7 @@ ; so that this test can be adapted should the optimization be done with the ; operand swap. define i1 @testSwapCmnWithLSL32_31(i32 %a, i32 %b) { -; CHECK-LABEL testSwapCmnWithLSL32_31: +; CHECK-LABEL: testSwapCmnWithLSL32_31: ; CHECK: cmp w1, w0, lsl #31 ; CHECK-NEXT: cset w0, ne entry: @@ -351,7 +351,7 @@ } define i1 @testSwapCmnWithLSR64_1(i64 %a, i64 %b) { -; CHECK-LABEL testSwapCmnWithLSR64_1: +; CHECK-LABEL: testSwapCmnWithLSR64_1: ; CHECK: cmn x1, x0, lsr #1 ; CHECK-NEXT: cset w0, ne entry: @@ -363,7 +363,7 @@ ; Note: testing with a 62 bits shift as 63 has another optimization kicking in. define i1 @testSwapCmnWithLSR64_62(i64 %a, i64 %b) { -; CHECK-LABEL testSwapCmnWithLSR64_62: +; CHECK-LABEL: testSwapCmnWithLSR64_62: ; CHECK: cmn x1, x0, lsr #62 ; CHECK-NEXT: cset w0, ne entry: @@ -378,7 +378,7 @@ ; so that this test can be adapted should the optimization be done with the ; operand swap. 
define i1 @testSwapCmnWithLSR64_63(i64 %a, i64 %b) { -; CHECK-LABEL testSwapCmnWithLSR64_63: +; CHECK-LABEL: testSwapCmnWithLSR64_63: ; CHECK: cmp x1, x0, asr #63 ; CHECK-NEXT: cset w0, ne entry: @@ -389,7 +389,7 @@ } define i1 @testSwapCmnWithLSR32_1(i32 %a, i32 %b) { -; CHECK-LABEL testSwapCmnWithLSR32_1: +; CHECK-LABEL: testSwapCmnWithLSR32_1: ; CHECK: cmn w1, w0, lsr #1 ; CHECK-NEXT: cset w0, ne entry: @@ -401,7 +401,7 @@ ; Note: testing with a 30 bits shift as 31 has another optimization kicking in. define i1 @testSwapCmnWithLSR32_30(i32 %a, i32 %b) { -; CHECK-LABEL testSwapCmnWithLSR32_30: +; CHECK-LABEL: testSwapCmnWithLSR32_30: ; CHECK: cmn w1, w0, lsr #30 ; CHECK-NEXT: cset w0, ne entry: @@ -416,7 +416,7 @@ ; so that this test can be adapted should the optimization be done with the ; operand swap. define i1 @testSwapCmnWithLSR32_31(i32 %a, i32 %b) { -; CHECK-LABEL testSwapCmnWithLSR32_31: +; CHECK-LABEL: testSwapCmnWithLSR32_31: ; CHECK: cmp w1, w0, asr #31 ; CHECK-NEXT: cset w0, ne entry: @@ -427,7 +427,7 @@ } define i1 @testSwapCmnWithASR64_1(i64 %a, i64 %b) { -; CHECK-LABEL testSwapCmnWithASR64_1: +; CHECK-LABEL: testSwapCmnWithASR64_1: ; CHECK: cmn x1, x0, asr #3 ; CHECK-NEXT: cset w0, ne entry: @@ -439,7 +439,7 @@ ; Note: testing with a 62 bits shift as 63 has another optimization kicking in. define i1 @testSwapCmnWithASR64_62(i64 %a, i64 %b) { -; CHECK-LABEL testSwapCmnWithASR64_62: +; CHECK-LABEL: testSwapCmnWithASR64_62: ; CHECK: cmn x1, x0, asr #62 ; CHECK-NEXT: cset w0, ne entry: @@ -454,7 +454,7 @@ ; so that this test can be adapted should the optimization be done with the ; operand swap. 
define i1 @testSwapCmnWithASR64_63(i64 %a, i64 %b) { -; CHECK-LABEL testSwapCmnWithASR64_63: +; CHECK-LABEL: testSwapCmnWithASR64_63: ; CHECK: cmp x1, x0, lsr #63 ; CHECK-NEXT: cset w0, ne entry: @@ -465,7 +465,7 @@ } define i1 @testSwapCmnWithASR32_1(i32 %a, i32 %b) { -; CHECK-LABEL testSwapCmnWithASR32_1: +; CHECK-LABEL: testSwapCmnWithASR32_1: ; CHECK: cmn w1, w0, asr #1 ; CHECK-NEXT: cset w0, eq entry: @@ -477,7 +477,7 @@ ; Note: testing with a 30 bits shift as 31 has another optimization kicking in. define i1 @testSwapCmnWithASR32_30(i32 %a, i32 %b) { -; CHECK-LABEL testSwapCmnWithASR32_30: +; CHECK-LABEL: testSwapCmnWithASR32_30: ; CHECK: cmn w1, w0, asr #30 ; CHECK-NEXT: cset w0, ne entry: @@ -492,7 +492,7 @@ ; so that this test can be adapted should the optimization be done with the ; operand swap. define i1 @testSwapCmnWithASR32_31(i32 %a, i32 %b) { -; CHECK-LABEL testSwapCmnWithASR32_31: +; CHECK-LABEL: testSwapCmnWithASR32_31: ; CHECK: cmp w1, w0, lsr #31 ; CHECK-NEXT: cset w0, ne entry: @@ -503,7 +503,7 @@ } define i64 @testSwapCmpToCmnWithZeroExtend(i32 %a32, i16 %a16, i8 %a8, i64 %b64, i32 %b32) { -; CHECK-LABEL testSwapCmpToCmnWithZeroExtend: +; CHECK-LABEL: testSwapCmpToCmnWithZeroExtend: t0: %conv0 = zext i32 %a32 to i64 %shl0 = shl i64 %conv0, 1 @@ -567,7 +567,7 @@ ret i64 1 } define i64 @testSwapCmpToCmnWithSignExtend(i32 %a32, i16 %a16, i8 %a8, i64 %b64, i32 %b32) { -; CHECK-LABEL testSwapCmpToCmnWithSignExtend: +; CHECK-LABEL: testSwapCmpToCmnWithSignExtend: t0: %conv0 = sext i32 %a32 to i64 %shl0 = shl i64 %conv0, 1 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir @@ -455,7 +455,7 @@ bb.0: liveins: $sgpr0_sgpr1 ; CHECK-LABEL: name: load_constant_v8i32_uniform - ; CHECK (<8 x s32>) = G_LOAD %0 :: (load 32, addrspace 4) + ; CHECK: (<8 
x s32>) = G_LOAD %0 :: (load 32, addrspace 4) %0:_(p4) = COPY $sgpr0_sgpr1 %1:_(<8 x s32>) = G_LOAD %0 :: (load 32, addrspace 4) ... @@ -468,7 +468,7 @@ bb.0: liveins: $sgpr0_sgpr1 ; CHECK-LABEL: name: load_constant_v4i64_uniform - ; CHECK (<4 x s64>) = G_LOAD %0 :: (load 32, addrspace 4) + ; CHECK: (<4 x s64>) = G_LOAD %0 :: (load 32, addrspace 4) %0:_(p4) = COPY $sgpr0_sgpr1 %1:_(<4 x s64>) = G_LOAD %0 :: (load 32, addrspace 4) ... @@ -481,7 +481,7 @@ bb.0: liveins: $sgpr0_sgpr1 ; CHECK-LABEL: name: load_constant_v16i32_uniform - ; CHECK (<16 x s32>) = G_LOAD %0 :: (load 64, addrspace 4) + ; CHECK: (<16 x s32>) = G_LOAD %0 :: (load 64, addrspace 4) %0:_(p4) = COPY $sgpr0_sgpr1 %1:_(<16 x s32>) = G_LOAD %0 :: (load 64, addrspace 4) ... @@ -494,7 +494,7 @@ bb.0: liveins: $sgpr0_sgpr1 ; CHECK-LABEL: name: load_constant_v8i64_uniform - ; CHECK (<8 x s64>) = G_LOAD %0 :: (load 64, addrspace 4) + ; CHECK: (<8 x s64>) = G_LOAD %0 :: (load 64, addrspace 4) %0:_(p4) = COPY $sgpr0_sgpr1 %1:_(<8 x s64>) = G_LOAD %0 :: (load 64, addrspace 4) ... 
diff --git a/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll b/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll --- a/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll +++ b/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll @@ -106,7 +106,7 @@ ; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] -; VI-SAFE v_cmp_le_f32_e32 vcc, [[A]], [[B]] +; VI-SAFE: v_cmp_le_f32_e32 vcc, [[A]], [[B]] ; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] ; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] @@ -130,7 +130,7 @@ ; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] -; VI-SAFE v_cmp_lt_f32_e32 vcc, [[A]], [[B]] +; VI-SAFE: v_cmp_lt_f32_e32 vcc, [[A]], [[B]] ; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] ; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] @@ -154,7 +154,7 @@ ; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] -; VI-SAFE v_cmp_lt_f32_e32 vcc, [[A]], [[B]] +; VI-SAFE: v_cmp_lt_f32_e32 vcc, [[A]], [[B]] ; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] ; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] @@ -178,7 +178,7 @@ ; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] -; VI-SAFE v_cmp_lt_f32_e32 vcc, [[A]], [[B]] +; VI-SAFE: v_cmp_lt_f32_e32 vcc, [[A]], [[B]] ; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] ; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] @@ -202,9 +202,9 @@ ; SI-SAFE: v_min_legacy_f32_e32 ; SI-SAFE: v_min_legacy_f32_e32 -; VI-SAFE v_cmp_lt_f32_e32 +; VI-SAFE: v_cmp_lt_f32_e32 ; VI-SAFE: v_cndmask_b32_e32 -; VI-SAFE v_cmp_lt_f32_e32 +; VI-SAFE: v_cmp_lt_f32_e32 ; VI-SAFE: v_cndmask_b32_e32 ; GCN-NONAN: v_min_f32_e32 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.buffer.load.ll --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.buffer.load.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.buffer.load.ll @@ -357,9 +357,9 @@ ; SI: s_mov_b32 [[K:s[0-9]+]], 0x80000{{$}} ; SI: s_buffer_load_dword s0, s[0:3], [[K]]{{$}} -; CI s_buffer_load_dword s0, s[0:3], 0x20000{{$}} +; CI: 
s_buffer_load_dword s0, s[0:3], 0x20000{{$}} -; VI s_buffer_load_dword s0, s[0:3], 0x20000{{$}} +; VI: s_buffer_load_dword s0, s[0:3], 0x20000{{$}} define amdgpu_ps i32 @s_buffer_load_imm_bit19(<4 x i32> inreg %desc) { %load = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %desc, i32 524288, i32 0) ret i32 %load @@ -369,9 +369,9 @@ ; SI: s_mov_b32 [[K:s[0-9]+]], 0xfff80000{{$}} ; SI: s_buffer_load_dword s0, s[0:3], [[K]]{{$}} -; CI s_buffer_load_dword s0, s[0:3], 0x20000{{$}} +; CI: s_buffer_load_dword s0, s[0:3], 0x20000{{$}} -; VI s_buffer_load_dword s0, s[0:3], 0x20000{{$}} +; VI: s_buffer_load_dword s0, s[0:3], 0x20000{{$}} define amdgpu_ps i32 @s_buffer_load_imm_neg_bit19(<4 x i32> inreg %desc) { %load = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %desc, i32 -524288, i32 0) ret i32 %load diff --git a/llvm/test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir b/llvm/test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir --- a/llvm/test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir +++ b/llvm/test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir @@ -344,7 +344,7 @@ --- # GCN-LABEL: name: shrink_addc_vop3{{$}} # GCN: %29:vgpr_32 = V_ADDC_U32_e32 %19, %17, implicit-def $vcc, implicit $vcc, implicit $exec -# GCN %24 = V_CNDMASK_B32_e64 0, 0, 0, 1, killed $vcc, implicit $exec +# GCN: %24 = V_CNDMASK_B32_e64 0, 0, 0, 1, killed $vcc, implicit $exec name: shrink_addc_vop3 alignment: 1 diff --git a/llvm/test/CodeGen/AMDGPU/si-annotate-cfg-loop-assert.ll b/llvm/test/CodeGen/AMDGPU/si-annotate-cfg-loop-assert.ll --- a/llvm/test/CodeGen/AMDGPU/si-annotate-cfg-loop-assert.ll +++ b/llvm/test/CodeGen/AMDGPU/si-annotate-cfg-loop-assert.ll @@ -1,11 +1,11 @@ ; RUN: llc -march=amdgcn -mcpu=kaveri -verify-machineinstrs < %s | FileCheck %s ; CHECK-LABEL: {{^}}test: -; CHECK s_and_saveexec_b64 -; CHECK s_xor_b64 -; CHECK s_or_b64 exec, exec -; CHECK s_andn2_b64 exec, exec -; CHECK s_cbranch_execnz +; CHECK: s_and_saveexec_b64 +; CHECK: s_xor_b64 +; CHECK: s_or_b64 exec, exec +; CHECK: s_andn2_b64 exec, 
exec +; CHECK: s_cbranch_execnz define amdgpu_kernel void @test(i32 %arg, i32 %arg1) { bb: %tmp = icmp ne i32 %arg, 0 diff --git a/llvm/test/CodeGen/AMDGPU/trunc-cmp-constant.ll b/llvm/test/CodeGen/AMDGPU/trunc-cmp-constant.ll --- a/llvm/test/CodeGen/AMDGPU/trunc-cmp-constant.ll +++ b/llvm/test/CodeGen/AMDGPU/trunc-cmp-constant.ll @@ -3,7 +3,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone -; FUNC-LABEL {{^}}sextload_i1_to_i32_trunc_cmp_eq_0: +; FUNC-LABEL: {{^}}sextload_i1_to_i32_trunc_cmp_eq_0: ; SI: buffer_load_ubyte [[LOAD:v[0-9]+]] ; SI: v_and_b32_e32 [[TMP:v[0-9]+]], 1, [[LOAD]] ; SI: v_cmp_eq_u32_e32 vcc, 0, [[TMP]]{{$}} @@ -80,7 +80,7 @@ } -; FUNC-LABEL {{^}}sextload_i1_to_i32_trunc_cmp_ne_0: +; FUNC-LABEL: {{^}}sextload_i1_to_i32_trunc_cmp_ne_0: ; SI: buffer_load_ubyte [[LOAD:v[0-9]+]] ; SI: v_and_b32_e32 [[TMP:v[0-9]+]], 1, [[LOAD]] ; SI: buffer_store_byte [[RESULT]] diff --git a/llvm/test/CodeGen/AMDGPU/vector-extract-insert.ll b/llvm/test/CodeGen/AMDGPU/vector-extract-insert.ll --- a/llvm/test/CodeGen/AMDGPU/vector-extract-insert.ll +++ b/llvm/test/CodeGen/AMDGPU/vector-extract-insert.ll @@ -8,7 +8,7 @@ ; No dynamic indexing required ; GCN-LABEL: {{^}}extract_insert_same_dynelt_v4i32: ; GCN: s_load_dword [[VAL:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd{{$}} -; GCN-NOT buffer_load_dword +; GCN-NOT: buffer_load_dword ; GCN-NOT: [[VAL]] ; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]] ; GCN-NOT: [[VVAL]] @@ -49,7 +49,7 @@ ; GCN-LABEL: {{^}}extract_insert_same_elt2_v4i32: ; GCN: s_load_dword [[VAL:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd{{$}} -; GCN-NOT buffer_load_dword +; GCN-NOT: buffer_load_dword ; GCN-NOT: [[VAL]] ; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]] ; GCN-NOT: [[VVAL]] @@ -68,7 +68,7 @@ ; GCN-LABEL: {{^}}extract_insert_same_dynelt_v4f32: ; GCN: s_load_dword [[VAL:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd{{$}} -; GCN-NOT buffer_load_dword +; GCN-NOT: buffer_load_dword ; GCN-NOT: [[VAL]] ; GCN: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[VAL]] ; 
GCN-NOT: [[VVAL]] diff --git a/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir b/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir --- a/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir +++ b/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir @@ -47,7 +47,7 @@ # CHECK_SWIFT: Latency : 3 # CHECK_R52: Latency : 4 # -# CHECK : SU(6): %6 = t2ADDrr %3:rgpr, %3:rgpr, 14, $noreg, $noreg +# CHECK: SU(6): %6 = t2ADDrr %3:rgpr, %3:rgpr, 14, $noreg, $noreg # CHECK_A9: Latency : 1 # CHECK_SWIFT: Latency : 1 # CHECK_R52: Latency : 3 diff --git a/llvm/test/CodeGen/ARM/thumb1_return_sequence.ll b/llvm/test/CodeGen/ARM/thumb1_return_sequence.ll --- a/llvm/test/CodeGen/ARM/thumb1_return_sequence.ll +++ b/llvm/test/CodeGen/ARM/thumb1_return_sequence.ll @@ -28,7 +28,7 @@ ; CHECK-V4T-NEXT: ldr [[POP:r[4567]]], [sp, #16] ; CHECK-V4T-NEXT: mov lr, [[POP]] ; CHECK-V4T-NEXT: pop {[[SAVED]]} -; CHECK-V4T-NEXT add sp, sp, #4 +; CHECK-V4T-NEXT: add sp, sp, #4 ; The ISA for v4 does not support pop pc, so make sure we do not emit ; one even when we do not need to update SP. ; CHECK-V4T-NOT: pop {pc} @@ -104,7 +104,7 @@ ; used for the return value). ; CHECK-V4T-NEXT: pop {[[POP_REG:r[1-3]]]} ; CHECK-V4T-NEXT: bx [[POP_REG]] -; CHECK-V5T: pop {[[SAVED]], pc} +; CHECK-V5T: pop {[[SAVED]], pc} } ; CHECK-V4T-LABEL: simplevariadicframe diff --git a/llvm/test/CodeGen/PowerPC/fold-frame-offset-using-rr.mir b/llvm/test/CodeGen/PowerPC/fold-frame-offset-using-rr.mir --- a/llvm/test/CodeGen/PowerPC/fold-frame-offset-using-rr.mir +++ b/llvm/test/CodeGen/PowerPC/fold-frame-offset-using-rr.mir @@ -11,7 +11,7 @@ --- name: testIndexForm1 -#CHECK : name : testIndexForm1 +#CHECK-LABEL: name: testIndexForm1 # ToBeDeletedReg equals to ScaleReg tracksRegLiveness: true body: | @@ -27,7 +27,7 @@ ... --- name: testIndexForm2 -#CHECK : name : testIndexForm2 +#CHECK-LABEL: name: testIndexForm2 # ToBeDeletedReg equals to ToBeChangedReg tracksRegLiveness: true body: | @@ -43,7 +43,7 @@ ...
--- name: testIndexForm3 -#CHECK : name : testIndexForm3 +#CHECK-LABEL: name: testIndexForm3 # There is other use for ToBeDeletedReg between ADD instr and Imm instr tracksRegLiveness: true body: | @@ -61,7 +61,7 @@ ... --- name: testIndexForm4 -#CHECK : name : testIndexForm3 +#CHECK-LABEL: name: testIndexForm4 # There is other use for ToBeChangedReg between ADDI instr and ADD instr tracksRegLiveness: true body: | @@ -79,7 +79,7 @@ ... --- name: testIndexForm5 -#CHECK : name : testIndexForm5 +#CHECK-LABEL: name: testIndexForm5 # ToBeChangedReg has no killed flag tracksRegLiveness: true body: | @@ -97,7 +97,7 @@ ... --- name: testIndexForm6 -#CHECK : name : testIndexForm6 +#CHECK-LABEL: name: testIndexForm6 # ToBeDeletedReg has no killed flag tracksRegLiveness: true body: | @@ -114,7 +114,7 @@ ... --- name: testIndexForm7 -#CHECK : name : testIndexForm7 +#CHECK-LABEL: name: testIndexForm7 # There is other def for ToBeChangedReg between ADD instr and Imm instr tracksRegLiveness: true body: | @@ -134,7 +134,7 @@ ... --- name: testIndexForm8 -#CHECK : name : testIndexForm8 +#CHECK-LABEL: name: testIndexForm8 # There is other def for ScaleReg between ADD instr and Imm instr tracksRegLiveness: true body: | diff --git a/llvm/test/CodeGen/PowerPC/fold-rlwinm.mir b/llvm/test/CodeGen/PowerPC/fold-rlwinm.mir --- a/llvm/test/CodeGen/PowerPC/fold-rlwinm.mir +++ b/llvm/test/CodeGen/PowerPC/fold-rlwinm.mir @@ -3,7 +3,7 @@ --- name: testFoldRLWINM -#CHECK : name : testFoldRLWINM +#CHECK-LABEL: name: testFoldRLWINM tracksRegLiveness: true body: | bb.0.entry: @@ -18,7 +18,7 @@ ... --- name: testFoldRLWINMSrcFullMask1 -#CHECK : name : testFoldRLWINMSrcFullMask1 +#CHECK-LABEL: name: testFoldRLWINMSrcFullMask1 tracksRegLiveness: true body: | bb.0.entry: @@ -33,7 +33,7 @@ ... --- name: testFoldRLWINMSrcFullMask2 -#CHECK : name : testFoldRLWINMSrcFullMask2 +#CHECK-LABEL: name: testFoldRLWINMSrcFullMask2 tracksRegLiveness: true body: | bb.0.entry: @@ -48,7 +48,7 @@ ...
--- name: testFoldRLWINMSrcWrapped -#CHECK : name : testFoldRLWINMSrcWrapped +#CHECK-LABEL: name: testFoldRLWINMSrcWrapped tracksRegLiveness: true body: | bb.0.entry: @@ -63,7 +63,7 @@ ... --- name: testFoldRLWINMUserWrapped -#CHECK : name : testFoldRLWINMUserWrapped +#CHECK-LABEL: name: testFoldRLWINMUserWrapped tracksRegLiveness: true body: | bb.0.entry: @@ -78,7 +78,7 @@ ... --- name: testFoldRLWINMResultWrapped -#CHECK : name : testFoldRLWINMResultWrapped +#CHECK-LABEL: name: testFoldRLWINMResultWrapped tracksRegLiveness: true body: | bb.0.entry: @@ -93,7 +93,7 @@ ... --- name: testFoldRLWINMMultipleUses -#CHECK : name : testFoldRLWINMMultipleUses +#CHECK-LABEL: name: testFoldRLWINMMultipleUses tracksRegLiveness: true body: | bb.0.entry: @@ -110,7 +110,7 @@ ... --- name: testFoldRLWINMToZero -#CHECK : name : testFoldRLWINMToZero +#CHECK-LABEL: name: testFoldRLWINMToZero tracksRegLiveness: true body: | bb.0.entry: @@ -125,7 +125,7 @@ ... --- name: testFoldRLWINM_recToZero -#CHECK : name : testFoldRLWINM_recToZero +#CHECK-LABEL: name: testFoldRLWINM_recToZero tracksRegLiveness: true body: | bb.0.entry: @@ -140,7 +140,7 @@ ... --- name: testFoldRLWINMoToZeroSrcCanNotBeDeleted -#CHECK : name : testFoldRLWINMoToZeroSrcCanNotBeDeleted +#CHECK-LABEL: name: testFoldRLWINMoToZeroSrcCanNotBeDeleted tracksRegLiveness: true body: | bb.0.entry: @@ -155,7 +155,7 @@ ...
--- name: testFoldRLWINMInvalidMask -#CHECK : name : testFoldRLWINMInvalidMask +#CHECK-LABEL: name: testFoldRLWINMInvalidMask tracksRegLiveness: true body: | bb.0.entry: diff --git a/llvm/test/CodeGen/PowerPC/optcmp.ll b/llvm/test/CodeGen/PowerPC/optcmp.ll --- a/llvm/test/CodeGen/PowerPC/optcmp.ll +++ b/llvm/test/CodeGen/PowerPC/optcmp.ll @@ -60,7 +60,7 @@ ; CHECK: isel 3, 4, 3, 1 ; CHECK-NO-ISEL: bc 12, 1, [[TRUE:.LBB[0-9]+]] ; CHECK-NO-ISEL-NEXT: b .LBB -; CHECK-NO-ISEL addi: 3, 4, 0 +; CHECK-NO-ISEL: addi 3, 4, 0 ; CHECK: std [[REG]], 0(5) } diff --git a/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll --- a/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll +++ b/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll @@ -402,7 +402,7 @@ ; PC64-NEXT: ld 0, 16(1) ; PC64-NEXT: mtlr 0 ; PC64-NEXT: blr -; PC64LE9 : clrldi 5, 5, 32 +; PC64LE9: clrldi 5, 5, 32 entry: %powi = call ppc_fp128 @llvm.experimental.constrained.powi.ppcf128( ppc_fp128 %first, diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-copy.mir b/llvm/test/CodeGen/X86/GlobalISel/select-copy.mir --- a/llvm/test/CodeGen/X86/GlobalISel/select-copy.mir +++ b/llvm/test/CodeGen/X86/GlobalISel/select-copy.mir @@ -40,10 +40,10 @@ registers: - { id: 0, class: gpr, preferred-register: '' } - { id: 1, class: gpr, preferred-register: '' } -# ALL %0:gr8 = COPY $al -# ALL-NEXT %1:gr32 = MOVZX32rr8 %0 -# ALL-NEXT $eax = COPY %1 -# ALL-NEXT RET 0, implicit $eax +# ALL: %0:gr8 = COPY $al +# ALL-NEXT: %1:gr32 = MOVZX32rr8 %0 +# ALL-NEXT: $eax = COPY %1 +# ALL-NEXT: RET 0, implicit $eax body: | bb.1 (%ir-block.0): liveins: $eax @@ -95,11 +95,11 @@ - { id: 0, class: gpr, preferred-register: '' } - { id: 1, class: gpr, preferred-register: '' } - { id: 2, class: gpr, preferred-register: '' } -# ALL %0:gr16 = COPY $ax -# ALL-NEXT %1:gr8 = COPY %0.sub_8bit -# ALL-NEXT %2:gr32 = MOVZX32rr8 %1 -# ALL-NEXT $eax = COPY %2 -# 
ALL-NEXT RET 0, implicit $eax +# ALL: %0:gr16 = COPY $ax +# ALL-NEXT: %1:gr8 = COPY %0.sub_8bit +# ALL-NEXT: %2:gr32 = MOVZX32rr8 %1 +# ALL-NEXT: $eax = COPY %2 +# ALL-NEXT: RET 0, implicit $eax body: | bb.1 (%ir-block.0): liveins: $eax @@ -125,11 +125,11 @@ - { id: 0, class: gpr, preferred-register: '' } - { id: 1, class: gpr, preferred-register: '' } - { id: 2, class: gpr, preferred-register: '' } -# ALL %0:gr32 = COPY $eax -# ALL-NEXT %1:gr16 = COPY %0.sub_16bit -# ALL-NEXT %2:gr32 = MOVZX32rr16 %1 -# ALL-NEXT $eax = COPY %2 -# ALL-NEXT RET 0, implicit $eax +# ALL: %0:gr32 = COPY $eax +# ALL-NEXT: %1:gr16 = COPY %0.sub_16bit +# ALL-NEXT: %2:gr32 = MOVZX32rr16 %1 +# ALL-NEXT: $eax = COPY %2 +# ALL-NEXT: RET 0, implicit $eax body: | bb.1 (%ir-block.0): liveins: $eax @@ -156,11 +156,11 @@ - { id: 0, class: gpr, preferred-register: '' } - { id: 1, class: gpr, preferred-register: '' } - { id: 2, class: gpr, preferred-register: '' } -# ALL %0:gr32[[ABCD]] = COPY $edx -# ALL-NEXT %1:gr8 = COPY %0.sub_8bit -# ALL-NEXT %2:gr32 = MOVZX32rr8 %1 -# ALL-NEXT $eax = COPY %2 -# ALL-NEXT RET 0, implicit $eax +# ALL: %0:gr32[[ABCD]] = COPY $edx +# ALL-NEXT: %1:gr8 = COPY %0.sub_8bit +# ALL-NEXT: %2:gr32 = MOVZX32rr8 %1 +# ALL-NEXT: $eax = COPY %2 +# ALL-NEXT: RET 0, implicit $eax body: | bb.1 (%ir-block.0): liveins: $eax,$edx diff --git a/llvm/test/CodeGen/X86/emutls.ll b/llvm/test/CodeGen/X86/emutls.ll --- a/llvm/test/CodeGen/X86/emutls.ll +++ b/llvm/test/CodeGen/X86/emutls.ll @@ -249,31 +249,31 @@ ;;;;;;;;;;;;;; 32-bit __emutls_v. and __emutls_t. 
-; X32 .section .data.rel.local, +; X32: .section .data.rel.local, ; X32-LABEL: __emutls_v.i1: ; X32-NEXT: .long 4 ; X32-NEXT: .long 4 ; X32-NEXT: .long 0 ; X32-NEXT: .long __emutls_t.i1 -; X32 .section .rodata, +; X32: .section .rodata, ; X32-LABEL: __emutls_t.i1: ; X32-NEXT: .long 15 ; X32-NOT: __emutls_v.i2 -; X32 .section .data.rel.local, +; X32: .section .data.rel.local, ; X32-LABEL: __emutls_v.i3: ; X32-NEXT: .long 4 ; X32-NEXT: .long 4 ; X32-NEXT: .long 0 ; X32-NEXT: .long __emutls_t.i3 -; X32 .section .rodata, +; X32: .section .rodata, ; X32-LABEL: __emutls_t.i3: ; X32-NEXT: .long 15 -; X32 .section .data.rel.local, +; X32: .section .data.rel.local, ; X32-LABEL: __emutls_v.i4: ; X32-NEXT: .L__emutls_v.i4$local: ; X32-NEXT: .long 4 @@ -281,27 +281,27 @@ ; X32-NEXT: .long 0 ; X32-NEXT: .long __emutls_t.i4 -; X32 .section .rodata, +; X32: .section .rodata, ; X32-LABEL: __emutls_t.i4: ; X32-NEXT: .L__emutls_t.i4$local: ; X32-NEXT: .long 15 ; X32-NOT: __emutls_v.i5: -; X32 .hidden __emutls_v.i5 +; X32: .hidden __emutls_v.i5 ; X32-NOT: __emutls_v.i5: -; X32 .section .data.rel.local, +; X32: .section .data.rel.local, ; X32-LABEL: __emutls_v.s1: ; X32-NEXT: .long 2 ; X32-NEXT: .long 2 ; X32-NEXT: .long 0 ; X32-NEXT: .long __emutls_t.s1 -; X32 .section .rodata, +; X32: .section .rodata, ; X32-LABEL: __emutls_t.s1: ; X32-NEXT: .short 15 -; X32 .section .data.rel.local, +; X32: .section .data.rel.local, ; X32-LABEL: __emutls_v.b1: ; X32-NEXT: .long 1 ; X32-NEXT: .long 1 @@ -312,31 +312,31 @@ ;;;;;;;;;;;;;; 64-bit __emutls_v. and __emutls_t. 
-; X64 .section .data.rel.local, +; X64: .section .data.rel.local, ; X64-LABEL: __emutls_v.i1: ; X64-NEXT: .quad 4 ; X64-NEXT: .quad 4 ; X64-NEXT: .quad 0 ; X64-NEXT: .quad __emutls_t.i1 -; X64 .section .rodata, +; X64: .section .rodata, ; X64-LABEL: __emutls_t.i1: ; X64-NEXT: .long 15 ; X64-NOT: __emutls_v.i2 -; X64 .section .data.rel.local, +; X64: .section .data.rel.local, ; X64-LABEL: __emutls_v.i3: ; X64-NEXT: .quad 4 ; X64-NEXT: .quad 4 ; X64-NEXT: .quad 0 ; X64-NEXT: .quad __emutls_t.i3 -; X64 .section .rodata, +; X64: .section .rodata, ; X64-LABEL: __emutls_t.i3: ; X64-NEXT: .long 15 -; X64 .section .data.rel.local, +; X64: .section .data.rel.local, ; X64-LABEL: __emutls_v.i4: ; X64-NEXT: .L__emutls_v.i4$local: ; X64-NEXT: .quad 4 @@ -344,27 +344,27 @@ ; X64-NEXT: .quad 0 ; X64-NEXT: .quad __emutls_t.i4 -; X64 .section .rodata, +; X64: .section .rodata, ; X64-LABEL: __emutls_t.i4: ; X64-NEXT: .L__emutls_t.i4$local: ; X64-NEXT: .long 15 ; X64-NOT: __emutls_v.i5: -; X64 .hidden __emutls_v.i5 +; X64: .hidden __emutls_v.i5 ; X64-NOT: __emutls_v.i5: -; X64 .section .data.rel.local, +; X64: .section .data.rel.local, ; X64-LABEL: __emutls_v.s1: ; X64-NEXT: .quad 2 ; X64-NEXT: .quad 2 ; X64-NEXT: .quad 0 ; X64-NEXT: .quad __emutls_t.s1 -; X64 .section .rodata, +; X64: .section .rodata, ; X64-LABEL: __emutls_t.s1: ; X64-NEXT: .short 15 -; X64 .section .data.rel.local, +; X64: .section .data.rel.local, ; X64-LABEL: __emutls_v.b1: ; X64-NEXT: .quad 1 ; X64-NEXT: .quad 1 diff --git a/llvm/test/CodeGen/X86/linux-preemption.ll b/llvm/test/CodeGen/X86/linux-preemption.ll --- a/llvm/test/CodeGen/X86/linux-preemption.ll +++ b/llvm/test/CodeGen/X86/linux-preemption.ll @@ -73,9 +73,9 @@ define i32* @get_weak_preemptable_global() { ret i32* @weak_preemptable_global } -; CHECK ;ADD_LABEL_BACK; movq weak_preemptable_global@GOTPCREL(%rip), %rax -; STATIC ;ADD_LABEL_BACK; movq weak_preemptable_global@GOTPCREL, %rax -; CHECK32 ;ADD_LABEL_BACK; movl 
weak_preemptable_global@GOT(%eax), %eax +; CHECK: ADD_LABEL_BACK; movq weak_preemptable_global@GOTPCREL(%rip), %rax +; STATIC: ADD_LABEL_BACK; movq weak_preemptable_global@GOTPCREL, %rax +; CHECK32: ADD_LABEL_BACK; movl weak_preemptable_global@GOT(%eax), %eax @external_preemptable_global = external dso_preemptable global i32 define i32* @get_external_preemptable_global() { @@ -174,7 +174,7 @@ ret void()* @strong_local_function } ; COMMON: {{^}}strong_local_function: -; COMMON-NEXT .Lstrong_local_function: +; COMMON-NEXT: .Lstrong_local_function: ; CHECK: leaq .Lstrong_local_function$local(%rip), %rax ; STATIC: movl $.Lstrong_local_function$local, %eax ; CHECK32: leal .Lstrong_local_function$local@GOTOFF(%eax), %eax @@ -227,7 +227,7 @@ ; CHECK32: movl external_preemptable_function@GOT(%eax), %eax ; COMMON: {{^}}strong_local_global: -; COMMON-NEXT .Lstrong_local_global: +; COMMON-NEXT: .Lstrong_local_global: ; COMMON: .globl strong_default_alias ; COMMON-NEXT: .set strong_default_alias, aliasee diff --git a/llvm/test/CodeGen/X86/soft-fp-legal-in-HW-reg.ll b/llvm/test/CodeGen/X86/soft-fp-legal-in-HW-reg.ll --- a/llvm/test/CodeGen/X86/soft-fp-legal-in-HW-reg.ll +++ b/llvm/test/CodeGen/X86/soft-fp-legal-in-HW-reg.ll @@ -13,33 +13,33 @@ %res = select i1 %cmp, fp128 %sub, fp128 0xL00000000000000000000000000000000 ret fp128 %res ; CHECK-LABEL: TestSelect: -; CHECK movaps 16(%rsp), %xmm1 -; CHECK-NEXT callq __subtf3 -; CHECK-NEXT testl %ebx, %ebx -; CHECK-NEXT jg .LBB0_2 -; CHECK-NEXT # %bb.1: -; CHECK-NEXT movaps .LCPI0_0(%rip), %xmm0 -; CHECK-NEXT .LBB0_2: -; CHECK-NEXT addq $32, %rsp -; CHECK-NEXT popq %rbx -; CHECK-NEXT retq +; CHECK: movaps 16(%rsp), %xmm1 +; CHECK-NEXT: callq __subtf3 +; CHECK-NEXT: testl %ebx, %ebx +; CHECK-NEXT: jg .LBB0_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: movaps .LCPI0_0(%rip), %xmm0 +; CHECK-NEXT: .LBB0_2: +; CHECK-NEXT: addq $32, %rsp +; CHECK-NEXT: popq %rbx +; CHECK-NEXT: retq } define fp128 @TestFabs(fp128 %a) { %res = call fp128 
@llvm.fabs.f128(fp128 %a) ret fp128 %res ; CHECK-LABEL: TestFabs: -; CHECK andps .LCPI1_0(%rip), %xmm0 -; CHECK-NEXT retq +; CHECK: andps .LCPI1_0(%rip), %xmm0 +; CHECK-NEXT: retq } define fp128 @TestCopysign(fp128 %a, fp128 %b) { %res = call fp128 @llvm.copysign.f128(fp128 %a, fp128 %b) ret fp128 %res ; CHECK-LABEL: TestCopysign: -; CHECK andps .LCPI2_1(%rip), %xmm0 -; CHECK-NEXT orps %xmm1, %xmm0 -; CHECK-NEXT retq +; CHECK: andps .LCPI2_1(%rip), %xmm0 +; CHECK-NEXT: orps %xmm1, %xmm0 +; CHECK-NEXT: retq } define fp128 @TestFneg(fp128 %a) { @@ -47,9 +47,9 @@ %res = fsub fp128 0xL00000000000000008000000000000000, %mul ret fp128 %res ; CHECK-LABEL: TestFneg: -; CHECK movaps %xmm0, %xmm1 -; CHECK-NEXT callq __multf3 -; CHECK-NEXT xorps .LCPI3_0(%rip), %xmm0 -; CHECK-NEXT popq %rax -; CHECK-NEXT retq +; CHECK: movaps %xmm0, %xmm1 +; CHECK-NEXT: callq __multf3 +; CHECK-NEXT: xorps .LCPI3_0(%rip), %xmm0 +; CHECK-NEXT: popq %rax +; CHECK-NEXT: retq } diff --git a/llvm/test/CodeGen/X86/stack-clash-medium-natural-probes-mutliple-objects.ll b/llvm/test/CodeGen/X86/stack-clash-medium-natural-probes-mutliple-objects.ll --- a/llvm/test/CodeGen/X86/stack-clash-medium-natural-probes-mutliple-objects.ll +++ b/llvm/test/CodeGen/X86/stack-clash-medium-natural-probes-mutliple-objects.ll @@ -8,15 +8,15 @@ ; CHECK-LABEL: foo: ; CHECK: # %bb.0: -; CHECK-NEXT subq $4096, %rsp # imm = 0x1000 -; CHECK-NEXT .cfi_def_cfa_offset 5888 -; CHECK-NEXT movl $1, 2088(%rsp) -; CHECK-NEXT subq $1784, %rsp # imm = 0x6F8 -; CHECK-NEXT movl $2, 672(%rsp) -; CHECK-NEXT movl 1872(%rsp), %eax -; CHECK-NEXT addq $5880, %rsp # imm = 0x16F8 -; CHECK-NEXT .cfi_def_cfa_offset 8 -; CHECK-NEXT retq +; CHECK-NEXT: subq $4096, %rsp # imm = 0x1000 +; CHECK-NEXT: .cfi_def_cfa_offset 5888 +; CHECK-NEXT: movl $1, 2088(%rsp) +; CHECK-NEXT: subq $1784, %rsp # imm = 0x6F8 +; CHECK-NEXT: movl $2, 672(%rsp) +; CHECK-NEXT: movl 1872(%rsp), %eax +; CHECK-NEXT: addq $5880, %rsp # imm = 0x16F8 +; CHECK-NEXT: 
.cfi_def_cfa_offset 8 +; CHECK-NEXT: retq %a = alloca i32, i64 1000, align 16 diff --git a/llvm/test/MC/Mips/mips32r2/valid.s b/llvm/test/MC/Mips/mips32r2/valid.s --- a/llvm/test/MC/Mips/mips32r2/valid.s +++ b/llvm/test/MC/Mips/mips32r2/valid.s @@ -210,7 +210,7 @@ or $2, 4 # CHECK: ori $2, $2, 4 # encoding: [0x34,0x42,0x00,0x04] pause # CHECK: pause # encoding: [0x00,0x00,0x01,0x40] # CHECK-NEXT: #