diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp --- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp +++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp @@ -1026,11 +1026,11 @@ unsigned Shift = AArch64_AM::getShiftValue(MI->getOperand(OpNum + 1).getImm()); O << '#' << formatImm(Val); - if (Shift != 0) + if (Shift != 0) { printShifter(MI, OpNum + 1, STI, O); - - if (CommentStream) - *CommentStream << '=' << formatImm(Val << Shift) << '\n'; + if (CommentStream) + *CommentStream << '=' << formatImm(Val << Shift) << '\n'; + } } else { assert(MO.isExpr() && "Unexpected operand type!"); MO.getExpr()->print(O, &MAI); diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-128.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-128.ll --- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-128.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-128.ll @@ -65,7 +65,7 @@ ; ; CHECK-CAS-O0-LABEL: val_compare_and_swap: ; CHECK-CAS-O0: // %bb.0: -; CHECK-CAS-O0-NEXT: sub sp, sp, #16 // =16 +; CHECK-CAS-O0-NEXT: sub sp, sp, #16 ; CHECK-CAS-O0-NEXT: .cfi_def_cfa_offset 16 ; CHECK-CAS-O0-NEXT: str x3, [sp, #8] // 8-byte Folded Spill ; CHECK-CAS-O0-NEXT: mov x1, x5 @@ -81,7 +81,7 @@ ; CHECK-CAS-O0-NEXT: mov v0.d[0], x9 ; CHECK-CAS-O0-NEXT: mov v0.d[1], x8 ; CHECK-CAS-O0-NEXT: str q0, [x0] -; CHECK-CAS-O0-NEXT: add sp, sp, #16 // =16 +; CHECK-CAS-O0-NEXT: add sp, sp, #16 ; CHECK-CAS-O0-NEXT: ret %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval acquire acquire @@ -150,7 +150,7 @@ ; ; CHECK-CAS-O0-LABEL: val_compare_and_swap_monotonic_seqcst: ; CHECK-CAS-O0: // %bb.0: -; CHECK-CAS-O0-NEXT: sub sp, sp, #16 // =16 +; CHECK-CAS-O0-NEXT: sub sp, sp, #16 ; CHECK-CAS-O0-NEXT: .cfi_def_cfa_offset 16 ; CHECK-CAS-O0-NEXT: str x3, [sp, #8] // 8-byte Folded Spill ; CHECK-CAS-O0-NEXT: mov x1, x5 @@ -166,7 +166,7 @@ ; CHECK-CAS-O0-NEXT: mov v0.d[0], x9 ; CHECK-CAS-O0-NEXT: mov v0.d[1], x8 ; CHECK-CAS-O0-NEXT: str q0, [x0] -; CHECK-CAS-O0-NEXT: add sp, sp, #16 // =16 +; CHECK-CAS-O0-NEXT: add sp, sp, #16 ; CHECK-CAS-O0-NEXT: ret %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval monotonic seq_cst @@ -235,7 +235,7 @@ ; ; CHECK-CAS-O0-LABEL: val_compare_and_swap_release_acquire: ; CHECK-CAS-O0: // %bb.0: -; CHECK-CAS-O0-NEXT: sub sp, sp, #16 // =16 +; CHECK-CAS-O0-NEXT: sub sp, sp, #16 ; CHECK-CAS-O0-NEXT: .cfi_def_cfa_offset 16 ; CHECK-CAS-O0-NEXT: str x3, [sp, #8] // 8-byte Folded Spill ; CHECK-CAS-O0-NEXT: mov x1, x5 @@ -251,7 +251,7 @@ ; CHECK-CAS-O0-NEXT: mov v0.d[0], x9 ; CHECK-CAS-O0-NEXT: mov v0.d[1], x8 ; CHECK-CAS-O0-NEXT: str q0, [x0] -; CHECK-CAS-O0-NEXT: add sp, sp, #16 // =16 +; CHECK-CAS-O0-NEXT: add sp, sp, #16 ; CHECK-CAS-O0-NEXT: ret %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval release acquire @@ -320,7 +320,7 @@ ; ; CHECK-CAS-O0-LABEL: val_compare_and_swap_monotonic: ; CHECK-CAS-O0: // %bb.0: -; CHECK-CAS-O0-NEXT: sub sp, sp, #16 // =16 +; CHECK-CAS-O0-NEXT: sub sp, sp, #16 ; CHECK-CAS-O0-NEXT: .cfi_def_cfa_offset 16 ; CHECK-CAS-O0-NEXT: str x3, [sp, #8] // 8-byte Folded Spill ; CHECK-CAS-O0-NEXT: mov x1, x5 @@ -336,7 +336,7 @@ ; CHECK-CAS-O0-NEXT: mov v0.d[0], x9 ; CHECK-CAS-O0-NEXT: mov v0.d[1], x8 ; CHECK-CAS-O0-NEXT: str q0, [x0] -; CHECK-CAS-O0-NEXT: add sp, sp, #16 // =16 +; CHECK-CAS-O0-NEXT: add sp, sp, #16 ; CHECK-CAS-O0-NEXT: ret %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval release acquire %val = extractvalue { i128, i1 } %pair, 0 @@ -373,7 +373,7 
@@ ; ; CHECK-LLSC-O0-LABEL: atomic_load_relaxed: ; CHECK-LLSC-O0: // %bb.0: -; CHECK-LLSC-O0-NEXT: sub sp, sp, #64 // =64 +; CHECK-LLSC-O0-NEXT: sub sp, sp, #64 ; CHECK-LLSC-O0-NEXT: .cfi_def_cfa_offset 64 ; CHECK-LLSC-O0-NEXT: str x2, [sp, #48] // 8-byte Folded Spill ; CHECK-LLSC-O0-NEXT: str x3, [sp, #56] // 8-byte Folded Spill @@ -388,17 +388,17 @@ ; CHECK-LLSC-O0-NEXT: mov w10, #64 ; CHECK-LLSC-O0-NEXT: // kill: def $x10 killed $w10 ; CHECK-LLSC-O0-NEXT: str x10, [sp, #8] // 8-byte Folded Spill -; CHECK-LLSC-O0-NEXT: subs x16, x10, #64 // =64 -; CHECK-LLSC-O0-NEXT: subs x13, x8, #64 // =64 +; CHECK-LLSC-O0-NEXT: subs x16, x10, #64 +; CHECK-LLSC-O0-NEXT: subs x13, x8, #64 ; CHECK-LLSC-O0-NEXT: lsl x14, x15, x10 ; CHECK-LLSC-O0-NEXT: lsr x13, x15, x13 ; CHECK-LLSC-O0-NEXT: orr x13, x13, x12 ; CHECK-LLSC-O0-NEXT: lsl x15, x15, x16 -; CHECK-LLSC-O0-NEXT: subs x16, x10, #64 // =64 +; CHECK-LLSC-O0-NEXT: subs x16, x10, #64 ; CHECK-LLSC-O0-NEXT: csel x14, x14, x12, lo -; CHECK-LLSC-O0-NEXT: subs x16, x10, #64 // =64 +; CHECK-LLSC-O0-NEXT: subs x16, x10, #64 ; CHECK-LLSC-O0-NEXT: csel x13, x13, x15, lo -; CHECK-LLSC-O0-NEXT: subs x15, x10, #0 // =0 +; CHECK-LLSC-O0-NEXT: subs x15, x10, #0 ; CHECK-LLSC-O0-NEXT: csel x13, x12, x13, eq ; CHECK-LLSC-O0-NEXT: orr x9, x9, x14 ; CHECK-LLSC-O0-NEXT: orr x12, x12, x13 @@ -407,14 +407,14 @@ ; CHECK-LLSC-O0-NEXT: str q0, [sp, #16] // 16-byte Folded Spill ; CHECK-LLSC-O0-NEXT: mov v0.d[1], x12 ; CHECK-LLSC-O0-NEXT: str q0, [sp, #32] // 16-byte Folded Spill -; CHECK-LLSC-O0-NEXT: subs x13, x10, #64 // =64 -; CHECK-LLSC-O0-NEXT: subs x8, x8, #64 // =64 +; CHECK-LLSC-O0-NEXT: subs x13, x10, #64 +; CHECK-LLSC-O0-NEXT: subs x8, x8, #64 ; CHECK-LLSC-O0-NEXT: lsl x8, x12, x8 ; CHECK-LLSC-O0-NEXT: orr x8, x8, x9, lsr #0 ; CHECK-LLSC-O0-NEXT: lsr x12, x12, x13 -; CHECK-LLSC-O0-NEXT: subs x13, x10, #64 // =64 +; CHECK-LLSC-O0-NEXT: subs x13, x10, #64 ; CHECK-LLSC-O0-NEXT: csel x8, x8, x12, lo -; CHECK-LLSC-O0-NEXT: subs x10, x10, #0 // =0 +; CHECK-LLSC-O0-NEXT: subs x10, x10, #0 ; CHECK-LLSC-O0-NEXT: csel x10, x9, x8, eq ; CHECK-LLSC-O0-NEXT: stxp w8, x9, x10, [x11] ; CHECK-LLSC-O0-NEXT: cbnz w8, .LBB4_1 @@ -423,12 +423,12 @@ ; CHECK-LLSC-O0-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload ; CHECK-LLSC-O0-NEXT: ldr x8, [sp, #56] // 8-byte Folded Reload ; CHECK-LLSC-O0-NEXT: str q0, [x8] -; CHECK-LLSC-O0-NEXT: add sp, sp, #64 // =64 +; CHECK-LLSC-O0-NEXT: add sp, sp, #64 ; CHECK-LLSC-O0-NEXT: ret ; ; CHECK-CAS-O0-LABEL: atomic_load_relaxed: ; CHECK-CAS-O0: // %bb.0: -; CHECK-CAS-O0-NEXT: sub sp, sp, #64 // =64 +; CHECK-CAS-O0-NEXT: sub sp, sp, #64 ; CHECK-CAS-O0-NEXT: .cfi_def_cfa_offset 64 ; CHECK-CAS-O0-NEXT: str x2, [sp, #48] // 8-byte Folded Spill ; CHECK-CAS-O0-NEXT: str x3, [sp, #56] // 8-byte Folded Spill @@ -443,17 +443,17 @@ ; CHECK-CAS-O0-NEXT: mov w10, #64 ; CHECK-CAS-O0-NEXT: // kill: def $x10 killed $w10 ; CHECK-CAS-O0-NEXT: str x10, [sp, #8] // 8-byte Folded Spill -; CHECK-CAS-O0-NEXT: subs x16, x10, #64 // =64 -; CHECK-CAS-O0-NEXT: subs x13, x8, #64 // =64 +; CHECK-CAS-O0-NEXT: subs x16, x10, #64 +; CHECK-CAS-O0-NEXT: subs x13, x8, #64 ; CHECK-CAS-O0-NEXT: lsl x14, x15, x10 ; CHECK-CAS-O0-NEXT: lsr x13, x15, x13 ; CHECK-CAS-O0-NEXT: orr x13, x13, x12 ; CHECK-CAS-O0-NEXT: lsl x15, x15, x16 -; CHECK-CAS-O0-NEXT: subs x16, x10, #64 // =64 +; CHECK-CAS-O0-NEXT: subs x16, x10, #64 ; CHECK-CAS-O0-NEXT: csel x14, x14, x12, lo -; CHECK-CAS-O0-NEXT: subs x16, x10, #64 // =64 +; CHECK-CAS-O0-NEXT: subs x16, x10, #64 ; CHECK-CAS-O0-NEXT: csel x13, x13, x15, lo 
-; CHECK-CAS-O0-NEXT: subs x15, x10, #0 // =0 +; CHECK-CAS-O0-NEXT: subs x15, x10, #0 ; CHECK-CAS-O0-NEXT: csel x13, x12, x13, eq ; CHECK-CAS-O0-NEXT: orr x9, x9, x14 ; CHECK-CAS-O0-NEXT: orr x12, x12, x13 @@ -462,14 +462,14 @@ ; CHECK-CAS-O0-NEXT: str q0, [sp, #16] // 16-byte Folded Spill ; CHECK-CAS-O0-NEXT: mov v0.d[1], x12 ; CHECK-CAS-O0-NEXT: str q0, [sp, #32] // 16-byte Folded Spill -; CHECK-CAS-O0-NEXT: subs x13, x10, #64 // =64 -; CHECK-CAS-O0-NEXT: subs x8, x8, #64 // =64 +; CHECK-CAS-O0-NEXT: subs x13, x10, #64 +; CHECK-CAS-O0-NEXT: subs x8, x8, #64 ; CHECK-CAS-O0-NEXT: lsl x8, x12, x8 ; CHECK-CAS-O0-NEXT: orr x8, x8, x9, lsr #0 ; CHECK-CAS-O0-NEXT: lsr x12, x12, x13 -; CHECK-CAS-O0-NEXT: subs x13, x10, #64 // =64 +; CHECK-CAS-O0-NEXT: subs x13, x10, #64 ; CHECK-CAS-O0-NEXT: csel x8, x8, x12, lo -; CHECK-CAS-O0-NEXT: subs x10, x10, #0 // =0 +; CHECK-CAS-O0-NEXT: subs x10, x10, #0 ; CHECK-CAS-O0-NEXT: csel x10, x9, x8, eq ; CHECK-CAS-O0-NEXT: stxp w8, x9, x10, [x11] ; CHECK-CAS-O0-NEXT: cbnz w8, .LBB4_1 @@ -478,7 +478,7 @@ ; CHECK-CAS-O0-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload ; CHECK-CAS-O0-NEXT: ldr x8, [sp, #56] // 8-byte Folded Reload ; CHECK-CAS-O0-NEXT: str q0, [x8] -; CHECK-CAS-O0-NEXT: add sp, sp, #64 // =64 +; CHECK-CAS-O0-NEXT: add sp, sp, #64 ; CHECK-CAS-O0-NEXT: ret %r = load atomic i128, i128* %p monotonic, align 16 diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll --- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll @@ -325,7 +325,7 @@ ; ; CHECK-NOLSE-O0-LABEL: fetch_and_nand: ; CHECK-NOLSE-O0: ; %bb.0: -; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-NOLSE-O0-NEXT: ldr w8, [x0] ; CHECK-NOLSE-O0-NEXT: str w8, [sp, #28] ; 4-byte Folded Spill @@ -357,7 +357,7 @@ ; CHECK-NOLSE-O0-NEXT: b LBB6_5 ; CHECK-NOLSE-O0-NEXT: LBB6_5: ; %atomicrmw.end ; CHECK-NOLSE-O0-NEXT: ldr w0, [sp, #12] ; 4-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: ret ; ; CHECK-LSE-O1-LABEL: fetch_and_nand: @@ -375,7 +375,7 @@ ; ; CHECK-LSE-O0-LABEL: fetch_and_nand: ; CHECK-LSE-O0: ; %bb.0: -; CHECK-LSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-LSE-O0-NEXT: sub sp, sp, #32 ; CHECK-LSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-LSE-O0-NEXT: ldr w8, [x0] ; CHECK-LSE-O0-NEXT: str w8, [sp, #28] ; 4-byte Folded Spill @@ -396,7 +396,7 @@ ; CHECK-LSE-O0-NEXT: b LBB6_2 ; CHECK-LSE-O0-NEXT: LBB6_2: ; %atomicrmw.end ; CHECK-LSE-O0-NEXT: ldr w0, [sp, #12] ; 4-byte Folded Reload -; CHECK-LSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-LSE-O0-NEXT: add sp, sp, #32 ; CHECK-LSE-O0-NEXT: ret %val = atomicrmw nand i32* %p, i32 7 release ret i32 %val @@ -418,7 +418,7 @@ ; ; CHECK-NOLSE-O0-LABEL: fetch_and_nand_64: ; CHECK-NOLSE-O0: ; %bb.0: -; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-NOLSE-O0-NEXT: ldr x8, [x0] ; CHECK-NOLSE-O0-NEXT: str x8, [sp, #24] ; 8-byte Folded Spill @@ -450,7 +450,7 @@ ; CHECK-NOLSE-O0-NEXT: b LBB7_5 ; CHECK-NOLSE-O0-NEXT: LBB7_5: ; %atomicrmw.end ; CHECK-NOLSE-O0-NEXT: ldr x0, [sp, #8] ; 8-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: ret ; ; CHECK-LSE-O1-LABEL: 
fetch_and_nand_64: @@ -468,7 +468,7 @@ ; ; CHECK-LSE-O0-LABEL: fetch_and_nand_64: ; CHECK-LSE-O0: ; %bb.0: -; CHECK-LSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-LSE-O0-NEXT: sub sp, sp, #32 ; CHECK-LSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-LSE-O0-NEXT: ldr x8, [x0] ; CHECK-LSE-O0-NEXT: str x8, [sp, #24] ; 8-byte Folded Spill @@ -489,7 +489,7 @@ ; CHECK-LSE-O0-NEXT: b LBB7_2 ; CHECK-LSE-O0-NEXT: LBB7_2: ; %atomicrmw.end ; CHECK-LSE-O0-NEXT: ldr x0, [sp, #8] ; 8-byte Folded Reload -; CHECK-LSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-LSE-O0-NEXT: add sp, sp, #32 ; CHECK-LSE-O0-NEXT: ret %val = atomicrmw nand i64* %p, i64 7 acq_rel ret i64 %val @@ -511,7 +511,7 @@ ; ; CHECK-NOLSE-O0-LABEL: fetch_and_or: ; CHECK-NOLSE-O0: ; %bb.0: -; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-NOLSE-O0-NEXT: ldr w8, [x0] ; CHECK-NOLSE-O0-NEXT: str w8, [sp, #28] ; 4-byte Folded Spill @@ -543,7 +543,7 @@ ; CHECK-NOLSE-O0-NEXT: LBB8_5 ; CHECK-NOLSE-O0-NEXT: LBB8_5: ; %atomicrmw.end ; CHECK-NOLSE-O0-NEXT: ldr w0, [sp, #12] ; 4-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: ret ; ; CHECK-LSE-O1-LABEL: fetch_and_or: @@ -576,7 +576,7 @@ ; ; CHECK-NOLSE-O0-LABEL: fetch_and_or_64: ; CHECK-NOLSE-O0: ; %bb.0: -; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-NOLSE-O0-NEXT: ldr x8, [x0] ; CHECK-NOLSE-O0-NEXT: str x8, [sp, #24] ; 8-byte Folded Spill @@ -607,7 +607,7 @@ ; CHECK-NOLSE-O0-NEXT: b LBB9_5 ; CHECK-NOLSE-O0-NEXT: LBB9_5: ; %atomicrmw.end ; CHECK-NOLSE-O0-NEXT: ldr x0, [sp, #8] ; 8-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: ret ; ; CHECK-LSE-O1-LABEL: fetch_and_or_64: @@ -721,7 +721,7 @@ ; CHECK-NOLSE-O0-NEXT: add x8, x0, w1, sxtw ; CHECK-NOLSE-O0-NEXT: ldrb w8, [x8] ; CHECK-NOLSE-O0-NEXT: add w8, w8, w9, uxtb -; CHECK-NOLSE-O0-NEXT: subs x9, x0, #256 ; =256 +; CHECK-NOLSE-O0-NEXT: subs x9, x0, #256 ; CHECK-NOLSE-O0-NEXT: ldrb w9, [x9] ; CHECK-NOLSE-O0-NEXT: add w8, w8, w9, uxtb ; CHECK-NOLSE-O0-NEXT: add x9, x0, #291, lsl #12 ; =1191936 @@ -747,7 +747,7 @@ ; CHECK-LSE-O0-NEXT: add x8, x0, w1, sxtw ; CHECK-LSE-O0-NEXT: ldrb w8, [x8] ; CHECK-LSE-O0-NEXT: add w8, w8, w9, uxtb -; CHECK-LSE-O0-NEXT: subs x9, x0, #256 ; =256 +; CHECK-LSE-O0-NEXT: subs x9, x0, #256 ; CHECK-LSE-O0-NEXT: ldrb w9, [x9] ; CHECK-LSE-O0-NEXT: add w8, w8, w9, uxtb ; CHECK-LSE-O0-NEXT: add x9, x0, #291, lsl #12 ; =1191936 @@ -791,7 +791,7 @@ ; CHECK-NOLSE-O0-NEXT: add x8, x0, w1, sxtw #1 ; CHECK-NOLSE-O0-NEXT: ldrh w8, [x8] ; CHECK-NOLSE-O0-NEXT: add w8, w8, w9, uxth -; CHECK-NOLSE-O0-NEXT: subs x9, x0, #256 ; =256 +; CHECK-NOLSE-O0-NEXT: subs x9, x0, #256 ; CHECK-NOLSE-O0-NEXT: ldrh w9, [x9] ; CHECK-NOLSE-O0-NEXT: add w8, w8, w9, uxth ; CHECK-NOLSE-O0-NEXT: add x9, x0, #291, lsl #12 ; =1191936 @@ -817,7 +817,7 @@ ; CHECK-LSE-O0-NEXT: add x8, x0, w1, sxtw #1 ; CHECK-LSE-O0-NEXT: ldrh w8, [x8] ; CHECK-LSE-O0-NEXT: add w8, w8, w9, uxth -; CHECK-LSE-O0-NEXT: subs x9, x0, #256 ; =256 +; CHECK-LSE-O0-NEXT: subs x9, x0, #256 ; CHECK-LSE-O0-NEXT: ldrh w9, [x9] ; CHECK-LSE-O0-NEXT: add w8, w8, w9, uxth ; CHECK-LSE-O0-NEXT: add x9, x0, #291, lsl #12 ; =1191936 @@ -1350,7 +1350,7 @@ ; ; CHECK-NOLSE-O0-LABEL: atomicrmw_add_i8: ; CHECK-NOLSE-O0: ; %bb.0: -; 
CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-NOLSE-O0-NEXT: str w1, [sp, #24] ; 4-byte Folded Spill @@ -1385,7 +1385,7 @@ ; CHECK-NOLSE-O0-NEXT: b LBB27_5 ; CHECK-NOLSE-O0-NEXT: LBB27_5: ; %atomicrmw.end ; CHECK-NOLSE-O0-NEXT: ldr w0, [sp, #12] ; 4-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: ret ; ; CHECK-LSE-O1-LABEL: atomicrmw_add_i8: @@ -1416,7 +1416,7 @@ ; ; CHECK-NOLSE-O0-LABEL: atomicrmw_xchg_i8: ; CHECK-NOLSE-O0: ; %bb.0: -; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-NOLSE-O0-NEXT: str w1, [sp, #24] ; 4-byte Folded Spill @@ -1450,7 +1450,7 @@ ; CHECK-NOLSE-O0-NEXT: b LBB28_5 ; CHECK-NOLSE-O0-NEXT: LBB28_5: ; %atomicrmw.end ; CHECK-NOLSE-O0-NEXT: ldr w0, [sp, #12] ; 4-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: ret ; ; CHECK-LSE-O1-LABEL: atomicrmw_xchg_i8: @@ -1481,7 +1481,7 @@ ; ; CHECK-NOLSE-O0-LABEL: atomicrmw_sub_i8: ; CHECK-NOLSE-O0: ; %bb.0: -; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-NOLSE-O0-NEXT: str w1, [sp, #24] ; 4-byte Folded Spill @@ -1516,7 +1516,7 @@ ; CHECK-NOLSE-O0-NEXT: b LBB29_5 ; CHECK-NOLSE-O0-NEXT: LBB29_5: ; %atomicrmw.end ; CHECK-NOLSE-O0-NEXT: ldr w0, [sp, #12] ; 4-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: ret ; ; CHECK-LSE-O1-LABEL: atomicrmw_sub_i8: @@ -1549,7 +1549,7 @@ ; ; CHECK-NOLSE-O0-LABEL: atomicrmw_and_i8: ; CHECK-NOLSE-O0: ; %bb.0: -; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-NOLSE-O0-NEXT: str w1, [sp, #24] ; 4-byte Folded Spill @@ -1584,7 +1584,7 @@ ; CHECK-NOLSE-O0-NEXT: b LBB30_5 ; CHECK-NOLSE-O0-NEXT: LBB30_5: ; %atomicrmw.end ; CHECK-NOLSE-O0-NEXT: ldr w0, [sp, #12] ; 4-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: ret ; ; CHECK-LSE-O1-LABEL: atomicrmw_and_i8: @@ -1617,7 +1617,7 @@ ; ; CHECK-NOLSE-O0-LABEL: atomicrmw_or_i8: ; CHECK-NOLSE-O0: ; %bb.0: -; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-NOLSE-O0-NEXT: str w1, [sp, #24] ; 4-byte Folded Spill @@ -1652,7 +1652,7 @@ ; CHECK-NOLSE-O0-NEXT: b LBB31_5 ; CHECK-NOLSE-O0-NEXT: LBB31_5: ; %atomicrmw.end ; CHECK-NOLSE-O0-NEXT: ldr w0, [sp, #12] ; 4-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: ret ; ; CHECK-LSE-O1-LABEL: atomicrmw_or_i8: @@ -1683,7 +1683,7 @@ ; ; CHECK-NOLSE-O0-LABEL: atomicrmw_xor_i8: ; CHECK-NOLSE-O0: ; %bb.0: -; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte 
Folded Spill ; CHECK-NOLSE-O0-NEXT: str w1, [sp, #24] ; 4-byte Folded Spill @@ -1718,7 +1718,7 @@ ; CHECK-NOLSE-O0-NEXT: b LBB32_5 ; CHECK-NOLSE-O0-NEXT: LBB32_5: ; %atomicrmw.end ; CHECK-NOLSE-O0-NEXT: ldr w0, [sp, #12] ; 4-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: ret ; ; CHECK-LSE-O1-LABEL: atomicrmw_xor_i8: @@ -1751,7 +1751,7 @@ ; ; CHECK-NOLSE-O0-LABEL: atomicrmw_min_i8: ; CHECK-NOLSE-O0: ; %bb.0: -; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-NOLSE-O0-NEXT: str w1, [sp, #24] ; 4-byte Folded Spill @@ -1789,7 +1789,7 @@ ; CHECK-NOLSE-O0-NEXT: b LBB33_5 ; CHECK-NOLSE-O0-NEXT: LBB33_5: ; %atomicrmw.end ; CHECK-NOLSE-O0-NEXT: ldr w0, [sp, #12] ; 4-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: ret ; ; CHECK-LSE-O1-LABEL: atomicrmw_min_i8: @@ -1822,7 +1822,7 @@ ; ; CHECK-NOLSE-O0-LABEL: atomicrmw_max_i8: ; CHECK-NOLSE-O0: ; %bb.0: -; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-NOLSE-O0-NEXT: str w1, [sp, #24] ; 4-byte Folded Spill @@ -1860,7 +1860,7 @@ ; CHECK-NOLSE-O0-NEXT: b LBB34_5 ; CHECK-NOLSE-O0-NEXT: LBB34_5: ; %atomicrmw.end ; CHECK-NOLSE-O0-NEXT: ldr w0, [sp, #12] ; 4-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: ret ; ; CHECK-LSE-O1-LABEL: atomicrmw_max_i8: @@ -1893,7 +1893,7 @@ ; ; CHECK-NOLSE-O0-LABEL: atomicrmw_umin_i8: ; CHECK-NOLSE-O0: ; %bb.0: -; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-NOLSE-O0-NEXT: str w1, [sp, #24] ; 4-byte Folded Spill @@ -1931,7 +1931,7 @@ ; CHECK-NOLSE-O0-NEXT: b LBB35_5 ; CHECK-NOLSE-O0-NEXT: LBB35_5: ; %atomicrmw.end ; CHECK-NOLSE-O0-NEXT: ldr w0, [sp, #12] ; 4-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: ret ; ; CHECK-LSE-O1-LABEL: atomicrmw_umin_i8: @@ -1964,7 +1964,7 @@ ; ; CHECK-NOLSE-O0-LABEL: atomicrmw_umax_i8: ; CHECK-NOLSE-O0: ; %bb.0: -; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-NOLSE-O0-NEXT: str w1, [sp, #24] ; 4-byte Folded Spill @@ -2002,7 +2002,7 @@ ; CHECK-NOLSE-O0-NEXT: b LBB36_5 ; CHECK-NOLSE-O0-NEXT: LBB36_5: ; %atomicrmw.end ; CHECK-NOLSE-O0-NEXT: ldr w0, [sp, #12] ; 4-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: ret ; ; CHECK-LSE-O1-LABEL: atomicrmw_umax_i8: @@ -2033,7 +2033,7 @@ ; ; CHECK-NOLSE-O0-LABEL: atomicrmw_add_i16: ; CHECK-NOLSE-O0: ; %bb.0: -; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-NOLSE-O0-NEXT: str w1, [sp, #24] ; 4-byte Folded Spill @@ -2068,7 +2068,7 @@ ; CHECK-NOLSE-O0-NEXT: b LBB37_5 ; CHECK-NOLSE-O0-NEXT: LBB37_5: ; %atomicrmw.end 
; CHECK-NOLSE-O0-NEXT: ldr w0, [sp, #12] ; 4-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: ret ; ; CHECK-LSE-O1-LABEL: atomicrmw_add_i16: @@ -2099,7 +2099,7 @@ ; ; CHECK-NOLSE-O0-LABEL: atomicrmw_xchg_i16: ; CHECK-NOLSE-O0: ; %bb.0: -; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-NOLSE-O0-NEXT: str w1, [sp, #24] ; 4-byte Folded Spill @@ -2133,7 +2133,7 @@ ; CHECK-NOLSE-O0-NEXT: b LBB38_5 ; CHECK-NOLSE-O0-NEXT: LBB38_5: ; %atomicrmw.end ; CHECK-NOLSE-O0-NEXT: ldr w0, [sp, #12] ; 4-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: ret ; ; CHECK-LSE-O1-LABEL: atomicrmw_xchg_i16: @@ -2164,7 +2164,7 @@ ; ; CHECK-NOLSE-O0-LABEL: atomicrmw_sub_i16: ; CHECK-NOLSE-O0: ; %bb.0: -; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-NOLSE-O0-NEXT: str w1, [sp, #24] ; 4-byte Folded Spill @@ -2199,7 +2199,7 @@ ; CHECK-NOLSE-O0-NEXT: b LBB39_5 ; CHECK-NOLSE-O0-NEXT: LBB39_5: ; %atomicrmw.end ; CHECK-NOLSE-O0-NEXT: ldr w0, [sp, #12] ; 4-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: ret ; ; CHECK-LSE-O1-LABEL: atomicrmw_sub_i16: @@ -2232,7 +2232,7 @@ ; ; CHECK-NOLSE-O0-LABEL: atomicrmw_and_i16: ; CHECK-NOLSE-O0: ; %bb.0: -; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-NOLSE-O0-NEXT: str w1, [sp, #24] ; 4-byte Folded Spill @@ -2267,7 +2267,7 @@ ; CHECK-NOLSE-O0-NEXT: b LBB40_5 ; CHECK-NOLSE-O0-NEXT: LBB40_5: ; %atomicrmw.end ; CHECK-NOLSE-O0-NEXT: ldr w0, [sp, #12] ; 4-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: ret ; ; CHECK-LSE-O1-LABEL: atomicrmw_and_i16: @@ -2300,7 +2300,7 @@ ; ; CHECK-NOLSE-O0-LABEL: atomicrmw_or_i16: ; CHECK-NOLSE-O0: ; %bb.0: -; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-NOLSE-O0-NEXT: str w1, [sp, #24] ; 4-byte Folded Spill @@ -2335,7 +2335,7 @@ ; CHECK-NOLSE-O0-NEXT: b LBB41_5 ; CHECK-NOLSE-O0-NEXT: LBB41_5: ; %atomicrmw.end ; CHECK-NOLSE-O0-NEXT: ldr w0, [sp, #12] ; 4-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: ret ; ; CHECK-LSE-O1-LABEL: atomicrmw_or_i16: @@ -2366,7 +2366,7 @@ ; ; CHECK-NOLSE-O0-LABEL: atomicrmw_xor_i16: ; CHECK-NOLSE-O0: ; %bb.0: -; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-NOLSE-O0-NEXT: str w1, [sp, #24] ; 4-byte Folded Spill @@ -2401,7 +2401,7 @@ ; CHECK-NOLSE-O0-NEXT: b LBB42_5 ; CHECK-NOLSE-O0-NEXT: LBB42_5: ; %atomicrmw.end ; CHECK-NOLSE-O0-NEXT: ldr w0, [sp, #12] ; 4-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: 
ret ; ; CHECK-LSE-O1-LABEL: atomicrmw_xor_i16: @@ -2434,7 +2434,7 @@ ; ; CHECK-NOLSE-O0-LABEL: atomicrmw_min_i16: ; CHECK-NOLSE-O0: ; %bb.0: -; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-NOLSE-O0-NEXT: str w1, [sp, #24] ; 4-byte Folded Spill @@ -2472,7 +2472,7 @@ ; CHECK-NOLSE-O0-NEXT: b LBB43_5 ; CHECK-NOLSE-O0-NEXT: LBB43_5: ; %atomicrmw.end ; CHECK-NOLSE-O0-NEXT: ldr w0, [sp, #12] ; 4-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: ret ; ; CHECK-LSE-O1-LABEL: atomicrmw_min_i16: @@ -2505,7 +2505,7 @@ ; ; CHECK-NOLSE-O0-LABEL: atomicrmw_max_i16: ; CHECK-NOLSE-O0: ; %bb.0: -; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-NOLSE-O0-NEXT: str w1, [sp, #24] ; 4-byte Folded Spill @@ -2543,7 +2543,7 @@ ; CHECK-NOLSE-O0-NEXT: b LBB44_5 ; CHECK-NOLSE-O0-NEXT: LBB44_5: ; %atomicrmw.end ; CHECK-NOLSE-O0-NEXT: ldr w0, [sp, #12] ; 4-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: ret ; ; CHECK-LSE-O1-LABEL: atomicrmw_max_i16: @@ -2576,7 +2576,7 @@ ; ; CHECK-NOLSE-O0-LABEL: atomicrmw_umin_i16: ; CHECK-NOLSE-O0: ; %bb.0: -; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-NOLSE-O0-NEXT: str w1, [sp, #24] ; 4-byte Folded Spill @@ -2614,7 +2614,7 @@ ; CHECK-NOLSE-O0-NEXT: b LBB45_5 ; CHECK-NOLSE-O0-NEXT: LBB45_5: ; %atomicrmw.end ; CHECK-NOLSE-O0-NEXT: ldr w0, [sp, #12] ; 4-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: ret ; ; CHECK-LSE-O1-LABEL: atomicrmw_umin_i16: @@ -2647,7 +2647,7 @@ ; ; CHECK-NOLSE-O0-LABEL: atomicrmw_umax_i16: ; CHECK-NOLSE-O0: ; %bb.0: -; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: sub sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NOLSE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill ; CHECK-NOLSE-O0-NEXT: str w1, [sp, #24] ; 4-byte Folded Spill @@ -2685,7 +2685,7 @@ ; CHECK-NOLSE-O0-NEXT: b LBB46_5 ; CHECK-NOLSE-O0-NEXT: LBB46_5: ; %atomicrmw.end ; CHECK-NOLSE-O0-NEXT: ldr w0, [sp, #12] ; 4-byte Folded Reload -; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; =32 +; CHECK-NOLSE-O0-NEXT: add sp, sp, #32 ; CHECK-NOLSE-O0-NEXT: ret ; ; CHECK-LSE-O1-LABEL: atomicrmw_umax_i16: diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll b/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll --- a/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll @@ -6,7 +6,7 @@ define void @call_byval_i32(i32* %incoming) { ; CHECK-LABEL: call_byval_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 // =32 +; CHECK-NEXT: sub sp, sp, #32 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: .cfi_offset w30, -16 @@ -14,7 +14,7 @@ ; CHECK-NEXT: str w8, [sp] ; CHECK-NEXT: bl byval_i32 ; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload -; CHECK-NEXT: add sp, sp, #32 // =32 +; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret call void @byval_i32(i32* byval(i32) 
%incoming) ret void @@ -25,10 +25,10 @@ define void @call_byval_a64i32([64 x i32]* %incoming) { ; CHECK-LABEL: call_byval_a64i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #288 // =288 +; CHECK-NEXT: sub sp, sp, #288 ; CHECK-NEXT: stp x29, x30, [sp, #256] // 16-byte Folded Spill ; CHECK-NEXT: str x28, [sp, #272] // 8-byte Folded Spill -; CHECK-NEXT: add x29, sp, #256 // =256 +; CHECK-NEXT: add x29, sp, #256 ; CHECK-NEXT: .cfi_def_cfa w29, 32 ; CHECK-NEXT: .cfi_offset w28, -16 ; CHECK-NEXT: .cfi_offset w30, -24 @@ -68,7 +68,7 @@ ; CHECK-NEXT: bl byval_a64i32 ; CHECK-NEXT: ldr x28, [sp, #272] // 8-byte Folded Reload ; CHECK-NEXT: ldp x29, x30, [sp, #256] // 16-byte Folded Reload -; CHECK-NEXT: add sp, sp, #288 // =288 +; CHECK-NEXT: add sp, sp, #288 ; CHECK-NEXT: ret call void @byval_a64i32([64 x i32]* byval([64 x i32]) %incoming) ret void diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-variadic-musttail.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-variadic-musttail.ll --- a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-variadic-musttail.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-variadic-musttail.ll @@ -30,7 +30,7 @@ define i32 @test_musttail_variadic_spill(i32 %arg0, ...) { ; CHECK-LABEL: test_musttail_variadic_spill: ; CHECK: ; %bb.0: -; CHECK-NEXT: sub sp, sp, #224 ; =224 +; CHECK-NEXT: sub sp, sp, #224 ; CHECK-NEXT: stp x28, x27, [sp, #128] ; 16-byte Folded Spill ; CHECK-NEXT: stp x26, x25, [sp, #144] ; 16-byte Folded Spill ; CHECK-NEXT: stp x24, x23, [sp, #160] ; 16-byte Folded Spill @@ -87,7 +87,7 @@ ; CHECK-NEXT: ldp x24, x23, [sp, #160] ; 16-byte Folded Reload ; CHECK-NEXT: ldp x26, x25, [sp, #144] ; 16-byte Folded Reload ; CHECK-NEXT: ldp x28, x27, [sp, #128] ; 16-byte Folded Reload -; CHECK-NEXT: add sp, sp, #224 ; =224 +; CHECK-NEXT: add sp, sp, #224 ; CHECK-NEXT: b _musttail_variadic_callee ; CHECK-NEXT: .loh AdrpAdd Lloh0, Lloh1 call void @puts(i8* getelementptr ([4 x i8], [4 x i8]* @asdf, i32 0, i32 0)) @@ -102,7 +102,7 @@ define void @f_thunk(i8* %this, ...) 
{ ; CHECK-LABEL: f_thunk: ; CHECK: ; %bb.0: -; CHECK-NEXT: sub sp, sp, #256 ; =256 +; CHECK-NEXT: sub sp, sp, #256 ; CHECK-NEXT: stp x28, x27, [sp, #160] ; 16-byte Folded Spill ; CHECK-NEXT: stp x26, x25, [sp, #176] ; 16-byte Folded Spill ; CHECK-NEXT: stp x24, x23, [sp, #192] ; 16-byte Folded Spill @@ -123,8 +123,8 @@ ; CHECK-NEXT: .cfi_offset w27, -88 ; CHECK-NEXT: .cfi_offset w28, -96 ; CHECK-NEXT: mov x27, x8 -; CHECK-NEXT: add x8, sp, #128 ; =128 -; CHECK-NEXT: add x9, sp, #256 ; =256 +; CHECK-NEXT: add x8, sp, #128 +; CHECK-NEXT: add x9, sp, #256 ; CHECK-NEXT: mov x19, x0 ; CHECK-NEXT: mov x20, x1 ; CHECK-NEXT: mov x21, x2 @@ -159,7 +159,7 @@ ; CHECK-NEXT: ldp x24, x23, [sp, #192] ; 16-byte Folded Reload ; CHECK-NEXT: ldp x26, x25, [sp, #176] ; 16-byte Folded Reload ; CHECK-NEXT: ldp x28, x27, [sp, #160] ; 16-byte Folded Reload -; CHECK-NEXT: add sp, sp, #256 ; =256 +; CHECK-NEXT: add sp, sp, #256 ; CHECK-NEXT: br x9 %ap = alloca [4 x i8*], align 16 %ap_i8 = bitcast [4 x i8*]* %ap to i8* diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/freeze.ll b/llvm/test/CodeGen/AArch64/GlobalISel/freeze.ll --- a/llvm/test/CodeGen/AArch64/GlobalISel/freeze.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/freeze.ll @@ -67,12 +67,12 @@ define i8* @freeze_ptr() { ; CHECK-LABEL: freeze_ptr: ; CHECK: // %bb.0: -; CHECK-NEXT: add x0, x8, #4 // =4 +; CHECK-NEXT: add x0, x8, #4 ; CHECK-NEXT: ret ; ; GISEL-LABEL: freeze_ptr: ; GISEL: // %bb.0: -; GISEL-NEXT: add x0, x8, #4 // =4 +; GISEL-NEXT: add x0, x8, #4 ; GISEL-NEXT: ret %y1 = freeze i8* undef %t1 = getelementptr i8, i8* %y1, i64 4 diff --git a/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll b/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll --- a/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll @@ -154,14 +154,14 @@ } ; CHECK-LABEL: novla_nodynamicrealign_nocall ; Check that space is reserved for one local variable on the stack. -; CHECK: sub sp, sp, #16 // =16 +; CHECK: sub sp, sp, #16 ; Check correct access to arguments passed on the stack, through stack pointer ; CHECK: ldr d[[DARG:[0-9]+]], [sp, #40] ; CHECK: ldr w[[IARG:[0-9]+]], [sp, #24] ; Check correct access to local variable on the stack, through stack pointer ; CHECK: ldr w[[ILOC:[0-9]+]], [sp, #12] ; Check epilogue: -; CHECK: add sp, sp, #16 // =16 +; CHECK: add sp, sp, #16 ; CHECK: ret @@ -394,7 +394,7 @@ ; bytes & the base pointer (x19) gets initialized to ; this 128-byte aligned area for local variables & ; spill slots -; CHECK: sub x9, sp, #80 // =80 +; CHECK: sub x9, sp, #80 ; CHECK: and sp, x9, #0xffffffffffffff80 ; CHECK: mov x19, sp ; Check correctness of cfi pseudo-instructions @@ -688,7 +688,7 @@ ; CHECK-LABEL: realign_conditional2 ; Extra realignment in the prologue (performance issue). ; CHECK: tbz {{.*}} .[[LABEL:.*]] -; CHECK: sub x9, sp, #32 // =32 +; CHECK: sub x9, sp, #32 ; CHECK: and sp, x9, #0xffffffffffffffe0 ; CHECK: mov x19, sp ; Stack is realigned in a non-entry BB. 
diff --git a/llvm/test/CodeGen/AArch64/aarch64-load-ext.ll b/llvm/test/CodeGen/AArch64/aarch64-load-ext.ll --- a/llvm/test/CodeGen/AArch64/aarch64-load-ext.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-load-ext.ll @@ -23,7 +23,7 @@ ; CHECK-LE-LABEL: test1: ; CHECK-LE: // %bb.0: ; CHECK-LE-NEXT: ld1 { v0.h }[0], [x0] -; CHECK-LE-NEXT: add x8, x0, #2 // =2 +; CHECK-LE-NEXT: add x8, x0, #2 ; CHECK-LE-NEXT: ld1 { v0.h }[2], [x8] ; CHECK-LE-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-LE-NEXT: ret @@ -31,7 +31,7 @@ ; CHECK-BE-LABEL: test1: ; CHECK-BE: // %bb.0: ; CHECK-BE-NEXT: ld1 { v0.h }[0], [x0] -; CHECK-BE-NEXT: add x8, x0, #2 // =2 +; CHECK-BE-NEXT: add x8, x0, #2 ; CHECK-BE-NEXT: ld1 { v0.h }[2], [x8] ; CHECK-BE-NEXT: rev64 v0.2s, v0.2s ; CHECK-BE-NEXT: ret @@ -67,7 +67,7 @@ ; CHECK-LE-LABEL: test3: ; CHECK-LE: // %bb.0: ; CHECK-LE-NEXT: ld1 { v0.b }[0], [x0] -; CHECK-LE-NEXT: add x8, x0, #1 // =1 +; CHECK-LE-NEXT: add x8, x0, #1 ; CHECK-LE-NEXT: ld1 { v0.b }[4], [x8] ; CHECK-LE-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-LE-NEXT: ret @@ -75,7 +75,7 @@ ; CHECK-BE-LABEL: test3: ; CHECK-BE: // %bb.0: ; CHECK-BE-NEXT: ld1 { v0.b }[0], [x0] -; CHECK-BE-NEXT: add x8, x0, #1 // =1 +; CHECK-BE-NEXT: add x8, x0, #1 ; CHECK-BE-NEXT: ld1 { v0.b }[4], [x8] ; CHECK-BE-NEXT: rev64 v0.2s, v0.2s ; CHECK-BE-NEXT: ret @@ -446,25 +446,25 @@ define <4 x i8> @bitcast(i32 %0) { ; CHECK-LE-LABEL: bitcast: ; CHECK-LE: // %bb.0: -; CHECK-LE-NEXT: sub sp, sp, #16 // =16 +; CHECK-LE-NEXT: sub sp, sp, #16 ; CHECK-LE-NEXT: .cfi_def_cfa_offset 16 ; CHECK-LE-NEXT: str w0, [sp, #12] ; CHECK-LE-NEXT: ldr s0, [sp, #12] ; CHECK-LE-NEXT: ushll v0.8h, v0.8b, #0 ; CHECK-LE-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-LE-NEXT: add sp, sp, #16 // =16 +; CHECK-LE-NEXT: add sp, sp, #16 ; CHECK-LE-NEXT: ret ; ; CHECK-BE-LABEL: bitcast: ; CHECK-BE: // %bb.0: -; CHECK-BE-NEXT: sub sp, sp, #16 // =16 +; CHECK-BE-NEXT: sub sp, sp, #16 ; CHECK-BE-NEXT: .cfi_def_cfa_offset 16 ; CHECK-BE-NEXT: str w0, [sp, #12] ; CHECK-BE-NEXT: ldr s0, [sp, #12] ; CHECK-BE-NEXT: rev32 v0.8b, v0.8b ; CHECK-BE-NEXT: ushll v0.8h, v0.8b, #0 ; CHECK-BE-NEXT: rev64 v0.4h, v0.4h -; CHECK-BE-NEXT: add sp, sp, #16 // =16 +; CHECK-BE-NEXT: add sp, sp, #16 ; CHECK-BE-NEXT: ret %2 = bitcast i32 %0 to <4 x i8> ret <4 x i8> %2 diff --git a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll --- a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll @@ -13,8 +13,8 @@ ; CHECK-NEXT: add x9, x2, w0, uxtw #1 ; CHECK-NEXT: ldp d1, d2, [x9] ; CHECK-NEXT: add x9, x1, w0, uxtw #2 -; CHECK-NEXT: subs x8, x8, #8 // =8 -; CHECK-NEXT: add w0, w0, #8 // =8 +; CHECK-NEXT: subs x8, x8, #8 +; CHECK-NEXT: add w0, w0, #8 ; CHECK-NEXT: umull v1.4s, v0.4h, v1.4h ; CHECK-NEXT: umull v2.4s, v0.4h, v2.4h ; CHECK-NEXT: stp q1, q2, [x9] @@ -77,8 +77,8 @@ ; CHECK-NEXT: add x9, x2, w0, sxtw #1 ; CHECK-NEXT: ldp d1, d2, [x9] ; CHECK-NEXT: add x9, x1, w0, sxtw #2 -; CHECK-NEXT: subs x8, x8, #8 // =8 -; CHECK-NEXT: add w0, w0, #8 // =8 +; CHECK-NEXT: subs x8, x8, #8 +; CHECK-NEXT: add w0, w0, #8 ; CHECK-NEXT: smull v1.4s, v0.4h, v1.4h ; CHECK-NEXT: smull v2.4s, v0.4h, v2.4h ; CHECK-NEXT: stp q1, q2, [x9] @@ -141,11 +141,11 @@ ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: ldrh w9, [x2], #16 ; CHECK-NEXT: mov w10, w0 -; CHECK-NEXT: subs x8, x8, #8 // =8 +; CHECK-NEXT: subs x8, x8, #8 ; CHECK-NEXT: lsl x10, x10, #2 ; CHECK-NEXT: dup 
v1.4h, w9 ; CHECK-NEXT: umull v1.4s, v0.4h, v1.4h -; CHECK-NEXT: add w0, w0, #8 // =8 +; CHECK-NEXT: add w0, w0, #8 ; CHECK-NEXT: str q1, [x1, x10] ; CHECK-NEXT: b.ne .LBB2_1 ; CHECK-NEXT: // %bb.2: // %for.end12 diff --git a/llvm/test/CodeGen/AArch64/aarch64-tail-dup-size.ll b/llvm/test/CodeGen/AArch64/aarch64-tail-dup-size.ll --- a/llvm/test/CodeGen/AArch64/aarch64-tail-dup-size.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-tail-dup-size.ll @@ -29,7 +29,7 @@ ; CHECK-O2-NEXT: .LBB0_3: // %if.end ; CHECK-O2-NEXT: adrp x9, global_int ; CHECK-O2-NEXT: ldr w1, [x9, :lo12:global_int] -; CHECK-O2-NEXT: add x2, x8, #16 // =16 +; CHECK-O2-NEXT: add x2, x8, #16 ; CHECK-O2-NEXT: mov w0, #10 ; CHECK-O2-NEXT: b externalfunc ; @@ -44,14 +44,14 @@ ; CHECK-O3-NEXT: ldr x8, [x8, :lo12:global_ptr] ; CHECK-O3-NEXT: adrp x9, global_int ; CHECK-O3-NEXT: ldr w1, [x9, :lo12:global_int] -; CHECK-O3-NEXT: add x2, x8, #16 // =16 +; CHECK-O3-NEXT: add x2, x8, #16 ; CHECK-O3-NEXT: mov w0, #10 ; CHECK-O3-NEXT: b externalfunc ; CHECK-O3-NEXT: .LBB0_2: ; CHECK-O3-NEXT: mov x8, xzr ; CHECK-O3-NEXT: adrp x9, global_int ; CHECK-O3-NEXT: ldr w1, [x9, :lo12:global_int] -; CHECK-O3-NEXT: add x2, x8, #16 // =16 +; CHECK-O3-NEXT: add x2, x8, #16 ; CHECK-O3-NEXT: mov w0, #10 ; CHECK-O3-NEXT: b externalfunc entry: diff --git a/llvm/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll b/llvm/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll --- a/llvm/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll +++ b/llvm/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll @@ -4,9 +4,9 @@ define win64cc void @pass_va(i32 %count, ...) nounwind { ; CHECK-LABEL: pass_va: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: sub sp, sp, #96 // =96 -; CHECK-NEXT: add x8, sp, #40 // =40 -; CHECK-NEXT: add x0, sp, #40 // =40 +; CHECK-NEXT: sub sp, sp, #96 +; CHECK-NEXT: add x8, sp, #40 +; CHECK-NEXT: add x0, sp, #40 ; CHECK-NEXT: stp x30, x18, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: stp x1, x2, [sp, #40] ; CHECK-NEXT: stp x3, x4, [sp, #56] @@ -15,7 +15,7 @@ ; CHECK-NEXT: str x8, [sp, #8] ; CHECK-NEXT: bl other_func ; CHECK-NEXT: ldp x30, x18, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: add sp, sp, #96 // =96 +; CHECK-NEXT: add sp, sp, #96 ; CHECK-NEXT: ret entry: %ap = alloca i8*, align 8 @@ -35,8 +35,8 @@ ; CHECK-LABEL: f9: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x18, [sp, #-16]! // 8-byte Folded Spill -; CHECK-NEXT: add x8, sp, #24 // =24 -; CHECK-NEXT: add x0, sp, #24 // =24 +; CHECK-NEXT: add x8, sp, #24 +; CHECK-NEXT: add x0, sp, #24 ; CHECK-NEXT: str x8, [sp, #8] ; CHECK-NEXT: ldr x18, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret @@ -52,8 +52,8 @@ ; CHECK-LABEL: f8: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x18, [sp, #-16]! // 8-byte Folded Spill -; CHECK-NEXT: add x8, sp, #16 // =16 -; CHECK-NEXT: add x0, sp, #16 // =16 +; CHECK-NEXT: add x8, sp, #16 +; CHECK-NEXT: add x0, sp, #16 ; CHECK-NEXT: str x8, [sp, #8] ; CHECK-NEXT: ldr x18, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret @@ -69,9 +69,9 @@ ; CHECK-LABEL: f7: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: str x18, [sp, #-32]! 
// 8-byte Folded Spill -; CHECK-NEXT: add x8, sp, #24 // =24 +; CHECK-NEXT: add x8, sp, #24 ; CHECK-NEXT: str x7, [sp, #24] -; CHECK-NEXT: add x0, sp, #24 // =24 +; CHECK-NEXT: add x0, sp, #24 ; CHECK-NEXT: str x8, [sp, #8] ; CHECK-NEXT: ldr x18, [sp], #32 // 8-byte Folded Reload ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/addsub-constant-folding.ll b/llvm/test/CodeGen/AArch64/addsub-constant-folding.ll --- a/llvm/test/CodeGen/AArch64/addsub-constant-folding.ll +++ b/llvm/test/CodeGen/AArch64/addsub-constant-folding.ll @@ -9,7 +9,7 @@ define i32 @add_const_add_const(i32 %arg) { ; CHECK-LABEL: add_const_add_const: ; CHECK: // %bb.0: -; CHECK-NEXT: add w0, w0, #10 // =10 +; CHECK-NEXT: add w0, w0, #10 ; CHECK-NEXT: ret %t0 = add i32 %arg, 8 %t1 = add i32 %t0, 2 @@ -24,9 +24,9 @@ ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: add w0, w0, #8 // =8 +; CHECK-NEXT: add w0, w0, #8 ; CHECK-NEXT: bl use -; CHECK-NEXT: add w0, w19, #10 // =10 +; CHECK-NEXT: add w0, w19, #10 ; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload ; CHECK-NEXT: ret %t0 = add i32 %arg, 8 @@ -49,7 +49,7 @@ define <4 x i32> @vec_add_const_add_const_extrause(<4 x i32> %arg) { ; CHECK-LABEL: vec_add_const_add_const_extrause: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 // =32 +; CHECK-NEXT: sub sp, sp, #32 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: .cfi_offset w30, -16 @@ -61,7 +61,7 @@ ; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload ; CHECK-NEXT: movi v0.4s, #10 ; CHECK-NEXT: add v0.4s, v1.4s, v0.4s -; CHECK-NEXT: add sp, sp, #32 // =32 +; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret %t0 = add <4 x i32> %arg, call void @vec_use(<4 x i32> %t0) @@ -86,7 +86,7 @@ define i32 @add_const_sub_const(i32 %arg) { ; CHECK-LABEL: add_const_sub_const: ; CHECK: // %bb.0: -; CHECK-NEXT: add w0, w0, #6 // =6 +; CHECK-NEXT: add w0, w0, #6 ; CHECK-NEXT: ret %t0 = add i32 %arg, 8 %t1 = sub i32 %t0, 2 @@ -101,9 +101,9 @@ ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: add w0, w0, #8 // =8 +; CHECK-NEXT: add w0, w0, #8 ; CHECK-NEXT: bl use -; CHECK-NEXT: add w0, w19, #6 // =6 +; CHECK-NEXT: add w0, w19, #6 ; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload ; CHECK-NEXT: ret %t0 = add i32 %arg, 8 @@ -126,7 +126,7 @@ define <4 x i32> @vec_add_const_sub_const_extrause(<4 x i32> %arg) { ; CHECK-LABEL: vec_add_const_sub_const_extrause: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 // =32 +; CHECK-NEXT: sub sp, sp, #32 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: .cfi_offset w30, -16 @@ -138,7 +138,7 @@ ; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload ; CHECK-NEXT: movi v0.4s, #6 ; CHECK-NEXT: add v0.4s, v1.4s, v0.4s -; CHECK-NEXT: add sp, sp, #32 // =32 +; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret %t0 = add <4 x i32> %arg, call void @vec_use(<4 x i32> %t0) @@ -179,7 +179,7 @@ ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: add w0, w0, #8 // =8 +; CHECK-NEXT: add w0, w0, #8 ; CHECK-NEXT: bl use ; CHECK-NEXT: mov w8, #-6 ; CHECK-NEXT: sub w0, w8, w19 @@ -205,7 +205,7 @@ define <4 x i32> @vec_add_const_const_sub_extrause(<4 x i32> %arg) { ; CHECK-LABEL: vec_add_const_const_sub_extrause: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 // =32 +; CHECK-NEXT: sub sp, sp, #32 ; 
CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: .cfi_offset w30, -16 @@ -217,7 +217,7 @@ ; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload ; CHECK-NEXT: mvni v0.4s, #5 ; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s -; CHECK-NEXT: add sp, sp, #32 // =32 +; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret %t0 = add <4 x i32> %arg, call void @vec_use(<4 x i32> %t0) @@ -242,7 +242,7 @@ define i32 @sub_const_add_const(i32 %arg) { ; CHECK-LABEL: sub_const_add_const: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w0, w0, #6 // =6 +; CHECK-NEXT: sub w0, w0, #6 ; CHECK-NEXT: ret %t0 = sub i32 %arg, 8 %t1 = add i32 %t0, 2 @@ -257,9 +257,9 @@ ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: sub w0, w0, #8 // =8 +; CHECK-NEXT: sub w0, w0, #8 ; CHECK-NEXT: bl use -; CHECK-NEXT: sub w0, w19, #6 // =6 +; CHECK-NEXT: sub w0, w19, #6 ; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload ; CHECK-NEXT: ret %t0 = sub i32 %arg, 8 @@ -282,7 +282,7 @@ define <4 x i32> @vec_sub_const_add_const_extrause(<4 x i32> %arg) { ; CHECK-LABEL: vec_sub_const_add_const_extrause: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 // =32 +; CHECK-NEXT: sub sp, sp, #32 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: .cfi_offset w30, -16 @@ -294,7 +294,7 @@ ; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload ; CHECK-NEXT: mvni v0.4s, #5 ; CHECK-NEXT: add v0.4s, v1.4s, v0.4s -; CHECK-NEXT: add sp, sp, #32 // =32 +; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret %t0 = sub <4 x i32> %arg, call void @vec_use(<4 x i32> %t0) @@ -319,7 +319,7 @@ define i32 @sub_const_sub_const(i32 %arg) { ; CHECK-LABEL: sub_const_sub_const: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w0, w0, #10 // =10 +; CHECK-NEXT: sub w0, w0, #10 ; CHECK-NEXT: ret %t0 = sub i32 %arg, 8 %t1 = sub i32 %t0, 2 @@ -334,9 +334,9 @@ ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: sub w0, w0, #8 // =8 +; CHECK-NEXT: sub w0, w0, #8 ; CHECK-NEXT: bl use -; CHECK-NEXT: sub w0, w19, #10 // =10 +; CHECK-NEXT: sub w0, w19, #10 ; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload ; CHECK-NEXT: ret %t0 = sub i32 %arg, 8 @@ -359,7 +359,7 @@ define <4 x i32> @vec_sub_const_sub_const_extrause(<4 x i32> %arg) { ; CHECK-LABEL: vec_sub_const_sub_const_extrause: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 // =32 +; CHECK-NEXT: sub sp, sp, #32 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: .cfi_offset w30, -16 @@ -371,7 +371,7 @@ ; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload ; CHECK-NEXT: movi v0.4s, #10 ; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s -; CHECK-NEXT: add sp, sp, #32 // =32 +; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret %t0 = sub <4 x i32> %arg, call void @vec_use(<4 x i32> %t0) @@ -412,7 +412,7 @@ ; CHECK-NEXT: .cfi_offset w19, -8 ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: mov w19, w0 -; CHECK-NEXT: sub w0, w0, #8 // =8 +; CHECK-NEXT: sub w0, w0, #8 ; CHECK-NEXT: bl use ; CHECK-NEXT: mov w8, #10 ; CHECK-NEXT: sub w0, w8, w19 @@ -438,7 +438,7 @@ define <4 x i32> @vec_sub_const_const_sub_extrause(<4 x i32> %arg) { ; CHECK-LABEL: vec_sub_const_const_sub_extrause: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 // =32 +; CHECK-NEXT: sub sp, sp, #32 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: .cfi_offset 
w30, -16 @@ -450,7 +450,7 @@ ; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload ; CHECK-NEXT: movi v0.4s, #2 ; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s -; CHECK-NEXT: add sp, sp, #32 // =32 +; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret %t0 = sub <4 x i32> %arg, call void @vec_use(<4 x i32> %t0) @@ -518,7 +518,7 @@ define <4 x i32> @vec_const_sub_add_const_extrause(<4 x i32> %arg) { ; CHECK-LABEL: vec_const_sub_add_const_extrause: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 // =32 +; CHECK-NEXT: sub sp, sp, #32 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: .cfi_offset w30, -16 @@ -530,7 +530,7 @@ ; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload ; CHECK-NEXT: movi v0.4s, #10 ; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s -; CHECK-NEXT: add sp, sp, #32 // =32 +; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret %t0 = sub <4 x i32> , %arg call void @vec_use(<4 x i32> %t0) @@ -598,7 +598,7 @@ define <4 x i32> @vec_const_sub_sub_const_extrause(<4 x i32> %arg) { ; CHECK-LABEL: vec_const_sub_sub_const_extrause: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 // =32 +; CHECK-NEXT: sub sp, sp, #32 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: .cfi_offset w30, -16 @@ -610,7 +610,7 @@ ; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload ; CHECK-NEXT: movi v0.4s, #6 ; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s -; CHECK-NEXT: add sp, sp, #32 // =32 +; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret %t0 = sub <4 x i32> , %arg call void @vec_use(<4 x i32> %t0) @@ -635,7 +635,7 @@ define i32 @const_sub_const_sub(i32 %arg) { ; CHECK-LABEL: const_sub_const_sub: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w0, w0, #6 // =6 +; CHECK-NEXT: sub w0, w0, #6 ; CHECK-NEXT: ret %t0 = sub i32 8, %arg %t1 = sub i32 2, %t0 @@ -677,7 +677,7 @@ define <4 x i32> @vec_const_sub_const_sub_extrause(<4 x i32> %arg) { ; CHECK-LABEL: vec_const_sub_const_sub_extrause: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 // =32 +; CHECK-NEXT: sub sp, sp, #32 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: .cfi_offset w30, -16 @@ -689,7 +689,7 @@ ; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload ; CHECK-NEXT: movi v0.4s, #2 ; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s -; CHECK-NEXT: add sp, sp, #32 // =32 +; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret %t0 = sub <4 x i32> , %arg call void @vec_use(<4 x i32> %t0) diff --git a/llvm/test/CodeGen/AArch64/addsub.ll b/llvm/test/CodeGen/AArch64/addsub.ll --- a/llvm/test/CodeGen/AArch64/addsub.ll +++ b/llvm/test/CodeGen/AArch64/addsub.ll @@ -19,8 +19,8 @@ ; CHECK-NEXT: ldr x9, [x9, :got_lo12:var_i64] ; CHECK-NEXT: ldr w10, [x8] ; CHECK-NEXT: ldr x11, [x9] -; CHECK-NEXT: add w10, w10, #4095 // =4095 -; CHECK-NEXT: add x11, x11, #52 // =52 +; CHECK-NEXT: add w10, w10, #4095 +; CHECK-NEXT: add x11, x11, #52 ; CHECK-NEXT: str w10, [x8] ; CHECK-NEXT: str x11, [x9] ; CHECK-NEXT: ret @@ -50,7 +50,7 @@ ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: ldrb w8, [x0] ; CHECK-NEXT: add w9, w8, w2 -; CHECK-NEXT: add x8, x8, #12 // =12 +; CHECK-NEXT: add x8, x8, #12 ; CHECK-NEXT: str w9, [x3] ; CHECK-NEXT: str x8, [x1] ; CHECK-NEXT: ret @@ -105,8 +105,8 @@ ; CHECK-NEXT: ldr x9, [x9, :got_lo12:var_i64] ; CHECK-NEXT: ldr w10, [x8] ; CHECK-NEXT: ldr x11, [x9] -; CHECK-NEXT: sub w10, w10, #4095 // =4095 -; CHECK-NEXT: sub x11, x11, #52 // =52 +; CHECK-NEXT: sub w10, w10, #4095 +; CHECK-NEXT: sub x11, x11, #52 ; CHECK-NEXT: str w10, [x8] ; 
CHECK-NEXT: str x11, [x9] ; CHECK-NEXT: ret @@ -155,33 +155,33 @@ ; CHECK-NEXT: adrp x8, :got:var_i32 ; CHECK-NEXT: ldr x8, [x8, :got_lo12:var_i32] ; CHECK-NEXT: ldr w9, [x8] -; CHECK-NEXT: cmp w9, #4095 // =4095 +; CHECK-NEXT: cmp w9, #4095 ; CHECK-NEXT: b.ne .LBB5_6 ; CHECK-NEXT: // %bb.1: // %test2 ; CHECK-NEXT: adrp x10, :got:var2_i32 ; CHECK-NEXT: ldr x10, [x10, :got_lo12:var2_i32] -; CHECK-NEXT: add w11, w9, #1 // =1 +; CHECK-NEXT: add w11, w9, #1 ; CHECK-NEXT: str w11, [x8] ; CHECK-NEXT: ldr w10, [x10] ; CHECK-NEXT: cmp w10, #3567, lsl #12 // =14610432 ; CHECK-NEXT: b.lo .LBB5_6 ; CHECK-NEXT: // %bb.2: // %test3 -; CHECK-NEXT: add w11, w9, #2 // =2 -; CHECK-NEXT: cmp w9, #123 // =123 +; CHECK-NEXT: add w11, w9, #2 +; CHECK-NEXT: cmp w9, #123 ; CHECK-NEXT: str w11, [x8] ; CHECK-NEXT: b.lt .LBB5_6 ; CHECK-NEXT: // %bb.3: // %test4 -; CHECK-NEXT: add w11, w9, #3 // =3 -; CHECK-NEXT: cmp w10, #321 // =321 +; CHECK-NEXT: add w11, w9, #3 +; CHECK-NEXT: cmp w10, #321 ; CHECK-NEXT: str w11, [x8] ; CHECK-NEXT: b.gt .LBB5_6 ; CHECK-NEXT: // %bb.4: // %test5 -; CHECK-NEXT: add w11, w9, #4 // =4 -; CHECK-NEXT: cmn w10, #443 // =443 +; CHECK-NEXT: add w11, w9, #4 +; CHECK-NEXT: cmn w10, #443 ; CHECK-NEXT: str w11, [x8] ; CHECK-NEXT: b.ge .LBB5_6 ; CHECK-NEXT: // %bb.5: // %test6 -; CHECK-NEXT: add w9, w9, #5 // =5 +; CHECK-NEXT: add w9, w9, #5 ; CHECK-NEXT: str w9, [x8] ; CHECK-NEXT: .LBB5_6: // %common.ret ; CHECK-NEXT: ret @@ -232,7 +232,7 @@ ; CHECK-NEXT: mvn w8, w0 ; CHECK-NEXT: adds w8, w8, w1 ; CHECK-NEXT: cset w0, vs -; CHECK-NEXT: add w8, w8, #1 // =1 +; CHECK-NEXT: add w8, w8, #1 ; CHECK-NEXT: str w8, [x2] ; CHECK-NEXT: ret %nota = xor i32 %a, -1 @@ -253,7 +253,7 @@ ; CHECK-NEXT: and w8, w8, #0xff ; CHECK-NEXT: add w8, w8, w1, uxtb ; CHECK-NEXT: lsr w0, w8, #8 -; CHECK-NEXT: add w8, w8, #1 // =1 +; CHECK-NEXT: add w8, w8, #1 ; CHECK-NEXT: strb w8, [x2] ; CHECK-NEXT: ret %nota = xor i8 %a, -1 diff --git a/llvm/test/CodeGen/AArch64/align-down.ll b/llvm/test/CodeGen/AArch64/align-down.ll --- a/llvm/test/CodeGen/AArch64/align-down.ll +++ b/llvm/test/CodeGen/AArch64/align-down.ll @@ -55,7 +55,7 @@ ; CHECK-LABEL: t3_extrause0: ; CHECK: // %bb.0: ; CHECK-NEXT: neg w9, w1 -; CHECK-NEXT: sub w8, w1, #1 // =1 +; CHECK-NEXT: sub w8, w1, #1 ; CHECK-NEXT: and w0, w0, w9 ; CHECK-NEXT: str w8, [x2] ; CHECK-NEXT: ret @@ -68,7 +68,7 @@ define i32 @n4_extrause1(i32 %ptr, i32 %alignment, i32* %bias_storage) nounwind { ; CHECK-LABEL: n4_extrause1: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w1, #1 // =1 +; CHECK-NEXT: sub w8, w1, #1 ; CHECK-NEXT: and w8, w0, w8 ; CHECK-NEXT: sub w0, w0, w8 ; CHECK-NEXT: str w8, [x2] @@ -82,7 +82,7 @@ define i32 @n5_extrause2(i32 %ptr, i32 %alignment, i32* %mask_storage, i32* %bias_storage) nounwind { ; CHECK-LABEL: n5_extrause2: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w1, #1 // =1 +; CHECK-NEXT: sub w8, w1, #1 ; CHECK-NEXT: str w8, [x2] ; CHECK-NEXT: and w8, w0, w8 ; CHECK-NEXT: sub w0, w0, w8 @@ -101,7 +101,7 @@ define i32 @n6_different_ptrs(i32 %ptr0, i32 %ptr1, i32 %alignment) nounwind { ; CHECK-LABEL: n6_different_ptrs: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w2, #1 // =1 +; CHECK-NEXT: sub w8, w2, #1 ; CHECK-NEXT: and w8, w1, w8 ; CHECK-NEXT: sub w0, w0, w8 ; CHECK-NEXT: ret @@ -113,7 +113,7 @@ define i32 @n7_different_ptrs_commutative(i32 %ptr0, i32 %ptr1, i32 %alignment) nounwind { ; CHECK-LABEL: n7_different_ptrs_commutative: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w2, #1 // =1 +; CHECK-NEXT: sub w8, w2, #1 ; CHECK-NEXT: and w8, w8, w1 ; CHECK-NEXT: sub w0, w0, 
w8 ; CHECK-NEXT: ret @@ -126,7 +126,7 @@ define i32 @n8_not_lowbit_mask(i32 %ptr, i32 %alignment) nounwind { ; CHECK-LABEL: n8_not_lowbit_mask: ; CHECK: // %bb.0: -; CHECK-NEXT: add w8, w1, #1 // =1 +; CHECK-NEXT: add w8, w1, #1 ; CHECK-NEXT: bic w0, w0, w8 ; CHECK-NEXT: ret %mask = add i32 %alignment, 1 ; not -1 @@ -138,7 +138,7 @@ define i32 @n9_sub_is_not_commutative(i32 %ptr, i32 %alignment) nounwind { ; CHECK-LABEL: n9_sub_is_not_commutative: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w1, #1 // =1 +; CHECK-NEXT: sub w8, w1, #1 ; CHECK-NEXT: and w8, w0, w8 ; CHECK-NEXT: sub w0, w8, w0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll b/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll --- a/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll +++ b/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll @@ -7,7 +7,7 @@ define void @fn9(i32* %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, ...) nounwind noinline ssp { ; CHECK-LABEL: fn9: ; CHECK: ; %bb.0: -; CHECK-NEXT: sub sp, sp, #64 ; =64 +; CHECK-NEXT: sub sp, sp, #64 ; CHECK-NEXT: ldr w8, [sp, #64] ; CHECK-NEXT: stp w2, w1, [sp, #52] ; CHECK-NEXT: stp w4, w3, [sp, #44] @@ -17,12 +17,12 @@ ; CHECK-NEXT: ldr w9, [sp, #72] ; CHECK-NEXT: ldr w8, [sp, #80] ; CHECK-NEXT: stp w8, w9, [sp, #16] -; CHECK-NEXT: add x8, sp, #72 ; =72 -; CHECK-NEXT: add x8, x8, #24 ; =24 +; CHECK-NEXT: add x8, sp, #72 +; CHECK-NEXT: add x8, x8, #24 ; CHECK-NEXT: str x8, [sp, #24] ; CHECK-NEXT: ldr w8, [sp, #88] ; CHECK-NEXT: str w8, [sp, #12] -; CHECK-NEXT: add sp, sp, #64 ; =64 +; CHECK-NEXT: add sp, sp, #64 ; CHECK-NEXT: ret %1 = alloca i32, align 4 %2 = alloca i32, align 4 @@ -62,7 +62,7 @@ define i32 @main() nounwind ssp { ; CHECK-LABEL: main: ; CHECK: ; %bb.0: -; CHECK-NEXT: sub sp, sp, #96 ; =96 +; CHECK-NEXT: sub sp, sp, #96 ; CHECK-NEXT: stp x29, x30, [sp, #80] ; 16-byte Folded Spill ; CHECK-NEXT: mov w9, #1 ; CHECK-NEXT: mov w8, #2 @@ -85,7 +85,7 @@ ; CHECK-NEXT: stp x10, x11, [sp, #16] ; CHECK-NEXT: str x9, [sp, #8] ; CHECK-NEXT: str w8, [sp] -; CHECK-NEXT: add x0, sp, #76 ; =76 +; CHECK-NEXT: add x0, sp, #76 ; CHECK-NEXT: mov w1, #2 ; CHECK-NEXT: mov w2, #3 ; CHECK-NEXT: mov w3, #4 @@ -96,7 +96,7 @@ ; CHECK-NEXT: bl _fn9 ; CHECK-NEXT: mov w0, #0 ; CHECK-NEXT: ldp x29, x30, [sp, #80] ; 16-byte Folded Reload -; CHECK-NEXT: add sp, sp, #96 ; =96 +; CHECK-NEXT: add sp, sp, #96 ; CHECK-NEXT: ret %a1 = alloca i32, align 4 %a2 = alloca i32, align 4 @@ -143,13 +143,13 @@ define void @foo(i8* %fmt, ...) 
; CHECK-LABEL: foo:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: sub sp, sp, #48 ; =48
+; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: ldr w8, [sp, #48]
; CHECK-NEXT: str w8, [sp, #28]
-; CHECK-NEXT: add x8, sp, #48 ; =48
-; CHECK-NEXT: add x8, x8, #23 ; =23
+; CHECK-NEXT: add x8, sp, #48
+; CHECK-NEXT: add x8, x8, #23
; CHECK-NEXT: and x8, x8, #0xfffffffffffffff0
-; CHECK-NEXT: add x9, x8, #16 ; =16
+; CHECK-NEXT: add x9, x8, #16
; CHECK-NEXT: stp x9, x0, [sp, #32]
; CHECK-NEXT: ldr q0, [x8]
; CHECK-NEXT: str q0, [sp], #48
@@ -172,7 +172,7 @@
define void @bar(i32 %x, <4 x i32> %y) nounwind {
; CHECK-LABEL: bar:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: sub sp, sp, #80 ; =80
+; CHECK-NEXT: sub sp, sp, #80
; CHECK-NEXT: stp x29, x30, [sp, #64] ; 16-byte Folded Spill
; CHECK-NEXT: ; kill: def $w0 killed $w0 def $x0
; CHECK-NEXT: str w0, [sp, #60]
@@ -184,7 +184,7 @@
; CHECK-NEXT: add x0, x0, l_.str@PAGEOFF
; CHECK-NEXT: bl _foo
; CHECK-NEXT: ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
-; CHECK-NEXT: add sp, sp, #80 ; =80
+; CHECK-NEXT: add sp, sp, #80
; CHECK-NEXT: ret
; CHECK-NEXT: .loh AdrpAdd Lloh0, Lloh1
entry:
@@ -205,13 +205,13 @@
define void @foo2(i8* %fmt, ...) nounwind {
; CHECK-LABEL: foo2:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: sub sp, sp, #48 ; =48
+; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: ldr w8, [sp, #48]
; CHECK-NEXT: str w8, [sp, #28]
-; CHECK-NEXT: add x8, sp, #48 ; =48
-; CHECK-NEXT: add x8, x8, #23 ; =23
+; CHECK-NEXT: add x8, sp, #48
+; CHECK-NEXT: add x8, x8, #23
; CHECK-NEXT: and x8, x8, #0xfffffffffffffff0
-; CHECK-NEXT: add x9, x8, #16 ; =16
+; CHECK-NEXT: add x9, x8, #16
; CHECK-NEXT: stp x9, x0, [sp, #32]
; CHECK-NEXT: ldr q0, [x8]
; CHECK-NEXT: str q0, [sp], #48
@@ -244,7 +244,7 @@
define void @bar2(i32 %x, i128 %s41.coerce) nounwind {
; CHECK-LABEL: bar2:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: sub sp, sp, #80 ; =80
+; CHECK-NEXT: sub sp, sp, #80
; CHECK-NEXT: stp x29, x30, [sp, #64] ; 16-byte Folded Spill
; CHECK-NEXT: ; kill: def $w0 killed $w0 def $x0
; CHECK-NEXT: str w0, [sp, #60]
@@ -257,7 +257,7 @@
; CHECK-NEXT: add x0, x0, l_.str@PAGEOFF
; CHECK-NEXT: bl _foo2
; CHECK-NEXT: ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
-; CHECK-NEXT: add sp, sp, #80 ; =80
+; CHECK-NEXT: add sp, sp, #80
; CHECK-NEXT: ret
; CHECK-NEXT: .loh AdrpAdd Lloh2, Lloh3
entry:
diff --git a/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll b/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
--- a/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
@@ -306,7 +306,7 @@
; CHECK-NEXT: cmp x8, x3
; CHECK-NEXT: cset w11, le
; CHECK-NEXT: csel w10, w10, w11, eq
-; CHECK-NEXT: cmp w10, #0 // =0
+; CHECK-NEXT: cmp w10, #0
; CHECK-NEXT: csel x10, x8, x3, ne
; CHECK-NEXT: csel x11, x9, x2, ne
; CHECK-NEXT: stlxp w12, x11, x10, [x0]
@@ -333,7 +333,7 @@
; CHECK-NEXT: cmp x8, x3
; CHECK-NEXT: cset w11, gt
; CHECK-NEXT: csel w10, w10, w11, eq
-; CHECK-NEXT: cmp w10, #0 // =0
+; CHECK-NEXT: cmp w10, #0
; CHECK-NEXT: csel x10, x8, x3, ne
; CHECK-NEXT: csel x11, x9, x2, ne
; CHECK-NEXT: stlxp w12, x11, x10, [x0]
@@ -360,7 +360,7 @@
; CHECK-NEXT: cmp x8, x3
; CHECK-NEXT: cset w11, ls
; CHECK-NEXT: csel w10, w10, w11, eq
-; CHECK-NEXT: cmp w10, #0 // =0
+; CHECK-NEXT: cmp w10, #0
; CHECK-NEXT: csel x10, x8, x3, ne
; CHECK-NEXT: csel x11, x9, x2, ne
; CHECK-NEXT: stlxp w12, x11, x10, [x0]
@@ -387,7 +387,7 @@
; CHECK-NEXT: cmp x8, x3
; CHECK-NEXT: cset w11, hi
; CHECK-NEXT: csel w10, w10, w11, eq
-; CHECK-NEXT: cmp w10, #0 // =0
+; CHECK-NEXT: cmp w10, #0
; CHECK-NEXT: csel x10, x8, x3, ne
; CHECK-NEXT: csel x11, x9, x2, ne
; CHECK-NEXT: stlxp w12, x11, x10, [x0]
diff --git a/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll b/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll
--- a/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll
@@ -813,7 +813,7 @@
; LLC-NEXT: adrp x9, first_ones
; LLC-NEXT: add x9, x9, :lo12:first_ones
; LLC-NEXT: ldrb w8, [x9, x8]
-; LLC-NEXT: add w0, w8, #16 // =16
+; LLC-NEXT: add w0, w8, #16
; LLC-NEXT: ret
; LLC-NEXT: .LBB26_4: // %if.end13
; LLC-NEXT: ubfx x8, x0, #16, #16
@@ -822,7 +822,7 @@
; LLC-NEXT: adrp x9, first_ones
; LLC-NEXT: add x9, x9, :lo12:first_ones
; LLC-NEXT: ldrb w8, [x9, x8]
-; LLC-NEXT: add w0, w8, #32 // =32
+; LLC-NEXT: add w0, w8, #32
; LLC-NEXT: ret
; LLC-NEXT: .LBB26_6:
; LLC-NEXT: mov w0, #64
@@ -932,7 +932,7 @@
; LLC-NEXT: movk x12, #45, lsl #48
; LLC-NEXT: and x11, x9, x11
; LLC-NEXT: and x12, x8, x12
-; LLC-NEXT: cmp x10, #0 // =0
+; LLC-NEXT: cmp x10, #0
; LLC-NEXT: csel x0, x12, x8, eq
; LLC-NEXT: csel x1, x11, x9, eq
; LLC-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/arm64-ccmp.ll b/llvm/test/CodeGen/AArch64/arm64-ccmp.ll
--- a/llvm/test/CodeGen/AArch64/arm64-ccmp.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ccmp.ll
@@ -5,7 +5,7 @@
define i32 @single_same(i32 %a, i32 %b) nounwind ssp {
; CHECK-LABEL: single_same:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: cmp w0, #5 ; =5
+; CHECK-NEXT: cmp w0, #5
; CHECK-NEXT: ccmp w1, #17, #4, ne
; CHECK-NEXT: b.ne LBB0_2
; CHECK-NEXT: ; %bb.1: ; %if.then
@@ -33,7 +33,7 @@
define i32 @single_different(i32 %a, i32 %b) nounwind ssp {
; CHECK-LABEL: single_different:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: cmp w0, #6 ; =6
+; CHECK-NEXT: cmp w0, #6
; CHECK-NEXT: ccmp w1, #17, #0, ge
; CHECK-NEXT: b.eq LBB1_2
; CHECK-NEXT: ; %bb.1: ; %if.then
@@ -61,13 +61,13 @@
define i32 @single_flagclobber(i32 %a, i32 %b) nounwind ssp {
; CHECK-LABEL: single_flagclobber:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: cmp w0, #5 ; =5
+; CHECK-NEXT: cmp w0, #5
; CHECK-NEXT: b.eq LBB2_2
; CHECK-NEXT: ; %bb.1: ; %lor.lhs.false
; CHECK-NEXT: lsl w8, w1, #1
-; CHECK-NEXT: cmp w1, #7 ; =7
+; CHECK-NEXT: cmp w1, #7
; CHECK-NEXT: csinc w8, w8, w1, lt
-; CHECK-NEXT: cmp w8, #16 ; =16
+; CHECK-NEXT: cmp w8, #16
; CHECK-NEXT: b.gt LBB2_3
; CHECK-NEXT: LBB2_2: ; %if.then
; CHECK-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
@@ -100,11 +100,11 @@
define i32 @single_flagclobber_tbz(i32 %a, i32 %b) nounwind ssp {
; CHECK-LABEL: single_flagclobber_tbz:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: cmp w0, #5 ; =5
+; CHECK-NEXT: cmp w0, #5
; CHECK-NEXT: b.eq LBB3_2
; CHECK-NEXT: ; %bb.1: ; %lor.lhs.false
; CHECK-NEXT: lsl w8, w1, #1
-; CHECK-NEXT: cmp w1, #7 ; =7
+; CHECK-NEXT: cmp w1, #7
; CHECK-NEXT: csinc w8, w8, w1, lt
; CHECK-NEXT: tbz w8, #3, LBB3_3
; CHECK-NEXT: LBB3_2: ; %if.then
@@ -141,7 +141,7 @@
define i32 @speculate_division(i32 %a, i32 %b) nounwind ssp {
; CHECK-LABEL: speculate_division:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: cmp w0, #1 ; =1
+; CHECK-NEXT: cmp w0, #1
; CHECK-NEXT: sdiv w8, w1, w0
; CHECK-NEXT: ccmp w8, #16, #0, ge
; CHECK-NEXT: b.le LBB4_2
@@ -175,7 +175,7 @@
define i32 @single_fcmp(i32 %a, float %b) nounwind ssp {
; CHECK-LABEL: single_fcmp:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: cmp w0, #1 ; =1
+; CHECK-NEXT: cmp w0, #1
; CHECK-NEXT: scvtf s1, w0
; CHECK-NEXT: fdiv s0, s0, s1
; CHECK-NEXT: fmov s1, #17.00000000
@@ -244,7 +244,7 @@
define i32 @cbz_head(i32 %a, i32 %b) nounwind ssp {
; CHECK-LABEL: cbz_head:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: cmp w0, #0 ; =0
+; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: ccmp w1, #17, #0, ne
; CHECK-NEXT: b.eq LBB7_2
; CHECK-NEXT: ; %bb.1: ; %if.then
@@ -274,10 +274,10 @@
define i32 @immediate_range(i32 %a, i32 %b) nounwind ssp {
; CHECK-LABEL: immediate_range:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: cmp w0, #5 ; =5
+; CHECK-NEXT: cmp w0, #5
; CHECK-NEXT: b.eq LBB8_3
; CHECK-NEXT: ; %bb.1: ; %entry
-; CHECK-NEXT: cmp w1, #32 ; =32
+; CHECK-NEXT: cmp w1, #32
; CHECK-NEXT: b.eq LBB8_3
; CHECK-NEXT: ; %bb.2: ; %if.end
; CHECK-NEXT: mov w0, #7
@@ -306,7 +306,7 @@
define i32 @cbz_second(i32 %a, i32 %b) nounwind ssp {
; CHECK-LABEL: cbz_second:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: cmp w0, #0 ; =0
+; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: ccmp w1, #0, #0, ne
; CHECK-NEXT: b.eq LBB9_2
; CHECK-NEXT: ; %bb.1: ; %if.then
@@ -334,7 +334,7 @@
define i32 @cbnz_second(i32 %a, i32 %b) nounwind ssp {
; CHECK-LABEL: cbnz_second:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: cmp w0, #0 ; =0
+; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: ccmp w1, #0, #4, ne
; CHECK-NEXT: b.ne LBB10_2
; CHECK-NEXT: ; %bb.1: ; %if.then
@@ -367,7 +367,7 @@
define void @build_modify_expr() nounwind ssp {
; CHECK-LABEL: build_modify_expr:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: cmp w8, #37 ; =37
+; CHECK-NEXT: cmp w8, #37
; CHECK-NEXT: mov w8, #1
; CHECK-NEXT: lsl x8, x8, xzr
; CHECK-NEXT: mov x9, #31
@@ -409,7 +409,7 @@
define i64 @select_and(i32 %w0, i32 %w1, i64 %x2, i64 %x3) {
; CHECK-LABEL: select_and:
; CHECK: ; %bb.0:
-; CHECK-NEXT: cmp w1, #5 ; =5
+; CHECK-NEXT: cmp w1, #5
; CHECK-NEXT: ccmp w0, w1, #0, ne
; CHECK-NEXT: csel x0, x2, x3, lt
; CHECK-NEXT: ret
@@ -423,7 +423,7 @@
define i64 @select_or(i32 %w0, i32 %w1, i64 %x2, i64 %x3) {
; CHECK-LABEL: select_or:
; CHECK: ; %bb.0:
-; CHECK-NEXT: cmp w1, #5 ; =5
+; CHECK-NEXT: cmp w1, #5
; CHECK-NEXT: ccmp w0, w1, #8, eq
; CHECK-NEXT: csel x0, x2, x3, lt
; CHECK-NEXT: ret
@@ -437,7 +437,7 @@
define i64 @gccbug(i64 %x0, i64 %x1) {
; CHECK-LABEL: gccbug:
; CHECK: ; %bb.0:
-; CHECK-NEXT: cmp x0, #2 ; =2
+; CHECK-NEXT: cmp x0, #2
; CHECK-NEXT: ccmp x0, #4, #4, ne
; CHECK-NEXT: ccmp x1, #0, #0, eq
; CHECK-NEXT: mov w8, #1
@@ -457,7 +457,7 @@
define i32 @select_ororand(i32 %w0, i32 %w1, i32 %w2, i32 %w3) {
; CHECK-LABEL: select_ororand:
; CHECK: ; %bb.0:
-; CHECK-NEXT: cmp w3, #4 ; =4
+; CHECK-NEXT: cmp w3, #4
; CHECK-NEXT: ccmp w2, #2, #0, gt
; CHECK-NEXT: ccmp w1, #13, #2, ge
; CHECK-NEXT: ccmp w0, #0, #4, ls
@@ -494,18 +494,18 @@
define i64 @select_noccmp1(i64 %v1, i64 %v2, i64 %v3, i64 %r) {
; CHECK-LABEL: select_noccmp1:
; CHECK: ; %bb.0:
-; CHECK-NEXT: cmp x0, #0 ; =0
+; CHECK-NEXT: cmp x0, #0
; CHECK-NEXT: cset w8, lt
-; CHECK-NEXT: cmp x0, #13 ; =13
+; CHECK-NEXT: cmp x0, #13
; CHECK-NEXT: cset w9, gt
-; CHECK-NEXT: cmp x2, #2 ; =2
+; CHECK-NEXT: cmp x2, #2
; CHECK-NEXT: cset w10, lt
-; CHECK-NEXT: cmp x2, #4 ; =4
+; CHECK-NEXT: cmp x2, #4
; CHECK-NEXT: cset w11, gt
; CHECK-NEXT: and w8, w8, w9
; CHECK-NEXT: and w9, w10, w11
; CHECK-NEXT: orr w8, w8, w9
-; CHECK-NEXT: cmp w8, #0 ; =0
+; CHECK-NEXT: cmp w8, #0
; CHECK-NEXT: csel x0, xzr, x3, ne
; CHECK-NEXT: ret
%c0 = icmp slt i64 %v1, 0
@@ -526,12 +526,12 @@
define i64 @select_noccmp2(i64 %v1, i64 %v2, i64 %v3, i64 %r) {
; CHECK-LABEL: select_noccmp2:
; CHECK: ; %bb.0:
-; CHECK-NEXT: cmp x0, #0 ; =0
+; CHECK-NEXT: cmp x0, #0
; CHECK-NEXT: cset w8, lt
-; CHECK-NEXT: cmp x0, #13 ; =13
+; CHECK-NEXT: cmp x0, #13
; CHECK-NEXT: cset w9, gt
; CHECK-NEXT: orr w8, w8, w9
-; CHECK-NEXT: cmp w8, #0 ; =0
+; CHECK-NEXT: cmp w8, #0
; CHECK-NEXT: csel x0, xzr, x3, ne
; CHECK-NEXT: sbfx w8, w8, #0, #1
; CHECK-NEXT: adrp x9, _g@PAGE
@@ -551,17 +551,17 @@
define i32 @select_noccmp3(i32 %v0, i32 %v1, i32 %v2) {
; CHECK-LABEL: select_noccmp3:
; CHECK: ; %bb.0:
-; CHECK-NEXT: cmp w0, #0 ; =0
+; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: cset w8, lt
-; CHECK-NEXT: cmp w0, #13 ; =13
+; CHECK-NEXT: cmp w0, #13
; CHECK-NEXT: cset w9, gt
-; CHECK-NEXT: cmp w0, #22 ; =22
+; CHECK-NEXT: cmp w0, #22
; CHECK-NEXT: cset w10, lt
-; CHECK-NEXT: cmp w0, #44 ; =44
+; CHECK-NEXT: cmp w0, #44
; CHECK-NEXT: cset w11, gt
-; CHECK-NEXT: cmp w0, #99 ; =99
+; CHECK-NEXT: cmp w0, #99
; CHECK-NEXT: cset w12, eq
-; CHECK-NEXT: cmp w0, #77 ; =77
+; CHECK-NEXT: cmp w0, #77
; CHECK-NEXT: cset w13, eq
; CHECK-NEXT: orr w8, w8, w9
; CHECK-NEXT: orr w9, w10, w11
@@ -787,7 +787,7 @@
define i32 @f128_select_and_olt_oge(fp128 %v0, fp128 %v1, fp128 %v2, fp128 %v3, i32 %a, i32 %b) #0 {
; CHECK-LABEL: f128_select_and_olt_oge:
; CHECK: ; %bb.0:
-; CHECK-NEXT: sub sp, sp, #80 ; =80
+; CHECK-NEXT: sub sp, sp, #80
; CHECK-NEXT: stp x22, x21, [sp, #32] ; 16-byte Folded Spill
; CHECK-NEXT: stp x20, x19, [sp, #48] ; 16-byte Folded Spill
; CHECK-NEXT: stp x29, x30, [sp, #64] ; 16-byte Folded Spill
@@ -795,18 +795,18 @@
; CHECK-NEXT: mov x20, x0
; CHECK-NEXT: stp q2, q3, [sp] ; 32-byte Folded Spill
; CHECK-NEXT: bl ___lttf2
-; CHECK-NEXT: cmp w0, #0 ; =0
+; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: cset w21, lt
; CHECK-NEXT: ldp q0, q1, [sp] ; 32-byte Folded Reload
; CHECK-NEXT: bl ___getf2
-; CHECK-NEXT: cmp w0, #0 ; =0
+; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: cset w8, ge
; CHECK-NEXT: tst w8, w21
; CHECK-NEXT: csel w0, w20, w19, ne
; CHECK-NEXT: ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
; CHECK-NEXT: ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
; CHECK-NEXT: ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
-; CHECK-NEXT: add sp, sp, #80 ; =80
+; CHECK-NEXT: add sp, sp, #80
; CHECK-NEXT: ret
%c0 = fcmp olt fp128 %v0, %v1
%c1 = fcmp oge fp128 %v2, %v3
@@ -820,7 +820,7 @@
define i32 @deep_or(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %x, i32 %y) {
; CHECK-LABEL: deep_or:
; CHECK: ; %bb.0:
-; CHECK-NEXT: cmp w2, #20 ; =20
+; CHECK-NEXT: cmp w2, #20
; CHECK-NEXT: ccmp w2, #15, #4, ne
; CHECK-NEXT: ccmp w1, #0, #4, eq
; CHECK-NEXT: ccmp w0, #0, #4, ne
@@ -842,7 +842,7 @@
define i32 @deep_or1(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %x, i32 %y) {
; CHECK-LABEL: deep_or1:
; CHECK: ; %bb.0:
-; CHECK-NEXT: cmp w2, #20 ; =20
+; CHECK-NEXT: cmp w2, #20
; CHECK-NEXT: ccmp w2, #15, #4, ne
; CHECK-NEXT: ccmp w0, #0, #4, eq
; CHECK-NEXT: ccmp w1, #0, #4, ne
@@ -864,7 +864,7 @@
define i32 @deep_or2(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %x, i32 %y) {
; CHECK-LABEL: deep_or2:
; CHECK: ; %bb.0:
-; CHECK-NEXT: cmp w2, #20 ; =20
+; CHECK-NEXT: cmp w2, #20
; CHECK-NEXT: ccmp w2, #15, #4, ne
; CHECK-NEXT: ccmp w1, #0, #4, eq
; CHECK-NEXT: ccmp w0, #0, #4, ne
diff --git a/llvm/test/CodeGen/AArch64/arm64-fp128.ll b/llvm/test/CodeGen/AArch64/arm64-fp128.ll
--- a/llvm/test/CodeGen/AArch64/arm64-fp128.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fp128.ll
@@ -74,7 +74,7 @@
define dso_local void @test_fptosi() {
; CHECK-LABEL: test_fptosi:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #32 // =32
+; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: .cfi_offset w30, -16
@@ -89,7 +89,7 @@
; CHECK-NEXT: adrp x8, var64
; CHECK-NEXT: str x0, [x8, :lo12:var64]
; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-NEXT: add sp, sp, #32 // =32
+; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%val = load fp128, fp128* @lhs, align 16
@@ -105,7 +105,7 @@
define dso_local void @test_fptoui() {
; CHECK-LABEL: test_fptoui:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #32 // =32
+; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: .cfi_offset w30, -16
@@ -120,7 +120,7 @@
; CHECK-NEXT: adrp x8, var64
; CHECK-NEXT: str x0, [x8, :lo12:var64]
; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-NEXT: add sp, sp, #32 // =32
+; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%val = load fp128, fp128* @lhs, align 16
@@ -204,7 +204,7 @@
; CHECK-NEXT: adrp x8, rhs
; CHECK-NEXT: ldr q1, [x8, :lo12:rhs]
; CHECK-NEXT: bl __letf2
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: cset w0, le
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
@@ -230,7 +230,7 @@
; CHECK-NEXT: adrp x8, rhs
; CHECK-NEXT: ldr q1, [x8, :lo12:rhs]
; CHECK-NEXT: bl __letf2
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: cset w0, gt
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
@@ -246,7 +246,7 @@
define dso_local i1 @test_setcc3() {
; CHECK-LABEL: test_setcc3:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #48 // =48
+; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: stp x30, x19, [sp, #32] // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: .cfi_offset w19, -8
@@ -257,15 +257,15 @@
; CHECK-NEXT: ldr q1, [x8, :lo12:rhs]
; CHECK-NEXT: stp q1, q0, [sp] // 32-byte Folded Spill
; CHECK-NEXT: bl __eqtf2
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: cset w19, eq
; CHECK-NEXT: ldp q1, q0, [sp] // 32-byte Folded Reload
; CHECK-NEXT: bl __unordtf2
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: cset w8, ne
; CHECK-NEXT: orr w0, w8, w19
; CHECK-NEXT: ldp x30, x19, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT: add sp, sp, #48 // =48
+; CHECK-NEXT: add sp, sp, #48
; CHECK-NEXT: ret
%lhs = load fp128, fp128* @lhs, align 16
@@ -288,7 +288,7 @@
; CHECK-NEXT: adrp x8, rhs
; CHECK-NEXT: ldr q1, [x8, :lo12:rhs]
; CHECK-NEXT: bl __lttf2
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: b.ge .LBB11_2
; CHECK-NEXT: // %bb.1: // %iftrue
; CHECK-NEXT: mov w0, #42
@@ -336,7 +336,7 @@
define dso_local void @test_round() {
; CHECK-LABEL: test_round:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #32 // =32
+; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: .cfi_offset w30, -16
@@ -355,7 +355,7 @@
; CHECK-NEXT: adrp x8, vardouble
; CHECK-NEXT: str d0, [x8, :lo12:vardouble]
; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-NEXT: add sp, sp, #32 // =32
+; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%val = load fp128, fp128* @lhs, align 16
diff --git a/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll b/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll
--- a/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll
@@ -233,7 +233,7 @@
; CHECK-LABEL: memset_8_stack:
; CHECK: mov x8, #-6148914691236517206
; CHECK-NEXT: stp x30, x8, [sp, #-16]!
-; CHECK-NEXT: add x0, sp, #8 // =8
+; CHECK-NEXT: add x0, sp, #8
; CHECK-NEXT: bl something
%buf = alloca [8 x i8], align 1
%cast = bitcast [8 x i8]* %buf to i8*
diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll b/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll
--- a/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll
@@ -1237,7 +1237,7 @@
; CHECK-NEXT: mov v1.h[2], v0.h[2]
; CHECK-NEXT: mov v1.h[3], v0.h[3]
; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: add sp, sp, #16 // =16
+; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
%tmp = extractelement <8 x i16> %x, i32 %idx
%tmp2 = insertelement <4 x i16> undef, i16 %tmp, i32 0
@@ -1253,11 +1253,11 @@
define <4 x i16> @test_extracts_inserts_varidx_insert(<8 x i16> %x, i32 %idx) {
; CHECK-LABEL: test_extracts_inserts_varidx_insert:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16 // =16
+; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
; CHECK-NEXT: and x8, x0, #0x3
-; CHECK-NEXT: add x9, sp, #8 // =8
+; CHECK-NEXT: add x9, sp, #8
; CHECK-NEXT: bfi x9, x8, #1, #2
; CHECK-NEXT: str h0, [x9]
; CHECK-NEXT: ldr d1, [sp, #8]
@@ -1265,7 +1265,7 @@
; CHECK-NEXT: mov v1.h[2], v0.h[2]
; CHECK-NEXT: mov v1.h[3], v0.h[3]
; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: add sp, sp, #16 // =16
+; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
%tmp = extractelement <8 x i16> %x, i32 0
%tmp2 = insertelement <4 x i16> undef, i16 %tmp, i32 %idx
diff --git a/llvm/test/CodeGen/AArch64/arm64-nvcast.ll b/llvm/test/CodeGen/AArch64/arm64-nvcast.ll
--- a/llvm/test/CodeGen/AArch64/arm64-nvcast.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-nvcast.ll
@@ -3,7 +3,7 @@
define void @test(float * %p1, i32 %v1) {
; CHECK-LABEL: test:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: sub sp, sp, #16 ; =16
+; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ; kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: fmov.2d v0, #2.00000000
@@ -13,7 +13,7 @@
; CHECK-NEXT: bfi x9, x8, #2, #2
; CHECK-NEXT: ldr s0, [x9]
; CHECK-NEXT: str s0, [x0]
-; CHECK-NEXT: add sp, sp, #16 ; =16
+; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
entry:
%v2 = extractelement <3 x float> , i32 %v1
@@ -24,7 +24,7 @@
define void @test2(float * %p1, i32 %v1) {
; CHECK-LABEL: test2:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: sub sp, sp, #16 ; =16
+; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ; kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: movi.16b v0, #63
@@ -34,7 +34,7 @@
; CHECK-NEXT: bfi x9, x8, #2, #2
; CHECK-NEXT: ldr s0, [x9]
; CHECK-NEXT: str s0, [x0]
-; CHECK-NEXT: add sp, sp, #16 ; =16
+; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
entry:
%v2 = extractelement <3 x float> , i32 %v1
diff --git a/llvm/test/CodeGen/AArch64/arm64-popcnt.ll b/llvm/test/CodeGen/AArch64/arm64-popcnt.ll
--- a/llvm/test/CodeGen/AArch64/arm64-popcnt.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-popcnt.ll
@@ -172,7 +172,7 @@
; CHECK-NEXT: cnt.8b v0, v0
; CHECK-NEXT: uaddlv.8b h0, v0
; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: cmp x8, #1 // =1
+; CHECK-NEXT: cmp x8, #1
; CHECK-NEXT: cset w0, eq
; CHECK-NEXT: ret
;
@@ -190,7 +190,7 @@
; CHECK-NONEON-NEXT: mov x9, #72340172838076673
; CHECK-NONEON-NEXT: mul x8, x8, x9
; CHECK-NONEON-NEXT: lsr x8, x8, #56
-; CHECK-NONEON-NEXT: cmp x8, #1 // =1
+; CHECK-NONEON-NEXT: cmp x8, #1
; CHECK-NONEON-NEXT: cset w0, eq
; CHECK-NONEON-NEXT: ret
%count = tail call i64 @llvm.ctpop.i64(i64 %x)
@@ -206,7 +206,7 @@
; CHECK-NEXT: cnt.8b v0, v0
; CHECK-NEXT: uaddlv.8b h0, v0
; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: cmp x8, #1 // =1
+; CHECK-NEXT: cmp x8, #1
; CHECK-NEXT: cset w0, ne
; CHECK-NEXT: ret
;
@@ -224,7 +224,7 @@
; CHECK-NONEON-NEXT: mov x9, #72340172838076673
; CHECK-NONEON-NEXT: mul x8, x8, x9
; CHECK-NONEON-NEXT: lsr x8, x8, #56
-; CHECK-NONEON-NEXT: cmp x8, #1 // =1
+; CHECK-NONEON-NEXT: cmp x8, #1
; CHECK-NONEON-NEXT: cset w0, ne
; CHECK-NONEON-NEXT: ret
%count = tail call i64 @llvm.ctpop.i64(i64 %x)
diff --git a/llvm/test/CodeGen/AArch64/arm64-rev.ll b/llvm/test/CodeGen/AArch64/arm64-rev.ll
--- a/llvm/test/CodeGen/AArch64/arm64-rev.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-rev.ll
@@ -535,7 +535,7 @@
; CHECK-LABEL: test_vrev64:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ldr q0, [x0]
-; CHECK-NEXT: add x8, x1, #2 // =2
+; CHECK-NEXT: add x8, x1, #2
; CHECK-NEXT: st1.h { v0 }[5], [x8]
; CHECK-NEXT: st1.h { v0 }[6], [x1]
; CHECK-NEXT: ret
@@ -543,7 +543,7 @@
; GISEL-LABEL: test_vrev64:
; GISEL: // %bb.0: // %entry
; GISEL-NEXT: ldr q0, [x0]
-; GISEL-NEXT: add x8, x1, #2 // =2
+; GISEL-NEXT: add x8, x1, #2
; GISEL-NEXT: st1.h { v0 }[6], [x1]
; GISEL-NEXT: st1.h { v0 }[5], [x8]
; GISEL-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll b/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
--- a/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
@@ -12,26 +12,26 @@
; ENABLE-NEXT: cmp w0, w1
; ENABLE-NEXT: b.ge LBB0_2
; ENABLE-NEXT: ; %bb.1: ; %true
-; ENABLE-NEXT: sub sp, sp, #32 ; =32
+; ENABLE-NEXT: sub sp, sp, #32
; ENABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; ENABLE-NEXT: add x29, sp, #16 ; =16
+; ENABLE-NEXT: add x29, sp, #16
; ENABLE-NEXT: .cfi_def_cfa w29, 16
; ENABLE-NEXT: .cfi_offset w30, -8
; ENABLE-NEXT: .cfi_offset w29, -16
; ENABLE-NEXT: stur w0, [x29, #-4]
-; ENABLE-NEXT: sub x1, x29, #4 ; =4
+; ENABLE-NEXT: sub x1, x29, #4
; ENABLE-NEXT: mov w0, wzr
; ENABLE-NEXT: bl _doSomething
; ENABLE-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
-; ENABLE-NEXT: add sp, sp, #32 ; =32
+; ENABLE-NEXT: add sp, sp, #32
; ENABLE-NEXT: LBB0_2: ; %false
; ENABLE-NEXT: ret
;
; DISABLE-LABEL: foo:
; DISABLE: ; %bb.0:
-; DISABLE-NEXT: sub sp, sp, #32 ; =32
+; DISABLE-NEXT: sub sp, sp, #32
; DISABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; DISABLE-NEXT: add x29, sp, #16 ; =16
+; DISABLE-NEXT: add x29, sp, #16
; DISABLE-NEXT: .cfi_def_cfa w29, 16
; DISABLE-NEXT: .cfi_offset w30, -8
; DISABLE-NEXT: .cfi_offset w29, -16
@@ -39,12 +39,12 @@
; DISABLE-NEXT: b.ge LBB0_2
; DISABLE-NEXT: ; %bb.1: ; %true
; DISABLE-NEXT: stur w0, [x29, #-4]
-; DISABLE-NEXT: sub x1, x29, #4 ; =4
+; DISABLE-NEXT: sub x1, x29, #4
; DISABLE-NEXT: mov w0, wzr
; DISABLE-NEXT: bl _doSomething
; DISABLE-NEXT: LBB0_2: ; %false
; DISABLE-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
-; DISABLE-NEXT: add sp, sp, #32 ; =32
+; DISABLE-NEXT: add sp, sp, #32
; DISABLE-NEXT: ret
%tmp = alloca i32, align 4
%tmp2 = icmp slt i32 %a, %b
@@ -73,7 +73,7 @@
; ENABLE-NEXT: ; %bb.1: ; %for.body.preheader
; ENABLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
; ENABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; ENABLE-NEXT: add x29, sp, #16 ; =16
+; ENABLE-NEXT: add x29, sp, #16
; ENABLE-NEXT: .cfi_def_cfa w29, 16
; ENABLE-NEXT: .cfi_offset w30, -8
; ENABLE-NEXT: .cfi_offset w29, -16
@@ -84,7 +84,7 @@
; ENABLE-NEXT: LBB1_2: ; %for.body
; ENABLE-NEXT: ; =>This Inner Loop Header: Depth=1
; ENABLE-NEXT: bl _something
-; ENABLE-NEXT: subs w20, w20, #1 ; =1
+; ENABLE-NEXT: subs w20, w20, #1
; ENABLE-NEXT: add w19, w0, w19
; ENABLE-NEXT: b.ne LBB1_2
; ENABLE-NEXT: ; %bb.3: ; %for.end
@@ -100,7 +100,7 @@
; DISABLE: ; %bb.0: ; %entry
; DISABLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
; DISABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; DISABLE-NEXT: add x29, sp, #16 ; =16
+; DISABLE-NEXT: add x29, sp, #16
; DISABLE-NEXT: .cfi_def_cfa w29, 16
; DISABLE-NEXT: .cfi_offset w30, -8
; DISABLE-NEXT: .cfi_offset w29, -16
@@ -113,7 +113,7 @@
; DISABLE-NEXT: LBB1_2: ; %for.body
; DISABLE-NEXT: ; =>This Inner Loop Header: Depth=1
; DISABLE-NEXT: bl _something
-; DISABLE-NEXT: subs w20, w20, #1 ; =1
+; DISABLE-NEXT: subs w20, w20, #1
; DISABLE-NEXT: add w19, w0, w19
; DISABLE-NEXT: b.ne LBB1_2
; DISABLE-NEXT: ; %bb.3: ; %for.end
@@ -160,7 +160,7 @@
; ENABLE: ; %bb.0: ; %entry
; ENABLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
; ENABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; ENABLE-NEXT: add x29, sp, #16 ; =16
+; ENABLE-NEXT: add x29, sp, #16
; ENABLE-NEXT: .cfi_def_cfa w29, 16
; ENABLE-NEXT: .cfi_offset w30, -8
; ENABLE-NEXT: .cfi_offset w29, -16
@@ -171,7 +171,7 @@
; ENABLE-NEXT: LBB2_1: ; %for.body
; ENABLE-NEXT: ; =>This Inner Loop Header: Depth=1
; ENABLE-NEXT: bl _something
-; ENABLE-NEXT: subs w20, w20, #1 ; =1
+; ENABLE-NEXT: subs w20, w20, #1
; ENABLE-NEXT: add w19, w0, w19
; ENABLE-NEXT: b.ne LBB2_1
; ENABLE-NEXT: ; %bb.2: ; %for.end
@@ -184,7 +184,7 @@
; DISABLE: ; %bb.0: ; %entry
; DISABLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
; DISABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; DISABLE-NEXT: add x29, sp, #16 ; =16
+; DISABLE-NEXT: add x29, sp, #16
; DISABLE-NEXT: .cfi_def_cfa w29, 16
; DISABLE-NEXT: .cfi_offset w30, -8
; DISABLE-NEXT: .cfi_offset w29, -16
@@ -195,7 +195,7 @@
; DISABLE-NEXT: LBB2_1: ; %for.body
; DISABLE-NEXT: ; =>This Inner Loop Header: Depth=1
; DISABLE-NEXT: bl _something
-; DISABLE-NEXT: subs w20, w20, #1 ; =1
+; DISABLE-NEXT: subs w20, w20, #1
; DISABLE-NEXT: add w19, w0, w19
; DISABLE-NEXT: b.ne LBB2_1
; DISABLE-NEXT: ; %bb.2: ; %for.end
@@ -228,7 +228,7 @@
; ENABLE-NEXT: ; %bb.1: ; %for.body.preheader
; ENABLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
; ENABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; ENABLE-NEXT: add x29, sp, #16 ; =16
+; ENABLE-NEXT: add x29, sp, #16
; ENABLE-NEXT: .cfi_def_cfa w29, 16
; ENABLE-NEXT: .cfi_offset w30, -8
; ENABLE-NEXT: .cfi_offset w29, -16
@@ -239,7 +239,7 @@
; ENABLE-NEXT: LBB3_2: ; %for.body
; ENABLE-NEXT: ; =>This Inner Loop Header: Depth=1
; ENABLE-NEXT: bl _something
-; ENABLE-NEXT: subs w20, w20, #1 ; =1
+; ENABLE-NEXT: subs w20, w20, #1
; ENABLE-NEXT: add w19, w0, w19
; ENABLE-NEXT: b.ne LBB3_2
; ENABLE-NEXT: ; %bb.3: ; %for.end
@@ -256,7 +256,7 @@
; DISABLE: ; %bb.0: ; %entry
; DISABLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
; DISABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; DISABLE-NEXT: add x29, sp, #16 ; =16
+; DISABLE-NEXT: add x29, sp, #16
; DISABLE-NEXT: .cfi_def_cfa w29, 16
; DISABLE-NEXT: .cfi_offset w30, -8
; DISABLE-NEXT: .cfi_offset w29, -16
@@ -269,7 +269,7 @@
; DISABLE-NEXT: LBB3_2: ; %for.body
; DISABLE-NEXT: ; =>This Inner Loop Header: Depth=1
; DISABLE-NEXT: bl _something
-; DISABLE-NEXT: subs w20, w20, #1 ; =1
+; DISABLE-NEXT: subs w20, w20, #1
; DISABLE-NEXT: add w19, w0, w19
; DISABLE-NEXT: b.ne LBB3_2
; DISABLE-NEXT: ; %bb.3: ; %for.end
@@ -320,14 +320,14 @@
; ENABLE-NEXT: ; %bb.1: ; %if.then
; ENABLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
; ENABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; ENABLE-NEXT: add x29, sp, #16 ; =16
+; ENABLE-NEXT: add x29, sp, #16
; ENABLE-NEXT: bl _somethingElse
; ENABLE-NEXT: mov w19, wzr
; ENABLE-NEXT: mov w20, #10
; ENABLE-NEXT: LBB4_2: ; %for.body
; ENABLE-NEXT: ; =>This Inner Loop Header: Depth=1
; ENABLE-NEXT: bl _something
-; ENABLE-NEXT: subs w20, w20, #1 ; =1
+; ENABLE-NEXT: subs w20, w20, #1
; ENABLE-NEXT: add w19, w0, w19
; ENABLE-NEXT: b.ne LBB4_2
; ENABLE-NEXT: ; %bb.3: ; %for.end
@@ -343,7 +343,7 @@
; DISABLE: ; %bb.0: ; %entry
; DISABLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
; DISABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; DISABLE-NEXT: add x29, sp, #16 ; =16
+; DISABLE-NEXT: add x29, sp, #16
; DISABLE-NEXT: cbz w0, LBB4_4
; DISABLE-NEXT: ; %bb.1: ; %if.then
; DISABLE-NEXT: bl _somethingElse
@@ -352,7 +352,7 @@
; DISABLE-NEXT: LBB4_2: ; %for.body
; DISABLE-NEXT: ; =>This Inner Loop Header: Depth=1
; DISABLE-NEXT: bl _something
-; DISABLE-NEXT: subs w20, w20, #1 ; =1
+; DISABLE-NEXT: subs w20, w20, #1
; DISABLE-NEXT: add w19, w0, w19
; DISABLE-NEXT: b.ne LBB4_2
; DISABLE-NEXT: ; %bb.3: ; %for.end
@@ -415,23 +415,23 @@
; ENABLE: ; %bb.0: ; %entry
; ENABLE-NEXT: cbz w0, LBB6_4
; ENABLE-NEXT: ; %bb.1: ; %if.then
-; ENABLE-NEXT: sub sp, sp, #16 ; =16
-; ENABLE-NEXT: add x8, sp, #16 ; =16
-; ENABLE-NEXT: cmp w1, #1 ; =1
+; ENABLE-NEXT: sub sp, sp, #16
+; ENABLE-NEXT: add x8, sp, #16
+; ENABLE-NEXT: cmp w1, #1
; ENABLE-NEXT: str x8, [sp, #8]
; ENABLE-NEXT: mov w0, wzr
; ENABLE-NEXT: b.lt LBB6_3
; ENABLE-NEXT: LBB6_2: ; %for.body
; ENABLE-NEXT: ; =>This Inner Loop Header: Depth=1
; ENABLE-NEXT: ldr x8, [sp, #8]
-; ENABLE-NEXT: add x9, x8, #8 ; =8
+; ENABLE-NEXT: add x9, x8, #8
; ENABLE-NEXT: str x9, [sp, #8]
; ENABLE-NEXT: ldr w8, [x8]
-; ENABLE-NEXT: subs w1, w1, #1 ; =1
+; ENABLE-NEXT: subs w1, w1, #1
; ENABLE-NEXT: add w0, w0, w8
; ENABLE-NEXT: b.ne LBB6_2
; ENABLE-NEXT: LBB6_3: ; %for.end
-; ENABLE-NEXT: add sp, sp, #16 ; =16
+; ENABLE-NEXT: add sp, sp, #16
; ENABLE-NEXT: ret
; ENABLE-NEXT: LBB6_4: ; %if.else
; ENABLE-NEXT: lsl w0, w1, #1
@@ -439,29 +439,29 @@
;
; DISABLE-LABEL: variadicFunc:
; DISABLE: ; %bb.0: ; %entry
-; DISABLE-NEXT: sub sp, sp, #16 ; =16
+; DISABLE-NEXT: sub sp, sp, #16
; DISABLE-NEXT: cbz w0, LBB6_4
; DISABLE-NEXT: ; %bb.1: ; %if.then
-; DISABLE-NEXT: add x8, sp, #16 ; =16
-; DISABLE-NEXT: cmp w1, #1 ; =1
+; DISABLE-NEXT: add x8, sp, #16
+; DISABLE-NEXT: cmp w1, #1
; DISABLE-NEXT: str x8, [sp, #8]
; DISABLE-NEXT: mov w0, wzr
; DISABLE-NEXT: b.lt LBB6_3
; DISABLE-NEXT: LBB6_2: ; %for.body
; DISABLE-NEXT: ; =>This Inner Loop Header: Depth=1
; DISABLE-NEXT: ldr x8, [sp, #8]
-; DISABLE-NEXT: add x9, x8, #8 ; =8
+; DISABLE-NEXT: add x9, x8, #8
; DISABLE-NEXT: str x9, [sp, #8]
; DISABLE-NEXT: ldr w8, [x8]
-; DISABLE-NEXT: subs w1, w1, #1 ; =1
+; DISABLE-NEXT: subs w1, w1, #1
; DISABLE-NEXT: add w0, w0, w8
; DISABLE-NEXT: b.ne LBB6_2
; DISABLE-NEXT: LBB6_3: ; %if.end
-; DISABLE-NEXT: add sp, sp, #16 ; =16
+; DISABLE-NEXT: add sp, sp, #16
; DISABLE-NEXT: ret
; DISABLE-NEXT: LBB6_4: ; %if.else
; DISABLE-NEXT: lsl w0, w1, #1
-; DISABLE-NEXT: add sp, sp, #16 ; =16
+; DISABLE-NEXT: add sp, sp, #16
; DISABLE-NEXT: ret
entry:
%ap = alloca i8*, align 8
@@ -514,9 +514,9 @@
; ENABLE-NEXT: mov w8, #10
; ENABLE-NEXT: LBB7_2: ; %for.body
; ENABLE-NEXT: ; =>This Inner Loop Header: Depth=1
-; ENABLE-NEXT: subs w8, w8, #1 ; =1
+; ENABLE-NEXT: subs w8, w8, #1
; ENABLE-NEXT: ; InlineAsm Start
-; ENABLE-NEXT: add x19, x19, #1 ; =1
+; ENABLE-NEXT: add x19, x19, #1
; ENABLE-NEXT: ; InlineAsm End
; ENABLE-NEXT: b.ne LBB7_2
; ENABLE-NEXT: ; %bb.3:
@@ -538,9 +538,9 @@
; DISABLE-NEXT: mov w8, #10
; DISABLE-NEXT: LBB7_2: ; %for.body
; DISABLE-NEXT: ; =>This Inner Loop Header: Depth=1
-; DISABLE-NEXT: subs w8, w8, #1 ; =1
+; DISABLE-NEXT: subs w8, w8, #1
; DISABLE-NEXT: ; InlineAsm Start
-; DISABLE-NEXT: add x19, x19, #1 ; =1
+; DISABLE-NEXT: add x19, x19, #1
; DISABLE-NEXT: ; InlineAsm End
; DISABLE-NEXT: b.ne LBB7_2
; DISABLE-NEXT: ; %bb.3:
@@ -578,9 +578,9 @@
; ENABLE-NEXT: ; kill: def $w1 killed $w1 def $x1
; ENABLE-NEXT: cbz w0, LBB8_2
; ENABLE-NEXT: ; %bb.1: ; %if.then
-; ENABLE-NEXT: sub sp, sp, #64 ; =64
+; ENABLE-NEXT: sub sp, sp, #64
; ENABLE-NEXT: stp x29, x30, [sp, #48] ; 16-byte Folded Spill
-; ENABLE-NEXT: add x29, sp, #48 ; =48
+; ENABLE-NEXT: add x29, sp, #48
; ENABLE-NEXT: .cfi_def_cfa w29, 16
; ENABLE-NEXT: .cfi_offset w30, -8
; ENABLE-NEXT: .cfi_offset w29, -16
@@ -591,7 +591,7 @@
; ENABLE-NEXT: bl _someVariadicFunc
; ENABLE-NEXT: lsl w0, w0, #3
; ENABLE-NEXT: ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
-; ENABLE-NEXT: add sp, sp, #64 ; =64
+; ENABLE-NEXT: add sp, sp, #64
; ENABLE-NEXT: ret
; ENABLE-NEXT: LBB8_2: ; %if.else
; ENABLE-NEXT: lsl w0, w1, #1
@@ -599,9 +599,9 @@
;
; DISABLE-LABEL: callVariadicFunc:
; DISABLE: ; %bb.0: ; %entry
-; DISABLE-NEXT: sub sp, sp, #64 ; =64
+; DISABLE-NEXT: sub sp, sp, #64
; DISABLE-NEXT: stp x29, x30, [sp, #48] ; 16-byte Folded Spill
-; DISABLE-NEXT: add x29, sp, #48 ; =48
+; DISABLE-NEXT: add x29, sp, #48
; DISABLE-NEXT: .cfi_def_cfa w29, 16
; DISABLE-NEXT: .cfi_offset w30, -8
; DISABLE-NEXT: .cfi_offset w29, -16
@@ -619,7 +619,7 @@
; DISABLE-NEXT: lsl w0, w1, #1
; DISABLE-NEXT: LBB8_3: ; %if.end
; DISABLE-NEXT: ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
-; DISABLE-NEXT: add sp, sp, #64 ; =64
+; DISABLE-NEXT: add sp, sp, #64
; DISABLE-NEXT: ret
entry:
%tobool = icmp eq i32 %cond, 0
@@ -703,7 +703,7 @@
; ENABLE: ; %bb.0: ; %entry
; ENABLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
; ENABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; ENABLE-NEXT: add x29, sp, #16 ; =16
+; ENABLE-NEXT: add x29, sp, #16
; ENABLE-NEXT: .cfi_def_cfa w29, 16
; ENABLE-NEXT: .cfi_offset w30, -8
; ENABLE-NEXT: .cfi_offset w29, -16
@@ -711,7 +711,7 @@
; ENABLE-NEXT: .cfi_offset w20, -32
; ENABLE-NEXT: cbnz wzr, LBB10_3
; ENABLE-NEXT: ; %bb.1: ; %if.then
-; ENABLE-NEXT: sub x19, sp, #16 ; =16
+; ENABLE-NEXT: sub x19, sp, #16
; ENABLE-NEXT: mov sp, x19
; ENABLE-NEXT: mov w20, wzr
; ENABLE-NEXT: LBB10_2: ; %for.body
@@ -721,7 +721,7 @@
; ENABLE-NEXT: str w20, [x19]
; ENABLE-NEXT: b LBB10_2
; ENABLE-NEXT: LBB10_3: ; %if.end
-; ENABLE-NEXT: sub sp, x29, #16 ; =16
+; ENABLE-NEXT: sub sp, x29, #16
; ENABLE-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
; ENABLE-NEXT: ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
; ENABLE-NEXT: ret
@@ -730,7 +730,7 @@
; DISABLE: ; %bb.0: ; %entry
; DISABLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
; DISABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; DISABLE-NEXT: add x29, sp, #16 ; =16
+; DISABLE-NEXT: add x29, sp, #16
; DISABLE-NEXT: .cfi_def_cfa w29, 16
; DISABLE-NEXT: .cfi_offset w30, -8
; DISABLE-NEXT: .cfi_offset w29, -16
@@ -738,7 +738,7 @@
; DISABLE-NEXT: .cfi_offset w20, -32
; DISABLE-NEXT: cbnz wzr, LBB10_3
; DISABLE-NEXT: ; %bb.1: ; %if.then
-; DISABLE-NEXT: sub x19, sp, #16 ; =16
+; DISABLE-NEXT: sub x19, sp, #16
; DISABLE-NEXT: mov sp, x19
; DISABLE-NEXT: mov w20, wzr
; DISABLE-NEXT: LBB10_2: ; %for.body
@@ -748,7 +748,7 @@
; DISABLE-NEXT: str w20, [x19]
; DISABLE-NEXT: b LBB10_2
; DISABLE-NEXT: LBB10_3: ; %if.end
-; DISABLE-NEXT: sub sp, x29, #16 ; =16
+; DISABLE-NEXT: sub sp, x29, #16
; DISABLE-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
; DISABLE-NEXT: ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
; DISABLE-NEXT: ret
@@ -776,7 +776,7 @@
; ENABLE: ; %bb.0: ; %entry
; ENABLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
; ENABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; ENABLE-NEXT: add x29, sp, #16 ; =16
+; ENABLE-NEXT: add x29, sp, #16
; ENABLE-NEXT: .cfi_def_cfa w29, 16
; ENABLE-NEXT: .cfi_offset w30, -8
; ENABLE-NEXT: .cfi_offset w29, -16
@@ -784,7 +784,7 @@
; ENABLE-NEXT: .cfi_offset w20, -32
; ENABLE-NEXT: cbnz wzr, LBB11_3
; ENABLE-NEXT: ; %bb.1: ; %if.then
-; ENABLE-NEXT: sub x8, sp, #16 ; =16
+; ENABLE-NEXT: sub x8, sp, #16
; ENABLE-NEXT: mov sp, x8
; ENABLE-NEXT: mov w9, wzr
; ENABLE-NEXT: ; InlineAsm Start
@@ -800,7 +800,7 @@
; ENABLE-NEXT: mov w9, #1
; ENABLE-NEXT: b LBB11_2
; ENABLE-NEXT: LBB11_3: ; %if.end
-; ENABLE-NEXT: sub sp, x29, #16 ; =16
+; ENABLE-NEXT: sub sp, x29, #16
; ENABLE-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
; ENABLE-NEXT: ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
; ENABLE-NEXT: ret
@@ -809,7 +809,7 @@
; DISABLE: ; %bb.0: ; %entry
; DISABLE-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
; DISABLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; DISABLE-NEXT: add x29, sp, #16 ; =16
+; DISABLE-NEXT: add x29, sp, #16
; DISABLE-NEXT: .cfi_def_cfa w29, 16
; DISABLE-NEXT: .cfi_offset w30, -8
; DISABLE-NEXT: .cfi_offset w29, -16
@@ -817,7 +817,7 @@
; DISABLE-NEXT: .cfi_offset w20, -32
; DISABLE-NEXT: cbnz wzr, LBB11_3
; DISABLE-NEXT: ; %bb.1: ; %if.then
-; DISABLE-NEXT: sub x8, sp, #16 ; =16
+; DISABLE-NEXT: sub x8, sp, #16
; DISABLE-NEXT: mov sp, x8
; DISABLE-NEXT: mov w9, wzr
; DISABLE-NEXT: ; InlineAsm Start
@@ -833,7 +833,7 @@
; DISABLE-NEXT: mov w9, #1
; DISABLE-NEXT: b LBB11_2
; DISABLE-NEXT: LBB11_3: ; %if.end
-; DISABLE-NEXT: sub sp, x29, #16 ; =16
+; DISABLE-NEXT: sub sp, x29, #16
; DISABLE-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
; DISABLE-NEXT: ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
; DISABLE-NEXT: ret
@@ -947,7 +947,7 @@
; ENABLE: ; %bb.0:
; ENABLE-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
; ENABLE-NEXT: mov x29, sp
-; ENABLE-NEXT: sub x9, sp, #16 ; =16
+; ENABLE-NEXT: sub x9, sp, #16
; ENABLE-NEXT: and sp, x9, #0xffffffffffffffe0
; ENABLE-NEXT: .cfi_def_cfa w29, 16
; ENABLE-NEXT: .cfi_offset w30, -8
@@ -969,7 +969,7 @@
; DISABLE: ; %bb.0:
; DISABLE-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
; DISABLE-NEXT: mov x29, sp
-; DISABLE-NEXT: sub x9, sp, #16 ; =16
+; DISABLE-NEXT: sub x9, sp, #16
; DISABLE-NEXT: and sp, x9, #0xffffffffffffffe0
; DISABLE-NEXT: .cfi_def_cfa w29, 16
; DISABLE-NEXT: .cfi_offset w30, -8
@@ -1018,8 +1018,8 @@
; ENABLE-NEXT: stp x22, x21, [sp, #48] ; 16-byte Folded Spill
; ENABLE-NEXT: stp x20, x19, [sp, #64] ; 16-byte Folded Spill
; ENABLE-NEXT: stp x29, x30, [sp, #80] ; 16-byte Folded Spill
-; ENABLE-NEXT: add x29, sp, #80 ; =80
-; ENABLE-NEXT: sub x9, sp, #32 ; =32
+; ENABLE-NEXT: add x29, sp, #80
+; ENABLE-NEXT: sub x9, sp, #32
; ENABLE-NEXT: and sp, x9, #0xffffffffffffffe0
; ENABLE-NEXT: .cfi_def_cfa w29, 16
; ENABLE-NEXT: .cfi_offset w30, -8
@@ -1060,7 +1060,7 @@
; ENABLE-NEXT: stp w0, w1, [x2, #4]
; ENABLE-NEXT: stp w16, w11, [x2, #12]
; ENABLE-NEXT: stp w13, w14, [x2, #20]
-; ENABLE-NEXT: sub sp, x29, #80 ; =80
+; ENABLE-NEXT: sub sp, x29, #80
; ENABLE-NEXT: ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
; ENABLE-NEXT: ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
; ENABLE-NEXT: ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
@@ -1077,8 +1077,8 @@
; DISABLE-NEXT: stp x22, x21, [sp, #48] ; 16-byte Folded Spill
; DISABLE-NEXT: stp x20, x19, [sp, #64] ; 16-byte Folded Spill
; DISABLE-NEXT: stp x29, x30, [sp, #80] ; 16-byte Folded Spill
-; DISABLE-NEXT: add x29, sp, #80 ; =80
-; DISABLE-NEXT: sub x9, sp, #32 ; =32
+; DISABLE-NEXT: add x29, sp, #80
+; DISABLE-NEXT: sub x9, sp, #32
; DISABLE-NEXT: and sp, x9, #0xffffffffffffffe0
; DISABLE-NEXT: .cfi_def_cfa w29, 16
; DISABLE-NEXT: .cfi_offset w30, -8
@@ -1119,7 +1119,7 @@
; DISABLE-NEXT: stp w0, w1, [x2, #4]
; DISABLE-NEXT: stp w16, w11, [x2, #12]
; DISABLE-NEXT: stp w13, w14, [x2, #20]
-; DISABLE-NEXT: sub sp, x29, #80 ; =80
+; DISABLE-NEXT: sub sp, x29, #80
; DISABLE-NEXT: ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
; DISABLE-NEXT: ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
; DISABLE-NEXT: ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -1628,12 +1628,12 @@
; CHECK-NEXT: sbcs x11, x12, x14
; CHECK-NEXT: negs x12, x8
; CHECK-NEXT: ngcs x13, x11
-; CHECK-NEXT: cmp x11, #0 // =0
+; CHECK-NEXT: cmp x11, #0
; CHECK-NEXT: csel x2, x12, x8, lt
; CHECK-NEXT: csel x3, x13, x11, lt
; CHECK-NEXT: negs x8, x9
; CHECK-NEXT: ngcs x11, x10
-; CHECK-NEXT: cmp x10, #0 // =0
+; CHECK-NEXT: cmp x10, #0
; CHECK-NEXT: csel x8, x8, x9, lt
; CHECK-NEXT: csel x1, x11, x10, lt
; CHECK-NEXT: fmov d0, x8
diff --git a/llvm/test/CodeGen/AArch64/atomicrmw-O0.ll b/llvm/test/CodeGen/AArch64/atomicrmw-O0.ll
--- a/llvm/test/CodeGen/AArch64/atomicrmw-O0.ll
+++ b/llvm/test/CodeGen/AArch64/atomicrmw-O0.ll
@@ -7,7 +7,7 @@
define i8 @test_rmw_add_8(i8* %dst) {
; NOLSE-LABEL: test_rmw_add_8:
; NOLSE: // %bb.0: // %entry
-; NOLSE-NEXT: sub sp, sp, #32 // =32
+; NOLSE-NEXT: sub sp, sp, #32
; NOLSE-NEXT: .cfi_def_cfa_offset 32
; NOLSE-NEXT: str x0, [sp, #16] // 8-byte Folded Spill
; NOLSE-NEXT: ldrb w8, [x0]
@@ -18,7 +18,7 @@
; NOLSE-NEXT: // Child Loop BB0_2 Depth 2
; NOLSE-NEXT: ldr w9, [sp, #28] // 4-byte Folded Reload
; NOLSE-NEXT: ldr x11, [sp, #16] // 8-byte Folded Reload
-; NOLSE-NEXT: add w12, w9, #1 // =1
+; NOLSE-NEXT: add w12, w9, #1
; NOLSE-NEXT: .LBB0_2: // %atomicrmw.start
; NOLSE-NEXT: // Parent Loop BB0_1 Depth=1
; NOLSE-NEXT: // => This Inner Loop Header: Depth=2
@@ -34,13 +34,13 @@
; NOLSE-NEXT: subs w9, w8, w9, uxtb
; NOLSE-NEXT: cset w9, eq
; NOLSE-NEXT: str w8, [sp, #12] // 4-byte Folded Spill
-; NOLSE-NEXT: subs w9, w9, #1 // =1
+; NOLSE-NEXT: subs w9, w9, #1
; NOLSE-NEXT: str w8, [sp, #28] // 4-byte Folded Spill
; NOLSE-NEXT: b.ne .LBB0_1
; NOLSE-NEXT: b .LBB0_5
; NOLSE-NEXT: .LBB0_5: // %atomicrmw.end
; NOLSE-NEXT: ldr w0, [sp, #12] // 4-byte Folded Reload
-; NOLSE-NEXT: add sp, sp, #32 // =32
+; NOLSE-NEXT: add sp, sp, #32
; NOLSE-NEXT: ret
;
; LSE-LABEL: test_rmw_add_8:
@@ -56,7 +56,7 @@
define i16 @test_rmw_add_16(i16* %dst) {
; NOLSE-LABEL: test_rmw_add_16:
; NOLSE: // %bb.0: // %entry
-; NOLSE-NEXT: sub sp, sp, #32 // =32
+; NOLSE-NEXT: sub sp, sp, #32
; NOLSE-NEXT: .cfi_def_cfa_offset 32
; NOLSE-NEXT: str x0, [sp, #16] // 8-byte Folded Spill
; NOLSE-NEXT: ldrh w8, [x0]
@@ -67,7 +67,7 @@
; NOLSE-NEXT: // Child Loop BB1_2 Depth 2
; NOLSE-NEXT: ldr w9, [sp, #28] // 4-byte Folded Reload
; NOLSE-NEXT: ldr x11, [sp, #16] // 8-byte Folded Reload
-; NOLSE-NEXT: add w12, w9, #1 // =1
+; NOLSE-NEXT: add w12, w9, #1
; NOLSE-NEXT: .LBB1_2: // %atomicrmw.start
; NOLSE-NEXT: // Parent Loop BB1_1 Depth=1
; NOLSE-NEXT: // => This Inner Loop Header: Depth=2
@@ -83,13 +83,13 @@
; NOLSE-NEXT: subs w9, w8, w9, uxth
; NOLSE-NEXT: cset w9, eq
; NOLSE-NEXT: str w8, [sp, #12] // 4-byte Folded Spill
-; NOLSE-NEXT: subs w9, w9, #1 // =1
+; NOLSE-NEXT: subs w9, w9, #1
; NOLSE-NEXT: str w8, [sp, #28] // 4-byte Folded Spill
; NOLSE-NEXT: b.ne .LBB1_1
; NOLSE-NEXT: b .LBB1_5
; NOLSE-NEXT: .LBB1_5: // %atomicrmw.end
; NOLSE-NEXT: ldr w0, [sp, #12] // 4-byte Folded Reload
-; NOLSE-NEXT: add sp, sp, #32 // =32
+; NOLSE-NEXT: add sp, sp, #32
; NOLSE-NEXT: ret
;
; LSE-LABEL: test_rmw_add_16:
@@ -105,7 +105,7 @@
define i32 @test_rmw_add_32(i32* %dst) {
; NOLSE-LABEL: test_rmw_add_32:
; NOLSE: // %bb.0: // %entry
-; NOLSE-NEXT: sub sp, sp, #32 // =32
+; NOLSE-NEXT: sub sp, sp, #32
; NOLSE-NEXT: .cfi_def_cfa_offset 32
; NOLSE-NEXT: str x0, [sp, #16] // 8-byte Folded Spill
; NOLSE-NEXT: ldr w8, [x0]
@@ -116,7 +116,7 @@
; NOLSE-NEXT: // Child Loop BB2_2 Depth 2
; NOLSE-NEXT: ldr w9, [sp, #28] // 4-byte Folded Reload
; NOLSE-NEXT: ldr x11, [sp, #16] // 8-byte Folded Reload
-; NOLSE-NEXT: add w12, w9, #1 // =1
+; NOLSE-NEXT: add w12, w9, #1
; NOLSE-NEXT: .LBB2_2: // %atomicrmw.start
; NOLSE-NEXT: // Parent Loop BB2_1 Depth=1
; NOLSE-NEXT: // => This Inner Loop Header: Depth=2
@@ -132,13 +132,13 @@
; NOLSE-NEXT: subs w9, w8, w9
; NOLSE-NEXT: cset w9, eq
; NOLSE-NEXT: str w8, [sp, #12] // 4-byte Folded Spill
-; NOLSE-NEXT: subs w9, w9, #1 // =1
+; NOLSE-NEXT: subs w9, w9, #1
; NOLSE-NEXT: str w8, [sp, #28] // 4-byte Folded Spill
; NOLSE-NEXT: b.ne .LBB2_1
; NOLSE-NEXT: b .LBB2_5
; NOLSE-NEXT: .LBB2_5: // %atomicrmw.end
; NOLSE-NEXT: ldr w0, [sp, #12] // 4-byte Folded Reload
-; NOLSE-NEXT: add sp, sp, #32 // =32
+; NOLSE-NEXT: add sp, sp, #32
; NOLSE-NEXT: ret
;
; LSE-LABEL: test_rmw_add_32:
@@ -154,7 +154,7 @@
define i64 @test_rmw_add_64(i64* %dst) {
; NOLSE-LABEL: test_rmw_add_64:
; NOLSE: // %bb.0: // %entry
-; NOLSE-NEXT: sub sp, sp, #32 // =32
+; NOLSE-NEXT: sub sp, sp, #32
; NOLSE-NEXT: .cfi_def_cfa_offset 32
; NOLSE-NEXT: str x0, [sp, #16] // 8-byte Folded Spill
; NOLSE-NEXT: ldr x8, [x0]
@@ -165,7 +165,7 @@
; NOLSE-NEXT: // Child Loop BB3_2 Depth 2
; NOLSE-NEXT: ldr x9, [sp, #24] // 8-byte Folded Reload
; NOLSE-NEXT: ldr x11, [sp, #16] // 8-byte Folded Reload
-; NOLSE-NEXT: add x12, x9, #1 // =1
+; NOLSE-NEXT: add x12, x9, #1
; NOLSE-NEXT: .LBB3_2: // %atomicrmw.start
; NOLSE-NEXT: // Parent Loop BB3_1 Depth=1
; NOLSE-NEXT: // => This Inner Loop Header: Depth=2
@@ -181,13 +181,13 @@
; NOLSE-NEXT: subs x9, x8, x9
; NOLSE-NEXT: cset w9, eq
; NOLSE-NEXT: str x8, [sp, #8] // 8-byte Folded Spill
-; NOLSE-NEXT: subs w9, w9, #1 // =1
+; NOLSE-NEXT: subs w9, w9, #1
; NOLSE-NEXT: str x8, [sp, #24] // 8-byte Folded Spill
; NOLSE-NEXT: b.ne .LBB3_1
; NOLSE-NEXT: b .LBB3_5
; NOLSE-NEXT: .LBB3_5: // %atomicrmw.end
; NOLSE-NEXT: ldr x0, [sp, #8] // 8-byte Folded Reload
-; NOLSE-NEXT: add sp, sp, #32 // =32
+; NOLSE-NEXT: add sp, sp, #32
; NOLSE-NEXT: ret
;
; LSE-LABEL: test_rmw_add_64:
@@ -204,7 +204,7 @@
define i128 @test_rmw_add_128(i128* %dst) {
; NOLSE-LABEL: test_rmw_add_128:
; NOLSE: // %bb.0: // %entry
-; NOLSE-NEXT: sub sp, sp, #48 // =48
+; NOLSE-NEXT: sub sp, sp, #48
; NOLSE-NEXT: .cfi_def_cfa_offset 48
; NOLSE-NEXT: str x0, [sp, #24] // 8-byte Folded Spill
; NOLSE-NEXT: ldr x8, [x0, #8]
@@ -218,7 +218,7 @@
; NOLSE-NEXT: ldr x11, [sp, #40] // 8-byte Folded Reload
; NOLSE-NEXT: ldr x8, [sp, #32] // 8-byte Folded Reload
; NOLSE-NEXT: ldr x13, [sp, #24] // 8-byte Folded Reload
-; NOLSE-NEXT: adds x14, x8, #1 // =1
+; NOLSE-NEXT: adds x14, x8, #1
; NOLSE-NEXT: mov x9, xzr
; NOLSE-NEXT: adcs x15, x11, x9
; NOLSE-NEXT: .LBB4_2: // %atomicrmw.start
@@ -253,12 +253,12 @@
; NOLSE-NEXT: .LBB4_6: // %atomicrmw.end
; NOLSE-NEXT: ldr x1, [sp, #8] // 8-byte Folded Reload
; NOLSE-NEXT: ldr x0, [sp, #16] // 8-byte Folded Reload
-; NOLSE-NEXT: add sp, sp, #48 // =48
+; NOLSE-NEXT: add sp, sp, #48
; NOLSE-NEXT: ret
;
; LSE-LABEL: test_rmw_add_128:
; LSE: // %bb.0: // %entry
-; LSE-NEXT: sub sp, sp, #80 // =80
+; LSE-NEXT: sub sp, sp, #80
; LSE-NEXT: .cfi_def_cfa_offset 80
; LSE-NEXT: str x0, [sp, #56] // 8-byte Folded Spill
; LSE-NEXT: ldr x8, [x0, #8]
@@ -271,7 +271,7 @@
; LSE-NEXT: ldr x10, [sp, #72] // 8-byte Folded Reload
; LSE-NEXT: ldr x8, [sp, #64] // 8-byte Folded Reload
; LSE-NEXT: ldr x9, [sp, #56] // 8-byte Folded Reload
-; LSE-NEXT: adds x2, x8, #1 // =1
+; LSE-NEXT: adds x2, x8, #1
; LSE-NEXT: mov x11, xzr
; LSE-NEXT: adcs x11, x10, x11
; LSE-NEXT: // kill: def $x2 killed $x2 def $x2_x3
@@ -295,7 +295,7 @@
; LSE-NEXT: .LBB4_2: // %atomicrmw.end
; LSE-NEXT: ldr x1, [sp, #40] // 8-byte Folded Reload
; LSE-NEXT: ldr x0, [sp, #48] // 8-byte Folded Reload
-; LSE-NEXT: add sp, sp, #80 // =80
+; LSE-NEXT: add sp, sp, #80
; LSE-NEXT: ret
entry:
%res = atomicrmw add i128* %dst, i128 1 seq_cst
@@ -304,7 +304,7 @@
define i8 @test_rmw_nand_8(i8* %dst) {
; NOLSE-LABEL: test_rmw_nand_8:
; NOLSE: // %bb.0: // %entry
-; NOLSE-NEXT: sub sp, sp, #32 // =32
+; NOLSE-NEXT: sub sp, sp, #32
; NOLSE-NEXT: .cfi_def_cfa_offset 32
; NOLSE-NEXT: str x0, [sp, #16] // 8-byte Folded Spill
; NOLSE-NEXT: ldrb w8, [x0]
@@ -332,18 +332,18 @@
; NOLSE-NEXT: subs w9, w8, w9, uxtb
; NOLSE-NEXT: cset w9, eq
; NOLSE-NEXT: str w8, [sp, #12] // 4-byte Folded Spill
-; NOLSE-NEXT: subs w9, w9, #1 // =1
+; NOLSE-NEXT: subs w9, w9, #1
; NOLSE-NEXT: str w8, [sp, #28] // 4-byte Folded Spill
; NOLSE-NEXT: b.ne .LBB5_1
; NOLSE-NEXT: b .LBB5_5
; NOLSE-NEXT: .LBB5_5: // %atomicrmw.end
; NOLSE-NEXT: ldr w0, [sp, #12] // 4-byte Folded Reload
-; NOLSE-NEXT: add sp, sp, #32 // =32
+; NOLSE-NEXT: add sp, sp, #32
; NOLSE-NEXT: ret
;
; LSE-LABEL: test_rmw_nand_8:
; LSE: // %bb.0: // %entry
-; LSE-NEXT: sub sp, sp, #32 // =32
+; LSE-NEXT: sub sp, sp, #32
; LSE-NEXT: .cfi_def_cfa_offset 32
; LSE-NEXT: str x0, [sp, #16] // 8-byte Folded Spill
; LSE-NEXT: ldrb w8, [x0]
@@ -361,13 +361,13 @@
; LSE-NEXT: subs w9, w8, w9, uxtb
; LSE-NEXT: cset w9, eq
; LSE-NEXT: str w8, [sp, #12] // 4-byte Folded Spill
-; LSE-NEXT: subs w9, w9, #1 // =1
+; LSE-NEXT: subs w9, w9, #1
; LSE-NEXT: str w8, [sp, #28] // 4-byte Folded Spill
; LSE-NEXT: b.ne .LBB5_1
; LSE-NEXT: b .LBB5_2
; LSE-NEXT: .LBB5_2: // %atomicrmw.end
; LSE-NEXT: ldr w0, [sp, #12] // 4-byte Folded Reload
-; LSE-NEXT: add sp, sp, #32 // =32
+; LSE-NEXT: add sp, sp, #32
; LSE-NEXT: ret
entry:
%res = atomicrmw nand i8* %dst, i8 1 seq_cst
@@ -377,7 +377,7 @@
define i16 @test_rmw_nand_16(i16* %dst) {
; NOLSE-LABEL: test_rmw_nand_16:
; NOLSE: // %bb.0: // %entry
-; NOLSE-NEXT: sub sp, sp, #32 // =32
+; NOLSE-NEXT: sub sp, sp, #32
; NOLSE-NEXT: .cfi_def_cfa_offset 32
; NOLSE-NEXT: str x0, [sp, #16] // 8-byte Folded Spill
; NOLSE-NEXT: ldrh w8, [x0]
@@ -405,18 +405,18 @@
; NOLSE-NEXT: subs w9, w8, w9, uxth
; NOLSE-NEXT: cset w9, eq
; NOLSE-NEXT: str w8, [sp, #12] // 4-byte Folded Spill
-; NOLSE-NEXT: subs w9, w9, #1 // =1
+; NOLSE-NEXT: subs w9, w9, #1
; NOLSE-NEXT: str w8, [sp, #28] // 4-byte Folded Spill
; NOLSE-NEXT: b.ne .LBB6_1
; NOLSE-NEXT: b .LBB6_5
; NOLSE-NEXT: .LBB6_5: // %atomicrmw.end
; NOLSE-NEXT: ldr w0, [sp, #12] // 4-byte Folded Reload
-; NOLSE-NEXT: add sp, sp, #32 // =32
+; NOLSE-NEXT: add sp, sp, #32
; NOLSE-NEXT: ret
;
; LSE-LABEL: test_rmw_nand_16:
; LSE: // %bb.0: // %entry
-; LSE-NEXT: sub sp, sp, #32 // =32
+; LSE-NEXT: sub sp, sp, #32
; LSE-NEXT: .cfi_def_cfa_offset 32
; LSE-NEXT: str x0, [sp, #16] // 8-byte Folded Spill
; LSE-NEXT: ldrh w8, [x0]
@@ -434,13 +434,13 @@
; LSE-NEXT: subs w9, w8, w9, uxth
; LSE-NEXT: cset w9, eq
; LSE-NEXT: str w8, [sp, #12] // 4-byte Folded Spill
-; LSE-NEXT: subs w9, w9, #1 // =1
+; LSE-NEXT: subs w9, w9, #1
; LSE-NEXT: str w8, [sp, #28] // 4-byte Folded Spill
; LSE-NEXT: b.ne .LBB6_1
; LSE-NEXT: b .LBB6_2
; LSE-NEXT: .LBB6_2: // %atomicrmw.end
; LSE-NEXT: ldr w0, [sp, #12] // 4-byte Folded Reload
-; LSE-NEXT: add sp, sp, #32 // =32
+; LSE-NEXT: add sp, sp, #32
; LSE-NEXT: ret
entry:
%res = atomicrmw nand i16* %dst, i16 1 seq_cst
@@ -450,7 +450,7 @@
define i32 @test_rmw_nand_32(i32* %dst) {
; NOLSE-LABEL: test_rmw_nand_32:
; NOLSE: // %bb.0: // %entry
-; NOLSE-NEXT: sub sp, sp, #32 // =32
+; NOLSE-NEXT: sub sp, sp, #32
; NOLSE-NEXT: .cfi_def_cfa_offset 32
; NOLSE-NEXT: str x0, [sp, #16] // 8-byte Folded Spill
; NOLSE-NEXT: ldr w8, [x0]
@@ -478,18 +478,18 @@
; NOLSE-NEXT: subs w9, w8, w9
; NOLSE-NEXT: cset w9, eq
; NOLSE-NEXT: str w8, [sp, #12] // 4-byte Folded Spill
-; NOLSE-NEXT: subs w9, w9, #1 // =1
+; NOLSE-NEXT: subs w9, w9, #1
; NOLSE-NEXT: str w8, [sp, #28] // 4-byte Folded Spill
; NOLSE-NEXT: b.ne .LBB7_1
; NOLSE-NEXT: b .LBB7_5
; NOLSE-NEXT: .LBB7_5: // %atomicrmw.end
; NOLSE-NEXT: ldr w0, [sp, #12] // 4-byte Folded Reload
-; NOLSE-NEXT: add sp, sp, #32 // =32
+; NOLSE-NEXT: add sp, sp, #32
; NOLSE-NEXT: ret
;
; LSE-LABEL: test_rmw_nand_32:
; LSE: // %bb.0: // %entry
-; LSE-NEXT: sub sp, sp, #32 // =32
+; LSE-NEXT: sub sp, sp, #32
; LSE-NEXT: .cfi_def_cfa_offset 32
; LSE-NEXT: str x0, [sp, #16] // 8-byte Folded Spill
; LSE-NEXT: ldr w8, [x0]
@@ -507,13 +507,13 @@
; LSE-NEXT: subs w9, w8, w9
; LSE-NEXT: cset w9, eq
; LSE-NEXT: str w8, [sp, #12] // 4-byte Folded Spill
-; LSE-NEXT: subs w9, w9, #1 // =1
+; LSE-NEXT: subs w9, w9, #1
; LSE-NEXT: str w8, [sp, #28] // 4-byte Folded Spill
; LSE-NEXT: b.ne .LBB7_1
; LSE-NEXT: b .LBB7_2
; LSE-NEXT: .LBB7_2: // %atomicrmw.end
; LSE-NEXT: ldr w0, [sp, #12] // 4-byte Folded Reload
-; LSE-NEXT: add sp, sp, #32 // =32
+; LSE-NEXT: add sp, sp, #32
; LSE-NEXT: ret
entry:
%res = atomicrmw nand i32* %dst, i32 1 seq_cst
@@ -523,7 +523,7 @@
define i64 @test_rmw_nand_64(i64* %dst) {
; NOLSE-LABEL: test_rmw_nand_64:
; NOLSE: // %bb.0: // %entry
-; NOLSE-NEXT: sub sp, sp, #32 // =32
+; NOLSE-NEXT: sub sp, sp, #32
; NOLSE-NEXT: .cfi_def_cfa_offset 32
; NOLSE-NEXT: str x0, [sp, #16] // 8-byte Folded Spill
; NOLSE-NEXT: ldr x8, [x0]
@@ -554,18 +554,18 @@
; NOLSE-NEXT: subs x9, x8, x9
; NOLSE-NEXT: cset w9, eq
; NOLSE-NEXT: str x8, [sp, #8] // 8-byte Folded Spill
-; NOLSE-NEXT: subs w9, w9, #1 // =1
+; NOLSE-NEXT: subs w9, w9, #1
; NOLSE-NEXT: str x8, [sp, #24] // 8-byte Folded Spill
; NOLSE-NEXT: b.ne .LBB8_1
; NOLSE-NEXT: b .LBB8_5
; NOLSE-NEXT: .LBB8_5: // %atomicrmw.end
; NOLSE-NEXT: ldr x0, [sp, #8] // 8-byte Folded Reload
-; NOLSE-NEXT: add sp, sp, #32 // =32
+; NOLSE-NEXT: add sp, sp, #32
; NOLSE-NEXT: ret
;
; LSE-LABEL: test_rmw_nand_64:
; LSE: // %bb.0: // %entry
-; LSE-NEXT: sub sp, sp, #32 // =32
+; LSE-NEXT: sub sp, sp, #32
; LSE-NEXT: .cfi_def_cfa_offset 32
; LSE-NEXT: str x0, [sp, #16] // 8-byte Folded Spill
; LSE-NEXT: ldr x8, [x0]
@@ -586,13 +586,13 @@
; LSE-NEXT: subs x9, x8, x9
; LSE-NEXT: cset w9, eq
; LSE-NEXT: str x8, [sp, #8] // 8-byte Folded Spill
-; LSE-NEXT: subs w9, w9, #1 // =1
+; LSE-NEXT: subs w9, w9, #1
; LSE-NEXT: str x8, [sp, #24] // 8-byte Folded Spill
; LSE-NEXT: b.ne .LBB8_1
; LSE-NEXT: b .LBB8_2
; LSE-NEXT: .LBB8_2: // %atomicrmw.end
; LSE-NEXT: ldr x0, [sp, #8] // 8-byte Folded Reload
-; LSE-NEXT: add sp, sp, #32 // =32
+; LSE-NEXT: add sp, sp, #32
; LSE-NEXT: ret
entry:
%res = atomicrmw nand i64* %dst, i64 1 seq_cst
@@ -602,7 +602,7 @@
define i128 @test_rmw_nand_128(i128* %dst) {
; NOLSE-LABEL: test_rmw_nand_128:
; NOLSE: // %bb.0: // %entry
-; NOLSE-NEXT: sub sp, sp, #48 // =48
+; NOLSE-NEXT: sub sp, sp, #48
; NOLSE-NEXT: .cfi_def_cfa_offset 48
; NOLSE-NEXT: str x0, [sp, #24] // 8-byte Folded Spill
; NOLSE-NEXT: ldr x8, [x0, #8]
@@ -654,12 +654,12 @@
; NOLSE-NEXT: .LBB9_6: // %atomicrmw.end
; NOLSE-NEXT: ldr x1, [sp, #8] // 8-byte Folded Reload
; NOLSE-NEXT: ldr x0, [sp, #16] // 8-byte Folded Reload
-; NOLSE-NEXT: add sp, sp, #48 // =48
+; NOLSE-NEXT: add sp, sp, #48
; NOLSE-NEXT: ret
;
; LSE-LABEL: test_rmw_nand_128:
; LSE: // %bb.0: // %entry
-; LSE-NEXT: sub sp, sp, #80 // =80
+; LSE-NEXT: sub sp, sp, #80
; LSE-NEXT: .cfi_def_cfa_offset 80
; LSE-NEXT: str x0, [sp, #56] // 8-byte Folded Spill
; LSE-NEXT: ldr x8, [x0, #8]
@@ -699,7 +699,7 @@
; LSE-NEXT: .LBB9_2: // %atomicrmw.end
; LSE-NEXT: ldr x1, [sp, #40] // 8-byte Folded Reload
; LSE-NEXT: ldr x0, [sp, #48] // 8-byte Folded Reload
-; LSE-NEXT: add sp, sp, #80 // =80
+; LSE-NEXT: add sp, sp, #80
; LSE-NEXT: ret
entry:
%res = atomicrmw nand i128* %dst, i128 1 seq_cst
diff --git a/llvm/test/CodeGen/AArch64/atomicrmw-xchg-fp.ll b/llvm/test/CodeGen/AArch64/atomicrmw-xchg-fp.ll
--- a/llvm/test/CodeGen/AArch64/atomicrmw-xchg-fp.ll
+++ b/llvm/test/CodeGen/AArch64/atomicrmw-xchg-fp.ll
@@ -78,7 +78,7 @@
define fp128 @test_rmw_xchg_f128(fp128* %dst, fp128 %new) {
; NOLSE-LABEL: test_rmw_xchg_f128:
; NOLSE: // %bb.0:
-; NOLSE-NEXT: sub sp, sp, #32 // =32
+; NOLSE-NEXT: sub sp, sp, #32
; NOLSE-NEXT: .cfi_def_cfa_offset 32
; NOLSE-NEXT: str q0, [sp, #16]
; NOLSE-NEXT: ldp x9, x8, [sp, #16]
@@ -94,7 +94,7 @@
;
; LSE-LABEL: test_rmw_xchg_f128:
; LSE: // %bb.0:
-; LSE-NEXT: sub sp, sp, #32 // =32
+; LSE-NEXT: sub sp, sp, #32
; LSE-NEXT: .cfi_def_cfa_offset 32
; LSE-NEXT: str q0, [sp, #16]
; LSE-NEXT: ldp x9, x8, [sp, #16]
diff --git a/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll b/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll
--- a/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll
+++ b/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll
@@ -44,14 +44,14 @@
define i32 @block_split(i32 %a, i32 %b) #0 {
; CHECK-LABEL: block_split:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: cmp w0, #5 ; =5
+; CHECK-NEXT: cmp w0, #5
; CHECK-NEXT: b.ne LBB1_1
; CHECK-NEXT: b LBB1_2
; CHECK-NEXT: LBB1_1: ; %lor.lhs.false
; CHECK-NEXT: lsl w8, w1, #1
-; CHECK-NEXT: cmp w1, #7 ; =7
+; CHECK-NEXT: cmp w1, #7
; CHECK-NEXT: csinc w8, w8, w1, lt
-; CHECK-NEXT: cmp w8, #16 ; =16
+; CHECK-NEXT: cmp w8, #16
; CHECK-NEXT: b.le LBB1_2
; CHECK-NEXT: b LBB1_3
; CHECK-NEXT: LBB1_2: ; %if.then
diff --git a/llvm/test/CodeGen/AArch64/branch-relax-cbz.ll b/llvm/test/CodeGen/AArch64/branch-relax-cbz.ll
--- a/llvm/test/CodeGen/AArch64/branch-relax-cbz.ll
+++ b/llvm/test/CodeGen/AArch64/branch-relax-cbz.ll
@@ -6,7 +6,7 @@
; CHECK-LABEL: split_block_no_fallthrough:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
-; CHECK-NEXT: cmn x0, #5 ; =5
+; CHECK-NEXT: cmn x0, #5
; CHECK-NEXT: b.le LBB0_3
; CHECK-NEXT: ; %bb.1: ; %b3
; CHECK-NEXT: ldr w8, [x8]
diff --git a/llvm/test/CodeGen/AArch64/cgp-usubo.ll b/llvm/test/CodeGen/AArch64/cgp-usubo.ll
--- a/llvm/test/CodeGen/AArch64/cgp-usubo.ll
+++ b/llvm/test/CodeGen/AArch64/cgp-usubo.ll
@@ -38,7 +38,7 @@
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xff
; CHECK-NEXT: mov w9, #42
-; CHECK-NEXT: cmp w8, #42 // =42
+; CHECK-NEXT: cmp w8, #42
; CHECK-NEXT: sub w9, w9, w0
; CHECK-NEXT: cset w0, hi
; CHECK-NEXT: strb w9, [x1]
@@ -56,7 +56,7 @@
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xffff
; CHECK-NEXT: mov w9, #43
-; CHECK-NEXT: cmp w8, #43 // =43
+; CHECK-NEXT: cmp w8, #43
; CHECK-NEXT: sub w9, w9, w0
; CHECK-NEXT: cset w0, hi
; CHECK-NEXT: strh w9, [x1]
@@ -73,8 +73,8 @@
; CHECK-LABEL: usubo_ult_constant_op1_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: cmp w8, #44 // =44
-; CHECK-NEXT: sub w9, w0, #44 // =44
+; CHECK-NEXT: cmp w8, #44
+; CHECK-NEXT: sub w9, w0, #44
; CHECK-NEXT: cset w0, lo
; CHECK-NEXT: strh w9, [x1]
; CHECK-NEXT: ret
@@ -88,9 +88,9 @@
; CHECK-LABEL: usubo_ugt_constant_op1_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xff
-; CHECK-NEXT: cmp w8, #45 // =45
+; CHECK-NEXT: cmp w8, #45
; CHECK-NEXT: cset w8, lo
-; CHECK-NEXT: sub w9, w0, #45 // =45
+; CHECK-NEXT: sub w9, w0, #45
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: strb w9, [x1]
; CHECK-NEXT: ret
@@ -105,8 +105,8 @@
define i1 @usubo_eq_constant1_op1_i32(i32 %x, i32* %p) nounwind {
; CHECK-LABEL: usubo_eq_constant1_op1_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: cmp w0, #0 // =0
-; CHECK-NEXT: sub w8, w0, #1 // =1
+; CHECK-NEXT: cmp w0, #0
+; CHECK-NEXT: sub w8, w0, #1
; CHECK-NEXT: cset w0, eq
; CHECK-NEXT: str w8, [x1]
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll b/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll
--- a/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll
+++ b/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll
@@ -14,7 +14,7 @@
; CHECK-LABEL: f_i8_sign_extend_inreg:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: sxtb w8, w0
-; CHECK-NEXT: cmp w8, #0 // =0
+; CHECK-NEXT: cmp w8, #0
; CHECK-NEXT: csel w8, w1, w2, ge
; CHECK-NEXT: add w0, w8, w0, uxtb
; CHECK-NEXT: ret
@@ -36,7 +36,7 @@
; CHECK-LABEL: f_i16_sign_extend_inreg:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: sxth w8, w0
-; CHECK-NEXT: cmp w8, #0 // =0
+; CHECK-NEXT: cmp w8, #0
; CHECK-NEXT: csel w8, w1, w2, ge
; CHECK-NEXT: add w0, w8, w0, uxth
 ; CHECK-NEXT: ret
@@ -57,7 +57,7 @@
 define i64 @f_i32_sign_extend_inreg(i32 %in, i64 %a, i64 %b) nounwind {
 ; CHECK-LABEL: f_i32_sign_extend_inreg:
 ; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csel x8, x1, x2, ge
 ; CHECK-NEXT: add x0, x8, w0, uxtw
 ; CHECK-NEXT: ret
@@ -79,7 +79,7 @@
 ; CHECK-LABEL: g_i8_sign_extend_inreg:
 ; CHECK: // %bb.0: // %entry
 ; CHECK-NEXT: sxtb w8, w0
-; CHECK-NEXT: cmp w8, #0 // =0
+; CHECK-NEXT: cmp w8, #0
 ; CHECK-NEXT: csel w8, w1, w2, lt
 ; CHECK-NEXT: add w0, w8, w0, uxtb
 ; CHECK-NEXT: ret
@@ -101,7 +101,7 @@
 ; CHECK-LABEL: g_i16_sign_extend_inreg:
 ; CHECK: // %bb.0: // %entry
 ; CHECK-NEXT: sxth w8, w0
-; CHECK-NEXT: cmp w8, #0 // =0
+; CHECK-NEXT: cmp w8, #0
 ; CHECK-NEXT: csel w8, w1, w2, lt
 ; CHECK-NEXT: add w0, w8, w0, uxth
 ; CHECK-NEXT: ret
@@ -122,7 +122,7 @@
 define i64 @g_i32_sign_extend_inreg(i32 %in, i64 %a, i64 %b) nounwind {
 ; CHECK-LABEL: g_i32_sign_extend_inreg:
 ; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csel x8, x1, x2, lt
 ; CHECK-NEXT: add x0, x8, w0, uxtw
 ; CHECK-NEXT: ret
@@ -145,7 +145,7 @@
 ; CHECK: // %bb.0: // %entry
 ; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
 ; CHECK-NEXT: sxtw x8, w0
-; CHECK-NEXT: cmp x8, #0 // =0
+; CHECK-NEXT: cmp x8, #0
 ; CHECK-NEXT: csel x8, x1, x2, ge
 ; CHECK-NEXT: add x0, x8, w0, uxtw
 ; CHECK-NEXT: ret
@@ -169,7 +169,7 @@
 ; CHECK: // %bb.0: // %entry
 ; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
 ; CHECK-NEXT: sxtw x8, w0
-; CHECK-NEXT: cmp x8, #0 // =0
+; CHECK-NEXT: cmp x8, #0
 ; CHECK-NEXT: csel x8, x1, x2, lt
 ; CHECK-NEXT: add x0, x8, w0, uxtw
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
--- a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
@@ -71,7 +71,7 @@
 define i64 @not_sign_i64(i64 %a) {
 ; CHECK-LABEL: not_sign_i64:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: cmp x0, #0 // =0
+; CHECK-NEXT: cmp x0, #0
 ; CHECK-NEXT: mov w8, #1
 ; CHECK-NEXT: cneg x0, x8, le
 ; CHECK-NEXT: ret
@@ -172,7 +172,7 @@
 define <4 x i32> @sign_4xi32_multi_use(<4 x i32> %a) {
 ; CHECK-LABEL: sign_4xi32_multi_use:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #32 // =32
+; CHECK-NEXT: sub sp, sp, #32
 ; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT: .cfi_def_cfa_offset 32
 ; CHECK-NEXT: .cfi_offset w30, -16
@@ -185,7 +185,7 @@
 ; CHECK-NEXT: bl use_4xi1
 ; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-NEXT: add sp, sp, #32 // =32
+; CHECK-NEXT: add sp, sp, #32
 ; CHECK-NEXT: ret
 %c = icmp sgt <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
 %res = select <4 x i1> %c, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
diff --git a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
--- a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
+++ b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
@@ -14,7 +14,7 @@
 ; CHECK-NEXT: adrp x8, :got:a
 ; CHECK-NEXT: ldr x8, [x8, :got_lo12:a]
 ; CHECK-NEXT: ldr w8, [x8]
-; CHECK-NEXT: cmp w8, #10 // =10
+; CHECK-NEXT: cmp w8, #10
 ; CHECK-NEXT: adrp x8, :got:b
 ; CHECK-NEXT: ldr x8, [x8, :got_lo12:b]
 ; CHECK-NEXT: b.le .LBB0_3
@@ -79,7 +79,7 @@
 ; CHECK-NEXT: adrp x8, :got:a
 ; CHECK-NEXT: ldr x8, [x8, :got_lo12:a]
 ; CHECK-NEXT: ldr w8, [x8]
-; CHECK-NEXT: cmp w8, #5 // =5
+; CHECK-NEXT: cmp w8, #5
 ; CHECK-NEXT: b.le .LBB1_3
 ; CHECK-NEXT: // %bb.1: // %land.lhs.true
 ; CHECK-NEXT: adrp x8, :got:b
@@ -146,7 +146,7 @@
 ; CHECK-NEXT: adrp x8, :got:a
 ; CHECK-NEXT: ldr x8, [x8, :got_lo12:a]
 ; CHECK-NEXT: ldr w8, [x8]
-; CHECK-NEXT: cmp w8, #5 // =5
+; CHECK-NEXT: cmp w8, #5
 ; CHECK-NEXT: adrp x8, :got:b
 ; CHECK-NEXT: ldr x8, [x8, :got_lo12:b]
 ; CHECK-NEXT: b.ge .LBB2_3
@@ -211,7 +211,7 @@
 ; CHECK-NEXT: adrp x8, :got:a
 ; CHECK-NEXT: ldr x8, [x8, :got_lo12:a]
 ; CHECK-NEXT: ldr w8, [x8]
-; CHECK-NEXT: cmp w8, #5 // =5
+; CHECK-NEXT: cmp w8, #5
 ; CHECK-NEXT: b.ge .LBB3_3
 ; CHECK-NEXT: // %bb.1: // %land.lhs.true
 ; CHECK-NEXT: adrp x8, :got:b
@@ -278,7 +278,7 @@
 ; CHECK-NEXT: adrp x8, :got:a
 ; CHECK-NEXT: ldr x8, [x8, :got_lo12:a]
 ; CHECK-NEXT: ldr w8, [x8]
-; CHECK-NEXT: cmn w8, #5 // =5
+; CHECK-NEXT: cmn w8, #5
 ; CHECK-NEXT: b.le .LBB4_3
 ; CHECK-NEXT: // %bb.1: // %land.lhs.true
 ; CHECK-NEXT: adrp x8, :got:b
@@ -345,7 +345,7 @@
 ; CHECK-NEXT: adrp x8, :got:a
 ; CHECK-NEXT: ldr x8, [x8, :got_lo12:a]
 ; CHECK-NEXT: ldr w8, [x8]
-; CHECK-NEXT: cmn w8, #5 // =5
+; CHECK-NEXT: cmn w8, #5
 ; CHECK-NEXT: b.ge .LBB5_3
 ; CHECK-NEXT: // %bb.1: // %land.lhs.true
 ; CHECK-NEXT: adrp x8, :got:b
@@ -427,17 +427,17 @@
 ; CHECK-NEXT: ldr x19, [x0]
 ; CHECK-NEXT: mov w20, #24
 ; CHECK-NEXT: adrp x22, glob
-; CHECK-NEXT: add x21, x19, #2 // =2
+; CHECK-NEXT: add x21, x19, #2
 ; CHECK-NEXT: .LBB6_1: // %land.rhs
 ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT: ldr x8, [x20]
-; CHECK-NEXT: cmp x8, #1 // =1
+; CHECK-NEXT: cmp x8, #1
 ; CHECK-NEXT: b.lt .LBB6_3
 ; CHECK-NEXT: // %bb.2: // %while.body
 ; CHECK-NEXT: // in Loop: Header=BB6_1 Depth=1
 ; CHECK-NEXT: ldr x0, [x22, :lo12:glob]
 ; CHECK-NEXT: bl Update
-; CHECK-NEXT: sub x21, x21, #2 // =2
+; CHECK-NEXT: sub x21, x21, #2
 ; CHECK-NEXT: cmp x19, x21
 ; CHECK-NEXT: b.lt .LBB6_1
 ; CHECK-NEXT: .LBB6_3: // %while.end
@@ -482,19 +482,19 @@
 ; CHECK-NEXT: adrp x19, :got:a
 ; CHECK-NEXT: ldr x19, [x19, :got_lo12:a]
 ; CHECK-NEXT: ldr w8, [x19]
-; CHECK-NEXT: cmn w8, #2 // =2
+; CHECK-NEXT: cmn w8, #2
 ; CHECK-NEXT: b.gt .LBB7_4
 ; CHECK-NEXT: // %bb.1: // %while.body.preheader
-; CHECK-NEXT: sub w20, w8, #1 // =1
+; CHECK-NEXT: sub w20, w8, #1
 ; CHECK-NEXT: .LBB7_2: // %while.body
 ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT: bl do_something
-; CHECK-NEXT: adds w20, w20, #1 // =1
+; CHECK-NEXT: adds w20, w20, #1
 ; CHECK-NEXT: b.mi .LBB7_2
 ; CHECK-NEXT: // %bb.3: // %while.cond.while.end_crit_edge
 ; CHECK-NEXT: ldr w8, [x19]
 ; CHECK-NEXT: .LBB7_4: // %while.end
-; CHECK-NEXT: cmp w8, #1 // =1
+; CHECK-NEXT: cmp w8, #1
 ; CHECK-NEXT: b.gt .LBB7_7
 ; CHECK-NEXT: // %bb.5: // %land.lhs.true
 ; CHECK-NEXT: adrp x8, :got:b
@@ -562,20 +562,20 @@
 ; CHECK-NEXT: adrp x8, :got:a
 ; CHECK-NEXT: ldr x8, [x8, :got_lo12:a]
 ; CHECK-NEXT: ldr w8, [x8]
-; CHECK-NEXT: cmp w8, #0 // =0
+; CHECK-NEXT: cmp w8, #0
 ; CHECK-NEXT: b.gt .LBB8_3
 ; CHECK-NEXT: // %bb.1: // %while.body.preheader
-; CHECK-NEXT: sub w19, w8, #1 // =1
+; CHECK-NEXT: sub w19, w8, #1
 ; CHECK-NEXT: .LBB8_2: // %while.body
 ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT: bl do_something
-; CHECK-NEXT: adds w19, w19, #1 // =1
+; CHECK-NEXT: adds w19, w19, #1
 ; CHECK-NEXT: b.mi .LBB8_2
 ; CHECK-NEXT: .LBB8_3: // %while.end
 ; CHECK-NEXT: adrp x8, :got:c
 ; CHECK-NEXT: ldr x8, [x8, :got_lo12:c]
 ; CHECK-NEXT: ldr w8, [x8]
-; CHECK-NEXT: cmn w8, #2 // =2
+; CHECK-NEXT: cmn w8, #2
 ; CHECK-NEXT: b.lt .LBB8_6
 ; CHECK-NEXT: // %bb.4: // %land.lhs.true
 ; CHECK-NEXT: adrp x8, :got:b
@@ -647,7 +647,7 @@
 ; CHECK-NEXT: .cfi_offset w19, -8
 ; CHECK-NEXT: .cfi_offset w30, -16
 ; CHECK-NEXT: .cfi_offset b8, -32
-; CHECK-NEXT: cmp w0, #2 // =2
+; CHECK-NEXT: cmp w0, #2
 ; CHECK-NEXT: b.lt .LBB9_3
 ; CHECK-NEXT: // %bb.1: // %land.lhs.true
 ; CHECK-NEXT: ldr x8, [x1, #8]
@@ -661,7 +661,7 @@
 ; CHECK-NEXT: mov w19, w0
 ; CHECK-NEXT: mov w0, #-1
 ; CHECK-NEXT: bl yoo
-; CHECK-NEXT: cmp w19, #0 // =0
+; CHECK-NEXT: cmp w19, #0
 ; CHECK-NEXT: cinc w0, w19, gt
 ; CHECK-NEXT: mov w1, #2
 ; CHECK-NEXT: mov v8.16b, v0.16b
@@ -720,7 +720,7 @@
 ; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT: .cfi_def_cfa_offset 16
 ; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: mov w8, #42
 ; CHECK-NEXT: csinc w8, w8, wzr, gt
 ; CHECK-NEXT: cmp w0, #2, lsl #12 // =8192
@@ -757,12 +757,12 @@
 ; CHECK-NEXT: adrp x8, :got:a
 ; CHECK-NEXT: ldr x8, [x8, :got_lo12:a]
 ; CHECK-NEXT: ldr w8, [x8]
-; CHECK-NEXT: cmp w8, #0 // =0
+; CHECK-NEXT: cmp w8, #0
 ; CHECK-NEXT: csel x9, x0, xzr, gt
 ; CHECK-NEXT: str x9, [x1]
 ; CHECK-NEXT: b.le .LBB11_2
 ; CHECK-NEXT: // %bb.1: // %lor.lhs.false
-; CHECK-NEXT: cmp w8, #2 // =2
+; CHECK-NEXT: cmp w8, #2
 ; CHECK-NEXT: b.ge .LBB11_4
 ; CHECK-NEXT: b .LBB11_6
 ; CHECK-NEXT: .LBB11_2: // %land.lhs.true
diff --git a/llvm/test/CodeGen/AArch64/extract-bits.ll b/llvm/test/CodeGen/AArch64/extract-bits.ll
--- a/llvm/test/CodeGen/AArch64/extract-bits.ll
+++ b/llvm/test/CodeGen/AArch64/extract-bits.ll
@@ -24,7 +24,7 @@
 ; CHECK-NEXT: mov w9, #1
 ; CHECK-NEXT: lsl w9, w9, w2
 ; CHECK-NEXT: lsr w8, w0, w1
-; CHECK-NEXT: sub w9, w9, #1 // =1
+; CHECK-NEXT: sub w9, w9, #1
 ; CHECK-NEXT: and w0, w9, w8
 ; CHECK-NEXT: ret
 %shifted = lshr i32 %val, %numskipbits
@@ -40,7 +40,7 @@
 ; CHECK-NEXT: mov w9, #1
 ; CHECK-NEXT: lsl w9, w9, w2
 ; CHECK-NEXT: asr w8, w0, w1
-; CHECK-NEXT: sub w9, w9, #1 // =1
+; CHECK-NEXT: sub w9, w9, #1
 ; CHECK-NEXT: and w0, w9, w8
 ; CHECK-NEXT: ret
 %shifted = ashr i32 %val, %numskipbits
@@ -56,7 +56,7 @@
 ; CHECK-NEXT: mov w9, #1
 ; CHECK-NEXT: lsl w9, w9, w2
 ; CHECK-NEXT: lsr w8, w0, w1
-; CHECK-NEXT: sub w9, w9, #1 // =1
+; CHECK-NEXT: sub w9, w9, #1
 ; CHECK-NEXT: and w0, w9, w8
 ; CHECK-NEXT: ret
 %skip = zext i8 %numskipbits to i32
@@ -74,7 +74,7 @@
 ; CHECK-NEXT: ldr w8, [x0]
 ; CHECK-NEXT: mov w9, #1
 ; CHECK-NEXT: lsl w9, w9, w2
-; CHECK-NEXT: sub w9, w9, #1 // =1
+; CHECK-NEXT: sub w9, w9, #1
 ; CHECK-NEXT: lsr w8, w8, w1
 ; CHECK-NEXT: and w0, w9, w8
 ; CHECK-NEXT: ret
@@ -92,7 +92,7 @@
 ; CHECK-NEXT: ldr w8, [x0]
 ; CHECK-NEXT: mov w9, #1
 ; CHECK-NEXT: lsl w9, w9, w2
-; CHECK-NEXT: sub w9, w9, #1 // =1
+; CHECK-NEXT: sub w9, w9, #1
 ; CHECK-NEXT: lsr w8, w8, w1
 ; CHECK-NEXT: and w0, w9, w8
 ; CHECK-NEXT: ret
@@ -112,7 +112,7 @@
 ; CHECK-NEXT: mov w9, #1
 ; CHECK-NEXT: lsl w9, w9, w2
 ; CHECK-NEXT: lsr w8, w0, w1
-; CHECK-NEXT: sub w9, w9, #1 // =1
+; CHECK-NEXT: sub w9, w9, #1
 ; CHECK-NEXT: and w0, w8, w9
 ; CHECK-NEXT: ret
 %shifted = lshr i32 %val, %numskipbits
@@ -130,7 +130,7 @@
 ; CHECK-NEXT: mov w9, #1
 ; CHECK-NEXT: lsl x9, x9, x2
 ; CHECK-NEXT: lsr x8, x0, x1
-; CHECK-NEXT: sub x9, x9, #1 // =1
+; CHECK-NEXT: sub x9, x9, #1
 ; CHECK-NEXT: and x0, x9, x8
 ; CHECK-NEXT: ret
 %shifted = lshr i64 %val, %numskipbits
@@ -146,7 +146,7 @@
 ; CHECK-NEXT: mov w9, #1
 ; CHECK-NEXT: lsl x9, x9, x2
 ; CHECK-NEXT: asr x8, x0, x1
-; CHECK-NEXT: sub x9, x9, #1 // =1
+; CHECK-NEXT: sub x9, x9, #1
 ; CHECK-NEXT: and x0, x9, x8
 ; CHECK-NEXT: ret
 %shifted = ashr i64 %val, %numskipbits
@@ -164,7 +164,7 @@
 ; CHECK-NEXT: lsl x9, x9, x2
 ; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
 ; CHECK-NEXT: lsr x8, x0, x1
-; CHECK-NEXT: sub x9, x9, #1 // =1
+; CHECK-NEXT: sub x9, x9, #1
 ; CHECK-NEXT: and x0, x9, x8
 ; CHECK-NEXT: ret
 %skip = zext i8 %numskipbits to i64
@@ -182,7 +182,7 @@
 ; CHECK-NEXT: ldr x8, [x0]
 ; CHECK-NEXT: mov w9, #1
 ; CHECK-NEXT: lsl x9, x9, x2
-; CHECK-NEXT: sub x9, x9, #1 // =1
+; CHECK-NEXT: sub x9, x9, #1
 ; CHECK-NEXT: lsr x8, x8, x1
 ; CHECK-NEXT: and x0, x9, x8
 ; CHECK-NEXT: ret
@@ -202,7 +202,7 @@
 ; CHECK-NEXT: // kill: def $w2 killed $w2 def $x2
 ; CHECK-NEXT: lsl x9, x9, x2
 ; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
-; CHECK-NEXT: sub x9, x9, #1 // =1
+; CHECK-NEXT: sub x9, x9, #1
 ; CHECK-NEXT: lsr x8, x8, x1
 ; CHECK-NEXT: and x0, x9, x8
 ; CHECK-NEXT: ret
@@ -222,7 +222,7 @@
 ; CHECK-NEXT: mov w9, #1
 ; CHECK-NEXT: lsl x9, x9, x2
 ; CHECK-NEXT: lsr x8, x0, x1
-; CHECK-NEXT: sub x9, x9, #1 // =1
+; CHECK-NEXT: sub x9, x9, #1
 ; CHECK-NEXT: and x0, x8, x9
 ; CHECK-NEXT: ret
 %shifted = lshr i64 %val, %numskipbits
@@ -241,7 +241,7 @@
 ; CHECK-NEXT: mov w9, #1
 ; CHECK-NEXT: lsl x9, x9, x2
 ; CHECK-NEXT: lsr x8, x0, x1
-; CHECK-NEXT: sub w9, w9, #1 // =1
+; CHECK-NEXT: sub w9, w9, #1
 ; CHECK-NEXT: and w0, w9, w8
 ; CHECK-NEXT: ret
 %shifted = lshr i64 %val, %numskipbits
@@ -259,7 +259,7 @@
 ; CHECK-NEXT: mov w9, #1
 ; CHECK-NEXT: lsl w9, w9, w2
 ; CHECK-NEXT: lsr x8, x0, x1
-; CHECK-NEXT: sub w9, w9, #1 // =1
+; CHECK-NEXT: sub w9, w9, #1
 ; CHECK-NEXT: and w0, w9, w8
 ; CHECK-NEXT: ret
 %shifted = lshr i64 %val, %numskipbits
@@ -278,7 +278,7 @@
 ; CHECK-NEXT: mov w9, #1
 ; CHECK-NEXT: lsl w9, w9, w2
 ; CHECK-NEXT: lsr x8, x0, x1
-; CHECK-NEXT: sub w9, w9, #1 // =1
+; CHECK-NEXT: sub w9, w9, #1
 ; CHECK-NEXT: and w0, w9, w8
 ; CHECK-NEXT: ret
 %shifted = lshr i64 %val, %numskipbits
@@ -974,7 +974,7 @@
 ; CHECK-NEXT: ubfx x8, x8, #21, #10
 ; CHECK-NEXT: lsl x8, x8, #2
 ; CHECK-NEXT: ldr w9, [x0, x8]
-; CHECK-NEXT: add w9, w9, #1 // =1
+; CHECK-NEXT: add w9, w9, #1
 ; CHECK-NEXT: str w9, [x0, x8]
 ; CHECK-NEXT: ret
 %tmp = load i64, i64* %a1, align 8
diff --git a/llvm/test/CodeGen/AArch64/extract-lowbits.ll b/llvm/test/CodeGen/AArch64/extract-lowbits.ll
--- a/llvm/test/CodeGen/AArch64/extract-lowbits.ll
+++ b/llvm/test/CodeGen/AArch64/extract-lowbits.ll
@@ -23,7 +23,7 @@
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: mov w8, #1
 ; CHECK-NEXT: lsl w8, w8, w1
-; CHECK-NEXT: sub w8, w8, #1 // =1
+; CHECK-NEXT: sub w8, w8, #1
 ; CHECK-NEXT: and w0, w8, w0
 ; CHECK-NEXT: ret
 %onebit = shl i32 1, %numlowbits
@@ -37,7 +37,7 @@
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: mov w8, #1
 ; CHECK-NEXT: lsl w8, w8, w1
-; CHECK-NEXT: sub w8, w8, #1 // =1
+; CHECK-NEXT: sub w8, w8, #1
 ; CHECK-NEXT: and w0, w8, w0
 ; CHECK-NEXT: ret
 %conv = zext i8 %numlowbits to i32
@@ -53,7 +53,7 @@
 ; CHECK-NEXT: ldr w8, [x0]
 ; CHECK-NEXT: mov w9, #1
 ; CHECK-NEXT: lsl w9, w9, w1
-; CHECK-NEXT: sub w9, w9, #1 // =1
+; CHECK-NEXT: sub w9, w9, #1
 ; CHECK-NEXT: and w0, w9, w8
 ; CHECK-NEXT: ret
 %val = load i32, i32* %w
@@ -69,7 +69,7 @@
 ; CHECK-NEXT: ldr w8, [x0]
 ; CHECK-NEXT: mov w9, #1
 ; CHECK-NEXT: lsl w9, w9, w1
-; CHECK-NEXT: sub w9, w9, #1 // =1
+; CHECK-NEXT: sub w9, w9, #1
 ; CHECK-NEXT: and w0, w9, w8
 ; CHECK-NEXT: ret
 %val = load i32, i32* %w
@@ -85,7 +85,7 @@
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: mov w8, #1
 ; CHECK-NEXT: lsl w8, w8, w1
-; CHECK-NEXT: sub w8, w8, #1 // =1
+; CHECK-NEXT: sub w8, w8, #1
 ; CHECK-NEXT: and w0, w0, w8
 ; CHECK-NEXT: ret
 %onebit = shl i32 1, %numlowbits
@@ -101,7 +101,7 @@
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: mov w8, #1
 ; CHECK-NEXT: lsl x8, x8, x1
-; CHECK-NEXT: sub x8, x8, #1 // =1
+; CHECK-NEXT: sub x8, x8, #1
 ; CHECK-NEXT: and x0, x8, x0
 ; CHECK-NEXT: ret
 %onebit = shl i64 1, %numlowbits
@@ -116,7 +116,7 @@
 ; CHECK-NEXT: mov w8, #1
 ; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
 ; CHECK-NEXT: lsl x8, x8, x1
-; CHECK-NEXT: sub x8, x8, #1 // =1
+; CHECK-NEXT: sub x8, x8, #1
 ; CHECK-NEXT: and x0, x8, x0
 ; CHECK-NEXT: ret
 %conv = zext i8 %numlowbits to i64
@@ -132,7 +132,7 @@
 ; CHECK-NEXT: ldr x8, [x0]
 ; CHECK-NEXT: mov w9, #1
 ; CHECK-NEXT: lsl x9, x9, x1
-; CHECK-NEXT: sub x9, x9, #1 // =1
+; CHECK-NEXT: sub x9, x9, #1
 ; CHECK-NEXT: and x0, x9, x8
 ; CHECK-NEXT: ret
 %val = load i64, i64* %w
@@ -149,7 +149,7 @@
 ; CHECK-NEXT: mov w9, #1
 ; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
 ; CHECK-NEXT: lsl x9, x9, x1
-; CHECK-NEXT: sub x9, x9, #1 // =1
+; CHECK-NEXT: sub x9, x9, #1
 ; CHECK-NEXT: and x0, x9, x8
 ; CHECK-NEXT: ret
 %val = load i64, i64* %w
@@ -165,7 +165,7 @@
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: mov w8, #1
 ; CHECK-NEXT: lsl x8, x8, x1
-; CHECK-NEXT: sub x8, x8, #1 // =1
+; CHECK-NEXT: sub x8, x8, #1
 ; CHECK-NEXT: and x0, x0, x8
 ; CHECK-NEXT: ret
 %onebit = shl i64 1, %numlowbits
diff --git a/llvm/test/CodeGen/AArch64/fast-isel-branch-cond-split.ll b/llvm/test/CodeGen/AArch64/fast-isel-branch-cond-split.ll
--- a/llvm/test/CodeGen/AArch64/fast-isel-branch-cond-split.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-branch-cond-split.ll
@@ -138,9 +138,9 @@
 define i64 @test_or_unpredictable(i32 %a, i32 %b) {
 ; CHECK-LABEL: test_or_unpredictable:
 ; CHECK: ; %bb.0: ; %bb1
-; CHECK-NEXT: cmp w0, #0 ; =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: cset w8, eq
-; CHECK-NEXT: cmp w1, #0 ; =0
+; CHECK-NEXT: cmp w1, #0
 ; CHECK-NEXT: cset w9, eq
 ; CHECK-NEXT: orr w8, w8, w9
 ; CHECK-NEXT: mov x0, xzr
@@ -171,9 +171,9 @@
 define i64 @test_and_unpredictable(i32 %a, i32 %b) {
 ; CHECK-LABEL: test_and_unpredictable:
 ; CHECK: ; %bb.0: ; %bb1
-; CHECK-NEXT: cmp w0, #0 ; =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: cset w8, ne
-; CHECK-NEXT: cmp w1, #0 ; =0
+; CHECK-NEXT: cmp w1, #0
 ; CHECK-NEXT: cset w9, ne
 ; CHECK-NEXT: and w8, w8, w9
 ; CHECK-NEXT: mov x0, xzr
diff --git a/llvm/test/CodeGen/AArch64/fast-isel-sdiv.ll b/llvm/test/CodeGen/AArch64/fast-isel-sdiv.ll
--- a/llvm/test/CodeGen/AArch64/fast-isel-sdiv.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-sdiv.ll
@@ -14,8 +14,8 @@
 define i32 @sdiv_i32_pos(i32 %a) {
 ; CHECK-LABEL: sdiv_i32_pos:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, #7 // =7
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: add w8, w0, #7
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csel w8, w8, w0, lt
 ; CHECK-NEXT: asr w0, w8, #3
 ; CHECK-NEXT: ret
@@ -26,8 +26,8 @@
 define i32 @sdiv_i32_neg(i32 %a) {
 ; CHECK-LABEL: sdiv_i32_neg:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, #7 // =7
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: add w8, w0, #7
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csel w8, w8, w0, lt
 ; CHECK-NEXT: neg w0, w8, asr #3
 ; CHECK-NEXT: ret
@@ -47,8 +47,8 @@
 define i64 @sdiv_i64_pos(i64 %a) {
 ; CHECK-LABEL: sdiv_i64_pos:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: add x8, x0, #15 // =15
-; CHECK-NEXT: cmp x0, #0 // =0
+; CHECK-NEXT: add x8, x0, #15
+; CHECK-NEXT: cmp x0, #0
 ; CHECK-NEXT: csel x8, x8, x0, lt
 ; CHECK-NEXT: asr x0, x8, #4
 ; CHECK-NEXT: ret
@@ -59,8 +59,8 @@
 define i64 @sdiv_i64_neg(i64 %a) {
 ; CHECK-LABEL: sdiv_i64_neg:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: add x8, x0, #15 // =15
-; CHECK-NEXT: cmp x0, #0 // =0
+; CHECK-NEXT: add x8, x0, #15
+; CHECK-NEXT: cmp x0, #0
 ; CHECK-NEXT: csel x8, x8, x0, lt
 ; CHECK-NEXT: neg x0, x8, asr #4
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll b/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll
--- a/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll
@@ -272,7 +272,7 @@
 define <1 x i32> @test_signed_v1f128_v1i32(<1 x fp128> %f) {
 ; CHECK-LABEL: test_signed_v1f128_v1i32:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #32 // =32
+; CHECK-NEXT: sub sp, sp, #32
 ; CHECK-NEXT: stp x30, x19, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT: .cfi_def_cfa_offset 32
 ; CHECK-NEXT: .cfi_offset w19, -8
@@ -284,7 +284,7 @@
 ; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT: mov w19, w0
 ; CHECK-NEXT: bl __fixtfsi
-; CHECK-NEXT: cmp w19, #0 // =0
+; CHECK-NEXT: cmp w19, #0
 ; CHECK-NEXT: mov w8, #-2147483648
 ; CHECK-NEXT: csel w19, w8, w0, lt
 ; CHECK-NEXT: adrp x8, .LCPI14_1
@@ -292,16 +292,16 @@
 ; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT: bl __gttf2
 ; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: mov w8, #2147483647
 ; CHECK-NEXT: csel w19, w8, w19, gt
 ; CHECK-NEXT: mov v1.16b, v0.16b
 ; CHECK-NEXT: bl __unordtf2
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csel w8, wzr, w19, ne
 ; CHECK-NEXT: ldp x30, x19, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: add sp, sp, #32 // =32
+; CHECK-NEXT: add sp, sp, #32
 ; CHECK-NEXT: ret
 %x = call <1 x i32> @llvm.fptosi.sat.v1f128.v1i32(<1 x fp128> %f)
 ret <1 x i32> %x
@@ -310,7 +310,7 @@
 define <2 x i32> @test_signed_v2f128_v2i32(<2 x fp128> %f) {
 ; CHECK-LABEL: test_signed_v2f128_v2i32:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #112 // =112
+; CHECK-NEXT: sub sp, sp, #112
 ; CHECK-NEXT: str x30, [sp, #64] // 8-byte Folded Spill
 ; CHECK-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill
 ; CHECK-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill
@@ -333,20 +333,20 @@
 ; CHECK-NEXT: adrp x8, .LCPI15_1
 ; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI15_1]
 ; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w19, #0 // =0
+; CHECK-NEXT: cmp w19, #0
 ; CHECK-NEXT: mov w20, #-2147483648
 ; CHECK-NEXT: csel w19, w20, w0, lt
 ; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill
 ; CHECK-NEXT: bl __gttf2
 ; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: mov w21, #2147483647
 ; CHECK-NEXT: csel w19, w21, w19, gt
 ; CHECK-NEXT: mov v1.16b, v0.16b
 ; CHECK-NEXT: bl __unordtf2
 ; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csel w22, wzr, w19, ne
 ; CHECK-NEXT: bl __getf2
 ; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
@@ -354,15 +354,15 @@
 ; CHECK-NEXT: bl __fixtfsi
 ; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w19, #0 // =0
+; CHECK-NEXT: cmp w19, #0
 ; CHECK-NEXT: csel w19, w20, w0, lt
 ; CHECK-NEXT: bl __gttf2
 ; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csel w19, w21, w19, gt
 ; CHECK-NEXT: mov v1.16b, v0.16b
 ; CHECK-NEXT: bl __unordtf2
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csel w8, wzr, w19, ne
 ; CHECK-NEXT: fmov s0, w8
 ; CHECK-NEXT: mov v0.s[1], w22
@@ -370,7 +370,7 @@
 ; CHECK-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload
 ; CHECK-NEXT: ldr x30, [sp, #64] // 8-byte Folded Reload
 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: add sp, sp, #112 // =112
+; CHECK-NEXT: add sp, sp, #112
 ; CHECK-NEXT: ret
 %x = call <2 x i32> @llvm.fptosi.sat.v2f128.v2i32(<2 x fp128> %f)
 ret <2 x i32> %x
@@ -379,7 +379,7 @@
 define <3 x i32> @test_signed_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-LABEL: test_signed_v3f128_v3i32:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #128 // =128
+; CHECK-NEXT: sub sp, sp, #128
 ; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill
 ; CHECK-NEXT: stp x22, x21, [sp, #96] // 16-byte Folded Spill
 ; CHECK-NEXT: stp x20, x19, [sp, #112] // 16-byte Folded Spill
@@ -403,20 +403,20 @@
 ; CHECK-NEXT: adrp x8, .LCPI16_1
 ; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI16_1]
 ; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w19, #0 // =0
+; CHECK-NEXT: cmp w19, #0
 ; CHECK-NEXT: mov w20, #-2147483648
 ; CHECK-NEXT: csel w19, w20, w0, lt
 ; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill
 ; CHECK-NEXT: bl __gttf2
 ; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: mov w21, #2147483647
 ; CHECK-NEXT: csel w19, w21, w19, gt
 ; CHECK-NEXT: mov v1.16b, v0.16b
 ; CHECK-NEXT: bl __unordtf2
 ; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csel w22, wzr, w19, ne
 ; CHECK-NEXT: bl __getf2
 ; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
@@ -424,15 +424,15 @@
 ; CHECK-NEXT: bl __fixtfsi
 ; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w19, #0 // =0
+; CHECK-NEXT: cmp w19, #0
 ; CHECK-NEXT: csel w19, w20, w0, lt
 ; CHECK-NEXT: bl __gttf2
 ; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csel w19, w21, w19, gt
 ; CHECK-NEXT: mov v1.16b, v0.16b
 ; CHECK-NEXT: bl __unordtf2
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csel w8, wzr, w19, ne
 ; CHECK-NEXT: fmov s0, w8
 ; CHECK-NEXT: mov v0.s[1], w22
@@ -445,22 +445,22 @@
 ; CHECK-NEXT: bl __fixtfsi
 ; CHECK-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload
 ; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w19, #0 // =0
+; CHECK-NEXT: cmp w19, #0
 ; CHECK-NEXT: csel w19, w20, w0, lt
 ; CHECK-NEXT: bl __gttf2
 ; CHECK-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csel w19, w21, w19, gt
 ; CHECK-NEXT: mov v1.16b, v0.16b
 ; CHECK-NEXT: bl __unordtf2
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-NEXT: csel w8, wzr, w19, ne
 ; CHECK-NEXT: ldp x20, x19, [sp, #112] // 16-byte Folded Reload
 ; CHECK-NEXT: ldp x22, x21, [sp, #96] // 16-byte Folded Reload
 ; CHECK-NEXT: ldr x30, [sp, #80] // 8-byte Folded Reload
 ; CHECK-NEXT: mov v0.s[2], w8
-; CHECK-NEXT: add sp, sp, #128 // =128
+; CHECK-NEXT: add sp, sp, #128
 ; CHECK-NEXT: ret
 %x = call <3 x i32> @llvm.fptosi.sat.v3f128.v3i32(<3 x fp128> %f)
 ret <3 x i32> %x
@@ -469,7 +469,7 @@
 define <4 x i32> @test_signed_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-LABEL: test_signed_v4f128_v4i32:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #144 // =144
+; CHECK-NEXT: sub sp, sp, #144
 ; CHECK-NEXT: str x30, [sp, #96] // 8-byte Folded Spill
 ; CHECK-NEXT: stp x22, x21, [sp, #112] // 16-byte Folded Spill
 ; CHECK-NEXT: stp x20, x19, [sp, #128] // 16-byte Folded Spill
@@ -494,19 +494,19 @@
 ; CHECK-NEXT: adrp x8, .LCPI17_1
 ; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI17_1]
 ; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w19, #0 // =0
+; CHECK-NEXT: cmp w19, #0
 ; CHECK-NEXT: mov w20, #-2147483648
 ; CHECK-NEXT: csel w19, w20, w0, lt
 ; CHECK-NEXT: str q1, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT: bl __gttf2
 ; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: mov w21, #2147483647
 ; CHECK-NEXT: csel w19, w21, w19, gt
 ; CHECK-NEXT: mov v1.16b, v0.16b
 ; CHECK-NEXT: bl __unordtf2
 ; CHECK-NEXT: ldp q1, q0, [sp, #32] // 32-byte Folded Reload
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csel w22, wzr, w19, ne
 ; CHECK-NEXT: bl __getf2
 ; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
@@ -514,15 +514,15 @@
 ; CHECK-NEXT: bl __fixtfsi
 ; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w19, #0 // =0
+; CHECK-NEXT: cmp w19, #0
 ; CHECK-NEXT: csel w19, w20, w0, lt
 ; CHECK-NEXT: bl __gttf2
 ; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csel w19, w21, w19, gt
 ; CHECK-NEXT: mov v1.16b, v0.16b
 ; CHECK-NEXT: bl __unordtf2
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csel w8, wzr, w19, ne
 ; CHECK-NEXT: fmov s0, w8
 ; CHECK-NEXT: mov v0.s[1], w22
@@ -535,16 +535,16 @@
 ; CHECK-NEXT: bl __fixtfsi
 ; CHECK-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload
 ; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w19, #0 // =0
+; CHECK-NEXT: cmp w19, #0
 ; CHECK-NEXT: csel w19, w20, w0, lt
 ; CHECK-NEXT: bl __gttf2
 ; CHECK-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csel w19, w21, w19, gt
 ; CHECK-NEXT: mov v1.16b, v0.16b
 ; CHECK-NEXT: bl __unordtf2
 ; CHECK-NEXT: ldp q1, q0, [sp, #32] // 32-byte Folded Reload
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csel w8, wzr, w19, ne
 ; CHECK-NEXT: mov v0.s[2], w8
 ; CHECK-NEXT: str q0, [sp, #48] // 16-byte Folded Spill
@@ -555,22 +555,22 @@
 ; CHECK-NEXT: bl __fixtfsi
 ; CHECK-NEXT: ldr q0, [sp, #80] // 16-byte Folded Reload
 ; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w19, #0 // =0
+; CHECK-NEXT: cmp w19, #0
 ; CHECK-NEXT: csel w19, w20, w0, lt
 ; CHECK-NEXT: bl __gttf2
 ; CHECK-NEXT: ldr q0, [sp, #80] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csel w19, w21, w19, gt
 ; CHECK-NEXT: mov v1.16b, v0.16b
 ; CHECK-NEXT: bl __unordtf2
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-NEXT: csel w8, wzr, w19, ne
 ; CHECK-NEXT: ldp x20, x19, [sp, #128] // 16-byte Folded Reload
 ; CHECK-NEXT: ldp x22, x21, [sp, #112] // 16-byte Folded Reload
 ; CHECK-NEXT: ldr x30, [sp, #96] // 8-byte Folded Reload
 ; CHECK-NEXT: mov v0.s[3], w8
-; CHECK-NEXT: add sp, sp, #144 // =144
+; CHECK-NEXT: add sp, sp, #144
 ; CHECK-NEXT: ret
 %x = call <4 x i32> @llvm.fptosi.sat.v4f128.v4i32(<4 x fp128> %f)
 ret <4 x i32> %x
@@ -1004,7 +1004,7 @@
 define <2 x i100> @test_signed_v2f32_v2i100(<2 x float> %f) {
 ; CHECK-LABEL: test_signed_v2f32_v2i100:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #80 // =80
+; CHECK-NEXT: sub sp, sp, #80
 ; CHECK-NEXT: str d10, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill
 ; CHECK-NEXT: str x30, [sp, #40] // 8-byte Folded Spill
@@ -1062,7 +1062,7 @@
 ; CHECK-NEXT: fmov d0, x9
 ; CHECK-NEXT: mov v0.d[1], x1
 ; CHECK-NEXT: fmov x0, d0
-; CHECK-NEXT: add sp, sp, #80 // =80
+; CHECK-NEXT: add sp, sp, #80
 ; CHECK-NEXT: ret
 %x = call <2 x i100> @llvm.fptosi.sat.v2f32.v2i100(<2 x float> %f)
 ret <2 x i100> %x
@@ -1071,7 +1071,7 @@
 define <2 x i128> @test_signed_v2f32_v2i128(<2 x float> %f) {
 ; CHECK-LABEL: test_signed_v2f32_v2i128:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #80 // =80
+; CHECK-NEXT: sub sp, sp, #80
 ; CHECK-NEXT: str d10, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill
 ; CHECK-NEXT: str x30, [sp, #40] // 8-byte Folded Spill
@@ -1129,7 +1129,7 @@
 ; CHECK-NEXT: fmov d0, x9
 ; CHECK-NEXT: mov v0.d[1], x1
 ; CHECK-NEXT: fmov x0, d0
-; CHECK-NEXT: add sp, sp, #80 // =80
+; CHECK-NEXT: add sp, sp, #80
 ; CHECK-NEXT: ret
 %x = call <2 x i128> @llvm.fptosi.sat.v2f32.v2i128(<2 x float> %f)
 ret <2 x i128> %x
@@ -1337,7 +1337,7 @@
 define <2 x i100> @test_signed_v2f64_v2i100(<2 x double> %f) {
 ; CHECK-LABEL: test_signed_v2f64_v2i100:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #80 // =80
+; CHECK-NEXT: sub sp, sp, #80
 ; CHECK-NEXT: str d10, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill
 ; CHECK-NEXT: str x30, [sp, #40] // 8-byte Folded Spill
@@ -1394,7 +1394,7 @@
 ; CHECK-NEXT: fmov d0, x9
 ; CHECK-NEXT: mov v0.d[1], x1
 ; CHECK-NEXT: fmov x0, d0
-; CHECK-NEXT: add sp, sp, #80 // =80
+; CHECK-NEXT: add sp, sp, #80
 ; CHECK-NEXT: ret
 %x = call <2 x i100> @llvm.fptosi.sat.v2f64.v2i100(<2 x double> %f)
 ret <2 x i100> %x
@@ -1403,7 +1403,7 @@
 define <2 x i128> @test_signed_v2f64_v2i128(<2 x double> %f) {
 ; CHECK-LABEL: test_signed_v2f64_v2i128:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #80 // =80
+; CHECK-NEXT: sub sp, sp, #80
 ; CHECK-NEXT: str d10, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill
 ; CHECK-NEXT: str x30, [sp, #40] // 8-byte Folded Spill
@@ -1460,7 +1460,7 @@
 ; CHECK-NEXT: fmov d0, x9
 ; CHECK-NEXT: mov v0.d[1], x1
 ; CHECK-NEXT: fmov x0, d0
-; CHECK-NEXT: add sp, sp, #80 // =80
+; CHECK-NEXT: add sp, sp, #80
 ; CHECK-NEXT: ret
 %x = call <2 x i128> @llvm.fptosi.sat.v2f64.v2i128(<2 x double> %f)
 ret <2 x i128> %x
@@ -1808,7 +1808,7 @@
 define <4 x i100> @test_signed_v4f16_v4i100(<4 x half> %f) {
 ; CHECK-LABEL: test_signed_v4f16_v4i100:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #112 // =112
+; CHECK-NEXT: sub sp, sp, #112
 ; CHECK-NEXT: str d10, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill
 ; CHECK-NEXT: str x30, [sp, #40] // 8-byte Folded Spill
@@ -1907,7 +1907,7 @@
 ; CHECK-NEXT: fmov d0, x9
 ; CHECK-NEXT: mov v0.d[1], x1
 ; CHECK-NEXT: fmov x0, d0
-; CHECK-NEXT: add sp, sp, #112 // =112
+; CHECK-NEXT: add sp, sp, #112
 ; CHECK-NEXT: ret
 %x = call <4 x i100> @llvm.fptosi.sat.v4f16.v4i100(<4 x half> %f)
 ret <4 x i100> %x
@@ -1916,7 +1916,7 @@
 define <4 x i128> @test_signed_v4f16_v4i128(<4 x half> %f) {
 ; CHECK-LABEL: test_signed_v4f16_v4i128:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #112 // =112
+; CHECK-NEXT: sub sp, sp, #112
 ; CHECK-NEXT: str d10, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT: stp d9, d8, [sp, #24] // 16-byte Folded Spill
 ; CHECK-NEXT: str x30, [sp, #40] // 8-byte Folded Spill
@@ -2015,7 +2015,7 @@
 ; CHECK-NEXT: fmov d0, x9
 ; CHECK-NEXT: mov v0.d[1], x1
 ; CHECK-NEXT: fmov x0, d0
-; CHECK-NEXT: add sp, sp, #112 // =112
+; CHECK-NEXT: add sp, sp, #112
 ; CHECK-NEXT: ret
 %x = call <4 x i128> @llvm.fptosi.sat.v4f16.v4i128(<4 x half> %f)
 ret <4 x i128> %x
diff --git a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
--- a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
@@ -272,7 +272,7 @@
 define <1 x i32> @test_unsigned_v1f128_v1i32(<1 x fp128> %f) {
 ; CHECK-LABEL: test_unsigned_v1f128_v1i32:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #32 // =32
+; CHECK-NEXT: sub sp, sp, #32
 ; CHECK-NEXT: stp x30, x19, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT: .cfi_def_cfa_offset 32
 ; CHECK-NEXT: .cfi_offset w19, -8
@@ -287,14 +287,14 @@
 ; CHECK-NEXT: adrp x8, .LCPI14_1
 ; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI14_1]
 ; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w19, #0 // =0
+; CHECK-NEXT: cmp w19, #0
 ; CHECK-NEXT: csel w19, wzr, w0, lt
 ; CHECK-NEXT: bl __gttf2
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csinv w8, w19, wzr, le
 ; CHECK-NEXT: ldp x30, x19, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: add sp, sp, #32 // =32
+; CHECK-NEXT: add sp, sp, #32
 ; CHECK-NEXT: ret
 %x = call <1 x i32> @llvm.fptoui.sat.v1f128.v1i32(<1 x fp128> %f)
 ret <1 x i32> %x
@@ -303,7 +303,7 @@
 define <2 x i32> @test_unsigned_v2f128_v2i32(<2 x fp128> %f) {
 ; CHECK-LABEL: test_unsigned_v2f128_v2i32:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #96 // =96
+; CHECK-NEXT: sub sp, sp, #96
 ; CHECK-NEXT: str x30, [sp, #64] // 8-byte Folded Spill
 ; CHECK-NEXT: stp x20, x19, [sp, #80] // 16-byte Folded Spill
 ; CHECK-NEXT: .cfi_def_cfa_offset 96
@@ -323,13 +323,13 @@
 ; CHECK-NEXT: adrp x8, .LCPI15_1
 ; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI15_1]
 ; CHECK-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w19, #0 // =0
+; CHECK-NEXT: cmp w19, #0
 ; CHECK-NEXT: csel w19, wzr, w0, lt
 ; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill
 ; CHECK-NEXT: bl __gttf2
 ; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csinv w20, w19, wzr, le
 ; CHECK-NEXT: bl __getf2
 ; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
@@ -337,17 +337,17 @@
 ; CHECK-NEXT: bl __fixunstfsi
 ; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w19, #0 // =0
+; CHECK-NEXT: cmp w19, #0
 ; CHECK-NEXT: csel w19, wzr, w0, lt
 ; CHECK-NEXT: bl __gttf2
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csinv w8, w19, wzr, le
 ; CHECK-NEXT: fmov s0, w8
 ; CHECK-NEXT: mov v0.s[1], w20
 ; CHECK-NEXT: ldp x20, x19, [sp, #80] // 16-byte Folded Reload
 ; CHECK-NEXT: ldr x30, [sp, #64] // 8-byte Folded Reload
 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: add sp, sp, #96 // =96
+; CHECK-NEXT: add sp, sp, #96
 ; CHECK-NEXT: ret
 %x = call <2 x i32> @llvm.fptoui.sat.v2f128.v2i32(<2 x fp128> %f)
 ret <2 x i32> %x
@@ -356,7 +356,7 @@
 define <3 x i32> @test_unsigned_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-LABEL: test_unsigned_v3f128_v3i32:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #112 // =112
+; CHECK-NEXT: sub sp, sp, #112
 ; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill
 ; CHECK-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill
 ; CHECK-NEXT: .cfi_def_cfa_offset 112
@@ -377,12 +377,12 @@
 ; CHECK-NEXT: adrp x8, .LCPI16_1
 ; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI16_1]
 ; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w19, #0 // =0
+; CHECK-NEXT: cmp w19, #0
 ; CHECK-NEXT: csel w19, wzr, w0, lt
 ; CHECK-NEXT: str q1, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT: bl __gttf2
 ; CHECK-NEXT: ldp q1, q0, [sp, #32] // 32-byte Folded Reload
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csinv w20, w19, wzr, le
 ; CHECK-NEXT: bl __getf2
 ; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
@@ -390,10 +390,10 @@
 ; CHECK-NEXT: bl __fixunstfsi
 ; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w19, #0 // =0
+; CHECK-NEXT: cmp w19, #0
 ; CHECK-NEXT: csel w19, wzr, w0, lt
 ; CHECK-NEXT: bl __gttf2
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csinv w8, w19, wzr, le
 ; CHECK-NEXT: fmov s0, w8
 ; CHECK-NEXT: mov v0.s[1], w20
@@ -406,16 +406,16 @@
 ; CHECK-NEXT: bl __fixunstfsi
 ; CHECK-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload
 ; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w19, #0 // =0
+; CHECK-NEXT: cmp w19, #0
 ; CHECK-NEXT: csel w19, wzr, w0, lt
 ; CHECK-NEXT: bl __gttf2
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-NEXT: csinv w8, w19, wzr, le
 ; CHECK-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload
 ; CHECK-NEXT: ldr x30, [sp, #80] // 8-byte Folded Reload
 ; CHECK-NEXT: mov v0.s[2], w8
-; CHECK-NEXT: add sp, sp, #112 // =112
+; CHECK-NEXT: add sp, sp, #112
 ; CHECK-NEXT: ret
 %x = call <3 x i32> @llvm.fptoui.sat.v3f128.v3i32(<3 x fp128> %f)
 ret <3 x i32> %x
@@ -424,7 +424,7 @@
 define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-LABEL: test_unsigned_v4f128_v4i32:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #128 // =128
+; CHECK-NEXT: sub sp, sp, #128
 ; CHECK-NEXT: str x30, [sp, #96] // 8-byte Folded Spill
 ; CHECK-NEXT: stp x20, x19, [sp, #112] // 16-byte Folded Spill
 ; CHECK-NEXT: .cfi_def_cfa_offset 128
@@ -446,13 +446,13 @@
 ; CHECK-NEXT: adrp x8, .LCPI17_1
 ; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI17_1]
 ; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w19, #0 // =0
+; CHECK-NEXT: cmp w19, #0
 ; CHECK-NEXT: csel w19, wzr, w0, lt
 ; CHECK-NEXT: str q1, [sp, #48] // 16-byte Folded Spill
 ; CHECK-NEXT: bl __gttf2
 ; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT: ldr q1, [sp, #64] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csinv w20, w19, wzr, le
 ; CHECK-NEXT: bl __getf2
 ; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
@@ -460,10 +460,10 @@
 ; CHECK-NEXT: bl __fixunstfsi
 ; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT: ldr q1, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w19, #0 // =0
+; CHECK-NEXT: cmp w19, #0
 ; CHECK-NEXT: csel w19, wzr, w0, lt
 ; CHECK-NEXT: bl __gttf2
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csinv w8, w19, wzr, le
 ; CHECK-NEXT: fmov s0, w8
 ; CHECK-NEXT: mov v0.s[1], w20
@@ -475,11 +475,11 @@
 ; CHECK-NEXT: mov w19, w0
 ; CHECK-NEXT: bl __fixunstfsi
 ; CHECK-NEXT: ldp q0, q1, [sp, #32] // 32-byte Folded Reload
-; CHECK-NEXT: cmp w19, #0 // =0
+; CHECK-NEXT: cmp w19, #0
 ; CHECK-NEXT: csel w19, wzr, w0, lt
 ; CHECK-NEXT: bl __gttf2
 ; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: csinv w8, w19, wzr, le
 ; CHECK-NEXT: mov v0.s[2], w8
 ; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
@@ -490,16 +490,16 @@
 ; CHECK-NEXT: bl __fixunstfsi
 ; CHECK-NEXT: ldr q0, [sp, #80] // 16-byte Folded Reload
 ; CHECK-NEXT: ldr q1, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: cmp w19, #0 // =0
+; CHECK-NEXT: cmp w19, #0
 ; CHECK-NEXT: csel w19, wzr, w0, lt
 ; CHECK-NEXT: bl __gttf2
-; CHECK-NEXT: cmp w0, #0 // =0
+; CHECK-NEXT: cmp w0, #0
 ; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT: csinv w8, w19, wzr, le
 ; CHECK-NEXT: ldp x20, x19, [sp, #112] // 16-byte Folded Reload
 ; CHECK-NEXT: ldr x30, [sp, #96] // 8-byte Folded Reload
 ; CHECK-NEXT: mov v0.s[3], w8
-; CHECK-NEXT: add sp, sp, #128 // =128
+; CHECK-NEXT: add sp, sp, #128
 ; CHECK-NEXT: ret
 %x = call <4 x i32> @llvm.fptoui.sat.v4f128.v4i32(<4 x fp128> %f)
 ret <4 x i32> %x
@@ -902,7 +902,7 @@
 define <2 x i100> @test_unsigned_v2f32_v2i100(<2 x float> %f) {
 ; CHECK-LABEL: test_unsigned_v2f32_v2i100:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #64 // =64
+; CHECK-NEXT: sub sp, sp, #64
 ; CHECK-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT: stp x30, x21, [sp, #32] // 16-byte Folded Spill
 ; CHECK-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
@@ -945,7 +945,7 @@
 ; CHECK-NEXT: fmov d0, x8
 ; CHECK-NEXT: mov v0.d[1], x1
 ; CHECK-NEXT: fmov x0, d0
-; CHECK-NEXT: add sp, sp, #64 // =64
+; CHECK-NEXT: add sp, sp, #64
 ; CHECK-NEXT: ret
 %x = call <2 x i100> @llvm.fptoui.sat.v2f32.v2i100(<2 x float> %f)
 ret <2 x i100> %x
@@ -954,7 +954,7 @@
 define <2 x i128> @test_unsigned_v2f32_v2i128(<2 x float> %f) {
 ; CHECK-LABEL: test_unsigned_v2f32_v2i128:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #64 // =64
+; CHECK-NEXT: sub sp, sp, #64
 ; CHECK-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
 ; CHECK-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
@@ -995,7 +995,7 @@
 ; CHECK-NEXT: fmov d0, x8
 ; CHECK-NEXT: mov v0.d[1], x1
 ; CHECK-NEXT: fmov x0, d0
-; CHECK-NEXT: add sp, sp, #64 // =64
+; CHECK-NEXT: add sp, sp, #64
 ; CHECK-NEXT: ret
 %x = call <2 x i128> @llvm.fptoui.sat.v2f32.v2i128(<2 x float> %f)
 ret <2 x i128> %x
@@ -1174,7 +1174,7 @@
 define <2 x i100> @test_unsigned_v2f64_v2i100(<2 x double> %f) {
 ; CHECK-LABEL: test_unsigned_v2f64_v2i100:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #64 // =64
+; CHECK-NEXT: sub sp, sp, #64
 ; CHECK-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT: stp x30, x21, [sp, #32] // 16-byte Folded Spill
 ; CHECK-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
@@ -1216,7 +1216,7 @@
 ; CHECK-NEXT: fmov d0, x8
 ; CHECK-NEXT: mov v0.d[1], x1
 ; CHECK-NEXT: fmov x0, d0
-; CHECK-NEXT: add sp, sp, #64 // =64
+; CHECK-NEXT: add sp, sp, #64
 ; CHECK-NEXT: ret
 %x = call <2 x i100> @llvm.fptoui.sat.v2f64.v2i100(<2 x double> %f)
 ret <2 x i100> %x
@@ -1225,7 +1225,7 @@
 define <2 x i128> @test_unsigned_v2f64_v2i128(<2 x double> %f) {
 ; CHECK-LABEL: test_unsigned_v2f64_v2i128:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #64 // =64
+; CHECK-NEXT: sub sp, sp, #64
 ; CHECK-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
 ; CHECK-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
@@ -1265,7 +1265,7 @@
 ; CHECK-NEXT: fmov d0, x8
 ; CHECK-NEXT: mov v0.d[1], x1
 ; CHECK-NEXT: fmov x0, d0
-; CHECK-NEXT: add sp, sp, #64 // =64
+; CHECK-NEXT: add sp, sp, #64
 ; CHECK-NEXT: ret
 %x = call <2 x i128> @llvm.fptoui.sat.v2f64.v2i128(<2 x double> %f)
 ret <2 x i128> %x
@@ -1558,7 +1558,7 @@
 define <4 x i100> @test_unsigned_v4f16_v4i100(<4 x half> %f) {
 ; CHECK-LABEL: test_unsigned_v4f16_v4i100:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #96 // =96
+; CHECK-NEXT: sub sp, sp, #96
 ; CHECK-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT: stp x30, x25, [sp, #32] // 16-byte Folded Spill
 ; CHECK-NEXT: stp x24, x23, [sp, #48] // 16-byte Folded Spill
@@ -1636,7 +1636,7 @@
 ; CHECK-NEXT: fmov d0, x8
 ; CHECK-NEXT: mov v0.d[1], x1
 ; CHECK-NEXT: fmov x0, d0
-; CHECK-NEXT: add sp, sp, #96 // =96
+; CHECK-NEXT: add sp, sp, #96
 ; CHECK-NEXT: ret
 %x = call <4 x i100> @llvm.fptoui.sat.v4f16.v4i100(<4 x half> %f)
 ret <4 x i100> %x
@@ -1645,7 +1645,7 @@
 define <4 x i128> @test_unsigned_v4f16_v4i128(<4 x half> %f) {
 ; CHECK-LABEL: test_unsigned_v4f16_v4i128:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #96 // =96
+; CHECK-NEXT: sub sp, sp, #96
 ; CHECK-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
 ; CHECK-NEXT: stp x24, x23, [sp, #48] // 16-byte Folded Spill
@@ -1721,7 +1721,7 @@
 ; CHECK-NEXT: fmov d0, x8
 ; CHECK-NEXT: mov v0.d[1], x1
 ; CHECK-NEXT: fmov x0, d0
-; CHECK-NEXT: add sp, sp, #96 // =96
+; CHECK-NEXT: add sp, sp, #96
 ; CHECK-NEXT: ret
 %x = call <4 x i128> @llvm.fptoui.sat.v4f16.v4i128(<4 x half> %f)
 ret <4 x i128> %x
diff --git a/llvm/test/CodeGen/AArch64/funnel-shift.ll b/llvm/test/CodeGen/AArch64/funnel-shift.ll
--- a/llvm/test/CodeGen/AArch64/funnel-shift.ll
+++ b/llvm/test/CodeGen/AArch64/funnel-shift.ll
@@ -193,7 +193,7 @@
 ; CHECK-NEXT: lsr x8, x8, #5
 ; CHECK-NEXT: msub w8, w8, w9, w2
 ; CHECK-NEXT: lsl x10, x1, #27
-; CHECK-NEXT: add w8, w8, #27 // =27
+; CHECK-NEXT: add w8, w8, #27
 ; CHECK-NEXT: lsr x9, x10, x8
 ; CHECK-NEXT: mvn w8, w8
 ; CHECK-NEXT: lsl x10, x0, #1
diff --git a/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll b/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
--- a/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
@@ -301,7 +301,7 @@
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: mov w8, #1
 ; CHECK-NEXT: lsr w8, w8, w0
-; CHECK-NEXT: cmp w8, #0 // =0
+; CHECK-NEXT: cmp w8, #0
 ; CHECK-NEXT: cset w0, eq
 ; CHECK-NEXT: ret
 %t0 = lshr i32 1, %y
@@ -328,7 +328,7 @@
 ; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
 ; CHECK-NEXT: lsr w8, w8, w1
 ; CHECK-NEXT: and w8, w8, w0
-; CHECK-NEXT: cmp w8, #1 // =1
+; CHECK-NEXT: cmp w8, #1
 ; CHECK-NEXT: cset w0, eq
 ; CHECK-NEXT: ret
 %t0 = lshr i8 128, %y
diff --git a/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll b/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
--- a/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
@@ -339,7 +339,7 @@
 ; CHECK-NEXT: lsl w8, w8, w1
 ; CHECK-NEXT: and w8, w8, w0
 ; CHECK-NEXT: and w8, w8, #0x80
-; CHECK-NEXT: cmp w8, #1 // =1
+; CHECK-NEXT: cmp w8, #1
 ; CHECK-NEXT: cset w0, eq
 ; CHECK-NEXT: ret
 %t0 = shl i8 128, %y
diff --git a/llvm/test/CodeGen/AArch64/i128_volatile_load_store.ll b/llvm/test/CodeGen/AArch64/i128_volatile_load_store.ll
--- a/llvm/test/CodeGen/AArch64/i128_volatile_load_store.ll
+++ b/llvm/test/CodeGen/AArch64/i128_volatile_load_store.ll
@@ -38,11 +38,11 @@
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: adrp x8, x
 ; CHECK-NEXT: add x8, x8, :lo12:x
-; CHECK-NEXT: add x8, x8, #512 // =512
+; CHECK-NEXT: add x8, x8, #512
 ; CHECK-NEXT: ldp x8, x9, [x8]
 ; CHECK-NEXT: adrp x10, y
 ; CHECK-NEXT: add x10, x10, :lo12:y
-; CHECK-NEXT: add x10, x10, #512 // =512
+; CHECK-NEXT: add x10, x10, #512
 ; CHECK-NEXT: stp x8, x9, [x10]
 ; CHECK-NEXT: ret
 %tmp = load volatile i128, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @x to i8*), i64 512) to i128*)
@@ -70,11 +70,11 @@
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: adrp x8, x
 ; CHECK-NEXT: add x8, x8, :lo12:x
-; CHECK-NEXT: sub x8, x8, #520 // =520
+; CHECK-NEXT: sub x8, x8, #520
 ; CHECK-NEXT: ldp x8, x9, [x8]
 ; CHECK-NEXT: adrp x10, y
 ; CHECK-NEXT: add x10, x10, :lo12:y
-; CHECK-NEXT: sub x10, x10, #520 // =520
+; CHECK-NEXT: sub x10, x10, #520
 ; CHECK-NEXT: stp x8, x9, [x10]
 ; CHECK-NEXT: ret
 %tmp = load volatile i128, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @x to i8*), i64 -520) to i128*)
@@ -87,11 +87,11 @@
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: adrp x8, x
 ; CHECK-NEXT: add x8, x8, :lo12:x
-; CHECK-NEXT: sub x8, x8, #520 // =520
+; CHECK-NEXT: sub x8, x8, #520
 ; CHECK-NEXT: ldp x8, x9, [x8]
 ; CHECK-NEXT: adrp x10, y
 ; CHECK-NEXT: add x10, x10, :lo12:y
-; CHECK-NEXT: sub x10, x10, #520 // =520
+; CHECK-NEXT: sub x10, x10, #520
 ; CHECK-NEXT: stp x8, x9, [x10]
 ; CHECK-NEXT: ret
 %tmp = load volatile i128, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @x to i8*), i64 -520) to i128*)
@@ -104,11 +104,11 @@
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: adrp x8, x
 ; CHECK-NEXT: add x8, x8, :lo12:x
-; CHECK-NEXT: add x8, x8, #503 // =503
+; CHECK-NEXT: add x8, x8, #503
 ; CHECK-NEXT: ldp x8, x9, [x8]
 ; CHECK-NEXT: adrp x10, y
 ; CHECK-NEXT: add x10, x10, :lo12:y
-; CHECK-NEXT: add x10, x10, #503 // =503
+; CHECK-NEXT: add x10, x10, #503
 ; CHECK-NEXT: stp x8, x9, [x10]
 ; CHECK-NEXT: ret
 %tmp = load volatile i128, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @x to i8*), i64 503) to i128*)
diff --git a/llvm/test/CodeGen/AArch64/implicit-null-check.ll b/llvm/test/CodeGen/AArch64/implicit-null-check.ll
--- a/llvm/test/CodeGen/AArch64/implicit-null-check.ll
+++ b/llvm/test/CodeGen/AArch64/implicit-null-check.ll
@@ -284,7 +284,7 @@
 ; CHECK-NEXT: // %bb.1: // %not_null
 ; CHECK-NEXT: add w9, w0, w1
 ; CHECK-NEXT: add w8, w9, w8
-; CHECK-NEXT: add w0, w8, #4 // =4
+; CHECK-NEXT: add w0, w8, #4
 ; CHECK-NEXT: ret
 ; CHECK-NEXT: .LBB11_2:
 ; CHECK-NEXT: mov w0, #42
diff --git a/llvm/test/CodeGen/AArch64/inc-of-add.ll b/llvm/test/CodeGen/AArch64/inc-of-add.ll
--- a/llvm/test/CodeGen/AArch64/inc-of-add.ll
+++ b/llvm/test/CodeGen/AArch64/inc-of-add.ll
@@ -10,7 +10,7 @@
 ; CHECK-LABEL: scalar_i8:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: add w8, w0, w1
-; CHECK-NEXT: add w0, w8, #1 // =1
+; CHECK-NEXT: add w0, w8, #1
 ; CHECK-NEXT: ret
 %t0 = add i8 %x, 1
 %t1 = add i8 %y, %t0
@@ -21,7 +21,7 @@
 ; CHECK-LABEL: scalar_i16:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: add w8, w0, w1
-; CHECK-NEXT: add w0, w8, #1 // =1
+; CHECK-NEXT: add w0, w8, #1
 ; CHECK-NEXT: ret
 %t0 = add i16 %x, 1
 %t1 = add i16 %y, %t0
@@ -32,7 +32,7 @@
 ; CHECK-LABEL: scalar_i32:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: add w8, w0, w1
-; CHECK-NEXT: add w0, w8, #1 // =1
+; CHECK-NEXT: add w0, w8, #1
 ; CHECK-NEXT: ret
 %t0 = add i32 %x, 1
 %t1 = add i32 %y, %t0
@@ -43,7 +43,7 @@
 ; CHECK-LABEL: scalar_i64:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: add x8, x0, x1
-; CHECK-NEXT: add x0, x8, #1 // =1
+; CHECK-NEXT: add x0, x8, #1
 ; CHECK-NEXT: ret
 %t0 = add i64 %x, 1
 %t1 = add i64 %y, %t0
diff --git a/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll b/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
--- a/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
@@ -101,9 +101,9 @@
 ; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
 ; CHECK-NEXT: ldr d1, [x1]
 ; CHECK-NEXT: cnth x9
-; CHECK-NEXT: sub x9, x9, #8 // =8
+; CHECK-NEXT: sub x9, x9, #8
 ; CHECK-NEXT: mov w8, #8
-; CHECK-NEXT: cmp x9, #8 // =8
+; CHECK-NEXT: cmp x9, #8
 ; CHECK-NEXT: csel x8, x9, x8, lo
 ; CHECK-NEXT: lsl x8, x8, #1
 ; CHECK-NEXT: st1h { z0.h }, p0, [sp]
@@ -150,9 +150,9 @@
 ; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
 ; CHECK-NEXT: ldr d1, [x1]
 ; CHECK-NEXT: cntw x9
-; CHECK-NEXT: sub x9, x9, #4 // =4
+; CHECK-NEXT: sub x9, x9, #4
 ; CHECK-NEXT: mov w8, #4
-; CHECK-NEXT: cmp x9, #4 // =4
+; CHECK-NEXT: cmp x9, #4
 ; CHECK-NEXT: csel x8, x9, x8, lo
 ; CHECK-NEXT: lsl x8, x8, #2
 ; CHECK-NEXT: st1w { z0.s }, p0, [sp]
@@ -199,9 +199,9 @@
 ; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
 ; CHECK-NEXT: ldr d1, [x1]
 ; CHECK-NEXT: cntd x9
-; CHECK-NEXT: sub x9, x9, #2 // =2
+; CHECK-NEXT: sub x9, x9, #2
 ; CHECK-NEXT: mov w8, #2
-; CHECK-NEXT: cmp x9, #2 // =2
+; CHECK-NEXT: cmp x9, #2
 ; CHECK-NEXT: csel x8, x9, x8, lo
 ; CHECK-NEXT: lsl x8, x8, #3
 ; CHECK-NEXT: st1d { z0.d }, p0, [sp]
@@ -228,10 +228,10 @@
 ; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
 ; CHECK-NEXT: ld1w { z1.s }, p1/z, [x1]
 ; CHECK-NEXT: cntd x8
-; CHECK-NEXT: subs x8, x8, #8 // =8
+; CHECK-NEXT: subs x8, x8, #8
 ; CHECK-NEXT: csel x8, xzr, x8, lo
 ; CHECK-NEXT: mov w9, #8
-; CHECK-NEXT: cmp x8, #8 // =8
+; CHECK-NEXT: cmp x8, #8
 ; CHECK-NEXT: ptrue p1.d, vl8
 ; CHECK-NEXT: csel x8, x8, x9, lo
 ; CHECK-NEXT: st1d { z0.d }, p0, [sp]
diff --git a/llvm/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll b/llvm/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll
--- a/llvm/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll
+++ b/llvm/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll
@@ -273,7 +273,7 @@
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: add w8, w0, w1
 ; CHECK-NEXT: and w8, w8, #0xffff
-; CHECK-NEXT: cmp w8, #255 // =255
+; CHECK-NEXT: cmp w8, #255
 ; CHECK-NEXT: cset w0, hi
 ; CHECK-NEXT: ret
 %tmp0 = add i16 %x, %y
@@ -285,7 +285,7 @@
 define i1 @add_ugecmp_bad_i16_i8_cmp(i16 %x, i16 %y) nounwind {
 ; CHECK-LABEL: add_ugecmp_bad_i16_i8_cmp:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, #128 // =128
+; CHECK-NEXT: add w8, w0, #128
 ; CHECK-NEXT: and w8, w8, #0xffff
 ; CHECK-NEXT: cmp w8, w1, uxth
 ; CHECK-NEXT: cset w0, hs
@@ -299,9 +299,9 @@
 define i1 @add_ugecmp_bad_i8_i16(i16 %x) nounwind {
 ; CHECK-LABEL: add_ugecmp_bad_i8_i16:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, #128 // =128
+; CHECK-NEXT: add w8, w0, #128
 ; CHECK-NEXT: and w8, w8, #0xffff
-; CHECK-NEXT: cmp w8, #127 // =127
+; CHECK-NEXT: cmp w8, #127
 ; CHECK-NEXT: cset w0, hi
 ; CHECK-NEXT: ret
 %tmp0 = add i16 %x, 128 ; 1U << (8-1)
@@ -313,9 +313,9 @@
 define i1 @add_ugecmp_bad_i16_i8_c0notpoweroftwo(i16 %x) nounwind {
 ; CHECK-LABEL: add_ugecmp_bad_i16_i8_c0notpoweroftwo:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, #192 // =192
+; CHECK-NEXT: add w8, w0, #192
 ; CHECK-NEXT: and w8, w8, #0xffff
-; CHECK-NEXT: cmp w8, #255 // =255
+; CHECK-NEXT: cmp w8, #255
 ; CHECK-NEXT: cset w0, hi
 ; CHECK-NEXT: ret
 %tmp0 = add i16 %x, 192 ; (1U << (8-1)) + (1U << (8-1-1))
@@ -327,9 +327,9 @@
 define i1 @add_ugecmp_bad_i16_i8_c1notpoweroftwo(i16 %x) nounwind {
 ; CHECK-LABEL: add_ugecmp_bad_i16_i8_c1notpoweroftwo:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, #128 // =128
+; CHECK-NEXT: add w8, w0, #128
 ; CHECK-NEXT: and w8, w8, #0xffff
-; CHECK-NEXT: cmp w8, #767 // =767
+; CHECK-NEXT: cmp w8, #767
 ; CHECK-NEXT: cset w0, hi
 ; CHECK-NEXT: ret
 %tmp0 = add i16 %x, 128 ; 1U << (8-1)
@@ -341,9 +341,9 @@
 define i1 @add_ugecmp_bad_i16_i8_magic(i16 %x) nounwind {
 ; CHECK-LABEL: add_ugecmp_bad_i16_i8_magic:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, #64 // =64
+; CHECK-NEXT: add w8, w0, #64
 ; CHECK-NEXT: and w8, w8, #0xffff
-; CHECK-NEXT: cmp w8, #255 // =255
+; CHECK-NEXT: cmp w8, #255
 ; CHECK-NEXT: cset w0, hi
 ; CHECK-NEXT: ret
 %tmp0 = add i16 %x, 64 ; 1U << (8-1-1)
@@ -355,9 +355,9 @@
 define i1 @add_ugecmp_bad_i16_i4(i16 %x) nounwind {
 ; CHECK-LABEL: add_ugecmp_bad_i16_i4:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, #8 // =8
+; CHECK-NEXT: add w8, w0, #8
 ; CHECK-NEXT: and w8, w8, #0xffff
-; CHECK-NEXT: cmp w8, #15 // =15
+; CHECK-NEXT: cmp w8, #15
 ; CHECK-NEXT: cset w0, hi
 ; CHECK-NEXT: ret
 %tmp0 = add i16 %x, 8 ; 1U << (4-1)
@@ -369,9 +369,9 @@
 define i1 @add_ugecmp_bad_i24_i8(i24 %x) nounwind {
 ; CHECK-LABEL: add_ugecmp_bad_i24_i8:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, #128 // =128
+; CHECK-NEXT: add w8, w0, #128
 ; CHECK-NEXT: and w8, w8, #0xffffff
-; CHECK-NEXT: cmp w8, #255 // =255
+; CHECK-NEXT: cmp w8, #255
 ; CHECK-NEXT: cset w0, hi
 ; CHECK-NEXT: ret
 %tmp0 = add i24 %x, 128 ; 1U << (8-1)
diff --git a/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll b/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll
--- a/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll
+++ b/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll
@@ -12,20 +12,20 @@
 ; Make sure the stores happen in the correct order (the exact instructions could change).
 ; CHECK-LABEL: main:
 ; CHECK: // %bb.0: // %for.body.lr.ph.i.i.i.i.i.i63
-; CHECK-NEXT: sub sp, sp, #112 // =112
+; CHECK-NEXT: sub sp, sp, #112
 ; CHECK-NEXT: str x30, [sp, #96] // 8-byte Folded Spill
 ; CHECK-NEXT: .cfi_def_cfa_offset 112
 ; CHECK-NEXT: .cfi_offset w30, -16
 ; CHECK-NEXT: bl _Z5setupv
 ; CHECK-NEXT: movi v0.4s, #1
 ; CHECK-NEXT: mov w9, #1
-; CHECK-NEXT: add x0, sp, #48 // =48
+; CHECK-NEXT: add x0, sp, #48
 ; CHECK-NEXT: mov x1, sp
 ; CHECK-NEXT: str xzr, [sp, #80]
 ; CHECK-NEXT: str w9, [sp, #80]
 ; CHECK-NEXT: stp q0, q0, [sp, #48]
 ; CHECK-NEXT: ldr w8, [sp, #48]
-; CHECK-NEXT: cmp w8, #1 // =1
+; CHECK-NEXT: cmp w8, #1
 ; CHECK-NEXT: b.ne .LBB0_2
 ; CHECK-NEXT: // %bb.1: // %for.inc
 ; CHECK-NEXT: bl f
@@ -35,7 +35,7 @@
 ; CHECK-NEXT: .LBB0_3: // %common.ret
 ; CHECK-NEXT: ldr x30, [sp, #96] // 8-byte Folded Reload
 ; CHECK-NEXT: mov w0, wzr
-; CHECK-NEXT: add sp, sp, #112 // =112
+; CHECK-NEXT: add sp, sp, #112
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
--- a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
+++ b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
@@ -266,7 +266,7 @@
 ; CHECK-NEXT: b.lt .LBB2_4
 ; CHECK-NEXT: // %bb.2: // %test3
 ; CHECK-NEXT: and x10, x9, x10, asr #12
-; CHECK-NEXT: cmp x10, #1 // =1
+; CHECK-NEXT: cmp x10, #1
 ; CHECK-NEXT: b.ge .LBB2_4
 ; CHECK-NEXT: // %bb.3: // %other_exit
 ; CHECK-NEXT: str x9, [x8]
diff --git a/llvm/test/CodeGen/AArch64/ls64-inline-asm.ll b/llvm/test/CodeGen/AArch64/ls64-inline-asm.ll
--- a/llvm/test/CodeGen/AArch64/ls64-inline-asm.ll
+++ b/llvm/test/CodeGen/AArch64/ls64-inline-asm.ll
@@ -42,7 +42,7 @@
 define void @store2(i32* %in, i8* %addr) {
 ; CHECK-LABEL: store2:
 ; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: sub sp, sp, #64 // =64
+; CHECK-NEXT: sub sp, sp, #64
 ; CHECK-NEXT: .cfi_def_cfa_offset 64
 ; CHECK-NEXT: ldpsw x2, x3, [x0]
 ; CHECK-NEXT: ldrsw x4, [x0, #16]
@@ -54,7 +54,7 @@
 ; CHECK-NEXT: //APP
 ; CHECK-NEXT: st64b x2, [x1]
 ; CHECK-NEXT: //NO_APP
-; CHECK-NEXT: add sp, sp, #64 // =64
+; CHECK-NEXT: add sp, sp, #64
 ; CHECK-NEXT: ret
 entry:
 %0 = load i32, i32* %in, align 4
diff --git a/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll b/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
--- a/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
+++ b/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
@@ -16,7 +16,7 @@
 ; CHECK-NEXT: .cfi_offset w21, -24
 ; CHECK-NEXT: .cfi_offset w30, -32
 ; CHECK-NEXT: mov w19, w0
-; CHECK-NEXT: cmp w0, #1 // =1
+; CHECK-NEXT: cmp w0, #1
 ; CHECK-NEXT: b.lt .LBB0_3
 ; CHECK-NEXT: // %bb.1: // %for.body.preheader
 ; CHECK-NEXT: adrp x8, A
@@ -26,7 +26,7 @@
 ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT: mov w0, w21
 ; CHECK-NEXT: bl _Z3usei
-; CHECK-NEXT: subs w19, w19, #1 // =1
+; CHECK-NEXT: subs w19, w19, #1
 ; CHECK-NEXT: sdiv w20, w20, w0
 ; CHECK-NEXT: b.ne .LBB0_2
 ; CHECK-NEXT: b .LBB0_4
@@ -70,7 +70,7 @@
 ; CHECK-NEXT: .cfi_offset w21, -24
 ; CHECK-NEXT: .cfi_offset w30, -32
 ; CHECK-NEXT: mov w19, w0
-; CHECK-NEXT: cmp w0, #1 // =1
+; CHECK-NEXT: cmp w0, #1
 ; CHECK-NEXT: b.lt .LBB1_3
 ; CHECK-NEXT: // %bb.1: // %for.body.preheader
 ; CHECK-NEXT: adrp x8, A
@@ -82,7 +82,7 @@
 ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT: mov w0, w20
 ; CHECK-NEXT: bl _Z3usei
-; CHECK-NEXT: subs w19, w19, #1 // =1
+; CHECK-NEXT: subs w19, w19, #1
 ; CHECK-NEXT: sdiv w21, w21, w0
 ; CHECK-NEXT: b.ne .LBB1_2
 ; CHECK-NEXT: b .LBB1_4
@@ -127,7 +127,7 @@
 ; CHECK-NEXT: .cfi_offset w21, -24
 ; CHECK-NEXT: .cfi_offset w30, -32
 ; CHECK-NEXT: mov w19, w1
-; CHECK-NEXT: cmp w1, #1 // =1
+; CHECK-NEXT: cmp w1, #1
 ; CHECK-NEXT: b.lt .LBB2_3
 ; CHECK-NEXT: // %bb.1: // %for.body.preheader
 ; CHECK-NEXT: adrp x8, A
@@ -139,7 +139,7 @@
 ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT: mov w0, w20
 ; CHECK-NEXT: bl _Z3usei
-; CHECK-NEXT: subs w19, w19, #1 // =1
+; CHECK-NEXT: subs w19, w19, #1
 ; CHECK-NEXT: sdiv w21, w21, w0
 ; CHECK-NEXT: b.ne .LBB2_2
 ; CHECK-NEXT: b .LBB2_4
diff --git a/llvm/test/CodeGen/AArch64/machine-outliner-thunk.ll b/llvm/test/CodeGen/AArch64/machine-outliner-thunk.ll
--- a/llvm/test/CodeGen/AArch64/machine-outliner-thunk.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-thunk.ll
@@ -13,7 +13,7 @@
 ; CHECK-NEXT: .cfi_def_cfa_offset 16
 ; CHECK-NEXT: .cfi_offset w30, -16
 ; CHECK-NEXT: bl [[OUTLINED_DIRECT:OUTLINED_FUNCTION_[0-9]+]]
-; CHECK-NEXT: add w0, w0, #8 // =8
+; CHECK-NEXT: add w0, w0, #8
 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT: ret
 entry:
@@ -29,7 +29,7 @@
 ; CHECK-NEXT: .cfi_def_cfa_offset 16
 ; CHECK-NEXT: .cfi_offset w30, -16
 ; CHECK-NEXT: bl [[OUTLINED_DIRECT]]
-; CHECK-NEXT: add w0, w0, #88 // =88
+; CHECK-NEXT: add w0, w0, #88
 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT: ret
 entry:
@@ -45,7 +45,7 @@
 ; CHECK-NEXT: .cfi_def_cfa_offset 16
 ; CHECK-NEXT: .cfi_offset w30, -16
 ; CHECK-NEXT: bl [[OUTLINED_INDIRECT:OUTLINED_FUNCTION_[0-9]+]]
-; CHECK-NEXT: add w0, w0, #8 // =8
+; CHECK-NEXT: add w0, w0, #8
 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT: ret
 entry:
@@ -61,7 +61,7 @@
 ; CHECK-NEXT: .cfi_def_cfa_offset 16
 ; CHECK-NEXT: .cfi_offset w30, -16
 ; CHECK-NEXT: bl [[OUTLINED_INDIRECT]]
-; CHECK-NEXT: add w0, w0, #88 // =88
+; CHECK-NEXT: add w0, w0, #88
 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT: ret
 entry:
diff --git a/llvm/test/CodeGen/AArch64/named-vector-shuffle-reverse-neon.ll b/llvm/test/CodeGen/AArch64/named-vector-shuffle-reverse-neon.ll
b/llvm/test/CodeGen/AArch64/named-vector-shuffle-reverse-neon.ll --- a/llvm/test/CodeGen/AArch64/named-vector-shuffle-reverse-neon.ll +++ b/llvm/test/CodeGen/AArch64/named-vector-shuffle-reverse-neon.ll @@ -106,7 +106,7 @@ ; ; CHECK-FASTISEL-LABEL: reverse_v8i32: ; CHECK-FASTISEL: // %bb.0: -; CHECK-FASTISEL-NEXT: sub sp, sp, #16 // =16 +; CHECK-FASTISEL-NEXT: sub sp, sp, #16 ; CHECK-FASTISEL-NEXT: str q1, [sp] // 16-byte Folded Spill ; CHECK-FASTISEL-NEXT: mov v1.16b, v0.16b ; CHECK-FASTISEL-NEXT: ldr q0, [sp] // 16-byte Folded Reload @@ -114,7 +114,7 @@ ; CHECK-FASTISEL-NEXT: ext v0.16b, v0.16b, v0.16b, #8 ; CHECK-FASTISEL-NEXT: rev64 v1.4s, v1.4s ; CHECK-FASTISEL-NEXT: ext v1.16b, v1.16b, v1.16b, #8 -; CHECK-FASTISEL-NEXT: add sp, sp, #16 // =16 +; CHECK-FASTISEL-NEXT: add sp, sp, #16 ; CHECK-FASTISEL-NEXT: ret %res = call <8 x i32> @llvm.experimental.vector.reverse.v8i32(<8 x i32> %a) @@ -137,7 +137,7 @@ ; ; CHECK-FASTISEL-LABEL: reverse_v16f32: ; CHECK-FASTISEL: // %bb.0: -; CHECK-FASTISEL-NEXT: sub sp, sp, #32 // =32 +; CHECK-FASTISEL-NEXT: sub sp, sp, #32 ; CHECK-FASTISEL-NEXT: str q3, [sp, #16] // 16-byte Folded Spill ; CHECK-FASTISEL-NEXT: str q2, [sp] // 16-byte Folded Spill ; CHECK-FASTISEL-NEXT: mov v2.16b, v1.16b @@ -152,7 +152,7 @@ ; CHECK-FASTISEL-NEXT: ext v2.16b, v2.16b, v2.16b, #8 ; CHECK-FASTISEL-NEXT: rev64 v3.4s, v3.4s ; CHECK-FASTISEL-NEXT: ext v3.16b, v3.16b, v3.16b, #8 -; CHECK-FASTISEL-NEXT: add sp, sp, #32 // =32 +; CHECK-FASTISEL-NEXT: add sp, sp, #32 ; CHECK-FASTISEL-NEXT: ret %res = call <16 x float> @llvm.experimental.vector.reverse.v16f32(<16 x float> %a) diff --git a/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll b/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll --- a/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll +++ b/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll @@ -32,11 +32,11 @@ ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 ; CHECK-NEXT: rdvl x9, #1 -; CHECK-NEXT: sub x9, x9, #1 // =1 +; CHECK-NEXT: sub x9, x9, #1 ; CHECK-NEXT: ptrue p0.b ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: mov w10, #16 -; CHECK-NEXT: cmp x9, #16 // =16 +; CHECK-NEXT: cmp x9, #16 ; CHECK-NEXT: st1b { z0.b }, p0, [sp] ; CHECK-NEXT: st1b { z1.b }, p0, [x8, #1, mul vl] ; CHECK-NEXT: csel x9, x9, x10, lo @@ -73,11 +73,11 @@ ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 ; CHECK-NEXT: cnth x10 -; CHECK-NEXT: sub x10, x10, #1 // =1 +; CHECK-NEXT: sub x10, x10, #1 ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: mov w9, #8 -; CHECK-NEXT: cmp x10, #8 // =8 +; CHECK-NEXT: cmp x10, #8 ; CHECK-NEXT: st1h { z0.h }, p0, [sp] ; CHECK-NEXT: st1h { z1.h }, p0, [x8, #1, mul vl] ; CHECK-NEXT: csel x9, x10, x9, lo @@ -114,11 +114,11 @@ ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 ; CHECK-NEXT: cntw x10 -; CHECK-NEXT: sub x10, x10, #1 // =1 +; CHECK-NEXT: sub x10, x10, #1 ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: mov w9, #4 -; CHECK-NEXT: cmp x10, #4 // =4 +; CHECK-NEXT: cmp x10, #4 ; CHECK-NEXT: st1w { z0.s }, p0, [sp] ; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl] ; CHECK-NEXT: csel x9, x10, x9, lo @@ -155,11 +155,11 @@ ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 ; CHECK-NEXT: cntd x10 -; CHECK-NEXT: sub x10, x10, #1 // =1 +; CHECK-NEXT: sub x10, x10, #1 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: mov w9, #2 -; CHECK-NEXT: cmp x10, #2 // =2 +; CHECK-NEXT: cmp x10, #2 ; CHECK-NEXT: st1d { z0.d }, p0, [sp] ; CHECK-NEXT: st1d { z1.d }, p0, [x8, #1, mul vl] ; CHECK-NEXT: csel x9, x10, x9, lo @@ -227,9 +227,9 @@ ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 ; CHECK-NEXT: cntd x10 -; CHECK-NEXT: sub x10, x10, #1 // =1 +; CHECK-NEXT: sub x10, x10, #1 ; CHECK-NEXT: mov w9, #2 -; CHECK-NEXT: cmp x10, #2 // =2 +; CHECK-NEXT: cmp x10, #2 ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: csel x9, x10, x9, lo @@ -301,9 +301,9 @@ ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 ; CHECK-NEXT: cntw x10 -; CHECK-NEXT: sub x10, x10, #1 // =1 +; CHECK-NEXT: sub x10, x10, #1 ; CHECK-NEXT: mov w9, #4 -; CHECK-NEXT: cmp x10, #4 // =4 +; CHECK-NEXT: cmp x10, #4 ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: csel x9, x10, x9, lo @@ -345,11 +345,11 @@ ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 ; CHECK-NEXT: cnth x10 -; CHECK-NEXT: sub x10, x10, #1 // =1 +; CHECK-NEXT: sub x10, x10, #1 ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: mov w9, #8 -; CHECK-NEXT: cmp x10, #8 // =8 +; CHECK-NEXT: cmp x10, #8 ; CHECK-NEXT: st1h { z0.h }, p0, [sp] ; CHECK-NEXT: st1h { z1.h }, p0, [x8, #1, mul vl] ; CHECK-NEXT: csel x9, x10, x9, lo @@ -417,9 +417,9 @@ ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 ; CHECK-NEXT: cntd x10 -; CHECK-NEXT: sub x10, x10, #1 // =1 +; CHECK-NEXT: sub x10, x10, #1 ; CHECK-NEXT: mov w9, #2 -; CHECK-NEXT: cmp x10, #2 // =2 +; CHECK-NEXT: cmp x10, #2 ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: csel x9, x10, x9, lo @@ -460,11 +460,11 @@ ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 ; CHECK-NEXT: cntw x10 -; CHECK-NEXT: sub x10, x10, #1 // =1 +; CHECK-NEXT: sub x10, x10, #1 ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: mov w9, #4 -; CHECK-NEXT: cmp x10, #4 // =4 +; CHECK-NEXT: cmp x10, #4 ; CHECK-NEXT: st1w { z0.s }, p0, [sp] ; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl] ; CHECK-NEXT: csel x9, x10, x9, lo @@ -501,11 +501,11 @@ ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-2 ; CHECK-NEXT: cntd x10 -; CHECK-NEXT: sub x10, x10, #1 // =1 +; CHECK-NEXT: sub x10, x10, #1 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: mov w9, #2 -; CHECK-NEXT: cmp x10, #2 // =2 +; CHECK-NEXT: cmp x10, #2 ; CHECK-NEXT: st1d { z0.d }, p0, [sp] ; CHECK-NEXT: st1d { z1.d }, p0, [x8, #1, mul vl] ; CHECK-NEXT: csel x9, x10, x9, lo @@ -616,11 +616,11 @@ ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-8 ; CHECK-NEXT: rdvl x10, #1 -; CHECK-NEXT: sub x10, x10, #1 // =1 +; CHECK-NEXT: sub x10, x10, #1 ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: mov w9, #16 -; CHECK-NEXT: cmp x10, #16 // =16 +; CHECK-NEXT: cmp x10, #16 ; CHECK-NEXT: st1w { z3.s }, p0, [x8, #3, mul vl] ; CHECK-NEXT: st1w { z2.s }, p0, [x8, #2, mul vl] ; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl] @@ -687,7 +687,7 @@ ; CHECK-NEXT: ptrue p0.b ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: mov w10, #17 -; CHECK-NEXT: cmp x9, #17 // =17 +; CHECK-NEXT: cmp x9, #17 ; CHECK-NEXT: st1b { z0.b }, p0, [sp] ; CHECK-NEXT: st1b { z1.b }, p0, [x8, #1, mul vl] ; CHECK-NEXT: csel x9, x9, x10, lo @@ -742,7 +742,7 @@ ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: mov w10, #18 -; CHECK-NEXT: cmp x9, #18 // =18 +; CHECK-NEXT: cmp x9, #18 ; CHECK-NEXT: st1h { z0.h }, p0, [sp] ; CHECK-NEXT: st1h { z1.h }, p0, [x8, #1, mul vl] ; CHECK-NEXT: csel x9, x9, x10, lo @@ -797,7 +797,7 @@ ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: mov w10, #20 -; CHECK-NEXT: cmp x9, #20 // =20 +; CHECK-NEXT: cmp x9, #20 ; CHECK-NEXT: st1w { z0.s }, p0, [sp] ; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl] ; CHECK-NEXT: csel x9, x9, x10, lo @@ -852,7 +852,7 @@ ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: mov w10, #24 -; CHECK-NEXT: cmp x9, #24 // =24 +; CHECK-NEXT: cmp x9, #24 ; CHECK-NEXT: st1d { z0.d }, p0, [sp] ; CHECK-NEXT: st1d { z1.d }, p0, [x8, #1, mul vl] ; CHECK-NEXT: csel x9, x9, x10, lo @@ -907,7 +907,7 @@ ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: mov w10, #18 -; CHECK-NEXT: cmp x9, #18 // =18 +; CHECK-NEXT: cmp x9, #18 ; CHECK-NEXT: st1h { z0.h }, p0, [sp] ; CHECK-NEXT: st1h { z1.h }, p0, [x8, #1, mul vl] ; CHECK-NEXT: csel x9, x9, x10, lo @@ -962,7 +962,7 @@ ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: mov w10, #20 -; CHECK-NEXT: cmp x9, #20 // =20 +; CHECK-NEXT: cmp x9, #20 ; CHECK-NEXT: st1w { z0.s }, p0, [sp] ; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl] ; CHECK-NEXT: csel x9, x9, x10, lo @@ -1017,7 +1017,7 @@ ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: mov w10, #24 -; CHECK-NEXT: cmp x9, #24 // =24 +; CHECK-NEXT: cmp x9, #24 ; CHECK-NEXT: st1d { z0.d }, p0, [sp] ; CHECK-NEXT: st1d { z1.d }, p0, [x8, #1, mul vl] ; CHECK-NEXT: csel x9, x9, x10, lo @@ -1130,7 +1130,7 @@ ; CHECK-NEXT: st1w { z2.s }, p0, [x8, #2, mul vl] ; CHECK-NEXT: addvl x8, x8, #2 ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x8, x9, lsl #2] -; CHECK-NEXT: sub x8, x8, #32 // =32 +; CHECK-NEXT: sub x8, x8, #32 ; CHECK-NEXT: ld1w { z1.s }, p0/z, [x8, #1, mul vl] ; CHECK-NEXT: addvl sp, sp, #4 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload @@ -1149,7 +1149,7 @@ ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: mov w10, #68 -; CHECK-NEXT: cmp x9, #68 // =68 +; CHECK-NEXT: cmp x9, #68 ; CHECK-NEXT: st1w { z3.s }, p0, [x8, #3, mul vl] ; CHECK-NEXT: st1w { z2.s }, p0, [x8, #2, mul vl] ; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl] diff --git a/llvm/test/CodeGen/AArch64/neg-abs.ll b/llvm/test/CodeGen/AArch64/neg-abs.ll --- a/llvm/test/CodeGen/AArch64/neg-abs.ll +++ b/llvm/test/CodeGen/AArch64/neg-abs.ll @@ -7,7 +7,7 @@ define i64 @neg_abs64(i64 %x) { ; CHECK-LABEL: neg_abs64: ; CHECK: // %bb.0: -; CHECK-NEXT: cmp x0, #0 // =0 +; CHECK-NEXT: cmp x0, #0 ; CHECK-NEXT: cneg x8, x0, mi ; CHECK-NEXT: neg x0, x8 ; CHECK-NEXT: ret @@ -21,7 +21,7 @@ define i32 @neg_abs32(i32 %x) { ; CHECK-LABEL: 
neg_abs32: ; CHECK: // %bb.0: -; CHECK-NEXT: cmp w0, #0 // =0 +; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: cneg w8, w0, mi ; CHECK-NEXT: neg w0, w8 ; CHECK-NEXT: ret @@ -66,7 +66,7 @@ define i64 @abs64(i64 %x) { ; CHECK-LABEL: abs64: ; CHECK: // %bb.0: -; CHECK-NEXT: cmp x0, #0 // =0 +; CHECK-NEXT: cmp x0, #0 ; CHECK-NEXT: cneg x0, x0, mi ; CHECK-NEXT: ret %abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true) @@ -76,7 +76,7 @@ define i32 @abs32(i32 %x) { ; CHECK-LABEL: abs32: ; CHECK: // %bb.0: -; CHECK-NEXT: cmp w0, #0 // =0 +; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: cneg w0, w0, mi ; CHECK-NEXT: ret %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true) @@ -87,7 +87,7 @@ ; CHECK-LABEL: abs16: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: cmp w8, #0 // =0 +; CHECK-NEXT: cmp w8, #0 ; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %abs = tail call i16 @llvm.abs.i16(i16 %x, i1 true) @@ -99,7 +99,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: negs x8, x0 ; CHECK-NEXT: ngcs x9, x1 -; CHECK-NEXT: cmp x1, #0 // =0 +; CHECK-NEXT: cmp x1, #0 ; CHECK-NEXT: csel x0, x8, x0, lt ; CHECK-NEXT: csel x1, x9, x1, lt ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/pow.ll b/llvm/test/CodeGen/AArch64/pow.ll --- a/llvm/test/CodeGen/AArch64/pow.ll +++ b/llvm/test/CodeGen/AArch64/pow.ll @@ -68,7 +68,7 @@ define <4 x float> @pow_v4f32_one_fourth_not_enough_fmf(<4 x float> %x) nounwind { ; CHECK-LABEL: pow_v4f32_one_fourth_not_enough_fmf: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #48 // =48 +; CHECK-NEXT: sub sp, sp, #48 ; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: mov s0, v0.s[1] ; CHECK-NEXT: fmov s1, #0.25000000 @@ -100,7 +100,7 @@ ; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 ; CHECK-NEXT: mov v1.s[3], v0.s[0] ; CHECK-NEXT: mov v0.16b, v1.16b -; CHECK-NEXT: add sp, sp, #48 // =48 +; CHECK-NEXT: add sp, sp, #48 ; CHECK-NEXT: ret %r = call afn nsz <4 x float> @llvm.pow.v4f32(<4 x float> %x, <4 x float> <float 2.5e-01, float 2.5e-01, float 2.5e-01, float 2.5e-01>) ret <4 x float> %r @@ -109,7 +109,7 @@ define <2 x double> @pow_v2f64_one_fourth_not_enough_fmf(<2 x double> %x) nounwind { ; CHECK-LABEL: pow_v2f64_one_fourth_not_enough_fmf: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #48 // =48 +; CHECK-NEXT: sub sp, sp, #48 ; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: mov d0, v0.d[1] ; CHECK-NEXT: fmov d1, #0.25000000 @@ -124,7 +124,7 @@ ; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 ; CHECK-NEXT: mov v0.d[1], v1.d[0] -; CHECK-NEXT: add sp, sp, #48 // =48 +; CHECK-NEXT: add sp, sp, #48 ; CHECK-NEXT: ret %r = call nsz nnan reassoc <2 x double> @llvm.pow.v2f64(<2 x double> %x, <2 x double> <double 2.5e-01, double 2.5e-01>) ret <2 x double> %r diff --git a/llvm/test/CodeGen/AArch64/pr48188.ll b/llvm/test/CodeGen/AArch64/pr48188.ll --- a/llvm/test/CodeGen/AArch64/pr48188.ll +++ b/llvm/test/CodeGen/AArch64/pr48188.ll @@ -5,7 +5,7 @@ define void @test() nounwind { ; CHECK-LABEL: test: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: sub sp, sp, #16 // =16 +; CHECK-NEXT: sub sp, sp, #16 ; CHECK-NEXT: mov x1, xzr ; CHECK-NEXT: mov x0, x1 ; CHECK-NEXT: str x1, [sp] // 8-byte Folded Spill diff --git a/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll b/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll --- a/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll +++ b/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll @@ -138,10 +138,10 @@ ; CHECK-NEXT: add v28.2d, v28.2d, v14.2d ; CHECK-NEXT: fmov d14, x17 ; CHECK-NEXT: mov v14.d[1], x16 -; CHECK-NEXT: add x8, x8, #8 // =8 +; 
CHECK-NEXT: add x8, x8, #8 ; CHECK-NEXT: add v27.2d, v27.2d, v14.2d -; CHECK-NEXT: cmp x8, #64 // =64 -; CHECK-NEXT: add x9, x9, #1 // =1 +; CHECK-NEXT: cmp x8, #64 +; CHECK-NEXT: add x9, x9, #1 ; CHECK-NEXT: b.ne .LBB0_1 ; CHECK-NEXT: // %bb.2: // %for.cond.cleanup ; CHECK-NEXT: adrp x8, C diff --git a/llvm/test/CodeGen/AArch64/sadd_sat.ll b/llvm/test/CodeGen/AArch64/sadd_sat.ll --- a/llvm/test/CodeGen/AArch64/sadd_sat.ll +++ b/llvm/test/CodeGen/AArch64/sadd_sat.ll @@ -13,7 +13,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: adds w8, w0, w1 ; CHECK-NEXT: mov w9, #2147483647 -; CHECK-NEXT: cmp w8, #0 // =0 +; CHECK-NEXT: cmp w8, #0 ; CHECK-NEXT: cinv w8, w9, ge ; CHECK-NEXT: adds w9, w0, w1 ; CHECK-NEXT: csel w0, w8, w9, vs @@ -27,7 +27,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: adds x8, x0, x1 ; CHECK-NEXT: mov x9, #9223372036854775807 -; CHECK-NEXT: cmp x8, #0 // =0 +; CHECK-NEXT: cmp x8, #0 ; CHECK-NEXT: cinv x8, x9, ge ; CHECK-NEXT: adds x9, x0, x1 ; CHECK-NEXT: csel x0, x8, x9, vs @@ -58,9 +58,9 @@ ; CHECK-NEXT: sxtb w8, w0 ; CHECK-NEXT: add w8, w8, w1, sxtb ; CHECK-NEXT: mov w9, #127 -; CHECK-NEXT: cmp w8, #127 // =127 +; CHECK-NEXT: cmp w8, #127 ; CHECK-NEXT: csel w8, w8, w9, lt -; CHECK-NEXT: cmn w8, #128 // =128 +; CHECK-NEXT: cmn w8, #128 ; CHECK-NEXT: mov w9, #-128 ; CHECK-NEXT: csel w0, w8, w9, gt ; CHECK-NEXT: ret @@ -75,9 +75,9 @@ ; CHECK-NEXT: sbfx w9, w0, #0, #4 ; CHECK-NEXT: add w8, w9, w8, asr #28 ; CHECK-NEXT: mov w10, #7 -; CHECK-NEXT: cmp w8, #7 // =7 +; CHECK-NEXT: cmp w8, #7 ; CHECK-NEXT: csel w8, w8, w10, lt -; CHECK-NEXT: cmn w8, #8 // =8 +; CHECK-NEXT: cmn w8, #8 ; CHECK-NEXT: mov w9, #-8 ; CHECK-NEXT: csel w0, w8, w9, gt ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/sadd_sat_plus.ll b/llvm/test/CodeGen/AArch64/sadd_sat_plus.ll --- a/llvm/test/CodeGen/AArch64/sadd_sat_plus.ll +++ b/llvm/test/CodeGen/AArch64/sadd_sat_plus.ll @@ -13,7 +13,7 @@ ; CHECK-NEXT: mul w8, w1, w2 ; CHECK-NEXT: adds w10, w0, w8 ; CHECK-NEXT: mov w9, #2147483647 -; CHECK-NEXT: cmp w10, #0 // =0 +; CHECK-NEXT: cmp w10, #0 ; CHECK-NEXT: cinv w9, w9, ge ; CHECK-NEXT: adds w8, w0, w8 ; CHECK-NEXT: csel w0, w9, w8, vs @@ -28,7 +28,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: adds x8, x0, x2 ; CHECK-NEXT: mov x9, #9223372036854775807 -; CHECK-NEXT: cmp x8, #0 // =0 +; CHECK-NEXT: cmp x8, #0 ; CHECK-NEXT: cinv x8, x9, ge ; CHECK-NEXT: adds x9, x0, x2 ; CHECK-NEXT: csel x0, x8, x9, vs @@ -63,9 +63,9 @@ ; CHECK-NEXT: mul w9, w1, w2 ; CHECK-NEXT: add w8, w8, w9, sxtb ; CHECK-NEXT: mov w10, #127 -; CHECK-NEXT: cmp w8, #127 // =127 +; CHECK-NEXT: cmp w8, #127 ; CHECK-NEXT: csel w8, w8, w10, lt -; CHECK-NEXT: cmn w8, #128 // =128 +; CHECK-NEXT: cmn w8, #128 ; CHECK-NEXT: mov w9, #-128 ; CHECK-NEXT: csel w0, w8, w9, gt ; CHECK-NEXT: ret @@ -82,9 +82,9 @@ ; CHECK-NEXT: lsl w9, w9, #28 ; CHECK-NEXT: add w8, w8, w9, asr #28 ; CHECK-NEXT: mov w10, #7 -; CHECK-NEXT: cmp w8, #7 // =7 +; CHECK-NEXT: cmp w8, #7 ; CHECK-NEXT: csel w8, w8, w10, lt -; CHECK-NEXT: cmn w8, #8 // =8 +; CHECK-NEXT: cmn w8, #8 ; CHECK-NEXT: mov w9, #-8 ; CHECK-NEXT: csel w0, w8, w9, gt ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll b/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll --- a/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll +++ b/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll @@ -135,8 +135,8 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: ld1 { v0.b }[0], [x1] ; CHECK-NEXT: ld1 { v1.b }[0], [x0] -; CHECK-NEXT: add x8, x0, #1 // =1 -; CHECK-NEXT: add x9, x1, #1 // =1 +; CHECK-NEXT: add x8, x0, #1 +; CHECK-NEXT: add x9, x1, #1 ; CHECK-NEXT: 
ld1 { v0.b }[4], [x9] ; CHECK-NEXT: ld1 { v1.b }[4], [x8] ; CHECK-NEXT: shl v0.2s, v0.2s, #24 @@ -175,8 +175,8 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: ld1 { v0.h }[0], [x1] ; CHECK-NEXT: ld1 { v1.h }[0], [x0] -; CHECK-NEXT: add x8, x0, #2 // =2 -; CHECK-NEXT: add x9, x1, #2 // =2 +; CHECK-NEXT: add x8, x0, #2 +; CHECK-NEXT: add x9, x1, #2 ; CHECK-NEXT: ld1 { v0.h }[2], [x9] ; CHECK-NEXT: ld1 { v1.h }[2], [x8] ; CHECK-NEXT: shl v0.2s, v0.2s, #16 @@ -354,7 +354,7 @@ ; CHECK-NEXT: adcs x12, x3, x7 ; CHECK-NEXT: mov x9, #9223372036854775807 ; CHECK-NEXT: eor x10, x3, x7 -; CHECK-NEXT: cmp x12, #0 // =0 +; CHECK-NEXT: cmp x12, #0 ; CHECK-NEXT: eor x13, x3, x12 ; CHECK-NEXT: cinv x14, x9, ge ; CHECK-NEXT: bics xzr, x13, x10 @@ -364,7 +364,7 @@ ; CHECK-NEXT: adds x8, x0, x4 ; CHECK-NEXT: adcs x10, x1, x5 ; CHECK-NEXT: eor x11, x1, x5 -; CHECK-NEXT: cmp x10, #0 // =0 +; CHECK-NEXT: cmp x10, #0 ; CHECK-NEXT: eor x12, x1, x10 ; CHECK-NEXT: cinv x9, x9, ge ; CHECK-NEXT: bics xzr, x12, x11 diff --git a/llvm/test/CodeGen/AArch64/sat-add.ll b/llvm/test/CodeGen/AArch64/sat-add.ll --- a/llvm/test/CodeGen/AArch64/sat-add.ll +++ b/llvm/test/CodeGen/AArch64/sat-add.ll @@ -10,10 +10,10 @@ ; CHECK-LABEL: unsigned_sat_constant_i8_using_min: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: cmp w8, #213 // =213 +; CHECK-NEXT: cmp w8, #213 ; CHECK-NEXT: mov w8, #-43 ; CHECK-NEXT: csel w8, w0, w8, lo -; CHECK-NEXT: add w0, w8, #42 // =42 +; CHECK-NEXT: add w0, w8, #42 ; CHECK-NEXT: ret %c = icmp ult i8 %x, -43 %s = select i1 %c, i8 %x, i8 -43 @@ -25,7 +25,7 @@ ; CHECK-LABEL: unsigned_sat_constant_i8_using_cmp_sum: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: add w8, w8, #42 // =42 +; CHECK-NEXT: add w8, w8, #42 ; CHECK-NEXT: tst w8, #0x100 ; CHECK-NEXT: csinv w0, w8, wzr, eq ; CHECK-NEXT: ret @@ -39,8 +39,8 @@ ; CHECK-LABEL: unsigned_sat_constant_i8_using_cmp_notval: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: add w9, w0, #42 // =42 -; CHECK-NEXT: cmp w8, #213 // =213 +; CHECK-NEXT: add w9, w0, #42 +; CHECK-NEXT: cmp w8, #213 ; CHECK-NEXT: csinv w0, w9, wzr, ls ; CHECK-NEXT: ret %a = add i8 %x, 42 @@ -56,7 +56,7 @@ ; CHECK-NEXT: cmp w8, w0, uxth ; CHECK-NEXT: mov w8, #-43 ; CHECK-NEXT: csel w8, w0, w8, hi -; CHECK-NEXT: add w0, w8, #42 // =42 +; CHECK-NEXT: add w0, w8, #42 ; CHECK-NEXT: ret %c = icmp ult i16 %x, -43 %s = select i1 %c, i16 %x, i16 -43 @@ -68,7 +68,7 @@ ; CHECK-LABEL: unsigned_sat_constant_i16_using_cmp_sum: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: add w8, w8, #42 // =42 +; CHECK-NEXT: add w8, w8, #42 ; CHECK-NEXT: tst w8, #0x10000 ; CHECK-NEXT: csinv w0, w8, wzr, eq ; CHECK-NEXT: ret @@ -82,7 +82,7 @@ ; CHECK-LABEL: unsigned_sat_constant_i16_using_cmp_notval: ; CHECK: // %bb.0: ; CHECK-NEXT: mov w9, #65493 -; CHECK-NEXT: add w8, w0, #42 // =42 +; CHECK-NEXT: add w8, w0, #42 ; CHECK-NEXT: cmp w9, w0, uxth ; CHECK-NEXT: csinv w0, w8, wzr, hs ; CHECK-NEXT: ret @@ -95,10 +95,10 @@ define i32 @unsigned_sat_constant_i32_using_min(i32 %x) { ; CHECK-LABEL: unsigned_sat_constant_i32_using_min: ; CHECK: // %bb.0: -; CHECK-NEXT: cmn w0, #43 // =43 +; CHECK-NEXT: cmn w0, #43 ; CHECK-NEXT: mov w8, #-43 ; CHECK-NEXT: csel w8, w0, w8, lo -; CHECK-NEXT: add w0, w8, #42 // =42 +; CHECK-NEXT: add w0, w8, #42 ; CHECK-NEXT: ret %c = icmp ult i32 %x, -43 %s = select i1 %c, i32 %x, i32 -43 @@ -109,7 +109,7 @@ define i32 @unsigned_sat_constant_i32_using_cmp_sum(i32 %x) { ; CHECK-LABEL: unsigned_sat_constant_i32_using_cmp_sum: ; CHECK: // 
%bb.0: -; CHECK-NEXT: adds w8, w0, #42 // =42 +; CHECK-NEXT: adds w8, w0, #42 ; CHECK-NEXT: csinv w0, w8, wzr, lo ; CHECK-NEXT: ret %a = add i32 %x, 42 @@ -121,7 +121,7 @@ define i32 @unsigned_sat_constant_i32_using_cmp_notval(i32 %x) { ; CHECK-LABEL: unsigned_sat_constant_i32_using_cmp_notval: ; CHECK: // %bb.0: -; CHECK-NEXT: adds w8, w0, #42 // =42 +; CHECK-NEXT: adds w8, w0, #42 ; CHECK-NEXT: csinv w0, w8, wzr, lo ; CHECK-NEXT: ret %a = add i32 %x, 42 @@ -133,10 +133,10 @@ define i64 @unsigned_sat_constant_i64_using_min(i64 %x) { ; CHECK-LABEL: unsigned_sat_constant_i64_using_min: ; CHECK: // %bb.0: -; CHECK-NEXT: cmn x0, #43 // =43 +; CHECK-NEXT: cmn x0, #43 ; CHECK-NEXT: mov x8, #-43 ; CHECK-NEXT: csel x8, x0, x8, lo -; CHECK-NEXT: add x0, x8, #42 // =42 +; CHECK-NEXT: add x0, x8, #42 ; CHECK-NEXT: ret %c = icmp ult i64 %x, -43 %s = select i1 %c, i64 %x, i64 -43 @@ -147,7 +147,7 @@ define i64 @unsigned_sat_constant_i64_using_cmp_sum(i64 %x) { ; CHECK-LABEL: unsigned_sat_constant_i64_using_cmp_sum: ; CHECK: // %bb.0: -; CHECK-NEXT: adds x8, x0, #42 // =42 +; CHECK-NEXT: adds x8, x0, #42 ; CHECK-NEXT: csinv x0, x8, xzr, lo ; CHECK-NEXT: ret %a = add i64 %x, 42 @@ -159,7 +159,7 @@ define i64 @unsigned_sat_constant_i64_using_cmp_notval(i64 %x) { ; CHECK-LABEL: unsigned_sat_constant_i64_using_cmp_notval: ; CHECK: // %bb.0: -; CHECK-NEXT: adds x8, x0, #42 // =42 +; CHECK-NEXT: adds x8, x0, #42 ; CHECK-NEXT: csinv x0, x8, xzr, lo ; CHECK-NEXT: ret %a = add i64 %x, 42 diff --git a/llvm/test/CodeGen/AArch64/sdivpow2.ll b/llvm/test/CodeGen/AArch64/sdivpow2.ll --- a/llvm/test/CodeGen/AArch64/sdivpow2.ll +++ b/llvm/test/CodeGen/AArch64/sdivpow2.ll @@ -5,8 +5,8 @@ define i32 @test1(i32 %x) { ; CHECK-LABEL: test1: ; CHECK: // %bb.0: -; CHECK-NEXT: add w8, w0, #7 // =7 -; CHECK-NEXT: cmp w0, #0 // =0 +; CHECK-NEXT: add w8, w0, #7 +; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: csel w8, w8, w0, lt ; CHECK-NEXT: asr w0, w8, #3 ; CHECK-NEXT: ret @@ -17,8 +17,8 @@ define i32 @test2(i32 %x) { ; CHECK-LABEL: test2: ; CHECK: // %bb.0: -; CHECK-NEXT: add w8, w0, #7 // =7 -; CHECK-NEXT: cmp w0, #0 // =0 +; CHECK-NEXT: add w8, w0, #7 +; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: csel w8, w8, w0, lt ; CHECK-NEXT: neg w0, w8, asr #3 ; CHECK-NEXT: ret @@ -29,8 +29,8 @@ define i32 @test3(i32 %x) { ; CHECK-LABEL: test3: ; CHECK: // %bb.0: -; CHECK-NEXT: add w8, w0, #31 // =31 -; CHECK-NEXT: cmp w0, #0 // =0 +; CHECK-NEXT: add w8, w0, #31 +; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: csel w8, w8, w0, lt ; CHECK-NEXT: asr w0, w8, #5 ; CHECK-NEXT: ret @@ -41,8 +41,8 @@ define i64 @test4(i64 %x) { ; CHECK-LABEL: test4: ; CHECK: // %bb.0: -; CHECK-NEXT: add x8, x0, #7 // =7 -; CHECK-NEXT: cmp x0, #0 // =0 +; CHECK-NEXT: add x8, x0, #7 +; CHECK-NEXT: cmp x0, #0 ; CHECK-NEXT: csel x8, x8, x0, lt ; CHECK-NEXT: asr x0, x8, #3 ; CHECK-NEXT: ret @@ -53,8 +53,8 @@ define i64 @test5(i64 %x) { ; CHECK-LABEL: test5: ; CHECK: // %bb.0: -; CHECK-NEXT: add x8, x0, #7 // =7 -; CHECK-NEXT: cmp x0, #0 // =0 +; CHECK-NEXT: add x8, x0, #7 +; CHECK-NEXT: cmp x0, #0 ; CHECK-NEXT: csel x8, x8, x0, lt ; CHECK-NEXT: neg x0, x8, asr #3 ; CHECK-NEXT: ret @@ -65,8 +65,8 @@ define i64 @test6(i64 %x) { ; CHECK-LABEL: test6: ; CHECK: // %bb.0: -; CHECK-NEXT: add x8, x0, #63 // =63 -; CHECK-NEXT: cmp x0, #0 // =0 +; CHECK-NEXT: add x8, x0, #63 +; CHECK-NEXT: cmp x0, #0 ; CHECK-NEXT: csel x8, x8, x0, lt ; CHECK-NEXT: asr x0, x8, #6 ; CHECK-NEXT: ret @@ -79,7 +79,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: mov x8, #281474976710655 ; CHECK-NEXT: add x8, x0, x8 -; CHECK-NEXT: 
cmp x0, #0 // =0 +; CHECK-NEXT: cmp x0, #0 ; CHECK-NEXT: csel x8, x8, x0, lt ; CHECK-NEXT: asr x0, x8, #48 ; CHECK-NEXT: ret @@ -90,15 +90,15 @@ define i64 @test8(i64 %x) { ; ISEL-LABEL: test8: ; ISEL: // %bb.0: -; ISEL-NEXT: cmp x0, #0 // =0 +; ISEL-NEXT: cmp x0, #0 ; ISEL-NEXT: cinc x8, x0, lt ; ISEL-NEXT: asr x0, x8, #1 ; ISEL-NEXT: ret ; ; FAST-LABEL: test8: ; FAST: // %bb.0: -; FAST-NEXT: add x8, x0, #1 // =1 -; FAST-NEXT: cmp x0, #0 // =0 +; FAST-NEXT: add x8, x0, #1 +; FAST-NEXT: cmp x0, #0 ; FAST-NEXT: csel x8, x8, x0, lt ; FAST-NEXT: asr x0, x8, #1 ; FAST-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/select_const.ll b/llvm/test/CodeGen/AArch64/select_const.ll --- a/llvm/test/CodeGen/AArch64/select_const.ll +++ b/llvm/test/CodeGen/AArch64/select_const.ll @@ -69,7 +69,7 @@ ; CHECK-LABEL: select_0_or_neg1: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0x1 -; CHECK-NEXT: sub w0, w8, #1 // =1 +; CHECK-NEXT: sub w0, w8, #1 ; CHECK-NEXT: ret %sel = select i1 %cond, i32 0, i32 -1 ret i32 %sel @@ -78,7 +78,7 @@ define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) { ; CHECK-LABEL: select_0_or_neg1_zeroext: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w0, w0, #1 // =1 +; CHECK-NEXT: sub w0, w0, #1 ; CHECK-NEXT: ret %sel = select i1 %cond, i32 0, i32 -1 ret i32 %sel @@ -137,7 +137,7 @@ define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) { ; CHECK-LABEL: select_Cplus1_C_zeroext: ; CHECK: // %bb.0: -; CHECK-NEXT: cmp w0, #0 // =0 +; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: mov w8, #41 ; CHECK-NEXT: cinc w0, w8, ne ; CHECK-NEXT: ret @@ -172,7 +172,7 @@ define i32 @select_C_Cplus1_zeroext(i1 zeroext %cond) { ; CHECK-LABEL: select_C_Cplus1_zeroext: ; CHECK: // %bb.0: -; CHECK-NEXT: cmp w0, #0 // =0 +; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: mov w8, #41 ; CHECK-NEXT: cinc w0, w8, eq ; CHECK-NEXT: ret @@ -209,7 +209,7 @@ define i32 @select_C1_C2_zeroext(i1 zeroext %cond) { ; CHECK-LABEL: select_C1_C2_zeroext: ; CHECK: // %bb.0: -; CHECK-NEXT: cmp w0, #0 // =0 +; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: mov w8, #42 ; CHECK-NEXT: mov w9, #421 ; CHECK-NEXT: csel w0, w9, w8, ne diff --git a/llvm/test/CodeGen/AArch64/shift-mod.ll b/llvm/test/CodeGen/AArch64/shift-mod.ll --- a/llvm/test/CodeGen/AArch64/shift-mod.ll +++ b/llvm/test/CodeGen/AArch64/shift-mod.ll @@ -78,7 +78,7 @@ define i64 @ashr_add_shl_i32(i64 %r) { ; CHECK-LABEL: ashr_add_shl_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: add w8, w0, #1 // =1 +; CHECK-NEXT: add w8, w0, #1 ; CHECK-NEXT: sxtw x0, w8 ; CHECK-NEXT: ret %conv = shl i64 %r, 32 @@ -90,7 +90,7 @@ define i64 @ashr_add_shl_i8(i64 %r) { ; CHECK-LABEL: ashr_add_shl_i8: ; CHECK: // %bb.0: -; CHECK-NEXT: add w8, w0, #1 // =1 +; CHECK-NEXT: add w8, w0, #1 ; CHECK-NEXT: sxtb x0, w8 ; CHECK-NEXT: ret %conv = shl i64 %r, 56 diff --git a/llvm/test/CodeGen/AArch64/signbit-shift.ll b/llvm/test/CodeGen/AArch64/signbit-shift.ll --- a/llvm/test/CodeGen/AArch64/signbit-shift.ll +++ b/llvm/test/CodeGen/AArch64/signbit-shift.ll @@ -18,7 +18,7 @@ ; CHECK-LABEL: add_zext_ifpos: ; CHECK: // %bb.0: ; CHECK-NEXT: asr w8, w0, #31 -; CHECK-NEXT: add w0, w8, #42 // =42 +; CHECK-NEXT: add w0, w8, #42 ; CHECK-NEXT: ret %c = icmp sgt i32 %x, -1 %e = zext i1 %c to i32 @@ -43,7 +43,7 @@ define i32 @sel_ifpos_tval_bigger(i32 %x) { ; CHECK-LABEL: sel_ifpos_tval_bigger: ; CHECK: // %bb.0: -; CHECK-NEXT: cmp w0, #0 // =0 +; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: mov w8, #41 ; CHECK-NEXT: cinc w0, w8, ge ; CHECK-NEXT: ret @@ -67,7 +67,7 @@ ; CHECK-LABEL: add_sext_ifpos: ; CHECK: // %bb.0: ; CHECK-NEXT: lsr w8, w0, #31 -; CHECK-NEXT: add 
w0, w8, #41 // =41 +; CHECK-NEXT: add w0, w8, #41 ; CHECK-NEXT: ret %c = icmp sgt i32 %x, -1 %e = sext i1 %c to i32 @@ -92,7 +92,7 @@ define i32 @sel_ifpos_fval_bigger(i32 %x) { ; CHECK-LABEL: sel_ifpos_fval_bigger: ; CHECK: // %bb.0: -; CHECK-NEXT: cmp w0, #0 // =0 +; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: mov w8, #41 ; CHECK-NEXT: cinc w0, w8, lt ; CHECK-NEXT: ret @@ -117,7 +117,7 @@ ; CHECK-LABEL: add_zext_ifneg: ; CHECK: // %bb.0: ; CHECK-NEXT: lsr w8, w0, #31 -; CHECK-NEXT: add w0, w8, #41 // =41 +; CHECK-NEXT: add w0, w8, #41 ; CHECK-NEXT: ret %c = icmp slt i32 %x, 0 %e = zext i1 %c to i32 @@ -128,7 +128,7 @@ define i32 @sel_ifneg_tval_bigger(i32 %x) { ; CHECK-LABEL: sel_ifneg_tval_bigger: ; CHECK: // %bb.0: -; CHECK-NEXT: cmp w0, #0 // =0 +; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: mov w8, #41 ; CHECK-NEXT: cinc w0, w8, lt ; CHECK-NEXT: ret @@ -151,7 +151,7 @@ ; CHECK-LABEL: add_sext_ifneg: ; CHECK: // %bb.0: ; CHECK-NEXT: asr w8, w0, #31 -; CHECK-NEXT: add w0, w8, #42 // =42 +; CHECK-NEXT: add w0, w8, #42 ; CHECK-NEXT: ret %c = icmp slt i32 %x, 0 %e = sext i1 %c to i32 @@ -162,7 +162,7 @@ define i32 @sel_ifneg_fval_bigger(i32 %x) { ; CHECK-LABEL: sel_ifneg_fval_bigger: ; CHECK: // %bb.0: -; CHECK-NEXT: cmp w0, #0 // =0 +; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: mov w8, #41 ; CHECK-NEXT: cinc w0, w8, ge ; CHECK-NEXT: ret @@ -175,7 +175,7 @@ ; CHECK-LABEL: add_lshr_not: ; CHECK: // %bb.0: ; CHECK-NEXT: asr w8, w0, #31 -; CHECK-NEXT: add w0, w8, #42 // =42 +; CHECK-NEXT: add w0, w8, #42 ; CHECK-NEXT: ret %not = xor i32 %x, -1 %sh = lshr i32 %not, 31 @@ -247,7 +247,7 @@ ; CHECK-LABEL: sub_const_op_lshr: ; CHECK: // %bb.0: ; CHECK-NEXT: asr w8, w0, #31 -; CHECK-NEXT: add w0, w8, #43 // =43 +; CHECK-NEXT: add w0, w8, #43 ; CHECK-NEXT: ret %sh = lshr i32 %x, 31 %r = sub i32 43, %sh diff --git a/llvm/test/CodeGen/AArch64/signed-truncation-check.ll b/llvm/test/CodeGen/AArch64/signed-truncation-check.ll --- a/llvm/test/CodeGen/AArch64/signed-truncation-check.ll +++ b/llvm/test/CodeGen/AArch64/signed-truncation-check.ll @@ -273,7 +273,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: add w8, w0, w1 ; CHECK-NEXT: and w8, w8, #0xffff -; CHECK-NEXT: cmp w8, #256 // =256 +; CHECK-NEXT: cmp w8, #256 ; CHECK-NEXT: cset w0, lo ; CHECK-NEXT: ret %tmp0 = add i16 %x, %y @@ -285,7 +285,7 @@ define i1 @add_ultcmp_bad_i16_i8_cmp(i16 %x, i16 %y) nounwind { ; CHECK-LABEL: add_ultcmp_bad_i16_i8_cmp: ; CHECK: // %bb.0: -; CHECK-NEXT: add w8, w0, #128 // =128 +; CHECK-NEXT: add w8, w0, #128 ; CHECK-NEXT: and w8, w8, #0xffff ; CHECK-NEXT: cmp w8, w1, uxth ; CHECK-NEXT: cset w0, lo @@ -300,7 +300,7 @@ ; CHECK-LABEL: add_ultcmp_bad_i8_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: add w8, w8, #128 // =128 +; CHECK-NEXT: add w8, w8, #128 ; CHECK-NEXT: lsr w0, w8, #16 ; CHECK-NEXT: ret %tmp0 = add i16 %x, 128 ; 1U << (8-1) @@ -312,9 +312,9 @@ define i1 @add_ultcmp_bad_i16_i8_c0notpoweroftwo(i16 %x) nounwind { ; CHECK-LABEL: add_ultcmp_bad_i16_i8_c0notpoweroftwo: ; CHECK: // %bb.0: -; CHECK-NEXT: add w8, w0, #192 // =192 +; CHECK-NEXT: add w8, w0, #192 ; CHECK-NEXT: and w8, w8, #0xffff -; CHECK-NEXT: cmp w8, #256 // =256 +; CHECK-NEXT: cmp w8, #256 ; CHECK-NEXT: cset w0, lo ; CHECK-NEXT: ret %tmp0 = add i16 %x, 192 ; (1U << (8-1)) + (1U << (8-1-1)) @@ -326,9 +326,9 @@ define i1 @add_ultcmp_bad_i16_i8_c1notpoweroftwo(i16 %x) nounwind { ; CHECK-LABEL: add_ultcmp_bad_i16_i8_c1notpoweroftwo: ; CHECK: // %bb.0: -; CHECK-NEXT: add w8, w0, #128 // =128 +; CHECK-NEXT: add w8, w0, #128 ; CHECK-NEXT: and w8, w8, #0xffff 
-; CHECK-NEXT: cmp w8, #768 // =768 +; CHECK-NEXT: cmp w8, #768 ; CHECK-NEXT: cset w0, lo ; CHECK-NEXT: ret %tmp0 = add i16 %x, 128 ; 1U << (8-1) @@ -340,9 +340,9 @@ define i1 @add_ultcmp_bad_i16_i8_magic(i16 %x) nounwind { ; CHECK-LABEL: add_ultcmp_bad_i16_i8_magic: ; CHECK: // %bb.0: -; CHECK-NEXT: add w8, w0, #64 // =64 +; CHECK-NEXT: add w8, w0, #64 ; CHECK-NEXT: and w8, w8, #0xffff -; CHECK-NEXT: cmp w8, #256 // =256 +; CHECK-NEXT: cmp w8, #256 ; CHECK-NEXT: cset w0, lo ; CHECK-NEXT: ret %tmp0 = add i16 %x, 64 ; 1U << (8-1-1) @@ -354,9 +354,9 @@ define i1 @add_ultcmp_bad_i16_i4(i16 %x) nounwind { ; CHECK-LABEL: add_ultcmp_bad_i16_i4: ; CHECK: // %bb.0: -; CHECK-NEXT: add w8, w0, #8 // =8 +; CHECK-NEXT: add w8, w0, #8 ; CHECK-NEXT: and w8, w8, #0xffff -; CHECK-NEXT: cmp w8, #16 // =16 +; CHECK-NEXT: cmp w8, #16 ; CHECK-NEXT: cset w0, lo ; CHECK-NEXT: ret %tmp0 = add i16 %x, 8 ; 1U << (4-1) @@ -368,9 +368,9 @@ define i1 @add_ultcmp_bad_i24_i8(i24 %x) nounwind { ; CHECK-LABEL: add_ultcmp_bad_i24_i8: ; CHECK: // %bb.0: -; CHECK-NEXT: add w8, w0, #128 // =128 +; CHECK-NEXT: add w8, w0, #128 ; CHECK-NEXT: and w8, w8, #0xffffff -; CHECK-NEXT: cmp w8, #256 // =256 +; CHECK-NEXT: cmp w8, #256 ; CHECK-NEXT: cset w0, lo ; CHECK-NEXT: ret %tmp0 = add i24 %x, 128 ; 1U << (8-1) diff --git a/llvm/test/CodeGen/AArch64/sink-addsub-of-const.ll b/llvm/test/CodeGen/AArch64/sink-addsub-of-const.ll --- a/llvm/test/CodeGen/AArch64/sink-addsub-of-const.ll +++ b/llvm/test/CodeGen/AArch64/sink-addsub-of-const.ll @@ -10,7 +10,7 @@ ; CHECK-LABEL: sink_add_of_const_to_add0: ; CHECK: // %bb.0: ; CHECK-NEXT: add w8, w0, w1 -; CHECK-NEXT: add w0, w8, #32 // =32 +; CHECK-NEXT: add w0, w8, #32 ; CHECK-NEXT: ret %t0 = add i32 %a, 32 ; constant always on RHS %r = add i32 %t0, %b @@ -20,7 +20,7 @@ ; CHECK-LABEL: sink_add_of_const_to_add1: ; CHECK: // %bb.0: ; CHECK-NEXT: add w8, w0, w1 -; CHECK-NEXT: add w0, w8, #32 // =32 +; CHECK-NEXT: add w0, w8, #32 ; CHECK-NEXT: ret %t0 = add i32 %a, 32 ; constant always on RHS %r = add i32 %b, %t0 @@ -34,7 +34,7 @@ ; CHECK-LABEL: sink_sub_of_const_to_add0: ; CHECK: // %bb.0: ; CHECK-NEXT: add w8, w0, w1 -; CHECK-NEXT: sub w0, w8, #32 // =32 +; CHECK-NEXT: sub w0, w8, #32 ; CHECK-NEXT: ret %t0 = sub i32 %a, 32 %r = add i32 %t0, %b @@ -44,7 +44,7 @@ ; CHECK-LABEL: sink_sub_of_const_to_add1: ; CHECK: // %bb.0: ; CHECK-NEXT: add w8, w0, w1 -; CHECK-NEXT: sub w0, w8, #32 // =32 +; CHECK-NEXT: sub w0, w8, #32 ; CHECK-NEXT: ret %t0 = sub i32 %a, 32 %r = add i32 %b, %t0 @@ -58,7 +58,7 @@ ; CHECK-LABEL: sink_sub_from_const_to_add0: ; CHECK: // %bb.0: ; CHECK-NEXT: sub w8, w1, w0 -; CHECK-NEXT: add w0, w8, #32 // =32 +; CHECK-NEXT: add w0, w8, #32 ; CHECK-NEXT: ret %t0 = sub i32 32, %a %r = add i32 %t0, %b @@ -68,7 +68,7 @@ ; CHECK-LABEL: sink_sub_from_const_to_add1: ; CHECK: // %bb.0: ; CHECK-NEXT: sub w8, w1, w0 -; CHECK-NEXT: add w0, w8, #32 // =32 +; CHECK-NEXT: add w0, w8, #32 ; CHECK-NEXT: ret %t0 = sub i32 32, %a %r = add i32 %b, %t0 @@ -82,7 +82,7 @@ ; CHECK-LABEL: sink_add_of_const_to_sub: ; CHECK: // %bb.0: ; CHECK-NEXT: sub w8, w0, w1 -; CHECK-NEXT: add w0, w8, #32 // =32 +; CHECK-NEXT: add w0, w8, #32 ; CHECK-NEXT: ret %t0 = add i32 %a, 32 ; constant always on RHS %r = sub i32 %t0, %b @@ -92,7 +92,7 @@ ; CHECK-LABEL: sink_add_of_const_to_sub2: ; CHECK: // %bb.0: ; CHECK-NEXT: sub w8, w1, w0 -; CHECK-NEXT: sub w0, w8, #32 // =32 +; CHECK-NEXT: sub w0, w8, #32 ; CHECK-NEXT: ret %t0 = add i32 %a, 32 ; constant always on RHS %r = sub i32 %b, %t0 @@ -106,7 +106,7 @@ ; CHECK-LABEL: 
sink_sub_of_const_to_sub: ; CHECK: // %bb.0: ; CHECK-NEXT: sub w8, w0, w1 -; CHECK-NEXT: sub w0, w8, #32 // =32 +; CHECK-NEXT: sub w0, w8, #32 ; CHECK-NEXT: ret %t0 = sub i32 %a, 32 %r = sub i32 %t0, %b @@ -116,7 +116,7 @@ ; CHECK-LABEL: sink_sub_of_const_to_sub2: ; CHECK: // %bb.0: ; CHECK-NEXT: sub w8, w1, w0 -; CHECK-NEXT: add w0, w8, #32 // =32 +; CHECK-NEXT: add w0, w8, #32 ; CHECK-NEXT: ret %t0 = sub i32 %a, 32 %r = sub i32 %b, %t0 @@ -141,7 +141,7 @@ ; CHECK-LABEL: sink_sub_from_const_to_sub2: ; CHECK: // %bb.0: ; CHECK-NEXT: add w8, w0, w1 -; CHECK-NEXT: sub w0, w8, #32 // =32 +; CHECK-NEXT: sub w0, w8, #32 ; CHECK-NEXT: ret %t0 = sub i32 32, %a %r = sub i32 %b, %t0 diff --git a/llvm/test/CodeGen/AArch64/split-vector-insert.ll b/llvm/test/CodeGen/AArch64/split-vector-insert.ll --- a/llvm/test/CodeGen/AArch64/split-vector-insert.ll +++ b/llvm/test/CodeGen/AArch64/split-vector-insert.ll @@ -24,10 +24,10 @@ ; CHECK-NEXT: cntd x9 ; CHECK-NEXT: st1d { z0.d }, p0, [sp] ; CHECK-NEXT: str q1, [sp] -; CHECK-NEXT: sub x9, x9, #2 // =2 +; CHECK-NEXT: sub x9, x9, #2 ; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp] ; CHECK-NEXT: mov w8, #2 -; CHECK-NEXT: cmp x9, #2 // =2 +; CHECK-NEXT: cmp x9, #2 ; CHECK-NEXT: csel x8, x9, x8, lo ; CHECK-NEXT: addvl x10, sp, #1 ; CHECK-NEXT: lsl x8, x8, #3 @@ -35,7 +35,7 @@ ; CHECK-NEXT: str q2, [x10, x8] ; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp, #1, mul vl] ; CHECK-NEXT: mov w8, #4 -; CHECK-NEXT: cmp x9, #4 // =4 +; CHECK-NEXT: cmp x9, #4 ; CHECK-NEXT: csel x8, x9, x8, lo ; CHECK-NEXT: addvl x10, sp, #2 ; CHECK-NEXT: lsl x8, x8, #3 @@ -43,7 +43,7 @@ ; CHECK-NEXT: str q3, [x10, x8] ; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp, #2, mul vl] ; CHECK-NEXT: mov w8, #6 -; CHECK-NEXT: cmp x9, #6 // =6 +; CHECK-NEXT: cmp x9, #6 ; CHECK-NEXT: csel x8, x9, x8, lo ; CHECK-NEXT: addvl x10, sp, #3 ; CHECK-NEXT: lsl x8, x8, #3 @@ -74,10 +74,10 @@ ; CHECK-NEXT: cntd x9 ; CHECK-NEXT: st1d { z0.d }, p0, [sp] ; CHECK-NEXT: str q1, [sp] -; CHECK-NEXT: sub x9, x9, #2 // =2 +; CHECK-NEXT: sub x9, x9, #2 ; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp] ; CHECK-NEXT: mov w8, #2 -; CHECK-NEXT: cmp x9, #2 // =2 +; CHECK-NEXT: cmp x9, #2 ; CHECK-NEXT: csel x8, x9, x8, lo ; CHECK-NEXT: addvl x10, sp, #1 ; CHECK-NEXT: lsl x8, x8, #3 @@ -85,7 +85,7 @@ ; CHECK-NEXT: str q2, [x10, x8] ; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp, #1, mul vl] ; CHECK-NEXT: mov w8, #4 -; CHECK-NEXT: cmp x9, #4 // =4 +; CHECK-NEXT: cmp x9, #4 ; CHECK-NEXT: csel x8, x9, x8, lo ; CHECK-NEXT: addvl x10, sp, #2 ; CHECK-NEXT: lsl x8, x8, #3 @@ -93,7 +93,7 @@ ; CHECK-NEXT: str q3, [x10, x8] ; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp, #2, mul vl] ; CHECK-NEXT: mov w8, #6 -; CHECK-NEXT: cmp x9, #6 // =6 +; CHECK-NEXT: cmp x9, #6 ; CHECK-NEXT: csel x8, x9, x8, lo ; CHECK-NEXT: addvl x10, sp, #3 ; CHECK-NEXT: lsl x8, x8, #3 diff --git a/llvm/test/CodeGen/AArch64/srem-lkk.ll b/llvm/test/CodeGen/AArch64/srem-lkk.ll --- a/llvm/test/CodeGen/AArch64/srem-lkk.ll +++ b/llvm/test/CodeGen/AArch64/srem-lkk.ll @@ -95,8 +95,8 @@ define i32 @dont_fold_srem_power_of_two(i32 %x) { ; CHECK-LABEL: dont_fold_srem_power_of_two: ; CHECK: // %bb.0: -; CHECK-NEXT: add w8, w0, #63 // =63 -; CHECK-NEXT: cmp w0, #0 // =0 +; CHECK-NEXT: add w8, w0, #63 +; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: csel w8, w8, w0, lt ; CHECK-NEXT: and w8, w8, #0xffffffc0 ; CHECK-NEXT: sub w0, w0, w8 @@ -121,7 +121,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: mov w8, #2147483647 ; CHECK-NEXT: add w8, w0, w8 -; CHECK-NEXT: cmp w0, #0 // =0 +; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: csel w8, w8, w0, lt ; CHECK-NEXT: 
and w8, w8, #0x80000000 ; CHECK-NEXT: add w0, w0, w8 diff --git a/llvm/test/CodeGen/AArch64/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/AArch64/srem-seteq-illegal-types.ll --- a/llvm/test/CodeGen/AArch64/srem-seteq-illegal-types.ll +++ b/llvm/test/CodeGen/AArch64/srem-seteq-illegal-types.ll @@ -32,7 +32,7 @@ ; CHECK-NEXT: add w9, w9, w10 ; CHECK-NEXT: mov w10, #6 ; CHECK-NEXT: msub w8, w9, w10, w8 -; CHECK-NEXT: cmp w8, #1 // =1 +; CHECK-NEXT: cmp w8, #1 ; CHECK-NEXT: cset w0, eq ; CHECK-NEXT: ret %srem = srem i4 %X, 6 diff --git a/llvm/test/CodeGen/AArch64/srem-seteq.ll b/llvm/test/CodeGen/AArch64/srem-seteq.ll --- a/llvm/test/CodeGen/AArch64/srem-seteq.ll +++ b/llvm/test/CodeGen/AArch64/srem-seteq.ll @@ -50,7 +50,7 @@ ; CHECK-NEXT: movk w8, #27306, lsl #16 ; CHECK-NEXT: orr w9, wzr, #0x1 ; CHECK-NEXT: madd w8, w0, w8, w9 -; CHECK-NEXT: cmp w8, #3 // =3 +; CHECK-NEXT: cmp w8, #3 ; CHECK-NEXT: cset w0, lo ; CHECK-NEXT: ret %srem = srem i32 %X, 1073741827 @@ -67,7 +67,7 @@ ; CHECK-NEXT: movk w8, #54613, lsl #16 ; CHECK-NEXT: orr w9, wzr, #0x1 ; CHECK-NEXT: madd w8, w0, w8, w9 -; CHECK-NEXT: cmp w8, #3 // =3 +; CHECK-NEXT: cmp w8, #3 ; CHECK-NEXT: cset w0, lo ; CHECK-NEXT: ret %srem = srem i32 %X, 2147483651 @@ -126,7 +126,7 @@ ; CHECK-NEXT: orr w9, wzr, #0x8 ; CHECK-NEXT: madd w8, w0, w8, w9 ; CHECK-NEXT: ror w8, w8, #3 -; CHECK-NEXT: cmp w8, #3 // =3 +; CHECK-NEXT: cmp w8, #3 ; CHECK-NEXT: cset w0, lo ; CHECK-NEXT: ret %srem = srem i32 %X, 1073741928 @@ -144,7 +144,7 @@ ; CHECK-NEXT: orr w9, wzr, #0x2 ; CHECK-NEXT: madd w8, w0, w8, w9 ; CHECK-NEXT: ror w8, w8, #1 -; CHECK-NEXT: cmp w8, #3 // =3 +; CHECK-NEXT: cmp w8, #3 ; CHECK-NEXT: cset w0, lo ; CHECK-NEXT: ret %srem = srem i32 %X, 2147483750 @@ -234,8 +234,8 @@ define i32 @test_srem_pow2(i32 %X) nounwind { ; CHECK-LABEL: test_srem_pow2: ; CHECK: // %bb.0: -; CHECK-NEXT: add w8, w0, #15 // =15 -; CHECK-NEXT: cmp w0, #0 // =0 +; CHECK-NEXT: add w8, w0, #15 +; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: csel w8, w8, w0, lt ; CHECK-NEXT: and w8, w8, #0xfffffff0 ; CHECK-NEXT: cmp w0, w8 @@ -253,7 +253,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: mov w8, #2147483647 ; CHECK-NEXT: add w8, w0, w8 -; CHECK-NEXT: cmp w0, #0 // =0 +; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: csel w8, w8, w0, lt ; CHECK-NEXT: and w8, w8, #0x80000000 ; CHECK-NEXT: cmn w0, w8 diff --git a/llvm/test/CodeGen/AArch64/srem-vector-lkk.ll b/llvm/test/CodeGen/AArch64/srem-vector-lkk.ll --- a/llvm/test/CodeGen/AArch64/srem-vector-lkk.ll +++ b/llvm/test/CodeGen/AArch64/srem-vector-lkk.ll @@ -157,8 +157,8 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 ; CHECK-NEXT: smov w8, v0.h[1] -; CHECK-NEXT: add w12, w8, #31 // =31 -; CHECK-NEXT: cmp w8, #0 // =0 +; CHECK-NEXT: add w12, w8, #31 +; CHECK-NEXT: cmp w8, #0 ; CHECK-NEXT: mov w11, #37253 ; CHECK-NEXT: csel w12, w12, w8, lt ; CHECK-NEXT: smov w9, v0.h[0] @@ -166,9 +166,9 @@ ; CHECK-NEXT: movk w11, #44150, lsl #16 ; CHECK-NEXT: and w12, w12, #0xffffffe0 ; CHECK-NEXT: sub w8, w8, w12 -; CHECK-NEXT: add w12, w9, #63 // =63 +; CHECK-NEXT: add w12, w9, #63 ; CHECK-NEXT: smull x11, w10, w11 -; CHECK-NEXT: cmp w9, #0 // =0 +; CHECK-NEXT: cmp w9, #0 ; CHECK-NEXT: lsr x11, x11, #32 ; CHECK-NEXT: csel w12, w12, w9, lt ; CHECK-NEXT: add w11, w11, w10 @@ -178,8 +178,8 @@ ; CHECK-NEXT: add w11, w12, w11, lsr #31 ; CHECK-NEXT: smov w12, v0.h[2] ; CHECK-NEXT: fmov s0, w9 -; CHECK-NEXT: add w9, w12, #7 // =7 -; CHECK-NEXT: cmp w12, #0 // =0 +; CHECK-NEXT: add w9, w12, #7 +; CHECK-NEXT: cmp w12, #0 ; CHECK-NEXT: csel w9, w9, w12, lt ; 
CHECK-NEXT: and w9, w9, #0xfffffff8 ; CHECK-NEXT: sub w9, w12, w9 @@ -263,7 +263,7 @@ ; CHECK-NEXT: add w10, w10, w11 ; CHECK-NEXT: mov w11, #32767 ; CHECK-NEXT: add w11, w8, w11 -; CHECK-NEXT: cmp w8, #0 // =0 +; CHECK-NEXT: cmp w8, #0 ; CHECK-NEXT: csel w11, w11, w8, lt ; CHECK-NEXT: and w11, w11, #0xffff8000 ; CHECK-NEXT: sub w8, w8, w11 diff --git a/llvm/test/CodeGen/AArch64/ssub_sat.ll b/llvm/test/CodeGen/AArch64/ssub_sat.ll --- a/llvm/test/CodeGen/AArch64/ssub_sat.ll +++ b/llvm/test/CodeGen/AArch64/ssub_sat.ll @@ -13,7 +13,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: subs w8, w0, w1 ; CHECK-NEXT: mov w9, #2147483647 -; CHECK-NEXT: cmp w8, #0 // =0 +; CHECK-NEXT: cmp w8, #0 ; CHECK-NEXT: cinv w8, w9, ge ; CHECK-NEXT: subs w9, w0, w1 ; CHECK-NEXT: csel w0, w8, w9, vs @@ -27,7 +27,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: subs x8, x0, x1 ; CHECK-NEXT: mov x9, #9223372036854775807 -; CHECK-NEXT: cmp x8, #0 // =0 +; CHECK-NEXT: cmp x8, #0 ; CHECK-NEXT: cinv x8, x9, ge ; CHECK-NEXT: subs x9, x0, x1 ; CHECK-NEXT: csel x0, x8, x9, vs @@ -58,9 +58,9 @@ ; CHECK-NEXT: sxtb w8, w0 ; CHECK-NEXT: sub w8, w8, w1, sxtb ; CHECK-NEXT: mov w9, #127 -; CHECK-NEXT: cmp w8, #127 // =127 +; CHECK-NEXT: cmp w8, #127 ; CHECK-NEXT: csel w8, w8, w9, lt -; CHECK-NEXT: cmn w8, #128 // =128 +; CHECK-NEXT: cmn w8, #128 ; CHECK-NEXT: mov w9, #-128 ; CHECK-NEXT: csel w0, w8, w9, gt ; CHECK-NEXT: ret @@ -75,9 +75,9 @@ ; CHECK-NEXT: sbfx w9, w0, #0, #4 ; CHECK-NEXT: sub w8, w9, w8, asr #28 ; CHECK-NEXT: mov w10, #7 -; CHECK-NEXT: cmp w8, #7 // =7 +; CHECK-NEXT: cmp w8, #7 ; CHECK-NEXT: csel w8, w8, w10, lt -; CHECK-NEXT: cmn w8, #8 // =8 +; CHECK-NEXT: cmn w8, #8 ; CHECK-NEXT: mov w9, #-8 ; CHECK-NEXT: csel w0, w8, w9, gt ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/ssub_sat_plus.ll b/llvm/test/CodeGen/AArch64/ssub_sat_plus.ll --- a/llvm/test/CodeGen/AArch64/ssub_sat_plus.ll +++ b/llvm/test/CodeGen/AArch64/ssub_sat_plus.ll @@ -13,7 +13,7 @@ ; CHECK-NEXT: mul w8, w1, w2 ; CHECK-NEXT: subs w10, w0, w8 ; CHECK-NEXT: mov w9, #2147483647 -; CHECK-NEXT: cmp w10, #0 // =0 +; CHECK-NEXT: cmp w10, #0 ; CHECK-NEXT: cinv w9, w9, ge ; CHECK-NEXT: subs w8, w0, w8 ; CHECK-NEXT: csel w0, w9, w8, vs @@ -28,7 +28,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: subs x8, x0, x2 ; CHECK-NEXT: mov x9, #9223372036854775807 -; CHECK-NEXT: cmp x8, #0 // =0 +; CHECK-NEXT: cmp x8, #0 ; CHECK-NEXT: cinv x8, x9, ge ; CHECK-NEXT: subs x9, x0, x2 ; CHECK-NEXT: csel x0, x8, x9, vs @@ -63,9 +63,9 @@ ; CHECK-NEXT: mul w9, w1, w2 ; CHECK-NEXT: sub w8, w8, w9, sxtb ; CHECK-NEXT: mov w10, #127 -; CHECK-NEXT: cmp w8, #127 // =127 +; CHECK-NEXT: cmp w8, #127 ; CHECK-NEXT: csel w8, w8, w10, lt -; CHECK-NEXT: cmn w8, #128 // =128 +; CHECK-NEXT: cmn w8, #128 ; CHECK-NEXT: mov w9, #-128 ; CHECK-NEXT: csel w0, w8, w9, gt ; CHECK-NEXT: ret @@ -82,9 +82,9 @@ ; CHECK-NEXT: lsl w9, w9, #28 ; CHECK-NEXT: sub w8, w8, w9, asr #28 ; CHECK-NEXT: mov w10, #7 -; CHECK-NEXT: cmp w8, #7 // =7 +; CHECK-NEXT: cmp w8, #7 ; CHECK-NEXT: csel w8, w8, w10, lt -; CHECK-NEXT: cmn w8, #8 // =8 +; CHECK-NEXT: cmn w8, #8 ; CHECK-NEXT: mov w9, #-8 ; CHECK-NEXT: csel w0, w8, w9, gt ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll b/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll --- a/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll +++ b/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll @@ -136,8 +136,8 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: ld1 { v0.b }[0], [x1] ; CHECK-NEXT: ld1 { v1.b }[0], [x0] -; CHECK-NEXT: add x8, x0, #1 // =1 -; CHECK-NEXT: add x9, x1, #1 // =1 +; CHECK-NEXT: add x8, 
x0, #1 +; CHECK-NEXT: add x9, x1, #1 ; CHECK-NEXT: ld1 { v0.b }[4], [x9] ; CHECK-NEXT: ld1 { v1.b }[4], [x8] ; CHECK-NEXT: shl v0.2s, v0.2s, #24 @@ -176,8 +176,8 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: ld1 { v0.h }[0], [x1] ; CHECK-NEXT: ld1 { v1.h }[0], [x0] -; CHECK-NEXT: add x8, x0, #2 // =2 -; CHECK-NEXT: add x9, x1, #2 // =2 +; CHECK-NEXT: add x8, x0, #2 +; CHECK-NEXT: add x9, x1, #2 ; CHECK-NEXT: ld1 { v0.h }[2], [x9] ; CHECK-NEXT: ld1 { v1.h }[2], [x8] ; CHECK-NEXT: shl v0.2s, v0.2s, #16 @@ -357,7 +357,7 @@ ; CHECK-NEXT: sbcs x12, x3, x7 ; CHECK-NEXT: mov x9, #9223372036854775807 ; CHECK-NEXT: eor x10, x3, x7 -; CHECK-NEXT: cmp x12, #0 // =0 +; CHECK-NEXT: cmp x12, #0 ; CHECK-NEXT: eor x13, x3, x12 ; CHECK-NEXT: cinv x14, x9, ge ; CHECK-NEXT: tst x10, x13 @@ -367,7 +367,7 @@ ; CHECK-NEXT: subs x8, x0, x4 ; CHECK-NEXT: sbcs x10, x1, x5 ; CHECK-NEXT: eor x11, x1, x5 -; CHECK-NEXT: cmp x10, #0 // =0 +; CHECK-NEXT: cmp x10, #0 ; CHECK-NEXT: eor x12, x1, x10 ; CHECK-NEXT: cinv x9, x9, ge ; CHECK-NEXT: tst x11, x12 diff --git a/llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll b/llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll --- a/llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll +++ b/llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll @@ -8,9 +8,9 @@ define i32 @test_stack_guard_remat2() ssp { ; CHECK-LABEL: test_stack_guard_remat2: ; CHECK: ; %bb.0: ; %entry -; CHECK-NEXT: sub sp, sp, #64 ; =64 +; CHECK-NEXT: sub sp, sp, #64 ; CHECK-NEXT: stp x29, x30, [sp, #48] ; 16-byte Folded Spill -; CHECK-NEXT: add x29, sp, #48 ; =48 +; CHECK-NEXT: add x29, sp, #48 ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 @@ -41,7 +41,7 @@ ; CHECK-NEXT: ; %bb.1: ; %entry ; CHECK-NEXT: ldp x29, x30, [sp, #48] ; 16-byte Folded Reload ; CHECK-NEXT: mov w0, #-1 -; CHECK-NEXT: add sp, sp, #64 ; =64 +; CHECK-NEXT: add sp, sp, #64 ; CHECK-NEXT: ret ; CHECK-NEXT: LBB0_2: ; %entry ; CHECK-NEXT: bl ___stack_chk_fail diff --git a/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll b/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll --- a/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll +++ b/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll @@ -43,7 +43,7 @@ ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill ; CHECK-NEXT: mov x29, sp -; CHECK-NEXT: sub sp, sp, #16 // =16 +; CHECK-NEXT: sub sp, sp, #16 ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 @@ -58,7 +58,7 @@ ; CHECK-MINUS-257-OFFSET: sub x8, x8, #257 ; CHECK-MINUS-257-OFFSET-NEXT: ldr x8, [x8] ; CHECK-NEXT: lsl x9, x0, #2 -; CHECK-NEXT: add x9, x9, #15 // =15 +; CHECK-NEXT: add x9, x9, #15 ; CHECK-NEXT: and x9, x9, #0xfffffffffffffff0 ; CHECK-NEXT: stur x8, [x29, #-8] ; CHECK-NEXT: mov x8, sp diff --git a/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll b/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll --- a/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll +++ b/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll @@ -81,13 +81,13 @@ define i1 @test_relocate(i32 addrspace(1)* %a) gc "statepoint-example" { ; CHECK-LABEL: test_relocate: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: sub sp, sp, #16 // =16 +; CHECK-NEXT: sub sp, sp, #16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: str x0, [sp, #8] ; CHECK-NEXT: bl return_i1 ; CHECK-NEXT: .Ltmp7: ; CHECK-NEXT: and w0, w0, #0x1 -; CHECK-NEXT: add sp, sp, #16 // =16 +; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret ; Check that an unused relocate has no code-generation impact entry: @@ -176,7 +176,7 @@ define void @test_attributes(%struct2* byval(%struct2) %s) gc "statepoint-example" { ; CHECK-LABEL: test_attributes: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: sub sp, sp, #32 // =32 +; CHECK-NEXT: sub sp, sp, #32 ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: ldr x8, [sp, #48] ; CHECK-NEXT: ldr q0, [sp, #32] @@ -187,7 +187,7 @@ ; CHECK-NEXT: str q0, [sp] ; CHECK-NEXT: bl consume_attributes ; CHECK-NEXT: .Ltmp11: -; CHECK-NEXT: add sp, sp, #32 // =32 +; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret entry: ; Check that arguments with attributes are lowered correctly. 
diff --git a/llvm/test/CodeGen/AArch64/sub-of-not.ll b/llvm/test/CodeGen/AArch64/sub-of-not.ll --- a/llvm/test/CodeGen/AArch64/sub-of-not.ll +++ b/llvm/test/CodeGen/AArch64/sub-of-not.ll @@ -10,7 +10,7 @@ ; CHECK-LABEL: scalar_i8: ; CHECK: // %bb.0: ; CHECK-NEXT: add w8, w1, w0 -; CHECK-NEXT: add w0, w8, #1 // =1 +; CHECK-NEXT: add w0, w8, #1 ; CHECK-NEXT: ret %t0 = xor i8 %x, -1 %t1 = sub i8 %y, %t0 @@ -21,7 +21,7 @@ ; CHECK-LABEL: scalar_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: add w8, w1, w0 -; CHECK-NEXT: add w0, w8, #1 // =1 +; CHECK-NEXT: add w0, w8, #1 ; CHECK-NEXT: ret %t0 = xor i16 %x, -1 %t1 = sub i16 %y, %t0 @@ -32,7 +32,7 @@ ; CHECK-LABEL: scalar_i32: ; CHECK: // %bb.0: ; CHECK-NEXT: add w8, w1, w0 -; CHECK-NEXT: add w0, w8, #1 // =1 +; CHECK-NEXT: add w0, w8, #1 ; CHECK-NEXT: ret %t0 = xor i32 %x, -1 %t1 = sub i32 %y, %t0 @@ -43,7 +43,7 @@ ; CHECK-LABEL: scalar_i64: ; CHECK: // %bb.0: ; CHECK-NEXT: add x8, x1, x0 -; CHECK-NEXT: add x0, x8, #1 // =1 +; CHECK-NEXT: add x0, x8, #1 ; CHECK-NEXT: ret %t0 = xor i64 %x, -1 %t1 = sub i64 %y, %t0 diff --git a/llvm/test/CodeGen/AArch64/sub1.ll b/llvm/test/CodeGen/AArch64/sub1.ll --- a/llvm/test/CodeGen/AArch64/sub1.ll +++ b/llvm/test/CodeGen/AArch64/sub1.ll @@ -4,7 +4,7 @@ define i64 @sub1_disguised_constant(i64 %x) { ; CHECK-LABEL: sub1_disguised_constant: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w0, #1 // =1 +; CHECK-NEXT: sub w8, w0, #1 ; CHECK-NEXT: and w8, w0, w8 ; CHECK-NEXT: and x0, x8, #0xffff ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll --- a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll +++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll @@ -42,13 +42,13 @@ ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-4 -; CHECK-NEXT: sub sp, sp, #16 // =16 +; CHECK-NEXT: sub sp, sp, #16 ; CHECK-NEXT: ptrue p0.b ; CHECK-NEXT: ld4d { z1.d, z2.d, z3.d, z4.d }, p0/z, [x0] ; CHECK-NEXT: ld4d { z16.d, z17.d, z18.d, z19.d }, p0/z, [x1] ; CHECK-NEXT: ptrue p0.d -; CHECK-NEXT: add x8, sp, #16 // =16 -; CHECK-NEXT: add x9, sp, #16 // =16 +; CHECK-NEXT: add x8, sp, #16 +; CHECK-NEXT: add x9, sp, #16 ; CHECK-NEXT: fmov s0, #1.00000000 ; CHECK-NEXT: mov w1, #1 ; CHECK-NEXT: mov w2, #2 @@ -65,7 +65,7 @@ ; CHECK-NEXT: str x8, [sp] ; CHECK-NEXT: bl callee2 ; CHECK-NEXT: addvl sp, sp, #4 -; CHECK-NEXT: add sp, sp, #16 // =16 +; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/AArch64/sve-extract-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-vector.ll --- a/llvm/test/CodeGen/AArch64/sve-extract-vector.ll +++ b/llvm/test/CodeGen/AArch64/sve-extract-vector.ll @@ -18,9 +18,9 @@ ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: cntd x9 -; CHECK-NEXT: sub x9, x9, #2 // =2 +; CHECK-NEXT: sub x9, x9, #2 ; CHECK-NEXT: mov w8, #2 -; CHECK-NEXT: cmp x9, #2 // =2 +; CHECK-NEXT: cmp x9, #2 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: csel x8, x9, x8, lo ; CHECK-NEXT: st1d { z0.d }, p0, [sp] @@ -51,9 +51,9 @@ ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: cntw x9 -; CHECK-NEXT: sub x9, x9, #4 // =4 +; CHECK-NEXT: sub x9, x9, #4 ; CHECK-NEXT: mov w8, #4 -; CHECK-NEXT: cmp x9, #4 // =4 +; CHECK-NEXT: cmp x9, #4 ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: csel x8, x9, x8, lo ; CHECK-NEXT: st1w { z0.s }, p0, [sp] @@ -84,9 +84,9 @@ ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: cnth x9 -; CHECK-NEXT: sub x9, x9, #8 // =8 +; CHECK-NEXT: sub x9, x9, #8 ; CHECK-NEXT: mov w8, #8 -; CHECK-NEXT: cmp x9, #8 // =8 +; CHECK-NEXT: cmp x9, #8 ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: csel x8, x9, x8, lo ; CHECK-NEXT: st1h { z0.h }, p0, [sp] @@ -117,10 +117,10 @@ ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: rdvl x9, #1 -; CHECK-NEXT: sub x9, x9, #16 // =16 +; CHECK-NEXT: sub x9, x9, #16 ; CHECK-NEXT: ptrue p0.b ; CHECK-NEXT: mov w8, #16 -; CHECK-NEXT: cmp x9, #16 // =16 +; CHECK-NEXT: cmp x9, #16 ; CHECK-NEXT: st1b { z0.b }, p0, [sp] ; CHECK-NEXT: csel x8, x9, x8, lo ; CHECK-NEXT: mov x9, sp @@ -159,9 +159,9 @@ ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: cntd x9 -; CHECK-NEXT: sub x9, x9, #2 // =2 +; CHECK-NEXT: sub x9, x9, #2 ; CHECK-NEXT: mov w8, #2 -; CHECK-NEXT: cmp x9, #2 // =2 +; CHECK-NEXT: cmp x9, #2 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: csel x8, x9, x8, lo ; CHECK-NEXT: st1d { z0.d }, p0, [sp] @@ -181,11 +181,11 @@ ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: cntd x9 -; CHECK-NEXT: subs x9, x9, #4 // =4 +; CHECK-NEXT: subs x9, x9, #4 ; CHECK-NEXT: csel x9, xzr, x9, lo ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov w10, #4 -; CHECK-NEXT: cmp x9, #4 // =4 +; CHECK-NEXT: cmp x9, #4 ; CHECK-NEXT: ptrue p1.d, vl4 ; CHECK-NEXT: st1d { z0.d }, p0, [sp] ; CHECK-NEXT: csel x9, x9, x10, lo diff --git a/llvm/test/CodeGen/AArch64/sve-insert-element.ll b/llvm/test/CodeGen/AArch64/sve-insert-element.ll --- a/llvm/test/CodeGen/AArch64/sve-insert-element.ll +++ b/llvm/test/CodeGen/AArch64/sve-insert-element.ll @@ -506,7 +506,7 @@ ; CHECK-NEXT: rdvl x10, #2 ; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 ; CHECK-NEXT: sxtw x9, w1 -; CHECK-NEXT: sub x10, x10, #1 // =1 +; CHECK-NEXT: sub x10, x10, #1 ; CHECK-NEXT: cmp x9, x10 ; CHECK-NEXT: mov z0.b, p1/z, #1 // =0x1 ; CHECK-NEXT: ptrue p1.b diff --git a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll --- a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll +++ b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll @@ -23,9 +23,9 @@ ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: cntd x9 -; CHECK-NEXT: sub x9, x9, #2 // =2 +; CHECK-NEXT: sub x9, x9, #2 ; CHECK-NEXT: mov w8, #2 -; CHECK-NEXT: cmp x9, #2 // =2 +; CHECK-NEXT: cmp x9, #2 ; CHECK-NEXT: csel x8, x9, x8, lo ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: lsl x8, x8, #3 @@ -62,9 +62,9 @@ ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: cntw x9 -; CHECK-NEXT: sub x9, x9, #4 // =4 +; CHECK-NEXT: sub x9, x9, #4 ; CHECK-NEXT: mov w8, #4 -; CHECK-NEXT: cmp x9, #4 // =4 +; CHECK-NEXT: cmp x9, #4 ; CHECK-NEXT: csel x8, x9, x8, lo ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: lsl x8, x8, #2 @@ -101,9 +101,9 @@ ; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: cnth x9 -; CHECK-NEXT: sub x9, x9, #8 // =8 +; CHECK-NEXT: sub x9, x9, #8 ; CHECK-NEXT: mov w8, #8 -; CHECK-NEXT: cmp x9, #8 // =8 +; CHECK-NEXT: cmp x9, #8 ; CHECK-NEXT: csel x8, x9, x8, lo ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: lsl x8, x8, #1 @@ -140,9 +140,9 @@ ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: rdvl x9, #1 -; CHECK-NEXT: sub x9, x9, #16 // =16 +; CHECK-NEXT: sub x9, x9, #16 ; CHECK-NEXT: mov w8, #16 -; CHECK-NEXT: cmp x9, #16 // =16 +; CHECK-NEXT: cmp x9, #16 ; CHECK-NEXT: ptrue p0.b ; CHECK-NEXT: csel x8, x9, x8, lo ; CHECK-NEXT: mov x9, sp @@ -307,9 +307,9 @@ ; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill ; CHECK-NEXT: addvl sp, sp, #-1 ; CHECK-NEXT: cntd x9 -; CHECK-NEXT: sub x9, x9, #2 // =2 +; CHECK-NEXT: sub x9, x9, #2 ; CHECK-NEXT: mov w8, #2 -; CHECK-NEXT: cmp x9, #2 // =2 +; CHECK-NEXT: cmp x9, #2 ; CHECK-NEXT: csel x8, x9, x8, lo ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: lsl x8, x8, #3 @@ -332,10 +332,10 @@ ; CHECK-NEXT: ptrue p0.d, vl4 ; CHECK-NEXT: cntd x8 ; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0] -; CHECK-NEXT: subs x8, x8, #4 // =4 +; CHECK-NEXT: subs x8, x8, #4 ; CHECK-NEXT: csel x8, xzr, x8, lo ; CHECK-NEXT: mov w9, #4 -; CHECK-NEXT: cmp x8, #4 // =4 +; CHECK-NEXT: cmp x8, #4 ; CHECK-NEXT: ptrue p1.d ; CHECK-NEXT: csel x8, x8, x9, lo ; CHECK-NEXT: mov x9, sp diff --git a/llvm/test/CodeGen/AArch64/sve-ld1r.ll b/llvm/test/CodeGen/AArch64/sve-ld1r.ll --- a/llvm/test/CodeGen/AArch64/sve-ld1r.ll +++ b/llvm/test/CodeGen/AArch64/sve-ld1r.ll @@ -17,7 +17,7 @@ define <vscale x 16 x i8> @ld1r_stack() { ; CHECK-LABEL: ld1r_stack: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 // =16 +; CHECK-NEXT: sub sp, sp, #16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: adrp x8, :got:g8 ; CHECK-NEXT: ldr x8, [x8, :got_lo12:g8] @@ -25,7 +25,7 @@ ; CHECK-NEXT: ldrb w8, [x8] ; CHECK-NEXT: strb w8, [sp, #12] ; CHECK-NEXT: ld1rb { z0.b }, p0/z, [sp, #14] -; CHECK-NEXT: add sp, sp, #16 // =16 +; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret %valp = alloca i8 %valp2 = load volatile i8, i8* @g8 @@ -65,7 +65,7 @@ define <vscale x 16 x i8> @ld1rb_gep_out_of_range_up(i8* %valp) { ; CHECK-LABEL: ld1rb_gep_out_of_range_up: ; CHECK: // %bb.0: -; CHECK-NEXT: add x8, x0, #64 // =64 +; CHECK-NEXT: add x8, x0, #64 ; CHECK-NEXT: ptrue p0.b ; CHECK-NEXT: ld1rb { z0.b }, p0/z, [x8] ; CHECK-NEXT: ret @@ -79,7 +79,7 @@ define <vscale x 16 x i8> @ld1rb_gep_out_of_range_down(i8* %valp) { ; CHECK-LABEL: ld1rb_gep_out_of_range_down: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x0, #1 // =1 +; CHECK-NEXT: sub x8, x0, #1 ; CHECK-NEXT: ptrue p0.b ; CHECK-NEXT: ld1rb { z0.b }, p0/z, [x8] ; CHECK-NEXT: ret @@ -196,7 +196,7 @@ define <vscale x 8 x i16> @ld1rh_gep_out_of_range_up(i16* %valp) { ; CHECK-LABEL: ld1rh_gep_out_of_range_up: ; CHECK: // %bb.0: -; CHECK-NEXT: add x8, x0, #128 // =128 +; CHECK-NEXT: add x8, x0, #128 ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: ld1rh { z0.h }, p0/z, [x8] ; CHECK-NEXT: ret @@ -210,7 +210,7 @@ define <vscale x 8 x i16> @ld1rh_gep_out_of_range_down(i16* %valp) { ; CHECK-LABEL: ld1rh_gep_out_of_range_down: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x0, #2 // =2 +; CHECK-NEXT: sub x8, x0, #2 ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: ld1rh { z0.h }, p0/z, [x8] ; CHECK-NEXT: ret @@ -301,7 +301,7 @@ define <vscale x 4 x i32> @ld1rw_gep_out_of_range_up(i32* %valp) { ; CHECK-LABEL: ld1rw_gep_out_of_range_up: ; CHECK: // %bb.0: -; CHECK-NEXT: add x8, x0, #256 // =256 +; CHECK-NEXT: add x8, x0, #256 ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: ld1rw { z0.s }, p0/z, [x8] ; 
CHECK-NEXT: ret @@ -315,7 +315,7 @@ define <vscale x 4 x i32> @ld1rw_gep_out_of_range_down(i32* %valp) { ; CHECK-LABEL: ld1rw_gep_out_of_range_down: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x0, #4 // =4 +; CHECK-NEXT: sub x8, x0, #4 ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: ld1rw { z0.s }, p0/z, [x8] ; CHECK-NEXT: ret @@ -380,7 +380,7 @@ define <vscale x 2 x i64> @ld1rd_gep_out_of_range_up(i64* %valp) { ; CHECK-LABEL: ld1rd_gep_out_of_range_up: ; CHECK: // %bb.0: -; CHECK-NEXT: add x8, x0, #512 // =512 +; CHECK-NEXT: add x8, x0, #512 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: ld1rd { z0.d }, p0/z, [x8] ; CHECK-NEXT: ret @@ -394,7 +394,7 @@ define <vscale x 2 x i64> @ld1rd_gep_out_of_range_down(i64* %valp) { ; CHECK-LABEL: ld1rd_gep_out_of_range_down: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x0, #8 // =8 +; CHECK-NEXT: sub x8, x0, #8 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: ld1rd { z0.d }, p0/z, [x8] ; CHECK-NEXT: ret @@ -433,7 +433,7 @@ define <vscale x 8 x half> @ld1rh_half_gep_out_of_range_up(half* %valp) { ; CHECK-LABEL: ld1rh_half_gep_out_of_range_up: ; CHECK: // %bb.0: -; CHECK-NEXT: add x8, x0, #128 // =128 +; CHECK-NEXT: add x8, x0, #128 ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: ld1rh { z0.h }, p0/z, [x8] ; CHECK-NEXT: ret @@ -447,7 +447,7 @@ define <vscale x 8 x half> @ld1rh_half_gep_out_of_range_down(half* %valp) { ; CHECK-LABEL: ld1rh_half_gep_out_of_range_down: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x0, #2 // =2 +; CHECK-NEXT: sub x8, x0, #2 ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: ld1rh { z0.h }, p0/z, [x8] ; CHECK-NEXT: ret @@ -486,7 +486,7 @@ define <vscale x 4 x half> @ld1rh_half_unpacked4_gep_out_of_range_up(half* %valp) { ; CHECK-LABEL: ld1rh_half_unpacked4_gep_out_of_range_up: ; CHECK: // %bb.0: -; CHECK-NEXT: add x8, x0, #128 // =128 +; CHECK-NEXT: add x8, x0, #128 ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: ld1rh { z0.s }, p0/z, [x8] ; CHECK-NEXT: ret @@ -500,7 +500,7 @@ define <vscale x 4 x half> @ld1rh_half_unpacked4_gep_out_of_range_down(half* %valp) { ; CHECK-LABEL: ld1rh_half_unpacked4_gep_out_of_range_down: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x0, #2 // =2 +; CHECK-NEXT: sub x8, x0, #2 ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: ld1rh { z0.s }, p0/z, [x8] ; CHECK-NEXT: ret @@ -539,7 +539,7 @@ define <vscale x 2 x half> @ld1rh_half_unpacked2_gep_out_of_range_up(half* %valp) { ; CHECK-LABEL: ld1rh_half_unpacked2_gep_out_of_range_up: ; CHECK: // %bb.0: -; CHECK-NEXT: add x8, x0, #128 // =128 +; CHECK-NEXT: add x8, x0, #128 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: ld1rh { z0.d }, p0/z, [x8] ; CHECK-NEXT: ret @@ -553,7 +553,7 @@ define <vscale x 2 x half> @ld1rh_half_unpacked2_gep_out_of_range_down(half* %valp) { ; CHECK-LABEL: ld1rh_half_unpacked2_gep_out_of_range_down: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x0, #2 // =2 +; CHECK-NEXT: sub x8, x0, #2 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: ld1rh { z0.d }, p0/z, [x8] ; CHECK-NEXT: ret @@ -592,7 +592,7 @@ define <vscale x 4 x float> @ld1rw_float_gep_out_of_range_up(float* %valp) { ; CHECK-LABEL: ld1rw_float_gep_out_of_range_up: ; CHECK: // %bb.0: -; CHECK-NEXT: add x8, x0, #256 // =256 +; CHECK-NEXT: add x8, x0, #256 ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: ld1rw { z0.s }, p0/z, [x8] ; CHECK-NEXT: ret @@ -606,7 +606,7 @@ define <vscale x 4 x float> @ld1rw_float_gep_out_of_range_down(float* %valp) { ; CHECK-LABEL: ld1rw_float_gep_out_of_range_down: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x0, #4 // =4 +; CHECK-NEXT: sub x8, x0, #4 ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: ld1rw { z0.s }, p0/z, [x8] ; CHECK-NEXT: ret @@ -645,7 +645,7 @@ define <vscale x 2 x float> @ld1rw_float_unpacked2_gep_out_of_range_up(float* %valp) { ; CHECK-LABEL: ld1rw_float_unpacked2_gep_out_of_range_up: ; CHECK: // %bb.0: -; CHECK-NEXT: add x8, x0, #256 // =256 +; CHECK-NEXT: add x8, x0, #256 ; 
CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: ld1rw { z0.d }, p0/z, [x8] ; CHECK-NEXT: ret @@ -659,7 +659,7 @@ define <vscale x 2 x float> @ld1rw_float_unpacked2_gep_out_of_range_down(float* %valp) { ; CHECK-LABEL: ld1rw_float_unpacked2_gep_out_of_range_down: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x0, #4 // =4 +; CHECK-NEXT: sub x8, x0, #4 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: ld1rw { z0.d }, p0/z, [x8] ; CHECK-NEXT: ret @@ -698,7 +698,7 @@ define <vscale x 2 x double> @ld1rd_double_gep_out_of_range_up(double* %valp) { ; CHECK-LABEL: ld1rd_double_gep_out_of_range_up: ; CHECK: // %bb.0: -; CHECK-NEXT: add x8, x0, #512 // =512 +; CHECK-NEXT: add x8, x0, #512 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: ld1rd { z0.d }, p0/z, [x8] ; CHECK-NEXT: ret @@ -712,7 +712,7 @@ define <vscale x 2 x double> @ld1rd_double_gep_out_of_range_down(double* %valp) { ; CHECK-LABEL: ld1rd_double_gep_out_of_range_down: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x0, #8 // =8 +; CHECK-NEXT: sub x8, x0, #8 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: ld1rd { z0.d }, p0/z, [x8] ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/sve-lsr-scaled-index-addressing-mode.ll b/llvm/test/CodeGen/AArch64/sve-lsr-scaled-index-addressing-mode.ll --- a/llvm/test/CodeGen/AArch64/sve-lsr-scaled-index-addressing-mode.ll +++ b/llvm/test/CodeGen/AArch64/sve-lsr-scaled-index-addressing-mode.ll @@ -47,7 +47,7 @@ ; ASM-NEXT: add z1.h, z1.h, z0.h ; ASM-NEXT: st1h { z1.h }, p0, [x1, x8, lsl #1] ; ASM-NEXT: add x8, x8, x9 -; ASM-NEXT: cmp x8, #1024 // =1024 +; ASM-NEXT: cmp x8, #1024 ; ASM-NEXT: b.ne .LBB0_1 ; ASM-NEXT: // %bb.2: // %exit ; ASM-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll b/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll --- a/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll +++ b/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll @@ -26,7 +26,7 @@ ; CHECK-NEXT: rdvl x10, #2 ; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 ; CHECK-NEXT: sxtw x9, w0 -; CHECK-NEXT: sub x10, x10, #1 // =1 +; CHECK-NEXT: sub x10, x10, #1 ; CHECK-NEXT: ptrue p0.b ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: cmp x9, x10 @@ -51,7 +51,7 @@ ; CHECK-NEXT: rdvl x10, #1 ; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 ; CHECK-NEXT: sxtw x9, w0 -; CHECK-NEXT: sub x10, x10, #1 // =1 +; CHECK-NEXT: sub x10, x10, #1 ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: cmp x9, x10 @@ -76,7 +76,7 @@ ; CHECK-NEXT: cnth x10 ; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 ; CHECK-NEXT: sxtw x9, w0 -; CHECK-NEXT: sub x10, x10, #1 // =1 +; CHECK-NEXT: sub x10, x10, #1 ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: cmp x9, x10 @@ -101,7 +101,7 @@ ; CHECK-NEXT: cnth x10 ; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 ; CHECK-NEXT: sxtw x9, w0 -; CHECK-NEXT: sub x10, x10, #1 // =1 +; CHECK-NEXT: sub x10, x10, #1 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: cmp x9, x10 @@ -146,11 +146,11 @@ ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: rdvl x10, #1 -; CHECK-NEXT: sub x10, x10, #1 // =1 +; CHECK-NEXT: sub x10, x10, #1 ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: mov w9, #128 -; CHECK-NEXT: cmp x10, #128 // =128 +; CHECK-NEXT: cmp x10, #128 ; CHECK-NEXT: st1h { z1.h }, p0, [x8, #1, mul vl] ; CHECK-NEXT: st1h { z0.h }, p0, [sp] ; CHECK-NEXT: csel x9, x10, x9, lo @@ -172,7 +172,7 @@ ; CHECK-NEXT: mov w9, #34464 ; CHECK-NEXT: rdvl x10, #1 ; CHECK-NEXT: movk w9, #1, lsl #16 -; CHECK-NEXT: sub x10, x10, #1 // =1 +; 
CHECK-NEXT: sub x10, x10, #1 ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: cmp x10, x9 @@ -197,11 +197,11 @@ ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cntw x10 -; CHECK-NEXT: sub x10, x10, #1 // =1 +; CHECK-NEXT: sub x10, x10, #1 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: mov w9, #10 -; CHECK-NEXT: cmp x10, #10 // =10 +; CHECK-NEXT: cmp x10, #10 ; CHECK-NEXT: st1d { z1.d }, p0, [x8, #1, mul vl] ; CHECK-NEXT: st1d { z0.d }, p0, [sp] ; CHECK-NEXT: csel x9, x10, x9, lo diff --git a/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll b/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll --- a/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll +++ b/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll @@ -24,7 +24,7 @@ ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: rdvl x8, #2 -; CHECK-NEXT: sub x8, x8, #1 // =1 +; CHECK-NEXT: sub x8, x8, #1 ; CHECK-NEXT: cmp x1, x8 ; CHECK-NEXT: ptrue p0.b ; CHECK-NEXT: csel x8, x1, x8, lo @@ -49,7 +49,7 @@ ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 -; CHECK-NEXT: sub x8, x8, #1 // =1 +; CHECK-NEXT: sub x8, x8, #1 ; CHECK-NEXT: cmp x0, x8 ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: csel x8, x0, x8, lo @@ -74,7 +74,7 @@ ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: cnth x8 -; CHECK-NEXT: sub x8, x8, #1 // =1 +; CHECK-NEXT: sub x8, x8, #1 ; CHECK-NEXT: cmp x1, x8 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: csel x8, x1, x8, lo @@ -136,9 +136,9 @@ ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: rdvl x10, #2 -; CHECK-NEXT: sub x10, x10, #1 // =1 +; CHECK-NEXT: sub x10, x10, #1 ; CHECK-NEXT: mov w9, #128 -; CHECK-NEXT: cmp x10, #128 // =128 +; CHECK-NEXT: cmp x10, #128 ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: mov x8, sp ; CHECK-NEXT: csel x9, x10, x9, lo @@ -168,7 +168,7 @@ ; CHECK-NEXT: mov w9, #16960 ; CHECK-NEXT: cnth x10 ; CHECK-NEXT: movk w9, #15, lsl #16 -; CHECK-NEXT: sub x10, x10, #1 // =1 +; CHECK-NEXT: sub x10, x10, #1 ; CHECK-NEXT: cmp x10, x9 ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: mov x8, sp diff --git a/llvm/test/CodeGen/AArch64/uadd_sat.ll b/llvm/test/CodeGen/AArch64/uadd_sat.ll --- a/llvm/test/CodeGen/AArch64/uadd_sat.ll +++ b/llvm/test/CodeGen/AArch64/uadd_sat.ll @@ -45,7 +45,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xff ; CHECK-NEXT: add w8, w8, w1, uxtb -; CHECK-NEXT: cmp w8, #255 // =255 +; CHECK-NEXT: cmp w8, #255 ; CHECK-NEXT: mov w9, #255 ; CHECK-NEXT: csel w0, w8, w9, lo ; CHECK-NEXT: ret @@ -59,7 +59,7 @@ ; CHECK-NEXT: and w8, w1, #0xf ; CHECK-NEXT: and w9, w0, #0xf ; CHECK-NEXT: add w8, w9, w8 -; CHECK-NEXT: cmp w8, #15 // =15 +; CHECK-NEXT: cmp w8, #15 ; CHECK-NEXT: mov w9, #15 ; CHECK-NEXT: csel w0, w8, w9, lo ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/uadd_sat_plus.ll b/llvm/test/CodeGen/AArch64/uadd_sat_plus.ll --- a/llvm/test/CodeGen/AArch64/uadd_sat_plus.ll +++ b/llvm/test/CodeGen/AArch64/uadd_sat_plus.ll @@ -51,7 +51,7 @@ ; 
CHECK-NEXT: and w8, w0, #0xff ; CHECK-NEXT: mul w9, w1, w2 ; CHECK-NEXT: add w8, w8, w9, uxtb -; CHECK-NEXT: cmp w8, #255 // =255 +; CHECK-NEXT: cmp w8, #255 ; CHECK-NEXT: mov w9, #255 ; CHECK-NEXT: csel w0, w8, w9, lo ; CHECK-NEXT: ret @@ -67,7 +67,7 @@ ; CHECK-NEXT: and w8, w0, #0xf ; CHECK-NEXT: and w9, w9, #0xf ; CHECK-NEXT: add w8, w8, w9 -; CHECK-NEXT: cmp w8, #15 // =15 +; CHECK-NEXT: cmp w8, #15 ; CHECK-NEXT: mov w9, #15 ; CHECK-NEXT: csel w0, w8, w9, lo ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll b/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll --- a/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll +++ b/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll @@ -355,7 +355,7 @@ ; CHECK-NEXT: cmp x9, x3 ; CHECK-NEXT: cset w11, lo ; CHECK-NEXT: csel w10, w10, w11, eq -; CHECK-NEXT: cmp w10, #0 // =0 +; CHECK-NEXT: cmp w10, #0 ; CHECK-NEXT: csinv x3, x9, xzr, eq ; CHECK-NEXT: csinv x2, x8, xzr, eq ; CHECK-NEXT: adds x8, x0, x4 @@ -365,7 +365,7 @@ ; CHECK-NEXT: cmp x9, x1 ; CHECK-NEXT: cset w11, lo ; CHECK-NEXT: csel w10, w10, w11, eq -; CHECK-NEXT: cmp w10, #0 // =0 +; CHECK-NEXT: cmp w10, #0 ; CHECK-NEXT: csinv x8, x8, xzr, eq ; CHECK-NEXT: csinv x1, x9, xzr, eq ; CHECK-NEXT: fmov d0, x8 diff --git a/llvm/test/CodeGen/AArch64/uaddo.ll b/llvm/test/CodeGen/AArch64/uaddo.ll --- a/llvm/test/CodeGen/AArch64/uaddo.ll +++ b/llvm/test/CodeGen/AArch64/uaddo.ll @@ -8,7 +8,7 @@ define i1 @uaddo_i64_increment_alt(i64 %x, i64* %p) { ; CHECK-LABEL: uaddo_i64_increment_alt: ; CHECK: // %bb.0: -; CHECK-NEXT: adds x8, x0, #1 // =1 +; CHECK-NEXT: adds x8, x0, #1 ; CHECK-NEXT: cset w0, hs ; CHECK-NEXT: str x8, [x1] ; CHECK-NEXT: ret @@ -23,7 +23,7 @@ define i1 @uaddo_i64_increment_alt_dom(i64 %x, i64* %p) { ; CHECK-LABEL: uaddo_i64_increment_alt_dom: ; CHECK: // %bb.0: -; CHECK-NEXT: adds x8, x0, #1 // =1 +; CHECK-NEXT: adds x8, x0, #1 ; CHECK-NEXT: cset w0, hs ; CHECK-NEXT: str x8, [x1] ; CHECK-NEXT: ret @@ -38,7 +38,7 @@ define i1 @uaddo_i64_decrement_alt(i64 %x, i64* %p) { ; CHECK-LABEL: uaddo_i64_decrement_alt: ; CHECK: // %bb.0: -; CHECK-NEXT: subs x8, x0, #1 // =1 +; CHECK-NEXT: subs x8, x0, #1 ; CHECK-NEXT: cset w0, hs ; CHECK-NEXT: str x8, [x1] ; CHECK-NEXT: ret @@ -53,7 +53,7 @@ define i1 @uaddo_i64_decrement_alt_dom(i64 %x, i64* %p) { ; CHECK-LABEL: uaddo_i64_decrement_alt_dom: ; CHECK: // %bb.0: -; CHECK-NEXT: subs x8, x0, #1 // =1 +; CHECK-NEXT: subs x8, x0, #1 ; CHECK-NEXT: cset w0, hs ; CHECK-NEXT: str x8, [x1] ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/umulo-128-legalisation-lowering.ll b/llvm/test/CodeGen/AArch64/umulo-128-legalisation-lowering.ll --- a/llvm/test/CodeGen/AArch64/umulo-128-legalisation-lowering.ll +++ b/llvm/test/CodeGen/AArch64/umulo-128-legalisation-lowering.ll @@ -4,10 +4,10 @@ define { i128, i8 } @muloti_test(i128 %l, i128 %r) unnamed_addr #0 { ; AARCH-LABEL: muloti_test: ; AARCH: // %bb.0: // %start -; AARCH-NEXT: cmp x3, #0 // =0 +; AARCH-NEXT: cmp x3, #0 ; AARCH-NEXT: umulh x8, x1, x2 ; AARCH-NEXT: cset w10, ne -; AARCH-NEXT: cmp x1, #0 // =0 +; AARCH-NEXT: cmp x1, #0 ; AARCH-NEXT: mul x9, x3, x0 ; AARCH-NEXT: cset w11, ne ; AARCH-NEXT: cmp xzr, x8 diff --git a/llvm/test/CodeGen/AArch64/unwind-preserved.ll b/llvm/test/CodeGen/AArch64/unwind-preserved.ll --- a/llvm/test/CodeGen/AArch64/unwind-preserved.ll +++ b/llvm/test/CodeGen/AArch64/unwind-preserved.ll @@ -264,7 +264,7 @@ ; CHECK: .Lfunc_begin1: ; CHECK-NEXT: .cfi_startproc ; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: sub sp, sp, #304 // =304 +; CHECK-NEXT: sub sp, sp, #304 ; CHECK-NEXT: stp q23, 
q22, [sp, #32] // 32-byte Folded Spill ; CHECK-NEXT: stp q21, q20, [sp, #64] // 32-byte Folded Spill ; CHECK-NEXT: stp q19, q18, [sp, #96] // 32-byte Folded Spill @@ -310,7 +310,7 @@ ; CHECK-NEXT: ldp q19, q18, [sp, #96] // 32-byte Folded Reload ; CHECK-NEXT: ldp q21, q20, [sp, #64] // 32-byte Folded Reload ; CHECK-NEXT: ldp q23, q22, [sp, #32] // 32-byte Folded Reload -; CHECK-NEXT: add sp, sp, #304 // =304 +; CHECK-NEXT: add sp, sp, #304 ; CHECK-NEXT: ret ; CHECK-NEXT: .LBB1_2: // %.Lunwind ; CHECK-NEXT: .Ltmp5: @@ -324,14 +324,14 @@ ; CHECK-NEXT: ldp q19, q18, [sp, #96] // 32-byte Folded Reload ; CHECK-NEXT: ldp q21, q20, [sp, #64] // 32-byte Folded Reload ; CHECK-NEXT: ldp q23, q22, [sp, #32] // 32-byte Folded Reload -; CHECK-NEXT: add sp, sp, #304 // =304 +; CHECK-NEXT: add sp, sp, #304 ; CHECK-NEXT: ret ; ; GISEL-LABEL: invoke_callee_may_throw_neon: ; GISEL: .Lfunc_begin1: ; GISEL-NEXT: .cfi_startproc ; GISEL-NEXT: // %bb.0: -; GISEL-NEXT: sub sp, sp, #304 // =304 +; GISEL-NEXT: sub sp, sp, #304 ; GISEL-NEXT: stp q23, q22, [sp, #32] // 32-byte Folded Spill ; GISEL-NEXT: stp q21, q20, [sp, #64] // 32-byte Folded Spill ; GISEL-NEXT: stp q19, q18, [sp, #96] // 32-byte Folded Spill @@ -377,7 +377,7 @@ ; GISEL-NEXT: ldp q19, q18, [sp, #96] // 32-byte Folded Reload ; GISEL-NEXT: ldp q21, q20, [sp, #64] // 32-byte Folded Reload ; GISEL-NEXT: ldp q23, q22, [sp, #32] // 32-byte Folded Reload -; GISEL-NEXT: add sp, sp, #304 // =304 +; GISEL-NEXT: add sp, sp, #304 ; GISEL-NEXT: ret ; GISEL-NEXT: .LBB1_2: // %.Lunwind ; GISEL-NEXT: .Ltmp5: @@ -391,7 +391,7 @@ ; GISEL-NEXT: ldp q19, q18, [sp, #96] // 32-byte Folded Reload ; GISEL-NEXT: ldp q21, q20, [sp, #64] // 32-byte Folded Reload ; GISEL-NEXT: ldp q23, q22, [sp, #32] // 32-byte Folded Reload -; GISEL-NEXT: add sp, sp, #304 // =304 +; GISEL-NEXT: add sp, sp, #304 ; GISEL-NEXT: ret %result = invoke aarch64_vector_pcs <4 x i32> @may_throw_neon(<4 x i32> %v) to label %.Lcontinue unwind label %.Lunwind .Lcontinue: diff --git a/llvm/test/CodeGen/AArch64/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/AArch64/urem-seteq-illegal-types.ll --- a/llvm/test/CodeGen/AArch64/urem-seteq-illegal-types.ll +++ b/llvm/test/CodeGen/AArch64/urem-seteq-illegal-types.ll @@ -7,7 +7,7 @@ ; CHECK-NEXT: mov w8, #3277 ; CHECK-NEXT: mul w8, w0, w8 ; CHECK-NEXT: and w8, w8, #0x1fff -; CHECK-NEXT: cmp w8, #1639 // =1639 +; CHECK-NEXT: cmp w8, #1639 ; CHECK-NEXT: cset w0, lo ; CHECK-NEXT: ret %urem = urem i13 %X, 5 @@ -40,7 +40,7 @@ ; CHECK-NEXT: mov w8, #13 ; CHECK-NEXT: mul w8, w0, w8 ; CHECK-NEXT: and w8, w8, #0xf -; CHECK-NEXT: cmp w8, #3 // =3 +; CHECK-NEXT: cmp w8, #3 ; CHECK-NEXT: cset w0, hi ; CHECK-NEXT: ret %urem = urem i4 %X, 5 @@ -54,7 +54,7 @@ ; CHECK-NEXT: mov w8, #307 ; CHECK-NEXT: mul w8, w0, w8 ; CHECK-NEXT: and w8, w8, #0x1ff -; CHECK-NEXT: cmp w8, #1 // =1 +; CHECK-NEXT: cmp w8, #1 ; CHECK-NEXT: cset w0, hi ; CHECK-NEXT: ret %urem = urem i9 %X, -5 diff --git a/llvm/test/CodeGen/AArch64/urem-seteq-nonzero.ll b/llvm/test/CodeGen/AArch64/urem-seteq-nonzero.ll --- a/llvm/test/CodeGen/AArch64/urem-seteq-nonzero.ll +++ b/llvm/test/CodeGen/AArch64/urem-seteq-nonzero.ll @@ -139,7 +139,7 @@ ; CHECK-NEXT: mov w8, #43691 ; CHECK-NEXT: movk w8, #43690, lsl #16 ; CHECK-NEXT: mul w8, w0, w8 -; CHECK-NEXT: sub w8, w8, #1 // =1 +; CHECK-NEXT: sub w8, w8, #1 ; CHECK-NEXT: mov w9, #43691 ; CHECK-NEXT: ror w8, w8, #1 ; CHECK-NEXT: movk w9, #10922, lsl #16 @@ -212,9 +212,9 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: mov w8, #-85 ; CHECK-NEXT: mul w8, w0, w8 -; CHECK-NEXT: sub w8, 
w8, #86 // =86 +; CHECK-NEXT: sub w8, w8, #86 ; CHECK-NEXT: and w8, w8, #0xff -; CHECK-NEXT: cmp w8, #85 // =85 +; CHECK-NEXT: cmp w8, #85 ; CHECK-NEXT: cset w0, lo ; CHECK-NEXT: ret %urem = urem i8 %X, 3 diff --git a/llvm/test/CodeGen/AArch64/urem-seteq.ll b/llvm/test/CodeGen/AArch64/urem-seteq.ll --- a/llvm/test/CodeGen/AArch64/urem-seteq.ll +++ b/llvm/test/CodeGen/AArch64/urem-seteq.ll @@ -46,7 +46,7 @@ ; CHECK-NEXT: mov w8, #43691 ; CHECK-NEXT: movk w8, #27306, lsl #16 ; CHECK-NEXT: mul w8, w0, w8 -; CHECK-NEXT: cmp w8, #4 // =4 +; CHECK-NEXT: cmp w8, #4 ; CHECK-NEXT: cset w0, lo ; CHECK-NEXT: ret %urem = urem i32 %X, 1073741827 @@ -62,7 +62,7 @@ ; CHECK-NEXT: mov w8, #43691 ; CHECK-NEXT: movk w8, #10922, lsl #16 ; CHECK-NEXT: mul w8, w0, w8 -; CHECK-NEXT: cmp w8, #2 // =2 +; CHECK-NEXT: cmp w8, #2 ; CHECK-NEXT: cset w0, lo ; CHECK-NEXT: ret %urem = urem i32 %X, 2147483651 @@ -84,7 +84,7 @@ ; CHECK-NEXT: lsr w9, w9, #1 ; CHECK-NEXT: bfi w9, w8, #15, #17 ; CHECK-NEXT: ubfx w8, w9, #1, #15 -; CHECK-NEXT: cmp w8, #2340 // =2340 +; CHECK-NEXT: cmp w8, #2340 ; CHECK-NEXT: cset w0, hi ; CHECK-NEXT: ret %urem = urem i16 %X, 14 @@ -119,7 +119,7 @@ ; CHECK-NEXT: movk w8, #64748, lsl #16 ; CHECK-NEXT: mul w8, w0, w8 ; CHECK-NEXT: ror w8, w8, #3 -; CHECK-NEXT: cmp w8, #4 // =4 +; CHECK-NEXT: cmp w8, #4 ; CHECK-NEXT: cset w0, lo ; CHECK-NEXT: ret %urem = urem i32 %X, 1073741928 @@ -136,7 +136,7 @@ ; CHECK-NEXT: movk w8, #47866, lsl #16 ; CHECK-NEXT: mul w8, w0, w8 ; CHECK-NEXT: ror w8, w8, #1 -; CHECK-NEXT: cmp w8, #2 // =2 +; CHECK-NEXT: cmp w8, #2 ; CHECK-NEXT: cset w0, lo ; CHECK-NEXT: ret %urem = urem i32 %X, 2147483750 @@ -172,7 +172,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: mov w8, #858993459 ; CHECK-NEXT: mul w8, w0, w8 -; CHECK-NEXT: cmp w8, #1 // =1 +; CHECK-NEXT: cmp w8, #1 ; CHECK-NEXT: cset w0, hi ; CHECK-NEXT: ret %urem = urem i32 %X, -5 @@ -187,7 +187,7 @@ ; CHECK-NEXT: movk w8, #51492, lsl #16 ; CHECK-NEXT: mul w8, w0, w8 ; CHECK-NEXT: ror w8, w8, #1 -; CHECK-NEXT: cmp w8, #1 // =1 +; CHECK-NEXT: cmp w8, #1 ; CHECK-NEXT: cset w0, hi ; CHECK-NEXT: ret %urem = urem i32 %X, -14 @@ -243,7 +243,7 @@ ; CHECK-LABEL: test_urem_allones: ; CHECK: // %bb.0: ; CHECK-NEXT: neg w8, w0 -; CHECK-NEXT: cmp w8, #2 // =2 +; CHECK-NEXT: cmp w8, #2 ; CHECK-NEXT: cset w0, lo ; CHECK-NEXT: ret %urem = urem i32 %X, 4294967295 diff --git a/llvm/test/CodeGen/AArch64/use-cr-result-of-dom-icmp-st.ll b/llvm/test/CodeGen/AArch64/use-cr-result-of-dom-icmp-st.ll --- a/llvm/test/CodeGen/AArch64/use-cr-result-of-dom-icmp-st.ll +++ b/llvm/test/CodeGen/AArch64/use-cr-result-of-dom-icmp-st.ll @@ -18,7 +18,7 @@ ; CHECK-LABEL: ll_a_op_b__2: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: lsl x8, x0, x1 -; CHECK-NEXT: cmn x8, #2 // =2 +; CHECK-NEXT: cmn x8, #2 ; CHECK-NEXT: csinc x8, x1, xzr, eq ; CHECK-NEXT: mul x8, x8, x0 ; CHECK-NEXT: csel x0, x1, x8, gt @@ -42,10 +42,10 @@ ; CHECK-LABEL: ll_a_op_b__1: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: lsl x8, x0, x1 -; CHECK-NEXT: cmn x8, #1 // =1 +; CHECK-NEXT: cmn x8, #1 ; CHECK-NEXT: csinc x9, x1, xzr, eq ; CHECK-NEXT: mul x9, x9, x0 -; CHECK-NEXT: cmp x8, #0 // =0 +; CHECK-NEXT: cmp x8, #0 ; CHECK-NEXT: csel x0, x1, x9, ge ; CHECK-NEXT: ret entry: @@ -67,7 +67,7 @@ ; CHECK-LABEL: ll_a_op_b_0: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: lsl x8, x0, x1 -; CHECK-NEXT: cmp x8, #0 // =0 +; CHECK-NEXT: cmp x8, #0 ; CHECK-NEXT: csinc x8, x1, xzr, eq ; CHECK-NEXT: mul x8, x8, x0 ; CHECK-NEXT: csel x0, x1, x8, gt @@ -91,7 +91,7 @@ ; CHECK-LABEL: ll_a_op_b_1: ; CHECK: // %bb.0: // %entry ; 
CHECK-NEXT: lsl x8, x0, x1 -; CHECK-NEXT: cmp x8, #1 // =1 +; CHECK-NEXT: cmp x8, #1 ; CHECK-NEXT: csinc x8, x1, xzr, eq ; CHECK-NEXT: mul x8, x8, x0 ; CHECK-NEXT: csel x0, x1, x8, gt @@ -115,7 +115,7 @@ ; CHECK-LABEL: ll_a_op_b_2: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: lsl x8, x0, x1 -; CHECK-NEXT: cmp x8, #2 // =2 +; CHECK-NEXT: cmp x8, #2 ; CHECK-NEXT: csinc x8, x1, xzr, eq ; CHECK-NEXT: mul x8, x8, x0 ; CHECK-NEXT: csel x0, x1, x8, gt @@ -138,7 +138,7 @@ define i64 @ll_a__2(i64 %a, i64 %b) { ; CHECK-LABEL: ll_a__2: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: cmn x0, #2 // =2 +; CHECK-NEXT: cmn x0, #2 ; CHECK-NEXT: csinc x8, x1, xzr, eq ; CHECK-NEXT: mul x8, x8, x0 ; CHECK-NEXT: csel x0, x1, x8, gt @@ -160,10 +160,10 @@ define i64 @ll_a__1(i64 %a, i64 %b) { ; CHECK-LABEL: ll_a__1: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: cmn x0, #1 // =1 +; CHECK-NEXT: cmn x0, #1 ; CHECK-NEXT: csinc x8, x1, xzr, eq ; CHECK-NEXT: mul x8, x8, x0 -; CHECK-NEXT: cmp x0, #0 // =0 +; CHECK-NEXT: cmp x0, #0 ; CHECK-NEXT: csel x0, x1, x8, ge ; CHECK-NEXT: ret entry: @@ -183,7 +183,7 @@ define i64 @ll_a_0(i64 %a, i64 %b) { ; CHECK-LABEL: ll_a_0: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: cmp x0, #0 // =0 +; CHECK-NEXT: cmp x0, #0 ; CHECK-NEXT: csinc x8, x1, xzr, eq ; CHECK-NEXT: mul x8, x8, x0 ; CHECK-NEXT: csel x0, x1, x8, gt @@ -205,7 +205,7 @@ define i64 @ll_a_1(i64 %a, i64 %b) { ; CHECK-LABEL: ll_a_1: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: cmp x0, #1 // =1 +; CHECK-NEXT: cmp x0, #1 ; CHECK-NEXT: csinc x8, x1, xzr, eq ; CHECK-NEXT: mul x8, x8, x0 ; CHECK-NEXT: csel x0, x1, x8, gt @@ -227,7 +227,7 @@ define i64 @ll_a_2(i64 %a, i64 %b) { ; CHECK-LABEL: ll_a_2: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: cmp x0, #2 // =2 +; CHECK-NEXT: cmp x0, #2 ; CHECK-NEXT: csinc x8, x1, xzr, eq ; CHECK-NEXT: mul x8, x8, x0 ; CHECK-NEXT: csel x0, x1, x8, gt @@ -250,7 +250,7 @@ ; CHECK-LABEL: i_a_op_b__2: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: lsl w8, w0, w1 -; CHECK-NEXT: cmn w8, #2 // =2 +; CHECK-NEXT: cmn w8, #2 ; CHECK-NEXT: csinc w8, w1, wzr, eq ; CHECK-NEXT: mul w8, w8, w0 ; CHECK-NEXT: csel w8, w1, w8, gt @@ -277,10 +277,10 @@ ; CHECK-LABEL: i_a_op_b__1: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: lsl w8, w0, w1 -; CHECK-NEXT: cmn w8, #1 // =1 +; CHECK-NEXT: cmn w8, #1 ; CHECK-NEXT: csinc w9, w1, wzr, eq ; CHECK-NEXT: mul w9, w9, w0 -; CHECK-NEXT: cmp w8, #0 // =0 +; CHECK-NEXT: cmp w8, #0 ; CHECK-NEXT: csel w8, w1, w9, ge ; CHECK-NEXT: sxtw x0, w8 ; CHECK-NEXT: ret @@ -305,7 +305,7 @@ ; CHECK-LABEL: i_a_op_b_0: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: lsl w8, w0, w1 -; CHECK-NEXT: cmp w8, #0 // =0 +; CHECK-NEXT: cmp w8, #0 ; CHECK-NEXT: csinc w8, w1, wzr, eq ; CHECK-NEXT: mul w8, w8, w0 ; CHECK-NEXT: csel w8, w1, w8, gt @@ -332,7 +332,7 @@ ; CHECK-LABEL: i_a_op_b_1: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: lsl w8, w0, w1 -; CHECK-NEXT: cmp w8, #1 // =1 +; CHECK-NEXT: cmp w8, #1 ; CHECK-NEXT: csinc w8, w1, wzr, eq ; CHECK-NEXT: mul w8, w8, w0 ; CHECK-NEXT: csel w8, w1, w8, gt @@ -359,7 +359,7 @@ ; CHECK-LABEL: i_a_op_b_2: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: lsl w8, w0, w1 -; CHECK-NEXT: cmp w8, #2 // =2 +; CHECK-NEXT: cmp w8, #2 ; CHECK-NEXT: csinc w8, w1, wzr, eq ; CHECK-NEXT: mul w8, w8, w0 ; CHECK-NEXT: csel w8, w1, w8, gt @@ -385,7 +385,7 @@ define i64 @i_a__2(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: i_a__2: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: cmn w0, #2 // =2 +; CHECK-NEXT: cmn w0, #2 ; CHECK-NEXT: csinc w8, w1, wzr, eq ; CHECK-NEXT: mul w8, w8, w0 ; CHECK-NEXT: csel w8, 
w1, w8, gt @@ -410,10 +410,10 @@ define i64 @i_a__1(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: i_a__1: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: cmn w0, #1 // =1 +; CHECK-NEXT: cmn w0, #1 ; CHECK-NEXT: csinc w8, w1, wzr, eq ; CHECK-NEXT: mul w8, w8, w0 -; CHECK-NEXT: cmp w0, #0 // =0 +; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: csel w8, w1, w8, ge ; CHECK-NEXT: sxtw x0, w8 ; CHECK-NEXT: ret @@ -436,7 +436,7 @@ define i64 @i_a_0(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: i_a_0: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: cmp w0, #0 // =0 +; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: csinc w8, w1, wzr, eq ; CHECK-NEXT: mul w8, w8, w0 ; CHECK-NEXT: csel w8, w1, w8, gt @@ -461,7 +461,7 @@ define i64 @i_a_1(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: i_a_1: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: cmp w0, #1 // =1 +; CHECK-NEXT: cmp w0, #1 ; CHECK-NEXT: csinc w8, w1, wzr, eq ; CHECK-NEXT: mul w8, w8, w0 ; CHECK-NEXT: csel w8, w1, w8, gt @@ -486,7 +486,7 @@ define i64 @i_a_2(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: i_a_2: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: cmp w0, #2 // =2 +; CHECK-NEXT: cmp w0, #2 ; CHECK-NEXT: csinc w8, w1, wzr, eq ; CHECK-NEXT: mul w8, w8, w0 ; CHECK-NEXT: csel w8, w1, w8, gt diff --git a/llvm/test/CodeGen/AArch64/usub_sat_vec.ll b/llvm/test/CodeGen/AArch64/usub_sat_vec.ll --- a/llvm/test/CodeGen/AArch64/usub_sat_vec.ll +++ b/llvm/test/CodeGen/AArch64/usub_sat_vec.ll @@ -351,7 +351,7 @@ ; CHECK-NEXT: cmp x9, x3 ; CHECK-NEXT: cset w11, hi ; CHECK-NEXT: csel w10, w10, w11, eq -; CHECK-NEXT: cmp w10, #0 // =0 +; CHECK-NEXT: cmp w10, #0 ; CHECK-NEXT: csel x3, xzr, x9, ne ; CHECK-NEXT: csel x2, xzr, x8, ne ; CHECK-NEXT: subs x8, x0, x4 @@ -361,7 +361,7 @@ ; CHECK-NEXT: cmp x9, x1 ; CHECK-NEXT: cset w11, hi ; CHECK-NEXT: csel w10, w10, w11, eq -; CHECK-NEXT: cmp w10, #0 // =0 +; CHECK-NEXT: cmp w10, #0 ; CHECK-NEXT: csel x8, xzr, x8, ne ; CHECK-NEXT: csel x1, xzr, x9, ne ; CHECK-NEXT: fmov d0, x8 diff --git a/llvm/test/CodeGen/AArch64/vec-libcalls.ll b/llvm/test/CodeGen/AArch64/vec-libcalls.ll --- a/llvm/test/CodeGen/AArch64/vec-libcalls.ll +++ b/llvm/test/CodeGen/AArch64/vec-libcalls.ll @@ -50,7 +50,7 @@ define <2 x float> @sin_v2f32(<2 x float> %x) nounwind { ; CHECK-LABEL: sin_v2f32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #48 // =48 +; CHECK-NEXT: sub sp, sp, #48 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 ; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: mov s0, v0.s[1] @@ -65,7 +65,7 @@ ; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 ; CHECK-NEXT: mov v0.s[1], v1.s[0] ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: add sp, sp, #48 // =48 +; CHECK-NEXT: add sp, sp, #48 ; CHECK-NEXT: ret %r = call <2 x float> @llvm.sin.v2f32(<2 x float> %x) ret <2 x float> %r @@ -74,7 +74,7 @@ define <3 x float> @sin_v3f32(<3 x float> %x) nounwind { ; CHECK-LABEL: sin_v3f32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #48 // =48 +; CHECK-NEXT: sub sp, sp, #48 ; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: mov s0, v0.s[1] ; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill @@ -95,7 +95,7 @@ ; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 ; CHECK-NEXT: mov v1.s[2], v0.s[0] ; CHECK-NEXT: mov v0.16b, v1.16b -; CHECK-NEXT: add sp, sp, #48 // =48 +; CHECK-NEXT: add sp, sp, #48 ; CHECK-NEXT: ret %r = call <3 x float> @llvm.sin.v3f32(<3 x float> %x) ret <3 x float> %r @@ -104,7 +104,7 @@ define <4 x float> @sin_v4f32(<4 x float> %x) nounwind { ; CHECK-LABEL: sin_v4f32: ; CHECK: // %bb.0: -; 
CHECK-NEXT: sub sp, sp, #48 // =48 +; CHECK-NEXT: sub sp, sp, #48 ; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: mov s0, v0.s[1] ; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill @@ -132,7 +132,7 @@ ; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 ; CHECK-NEXT: mov v1.s[3], v0.s[0] ; CHECK-NEXT: mov v0.16b, v1.16b -; CHECK-NEXT: add sp, sp, #48 // =48 +; CHECK-NEXT: add sp, sp, #48 ; CHECK-NEXT: ret %r = call <4 x float> @llvm.sin.v4f32(<4 x float> %x) ret <4 x float> %r @@ -266,7 +266,7 @@ define <3 x float> @cos_v3f32(<3 x float> %x) nounwind { ; CHECK-LABEL: cos_v3f32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #48 // =48 +; CHECK-NEXT: sub sp, sp, #48 ; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: mov s0, v0.s[1] ; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill @@ -287,7 +287,7 @@ ; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 ; CHECK-NEXT: mov v1.s[2], v0.s[0] ; CHECK-NEXT: mov v0.16b, v1.16b -; CHECK-NEXT: add sp, sp, #48 // =48 +; CHECK-NEXT: add sp, sp, #48 ; CHECK-NEXT: ret %r = call <3 x float> @llvm.cos.v3f32(<3 x float> %x) ret <3 x float> %r @@ -296,7 +296,7 @@ define <3 x float> @exp_v3f32(<3 x float> %x) nounwind { ; CHECK-LABEL: exp_v3f32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #48 // =48 +; CHECK-NEXT: sub sp, sp, #48 ; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: mov s0, v0.s[1] ; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill @@ -317,7 +317,7 @@ ; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 ; CHECK-NEXT: mov v1.s[2], v0.s[0] ; CHECK-NEXT: mov v0.16b, v1.16b -; CHECK-NEXT: add sp, sp, #48 // =48 +; CHECK-NEXT: add sp, sp, #48 ; CHECK-NEXT: ret %r = call <3 x float> @llvm.exp.v3f32(<3 x float> %x) ret <3 x float> %r @@ -326,7 +326,7 @@ define <3 x float> @exp2_v3f32(<3 x float> %x) nounwind { ; CHECK-LABEL: exp2_v3f32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #48 // =48 +; CHECK-NEXT: sub sp, sp, #48 ; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: mov s0, v0.s[1] ; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill @@ -347,7 +347,7 @@ ; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 ; CHECK-NEXT: mov v1.s[2], v0.s[0] ; CHECK-NEXT: mov v0.16b, v1.16b -; CHECK-NEXT: add sp, sp, #48 // =48 +; CHECK-NEXT: add sp, sp, #48 ; CHECK-NEXT: ret %r = call <3 x float> @llvm.exp2.v3f32(<3 x float> %x) ret <3 x float> %r @@ -365,7 +365,7 @@ define <3 x float> @log_v3f32(<3 x float> %x) nounwind { ; CHECK-LABEL: log_v3f32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #48 // =48 +; CHECK-NEXT: sub sp, sp, #48 ; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: mov s0, v0.s[1] ; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill @@ -386,7 +386,7 @@ ; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 ; CHECK-NEXT: mov v1.s[2], v0.s[0] ; CHECK-NEXT: mov v0.16b, v1.16b -; CHECK-NEXT: add sp, sp, #48 // =48 +; CHECK-NEXT: add sp, sp, #48 ; CHECK-NEXT: ret %r = call <3 x float> @llvm.log.v3f32(<3 x float> %x) ret <3 x float> %r @@ -395,7 +395,7 @@ define <3 x float> @log10_v3f32(<3 x float> %x) nounwind { ; CHECK-LABEL: log10_v3f32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #48 // =48 +; CHECK-NEXT: sub sp, sp, #48 ; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: mov s0, v0.s[1] ; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill @@ -416,7 +416,7 @@ ; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 ; CHECK-NEXT: mov v1.s[2], v0.s[0] ; CHECK-NEXT: mov v0.16b, v1.16b -; CHECK-NEXT: add sp, sp, #48 // =48 +; 
CHECK-NEXT: add sp, sp, #48 ; CHECK-NEXT: ret %r = call <3 x float> @llvm.log10.v3f32(<3 x float> %x) ret <3 x float> %r @@ -425,7 +425,7 @@ define <3 x float> @log2_v3f32(<3 x float> %x) nounwind { ; CHECK-LABEL: log2_v3f32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #48 // =48 +; CHECK-NEXT: sub sp, sp, #48 ; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: mov s0, v0.s[1] ; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill @@ -446,7 +446,7 @@ ; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 ; CHECK-NEXT: mov v1.s[2], v0.s[0] ; CHECK-NEXT: mov v0.16b, v1.16b -; CHECK-NEXT: add sp, sp, #48 // =48 +; CHECK-NEXT: add sp, sp, #48 ; CHECK-NEXT: ret %r = call <3 x float> @llvm.log2.v3f32(<3 x float> %x) ret <3 x float> %r diff --git a/llvm/test/CodeGen/AArch64/vec_uaddo.ll b/llvm/test/CodeGen/AArch64/vec_uaddo.ll --- a/llvm/test/CodeGen/AArch64/vec_uaddo.ll +++ b/llvm/test/CodeGen/AArch64/vec_uaddo.ll @@ -50,7 +50,7 @@ ; CHECK-LABEL: uaddo_v3i32: ; CHECK: // %bb.0: ; CHECK-NEXT: add v1.4s, v0.4s, v1.4s -; CHECK-NEXT: add x8, x0, #8 // =8 +; CHECK-NEXT: add x8, x0, #8 ; CHECK-NEXT: cmhi v0.4s, v0.4s, v1.4s ; CHECK-NEXT: st1 { v1.s }[2], [x8] ; CHECK-NEXT: str d1, [x0] @@ -86,8 +86,8 @@ ; CHECK-NEXT: mov x9, sp ; CHECK-NEXT: mov v2.s[1], w7 ; CHECK-NEXT: ld1 { v2.s }[2], [x9] -; CHECK-NEXT: add x8, sp, #24 // =24 -; CHECK-NEXT: add x10, sp, #8 // =8 +; CHECK-NEXT: add x8, sp, #24 +; CHECK-NEXT: add x10, sp, #8 ; CHECK-NEXT: ld1 { v0.s }[1], [x8] ; CHECK-NEXT: fmov s3, w0 ; CHECK-NEXT: ldr x11, [sp, #32] diff --git a/llvm/test/CodeGen/AArch64/vec_umulo.ll b/llvm/test/CodeGen/AArch64/vec_umulo.ll --- a/llvm/test/CodeGen/AArch64/vec_umulo.ll +++ b/llvm/test/CodeGen/AArch64/vec_umulo.ll @@ -57,7 +57,7 @@ ; CHECK-NEXT: umull v3.2d, v0.2s, v1.2s ; CHECK-NEXT: mul v1.4s, v0.4s, v1.4s ; CHECK-NEXT: uzp2 v0.4s, v3.4s, v2.4s -; CHECK-NEXT: add x8, x0, #8 // =8 +; CHECK-NEXT: add x8, x0, #8 ; CHECK-NEXT: cmtst v0.4s, v0.4s, v0.4s ; CHECK-NEXT: st1 { v1.s }[2], [x8] ; CHECK-NEXT: str d1, [x0] @@ -97,8 +97,8 @@ ; CHECK-NEXT: mov x9, sp ; CHECK-NEXT: mov v2.s[1], w7 ; CHECK-NEXT: ld1 { v2.s }[2], [x9] -; CHECK-NEXT: add x8, sp, #24 // =24 -; CHECK-NEXT: add x10, sp, #8 // =8 +; CHECK-NEXT: add x8, sp, #24 +; CHECK-NEXT: add x10, sp, #8 ; CHECK-NEXT: ld1 { v0.s }[1], [x8] ; CHECK-NEXT: fmov s3, w0 ; CHECK-NEXT: ldr x11, [sp, #32] @@ -316,10 +316,10 @@ define <2 x i32> @umulo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2) nounwind { ; CHECK-LABEL: umulo_v2i128: ; CHECK: // %bb.0: -; CHECK-NEXT: cmp x7, #0 // =0 +; CHECK-NEXT: cmp x7, #0 ; CHECK-NEXT: umulh x8, x3, x6 ; CHECK-NEXT: cset w13, ne -; CHECK-NEXT: cmp x3, #0 // =0 +; CHECK-NEXT: cmp x3, #0 ; CHECK-NEXT: umulh x9, x7, x2 ; CHECK-NEXT: mul x10, x7, x2 ; CHECK-NEXT: cset w14, ne @@ -336,11 +336,11 @@ ; CHECK-NEXT: mul x12, x2, x6 ; CHECK-NEXT: orr w13, w13, w14 ; CHECK-NEXT: cset w14, hs -; CHECK-NEXT: cmp x5, #0 // =0 +; CHECK-NEXT: cmp x5, #0 ; CHECK-NEXT: umulh x17, x1, x4 ; CHECK-NEXT: stp x12, x10, [x8, #16] ; CHECK-NEXT: cset w10, ne -; CHECK-NEXT: cmp x1, #0 // =0 +; CHECK-NEXT: cmp x1, #0 ; CHECK-NEXT: umulh x9, x5, x0 ; CHECK-NEXT: mul x11, x5, x0 ; CHECK-NEXT: cset w12, ne diff --git a/llvm/test/CodeGen/AArch64/vecreduce-bool.ll b/llvm/test/CodeGen/AArch64/vecreduce-bool.ll --- a/llvm/test/CodeGen/AArch64/vecreduce-bool.ll +++ b/llvm/test/CodeGen/AArch64/vecreduce-bool.ll @@ -20,7 +20,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 ; CHECK-NEXT: smov w8, v0.b[0] -; CHECK-NEXT: cmp 
w8, #0 // =0 +; CHECK-NEXT: cmp w8, #0 ; CHECK-NEXT: csel w0, w0, w1, lt ; CHECK-NEXT: ret %x = icmp slt <1 x i8> %a0, zeroinitializer @@ -114,7 +114,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 ; CHECK-NEXT: smov w8, v0.b[0] -; CHECK-NEXT: cmp w8, #0 // =0 +; CHECK-NEXT: cmp w8, #0 ; CHECK-NEXT: csel w0, w0, w1, lt ; CHECK-NEXT: ret %x = icmp slt <1 x i8> %a0, zeroinitializer diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization-strict.ll b/llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization-strict.ll --- a/llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization-strict.ll +++ b/llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization-strict.ll @@ -141,7 +141,7 @@ define fp128 @test_v2f128(<2 x fp128> %a, fp128 %s) nounwind { ; CHECK-LABEL: test_v2f128: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 // =32 +; CHECK-NEXT: sub sp, sp, #32 ; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill ; CHECK-NEXT: mov v1.16b, v0.16b ; CHECK-NEXT: mov v0.16b, v2.16b @@ -149,7 +149,7 @@ ; CHECK-NEXT: bl __addtf3 ; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload ; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload -; CHECK-NEXT: add sp, sp, #32 // =32 +; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: b __addtf3 %b = call fp128 @llvm.vector.reduce.fadd.f128.v2f128(fp128 %s, <2 x fp128> %a) ret fp128 %b diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization.ll --- a/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization.ll +++ b/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization.ll @@ -186,7 +186,7 @@ ; CHECK-FP-NEXT: movi v16.8h, #252, lsl #8 ; CHECK-FP-NEXT: mov x8, sp ; CHECK-FP-NEXT: ld1 { v16.h }[0], [x8] -; CHECK-FP-NEXT: add x8, sp, #8 // =8 +; CHECK-FP-NEXT: add x8, sp, #8 ; CHECK-FP-NEXT: // kill: def $h0 killed $h0 def $q0 ; CHECK-FP-NEXT: // kill: def $h1 killed $h1 def $q1 ; CHECK-FP-NEXT: // kill: def $h2 killed $h2 def $q2 @@ -199,7 +199,7 @@ ; CHECK-FP-NEXT: ld1 { v16.h }[1], [x8] ; CHECK-FP-NEXT: mov v0.h[2], v2.h[0] ; CHECK-FP-NEXT: mov v0.h[3], v3.h[0] -; CHECK-FP-NEXT: add x8, sp, #16 // =16 +; CHECK-FP-NEXT: add x8, sp, #16 ; CHECK-FP-NEXT: mov v0.h[4], v4.h[0] ; CHECK-FP-NEXT: ld1 { v16.h }[2], [x8] ; CHECK-FP-NEXT: mov v0.h[5], v5.h[0] @@ -294,7 +294,7 @@ ; CHECK-FP-NEXT: mvni v16.8h, #4, lsl #8 ; CHECK-FP-NEXT: mov x8, sp ; CHECK-FP-NEXT: ld1 { v16.h }[0], [x8] -; CHECK-FP-NEXT: add x8, sp, #8 // =8 +; CHECK-FP-NEXT: add x8, sp, #8 ; CHECK-FP-NEXT: // kill: def $h0 killed $h0 def $q0 ; CHECK-FP-NEXT: // kill: def $h1 killed $h1 def $q1 ; CHECK-FP-NEXT: // kill: def $h2 killed $h2 def $q2 @@ -307,7 +307,7 @@ ; CHECK-FP-NEXT: ld1 { v16.h }[1], [x8] ; CHECK-FP-NEXT: mov v0.h[2], v2.h[0] ; CHECK-FP-NEXT: mov v0.h[3], v3.h[0] -; CHECK-FP-NEXT: add x8, sp, #16 // =16 +; CHECK-FP-NEXT: add x8, sp, #16 ; CHECK-FP-NEXT: mov v0.h[4], v4.h[0] ; CHECK-FP-NEXT: ld1 { v16.h }[2], [x8] ; CHECK-FP-NEXT: mov v0.h[5], v5.h[0] diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fmin-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-fmin-legalization.ll --- a/llvm/test/CodeGen/AArch64/vecreduce-fmin-legalization.ll +++ b/llvm/test/CodeGen/AArch64/vecreduce-fmin-legalization.ll @@ -186,7 +186,7 @@ ; CHECK-FP-NEXT: movi v16.8h, #124, lsl #8 ; CHECK-FP-NEXT: mov x8, sp ; CHECK-FP-NEXT: ld1 { v16.h }[0], [x8] -; CHECK-FP-NEXT: add x8, sp, #8 // =8 +; CHECK-FP-NEXT: add x8, sp, #8 ; CHECK-FP-NEXT: // kill: def $h0 killed $h0 def $q0 ; CHECK-FP-NEXT: // kill: def $h1 killed $h1 def $q1 ; 
CHECK-FP-NEXT: // kill: def $h2 killed $h2 def $q2 @@ -199,7 +199,7 @@ ; CHECK-FP-NEXT: ld1 { v16.h }[1], [x8] ; CHECK-FP-NEXT: mov v0.h[2], v2.h[0] ; CHECK-FP-NEXT: mov v0.h[3], v3.h[0] -; CHECK-FP-NEXT: add x8, sp, #16 // =16 +; CHECK-FP-NEXT: add x8, sp, #16 ; CHECK-FP-NEXT: mov v0.h[4], v4.h[0] ; CHECK-FP-NEXT: ld1 { v16.h }[2], [x8] ; CHECK-FP-NEXT: mov v0.h[5], v5.h[0] @@ -294,7 +294,7 @@ ; CHECK-FP-NEXT: mvni v16.8h, #132, lsl #8 ; CHECK-FP-NEXT: mov x8, sp ; CHECK-FP-NEXT: ld1 { v16.h }[0], [x8] -; CHECK-FP-NEXT: add x8, sp, #8 // =8 +; CHECK-FP-NEXT: add x8, sp, #8 ; CHECK-FP-NEXT: // kill: def $h0 killed $h0 def $q0 ; CHECK-FP-NEXT: // kill: def $h1 killed $h1 def $q1 ; CHECK-FP-NEXT: // kill: def $h2 killed $h2 def $q2 @@ -307,7 +307,7 @@ ; CHECK-FP-NEXT: ld1 { v16.h }[1], [x8] ; CHECK-FP-NEXT: mov v0.h[2], v2.h[0] ; CHECK-FP-NEXT: mov v0.h[3], v3.h[0] -; CHECK-FP-NEXT: add x8, sp, #16 // =16 +; CHECK-FP-NEXT: add x8, sp, #16 ; CHECK-FP-NEXT: mov v0.h[4], v4.h[0] ; CHECK-FP-NEXT: ld1 { v16.h }[2], [x8] ; CHECK-FP-NEXT: mov v0.h[5], v5.h[0] diff --git a/llvm/test/CodeGen/AArch64/vldn_shuffle.ll b/llvm/test/CodeGen/AArch64/vldn_shuffle.ll --- a/llvm/test/CodeGen/AArch64/vldn_shuffle.ll +++ b/llvm/test/CodeGen/AArch64/vldn_shuffle.ll @@ -11,7 +11,7 @@ ; CHECK-NEXT: fmul v2.4s, v0.4s, v0.4s ; CHECK-NEXT: fmla v2.4s, v1.4s, v1.4s ; CHECK-NEXT: str q2, [x1, x8] -; CHECK-NEXT: add x8, x8, #16 // =16 +; CHECK-NEXT: add x8, x8, #16 ; CHECK-NEXT: cmp x8, #1, lsl #12 // =4096 ; CHECK-NEXT: b.ne .LBB0_1 ; CHECK-NEXT: // %bb.2: // %while.end @@ -52,7 +52,7 @@ ; CHECK-NEXT: fmla v3.4s, v1.4s, v1.4s ; CHECK-NEXT: fmla v3.4s, v2.4s, v2.4s ; CHECK-NEXT: str q3, [x1, x8] -; CHECK-NEXT: add x8, x8, #16 // =16 +; CHECK-NEXT: add x8, x8, #16 ; CHECK-NEXT: cmp x8, #1, lsl #12 // =4096 ; CHECK-NEXT: b.ne .LBB1_1 ; CHECK-NEXT: // %bb.2: // %while.end @@ -93,7 +93,7 @@ ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: ld4 { v0.4s, v1.4s, v2.4s, v3.4s }, [x0], #64 ; CHECK-NEXT: add x9, x1, x8 -; CHECK-NEXT: add x8, x8, #32 // =32 +; CHECK-NEXT: add x8, x8, #32 ; CHECK-NEXT: cmp x8, #2, lsl #12 // =8192 ; CHECK-NEXT: fmul v4.4s, v0.4s, v0.4s ; CHECK-NEXT: fmla v4.4s, v1.4s, v1.4s @@ -145,7 +145,7 @@ ; CHECK-NEXT: add x10, x1, x8 ; CHECK-NEXT: ld2 { v0.4s, v1.4s }, [x9] ; CHECK-NEXT: ld2 { v2.4s, v3.4s }, [x10] -; CHECK-NEXT: add x8, x8, #32 // =32 +; CHECK-NEXT: add x8, x8, #32 ; CHECK-NEXT: cmp x8, #2, lsl #12 // =8192 ; CHECK-NEXT: fmul v4.4s, v2.4s, v0.4s ; CHECK-NEXT: fmla v4.4s, v1.4s, v3.4s diff --git a/llvm/test/CodeGen/AArch64/wineh-try-catch-nobase.ll b/llvm/test/CodeGen/AArch64/wineh-try-catch-nobase.ll --- a/llvm/test/CodeGen/AArch64/wineh-try-catch-nobase.ll +++ b/llvm/test/CodeGen/AArch64/wineh-try-catch-nobase.ll @@ -9,7 +9,7 @@ ; CHECK: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill ; CHECK-NEXT: .seh_save_fplr_x 16 ; CHECK-NEXT: .seh_endprologue -; CHECK-NEXT: sub x0, x29, #16 // =16 +; CHECK-NEXT: sub x0, x29, #16 ; CHECK-NEXT: mov x1, xzr ; CHECK-NEXT: bl "?bb@@YAXPEAHH@Z" ; CHECK-NEXT: adrp x0, .LBB0_1 diff --git a/llvm/test/Transforms/CanonicalizeFreezeInLoops/aarch64.ll b/llvm/test/Transforms/CanonicalizeFreezeInLoops/aarch64.ll --- a/llvm/test/Transforms/CanonicalizeFreezeInLoops/aarch64.ll +++ b/llvm/test/Transforms/CanonicalizeFreezeInLoops/aarch64.ll @@ -7,12 +7,12 @@ define void @f(i8* %p, i32 %n, i32 %m) { ; CHECK-LABEL: f: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: add w8, w2, #1 // =1 +; CHECK-NEXT: add w8, w2, #1 ; CHECK-NEXT: .LBB0_1: // %loop ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: strb wzr, [x0, w8, sxtw] -; CHECK-NEXT: subs w1, w1, #1 // =1 -; CHECK-NEXT: add w8, w8, #1 // =1 +; CHECK-NEXT: subs w1, w1, #1 +; CHECK-NEXT: add w8, w8, #1 ; CHECK-NEXT: b.ne .LBB0_1 ; CHECK-NEXT: // %bb.2: // %exit ; CHECK-NEXT: ret @@ -34,12 +34,12 @@ define void @f_without_freeze(i8* %p, i32 %n, i32 %m) { ; CHECK-LABEL: f_without_freeze: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: add w8, w2, #1 // =1 +; CHECK-NEXT: add w8, w2, #1 ; CHECK-NEXT: .LBB1_1: // %loop ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: strb wzr, [x0, w8, sxtw] -; CHECK-NEXT: subs w1, w1, #1 // =1 -; CHECK-NEXT: add w8, w8, #1 // =1 +; CHECK-NEXT: subs w1, w1, #1 +; CHECK-NEXT: add w8, w8, #1 ; CHECK-NEXT: b.ne .LBB1_1 ; CHECK-NEXT: // %bb.2: // %exit ; CHECK-NEXT: ret diff --git a/llvm/test/Transforms/LoopStrengthReduce/AArch64/lsr-pre-inc-offset-check.ll b/llvm/test/Transforms/LoopStrengthReduce/AArch64/lsr-pre-inc-offset-check.ll --- a/llvm/test/Transforms/LoopStrengthReduce/AArch64/lsr-pre-inc-offset-check.ll +++ b/llvm/test/Transforms/LoopStrengthReduce/AArch64/lsr-pre-inc-offset-check.ll @@ -18,15 +18,15 @@ define void @test_lsr_pre_inc_offset_check(%"Type"* %p) { ; CHECK-LABEL: test_lsr_pre_inc_offset_check: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: add x8, x0, #340 // =340 +; CHECK-NEXT: add x8, x0, #340 ; CHECK-NEXT: mov w9, #165 ; CHECK-NEXT: mov w10, #2 ; CHECK-NEXT: .LBB0_1: // %main ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: stur wzr, [x8, #-1] ; CHECK-NEXT: strb w10, [x8] -; CHECK-NEXT: subs x9, x9, #1 // =1 -; CHECK-NEXT: add x8, x8, #338 // =338 +; CHECK-NEXT: subs x9, x9, #1 +; CHECK-NEXT: add x8, x8, #338 ; CHECK-NEXT: b.ne .LBB0_1 ; CHECK-NEXT: // %bb.2: // %exit ; CHECK-NEXT: ret diff --git a/llvm/test/Transforms/LoopStrengthReduce/AArch64/small-constant.ll b/llvm/test/Transforms/LoopStrengthReduce/AArch64/small-constant.ll --- a/llvm/test/Transforms/LoopStrengthReduce/AArch64/small-constant.ll +++ b/llvm/test/Transforms/LoopStrengthReduce/AArch64/small-constant.ll @@ -20,7 +20,7 @@ ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: cbz x1, .LBB0_4 ; CHECK-NEXT: // %bb.1: // %for.body.preheader -; CHECK-NEXT: add x8, x0, #28 // =28 +; CHECK-NEXT: add x8, x0, #28 ; CHECK-NEXT: .LBB0_2: // %for.body ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: ldr s1, [x8, x1, lsl #2] @@ -28,7 +28,7 @@ ; CHECK-NEXT: b.gt .LBB0_5 ; CHECK-NEXT: // %bb.3: // %for.cond ; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1 -; CHECK-NEXT: add x1, x1, #1 // =1 +; CHECK-NEXT: add x1, x1, #1 ; CHECK-NEXT: cbnz x1, .LBB0_2 ; CHECK-NEXT: .LBB0_4: ; CHECK-NEXT: fmov s0, #-7.00000000 @@ -65,7 +65,7 @@ ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: cbz x1, .LBB1_4 ; CHECK-NEXT: // %bb.1: // %for.body.preheader -; 
CHECK-NEXT: add x8, x0, #28 // =28 +; CHECK-NEXT: add x8, x0, #28 ; CHECK-NEXT: .LBB1_2: // %for.body ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: ldr s1, [x8, x1, lsl #2] @@ -75,7 +75,7 @@ ; CHECK-NEXT: b.gt .LBB1_5 ; CHECK-NEXT: // %bb.3: // %for.cond ; CHECK-NEXT: // in Loop: Header=BB1_2 Depth=1 -; CHECK-NEXT: add x1, x1, #1 // =1 +; CHECK-NEXT: add x1, x1, #1 ; CHECK-NEXT: cbnz x1, .LBB1_2 ; CHECK-NEXT: .LBB1_4: ; CHECK-NEXT: fmov s0, #-7.00000000 diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.generated.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.generated.expected --- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.generated.expected +++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.generated.expected @@ -64,9 +64,9 @@ attributes #0 = { noredzone nounwind ssp uwtable "frame-pointer"="all" } ; CHECK-LABEL: check_boundaries: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #48 // =48 +; CHECK-NEXT: sub sp, sp, #48 ; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: add x29, sp, #32 // =32 +; CHECK-NEXT: add x29, sp, #32 ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 @@ -94,9 +94,9 @@ ; ; CHECK-LABEL: main: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #48 // =48 +; CHECK-NEXT: sub sp, sp, #48 ; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: add x29, sp, #32 // =32 +; CHECK-NEXT: add x29, sp, #32 ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 @@ -128,5 +128,5 @@ ; CHECK-LABEL: OUTLINED_FUNCTION_1: ; CHECK: // %bb.0: ; CHECK-NEXT: mov w0, wzr -; CHECK-NEXT: add sp, sp, #48 // =48 +; CHECK-NEXT: add sp, sp, #48 ; CHECK-NEXT: ret diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.nogenerated.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.nogenerated.expected --- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.nogenerated.expected +++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.nogenerated.expected @@ -5,9 +5,9 @@ define dso_local i32 @check_boundaries() #0 { ; CHECK-LABEL: check_boundaries: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #48 // =48 +; CHECK-NEXT: sub sp, sp, #48 ; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: add x29, sp, #32 // =32 +; CHECK-NEXT: add x29, sp, #32 ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 @@ -71,9 +71,9 @@ define dso_local i32 @main() #0 { ; CHECK-LABEL: main: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #48 // =48 +; CHECK-NEXT: sub sp, sp, #48 ; CHECK-NEXT: stp x29, x30, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: add x29, sp, #32 // =32 +; CHECK-NEXT: add x29, sp, #32 ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 diff --git a/llvm/test/tools/llvm-objdump/ELF/AArch64/disassemble-align.s b/llvm/test/tools/llvm-objdump/ELF/AArch64/disassemble-align.s --- a/llvm/test/tools/llvm-objdump/ELF/AArch64/disassemble-align.s +++ b/llvm/test/tools/llvm-objdump/ELF/AArch64/disassemble-align.s @@ -3,7 +3,7 @@ ## Use '|' to show where the tabs 
line up. # CHECK:0000000000000000 <$x.0>: -# CHECK-NEXT: 0: 62 10 00 91 |add|x2, x3, #4 // =4 +# CHECK-NEXT: 0: 62 10 00 91 |add|x2, x3, #4{{$}} # CHECK-NEXT: 4: 1f 20 03 d5 |nop # CHECK-EMPTY: # CHECK-NEXT:0000000000000008 <$d.1>: