diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp --- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp +++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp @@ -985,6 +985,7 @@ TII->get(Opcode), MI.getDebugLoc(), /*NoImplicit=*/true); MBB.insert(MBBI, NewMI); MachineInstrBuilder MIB1(MF, NewMI); + MIB1->setPCSections(MF, MI.getPCSections()); MIB1.addReg(MI.getOperand(0).getReg(), RegState::Define) .add(MI.getOperand(1)) .add(MI.getOperand(2)) diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp --- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp @@ -2341,10 +2341,10 @@ } case TargetOpcode::G_FENCE: { if (I.getOperand(1).getImm() == 0) - BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CompilerBarrier)) + BuildMI(MBB, I, MIMetadata(I), TII.get(AArch64::CompilerBarrier)) .addImm(I.getOperand(0).getImm()); else - BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::DMB)) + BuildMI(MBB, I, MIMetadata(I), TII.get(AArch64::DMB)) .addImm(I.getOperand(0).getImm() == 4 ? 0x9 : 0xb); I.eraseFromParent(); return true; diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll @@ -0,0 +1,1330 @@ +; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu -global-isel -global-isel-abort=1 -stop-after=aarch64-expand-pseudo -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,CHECK-NOLSE +; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu -global-isel -global-isel-abort=1 -stop-after=aarch64-expand-pseudo -mattr=+rcpc,+ldapr -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,CHECK-LDAPR + +define i32 @val_compare_and_swap(i32* %p, i32 %cmp, i32 %new) { + ; CHECK-LABEL: name: val_compare_and_swap + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $w1, $w2, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.cmpxchg.start: + ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000) + ; CHECK-NEXT: liveins: $w1, $w2, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p) + ; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w1, 0, implicit-def $nzcv, pcsections !0 + ; CHECK-NEXT: Bcc 1, %bb.3, implicit killed $nzcv, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2.cmpxchg.trystore: + ; CHECK-NEXT: successors: %bb.4(0x04000000), %bb.1(0x7c000000) + ; CHECK-NEXT: liveins: $w1, $w2, $x0, $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: early-clobber renamable $w9 = STXRW renamable $w2, renamable $x0, pcsections !0 :: (volatile store (s32) into %ir.p) + ; CHECK-NEXT: CBNZW killed renamable $w9, %bb.1 + ; CHECK-NEXT: B %bb.4 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.3.cmpxchg.nostore: + ; CHECK-NEXT: successors: %bb.4(0x80000000) + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: CLREX 15, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.4.cmpxchg.end: + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + %pair = cmpxchg 
i32* %p, i32 %cmp, i32 %new acquire acquire, !pcsections !0 + %val = extractvalue { i32, i1 } %pair, 0 + ret i32 %val +} + +define i32 @val_compare_and_swap_from_load(i32* %p, i32 %cmp, i32* %pnew) { + ; CHECK-LABEL: name: val_compare_and_swap_from_load + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $w1, $x0, $x2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w9 = LDRWui killed renamable $x2, 0, implicit-def $x9, pcsections !0 :: (load (s32) from %ir.pnew) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.cmpxchg.start: + ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000) + ; CHECK-NEXT: liveins: $w1, $x0, $x9 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p) + ; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w1, 0, implicit-def $nzcv, pcsections !0 + ; CHECK-NEXT: Bcc 1, %bb.3, implicit killed $nzcv, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2.cmpxchg.trystore: + ; CHECK-NEXT: successors: %bb.4(0x04000000), %bb.1(0x7c000000) + ; CHECK-NEXT: liveins: $w1, $x0, $x8, $x9 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: early-clobber renamable $w10 = STXRW renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s32) into %ir.p) + ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1 + ; CHECK-NEXT: B %bb.4 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.3.cmpxchg.nostore: + ; CHECK-NEXT: successors: %bb.4(0x80000000) + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: CLREX 15, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.4.cmpxchg.end: + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + %new = load i32, i32* %pnew, !pcsections !0 + %pair = cmpxchg i32* %p, i32 %cmp, i32 %new acquire acquire, !pcsections !0 + %val = extractvalue { i32, i1 } %pair, 0 + ret i32 %val +} + +define i32 @val_compare_and_swap_rel(i32* %p, i32 %cmp, i32 %new) { + ; CHECK-LABEL: name: val_compare_and_swap_rel + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $w1, $w2, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.cmpxchg.start: + ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000) + ; CHECK-NEXT: liveins: $w1, $w2, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p) + ; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w1, 0, implicit-def $nzcv, pcsections !0 + ; CHECK-NEXT: Bcc 1, %bb.3, implicit killed $nzcv, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2.cmpxchg.trystore: + ; CHECK-NEXT: successors: %bb.4(0x04000000), %bb.1(0x7c000000) + ; CHECK-NEXT: liveins: $w1, $w2, $x0, $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: early-clobber renamable $w9 = STLXRW renamable $w2, renamable $x0, pcsections !0 :: (volatile store (s32) into %ir.p) + ; CHECK-NEXT: CBNZW killed renamable $w9, %bb.1 + ; CHECK-NEXT: B %bb.4 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.3.cmpxchg.nostore: + ; CHECK-NEXT: successors: %bb.4(0x80000000) + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: CLREX 15, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.4.cmpxchg.end: + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + %pair = 
cmpxchg i32* %p, i32 %cmp, i32 %new acq_rel monotonic, !pcsections !0 + %val = extractvalue { i32, i1 } %pair, 0 + ret i32 %val +} + +define i64 @val_compare_and_swap_64(i64* %p, i64 %cmp, i64 %new) { + ; CHECK-LABEL: name: val_compare_and_swap_64 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $x0, $x1, $x2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.cmpxchg.start: + ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000) + ; CHECK-NEXT: liveins: $x0, $x1, $x2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $x8 = LDXRX renamable $x0, pcsections !0 :: (volatile load (s64) from %ir.p) + ; CHECK-NEXT: $xzr = SUBSXrs renamable $x8, renamable $x1, 0, implicit-def $nzcv, pcsections !0 + ; CHECK-NEXT: Bcc 1, %bb.3, implicit killed $nzcv, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2.cmpxchg.trystore: + ; CHECK-NEXT: successors: %bb.4(0x04000000), %bb.1(0x7c000000) + ; CHECK-NEXT: liveins: $x0, $x1, $x2, $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: early-clobber renamable $w9 = STXRX renamable $x2, renamable $x0, pcsections !0 :: (volatile store (s64) into %ir.p) + ; CHECK-NEXT: CBNZW killed renamable $w9, %bb.1 + ; CHECK-NEXT: B %bb.4 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.3.cmpxchg.nostore: + ; CHECK-NEXT: successors: %bb.4(0x80000000) + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: CLREX 15, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.4.cmpxchg.end: + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $x0 = ORRXrs $xzr, killed $x8, 0 + ; CHECK-NEXT: RET undef $lr, implicit $x0 + %pair = cmpxchg i64* %p, i64 %cmp, i64 %new monotonic monotonic, !pcsections !0 + %val = extractvalue { i64, i1 } %pair, 0 + ret i64 %val +} + +define i64 @val_compare_and_swap_64_monotonic_seqcst(i64* %p, i64 %cmp, i64 %new) { + ; CHECK-LABEL: name: val_compare_and_swap_64_monotonic_seqcst + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $x0, $x1, $x2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.cmpxchg.start: + ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000) + ; CHECK-NEXT: liveins: $x0, $x1, $x2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $x8 = LDAXRX renamable $x0, pcsections !0 :: (volatile load (s64) from %ir.p) + ; CHECK-NEXT: $xzr = SUBSXrs renamable $x8, renamable $x1, 0, implicit-def $nzcv, pcsections !0 + ; CHECK-NEXT: Bcc 1, %bb.3, implicit killed $nzcv, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2.cmpxchg.trystore: + ; CHECK-NEXT: successors: %bb.4(0x04000000), %bb.1(0x7c000000) + ; CHECK-NEXT: liveins: $x0, $x1, $x2, $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: early-clobber renamable $w9 = STLXRX renamable $x2, renamable $x0, pcsections !0 :: (volatile store (s64) into %ir.p) + ; CHECK-NEXT: CBNZW killed renamable $w9, %bb.1 + ; CHECK-NEXT: B %bb.4 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.3.cmpxchg.nostore: + ; CHECK-NEXT: successors: %bb.4(0x80000000) + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: CLREX 15, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.4.cmpxchg.end: + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $x0 = ORRXrs $xzr, killed $x8, 0 + ; CHECK-NEXT: RET undef $lr, implicit $x0 + %pair = cmpxchg i64* %p, i64 %cmp, i64 %new monotonic seq_cst, !pcsections !0 + %val = extractvalue { i64, i1 } %pair, 0 + ret i64 %val +} + +define i64 @val_compare_and_swap_64_release_acquire(i64* %p, i64 %cmp, i64 
%new) { + ; CHECK-LABEL: name: val_compare_and_swap_64_release_acquire + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $x0, $x1, $x2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.cmpxchg.start: + ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000) + ; CHECK-NEXT: liveins: $x0, $x1, $x2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $x8 = LDAXRX renamable $x0, pcsections !0 :: (volatile load (s64) from %ir.p) + ; CHECK-NEXT: $xzr = SUBSXrs renamable $x8, renamable $x1, 0, implicit-def $nzcv, pcsections !0 + ; CHECK-NEXT: Bcc 1, %bb.3, implicit killed $nzcv, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2.cmpxchg.trystore: + ; CHECK-NEXT: successors: %bb.4(0x04000000), %bb.1(0x7c000000) + ; CHECK-NEXT: liveins: $x0, $x1, $x2, $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: early-clobber renamable $w9 = STLXRX renamable $x2, renamable $x0, pcsections !0 :: (volatile store (s64) into %ir.p) + ; CHECK-NEXT: CBNZW killed renamable $w9, %bb.1 + ; CHECK-NEXT: B %bb.4 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.3.cmpxchg.nostore: + ; CHECK-NEXT: successors: %bb.4(0x80000000) + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: CLREX 15, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.4.cmpxchg.end: + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $x0 = ORRXrs $xzr, killed $x8, 0 + ; CHECK-NEXT: RET undef $lr, implicit $x0 + %pair = cmpxchg i64* %p, i64 %cmp, i64 %new release acquire, !pcsections !0 + %val = extractvalue { i64, i1 } %pair, 0 + ret i64 %val +} + +define i32 @fetch_and_nand(i32* %p) { + ; CHECK-LABEL: name: fetch_and_nand + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.atomicrmw.start: + ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK-NEXT: liveins: $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w8 = LDXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p) + ; CHECK-NEXT: renamable $w9 = ANDWri renamable $w8, 2, pcsections !0 + ; CHECK-NEXT: $w9 = ORNWrs $wzr, killed renamable $w9, 0, pcsections !0 + ; CHECK-NEXT: early-clobber renamable $w10 = STLXRW killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s32) into %ir.p) + ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2.atomicrmw.end: + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + %val = atomicrmw nand i32* %p, i32 7 release, !pcsections !0 + ret i32 %val +} + +define i64 @fetch_and_nand_64(i64* %p) { + ; CHECK-LABEL: name: fetch_and_nand_64 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.atomicrmw.start: + ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK-NEXT: liveins: $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $x8 = LDAXRX renamable $x0, pcsections !0 :: (volatile load (s64) from %ir.p) + ; CHECK-NEXT: renamable $x9 = ANDXri renamable $x8, 4098, pcsections !0 + ; CHECK-NEXT: $x9 = ORNXrs $xzr, killed renamable $x9, 0, pcsections !0 + ; CHECK-NEXT: early-clobber renamable $w10 = STLXRX killed renamable $x9, renamable $x0, pcsections !0 :: (volatile store (s64) into %ir.p) + 
; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2.atomicrmw.end: + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $x0 = ORRXrs $xzr, killed $x8, 0 + ; CHECK-NEXT: RET undef $lr, implicit $x0 + %val = atomicrmw nand i64* %p, i64 7 acq_rel, !pcsections !0 + ret i64 %val +} + +define i32 @fetch_and_or(i32* %p) { + ; CHECK-LABEL: name: fetch_and_or + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w9 = MOVZWi 5, 0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.atomicrmw.start: + ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK-NEXT: liveins: $w9, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p) + ; CHECK-NEXT: $w10 = ORRWrs renamable $w8, renamable $w9, 0, pcsections !0 + ; CHECK-NEXT: early-clobber renamable $w11 = STLXRW killed renamable $w10, renamable $x0, pcsections !0 :: (volatile store (s32) into %ir.p) + ; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2.atomicrmw.end: + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + %val = atomicrmw or i32* %p, i32 5 seq_cst, !pcsections !0 + ret i32 %val +} + +define i64 @fetch_and_or_64(i64* %p) { + ; CHECK-LABEL: name: fetch_and_or_64 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.atomicrmw.start: + ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK-NEXT: liveins: $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $x8 = LDXRX renamable $x0, pcsections !0 :: (volatile load (s64) from %ir.p) + ; CHECK-NEXT: renamable $x9 = ORRXri renamable $x8, 4098, pcsections !0 + ; CHECK-NEXT: early-clobber renamable $w10 = STXRX killed renamable $x9, renamable $x0, pcsections !0 :: (volatile store (s64) into %ir.p) + ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2.atomicrmw.end: + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $x0 = ORRXrs $xzr, killed $x8, 0 + ; CHECK-NEXT: RET undef $lr, implicit $x0 + %val = atomicrmw or i64* %p, i64 7 monotonic, !pcsections !0 + ret i64 %val +} + +define void @acquire_fence() { + ; CHECK-LABEL: name: acquire_fence + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: DMB 9, pcsections !0 + ; CHECK-NEXT: RET undef $lr + fence acquire, !pcsections !0 + ret void +} + +define void @release_fence() { + ; CHECK-LABEL: name: release_fence + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: DMB 11, pcsections !0 + ; CHECK-NEXT: RET undef $lr + fence release, !pcsections !0 + ret void +} + +define void @seq_cst_fence() { + ; CHECK-LABEL: name: seq_cst_fence + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: DMB 11, pcsections !0 + ; CHECK-NEXT: RET undef $lr + fence seq_cst, !pcsections !0 + ret void +} + +define i32 @atomic_load(i32* %p) { + ; CHECK-LABEL: name: atomic_load + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: liveins: $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w0 = LDARW killed renamable $x0, pcsections !0 :: (load seq_cst (s32) from %ir.p) + ; CHECK-NEXT: RET undef $lr, implicit $w0 + %r = load atomic i32, i32* %p seq_cst, align 4, 
!pcsections !0 + ret i32 %r +} + +define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) { + ; CHECK-LABEL: name: atomic_load_relaxed_8 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $x8 = ADDXri renamable $x0, 291, 12 + ; CHECK-NEXT: renamable $w9 = LDRBBui renamable $x0, 4095, pcsections !0 :: (load monotonic (s8) from %ir.ptr_unsigned) + ; CHECK-NEXT: renamable $w10 = LDRBBroW renamable $x0, killed renamable $w1, 1, 0, pcsections !0 :: (load unordered (s8) from %ir.ptr_regoff) + ; CHECK-NEXT: renamable $w11 = LDURBBi killed renamable $x0, -256, pcsections !0 :: (load monotonic (s8) from %ir.ptr_unscaled) + ; CHECK-NEXT: renamable $w8 = LDRBBui killed renamable $x8, 0, pcsections !0 :: (load unordered (s8) from %ir.ptr_random) + ; CHECK-NEXT: $w9 = ADDWrs killed renamable $w9, killed renamable $w10, 0, pcsections !0 + ; CHECK-NEXT: $w9 = ADDWrs killed renamable $w9, killed renamable $w11, 0, pcsections !0 + ; CHECK-NEXT: $w0 = ADDWrs killed renamable $w9, killed renamable $w8, 0, pcsections !0 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + %ptr_unsigned = getelementptr i8, i8* %p, i32 4095 + %val_unsigned = load atomic i8, i8* %ptr_unsigned monotonic, align 1, !pcsections !0 + + %ptr_regoff = getelementptr i8, i8* %p, i32 %off32 + %val_regoff = load atomic i8, i8* %ptr_regoff unordered, align 1, !pcsections !0 + %tot1 = add i8 %val_unsigned, %val_regoff, !pcsections !0 + + %ptr_unscaled = getelementptr i8, i8* %p, i32 -256 + %val_unscaled = load atomic i8, i8* %ptr_unscaled monotonic, align 1, !pcsections !0 + %tot2 = add i8 %tot1, %val_unscaled, !pcsections !0 + + %ptr_random = getelementptr i8, i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm) + %val_random = load atomic i8, i8* %ptr_random unordered, align 1, !pcsections !0 + %tot3 = add i8 %tot2, %val_random, !pcsections !0 + + ret i8 %tot3 +} + +define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) { + ; CHECK-LABEL: name: atomic_load_relaxed_16 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $x8 = ADDXri renamable $x0, 291, 12 + ; CHECK-NEXT: renamable $w9 = LDRHHui renamable $x0, 4095, pcsections !0 :: (load monotonic (s16) from %ir.ptr_unsigned) + ; CHECK-NEXT: renamable $w10 = LDRHHroW renamable $x0, killed renamable $w1, 1, 1, pcsections !0 :: (load unordered (s16) from %ir.ptr_regoff) + ; CHECK-NEXT: renamable $w11 = LDURHHi killed renamable $x0, -256, pcsections !0 :: (load monotonic (s16) from %ir.ptr_unscaled) + ; CHECK-NEXT: renamable $w8 = LDRHHui killed renamable $x8, 0, pcsections !0 :: (load unordered (s16) from %ir.ptr_random) + ; CHECK-NEXT: $w9 = ADDWrs killed renamable $w9, killed renamable $w10, 0, pcsections !0 + ; CHECK-NEXT: $w9 = ADDWrs killed renamable $w9, killed renamable $w11, 0, pcsections !0 + ; CHECK-NEXT: $w0 = ADDWrs killed renamable $w9, killed renamable $w8, 0, pcsections !0 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + %ptr_unsigned = getelementptr i16, i16* %p, i32 4095 + %val_unsigned = load atomic i16, i16* %ptr_unsigned monotonic, align 2, !pcsections !0 + + %ptr_regoff = getelementptr i16, i16* %p, i32 %off32 + %val_regoff = load atomic i16, i16* %ptr_regoff unordered, align 2, !pcsections !0 + %tot1 = add i16 %val_unsigned, %val_regoff, !pcsections !0 + + %ptr_unscaled = getelementptr i16, i16* %p, i32 -128 + %val_unscaled = load atomic i16, i16* %ptr_unscaled monotonic, align 2, !pcsections !0 + %tot2 = add i16 %tot1, %val_unscaled, !pcsections !0 + + %ptr_random = 
getelementptr i16, i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm) + %val_random = load atomic i16, i16* %ptr_random unordered, align 2, !pcsections !0 + %tot3 = add i16 %tot2, %val_random, !pcsections !0 + + ret i16 %tot3 +} + +define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) { + ; CHECK-LABEL: name: atomic_load_relaxed_32 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $x8 = ADDXri renamable $x0, 291, 12 + ; CHECK-NEXT: renamable $w9 = LDRWui renamable $x0, 4095, pcsections !0 :: (load monotonic (s32) from %ir.ptr_unsigned) + ; CHECK-NEXT: renamable $w10 = LDRWroW renamable $x0, killed renamable $w1, 1, 1, pcsections !0 :: (load unordered (s32) from %ir.ptr_regoff) + ; CHECK-NEXT: renamable $w11 = LDURWi killed renamable $x0, -256, pcsections !0 :: (load monotonic (s32) from %ir.ptr_unscaled) + ; CHECK-NEXT: renamable $w8 = LDRWui killed renamable $x8, 0, pcsections !0 :: (load unordered (s32) from %ir.ptr_random) + ; CHECK-NEXT: $w9 = ADDWrs killed renamable $w9, killed renamable $w10, 0, pcsections !0 + ; CHECK-NEXT: $w9 = ADDWrs killed renamable $w9, killed renamable $w11, 0, pcsections !0 + ; CHECK-NEXT: $w0 = ADDWrs killed renamable $w9, killed renamable $w8, 0, pcsections !0 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + %ptr_unsigned = getelementptr i32, i32* %p, i32 4095 + %val_unsigned = load atomic i32, i32* %ptr_unsigned monotonic, align 4, !pcsections !0 + + %ptr_regoff = getelementptr i32, i32* %p, i32 %off32 + %val_regoff = load atomic i32, i32* %ptr_regoff unordered, align 4, !pcsections !0 + %tot1 = add i32 %val_unsigned, %val_regoff, !pcsections !0 + + %ptr_unscaled = getelementptr i32, i32* %p, i32 -64 + %val_unscaled = load atomic i32, i32* %ptr_unscaled monotonic, align 4, !pcsections !0 + %tot2 = add i32 %tot1, %val_unscaled, !pcsections !0 + + %ptr_random = getelementptr i32, i32* %p, i32 297984 ; 0x123000/4 (i.e. 
ADD imm) + %val_random = load atomic i32, i32* %ptr_random unordered, align 4, !pcsections !0 + %tot3 = add i32 %tot2, %val_random, !pcsections !0 + + ret i32 %tot3 +} + +define i64 @atomic_load_relaxed_64(i64* %p, i32 %off32) { + ; CHECK-LABEL: name: atomic_load_relaxed_64 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $x8 = ADDXri renamable $x0, 291, 12 + ; CHECK-NEXT: renamable $x9 = LDRXui renamable $x0, 4095, pcsections !0 :: (load monotonic (s64) from %ir.ptr_unsigned) + ; CHECK-NEXT: renamable $x10 = LDRXroW renamable $x0, killed renamable $w1, 1, 1, pcsections !0 :: (load unordered (s64) from %ir.ptr_regoff) + ; CHECK-NEXT: renamable $x11 = LDURXi killed renamable $x0, -256, pcsections !0 :: (load monotonic (s64) from %ir.ptr_unscaled) + ; CHECK-NEXT: renamable $x8 = LDRXui killed renamable $x8, 0, pcsections !0 :: (load unordered (s64) from %ir.ptr_random) + ; CHECK-NEXT: $x9 = ADDXrs killed renamable $x9, killed renamable $x10, 0, pcsections !0 + ; CHECK-NEXT: $x9 = ADDXrs killed renamable $x9, killed renamable $x11, 0, pcsections !0 + ; CHECK-NEXT: $x0 = ADDXrs killed renamable $x9, killed renamable $x8, 0, pcsections !0 + ; CHECK-NEXT: RET undef $lr, implicit $x0 + %ptr_unsigned = getelementptr i64, i64* %p, i32 4095 + %val_unsigned = load atomic i64, i64* %ptr_unsigned monotonic, align 8, !pcsections !0 + + %ptr_regoff = getelementptr i64, i64* %p, i32 %off32 + %val_regoff = load atomic i64, i64* %ptr_regoff unordered, align 8, !pcsections !0 + %tot1 = add i64 %val_unsigned, %val_regoff, !pcsections !0 + + %ptr_unscaled = getelementptr i64, i64* %p, i32 -32 + %val_unscaled = load atomic i64, i64* %ptr_unscaled monotonic, align 8, !pcsections !0 + %tot2 = add i64 %tot1, %val_unscaled, !pcsections !0 + + %ptr_random = getelementptr i64, i64* %p, i32 148992 ; 0x123000/8 (i.e. 
ADD imm) + %val_random = load atomic i64, i64* %ptr_random unordered, align 8, !pcsections !0 + %tot3 = add i64 %tot2, %val_random, !pcsections !0 + + ret i64 %tot3 +} + + +define void @atomic_store(i32* %p) { + ; CHECK-LABEL: name: atomic_store + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: liveins: $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w8 = MOVZWi 4, 0 + ; CHECK-NEXT: STLRW killed renamable $w8, killed renamable $x0, pcsections !0 :: (store seq_cst (s32) into %ir.p) + ; CHECK-NEXT: RET undef $lr + store atomic i32 4, i32* %p seq_cst, align 4, !pcsections !0 + ret void +} + +define void @atomic_store_relaxed_8(i8* %p, i32 %off32, i8 %val) { + ; CHECK-LABEL: name: atomic_store_relaxed_8 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: liveins: $w1, $w2, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $x8 = ADDXri renamable $x0, 291, 12 + ; CHECK-NEXT: STRBBui renamable $w2, renamable $x0, 4095, pcsections !0 :: (store monotonic (s8) into %ir.ptr_unsigned) + ; CHECK-NEXT: STRBBroW renamable $w2, renamable $x0, killed renamable $w1, 1, 0, pcsections !0 :: (store unordered (s8) into %ir.ptr_regoff) + ; CHECK-NEXT: STURBBi renamable $w2, killed renamable $x0, -256, pcsections !0 :: (store monotonic (s8) into %ir.ptr_unscaled) + ; CHECK-NEXT: STRBBui killed renamable $w2, killed renamable $x8, 0, pcsections !0 :: (store unordered (s8) into %ir.ptr_random) + ; CHECK-NEXT: RET undef $lr + %ptr_unsigned = getelementptr i8, i8* %p, i32 4095 + store atomic i8 %val, i8* %ptr_unsigned monotonic, align 1, !pcsections !0 + + %ptr_regoff = getelementptr i8, i8* %p, i32 %off32 + store atomic i8 %val, i8* %ptr_regoff unordered, align 1, !pcsections !0 + + %ptr_unscaled = getelementptr i8, i8* %p, i32 -256 + store atomic i8 %val, i8* %ptr_unscaled monotonic, align 1, !pcsections !0 + + %ptr_random = getelementptr i8, i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm) + store atomic i8 %val, i8* %ptr_random unordered, align 1, !pcsections !0 + + ret void +} + +define void @atomic_store_relaxed_16(i16* %p, i32 %off32, i16 %val) { + ; CHECK-LABEL: name: atomic_store_relaxed_16 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: liveins: $w1, $w2, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $x8 = ADDXri renamable $x0, 291, 12 + ; CHECK-NEXT: STRHHui renamable $w2, renamable $x0, 4095, pcsections !0 :: (store monotonic (s16) into %ir.ptr_unsigned) + ; CHECK-NEXT: STRHHroW renamable $w2, renamable $x0, killed renamable $w1, 1, 1, pcsections !0 :: (store unordered (s16) into %ir.ptr_regoff) + ; CHECK-NEXT: STURHHi renamable $w2, killed renamable $x0, -256, pcsections !0 :: (store monotonic (s16) into %ir.ptr_unscaled) + ; CHECK-NEXT: STRHHui killed renamable $w2, killed renamable $x8, 0, pcsections !0 :: (store unordered (s16) into %ir.ptr_random) + ; CHECK-NEXT: RET undef $lr + %ptr_unsigned = getelementptr i16, i16* %p, i32 4095 + store atomic i16 %val, i16* %ptr_unsigned monotonic, align 2, !pcsections !0 + + %ptr_regoff = getelementptr i16, i16* %p, i32 %off32 + store atomic i16 %val, i16* %ptr_regoff unordered, align 2, !pcsections !0 + + %ptr_unscaled = getelementptr i16, i16* %p, i32 -128 + store atomic i16 %val, i16* %ptr_unscaled monotonic, align 2, !pcsections !0 + + %ptr_random = getelementptr i16, i16* %p, i32 595968 ; 0x123000/2 (i.e.
ADD imm) + store atomic i16 %val, i16* %ptr_random unordered, align 2, !pcsections !0 + + ret void +} + +define void @atomic_store_relaxed_32(i32* %p, i32 %off32, i32 %val) { + ; CHECK-LABEL: name: atomic_store_relaxed_32 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: liveins: $w1, $w2, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $x8 = ADDXri renamable $x0, 291, 12 + ; CHECK-NEXT: STRWui renamable $w2, renamable $x0, 4095, pcsections !0 :: (store monotonic (s32) into %ir.ptr_unsigned) + ; CHECK-NEXT: STRWroW renamable $w2, renamable $x0, killed renamable $w1, 1, 1, pcsections !0 :: (store unordered (s32) into %ir.ptr_regoff) + ; CHECK-NEXT: STURWi renamable $w2, killed renamable $x0, -256, pcsections !0 :: (store monotonic (s32) into %ir.ptr_unscaled) + ; CHECK-NEXT: STRWui killed renamable $w2, killed renamable $x8, 0, pcsections !0 :: (store unordered (s32) into %ir.ptr_random) + ; CHECK-NEXT: RET undef $lr + %ptr_unsigned = getelementptr i32, i32* %p, i32 4095 + store atomic i32 %val, i32* %ptr_unsigned monotonic, align 4, !pcsections !0 + + %ptr_regoff = getelementptr i32, i32* %p, i32 %off32 + store atomic i32 %val, i32* %ptr_regoff unordered, align 4, !pcsections !0 + + %ptr_unscaled = getelementptr i32, i32* %p, i32 -64 + store atomic i32 %val, i32* %ptr_unscaled monotonic, align 4, !pcsections !0 + + %ptr_random = getelementptr i32, i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm) + store atomic i32 %val, i32* %ptr_random unordered, align 4, !pcsections !0 + + ret void +} + +define void @atomic_store_relaxed_64(i64* %p, i32 %off32, i64 %val) { + ; CHECK-LABEL: name: atomic_store_relaxed_64 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: liveins: $w1, $x0, $x2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $x8 = ADDXri renamable $x0, 291, 12 + ; CHECK-NEXT: STRXui renamable $x2, renamable $x0, 4095, pcsections !0 :: (store monotonic (s64) into %ir.ptr_unsigned) + ; CHECK-NEXT: STRXroW renamable $x2, renamable $x0, killed renamable $w1, 1, 1, pcsections !0 :: (store unordered (s64) into %ir.ptr_regoff) + ; CHECK-NEXT: STURXi renamable $x2, killed renamable $x0, -256, pcsections !0 :: (store monotonic (s64) into %ir.ptr_unscaled) + ; CHECK-NEXT: STRXui killed renamable $x2, killed renamable $x8, 0, pcsections !0 :: (store unordered (s64) into %ir.ptr_random) + ; CHECK-NEXT: RET undef $lr + %ptr_unsigned = getelementptr i64, i64* %p, i32 4095 + store atomic i64 %val, i64* %ptr_unsigned monotonic, align 8, !pcsections !0 + + %ptr_regoff = getelementptr i64, i64* %p, i32 %off32 + store atomic i64 %val, i64* %ptr_regoff unordered, align 8, !pcsections !0 + + %ptr_unscaled = getelementptr i64, i64* %p, i32 -32 + store atomic i64 %val, i64* %ptr_unscaled monotonic, align 8, !pcsections !0 + + %ptr_random = getelementptr i64, i64* %p, i32 148992 ; 0x123000/8 (i.e. 
ADD imm) + store atomic i64 %val, i64* %ptr_random unordered, align 8, !pcsections !0 + + ret void +} + +define i32 @load_zext(i8* %p8, i16* %p16) { + ; CHECK-NOLSE-LABEL: name: load_zext + ; CHECK-NOLSE: bb.0 (%ir-block.0): + ; CHECK-NOLSE-NEXT: liveins: $x0, $x1 + ; CHECK-NOLSE-NEXT: {{ $}} + ; CHECK-NOLSE-NEXT: renamable $w8 = LDARB killed renamable $x0, pcsections !0 :: (load acquire (s8) from %ir.p8) + ; CHECK-NOLSE-NEXT: renamable $w9 = LDRHHui killed renamable $x1, 0, pcsections !0 :: (load unordered (s16) from %ir.p16) + ; CHECK-NOLSE-NEXT: renamable $w0 = ADDWrx killed renamable $w9, killed renamable $w8, 0, pcsections !0 + ; CHECK-NOLSE-NEXT: RET undef $lr, implicit $w0 + ; CHECK-LDAPR-LABEL: name: load_zext + ; CHECK-LDAPR: bb.0 (%ir-block.0): + ; CHECK-LDAPR-NEXT: liveins: $x0, $x1 + ; CHECK-LDAPR-NEXT: {{ $}} + ; CHECK-LDAPR-NEXT: renamable $w8 = LDAPRB killed renamable $x0, pcsections !0 :: (load acquire (s8) from %ir.p8) + ; CHECK-LDAPR-NEXT: renamable $w9 = LDRHHui killed renamable $x1, 0, pcsections !0 :: (load unordered (s16) from %ir.p16) + ; CHECK-LDAPR-NEXT: renamable $w0 = ADDWrx killed renamable $w9, killed renamable $w8, 0, pcsections !0 + ; CHECK-LDAPR-NEXT: RET undef $lr, implicit $w0 + %val1.8 = load atomic i8, i8* %p8 acquire, align 1, !pcsections !0 + %val1 = zext i8 %val1.8 to i32 + + %val2.16 = load atomic i16, i16* %p16 unordered, align 2, !pcsections !0 + %val2 = zext i16 %val2.16 to i32 + + %res = add i32 %val1, %val2, !pcsections !0 + ret i32 %res +} + +define { i32, i64 } @load_acq(i32* %p32, i64* %p64) { + ; CHECK-NOLSE-LABEL: name: load_acq + ; CHECK-NOLSE: bb.0 (%ir-block.0): + ; CHECK-NOLSE-NEXT: liveins: $x0, $x1 + ; CHECK-NOLSE-NEXT: {{ $}} + ; CHECK-NOLSE-NEXT: renamable $w0 = LDARW killed renamable $x0, pcsections !0 :: (load seq_cst (s32) from %ir.p32) + ; CHECK-NOLSE-NEXT: renamable $x1 = LDARX killed renamable $x1, pcsections !0 :: (load acquire (s64) from %ir.p64) + ; CHECK-NOLSE-NEXT: RET undef $lr, implicit $w0, implicit $x1 + ; CHECK-LDAPR-LABEL: name: load_acq + ; CHECK-LDAPR: bb.0 (%ir-block.0): + ; CHECK-LDAPR-NEXT: liveins: $x0, $x1 + ; CHECK-LDAPR-NEXT: {{ $}} + ; CHECK-LDAPR-NEXT: renamable $w0 = LDARW killed renamable $x0, pcsections !0 :: (load seq_cst (s32) from %ir.p32) + ; CHECK-LDAPR-NEXT: renamable $x1 = LDAPRX killed renamable $x1, pcsections !0 :: (load acquire (s64) from %ir.p64) + ; CHECK-LDAPR-NEXT: RET undef $lr, implicit $w0, implicit $x1 + %val32 = load atomic i32, i32* %p32 seq_cst, align 4, !pcsections !0 + %tmp = insertvalue { i32, i64 } undef, i32 %val32, 0 + + %val64 = load atomic i64, i64* %p64 acquire, align 8, !pcsections !0 + %res = insertvalue { i32, i64 } %tmp, i64 %val64, 1 + + ret { i32, i64 } %res +} + +define i32 @load_sext(i8* %p8, i16* %p16) { + ; CHECK-NOLSE-LABEL: name: load_sext + ; CHECK-NOLSE: bb.0 (%ir-block.0): + ; CHECK-NOLSE-NEXT: liveins: $x0, $x1 + ; CHECK-NOLSE-NEXT: {{ $}} + ; CHECK-NOLSE-NEXT: renamable $w8 = LDARB killed renamable $x0, pcsections !0 :: (load acquire (s8) from %ir.p8) + ; CHECK-NOLSE-NEXT: renamable $w9 = LDRHHui killed renamable $x1, 0, pcsections !0 :: (load unordered (s16) from %ir.p16) + ; CHECK-NOLSE-NEXT: renamable $w9 = SBFMWri killed renamable $w9, 0, 15 + ; CHECK-NOLSE-NEXT: renamable $w0 = ADDWrx killed renamable $w9, killed renamable $w8, 32, pcsections !0 + ; CHECK-NOLSE-NEXT: RET undef $lr, implicit $w0 + ; CHECK-LDAPR-LABEL: name: load_sext + ; CHECK-LDAPR: bb.0 (%ir-block.0): + ; CHECK-LDAPR-NEXT: liveins: $x0, $x1 + ; CHECK-LDAPR-NEXT: {{ $}} + ; 
CHECK-LDAPR-NEXT: renamable $w8 = LDAPRB killed renamable $x0, pcsections !0 :: (load acquire (s8) from %ir.p8) + ; CHECK-LDAPR-NEXT: renamable $w9 = LDRHHui killed renamable $x1, 0, pcsections !0 :: (load unordered (s16) from %ir.p16) + ; CHECK-LDAPR-NEXT: renamable $w9 = SBFMWri killed renamable $w9, 0, 15 + ; CHECK-LDAPR-NEXT: renamable $w0 = ADDWrx killed renamable $w9, killed renamable $w8, 32, pcsections !0 + ; CHECK-LDAPR-NEXT: RET undef $lr, implicit $w0 + %val1.8 = load atomic i8, i8* %p8 acquire, align 1, !pcsections !0 + %val1 = sext i8 %val1.8 to i32 + + %val2.16 = load atomic i16, i16* %p16 unordered, align 2, !pcsections !0 + %val2 = sext i16 %val2.16 to i32 + + %res = add i32 %val1, %val2, !pcsections !0 + ret i32 %res +} + +define void @store_trunc(i32 %val, i8* %p8, i16* %p16) { + ; CHECK-LABEL: name: store_trunc + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: liveins: $w0, $x1, $x2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: STLRB renamable $w0, killed renamable $x1, pcsections !0 :: (store seq_cst (s8) into %ir.p8) + ; CHECK-NEXT: STRHHui killed renamable $w0, killed renamable $x2, 0, pcsections !0 :: (store monotonic (s16) into %ir.p16) + ; CHECK-NEXT: RET undef $lr + %val8 = trunc i32 %val to i8 + store atomic i8 %val8, i8* %p8 seq_cst, align 1, !pcsections !0 + + %val16 = trunc i32 %val to i16 + store atomic i16 %val16, i16* %p16 monotonic, align 2, !pcsections !0 + + ret void +} + +define i8 @atomicrmw_add_i8(i8* %ptr, i8 %rhs) { + ; CHECK-LABEL: name: atomicrmw_add_i8 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.atomicrmw.start: + ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: $w9 = ADDWrs renamable $w8, renamable $w1, 0, implicit-def $x9, pcsections !0 + ; CHECK-NEXT: early-clobber renamable $w10 = STLXRB renamable $w9, renamable $x0, implicit killed $x9, pcsections !0 :: (volatile store (s8) into %ir.ptr) + ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2.atomicrmw.end: + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + %res = atomicrmw add i8* %ptr, i8 %rhs seq_cst, !pcsections !0 + ret i8 %res +} + +define i8 @atomicrmw_xchg_i8(i8* %ptr, i8 %rhs) { + ; CHECK-LABEL: name: atomicrmw_xchg_i8 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w1 = KILL $w1, implicit-def $x1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.atomicrmw.start: + ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK-NEXT: liveins: $x0, $x1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: early-clobber renamable $w9 = STXRB renamable $w1, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr) + ; CHECK-NEXT: CBNZW killed renamable $w9, %bb.1, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2.atomicrmw.end: + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8 + ; CHECK-NEXT: RET 
undef $lr, implicit $w0 + %res = atomicrmw xchg i8* %ptr, i8 %rhs monotonic, !pcsections !0 + ret i8 %res +} + +define i8 @atomicrmw_sub_i8(i8* %ptr, i8 %rhs) { + ; CHECK-LABEL: name: atomicrmw_sub_i8 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.atomicrmw.start: + ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: $w9 = SUBWrs renamable $w8, renamable $w1, 0, implicit-def $x9, pcsections !0 + ; CHECK-NEXT: early-clobber renamable $w10 = STXRB renamable $w9, renamable $x0, implicit killed $x9, pcsections !0 :: (volatile store (s8) into %ir.ptr) + ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2.atomicrmw.end: + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + %res = atomicrmw sub i8* %ptr, i8 %rhs acquire, !pcsections !0 + ret i8 %res +} + +define i8 @atomicrmw_and_i8(i8* %ptr, i8 %rhs) { + ; CHECK-LABEL: name: atomicrmw_and_i8 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.atomicrmw.start: + ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: $w9 = ANDWrs renamable $w8, renamable $w1, 0, implicit-def $x9, pcsections !0 + ; CHECK-NEXT: early-clobber renamable $w10 = STLXRB renamable $w9, renamable $x0, implicit killed $x9, pcsections !0 :: (volatile store (s8) into %ir.ptr) + ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2.atomicrmw.end: + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + %res = atomicrmw and i8* %ptr, i8 %rhs release, !pcsections !0 + ret i8 %res +} + +define i8 @atomicrmw_or_i8(i8* %ptr, i8 %rhs) { + ; CHECK-LABEL: name: atomicrmw_or_i8 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.atomicrmw.start: + ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: $w9 = ORRWrs renamable $w8, renamable $w1, 0, implicit-def $x9, pcsections !0 + ; CHECK-NEXT: early-clobber renamable $w10 = STLXRB renamable $w9, renamable $x0, implicit killed $x9, pcsections !0 :: (volatile store (s8) into %ir.ptr) + ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2.atomicrmw.end: + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + %res = atomicrmw or i8* %ptr, i8 %rhs seq_cst, !pcsections !0 + ret i8 %res +} + 
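+; Note on the common pattern exercised by the atomicrmw tests in this file:
+; without LSE atomics, an IR-level read-modify-write such as
+;
+;   %res = atomicrmw xor i8* %ptr, i8 %rhs monotonic, !pcsections !0
+;
+; is expanded into a load-linked/store-conditional loop, and the !pcsections
+; metadata must survive onto every instruction of that loop: the LDXR*/LDAXR*
+; load, the intervening ALU op, the STXR*/STLXR* store, and the CBNZW retry
+; branch. The `pcsections !0` operand on the CHECK lines verifies this.
+; Similarly, the C++ changes at the top of the patch forward the metadata
+; through pseudo expansion (setPCSections() in AArch64ExpandPseudoInsts.cpp)
+; and through G_FENCE selection (MIMetadata in AArch64InstructionSelector.cpp).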
+define i8 @atomicrmw_xor_i8(i8* %ptr, i8 %rhs) { + ; CHECK-LABEL: name: atomicrmw_xor_i8 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.atomicrmw.start: + ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: $w9 = EORWrs renamable $w8, renamable $w1, 0, implicit-def $x9, pcsections !0 + ; CHECK-NEXT: early-clobber renamable $w10 = STXRB renamable $w9, renamable $x0, implicit killed $x9, pcsections !0 :: (volatile store (s8) into %ir.ptr) + ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2.atomicrmw.end: + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + %res = atomicrmw xor i8* %ptr, i8 %rhs monotonic, !pcsections !0 + ret i8 %res +} + +define i8 @atomicrmw_min_i8(i8* %ptr, i8 %rhs) { + ; CHECK-LABEL: name: atomicrmw_min_i8 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.atomicrmw.start: + ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w9 = SBFMWri renamable $w8, 0, 7, pcsections !0 + ; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 32, implicit-def $nzcv, pcsections !0 + ; CHECK-NEXT: renamable $w9 = CSELWr renamable $w8, renamable $w1, 13, implicit killed $nzcv, implicit-def $x9, pcsections !0 + ; CHECK-NEXT: early-clobber renamable $w10 = STXRB renamable $w9, renamable $x0, implicit killed $x9, pcsections !0 :: (volatile store (s8) into %ir.ptr) + ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2.atomicrmw.end: + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + %res = atomicrmw min i8* %ptr, i8 %rhs acquire, !pcsections !0 + ret i8 %res +} + +define i8 @atomicrmw_max_i8(i8* %ptr, i8 %rhs) { + ; CHECK-LABEL: name: atomicrmw_max_i8 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.atomicrmw.start: + ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w9 = SBFMWri renamable $w8, 0, 7, pcsections !0 + ; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 32, implicit-def $nzcv, pcsections !0 + ; CHECK-NEXT: renamable $w9 = CSELWr renamable $w8, renamable $w1, 12, implicit killed $nzcv, implicit-def $x9, pcsections !0 + ; CHECK-NEXT: early-clobber renamable $w10 = STLXRB renamable $w9, renamable $x0, implicit killed $x9, pcsections !0 :: (volatile store (s8) into %ir.ptr) + ; CHECK-NEXT: CBNZW killed 
renamable $w10, %bb.1, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2.atomicrmw.end: + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + %res = atomicrmw max i8* %ptr, i8 %rhs release, !pcsections !0 + ret i8 %res +} + +define i8 @atomicrmw_umin_i8(i8* %ptr, i8 %rhs) { + ; CHECK-LABEL: name: atomicrmw_umin_i8 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w9 = ANDWri killed renamable $w1, 7 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.atomicrmw.start: + ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK-NEXT: liveins: $w9, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w10 = ANDWri renamable $w8, 7 + ; CHECK-NEXT: $wzr = SUBSWrs renamable $w10, renamable $w9, 0, implicit-def $nzcv, pcsections !0 + ; CHECK-NEXT: renamable $w10 = CSELWr killed renamable $w10, renamable $w9, 9, implicit killed $nzcv, implicit-def $x10, pcsections !0 + ; CHECK-NEXT: early-clobber renamable $w11 = STLXRB renamable $w10, renamable $x0, implicit killed $x10, pcsections !0 :: (volatile store (s8) into %ir.ptr) + ; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2.atomicrmw.end: + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + %res = atomicrmw umin i8* %ptr, i8 %rhs seq_cst, !pcsections !0 + ret i8 %res +} + +define i8 @atomicrmw_umax_i8(i8* %ptr, i8 %rhs) { + ; CHECK-LABEL: name: atomicrmw_umax_i8 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w9 = ANDWri killed renamable $w1, 7 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.atomicrmw.start: + ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK-NEXT: liveins: $w9, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w10 = ANDWri renamable $w8, 7 + ; CHECK-NEXT: $wzr = SUBSWrs renamable $w10, renamable $w9, 0, implicit-def $nzcv, pcsections !0 + ; CHECK-NEXT: renamable $w10 = CSELWr killed renamable $w10, renamable $w9, 8, implicit killed $nzcv, implicit-def $x10, pcsections !0 + ; CHECK-NEXT: early-clobber renamable $w11 = STXRB renamable $w10, renamable $x0, implicit killed $x10, pcsections !0 :: (volatile store (s8) into %ir.ptr) + ; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2.atomicrmw.end: + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + %res = atomicrmw umax i8* %ptr, i8 %rhs monotonic, !pcsections !0 + ret i8 %res +} + +define i16 @atomicrmw_add_i16(i16* %ptr, i16 %rhs) { + ; CHECK-LABEL: name: atomicrmw_add_i16 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.atomicrmw.start: + ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK-NEXT: liveins: 
$w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: $w9 = ADDWrs renamable $w8, renamable $w1, 0, implicit-def $x9, pcsections !0 + ; CHECK-NEXT: early-clobber renamable $w10 = STLXRH renamable $w9, renamable $x0, implicit killed $x9, pcsections !0 :: (volatile store (s16) into %ir.ptr) + ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2.atomicrmw.end: + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + %res = atomicrmw add i16* %ptr, i16 %rhs seq_cst, !pcsections !0 + ret i16 %res +} + +define i16 @atomicrmw_xchg_i16(i16* %ptr, i16 %rhs) { + ; CHECK-LABEL: name: atomicrmw_xchg_i16 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w1 = KILL $w1, implicit-def $x1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.atomicrmw.start: + ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK-NEXT: liveins: $x0, $x1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: early-clobber renamable $w9 = STXRH renamable $w1, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr) + ; CHECK-NEXT: CBNZW killed renamable $w9, %bb.1, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2.atomicrmw.end: + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + %res = atomicrmw xchg i16* %ptr, i16 %rhs monotonic, !pcsections !0 + ret i16 %res +} + +define i16 @atomicrmw_sub_i16(i16* %ptr, i16 %rhs) { + ; CHECK-LABEL: name: atomicrmw_sub_i16 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.atomicrmw.start: + ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: $w9 = SUBWrs renamable $w8, renamable $w1, 0, implicit-def $x9, pcsections !0 + ; CHECK-NEXT: early-clobber renamable $w10 = STXRH renamable $w9, renamable $x0, implicit killed $x9, pcsections !0 :: (volatile store (s16) into %ir.ptr) + ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2.atomicrmw.end: + ; CHECK-NEXT: liveins: $x8 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + %res = atomicrmw sub i16* %ptr, i16 %rhs acquire, !pcsections !0 + ret i16 %res +} + +define i16 @atomicrmw_and_i16(i16* %ptr, i16 %rhs) { + ; CHECK-LABEL: name: atomicrmw_and_i16 + ; CHECK: bb.0 (%ir-block.0): + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1.atomicrmw.start: + ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) + ; CHECK-NEXT: liveins: $w1, $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile 
load (s16) from %ir.ptr)
+  ; CHECK-NEXT: $w9 = ANDWrs renamable $w8, renamable $w1, 0, implicit-def $x9, pcsections !0
+  ; CHECK-NEXT: early-clobber renamable $w10 = STLXRH renamable $w9, renamable $x0, implicit killed $x9, pcsections !0 :: (volatile store (s16) into %ir.ptr)
+  ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.2.atomicrmw.end:
+  ; CHECK-NEXT: liveins: $x8
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
+  ; CHECK-NEXT: RET undef $lr, implicit $w0
+  %res = atomicrmw and i16* %ptr, i16 %rhs release, !pcsections !0
+  ret i16 %res
+}
+
+define i16 @atomicrmw_or_i16(i16* %ptr, i16 %rhs) {
+  ; CHECK-LABEL: name: atomicrmw_or_i16
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK-NEXT: successors: %bb.1(0x80000000)
+  ; CHECK-NEXT: liveins: $w1, $x0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.1.atomicrmw.start:
+  ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+  ; CHECK-NEXT: liveins: $w1, $x0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+  ; CHECK-NEXT: $w9 = ORRWrs renamable $w8, renamable $w1, 0, implicit-def $x9, pcsections !0
+  ; CHECK-NEXT: early-clobber renamable $w10 = STLXRH renamable $w9, renamable $x0, implicit killed $x9, pcsections !0 :: (volatile store (s16) into %ir.ptr)
+  ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.2.atomicrmw.end:
+  ; CHECK-NEXT: liveins: $x8
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
+  ; CHECK-NEXT: RET undef $lr, implicit $w0
+  %res = atomicrmw or i16* %ptr, i16 %rhs seq_cst, !pcsections !0
+  ret i16 %res
+}
+
+define i16 @atomicrmw_xor_i16(i16* %ptr, i16 %rhs) {
+  ; CHECK-LABEL: name: atomicrmw_xor_i16
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK-NEXT: successors: %bb.1(0x80000000)
+  ; CHECK-NEXT: liveins: $w1, $x0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.1.atomicrmw.start:
+  ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+  ; CHECK-NEXT: liveins: $w1, $x0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+  ; CHECK-NEXT: $w9 = EORWrs renamable $w8, renamable $w1, 0, implicit-def $x9, pcsections !0
+  ; CHECK-NEXT: early-clobber renamable $w10 = STXRH renamable $w9, renamable $x0, implicit killed $x9, pcsections !0 :: (volatile store (s16) into %ir.ptr)
+  ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.2.atomicrmw.end:
+  ; CHECK-NEXT: liveins: $x8
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
+  ; CHECK-NEXT: RET undef $lr, implicit $w0
+  %res = atomicrmw xor i16* %ptr, i16 %rhs monotonic, !pcsections !0
+  ret i16 %res
+}
+
+define i16 @atomicrmw_min_i16(i16* %ptr, i16 %rhs) {
+  ; CHECK-LABEL: name: atomicrmw_min_i16
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK-NEXT: successors: %bb.1(0x80000000)
+  ; CHECK-NEXT: liveins: $w1, $x0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.1.atomicrmw.start:
+  ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+  ; CHECK-NEXT: liveins: $w1, $x0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+  ; CHECK-NEXT: renamable $w9 = SBFMWri renamable $w8, 0, 15, pcsections !0
+  ; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 40, implicit-def $nzcv, pcsections !0
+  ; CHECK-NEXT: renamable $w9 = CSELWr renamable $w8, renamable $w1, 13, implicit killed $nzcv, implicit-def $x9, pcsections !0
+  ; CHECK-NEXT: early-clobber renamable $w10 = STXRH renamable $w9, renamable $x0, implicit killed $x9, pcsections !0 :: (volatile store (s16) into %ir.ptr)
+  ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.2.atomicrmw.end:
+  ; CHECK-NEXT: liveins: $x8
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
+  ; CHECK-NEXT: RET undef $lr, implicit $w0
+  %res = atomicrmw min i16* %ptr, i16 %rhs acquire, !pcsections !0
+  ret i16 %res
+}
+
+define i16 @atomicrmw_max_i16(i16* %ptr, i16 %rhs) {
+  ; CHECK-LABEL: name: atomicrmw_max_i16
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK-NEXT: successors: %bb.1(0x80000000)
+  ; CHECK-NEXT: liveins: $w1, $x0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.1.atomicrmw.start:
+  ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+  ; CHECK-NEXT: liveins: $w1, $x0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+  ; CHECK-NEXT: renamable $w9 = SBFMWri renamable $w8, 0, 15, pcsections !0
+  ; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 40, implicit-def $nzcv, pcsections !0
+  ; CHECK-NEXT: renamable $w9 = CSELWr renamable $w8, renamable $w1, 12, implicit killed $nzcv, implicit-def $x9, pcsections !0
+  ; CHECK-NEXT: early-clobber renamable $w10 = STLXRH renamable $w9, renamable $x0, implicit killed $x9, pcsections !0 :: (volatile store (s16) into %ir.ptr)
+  ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.2.atomicrmw.end:
+  ; CHECK-NEXT: liveins: $x8
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
+  ; CHECK-NEXT: RET undef $lr, implicit $w0
+  %res = atomicrmw max i16* %ptr, i16 %rhs release, !pcsections !0
+  ret i16 %res
+}
+
+define i16 @atomicrmw_umin_i16(i16* %ptr, i16 %rhs) {
+  ; CHECK-LABEL: name: atomicrmw_umin_i16
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK-NEXT: successors: %bb.1(0x80000000)
+  ; CHECK-NEXT: liveins: $w1, $x0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: renamable $w9 = ANDWri killed renamable $w1, 15
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.1.atomicrmw.start:
+  ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+  ; CHECK-NEXT: liveins: $w9, $x0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+  ; CHECK-NEXT: renamable $w10 = ANDWri renamable $w8, 15
+  ; CHECK-NEXT: $wzr = SUBSWrs renamable $w10, renamable $w9, 0, implicit-def $nzcv, pcsections !0
+  ; CHECK-NEXT: renamable $w10 = CSELWr killed renamable $w10, renamable $w9, 9, implicit killed $nzcv, implicit-def $x10, pcsections !0
+  ; CHECK-NEXT: early-clobber renamable $w11 = STLXRH renamable $w10, renamable $x0, implicit killed $x10, pcsections !0 :: (volatile store (s16) into %ir.ptr)
+  ; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.2.atomicrmw.end:
+  ; CHECK-NEXT: liveins: $x8
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
+  ; CHECK-NEXT: RET undef $lr, implicit $w0
+  %res = atomicrmw umin i16* %ptr, i16 %rhs seq_cst, !pcsections !0
+  ret i16 %res
+}
+
+define i16 @atomicrmw_umax_i16(i16* %ptr, i16 %rhs) {
+  ; CHECK-LABEL: name: atomicrmw_umax_i16
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK-NEXT: successors: %bb.1(0x80000000)
+  ; CHECK-NEXT: liveins: $w1, $x0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: renamable $w9 = ANDWri killed renamable $w1, 15
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.1.atomicrmw.start:
+  ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+  ; CHECK-NEXT: liveins: $w9, $x0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+  ; CHECK-NEXT: renamable $w10 = ANDWri renamable $w8, 15
+  ; CHECK-NEXT: $wzr = SUBSWrs renamable $w10, renamable $w9, 0, implicit-def $nzcv, pcsections !0
+  ; CHECK-NEXT: renamable $w10 = CSELWr killed renamable $w10, renamable $w9, 8, implicit killed $nzcv, implicit-def $x10, pcsections !0
+  ; CHECK-NEXT: early-clobber renamable $w11 = STXRH renamable $w10, renamable $x0, implicit killed $x10, pcsections !0 :: (volatile store (s16) into %ir.ptr)
+  ; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.2.atomicrmw.end:
+  ; CHECK-NEXT: liveins: $x8
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: $w0 = ORRWrs $wzr, $w8, 0, implicit killed $x8
+  ; CHECK-NEXT: RET undef $lr, implicit $w0
+  %res = atomicrmw umax i16* %ptr, i16 %rhs monotonic, !pcsections !0
+  ret i16 %res
+}
+
+define { i8, i1 } @cmpxchg_i8(i8* %ptr, i8 %desired, i8 %new) {
+  ; CHECK-LABEL: name: cmpxchg_i8
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK-NEXT: successors: %bb.1(0x80000000)
+  ; CHECK-NEXT: liveins: $w1, $w2, $x0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: $x8 = ORRXrs $xzr, $x0, 0
+  ; CHECK-NEXT: renamable $w2 = KILL $w2, implicit-def $x2
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.1.cmpxchg.start:
+  ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.4(0x04000000)
+  ; CHECK-NEXT: liveins: $w1, $x2, $x8
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: renamable $w0 = LDXRB renamable $x8, implicit-def $x0, pcsections !0 :: (volatile load (s8) from %ir.ptr)
+  ; CHECK-NEXT: renamable $w9 = ANDWri renamable $w0, 7, pcsections !0
+  ; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 0, implicit-def $nzcv, pcsections !0
+  ; CHECK-NEXT: Bcc 1, %bb.4, implicit killed $nzcv, pcsections !0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.2.cmpxchg.trystore:
+  ; CHECK-NEXT: successors: %bb.3(0x04000000), %bb.1(0x7c000000)
+  ; CHECK-NEXT: liveins: $w1, $x0, $x2, $x8
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: early-clobber renamable $w9 = STXRB renamable $w2, renamable $x8, pcsections !0 :: (volatile store (s8) into %ir.ptr)
+  ; CHECK-NEXT: CBNZW killed renamable $w9, %bb.1
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.3:
+  ; CHECK-NEXT: liveins: $x0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: renamable $w1 = MOVZWi 1, 0
+  ; CHECK-NEXT: $w0 = KILL renamable $w0, implicit killed $x0
+  ; CHECK-NEXT: RET undef $lr, implicit $w0, implicit $w1
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.4.cmpxchg.nostore:
+  ; CHECK-NEXT: liveins: $x0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: $w1 = ORRWrs $wzr, $wzr, 0
+  ; CHECK-NEXT: CLREX 15, pcsections !0
+  ; CHECK-NEXT: $w0 = KILL renamable $w0, implicit killed $x0
+  ; CHECK-NEXT: RET undef $lr, implicit $w0, implicit $w1
+  %res = cmpxchg i8* %ptr, i8 %desired, i8 %new monotonic monotonic, !pcsections !0
+  ret { i8, i1 } %res
+}
+
+define { i16, i1 } @cmpxchg_i16(i16* %ptr, i16 %desired, i16 %new) {
+  ; CHECK-LABEL: name: cmpxchg_i16
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK-NEXT: successors: %bb.1(0x80000000)
+  ; CHECK-NEXT: liveins: $w1, $w2, $x0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: $x8 = ORRXrs $xzr, $x0, 0
+  ; CHECK-NEXT: renamable $w2 = KILL $w2, implicit-def $x2
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.1.cmpxchg.start:
+  ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.4(0x04000000)
+  ; CHECK-NEXT: liveins: $w1, $x2, $x8
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: renamable $w0 = LDXRH renamable $x8, implicit-def $x0, pcsections !0 :: (volatile load (s16) from %ir.ptr)
+  ; CHECK-NEXT: renamable $w9 = ANDWri renamable $w0, 15, pcsections !0
+  ; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 8, implicit-def $nzcv, pcsections !0
+  ; CHECK-NEXT: Bcc 1, %bb.4, implicit killed $nzcv, pcsections !0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.2.cmpxchg.trystore:
+  ; CHECK-NEXT: successors: %bb.3(0x04000000), %bb.1(0x7c000000)
+  ; CHECK-NEXT: liveins: $w1, $x0, $x2, $x8
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: early-clobber renamable $w9 = STXRH renamable $w2, renamable $x8, pcsections !0 :: (volatile store (s16) into %ir.ptr)
+  ; CHECK-NEXT: CBNZW killed renamable $w9, %bb.1
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.3:
+  ; CHECK-NEXT: liveins: $x0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: renamable $w1 = MOVZWi 1, 0
+  ; CHECK-NEXT: $w0 = KILL renamable $w0, implicit killed $x0
+  ; CHECK-NEXT: RET undef $lr, implicit $w0, implicit $w1
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.4.cmpxchg.nostore:
+  ; CHECK-NEXT: liveins: $x0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: $w1 = ORRWrs $wzr, $wzr, 0
+  ; CHECK-NEXT: CLREX 15, pcsections !0
+  ; CHECK-NEXT: $w0 = KILL renamable $w0, implicit killed $x0
+  ; CHECK-NEXT: RET undef $lr, implicit $w0, implicit $w1
+  %res = cmpxchg i16* %ptr, i16 %desired, i16 %new monotonic monotonic, !pcsections !0
+  ret { i16, i1 } %res
+}
+
+!0 = !{!"foo"}
diff --git a/llvm/test/CodeGen/AArch64/pcsections.ll b/llvm/test/CodeGen/AArch64/pcsections.ll
--- a/llvm/test/CodeGen/AArch64/pcsections.ll
+++ b/llvm/test/CodeGen/AArch64/pcsections.ll
@@ -81,7 +81,7 @@
 ; CHECK-UNOPT-NEXT: stlxr
 ; CHECK-UNOPT: .Lpcsection9:
 ; CHECK-UNOPT-NEXT: cbnz
-; CHECK-UNOPT: .Lpcsection12:
+; CHECK-UNOPT: .Lpcsection13:
 ; CHECK-UNOPT-NEXT: b
 ; ---
 ; CHECK-NOT: .Lpcsection
@@ -100,7 +100,7 @@
 ; CHECK-NEXT: .Lpcsection_base7:
 ; DEFCM-NEXT: .word .Lpcsection5-.Lpcsection_base7
 ; LARGE-NEXT: .xword .Lpcsection5-.Lpcsection_base7
-; CHECK-UNOPT: .word .Lpcsection12-.Lpcsection_base14
+; CHECK-UNOPT: .word .Lpcsection13-.Lpcsection_base15
 ; CHECK-NEXT: .text
 entry:
   %0 = atomicrmw add i64* @foo, i64 1 monotonic, align 8, !pcsections !0