Index: llvm/lib/Target/M68k/M68kISelLowering.cpp =================================================================== --- llvm/lib/Target/M68k/M68kISelLowering.cpp +++ llvm/lib/Target/M68k/M68kISelLowering.cpp @@ -157,6 +157,29 @@ computeRegisterProperties(STI.getRegisterInfo()); + // We lower the `atomic-compare-and-swap` to `__sync_val_compare_and_swap` + // for subtarget < M68020 + setMaxAtomicSizeInBitsSupported(32); + setOperationAction(ISD::ATOMIC_CMP_SWAP, {MVT::i8, MVT::i16, MVT::i32}, + Subtarget.atLeastM68020() ? Legal : LibCall); + + // M68k does not have native read-modify-write support, so expand all of them + // to `__sync_fetch_*` + setOperationAction( + { + ISD::ATOMIC_LOAD_ADD, + ISD::ATOMIC_LOAD_SUB, + ISD::ATOMIC_LOAD_AND, + ISD::ATOMIC_LOAD_OR, + ISD::ATOMIC_LOAD_XOR, + ISD::ATOMIC_LOAD_NAND, + ISD::ATOMIC_LOAD_MIN, + ISD::ATOMIC_LOAD_MAX, + ISD::ATOMIC_LOAD_UMIN, + ISD::ATOMIC_LOAD_UMAX, + }, + {MVT::i8, MVT::i16, MVT::i32}, LibCall); + // 2^2 bytes // FIXME can it be just 2^1? setMinFunctionAlignment(Align::Constant<2>()); Index: llvm/lib/Target/M68k/M68kInstrAtomics.td =================================================================== --- /dev/null +++ llvm/lib/Target/M68k/M68kInstrAtomics.td @@ -0,0 +1,44 @@ +//===-- M68kInstrAtomics.td - Atomics Instructions ---------*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +let Predicates = [AtLeastM68020] in { +class MxCASOp size_encoding, MxType type> + : MxInst<(outs type.ROp:$out), + (ins type.ROp:$dc, type.ROp:$du, !cast("MxARI"#type.Size):$mem), + "cas."#type.Prefix#" $dc, $du, $mem"> { + let Inst = (ascend + (descend 0b00001, size_encoding, 0b011, MxEncAddrMode_j<"mem">.EA), + (descend 0b0000000, (operand "$du", 3), 0b000, (operand "$dc", 3)) + ); + let Constraints = "$out = $dc"; + let mayLoad = 1; + let mayStore = 1; +} + +def CAS8 : MxCASOp<0x1, MxType8d>; +def CAS16 : MxCASOp<0x2, MxType16d>; +def CAS32 : MxCASOp<0x3, MxType32d>; + + +foreach size = [8, 16, 32] in { + def : Pat<(!cast("atomic_load_"#size) MxCP_ARI:$ptr), + (!cast("MOV"#size#"dj") !cast("MxARI"#size):$ptr)>; + + def : Pat<(!cast("atomic_store_"#size) MxCP_ARI:$ptr, + !cast("MxDRD"#size):$val), + (!cast("MOV"#size#"jd") !cast("MxARI"#size):$ptr, + !cast("MxDRD"#size):$val)>; + + def : Pat<(!cast("atomic_cmp_swap_"#size) MxCP_ARI:$ptr, + !cast("MxDRD"#size):$cmp, + !cast("MxDRD"#size):$new), + (!cast("CAS"#size) !cast("MxDRD"#size):$cmp, + !cast("MxDRD"#size):$new, + !cast("MxARI"#size):$ptr)>; +} +} Index: llvm/lib/Target/M68k/M68kInstrInfo.td =================================================================== --- llvm/lib/Target/M68k/M68kInstrInfo.td +++ llvm/lib/Target/M68k/M68kInstrInfo.td @@ -786,5 +786,6 @@ include "M68kInstrBits.td" include "M68kInstrArithmetic.td" include "M68kInstrControl.td" +include "M68kInstrAtomics.td" include "M68kInstrCompiler.td" Index: llvm/lib/Target/M68k/M68kTargetMachine.cpp =================================================================== --- llvm/lib/Target/M68k/M68kTargetMachine.cpp +++ llvm/lib/Target/M68k/M68kTargetMachine.cpp @@ -143,6 +143,7 @@ const M68kSubtarget &getM68kSubtarget() const { return *getM68kTargetMachine().getSubtargetImpl(); } + void addIRPasses() 
override; bool addIRTranslator() override; bool addLegalizeMachineIR() override; bool addRegBankSelect() override; @@ -157,6 +158,11 @@ return new M68kPassConfig(*this, PM); } +void M68kPassConfig::addIRPasses() { + addPass(createAtomicExpandPass()); + TargetPassConfig::addIRPasses(); +} + bool M68kPassConfig::addInstSelector() { // Install an instruction selector. addPass(createM68kISelDag(getM68kTargetMachine())); Index: llvm/test/CodeGen/M68k/Atomics/cmpxchg.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/M68k/Atomics/cmpxchg.ll @@ -0,0 +1,136 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc %s -o - -mtriple=m68k -mcpu=M68000 | FileCheck %s --check-prefix=NO-ATOMIC +; RUN: llc %s -o - -mtriple=m68k -mcpu=M68010 | FileCheck %s --check-prefix=NO-ATOMIC +; RUN: llc %s -o - -mtriple=m68k -mcpu=M68020 | FileCheck %s --check-prefix=ATOMIC +; RUN: llc %s -o - -mtriple=m68k -mcpu=M68030 | FileCheck %s --check-prefix=ATOMIC +; RUN: llc %s -o - -mtriple=m68k -mcpu=M68040 | FileCheck %s --check-prefix=ATOMIC + +define i1 @cmpxchg_i8_monotonic_monotonic(i8 %cmp, i8 %new, ptr %mem) nounwind { +; NO-ATOMIC-LABEL: cmpxchg_i8_monotonic_monotonic: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: suba.l #20, %sp +; NO-ATOMIC-NEXT: movem.l %d2, (16,%sp) ; 8-byte Folded Spill +; NO-ATOMIC-NEXT: move.b (31,%sp), %d0 +; NO-ATOMIC-NEXT: and.l #255, %d0 +; NO-ATOMIC-NEXT: move.l %d0, (8,%sp) +; NO-ATOMIC-NEXT: move.b (27,%sp), %d2 +; NO-ATOMIC-NEXT: move.l %d2, %d0 +; NO-ATOMIC-NEXT: and.l #255, %d0 +; NO-ATOMIC-NEXT: move.l %d0, (4,%sp) +; NO-ATOMIC-NEXT: move.l (32,%sp), (%sp) +; NO-ATOMIC-NEXT: jsr __sync_val_compare_and_swap_1@PLT +; NO-ATOMIC-NEXT: sub.b %d2, %d0 +; NO-ATOMIC-NEXT: seq %d0 +; NO-ATOMIC-NEXT: movem.l (16,%sp), %d2 ; 8-byte Folded Reload +; NO-ATOMIC-NEXT: adda.l #20, %sp +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: cmpxchg_i8_monotonic_monotonic: +; ATOMIC: ; %bb.0: +; 
ATOMIC-NEXT: suba.l #4, %sp +; ATOMIC-NEXT: movem.l %d2, (0,%sp) ; 8-byte Folded Spill +; ATOMIC-NEXT: move.l (16,%sp), %a0 +; ATOMIC-NEXT: move.b (15,%sp), %d0 +; ATOMIC-NEXT: move.b (11,%sp), %d1 +; ATOMIC-NEXT: move.b %d1, %d2 +; ATOMIC-NEXT: cas.b %d2, %d0, (%a0) +; ATOMIC-NEXT: sub.b %d1, %d2 +; ATOMIC-NEXT: seq %d0 +; ATOMIC-NEXT: movem.l (0,%sp), %d2 ; 8-byte Folded Reload +; ATOMIC-NEXT: adda.l #4, %sp +; ATOMIC-NEXT: rts + %res = cmpxchg ptr %mem, i8 %cmp, i8 %new monotonic monotonic + %val = extractvalue {i8, i1} %res, 1 + ret i1 %val +} + +define i16 @cmpxchg_i16_release_monotonic(i16 %cmp, i16 %new, ptr %mem) nounwind { +; NO-ATOMIC-LABEL: cmpxchg_i16_release_monotonic: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: suba.l #12, %sp +; NO-ATOMIC-NEXT: move.w (22,%sp), %d0 +; NO-ATOMIC-NEXT: and.l #65535, %d0 +; NO-ATOMIC-NEXT: move.l %d0, (8,%sp) +; NO-ATOMIC-NEXT: move.w (18,%sp), %d0 +; NO-ATOMIC-NEXT: and.l #65535, %d0 +; NO-ATOMIC-NEXT: move.l %d0, (4,%sp) +; NO-ATOMIC-NEXT: move.l (24,%sp), (%sp) +; NO-ATOMIC-NEXT: jsr __sync_val_compare_and_swap_2@PLT +; NO-ATOMIC-NEXT: adda.l #12, %sp +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: cmpxchg_i16_release_monotonic: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.l (12,%sp), %a0 +; ATOMIC-NEXT: move.w (10,%sp), %d1 +; ATOMIC-NEXT: move.w (6,%sp), %d0 +; ATOMIC-NEXT: cas.w %d0, %d1, (%a0) +; ATOMIC-NEXT: rts + %res = cmpxchg ptr %mem, i16 %cmp, i16 %new release monotonic + %val = extractvalue {i16, i1} %res, 0 + ret i16 %val +} + +define i32 @cmpxchg_i32_release_acquire(i32 %cmp, i32 %new, ptr %mem) nounwind { +; NO-ATOMIC-LABEL: cmpxchg_i32_release_acquire: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: suba.l #12, %sp +; NO-ATOMIC-NEXT: move.l (20,%sp), (8,%sp) +; NO-ATOMIC-NEXT: move.l (16,%sp), (4,%sp) +; NO-ATOMIC-NEXT: move.l (24,%sp), (%sp) +; NO-ATOMIC-NEXT: jsr __sync_val_compare_and_swap_4@PLT +; NO-ATOMIC-NEXT: adda.l #12, %sp +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: cmpxchg_i32_release_acquire: +; ATOMIC: ; 
%bb.0: +; ATOMIC-NEXT: move.l (12,%sp), %a0 +; ATOMIC-NEXT: move.l (8,%sp), %d1 +; ATOMIC-NEXT: move.l (4,%sp), %d0 +; ATOMIC-NEXT: cas.l %d0, %d1, (%a0) +; ATOMIC-NEXT: rts + %res = cmpxchg ptr %mem, i32 %cmp, i32 %new release acquire + %val = extractvalue {i32, i1} %res, 0 + ret i32 %val +} + +define i64 @cmpxchg_i64_seqcst_seqcst(i64 %cmp, i64 %new, ptr %mem) nounwind { +; NO-ATOMIC-LABEL: cmpxchg_i64_seqcst_seqcst: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: suba.l #36, %sp +; NO-ATOMIC-NEXT: move.l (44,%sp), (28,%sp) +; NO-ATOMIC-NEXT: move.l (40,%sp), (24,%sp) +; NO-ATOMIC-NEXT: lea (24,%sp), %a0 +; NO-ATOMIC-NEXT: move.l %a0, (4,%sp) +; NO-ATOMIC-NEXT: move.l #5, (20,%sp) +; NO-ATOMIC-NEXT: move.l #5, (16,%sp) +; NO-ATOMIC-NEXT: move.l (52,%sp), (12,%sp) +; NO-ATOMIC-NEXT: move.l (48,%sp), (8,%sp) +; NO-ATOMIC-NEXT: move.l (56,%sp), (%sp) +; NO-ATOMIC-NEXT: jsr __atomic_compare_exchange_8@PLT +; NO-ATOMIC-NEXT: move.l (28,%sp), %d1 +; NO-ATOMIC-NEXT: move.l (24,%sp), %d0 +; NO-ATOMIC-NEXT: adda.l #36, %sp +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: cmpxchg_i64_seqcst_seqcst: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: suba.l #36, %sp +; ATOMIC-NEXT: move.l (44,%sp), (28,%sp) +; ATOMIC-NEXT: move.l (40,%sp), (24,%sp) +; ATOMIC-NEXT: lea (24,%sp), %a0 +; ATOMIC-NEXT: move.l %a0, (4,%sp) +; ATOMIC-NEXT: move.l #5, (20,%sp) +; ATOMIC-NEXT: move.l #5, (16,%sp) +; ATOMIC-NEXT: move.l (52,%sp), (12,%sp) +; ATOMIC-NEXT: move.l (48,%sp), (8,%sp) +; ATOMIC-NEXT: move.l (56,%sp), (%sp) +; ATOMIC-NEXT: jsr __atomic_compare_exchange_8@PLT +; ATOMIC-NEXT: move.l (28,%sp), %d1 +; ATOMIC-NEXT: move.l (24,%sp), %d0 +; ATOMIC-NEXT: adda.l #36, %sp +; ATOMIC-NEXT: rts + %res = cmpxchg ptr %mem, i64 %cmp, i64 %new seq_cst seq_cst + %val = extractvalue {i64, i1} %res, 0 + ret i64 %val +} Index: llvm/test/CodeGen/M68k/Atomics/load-store.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/M68k/Atomics/load-store.ll @@ -0,0 +1,606 @@ +; 
NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc %s -o - -mtriple=m68k -mcpu=M68000 | FileCheck %s --check-prefix=NO-ATOMIC +; RUN: llc %s -o - -mtriple=m68k -mcpu=M68010 | FileCheck %s --check-prefix=NO-ATOMIC +; RUN: llc %s -o - -mtriple=m68k -mcpu=M68020 | FileCheck %s --check-prefix=ATOMIC +; RUN: llc %s -o - -mtriple=m68k -mcpu=M68030 | FileCheck %s --check-prefix=ATOMIC +; RUN: llc %s -o - -mtriple=m68k -mcpu=M68040 | FileCheck %s --check-prefix=ATOMIC + +define i8 @atomic_load_i8_unordered(i8 *%a) nounwind { +; NO-ATOMIC-LABEL: atomic_load_i8_unordered: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; NO-ATOMIC-NEXT: move.b (%a0), %d0 +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_load_i8_unordered: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.b (%a0), %d0 +; ATOMIC-NEXT: rts + %1 = load atomic i8, i8* %a unordered, align 1 + ret i8 %1 +} + +define i8 @atomic_load_i8_monotonic(i8 *%a) nounwind { +; NO-ATOMIC-LABEL: atomic_load_i8_monotonic: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; NO-ATOMIC-NEXT: move.b (%a0), %d0 +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_load_i8_monotonic: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.b (%a0), %d0 +; ATOMIC-NEXT: rts + %1 = load atomic i8, i8* %a monotonic, align 1 + ret i8 %1 +} + +define i8 @atomic_load_i8_acquire(i8 *%a) nounwind { +; NO-ATOMIC-LABEL: atomic_load_i8_acquire: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; NO-ATOMIC-NEXT: move.b (%a0), %d0 +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_load_i8_acquire: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.b (%a0), %d0 +; ATOMIC-NEXT: rts + %1 = load atomic i8, i8* %a acquire, align 1 + ret i8 %1 +} + +define i8 @atomic_load_i8_seq_cst(i8 *%a) nounwind { +; NO-ATOMIC-LABEL: atomic_load_i8_seq_cst: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; 
NO-ATOMIC-NEXT: move.b (%a0), %d0 +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_load_i8_seq_cst: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.b (%a0), %d0 +; ATOMIC-NEXT: rts + %1 = load atomic i8, i8* %a seq_cst, align 1 + ret i8 %1 +} + +define i16 @atomic_load_i16_unordered(i16 *%a) nounwind { +; NO-ATOMIC-LABEL: atomic_load_i16_unordered: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; NO-ATOMIC-NEXT: move.w (%a0), %d0 +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_load_i16_unordered: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.w (%a0), %d0 +; ATOMIC-NEXT: rts + %1 = load atomic i16, i16* %a unordered, align 2 + ret i16 %1 +} + +define i16 @atomic_load_i16_monotonic(i16 *%a) nounwind { +; NO-ATOMIC-LABEL: atomic_load_i16_monotonic: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; NO-ATOMIC-NEXT: move.w (%a0), %d0 +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_load_i16_monotonic: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.w (%a0), %d0 +; ATOMIC-NEXT: rts + %1 = load atomic i16, i16* %a monotonic, align 2 + ret i16 %1 +} + +define i16 @atomic_load_i16_acquire(i16 *%a) nounwind { +; NO-ATOMIC-LABEL: atomic_load_i16_acquire: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; NO-ATOMIC-NEXT: move.w (%a0), %d0 +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_load_i16_acquire: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.w (%a0), %d0 +; ATOMIC-NEXT: rts + %1 = load atomic i16, i16* %a acquire, align 2 + ret i16 %1 +} + +define i16 @atomic_load_i16_seq_cst(i16 *%a) nounwind { +; NO-ATOMIC-LABEL: atomic_load_i16_seq_cst: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; NO-ATOMIC-NEXT: move.w (%a0), %d0 +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_load_i16_seq_cst: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.w (%a0), %d0 +; 
ATOMIC-NEXT: rts + %1 = load atomic i16, i16* %a seq_cst, align 2 + ret i16 %1 +} + +define i32 @atomic_load_i32_unordered(i32 *%a) nounwind { +; NO-ATOMIC-LABEL: atomic_load_i32_unordered: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; NO-ATOMIC-NEXT: move.l (%a0), %d0 +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_load_i32_unordered: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.l (%a0), %d0 +; ATOMIC-NEXT: rts + %1 = load atomic i32, i32* %a unordered, align 4 + ret i32 %1 +} + +define i32 @atomic_load_i32_monotonic(i32 *%a) nounwind { +; NO-ATOMIC-LABEL: atomic_load_i32_monotonic: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; NO-ATOMIC-NEXT: move.l (%a0), %d0 +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_load_i32_monotonic: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.l (%a0), %d0 +; ATOMIC-NEXT: rts + %1 = load atomic i32, i32* %a monotonic, align 4 + ret i32 %1 +} + +define i32 @atomic_load_i32_acquire(i32 *%a) nounwind { +; NO-ATOMIC-LABEL: atomic_load_i32_acquire: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; NO-ATOMIC-NEXT: move.l (%a0), %d0 +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_load_i32_acquire: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.l (%a0), %d0 +; ATOMIC-NEXT: rts + %1 = load atomic i32, i32* %a acquire, align 4 + ret i32 %1 +} + +define i32 @atomic_load_i32_seq_cst(i32 *%a) nounwind { +; NO-ATOMIC-LABEL: atomic_load_i32_seq_cst: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; NO-ATOMIC-NEXT: move.l (%a0), %d0 +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_load_i32_seq_cst: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.l (%a0), %d0 +; ATOMIC-NEXT: rts + %1 = load atomic i32, i32* %a seq_cst, align 4 + ret i32 %1 +} + +define i64 @atomic_load_i64_unordered(i64 *%a) nounwind { +; NO-ATOMIC-LABEL: atomic_load_i64_unordered: +; 
NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: suba.l #12, %sp +; NO-ATOMIC-NEXT: move.l #0, (4,%sp) +; NO-ATOMIC-NEXT: move.l (16,%sp), (%sp) +; NO-ATOMIC-NEXT: jsr __atomic_load_8@PLT +; NO-ATOMIC-NEXT: adda.l #12, %sp +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_load_i64_unordered: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: suba.l #12, %sp +; ATOMIC-NEXT: move.l #0, (4,%sp) +; ATOMIC-NEXT: move.l (16,%sp), (%sp) +; ATOMIC-NEXT: jsr __atomic_load_8@PLT +; ATOMIC-NEXT: adda.l #12, %sp +; ATOMIC-NEXT: rts + %1 = load atomic i64, i64* %a unordered, align 8 + ret i64 %1 +} + +define i64 @atomic_load_i64_monotonic(i64 *%a) nounwind { +; NO-ATOMIC-LABEL: atomic_load_i64_monotonic: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: suba.l #12, %sp +; NO-ATOMIC-NEXT: move.l #0, (4,%sp) +; NO-ATOMIC-NEXT: move.l (16,%sp), (%sp) +; NO-ATOMIC-NEXT: jsr __atomic_load_8@PLT +; NO-ATOMIC-NEXT: adda.l #12, %sp +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_load_i64_monotonic: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: suba.l #12, %sp +; ATOMIC-NEXT: move.l #0, (4,%sp) +; ATOMIC-NEXT: move.l (16,%sp), (%sp) +; ATOMIC-NEXT: jsr __atomic_load_8@PLT +; ATOMIC-NEXT: adda.l #12, %sp +; ATOMIC-NEXT: rts + %1 = load atomic i64, i64* %a monotonic, align 8 + ret i64 %1 +} + +define i64 @atomic_load_i64_acquire(i64 *%a) nounwind { +; NO-ATOMIC-LABEL: atomic_load_i64_acquire: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: suba.l #12, %sp +; NO-ATOMIC-NEXT: move.l #2, (4,%sp) +; NO-ATOMIC-NEXT: move.l (16,%sp), (%sp) +; NO-ATOMIC-NEXT: jsr __atomic_load_8@PLT +; NO-ATOMIC-NEXT: adda.l #12, %sp +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_load_i64_acquire: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: suba.l #12, %sp +; ATOMIC-NEXT: move.l #2, (4,%sp) +; ATOMIC-NEXT: move.l (16,%sp), (%sp) +; ATOMIC-NEXT: jsr __atomic_load_8@PLT +; ATOMIC-NEXT: adda.l #12, %sp +; ATOMIC-NEXT: rts + %1 = load atomic i64, i64* %a acquire, align 8 + ret i64 %1 +} + +define i64 @atomic_load_i64_seq_cst(i64 *%a) nounwind { +; NO-ATOMIC-LABEL: 
atomic_load_i64_seq_cst: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: suba.l #12, %sp +; NO-ATOMIC-NEXT: move.l #5, (4,%sp) +; NO-ATOMIC-NEXT: move.l (16,%sp), (%sp) +; NO-ATOMIC-NEXT: jsr __atomic_load_8@PLT +; NO-ATOMIC-NEXT: adda.l #12, %sp +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_load_i64_seq_cst: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: suba.l #12, %sp +; ATOMIC-NEXT: move.l #5, (4,%sp) +; ATOMIC-NEXT: move.l (16,%sp), (%sp) +; ATOMIC-NEXT: jsr __atomic_load_8@PLT +; ATOMIC-NEXT: adda.l #12, %sp +; ATOMIC-NEXT: rts + %1 = load atomic i64, i64* %a seq_cst, align 8 + ret i64 %1 +} + +define void @atomic_store_i8_unordered(i8 *%a, i8 %val) nounwind { +; NO-ATOMIC-LABEL: atomic_store_i8_unordered: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.b (11,%sp), %d0 +; NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; NO-ATOMIC-NEXT: move.b %d0, (%a0) +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_store_i8_unordered: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.b (11,%sp), %d0 +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.b %d0, (%a0) +; ATOMIC-NEXT: rts + store atomic i8 %val, i8* %a unordered, align 1 + ret void +} + +define void @atomic_store_i8_monotonic(i8 *%a, i8 %val) nounwind { +; NO-ATOMIC-LABEL: atomic_store_i8_monotonic: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.b (11,%sp), %d0 +; NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; NO-ATOMIC-NEXT: move.b %d0, (%a0) +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_store_i8_monotonic: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.b (11,%sp), %d0 +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.b %d0, (%a0) +; ATOMIC-NEXT: rts + store atomic i8 %val, i8* %a monotonic, align 1 + ret void +} + +define void @atomic_store_i8_release(i8 *%a, i8 %val) nounwind { +; NO-ATOMIC-LABEL: atomic_store_i8_release: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.b (11,%sp), %d0 +; NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; NO-ATOMIC-NEXT: move.b %d0, (%a0) +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_store_i8_release: +; ATOMIC: ; 
%bb.0: +; ATOMIC-NEXT: move.b (11,%sp), %d0 +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.b %d0, (%a0) +; ATOMIC-NEXT: rts + store atomic i8 %val, i8* %a release, align 1 + ret void +} + +define void @atomic_store_i8_seq_cst(i8 *%a, i8 %val) nounwind { +; NO-ATOMIC-LABEL: atomic_store_i8_seq_cst: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.b (11,%sp), %d0 +; NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; NO-ATOMIC-NEXT: move.b %d0, (%a0) +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_store_i8_seq_cst: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.b (11,%sp), %d0 +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.b %d0, (%a0) +; ATOMIC-NEXT: rts + store atomic i8 %val, i8* %a seq_cst, align 1 + ret void +} + +define void @atomic_store_i16_unordered(i16 *%a, i16 %val) nounwind { +; NO-ATOMIC-LABEL: atomic_store_i16_unordered: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.w (10,%sp), %d0 +; NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; NO-ATOMIC-NEXT: move.w %d0, (%a0) +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_store_i16_unordered: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.w (10,%sp), %d0 +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.w %d0, (%a0) +; ATOMIC-NEXT: rts + store atomic i16 %val, i16* %a unordered, align 2 + ret void +} + +define void @atomic_store_i16_monotonic(i16 *%a, i16 %val) nounwind { +; NO-ATOMIC-LABEL: atomic_store_i16_monotonic: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.w (10,%sp), %d0 +; NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; NO-ATOMIC-NEXT: move.w %d0, (%a0) +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_store_i16_monotonic: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.w (10,%sp), %d0 +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.w %d0, (%a0) +; ATOMIC-NEXT: rts + store atomic i16 %val, i16* %a monotonic, align 2 + ret void +} + +define void @atomic_store_i16_release(i16 *%a, i16 %val) nounwind { +; NO-ATOMIC-LABEL: atomic_store_i16_release: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.w (10,%sp), %d0 +; 
NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; NO-ATOMIC-NEXT: move.w %d0, (%a0) +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_store_i16_release: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.w (10,%sp), %d0 +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.w %d0, (%a0) +; ATOMIC-NEXT: rts + store atomic i16 %val, i16* %a release, align 2 + ret void +} + +define void @atomic_store_i16_seq_cst(i16 *%a, i16 %val) nounwind { +; NO-ATOMIC-LABEL: atomic_store_i16_seq_cst: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.w (10,%sp), %d0 +; NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; NO-ATOMIC-NEXT: move.w %d0, (%a0) +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_store_i16_seq_cst: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.w (10,%sp), %d0 +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.w %d0, (%a0) +; ATOMIC-NEXT: rts + store atomic i16 %val, i16* %a seq_cst, align 2 + ret void +} + +define void @atomic_store_i32_unordered(i32 *%a, i32 %val) nounwind { +; NO-ATOMIC-LABEL: atomic_store_i32_unordered: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.l (8,%sp), %d0 +; NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; NO-ATOMIC-NEXT: move.l %d0, (%a0) +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_store_i32_unordered: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.l (8,%sp), %d0 +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.l %d0, (%a0) +; ATOMIC-NEXT: rts + store atomic i32 %val, i32* %a unordered, align 4 + ret void +} + +define void @atomic_store_i32_monotonic(i32 *%a, i32 %val) nounwind { +; NO-ATOMIC-LABEL: atomic_store_i32_monotonic: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.l (8,%sp), %d0 +; NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; NO-ATOMIC-NEXT: move.l %d0, (%a0) +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_store_i32_monotonic: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.l (8,%sp), %d0 +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.l %d0, (%a0) +; ATOMIC-NEXT: rts + store atomic i32 %val, i32* %a monotonic, align 4 + ret void +} + +define void 
@atomic_store_i32_release(i32 *%a, i32 %val) nounwind { +; NO-ATOMIC-LABEL: atomic_store_i32_release: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.l (8,%sp), %d0 +; NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; NO-ATOMIC-NEXT: move.l %d0, (%a0) +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_store_i32_release: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.l (8,%sp), %d0 +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.l %d0, (%a0) +; ATOMIC-NEXT: rts + store atomic i32 %val, i32* %a release, align 4 + ret void +} + +define void @atomic_store_i32_seq_cst(i32 *%a, i32 %val) nounwind { +; NO-ATOMIC-LABEL: atomic_store_i32_seq_cst: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: move.l (8,%sp), %d0 +; NO-ATOMIC-NEXT: move.l (4,%sp), %a0 +; NO-ATOMIC-NEXT: move.l %d0, (%a0) +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_store_i32_seq_cst: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: move.l (8,%sp), %d0 +; ATOMIC-NEXT: move.l (4,%sp), %a0 +; ATOMIC-NEXT: move.l %d0, (%a0) +; ATOMIC-NEXT: rts + store atomic i32 %val, i32* %a seq_cst, align 4 + ret void +} + +define void @atomic_store_i64_unordered(i64 *%a, i64 %val) nounwind { +; NO-ATOMIC-LABEL: atomic_store_i64_unordered: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: suba.l #20, %sp +; NO-ATOMIC-NEXT: move.l #0, (12,%sp) +; NO-ATOMIC-NEXT: move.l (32,%sp), (8,%sp) +; NO-ATOMIC-NEXT: move.l (28,%sp), (4,%sp) +; NO-ATOMIC-NEXT: move.l (24,%sp), (%sp) +; NO-ATOMIC-NEXT: jsr __atomic_store_8@PLT +; NO-ATOMIC-NEXT: adda.l #20, %sp +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_store_i64_unordered: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: suba.l #20, %sp +; ATOMIC-NEXT: move.l #0, (12,%sp) +; ATOMIC-NEXT: move.l (32,%sp), (8,%sp) +; ATOMIC-NEXT: move.l (28,%sp), (4,%sp) +; ATOMIC-NEXT: move.l (24,%sp), (%sp) +; ATOMIC-NEXT: jsr __atomic_store_8@PLT +; ATOMIC-NEXT: adda.l #20, %sp +; ATOMIC-NEXT: rts + store atomic i64 %val, i64* %a unordered, align 8 + ret void +} + +define void @atomic_store_i64_monotonic(i64 *%a, i64 %val) nounwind { +; 
NO-ATOMIC-LABEL: atomic_store_i64_monotonic: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: suba.l #20, %sp +; NO-ATOMIC-NEXT: move.l #0, (12,%sp) +; NO-ATOMIC-NEXT: move.l (32,%sp), (8,%sp) +; NO-ATOMIC-NEXT: move.l (28,%sp), (4,%sp) +; NO-ATOMIC-NEXT: move.l (24,%sp), (%sp) +; NO-ATOMIC-NEXT: jsr __atomic_store_8@PLT +; NO-ATOMIC-NEXT: adda.l #20, %sp +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_store_i64_monotonic: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: suba.l #20, %sp +; ATOMIC-NEXT: move.l #0, (12,%sp) +; ATOMIC-NEXT: move.l (32,%sp), (8,%sp) +; ATOMIC-NEXT: move.l (28,%sp), (4,%sp) +; ATOMIC-NEXT: move.l (24,%sp), (%sp) +; ATOMIC-NEXT: jsr __atomic_store_8@PLT +; ATOMIC-NEXT: adda.l #20, %sp +; ATOMIC-NEXT: rts + store atomic i64 %val, i64* %a monotonic, align 8 + ret void +} + +define void @atomic_store_i64_release(i64 *%a, i64 %val) nounwind { +; NO-ATOMIC-LABEL: atomic_store_i64_release: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: suba.l #20, %sp +; NO-ATOMIC-NEXT: move.l #3, (12,%sp) +; NO-ATOMIC-NEXT: move.l (32,%sp), (8,%sp) +; NO-ATOMIC-NEXT: move.l (28,%sp), (4,%sp) +; NO-ATOMIC-NEXT: move.l (24,%sp), (%sp) +; NO-ATOMIC-NEXT: jsr __atomic_store_8@PLT +; NO-ATOMIC-NEXT: adda.l #20, %sp +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_store_i64_release: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: suba.l #20, %sp +; ATOMIC-NEXT: move.l #3, (12,%sp) +; ATOMIC-NEXT: move.l (32,%sp), (8,%sp) +; ATOMIC-NEXT: move.l (28,%sp), (4,%sp) +; ATOMIC-NEXT: move.l (24,%sp), (%sp) +; ATOMIC-NEXT: jsr __atomic_store_8@PLT +; ATOMIC-NEXT: adda.l #20, %sp +; ATOMIC-NEXT: rts + store atomic i64 %val, i64* %a release, align 8 + ret void +} + +define void @atomic_store_i64_seq_cst(i64 *%a, i64 %val) nounwind { +; NO-ATOMIC-LABEL: atomic_store_i64_seq_cst: +; NO-ATOMIC: ; %bb.0: +; NO-ATOMIC-NEXT: suba.l #20, %sp +; NO-ATOMIC-NEXT: move.l #5, (12,%sp) +; NO-ATOMIC-NEXT: move.l (32,%sp), (8,%sp) +; NO-ATOMIC-NEXT: move.l (28,%sp), (4,%sp) +; NO-ATOMIC-NEXT: move.l (24,%sp), (%sp) +; 
NO-ATOMIC-NEXT: jsr __atomic_store_8@PLT +; NO-ATOMIC-NEXT: adda.l #20, %sp +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomic_store_i64_seq_cst: +; ATOMIC: ; %bb.0: +; ATOMIC-NEXT: suba.l #20, %sp +; ATOMIC-NEXT: move.l #5, (12,%sp) +; ATOMIC-NEXT: move.l (32,%sp), (8,%sp) +; ATOMIC-NEXT: move.l (28,%sp), (4,%sp) +; ATOMIC-NEXT: move.l (24,%sp), (%sp) +; ATOMIC-NEXT: jsr __atomic_store_8@PLT +; ATOMIC-NEXT: adda.l #20, %sp +; ATOMIC-NEXT: rts + store atomic i64 %val, i64* %a seq_cst, align 8 + ret void +} Index: llvm/test/CodeGen/M68k/Atomics/rmw.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/M68k/Atomics/rmw.ll @@ -0,0 +1,376 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc %s -o - -mtriple=m68k -mcpu=M68000 | FileCheck %s --check-prefix=NO-ATOMIC +; RUN: llc %s -o - -mtriple=m68k -mcpu=M68010 | FileCheck %s --check-prefix=NO-ATOMIC +; RUN: llc %s -o - -mtriple=m68k -mcpu=M68020 | FileCheck %s --check-prefix=ATOMIC +; RUN: llc %s -o - -mtriple=m68k -mcpu=M68030 | FileCheck %s --check-prefix=ATOMIC +; RUN: llc %s -o - -mtriple=m68k -mcpu=M68040 | FileCheck %s --check-prefix=ATOMIC + +define i8 @atomicrmw_add_i8(i8 %val, ptr %ptr) { +; NO-ATOMIC-LABEL: atomicrmw_add_i8: +; NO-ATOMIC: .cfi_startproc +; NO-ATOMIC-NEXT: ; %bb.0: +; NO-ATOMIC-NEXT: suba.l #12, %sp +; NO-ATOMIC-NEXT: .cfi_def_cfa_offset -16 +; NO-ATOMIC-NEXT: move.b (19,%sp), %d0 +; NO-ATOMIC-NEXT: and.l #255, %d0 +; NO-ATOMIC-NEXT: move.l %d0, (4,%sp) +; NO-ATOMIC-NEXT: move.l (20,%sp), (%sp) +; NO-ATOMIC-NEXT: jsr __sync_fetch_and_add_1@PLT +; NO-ATOMIC-NEXT: adda.l #12, %sp +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomicrmw_add_i8: +; ATOMIC: .cfi_startproc +; ATOMIC-NEXT: ; %bb.0: +; ATOMIC-NEXT: suba.l #12, %sp +; ATOMIC-NEXT: .cfi_def_cfa_offset -16 +; ATOMIC-NEXT: move.b (19,%sp), %d0 +; ATOMIC-NEXT: and.l #255, %d0 +; ATOMIC-NEXT: move.l %d0, (4,%sp) +; ATOMIC-NEXT: move.l (20,%sp), (%sp) +; 
ATOMIC-NEXT: jsr __sync_fetch_and_add_1@PLT +; ATOMIC-NEXT: adda.l #12, %sp +; ATOMIC-NEXT: rts + %old = atomicrmw add ptr %ptr, i8 %val monotonic + ret i8 %old +} + +define i16 @atomicrmw_sub_i16(i16 %val, ptr %ptr) { +; NO-ATOMIC-LABEL: atomicrmw_sub_i16: +; NO-ATOMIC: .cfi_startproc +; NO-ATOMIC-NEXT: ; %bb.0: +; NO-ATOMIC-NEXT: suba.l #12, %sp +; NO-ATOMIC-NEXT: .cfi_def_cfa_offset -16 +; NO-ATOMIC-NEXT: move.w (18,%sp), %d0 +; NO-ATOMIC-NEXT: and.l #65535, %d0 +; NO-ATOMIC-NEXT: move.l %d0, (4,%sp) +; NO-ATOMIC-NEXT: move.l (20,%sp), (%sp) +; NO-ATOMIC-NEXT: jsr __sync_fetch_and_sub_2@PLT +; NO-ATOMIC-NEXT: adda.l #12, %sp +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomicrmw_sub_i16: +; ATOMIC: .cfi_startproc +; ATOMIC-NEXT: ; %bb.0: +; ATOMIC-NEXT: suba.l #12, %sp +; ATOMIC-NEXT: .cfi_def_cfa_offset -16 +; ATOMIC-NEXT: move.w (18,%sp), %d0 +; ATOMIC-NEXT: and.l #65535, %d0 +; ATOMIC-NEXT: move.l %d0, (4,%sp) +; ATOMIC-NEXT: move.l (20,%sp), (%sp) +; ATOMIC-NEXT: jsr __sync_fetch_and_sub_2@PLT +; ATOMIC-NEXT: adda.l #12, %sp +; ATOMIC-NEXT: rts + %old = atomicrmw sub ptr %ptr, i16 %val acquire + ret i16 %old +} + +define i32 @atomicrmw_and_i32(i32 %val, ptr %ptr) { +; NO-ATOMIC-LABEL: atomicrmw_and_i32: +; NO-ATOMIC: .cfi_startproc +; NO-ATOMIC-NEXT: ; %bb.0: +; NO-ATOMIC-NEXT: suba.l #12, %sp +; NO-ATOMIC-NEXT: .cfi_def_cfa_offset -16 +; NO-ATOMIC-NEXT: move.l (16,%sp), (4,%sp) +; NO-ATOMIC-NEXT: move.l (20,%sp), (%sp) +; NO-ATOMIC-NEXT: jsr __sync_fetch_and_and_4@PLT +; NO-ATOMIC-NEXT: adda.l #12, %sp +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomicrmw_and_i32: +; ATOMIC: .cfi_startproc +; ATOMIC-NEXT: ; %bb.0: +; ATOMIC-NEXT: suba.l #12, %sp +; ATOMIC-NEXT: .cfi_def_cfa_offset -16 +; ATOMIC-NEXT: move.l (16,%sp), (4,%sp) +; ATOMIC-NEXT: move.l (20,%sp), (%sp) +; ATOMIC-NEXT: jsr __sync_fetch_and_and_4@PLT +; ATOMIC-NEXT: adda.l #12, %sp +; ATOMIC-NEXT: rts + %old = atomicrmw and ptr %ptr, i32 %val seq_cst + ret i32 %old +} + +define i64 
@atomicrmw_xor_i64(i64 %val, ptr %ptr) { +; NO-ATOMIC-LABEL: atomicrmw_xor_i64: +; NO-ATOMIC: .cfi_startproc +; NO-ATOMIC-NEXT: ; %bb.0: +; NO-ATOMIC-NEXT: suba.l #20, %sp +; NO-ATOMIC-NEXT: .cfi_def_cfa_offset -24 +; NO-ATOMIC-NEXT: move.l #3, (12,%sp) +; NO-ATOMIC-NEXT: move.l (28,%sp), (8,%sp) +; NO-ATOMIC-NEXT: move.l (24,%sp), (4,%sp) +; NO-ATOMIC-NEXT: move.l (32,%sp), (%sp) +; NO-ATOMIC-NEXT: jsr __atomic_fetch_xor_8@PLT +; NO-ATOMIC-NEXT: adda.l #20, %sp +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomicrmw_xor_i64: +; ATOMIC: .cfi_startproc +; ATOMIC-NEXT: ; %bb.0: +; ATOMIC-NEXT: suba.l #20, %sp +; ATOMIC-NEXT: .cfi_def_cfa_offset -24 +; ATOMIC-NEXT: move.l #3, (12,%sp) +; ATOMIC-NEXT: move.l (28,%sp), (8,%sp) +; ATOMIC-NEXT: move.l (24,%sp), (4,%sp) +; ATOMIC-NEXT: move.l (32,%sp), (%sp) +; ATOMIC-NEXT: jsr __atomic_fetch_xor_8@PLT +; ATOMIC-NEXT: adda.l #20, %sp +; ATOMIC-NEXT: rts + %old = atomicrmw xor ptr %ptr, i64 %val release + ret i64 %old +} + +define i8 @atomicrmw_or_i8(i8 %val, ptr %ptr) { +; NO-ATOMIC-LABEL: atomicrmw_or_i8: +; NO-ATOMIC: .cfi_startproc +; NO-ATOMIC-NEXT: ; %bb.0: +; NO-ATOMIC-NEXT: suba.l #12, %sp +; NO-ATOMIC-NEXT: .cfi_def_cfa_offset -16 +; NO-ATOMIC-NEXT: move.b (19,%sp), %d0 +; NO-ATOMIC-NEXT: and.l #255, %d0 +; NO-ATOMIC-NEXT: move.l %d0, (4,%sp) +; NO-ATOMIC-NEXT: move.l (20,%sp), (%sp) +; NO-ATOMIC-NEXT: jsr __sync_fetch_and_or_1@PLT +; NO-ATOMIC-NEXT: adda.l #12, %sp +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomicrmw_or_i8: +; ATOMIC: .cfi_startproc +; ATOMIC-NEXT: ; %bb.0: +; ATOMIC-NEXT: suba.l #12, %sp +; ATOMIC-NEXT: .cfi_def_cfa_offset -16 +; ATOMIC-NEXT: move.b (19,%sp), %d0 +; ATOMIC-NEXT: and.l #255, %d0 +; ATOMIC-NEXT: move.l %d0, (4,%sp) +; ATOMIC-NEXT: move.l (20,%sp), (%sp) +; ATOMIC-NEXT: jsr __sync_fetch_and_or_1@PLT +; ATOMIC-NEXT: adda.l #12, %sp +; ATOMIC-NEXT: rts + %old = atomicrmw or ptr %ptr, i8 %val monotonic + ret i8 %old +} + +define i16 @atomicrmw_nand_i16(i16 %val, ptr %ptr) { +; 
NO-ATOMIC-LABEL: atomicrmw_nand_i16: +; NO-ATOMIC: .cfi_startproc +; NO-ATOMIC-NEXT: ; %bb.0: +; NO-ATOMIC-NEXT: suba.l #12, %sp +; NO-ATOMIC-NEXT: .cfi_def_cfa_offset -16 +; NO-ATOMIC-NEXT: movem.l %d2, (8,%sp) ; 8-byte Folded Spill +; NO-ATOMIC-NEXT: move.w (18,%sp), %d2 +; NO-ATOMIC-NEXT: move.l %d2, %d0 +; NO-ATOMIC-NEXT: and.l #65535, %d0 +; NO-ATOMIC-NEXT: move.l %d0, (4,%sp) +; NO-ATOMIC-NEXT: move.l (20,%sp), (%sp) +; NO-ATOMIC-NEXT: jsr __sync_fetch_and_nand_2@PLT +; NO-ATOMIC-NEXT: move.w %d2, %d0 +; NO-ATOMIC-NEXT: movem.l (8,%sp), %d2 ; 8-byte Folded Reload +; NO-ATOMIC-NEXT: adda.l #12, %sp +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomicrmw_nand_i16: +; ATOMIC: .cfi_startproc +; ATOMIC-NEXT: ; %bb.0: +; ATOMIC-NEXT: suba.l #12, %sp +; ATOMIC-NEXT: .cfi_def_cfa_offset -16 +; ATOMIC-NEXT: movem.l %d2, (8,%sp) ; 8-byte Folded Spill +; ATOMIC-NEXT: move.w (18,%sp), %d2 +; ATOMIC-NEXT: move.l %d2, %d0 +; ATOMIC-NEXT: and.l #65535, %d0 +; ATOMIC-NEXT: move.l %d0, (4,%sp) +; ATOMIC-NEXT: move.l (20,%sp), (%sp) +; ATOMIC-NEXT: jsr __sync_fetch_and_nand_2@PLT +; ATOMIC-NEXT: move.w %d2, %d0 +; ATOMIC-NEXT: movem.l (8,%sp), %d2 ; 8-byte Folded Reload +; ATOMIC-NEXT: adda.l #12, %sp +; ATOMIC-NEXT: rts + %old = atomicrmw nand ptr %ptr, i16 %val seq_cst + ret i16 %val +} + +define i32 @atomicrmw_min_i32(i32 %val, ptr %ptr) { +; NO-ATOMIC-LABEL: atomicrmw_min_i32: +; NO-ATOMIC: .cfi_startproc +; NO-ATOMIC-NEXT: ; %bb.0: +; NO-ATOMIC-NEXT: suba.l #12, %sp +; NO-ATOMIC-NEXT: .cfi_def_cfa_offset -16 +; NO-ATOMIC-NEXT: move.l (16,%sp), (4,%sp) +; NO-ATOMIC-NEXT: move.l (20,%sp), (%sp) +; NO-ATOMIC-NEXT: jsr __sync_fetch_and_min_4@PLT +; NO-ATOMIC-NEXT: adda.l #12, %sp +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomicrmw_min_i32: +; ATOMIC: .cfi_startproc +; ATOMIC-NEXT: ; %bb.0: +; ATOMIC-NEXT: suba.l #12, %sp +; ATOMIC-NEXT: .cfi_def_cfa_offset -16 +; ATOMIC-NEXT: move.l (16,%sp), (4,%sp) +; ATOMIC-NEXT: move.l (20,%sp), (%sp) +; ATOMIC-NEXT: jsr
__sync_fetch_and_min_4@PLT +; ATOMIC-NEXT: adda.l #12, %sp +; ATOMIC-NEXT: rts + %old = atomicrmw min ptr %ptr, i32 %val acquire + ret i32 %old +} + +define i64 @atomicrmw_max_i64(i64 %val, ptr %ptr) { +; NO-ATOMIC-LABEL: atomicrmw_max_i64: +; NO-ATOMIC: .cfi_startproc +; NO-ATOMIC-NEXT: ; %bb.0: +; NO-ATOMIC-NEXT: suba.l #52, %sp +; NO-ATOMIC-NEXT: .cfi_def_cfa_offset -56 +; NO-ATOMIC-NEXT: movem.l %d2-%d4/%a2-%a3, (32,%sp) ; 24-byte Folded Spill +; NO-ATOMIC-NEXT: move.l (60,%sp), %d3 +; NO-ATOMIC-NEXT: move.l (56,%sp), %d4 +; NO-ATOMIC-NEXT: move.l (64,%sp), %a2 +; NO-ATOMIC-NEXT: move.l (4,%a2), %d1 +; NO-ATOMIC-NEXT: move.l (%a2), %d0 +; NO-ATOMIC-NEXT: lea (24,%sp), %a3 +; NO-ATOMIC-NEXT: bra .LBB7_1 +; NO-ATOMIC-NEXT: .LBB7_3: ; %atomicrmw.start +; NO-ATOMIC-NEXT: ; in Loop: Header=BB7_1 Depth=1 +; NO-ATOMIC-NEXT: move.l %d1, (12,%sp) +; NO-ATOMIC-NEXT: move.l %d0, (8,%sp) +; NO-ATOMIC-NEXT: move.l #5, (20,%sp) +; NO-ATOMIC-NEXT: move.l #5, (16,%sp) +; NO-ATOMIC-NEXT: jsr __atomic_compare_exchange_8@PLT +; NO-ATOMIC-NEXT: move.b %d0, %d2 +; NO-ATOMIC-NEXT: move.l (28,%sp), %d1 +; NO-ATOMIC-NEXT: move.l (24,%sp), %d0 +; NO-ATOMIC-NEXT: cmpi.b #0, %d2 +; NO-ATOMIC-NEXT: bne .LBB7_4 +; NO-ATOMIC-NEXT: .LBB7_1: ; %atomicrmw.start +; NO-ATOMIC-NEXT: ; =>This Inner Loop Header: Depth=1 +; NO-ATOMIC-NEXT: move.l %d0, (24,%sp) +; NO-ATOMIC-NEXT: move.l %d1, (28,%sp) +; NO-ATOMIC-NEXT: move.l %a2, (%sp) +; NO-ATOMIC-NEXT: move.l %a3, (4,%sp) +; NO-ATOMIC-NEXT: move.l %d3, %d2 +; NO-ATOMIC-NEXT: sub.l %d1, %d2 +; NO-ATOMIC-NEXT: move.l %d4, %d2 +; NO-ATOMIC-NEXT: subx.l %d0, %d2 +; NO-ATOMIC-NEXT: slt %d2 +; NO-ATOMIC-NEXT: cmpi.b #0, %d2 +; NO-ATOMIC-NEXT: bne .LBB7_3 +; NO-ATOMIC-NEXT: ; %bb.2: ; %atomicrmw.start +; NO-ATOMIC-NEXT: ; in Loop: Header=BB7_1 Depth=1 +; NO-ATOMIC-NEXT: move.l %d3, %d1 +; NO-ATOMIC-NEXT: move.l %d4, %d0 +; NO-ATOMIC-NEXT: bra .LBB7_3 +; NO-ATOMIC-NEXT: .LBB7_4: ; %atomicrmw.end +; NO-ATOMIC-NEXT: movem.l (32,%sp), %d2-%d4/%a2-%a3 ; 
24-byte Folded Reload +; NO-ATOMIC-NEXT: adda.l #52, %sp +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomicrmw_max_i64: +; ATOMIC: .cfi_startproc +; ATOMIC-NEXT: ; %bb.0: +; ATOMIC-NEXT: suba.l #52, %sp +; ATOMIC-NEXT: .cfi_def_cfa_offset -56 +; ATOMIC-NEXT: movem.l %d2-%d4/%a2-%a3, (32,%sp) ; 24-byte Folded Spill +; ATOMIC-NEXT: move.l (60,%sp), %d3 +; ATOMIC-NEXT: move.l (56,%sp), %d4 +; ATOMIC-NEXT: move.l (64,%sp), %a2 +; ATOMIC-NEXT: move.l (4,%a2), %d1 +; ATOMIC-NEXT: move.l (%a2), %d0 +; ATOMIC-NEXT: lea (24,%sp), %a3 +; ATOMIC-NEXT: bra .LBB7_1 +; ATOMIC-NEXT: .LBB7_3: ; %atomicrmw.start +; ATOMIC-NEXT: ; in Loop: Header=BB7_1 Depth=1 +; ATOMIC-NEXT: move.l %d1, (12,%sp) +; ATOMIC-NEXT: move.l %d0, (8,%sp) +; ATOMIC-NEXT: move.l #5, (20,%sp) +; ATOMIC-NEXT: move.l #5, (16,%sp) +; ATOMIC-NEXT: jsr __atomic_compare_exchange_8@PLT +; ATOMIC-NEXT: move.b %d0, %d2 +; ATOMIC-NEXT: move.l (28,%sp), %d1 +; ATOMIC-NEXT: move.l (24,%sp), %d0 +; ATOMIC-NEXT: cmpi.b #0, %d2 +; ATOMIC-NEXT: bne .LBB7_4 +; ATOMIC-NEXT: .LBB7_1: ; %atomicrmw.start +; ATOMIC-NEXT: ; =>This Inner Loop Header: Depth=1 +; ATOMIC-NEXT: move.l %d0, (24,%sp) +; ATOMIC-NEXT: move.l %d1, (28,%sp) +; ATOMIC-NEXT: move.l %a2, (%sp) +; ATOMIC-NEXT: move.l %a3, (4,%sp) +; ATOMIC-NEXT: move.l %d3, %d2 +; ATOMIC-NEXT: sub.l %d1, %d2 +; ATOMIC-NEXT: move.l %d4, %d2 +; ATOMIC-NEXT: subx.l %d0, %d2 +; ATOMIC-NEXT: slt %d2 +; ATOMIC-NEXT: cmpi.b #0, %d2 +; ATOMIC-NEXT: bne .LBB7_3 +; ATOMIC-NEXT: ; %bb.2: ; %atomicrmw.start +; ATOMIC-NEXT: ; in Loop: Header=BB7_1 Depth=1 +; ATOMIC-NEXT: move.l %d3, %d1 +; ATOMIC-NEXT: move.l %d4, %d0 +; ATOMIC-NEXT: bra .LBB7_3 +; ATOMIC-NEXT: .LBB7_4: ; %atomicrmw.end +; ATOMIC-NEXT: movem.l (32,%sp), %d2-%d4/%a2-%a3 ; 24-byte Folded Reload +; ATOMIC-NEXT: adda.l #52, %sp +; ATOMIC-NEXT: rts + %old = atomicrmw max ptr %ptr, i64 %val seq_cst + ret i64 %old +} + +define i8 @atomicrmw_i8_umin(i8 %val, ptr %ptr) { +; NO-ATOMIC-LABEL: atomicrmw_i8_umin: +; NO-ATOMIC: 
.cfi_startproc +; NO-ATOMIC-NEXT: ; %bb.0: +; NO-ATOMIC-NEXT: suba.l #12, %sp +; NO-ATOMIC-NEXT: .cfi_def_cfa_offset -16 +; NO-ATOMIC-NEXT: move.b (19,%sp), %d0 +; NO-ATOMIC-NEXT: and.l #255, %d0 +; NO-ATOMIC-NEXT: move.l %d0, (4,%sp) +; NO-ATOMIC-NEXT: move.l (20,%sp), (%sp) +; NO-ATOMIC-NEXT: jsr __sync_fetch_and_umin_1@PLT +; NO-ATOMIC-NEXT: adda.l #12, %sp +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomicrmw_i8_umin: +; ATOMIC: .cfi_startproc +; ATOMIC-NEXT: ; %bb.0: +; ATOMIC-NEXT: suba.l #12, %sp +; ATOMIC-NEXT: .cfi_def_cfa_offset -16 +; ATOMIC-NEXT: move.b (19,%sp), %d0 +; ATOMIC-NEXT: and.l #255, %d0 +; ATOMIC-NEXT: move.l %d0, (4,%sp) +; ATOMIC-NEXT: move.l (20,%sp), (%sp) +; ATOMIC-NEXT: jsr __sync_fetch_and_umin_1@PLT +; ATOMIC-NEXT: adda.l #12, %sp +; ATOMIC-NEXT: rts + %old = atomicrmw umin ptr %ptr, i8 %val release + ret i8 %old +} + +define i16 @atomicrmw_umax_i16(i16 %val, ptr %ptr) { +; NO-ATOMIC-LABEL: atomicrmw_umax_i16: +; NO-ATOMIC: .cfi_startproc +; NO-ATOMIC-NEXT: ; %bb.0: +; NO-ATOMIC-NEXT: suba.l #12, %sp +; NO-ATOMIC-NEXT: .cfi_def_cfa_offset -16 +; NO-ATOMIC-NEXT: move.w (18,%sp), %d0 +; NO-ATOMIC-NEXT: and.l #65535, %d0 +; NO-ATOMIC-NEXT: move.l %d0, (4,%sp) +; NO-ATOMIC-NEXT: move.l (20,%sp), (%sp) +; NO-ATOMIC-NEXT: jsr __sync_fetch_and_umax_2@PLT +; NO-ATOMIC-NEXT: adda.l #12, %sp +; NO-ATOMIC-NEXT: rts +; +; ATOMIC-LABEL: atomicrmw_umax_i16: +; ATOMIC: .cfi_startproc +; ATOMIC-NEXT: ; %bb.0: +; ATOMIC-NEXT: suba.l #12, %sp +; ATOMIC-NEXT: .cfi_def_cfa_offset -16 +; ATOMIC-NEXT: move.w (18,%sp), %d0 +; ATOMIC-NEXT: and.l #65535, %d0 +; ATOMIC-NEXT: move.l %d0, (4,%sp) +; ATOMIC-NEXT: move.l (20,%sp), (%sp) +; ATOMIC-NEXT: jsr __sync_fetch_and_umax_2@PLT +; ATOMIC-NEXT: adda.l #12, %sp +; ATOMIC-NEXT: rts + %old = atomicrmw umax ptr %ptr, i16 %val seq_cst + ret i16 %old +} Index: llvm/test/CodeGen/M68k/pipeline.ll =================================================================== --- llvm/test/CodeGen/M68k/pipeline.ll +++ 
llvm/test/CodeGen/M68k/pipeline.ll @@ -3,6 +3,7 @@ ; CHECK-NEXT: Pre-ISel Intrinsic Lowering ; CHECK-NEXT: FunctionPass Manager ; CHECK-NEXT: Expand large div/rem +; CHECK-NEXT: Expand Atomic instructions ; CHECK-NEXT: Module Verifier ; CHECK-NEXT: Dominator Tree Construction ; CHECK-NEXT: Basic Alias Analysis (stateless AA impl) @@ -131,4 +132,4 @@ ; CHECK-NEXT: Lazy Machine Block Frequency Analysis ; CHECK-NEXT: Machine Optimization Remark Emitter ; CHECK-NEXT: M68k Assembly Printer -; CHECK-NEXT: Free MachineFunction \ No newline at end of file +; CHECK-NEXT: Free MachineFunction Index: llvm/test/MC/Disassembler/M68k/atomics.txt =================================================================== --- /dev/null +++ llvm/test/MC/Disassembler/M68k/atomics.txt @@ -0,0 +1,10 @@ +# RUN: llvm-mc -disassemble %s -triple=m68k | FileCheck %s + +# CHECK: cas.b %d3, %d2, (%a2) +0x0a 0xd2 0x00 0x83 + +# CHECK: cas.w %d4, %d5, (%a3) +0x0c 0xd3 0x01 0x44 + +# CHECK: cas.l %d6, %d7, (%a4) +0x0e 0xd4 0x01 0xc6 Index: llvm/test/MC/M68k/Atomics/cas.s =================================================================== --- /dev/null +++ llvm/test/MC/M68k/Atomics/cas.s @@ -0,0 +1,13 @@ +; RUN: llvm-mc -show-encoding -triple=m68k %s | FileCheck %s + +; CHECK: cas.b %d3, %d2, (%a2) +; CHECK-SAME: ; encoding: [0x0a,0xd2,0x00,0x83] +cas.b %d3, %d2, (%a2) + +; CHECK: cas.w %d4, %d5, (%a3) +; CHECK-SAME: ; encoding: [0x0c,0xd3,0x01,0x44] +cas.w %d4, %d5, (%a3) + +; CHECK: cas.l %d6, %d7, (%a4) +; CHECK-SAME: ; encoding: [0x0e,0xd4,0x01,0xc6] +cas.l %d6, %d7, (%a4)