Index: llvm/lib/Target/M68k/M68kISelLowering.cpp
===================================================================
--- llvm/lib/Target/M68k/M68kISelLowering.cpp
+++ llvm/lib/Target/M68k/M68kISelLowering.cpp
@@ -160,6 +160,8 @@
   // 2^2 bytes
   // FIXME can it be just 2^1?
   setMinFunctionAlignment(Align::Constant<2>());
+
+  setMaxAtomicSizeInBitsSupported(32);
 }
 
 EVT M68kTargetLowering::getSetCCResultType(const DataLayout &DL,
Index: llvm/lib/Target/M68k/M68kInstrAtomics.td
===================================================================
--- /dev/null
+++ llvm/lib/Target/M68k/M68kInstrAtomics.td
@@ -0,0 +1,15 @@
+//===-- M68kInstrAtomics.td - Atomics Instructions ---------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+def : Pat<(atomic_load_8 MxCP_ARI:$ptr), (MOV8dj MxARI8:$ptr)>;
+def : Pat<(atomic_load_16 MxCP_ARI:$ptr), (MOV16dj MxARI16:$ptr)>;
+def : Pat<(atomic_load_32 MxCP_ARI:$ptr), (MOV32dj MxARI32:$ptr)>;
+
+def : Pat<(atomic_store_8 MxCP_ARI:$ptr, MxDRD8:$val), (MOV8jd MxARI8:$ptr, MxDRD8:$val)>;
+def : Pat<(atomic_store_16 MxCP_ARI:$ptr, MxDRD16:$val), (MOV16jd MxARI16:$ptr, MxDRD16:$val)>;
+def : Pat<(atomic_store_32 MxCP_ARI:$ptr, MxDRD32:$val), (MOV32jd MxARI32:$ptr, MxDRD32:$val)>;
Index: llvm/lib/Target/M68k/M68kInstrInfo.td
===================================================================
--- llvm/lib/Target/M68k/M68kInstrInfo.td
+++ llvm/lib/Target/M68k/M68kInstrInfo.td
@@ -781,5 +781,6 @@
 include "M68kInstrBits.td"
 include "M68kInstrArithmetic.td"
 include "M68kInstrControl.td"
+include "M68kInstrAtomics.td"
 include "M68kInstrCompiler.td"
 
Index: llvm/lib/Target/M68k/M68kTargetMachine.cpp
===================================================================
--- llvm/lib/Target/M68k/M68kTargetMachine.cpp
+++ llvm/lib/Target/M68k/M68kTargetMachine.cpp
@@ -143,6 +143,7 @@
   const M68kSubtarget &getM68kSubtarget() const {
     return *getM68kTargetMachine().getSubtargetImpl();
   }
+  void addIRPasses() override;
   bool addIRTranslator() override;
   bool addLegalizeMachineIR() override;
   bool addRegBankSelect() override;
@@ -157,6 +158,13 @@
   return new M68kPassConfig(*this, PM);
 }
 
+void M68kPassConfig::addIRPasses() {
+  // Run AtomicExpand so unsupported atomics (RMW, cmpxchg, and anything
+  // wider than 32 bits) become __atomic_* libcalls before selection.
+  addPass(createAtomicExpandPass());
+  // Keep the standard target-independent IR passes in the pipeline.
+  TargetPassConfig::addIRPasses();
+}
+
 bool M68kPassConfig::addInstSelector() {
   // Install an instruction selector.
   addPass(createM68kISelDag(getM68kTargetMachine()));
Index: llvm/test/CodeGen/M68k/atomic-load-store.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/M68k/atomic-load-store.ll
@@ -0,0 +1,366 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=m68k -o - %s | FileCheck %s
+
+define i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
+; CHECK-LABEL: atomic_load_i8_unordered:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.b (%a0), %d0
+; CHECK-NEXT:    rts
+  %1 = load atomic i8, i8* %a unordered, align 1
+  ret i8 %1
+}
+
+define i8 @atomic_load_i8_monotonic(i8 *%a) nounwind {
+; CHECK-LABEL: atomic_load_i8_monotonic:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.b (%a0), %d0
+; CHECK-NEXT:    rts
+  %1 = load atomic i8, i8* %a monotonic, align 1
+  ret i8 %1
+}
+
+define i8 @atomic_load_i8_acquire(i8 *%a) nounwind {
+; CHECK-LABEL: atomic_load_i8_acquire:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.b (%a0), %d0
+; CHECK-NEXT:    rts
+  %1 = load atomic i8, i8* %a acquire, align 1
+  ret i8 %1
+}
+
+define i8 @atomic_load_i8_seq_cst(i8 *%a) nounwind {
+; CHECK-LABEL: atomic_load_i8_seq_cst:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.b (%a0), %d0
+; CHECK-NEXT:    rts
+  %1 = load atomic i8, i8* %a seq_cst, align 1
+  ret i8 %1
+}
+
+define i16 @atomic_load_i16_unordered(i16 *%a) nounwind {
+; CHECK-LABEL: atomic_load_i16_unordered:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.w (%a0), %d0
+; CHECK-NEXT:    rts
+  %1 = load atomic i16, i16* %a unordered, align 2
+  ret i16 %1
+}
+
+define i16 @atomic_load_i16_monotonic(i16 *%a) nounwind {
+; CHECK-LABEL: atomic_load_i16_monotonic:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.w (%a0), %d0
+; CHECK-NEXT:    rts
+  %1 = load atomic i16, i16* %a monotonic, align 2
+  ret i16 %1
+}
+
+define i16 @atomic_load_i16_acquire(i16 *%a) nounwind {
+; CHECK-LABEL: atomic_load_i16_acquire:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.w (%a0), %d0
+; CHECK-NEXT:    rts
+  %1 = load atomic i16, i16* %a acquire, align 2
+  ret i16 %1
+}
+
+define i16 @atomic_load_i16_seq_cst(i16 *%a) nounwind {
+; CHECK-LABEL: atomic_load_i16_seq_cst:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.w (%a0), %d0
+; CHECK-NEXT:    rts
+  %1 = load atomic i16, i16* %a seq_cst, align 2
+  ret i16 %1
+}
+
+define i32 @atomic_load_i32_unordered(i32 *%a) nounwind {
+; CHECK-LABEL: atomic_load_i32_unordered:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.l (%a0), %d0
+; CHECK-NEXT:    rts
+  %1 = load atomic i32, i32* %a unordered, align 4
+  ret i32 %1
+}
+
+define i32 @atomic_load_i32_monotonic(i32 *%a) nounwind {
+; CHECK-LABEL: atomic_load_i32_monotonic:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.l (%a0), %d0
+; CHECK-NEXT:    rts
+  %1 = load atomic i32, i32* %a monotonic, align 4
+  ret i32 %1
+}
+
+define i32 @atomic_load_i32_acquire(i32 *%a) nounwind {
+; CHECK-LABEL: atomic_load_i32_acquire:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.l (%a0), %d0
+; CHECK-NEXT:    rts
+  %1 = load atomic i32, i32* %a acquire, align 4
+  ret i32 %1
+}
+
+define i32 @atomic_load_i32_seq_cst(i32 *%a) nounwind {
+; CHECK-LABEL: atomic_load_i32_seq_cst:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.l (%a0), %d0
+; CHECK-NEXT:    rts
+  %1 = load atomic i32, i32* %a seq_cst, align 4
+  ret i32 %1
+}
+
+define i64 @atomic_load_i64_unordered(i64 *%a) nounwind {
+; CHECK-LABEL: atomic_load_i64_unordered:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    suba.l #12, %sp
+; CHECK-NEXT:    move.l #0, (4,%sp)
+; CHECK-NEXT:    move.l (16,%sp), (%sp)
+; CHECK-NEXT:    jsr __atomic_load_8@PLT
+; CHECK-NEXT:    adda.l #12, %sp
+; CHECK-NEXT:    rts
+  %1 = load atomic i64, i64* %a unordered, align 8
+  ret i64 %1
+}
+
+define i64 @atomic_load_i64_monotonic(i64 *%a) nounwind {
+; CHECK-LABEL: atomic_load_i64_monotonic:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    suba.l #12, %sp
+; CHECK-NEXT:    move.l #0, (4,%sp)
+; CHECK-NEXT:    move.l (16,%sp), (%sp)
+; CHECK-NEXT:    jsr __atomic_load_8@PLT
+; CHECK-NEXT:    adda.l #12, %sp
+; CHECK-NEXT:    rts
+  %1 = load atomic i64, i64* %a monotonic, align 8
+  ret i64 %1
+}
+
+define i64 @atomic_load_i64_acquire(i64 *%a) nounwind {
+; CHECK-LABEL: atomic_load_i64_acquire:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    suba.l #12, %sp
+; CHECK-NEXT:    move.l #2, (4,%sp)
+; CHECK-NEXT:    move.l (16,%sp), (%sp)
+; CHECK-NEXT:    jsr __atomic_load_8@PLT
+; CHECK-NEXT:    adda.l #12, %sp
+; CHECK-NEXT:    rts
+  %1 = load atomic i64, i64* %a acquire, align 8
+  ret i64 %1
+}
+
+define i64 @atomic_load_i64_seq_cst(i64 *%a) nounwind {
+; CHECK-LABEL: atomic_load_i64_seq_cst:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    suba.l #12, %sp
+; CHECK-NEXT:    move.l #5, (4,%sp)
+; CHECK-NEXT:    move.l (16,%sp), (%sp)
+; CHECK-NEXT:    jsr __atomic_load_8@PLT
+; CHECK-NEXT:    adda.l #12, %sp
+; CHECK-NEXT:    rts
+  %1 = load atomic i64, i64* %a seq_cst, align 8
+  ret i64 %1
+}
+
+define void @atomic_store_i8_unordered(i8 *%a, i8 %val) nounwind {
+; CHECK-LABEL: atomic_store_i8_unordered:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.b (11,%sp), %d0
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.b %d0, (%a0)
+; CHECK-NEXT:    rts
+  store atomic i8 %val, i8* %a unordered, align 1
+  ret void
+}
+
+define void @atomic_store_i8_monotonic(i8 *%a, i8 %val) nounwind {
+; CHECK-LABEL: atomic_store_i8_monotonic:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.b (11,%sp), %d0
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.b %d0, (%a0)
+; CHECK-NEXT:    rts
+  store atomic i8 %val, i8* %a monotonic, align 1
+  ret void
+}
+
+define void @atomic_store_i8_release(i8 *%a, i8 %val) nounwind {
+; CHECK-LABEL: atomic_store_i8_release:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.b (11,%sp), %d0
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.b %d0, (%a0)
+; CHECK-NEXT:    rts
+  store atomic i8 %val, i8* %a release, align 1
+  ret void
+}
+
+define void @atomic_store_i8_seq_cst(i8 *%a, i8 %val) nounwind {
+; CHECK-LABEL: atomic_store_i8_seq_cst:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.b (11,%sp), %d0
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.b %d0, (%a0)
+; CHECK-NEXT:    rts
+  store atomic i8 %val, i8* %a seq_cst, align 1
+  ret void
+}
+
+define void @atomic_store_i16_unordered(i16 *%a, i16 %val) nounwind {
+; CHECK-LABEL: atomic_store_i16_unordered:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.w (10,%sp), %d0
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.w %d0, (%a0)
+; CHECK-NEXT:    rts
+  store atomic i16 %val, i16* %a unordered, align 2
+  ret void
+}
+
+define void @atomic_store_i16_monotonic(i16 *%a, i16 %val) nounwind {
+; CHECK-LABEL: atomic_store_i16_monotonic:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.w (10,%sp), %d0
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.w %d0, (%a0)
+; CHECK-NEXT:    rts
+  store atomic i16 %val, i16* %a monotonic, align 2
+  ret void
+}
+
+define void @atomic_store_i16_release(i16 *%a, i16 %val) nounwind {
+; CHECK-LABEL: atomic_store_i16_release:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.w (10,%sp), %d0
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.w %d0, (%a0)
+; CHECK-NEXT:    rts
+  store atomic i16 %val, i16* %a release, align 2
+  ret void
+}
+
+define void @atomic_store_i16_seq_cst(i16 *%a, i16 %val) nounwind {
+; CHECK-LABEL: atomic_store_i16_seq_cst:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.w (10,%sp), %d0
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.w %d0, (%a0)
+; CHECK-NEXT:    rts
+  store atomic i16 %val, i16* %a seq_cst, align 2
+  ret void
+}
+
+define void @atomic_store_i32_unordered(i32 *%a, i32 %val) nounwind {
+; CHECK-LABEL: atomic_store_i32_unordered:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.l (8,%sp), %d0
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.l %d0, (%a0)
+; CHECK-NEXT:    rts
+  store atomic i32 %val, i32* %a unordered, align 4
+  ret void
+}
+
+define void @atomic_store_i32_monotonic(i32 *%a, i32 %val) nounwind {
+; CHECK-LABEL: atomic_store_i32_monotonic:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.l (8,%sp), %d0
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.l %d0, (%a0)
+; CHECK-NEXT:    rts
+  store atomic i32 %val, i32* %a monotonic, align 4
+  ret void
+}
+
+define void @atomic_store_i32_release(i32 *%a, i32 %val) nounwind {
+; CHECK-LABEL: atomic_store_i32_release:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.l (8,%sp), %d0
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.l %d0, (%a0)
+; CHECK-NEXT:    rts
+  store atomic i32 %val, i32* %a release, align 4
+  ret void
+}
+
+define void @atomic_store_i32_seq_cst(i32 *%a, i32 %val) nounwind {
+; CHECK-LABEL: atomic_store_i32_seq_cst:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.l (8,%sp), %d0
+; CHECK-NEXT:    move.l (4,%sp), %a0
+; CHECK-NEXT:    move.l %d0, (%a0)
+; CHECK-NEXT:    rts
+  store atomic i32 %val, i32* %a seq_cst, align 4
+  ret void
+}
+
+define void @atomic_store_i64_unordered(i64 *%a, i64 %val) nounwind {
+; CHECK-LABEL: atomic_store_i64_unordered:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    suba.l #20, %sp
+; CHECK-NEXT:    move.l #0, (12,%sp)
+; CHECK-NEXT:    move.l (32,%sp), (8,%sp)
+; CHECK-NEXT:    move.l (28,%sp), (4,%sp)
+; CHECK-NEXT:    move.l (24,%sp), (%sp)
+; CHECK-NEXT:    jsr __atomic_store_8@PLT
+; CHECK-NEXT:    adda.l #20, %sp
+; CHECK-NEXT:    rts
+  store atomic i64 %val, i64* %a unordered, align 8
+  ret void
+}
+
+define void @atomic_store_i64_monotonic(i64 *%a, i64 %val) nounwind {
+; CHECK-LABEL: atomic_store_i64_monotonic:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    suba.l #20, %sp
+; CHECK-NEXT:    move.l #0, (12,%sp)
+; CHECK-NEXT:    move.l (32,%sp), (8,%sp)
+; CHECK-NEXT:    move.l (28,%sp), (4,%sp)
+; CHECK-NEXT:    move.l (24,%sp), (%sp)
+; CHECK-NEXT:    jsr __atomic_store_8@PLT
+; CHECK-NEXT:    adda.l #20, %sp
+; CHECK-NEXT:    rts
+  store atomic i64 %val, i64* %a monotonic, align 8
+  ret void
+}
+
+define void @atomic_store_i64_release(i64 *%a, i64 %val) nounwind {
+; CHECK-LABEL: atomic_store_i64_release:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    suba.l #20, %sp
+; CHECK-NEXT:    move.l #3, (12,%sp)
+; CHECK-NEXT:    move.l (32,%sp), (8,%sp)
+; CHECK-NEXT:    move.l (28,%sp), (4,%sp)
+; CHECK-NEXT:    move.l (24,%sp), (%sp)
+; CHECK-NEXT:    jsr __atomic_store_8@PLT
+; CHECK-NEXT:    adda.l #20, %sp
+; CHECK-NEXT:    rts
+  store atomic i64 %val, i64* %a release, align 8
+  ret void
+}
+
+define void @atomic_store_i64_seq_cst(i64 *%a, i64 %val) nounwind {
+; CHECK-LABEL: atomic_store_i64_seq_cst:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    suba.l #20, %sp
+; CHECK-NEXT:    move.l #5, (12,%sp)
+; CHECK-NEXT:    move.l (32,%sp), (8,%sp)
+; CHECK-NEXT:    move.l (28,%sp), (4,%sp)
+; CHECK-NEXT:    move.l (24,%sp), (%sp)
+; CHECK-NEXT:    jsr __atomic_store_8@PLT
+; CHECK-NEXT:    adda.l #20, %sp
+; CHECK-NEXT:    rts
+  store atomic i64 %val, i64* %a seq_cst, align 8
+  ret void
+}