Index: llvm/lib/Target/M68k/M68kInstrAtomics.td
===================================================================
--- /dev/null
+++ llvm/lib/Target/M68k/M68kInstrAtomics.td
@@ -0,0 +1,15 @@
+//===-- M68kInstrAtomics.td - Atomics Instructions ---------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+def : Pat<(atomic_load_8 MxCP_ARI:$ptr), (MOV8dj MxARI8:$ptr)>;
+def : Pat<(atomic_load_16 MxCP_ARI:$ptr), (MOV16dj MxARI16:$ptr)>;
+def : Pat<(atomic_load_32 MxCP_ARI:$ptr), (MOV32dj MxARI32:$ptr)>;
+
+def : Pat<(atomic_store_8 MxCP_ARI:$ptr, MxDRD8:$val), (MOV8jd MxARI8:$ptr, MxDRD8:$val)>;
+def : Pat<(atomic_store_16 MxCP_ARI:$ptr, MxDRD16:$val), (MOV16jd MxARI16:$ptr, MxDRD16:$val)>;
+def : Pat<(atomic_store_32 MxCP_ARI:$ptr, MxDRD32:$val), (MOV32jd MxARI32:$ptr, MxDRD32:$val)>;
Index: llvm/lib/Target/M68k/M68kInstrInfo.td
===================================================================
--- llvm/lib/Target/M68k/M68kInstrInfo.td
+++ llvm/lib/Target/M68k/M68kInstrInfo.td
@@ -781,5 +781,6 @@
 include "M68kInstrBits.td"
 include "M68kInstrArithmetic.td"
 include "M68kInstrControl.td"
+include "M68kInstrAtomics.td"
 
 include "M68kInstrCompiler.td"
Index: llvm/test/CodeGen/M68k/atomic-load-store.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/M68k/atomic-load-store.ll
@@ -0,0 +1,119 @@
+define i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
+  %1 = load atomic i8, i8* %a unordered, align 1
+  ret i8 %1
+}
+
+define i8 @atomic_load_i8_monotonic(i8 *%a) nounwind {
+  %1 = load atomic i8, i8* %a monotonic, align 1
+  ret i8 %1
+}
+
+define i8 @atomic_load_i8_acquire(i8 *%a) nounwind {
+  %1 = load atomic i8, i8* %a acquire, align 1
+  ret i8 %1
+}
+
+define i8 @atomic_load_i8_seq_cst(i8 *%a) nounwind {
+  %1 = load atomic i8, i8* %a seq_cst, align 1
+  ret i8 %1
+}
+
+define i16 @atomic_load_i16_unordered(i16 *%a) nounwind {
+  %1 = load atomic i16, i16* %a unordered, align 2
+  ret i16 %1
+}
+
+define i16 @atomic_load_i16_monotonic(i16 *%a) nounwind {
+  %1 = load atomic i16, i16* %a monotonic, align 2
+  ret i16 %1
+}
+
+define i16 @atomic_load_i16_acquire(i16 *%a) nounwind {
+  %1 = load atomic i16, i16* %a acquire, align 2
+  ret i16 %1
+}
+
+define i16 @atomic_load_i16_seq_cst(i16 *%a) nounwind {
+  %1 = load atomic i16, i16* %a seq_cst, align 2
+  ret i16 %1
+}
+
+define i32 @atomic_load_i32_unordered(i32 *%a) nounwind {
+  %1 = load atomic i32, i32* %a unordered, align 4
+  ret i32 %1
+}
+
+define i32 @atomic_load_i32_monotonic(i32 *%a) nounwind {
+  %1 = load atomic i32, i32* %a monotonic, align 4
+  ret i32 %1
+}
+
+define i32 @atomic_load_i32_acquire(i32 *%a) nounwind {
+  %1 = load atomic i32, i32* %a acquire, align 4
+  ret i32 %1
+}
+
+define i32 @atomic_load_i32_seq_cst(i32 *%a) nounwind {
+  %1 = load atomic i32, i32* %a seq_cst, align 4
+  ret i32 %1
+}
+
+define void @atomic_store_i8_unordered(i8 *%a, i8 %val) nounwind {
+  store atomic i8 %val, i8* %a unordered, align 1
+  ret void
+}
+
+define void @atomic_store_i8_monotonic(i8 *%a, i8 %val) nounwind {
+  store atomic i8 %val, i8* %a monotonic, align 1
+  ret void
+}
+
+define void @atomic_store_i8_release(i8 *%a, i8 %val) nounwind {
+  store atomic i8 %val, i8* %a release, align 1
+  ret void
+}
+
+define void @atomic_store_i8_seq_cst(i8 *%a, i8 %val) nounwind {
+  store atomic i8 %val, i8* %a seq_cst, align 1
+  ret void
+}
+
+define void @atomic_store_i16_unordered(i16 *%a, i16 %val) nounwind {
+  store atomic i16 %val, i16* %a unordered, align 2
+  ret void
+}
+
+define void @atomic_store_i16_monotonic(i16 *%a, i16 %val) nounwind {
+  store atomic i16 %val, i16* %a monotonic, align 2
+  ret void
+}
+
+define void @atomic_store_i16_release(i16 *%a, i16 %val) nounwind {
+  store atomic i16 %val, i16* %a release, align 2
+  ret void
+}
+
+define void @atomic_store_i16_seq_cst(i16 *%a, i16 %val) nounwind {
+  store atomic i16 %val, i16* %a seq_cst, align 2
+  ret void
+}
+
+define void @atomic_store_i32_unordered(i32 *%a, i32 %val) nounwind {
+  store atomic i32 %val, i32* %a unordered, align 4
+  ret void
+}
+
+define void @atomic_store_i32_monotonic(i32 *%a, i32 %val) nounwind {
+  store atomic i32 %val, i32* %a monotonic, align 4
+  ret void
+}
+
+define void @atomic_store_i32_release(i32 *%a, i32 %val) nounwind {
+  store atomic i32 %val, i32* %a release, align 4
+  ret void
+}
+
+define void @atomic_store_i32_seq_cst(i32 *%a, i32 %val) nounwind {
+  store atomic i32 %val, i32* %a seq_cst, align 4
+  ret void
+}
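
Note: the new atomic-load-store.ll above carries no lit RUN line or FileCheck directives, so llvm-lit will not execute or check it as written. A minimal sketch of the usual header is shown below; the exact invocation is an assumption rather than part of this patch (it presumes a build with the experimental M68k backend enabled), and the CHECK lines would then typically be generated with llvm/utils/update_llc_test_checks.py.

; RUN: llc %s -o - -mtriple=m68k -verify-machineinstrs | FileCheck %s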