diff --git a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp --- a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp +++ b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp @@ -65,6 +65,28 @@ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Expand); } + // FIXME: Add support for cmpxchg with ATOMIC_CMP_SWAP_WITH_SUCCESS + setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand); + for (MVT VT : MVT::integer_valuetypes()) { + setOperationAction(ISD::ATOMIC_SWAP, VT, Expand); + setOperationAction(ISD::ATOMIC_STORE, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD, VT, Expand); + setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Expand); + + setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_CLR, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_NAND, VT, Expand); + + setOperationAction(ISD::ATOMIC_LOAD_MIN, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_MAX, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_UMIN, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_UMAX, VT, Expand); + } + // We don't have any truncstores setTruncStoreAction(MVT::i16, MVT::i8, Expand); diff --git a/llvm/test/CodeGen/MSP430/atomics/fence.ll b/llvm/test/CodeGen/MSP430/atomics/fence.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/MSP430/atomics/fence.ll @@ -0,0 +1,14 @@ +; RUN: llc -march=msp430 < %s | FileCheck %s + +; FIXME: We can remove the call to __sync_synchronize since +; MSP430 is always single threaded + +; CHECK-LABEL: atomic_fence +; CHECK: ; %bb.0: +; CHECK-NEXT: call #__sync_synchronize +; CHECK-NEXT: ret +define void @atomic_fence() { + fence acquire + ret void +} + diff --git a/llvm/test/CodeGen/MSP430/atomics/load.ll b/llvm/test/CodeGen/MSP430/atomics/load.ll 
new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/MSP430/atomics/load.ll @@ -0,0 +1,30 @@ +; RUN: llc -march=msp430 < %s | FileCheck %s + +; CHECK-LABEL: atomic_load8 +; CHECK: call #__sync_val_compare_and_swap_1 +define i8 @atomic_load8(i8* %foo) { + %val = load atomic i8, i8* %foo unordered, align 1 + ret i8 %val +} + +; CHECK-LABEL: atomic_load16 +; CHECK: call #__sync_val_compare_and_swap_2 +define i16 @atomic_load16(i16* %foo) { + %val = load atomic i16, i16* %foo unordered, align 2 + ret i16 %val +} + +; CHECK-LABEL: atomic_load32 +; CHECK: call #__sync_val_compare_and_swap_4 +define i32 @atomic_load32(i32* %foo) { + %val = load atomic i32, i32* %foo unordered, align 4 + ret i32 %val +} + + +; CHECK-LABEL: atomic_load64 +; CHECK: call #__sync_val_compare_and_swap_8 +define i64 @atomic_load64(i64* %foo) { + %val = load atomic i64, i64* %foo unordered, align 8 + ret i64 %val +} diff --git a/llvm/test/CodeGen/MSP430/atomics/load_add.ll b/llvm/test/CodeGen/MSP430/atomics/load_add.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/MSP430/atomics/load_add.ll @@ -0,0 +1,29 @@ +; RUN: llc -march=msp430 < %s | FileCheck %s + +; CHECK-LABEL: atomic_load_add8 +; CHECK: call #__sync_fetch_and_add_1 +define i8 @atomic_load_add8(i8* %foo) { + %val = atomicrmw add i8* %foo, i8 13 seq_cst + ret i8 %val +} + +; CHECK-LABEL: atomic_load_add16 +; CHECK: call #__sync_fetch_and_add_2 +define i16 @atomic_load_add16(i16* %foo) { + %val = atomicrmw add i16* %foo, i16 13 seq_cst + ret i16 %val +} + +; CHECK-LABEL: atomic_load_add32 +; CHECK: call #__sync_fetch_and_add_4 +define i32 @atomic_load_add32(i32* %foo) { + %val = atomicrmw add i32* %foo, i32 13 seq_cst + ret i32 %val +} + +; CHECK-LABEL: atomic_load_add64 +; CHECK: call #__sync_fetch_and_add_8 +define i64 @atomic_load_add64(i64* %foo) { + %val = atomicrmw add i64* %foo, i64 13 seq_cst + ret i64 %val +} diff --git a/llvm/test/CodeGen/MSP430/atomics/load_and.ll b/llvm/test/CodeGen/MSP430/atomics/load_and.ll 
new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/MSP430/atomics/load_and.ll @@ -0,0 +1,29 @@ +; RUN: llc -march=msp430 < %s | FileCheck %s + +; CHECK-LABEL: atomic_load_and8 +; CHECK: call #__sync_fetch_and_and_1 +define i8 @atomic_load_and8(i8* %foo) { + %val = atomicrmw and i8* %foo, i8 13 seq_cst + ret i8 %val +} + +; CHECK-LABEL: atomic_load_and16 +; CHECK: call #__sync_fetch_and_and_2 +define i16 @atomic_load_and16(i16* %foo) { + %val = atomicrmw and i16* %foo, i16 13 seq_cst + ret i16 %val +} + +; CHECK-LABEL: atomic_load_and32 +; CHECK: call #__sync_fetch_and_and_4 +define i32 @atomic_load_and32(i32* %foo) { + %val = atomicrmw and i32* %foo, i32 13 seq_cst + ret i32 %val +} + +; CHECK-LABEL: atomic_load_and64 +; CHECK: call #__sync_fetch_and_and_8 +define i64 @atomic_load_and64(i64* %foo) { + %val = atomicrmw and i64* %foo, i64 13 seq_cst + ret i64 %val +} diff --git a/llvm/test/CodeGen/MSP430/atomics/load_max.ll b/llvm/test/CodeGen/MSP430/atomics/load_max.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/MSP430/atomics/load_max.ll @@ -0,0 +1,29 @@ +; RUN: llc -march=msp430 < %s | FileCheck %s + +; CHECK-LABEL: atomic_load_max8 +; CHECK: call #__sync_fetch_and_max_1 +define i8 @atomic_load_max8(i8* %foo) { + %val = atomicrmw max i8* %foo, i8 13 seq_cst + ret i8 %val +} + +; CHECK-LABEL: atomic_load_max16 +; CHECK: call #__sync_fetch_and_max_2 +define i16 @atomic_load_max16(i16* %foo) { + %val = atomicrmw max i16* %foo, i16 13 seq_cst + ret i16 %val +} + +; CHECK-LABEL: atomic_load_max32 +; CHECK: call #__sync_fetch_and_max_4 +define i32 @atomic_load_max32(i32* %foo) { + %val = atomicrmw max i32* %foo, i32 13 seq_cst + ret i32 %val +} + +; CHECK-LABEL: atomic_load_max64 +; CHECK: call #__sync_fetch_and_max_8 +define i64 @atomic_load_max64(i64* %foo) { + %val = atomicrmw max i64* %foo, i64 13 seq_cst + ret i64 %val +} diff --git a/llvm/test/CodeGen/MSP430/atomics/load_min.ll b/llvm/test/CodeGen/MSP430/atomics/load_min.ll new file mode 
100644 --- /dev/null +++ b/llvm/test/CodeGen/MSP430/atomics/load_min.ll @@ -0,0 +1,29 @@ +; RUN: llc -march=msp430 < %s | FileCheck %s + +; CHECK-LABEL: atomic_load_min8 +; CHECK: call #__sync_fetch_and_min_1 +define i8 @atomic_load_min8(i8* %foo) { + %val = atomicrmw min i8* %foo, i8 13 seq_cst + ret i8 %val +} + +; CHECK-LABEL: atomic_load_min16 +; CHECK: call #__sync_fetch_and_min_2 +define i16 @atomic_load_min16(i16* %foo) { + %val = atomicrmw min i16* %foo, i16 13 seq_cst + ret i16 %val +} + +; CHECK-LABEL: atomic_load_min32 +; CHECK: call #__sync_fetch_and_min_4 +define i32 @atomic_load_min32(i32* %foo) { + %val = atomicrmw min i32* %foo, i32 13 seq_cst + ret i32 %val +} + +; CHECK-LABEL: atomic_load_min64 +; CHECK: call #__sync_fetch_and_min_8 +define i64 @atomic_load_min64(i64* %foo) { + %val = atomicrmw min i64* %foo, i64 13 seq_cst + ret i64 %val +} diff --git a/llvm/test/CodeGen/MSP430/atomics/load_nand.ll b/llvm/test/CodeGen/MSP430/atomics/load_nand.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/MSP430/atomics/load_nand.ll @@ -0,0 +1,29 @@ +; RUN: llc -march=msp430 < %s | FileCheck %s + +; CHECK-LABEL: atomic_load_nand8 +; CHECK: call #__sync_fetch_and_nand_1 +define i8 @atomic_load_nand8(i8* %foo) { + %val = atomicrmw nand i8* %foo, i8 13 seq_cst + ret i8 %val +} + +; CHECK-LABEL: atomic_load_nand16 +; CHECK: call #__sync_fetch_and_nand_2 +define i16 @atomic_load_nand16(i16* %foo) { + %val = atomicrmw nand i16* %foo, i16 13 seq_cst + ret i16 %val +} + +; CHECK-LABEL: atomic_load_nand32 +; CHECK: call #__sync_fetch_and_nand_4 +define i32 @atomic_load_nand32(i32* %foo) { + %val = atomicrmw nand i32* %foo, i32 13 seq_cst + ret i32 %val +} + +; CHECK-LABEL: atomic_load_nand64 +; CHECK: call #__sync_fetch_and_nand_8 +define i64 @atomic_load_nand64(i64* %foo) { + %val = atomicrmw nand i64* %foo, i64 13 seq_cst + ret i64 %val +} diff --git a/llvm/test/CodeGen/MSP430/atomics/load_or.ll b/llvm/test/CodeGen/MSP430/atomics/load_or.ll new file mode 
100644 --- /dev/null +++ b/llvm/test/CodeGen/MSP430/atomics/load_or.ll @@ -0,0 +1,29 @@ +; RUN: llc -march=msp430 < %s | FileCheck %s + +; CHECK-LABEL: atomic_load_or8 +; CHECK: call #__sync_fetch_and_or_1 +define i8 @atomic_load_or8(i8* %foo) { + %val = atomicrmw or i8* %foo, i8 13 seq_cst + ret i8 %val +} + +; CHECK-LABEL: atomic_load_or16 +; CHECK: call #__sync_fetch_and_or_2 +define i16 @atomic_load_or16(i16* %foo) { + %val = atomicrmw or i16* %foo, i16 13 seq_cst + ret i16 %val +} + +; CHECK-LABEL: atomic_load_or32 +; CHECK: call #__sync_fetch_and_or_4 +define i32 @atomic_load_or32(i32* %foo) { + %val = atomicrmw or i32* %foo, i32 13 seq_cst + ret i32 %val +} + +; CHECK-LABEL: atomic_load_or64 +; CHECK: call #__sync_fetch_and_or_8 +define i64 @atomic_load_or64(i64* %foo) { + %val = atomicrmw or i64* %foo, i64 13 seq_cst + ret i64 %val +} diff --git a/llvm/test/CodeGen/MSP430/atomics/load_sub.ll b/llvm/test/CodeGen/MSP430/atomics/load_sub.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/MSP430/atomics/load_sub.ll @@ -0,0 +1,29 @@ +; RUN: llc -march=msp430 < %s | FileCheck %s + +; CHECK-LABEL: atomic_load_sub8 +; CHECK: call #__sync_fetch_and_sub_1 +define i8 @atomic_load_sub8(i8* %foo) { + %val = atomicrmw sub i8* %foo, i8 13 seq_cst + ret i8 %val +} + +; CHECK-LABEL: atomic_load_sub16 +; CHECK: call #__sync_fetch_and_sub_2 +define i16 @atomic_load_sub16(i16* %foo) { + %val = atomicrmw sub i16* %foo, i16 13 seq_cst + ret i16 %val +} + +; CHECK-LABEL: atomic_load_sub32 +; CHECK: call #__sync_fetch_and_sub_4 +define i32 @atomic_load_sub32(i32* %foo) { + %val = atomicrmw sub i32* %foo, i32 13 seq_cst + ret i32 %val +} + +; CHECK-LABEL: atomic_load_sub64 +; CHECK: call #__sync_fetch_and_sub_8 +define i64 @atomic_load_sub64(i64* %foo) { + %val = atomicrmw sub i64* %foo, i64 13 seq_cst + ret i64 %val +} diff --git a/llvm/test/CodeGen/MSP430/atomics/load_swap.ll b/llvm/test/CodeGen/MSP430/atomics/load_swap.ll new file mode 100644 --- /dev/null +++ 
b/llvm/test/CodeGen/MSP430/atomics/load_swap.ll @@ -0,0 +1,30 @@ +; RUN: llc -march=msp430 < %s | FileCheck %s + +; CHECK-LABEL: atomic_load_swap8 +; CHECK: call #__sync_lock_test_and_set_1 +define i8 @atomic_load_swap8(i8* %foo) { + %val = atomicrmw xchg i8* %foo, i8 13 seq_cst + ret i8 %val +} + + +; CHECK-LABEL: atomic_load_swap16 +; CHECK: call #__sync_lock_test_and_set_2 +define i16 @atomic_load_swap16(i16* %foo) { + %val = atomicrmw xchg i16* %foo, i16 13 seq_cst + ret i16 %val +} + +; CHECK-LABEL: atomic_load_swap32 +; CHECK: call #__sync_lock_test_and_set_4 +define i32 @atomic_load_swap32(i32* %foo) { + %val = atomicrmw xchg i32* %foo, i32 13 seq_cst + ret i32 %val +} + +; CHECK-LABEL: atomic_load_swap64 +; CHECK: call #__sync_lock_test_and_set_8 +define i64 @atomic_load_swap64(i64* %foo) { + %val = atomicrmw xchg i64* %foo, i64 13 seq_cst + ret i64 %val +} diff --git a/llvm/test/CodeGen/MSP430/atomics/load_umax.ll b/llvm/test/CodeGen/MSP430/atomics/load_umax.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/MSP430/atomics/load_umax.ll @@ -0,0 +1,29 @@ +; RUN: llc -march=msp430 < %s | FileCheck %s + +; CHECK-LABEL: atomic_load_umax8 +; CHECK: call #__sync_fetch_and_umax_1 +define i8 @atomic_load_umax8(i8* %foo) { + %val = atomicrmw umax i8* %foo, i8 13 seq_cst + ret i8 %val +} + +; CHECK-LABEL: atomic_load_umax16 +; CHECK: call #__sync_fetch_and_umax_2 +define i16 @atomic_load_umax16(i16* %foo) { + %val = atomicrmw umax i16* %foo, i16 13 seq_cst + ret i16 %val +} + +; CHECK-LABEL: atomic_load_umax32 +; CHECK: call #__sync_fetch_and_umax_4 +define i32 @atomic_load_umax32(i32* %foo) { + %val = atomicrmw umax i32* %foo, i32 13 seq_cst + ret i32 %val +} + +; CHECK-LABEL: atomic_load_umax64 +; CHECK: call #__sync_fetch_and_umax_8 +define i64 @atomic_load_umax64(i64* %foo) { + %val = atomicrmw umax i64* %foo, i64 13 seq_cst + ret i64 %val +} diff --git a/llvm/test/CodeGen/MSP430/atomics/load_umin.ll b/llvm/test/CodeGen/MSP430/atomics/load_umin.ll new 
file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/MSP430/atomics/load_umin.ll @@ -0,0 +1,29 @@ +; RUN: llc -march=msp430 < %s | FileCheck %s + +; CHECK-LABEL: atomic_load_umin8 +; CHECK: call #__sync_fetch_and_umin_1 +define i8 @atomic_load_umin8(i8* %foo) { + %val = atomicrmw umin i8* %foo, i8 13 seq_cst + ret i8 %val +} + +; CHECK-LABEL: atomic_load_umin16 +; CHECK: call #__sync_fetch_and_umin_2 +define i16 @atomic_load_umin16(i16* %foo) { + %val = atomicrmw umin i16* %foo, i16 13 seq_cst + ret i16 %val +} + +; CHECK-LABEL: atomic_load_umin32 +; CHECK: call #__sync_fetch_and_umin_4 +define i32 @atomic_load_umin32(i32* %foo) { + %val = atomicrmw umin i32* %foo, i32 13 seq_cst + ret i32 %val +} + +; CHECK-LABEL: atomic_load_umin64 +; CHECK: call #__sync_fetch_and_umin_8 +define i64 @atomic_load_umin64(i64* %foo) { + %val = atomicrmw umin i64* %foo, i64 13 seq_cst + ret i64 %val +} diff --git a/llvm/test/CodeGen/MSP430/atomics/load_xor.ll b/llvm/test/CodeGen/MSP430/atomics/load_xor.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/MSP430/atomics/load_xor.ll @@ -0,0 +1,29 @@ +; RUN: llc -march=msp430 < %s | FileCheck %s + +; CHECK-LABEL: atomic_load_xor8 +; CHECK: call #__sync_fetch_and_xor_1 +define i8 @atomic_load_xor8(i8* %foo) { + %val = atomicrmw xor i8* %foo, i8 13 seq_cst + ret i8 %val +} + +; CHECK-LABEL: atomic_load_xor16 +; CHECK: call #__sync_fetch_and_xor_2 +define i16 @atomic_load_xor16(i16* %foo) { + %val = atomicrmw xor i16* %foo, i16 13 seq_cst + ret i16 %val +} + +; CHECK-LABEL: atomic_load_xor32 +; CHECK: call #__sync_fetch_and_xor_4 +define i32 @atomic_load_xor32(i32* %foo) { + %val = atomicrmw xor i32* %foo, i32 13 seq_cst + ret i32 %val +} + +; CHECK-LABEL: atomic_load_xor64 +; CHECK: call #__sync_fetch_and_xor_8 +define i64 @atomic_load_xor64(i64* %foo) { + %val = atomicrmw xor i64* %foo, i64 13 seq_cst + ret i64 %val +} diff --git a/llvm/test/CodeGen/MSP430/atomics/store.ll b/llvm/test/CodeGen/MSP430/atomics/store.ll new file 
mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/MSP430/atomics/store.ll @@ -0,0 +1,29 @@ +; RUN: llc -march=msp430 < %s | FileCheck %s + +; CHECK-LABEL: atomic_store8 +; CHECK: call #__sync_lock_test_and_set_1 +define void @atomic_store8(i8* %foo) { + store atomic i8 1, i8* %foo unordered, align 1 + ret void +} + +; CHECK-LABEL: atomic_store16 +; CHECK: call #__sync_lock_test_and_set_2 +define void @atomic_store16(i16* %foo) { + store atomic i16 1, i16* %foo unordered, align 2 + ret void +} + +; CHECK-LABEL: atomic_store32 +; CHECK: call #__sync_lock_test_and_set_4 +define void @atomic_store32(i32* %foo) { + store atomic i32 1, i32* %foo unordered, align 4 + ret void +} + +; CHECK-LABEL: atomic_store64 +; CHECK: call #__sync_lock_test_and_set_8 +define void @atomic_store64(i64* %foo) { + store atomic i64 1, i64* %foo unordered, align 8 + ret void +} diff --git a/llvm/test/CodeGen/MSP430/atomics/swap.ll b/llvm/test/CodeGen/MSP430/atomics/swap.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/MSP430/atomics/swap.ll @@ -0,0 +1,29 @@ +; RUN: llc -march=msp430 < %s | FileCheck %s + +; CHECK-LABEL: atomic_swap8 +; CHECK: call #__sync_lock_test_and_set_1 +define i8 @atomic_swap8(i8* %foo) { + %val = atomicrmw xchg i8* %foo, i8 13 seq_cst + ret i8 %val +} + +; CHECK-LABEL: atomic_swap16 +; CHECK: call #__sync_lock_test_and_set_2 +define i16 @atomic_swap16(i16* %foo) { + %val = atomicrmw xchg i16* %foo, i16 13 seq_cst + ret i16 %val +} + +; CHECK-LABEL: atomic_swap32 +; CHECK: call #__sync_lock_test_and_set_4 +define i32 @atomic_swap32(i32* %foo) { + %val = atomicrmw xchg i32* %foo, i32 13 seq_cst + ret i32 %val +} + +; CHECK-LABEL: atomic_swap64 +; CHECK: call #__sync_lock_test_and_set_8 +define i64 @atomic_swap64(i64* %foo) { + %val = atomicrmw xchg i64* %foo, i64 13 seq_cst + ret i64 %val +}