diff --git a/compiler-rt/lib/builtins/atomic.c b/compiler-rt/lib/builtins/atomic.c
--- a/compiler-rt/lib/builtins/atomic.c
+++ b/compiler-rt/lib/builtins/atomic.c
@@ -23,6 +23,8 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include <stdatomic.h>
+#include <stdbool.h>
 #include <stdint.h>
 #include <string.h>
 
@@ -88,14 +90,14 @@
 typedef _Atomic(uintptr_t) Lock;
 /// Unlock a lock. This is a release operation.
 __inline static void unlock(Lock *l) {
-  __c11_atomic_store(l, 0, __ATOMIC_RELEASE);
+  atomic_store_explicit(l, 0, memory_order_release);
 }
 /// Locks a lock. In the current implementation, this is potentially
 /// unbounded in the contended case.
 __inline static void lock(Lock *l) {
   uintptr_t old = 0;
-  while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
-                                             __ATOMIC_RELAXED))
+  while (!atomic_compare_exchange_weak_explicit(
+      l, &old, 1, memory_order_acquire, memory_order_relaxed))
     old = 0;
 }
 /// locks for atomic operations
@@ -122,10 +124,10 @@
 /// Macros for determining whether a size is lock free. Clang can not yet
 /// codegen __atomic_is_lock_free(16), so for now we assume 16-byte values are
 /// not lock free.
-#define IS_LOCK_FREE_1 __c11_atomic_is_lock_free(1)
-#define IS_LOCK_FREE_2 __c11_atomic_is_lock_free(2)
-#define IS_LOCK_FREE_4 __c11_atomic_is_lock_free(4)
-#define IS_LOCK_FREE_8 __c11_atomic_is_lock_free(8)
+#define IS_LOCK_FREE_1 atomic_is_lock_free((uint8_t *)NULL)
+#define IS_LOCK_FREE_2 atomic_is_lock_free((uint16_t *)NULL)
+#define IS_LOCK_FREE_4 atomic_is_lock_free((uint32_t *)NULL)
+#define IS_LOCK_FREE_8 atomic_is_lock_free((uint64_t *)NULL)
 #define IS_LOCK_FREE_16 0
 
 /// Macro that calls the compiler-generated lock-free versions of functions
@@ -164,9 +166,10 @@
 
 /// An atomic load operation. This is atomic with respect to the source
 /// pointer only.
-void __atomic_load_c(int size, void *src, void *dest, int model) {
+void __atomic_load_c(int size, void *src, void *dest, memory_order model);
+void __atomic_load_c(int size, void *src, void *dest, memory_order model) {
 #define LOCK_FREE_ACTION(type)                                                 \
-  *((type *)dest) = __c11_atomic_load((_Atomic(type) *)src, model);            \
+  *((type *)dest) = atomic_load_explicit((_Atomic(type) *)src, model);         \
   return;
   LOCK_FREE_CASES();
 #undef LOCK_FREE_ACTION
@@ -178,9 +181,10 @@
 
 /// An atomic store operation. This is atomic with respect to the destination
 /// pointer only.
-void __atomic_store_c(int size, void *dest, void *src, int model) {
+void __atomic_store_c(int size, void *dest, void *src, memory_order model);
+void __atomic_store_c(int size, void *dest, void *src, memory_order model) {
 #define LOCK_FREE_ACTION(type)                                                 \
-  __c11_atomic_store((_Atomic(type) *)dest, *(type *)src, model);              \
+  atomic_store_explicit((_Atomic(type) *)dest, *(type *)src, model);           \
   return;
   LOCK_FREE_CASES();
 #undef LOCK_FREE_ACTION
@@ -195,10 +199,12 @@
 /// they are not, then this stores the current value from *ptr in *expected.
 ///
 /// This function returns 1 if the exchange takes place or 0 if it fails.
+int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
+                                void *desired, int success, int failure);
 int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
                                 void *desired, int success, int failure) {
 #define LOCK_FREE_ACTION(type)                                                 \
-  return __c11_atomic_compare_exchange_strong(                                 \
+  return atomic_compare_exchange_strong_explicit(                              \
       (_Atomic(type) *)ptr, (type *)expected, *(type *)desired, success,       \
       failure)
   LOCK_FREE_CASES();
@@ -217,10 +223,13 @@
 
 /// Performs an atomic exchange operation between two pointers. This is atomic
 /// with respect to the target address.
-void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
+void __atomic_exchange_c(int size, void *ptr, void *val, void *old,
+                         memory_order model);
+void __atomic_exchange_c(int size, void *ptr, void *val, void *old,
+                         memory_order model) {
 #define LOCK_FREE_ACTION(type)                                                 \
   *(type *)old =                                                               \
-      __c11_atomic_exchange((_Atomic(type) *)ptr, *(type *)val, model);        \
+      atomic_exchange_explicit((_Atomic(type) *)ptr, *(type *)val, model);     \
   return;
   LOCK_FREE_CASES();
 #undef LOCK_FREE_ACTION
@@ -251,9 +260,10 @@
 #endif
 
 #define OPTIMISED_CASE(n, lockfree, type)                                      \
-  type __atomic_load_##n(type *src, int model) {                               \
+  type __atomic_load_##n(type *src, memory_order model);                       \
+  type __atomic_load_##n(type *src, memory_order model) {                      \
     if (lockfree)                                                              \
-      return __c11_atomic_load((_Atomic(type) *)src, model);                   \
+      return atomic_load_explicit((_Atomic(type) *)src, model);                \
     Lock *l = lock_for_pointer(src);                                           \
     lock(l);                                                                   \
     type val = *src;                                                           \
@@ -264,9 +274,10 @@
 #undef OPTIMISED_CASE
 
 #define OPTIMISED_CASE(n, lockfree, type)                                      \
-  void __atomic_store_##n(type *dest, type val, int model) {                   \
+  void __atomic_store_##n(type *dest, type val, memory_order model);           \
+  void __atomic_store_##n(type *dest, type val, memory_order model) {          \
     if (lockfree) {                                                            \
-      __c11_atomic_store((_Atomic(type) *)dest, val, model);                   \
+      atomic_store_explicit((_Atomic(type) *)dest, val, model);                \
       return;                                                                  \
     }                                                                          \
     Lock *l = lock_for_pointer(dest);                                          \
@@ -279,9 +290,10 @@
 #undef OPTIMISED_CASE
 
 #define OPTIMISED_CASE(n, lockfree, type)                                      \
-  type __atomic_exchange_##n(type *dest, type val, int model) {                \
+  type __atomic_exchange_##n(type *dest, type val, memory_order model);        \
+  type __atomic_exchange_##n(type *dest, type val, memory_order model) {       \
    if (lockfree)                                                               \
-      return __c11_atomic_exchange((_Atomic(type) *)dest, val, model);         \
+      return atomic_exchange_explicit((_Atomic(type) *)dest, val, model);      \
     Lock *l = lock_for_pointer(dest);                                          \
     lock(l);                                                                   \
     type tmp = *dest;                                                          \
@@ -293,11 +305,20 @@
 #undef OPTIMISED_CASE
 
 #define OPTIMISED_CASE(n, lockfree, type)                                      \
-  int __atomic_compare_exchange_##n(type *ptr, type *expected, type desired,   \
-                                    int success, int failure) {                \
-    if (lockfree)                                                              \
-      return __c11_atomic_compare_exchange_strong(                             \
-          (_Atomic(type) *)ptr, expected, desired, success, failure);          \
+  bool __atomic_compare_exchange_##n(type *ptr, type *expected, type desired,  \
+                                     bool weak, memory_order success,          \
+                                     memory_order failure);                    \
+  bool __atomic_compare_exchange_##n(type *ptr, type *expected, type desired,  \
+                                     bool weak, memory_order success,          \
+                                     memory_order failure) {                   \
+    if (lockfree) {                                                            \
+      if (weak)                                                                \
+        return atomic_compare_exchange_weak_explicit(                          \
+            (_Atomic(type) *)ptr, expected, desired, success, failure);        \
+      else                                                                     \
+        return atomic_compare_exchange_strong_explicit(                        \
+            (_Atomic(type) *)ptr, expected, desired, success, failure);        \
+    }                                                                          \
     Lock *l = lock_for_pointer(ptr);                                           \
     lock(l);                                                                   \
     if (*ptr == *expected) {                                                   \
@@ -316,9 +337,13 @@
 // Atomic read-modify-write operations for integers of various sizes.
 ////////////////////////////////////////////////////////////////////////////////
 #define ATOMIC_RMW(n, lockfree, type, opname, op)                              \
-  type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) {         \
+  type __atomic_fetch_##opname##_##n(_Atomic(type) * ptr, type val,            \
+                                     memory_order model);                      \
+  type __atomic_fetch_##opname##_##n(_Atomic(type) * ptr, type val,            \
+                                     memory_order model) {                     \
     if (lockfree)                                                              \
-      return __c11_atomic_fetch_##opname((_Atomic(type) *)ptr, val, model);    \
+      return atomic_fetch_##opname##_explicit((_Atomic(type) *)ptr, val,       \
+                                              model);                          \
     Lock *l = lock_for_pointer(ptr);                                           \
     lock(l);                                                                   \
     type tmp = *ptr;                                                           \
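
The change itself is a mechanical one-for-one substitution: each Clang-specific
__c11_atomic_* builtin and __ATOMIC_* constant becomes the corresponding C11
generic function and memory_order enumerator from <stdatomic.h>, and every
externally visible helper gains a separate prototype ahead of its definition
(presumably to satisfy -Wmissing-prototypes). A minimal standalone sketch of
the mapping, for illustration only and not part of the patch:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic(uint32_t) flag;

int main(void) {
  // Old spelling (Clang builtins, removed by this patch):
  //   __c11_atomic_store(&flag, 1, __ATOMIC_RELEASE);
  //   uint32_t v = __c11_atomic_load(&flag, __ATOMIC_ACQUIRE);
  // New spelling (portable C11 interfaces from <stdatomic.h>):
  atomic_store_explicit(&flag, 1, memory_order_release);
  uint32_t v = atomic_load_explicit(&flag, memory_order_acquire);
  printf("%u\n", v);
}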
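
The rewritten __atomic_compare_exchange_##n also grows a bool weak parameter
and dispatches to the weak or strong C11 form on the lock-free path. The
distinction matters to callers: the weak variant may fail spuriously (return
false even when *ptr == *expected), which is harmless inside a retry loop and
can be cheaper on LL/SC targets. A sketch of the canonical caller-side retry
pattern; the fetch_double helper is hypothetical, not part of the patch:

#include <stdatomic.h>
#include <stdint.h>

// Atomically double a counter. A spurious CAS failure simply retries, so the
// weak variant is appropriate here.
static uint32_t fetch_double(_Atomic(uint32_t) *obj) {
  uint32_t old = atomic_load_explicit(obj, memory_order_relaxed);
  while (!atomic_compare_exchange_weak_explicit(
      obj, &old, old * 2, memory_order_acq_rel, memory_order_relaxed))
    ;  // on failure, old has been reloaded with the current value
  return old;
}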
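
For sizes that are not lock free, every path above falls back to
lock_for_pointer, a helper left untouched by this patch: the address is hashed
into a fixed table of spinlocks (built from the same Lock type shown in the
second hunk), so operations on unrelated addresses rarely contend while bytes
within one small granule share a lock. A simplified sketch of that scheme; the
table size and hash below are illustrative, not the file's exact values:

#include <stdatomic.h>
#include <stdint.h>

typedef _Atomic(uintptr_t) Lock;

#define SPINLOCK_COUNT (1 << 10)
static Lock locks[SPINLOCK_COUNT];

// Map an address to the spinlock guarding it. Discarding the low bits first
// makes all bytes of one (up to 16-byte) access hash to the same lock.
static Lock *lock_for_pointer(void *ptr) {
  uintptr_t hash = (uintptr_t)ptr >> 4;
  return &locks[hash & (SPINLOCK_COUNT - 1)];
}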