Index: compiler-rt/lib/builtins/atomic.c
===================================================================
--- compiler-rt/lib/builtins/atomic.c
+++ compiler-rt/lib/builtins/atomic.c
@@ -23,6 +23,7 @@
 //
 //===----------------------------------------------------------------------===//

+#include <stdbool.h>
 #include <stdint.h>
 #include <string.h>

@@ -85,17 +86,17 @@
 static Lock locks[SPINLOCK_COUNT]; // initialized to OS_SPINLOCK_INIT which is 0

 #else
-typedef _Atomic(uintptr_t) Lock;
+typedef uintptr_t Lock;
 /// Unlock a lock. This is a release operation.
 __inline static void unlock(Lock *l) {
-  __c11_atomic_store(l, 0, __ATOMIC_RELEASE);
+  __atomic_store_n(l, 0, __ATOMIC_RELEASE);
 }
 /// Locks a lock. In the current implementation, this is potentially
 /// unbounded in the contended case.
 __inline static void lock(Lock *l) {
   uintptr_t old = 0;
-  while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
-                                             __ATOMIC_RELAXED))
+  while (!__atomic_compare_exchange_n(l, &old, 1, 1, __ATOMIC_ACQUIRE,
+                                      __ATOMIC_RELAXED))
     old = 0;
 }
 /// locks for atomic operations
@@ -122,10 +123,11 @@
 /// Macros for determining whether a size is lock free. Clang can not yet
 /// codegen __atomic_is_lock_free(16), so for now we assume 16-byte values are
 /// not lock free.
-#define IS_LOCK_FREE_1 __c11_atomic_is_lock_free(1)
-#define IS_LOCK_FREE_2 __c11_atomic_is_lock_free(2)
-#define IS_LOCK_FREE_4 __c11_atomic_is_lock_free(4)
-#define IS_LOCK_FREE_8 __c11_atomic_is_lock_free(8)
+#define __IS_LOCK_FREE(x) __atomic_is_lock_free(sizeof(*(x)), (x))
+#define IS_LOCK_FREE_1 __IS_LOCK_FREE((uint8_t *)NULL)
+#define IS_LOCK_FREE_2 __IS_LOCK_FREE((uint16_t *)NULL)
+#define IS_LOCK_FREE_4 __IS_LOCK_FREE((uint32_t *)NULL)
+#define IS_LOCK_FREE_8 __IS_LOCK_FREE((uint64_t *)NULL)
 #define IS_LOCK_FREE_16 0

 /// Macro that calls the compiler-generated lock-free versions of functions
@@ -164,9 +166,10 @@

 /// An atomic load operation. This is atomic with respect to the source
 /// pointer only.
+void __atomic_load_c(int size, void *src, void *dest, int model);
 void __atomic_load_c(int size, void *src, void *dest, int model) {
 #define LOCK_FREE_ACTION(type) \
-  *((type *)dest) = __c11_atomic_load((_Atomic(type) *)src, model); \
+  *((type *)dest) = __atomic_load_n((type *)src, model); \
   return;
   LOCK_FREE_CASES();
 #undef LOCK_FREE_ACTION
@@ -178,9 +181,10 @@

 /// An atomic store operation. This is atomic with respect to the destination
 /// pointer only.
+void __atomic_store_c(int size, void *dest, void *src, int model);
 void __atomic_store_c(int size, void *dest, void *src, int model) {
 #define LOCK_FREE_ACTION(type) \
-  __c11_atomic_store((_Atomic(type) *)dest, *(type *)src, model); \
+  __atomic_store_n((type *)dest, *(type *)src, model); \
   return;
   LOCK_FREE_CASES();
 #undef LOCK_FREE_ACTION
@@ -195,12 +199,13 @@
 /// they are not, then this stores the current value from *ptr in *expected.
 ///
 /// This function returns 1 if the exchange takes place or 0 if it fails.
+int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
+                                void *desired, int success, int failure);
 int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
                                 void *desired, int success, int failure) {
 #define LOCK_FREE_ACTION(type) \
-  return __c11_atomic_compare_exchange_strong( \
-      (_Atomic(type) *)ptr, (type *)expected, *(type *)desired, success, \
-      failure)
+  return __atomic_compare_exchange_n((type *)ptr, (type *)expected, \
+                                     *(type *)desired, 0, success, failure)
   LOCK_FREE_CASES();
 #undef LOCK_FREE_ACTION
   Lock *l = lock_for_pointer(ptr);
@@ -217,10 +222,10 @@

 /// Performs an atomic exchange operation between two pointers. This is atomic
 /// with respect to the target address.
+void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model);
 void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
 #define LOCK_FREE_ACTION(type) \
-  *(type *)old = \
-      __c11_atomic_exchange((_Atomic(type) *)ptr, *(type *)val, model); \
+  *(type *)old = __atomic_exchange_n((type *)ptr, *(type *)val, model); \
   return;
   LOCK_FREE_CASES();
 #undef LOCK_FREE_ACTION
@@ -251,9 +256,10 @@
 #endif

 #define OPTIMISED_CASE(n, lockfree, type) \
+  type __atomic_load_##n(type *src, int model); \
   type __atomic_load_##n(type *src, int model) { \
     if (lockfree) \
-      return __c11_atomic_load((_Atomic(type) *)src, model); \
+      return __atomic_load_n((type *)src, model); \
     Lock *l = lock_for_pointer(src); \
     lock(l); \
     type val = *src; \
@@ -264,9 +270,10 @@
 #undef OPTIMISED_CASE

 #define OPTIMISED_CASE(n, lockfree, type) \
+  void __atomic_store_##n(type *dest, type val, int model); \
   void __atomic_store_##n(type *dest, type val, int model) { \
     if (lockfree) { \
-      __c11_atomic_store((_Atomic(type) *)dest, val, model); \
+      __atomic_store_n(dest, val, model); \
       return; \
     } \
     Lock *l = lock_for_pointer(dest); \
@@ -279,9 +286,10 @@
 #undef OPTIMISED_CASE

 #define OPTIMISED_CASE(n, lockfree, type) \
+  type __atomic_exchange_##n(type *dest, type val, int model); \
   type __atomic_exchange_##n(type *dest, type val, int model) { \
     if (lockfree) \
-      return __c11_atomic_exchange((_Atomic(type) *)dest, val, model); \
+      return __atomic_exchange_n(dest, val, model); \
     Lock *l = lock_for_pointer(dest); \
     lock(l); \
     type tmp = *dest; \
@@ -293,11 +301,13 @@
 #undef OPTIMISED_CASE

 #define OPTIMISED_CASE(n, lockfree, type) \
-  int __atomic_compare_exchange_##n(type *ptr, type *expected, type desired, \
-                                    int success, int failure) { \
+  bool __atomic_compare_exchange_##n(type *ptr, type *expected, type desired, \
+                                     int success, int failure); \
+  bool __atomic_compare_exchange_##n(type *ptr, type *expected, type desired, \
+                                     int success, int failure) { \
     if (lockfree) \
-      return __c11_atomic_compare_exchange_strong( \
-          (_Atomic(type) *)ptr, expected, desired, success, failure); \
+      return __atomic_compare_exchange_n(ptr, expected, desired, 0, success, \
+                                         failure); \
     Lock *l = lock_for_pointer(ptr); \
     lock(l); \
     if (*ptr == *expected) { \
@@ -316,9 +326,10 @@
 // Atomic read-modify-write operations for integers of various sizes.
 ////////////////////////////////////////////////////////////////////////////////
 #define ATOMIC_RMW(n, lockfree, type, opname, op) \
+  type __atomic_fetch_##opname##_##n(type *ptr, type val, int model); \
   type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) { \
     if (lockfree) \
-      return __c11_atomic_fetch_##opname((_Atomic(type) *)ptr, val, model); \
+      return __atomic_fetch_##opname(ptr, val, model); \
     Lock *l = lock_for_pointer(ptr); \
     lock(l); \
     type tmp = *ptr; \
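
Note for reviewers: the standalone sketch below is not part of the patch; the file name and build line are illustrative. It shows the translation the patch applies throughout: the Clang-only __c11_atomic_* builtins require an _Atomic-qualified object, while the GCC-compatible __atomic_* builtins operate on ordinary pointers, so the same spinlock code now builds with both GCC and Clang.

/* spinlock_sketch.c - illustrative only; mirrors the lock()/unlock()
   pattern in the patch using the GCC-compatible builtins.
   Build with either compiler: cc -c spinlock_sketch.c */
#include <stdint.h>

typedef uintptr_t Lock; /* plain integer; no _Atomic qualifier required */

static void lock(Lock *l) {
  uintptr_t old = 0;
  /* Weak CAS (fourth argument = 1) may fail spuriously, so spin;
     acquire ordering on success, relaxed ordering on failure. */
  while (!__atomic_compare_exchange_n(l, &old, 1, 1, __ATOMIC_ACQUIRE,
                                      __ATOMIC_RELAXED))
    old = 0; /* the failed CAS wrote the observed value into old */
}

static void unlock(Lock *l) {
  __atomic_store_n(l, 0, __ATOMIC_RELEASE); /* release store */
}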
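The compare-and-exchange hunks rely on one further detail: passing 0 as the weak flag makes __atomic_compare_exchange_n a drop-in for __c11_atomic_compare_exchange_strong. It returns true when the exchange happens and, on failure, writes the value observed at *ptr back through the expected pointer. A minimal sketch (the cas32 name is illustrative, not from the patch):

/* cas_sketch.c - illustrative only. */
#include <stdbool.h>
#include <stdint.h>

bool cas32(uint32_t *ptr, uint32_t *expected, uint32_t desired) {
  /* Fourth argument 0 selects the strong CAS, matching the
     __c11_atomic_compare_exchange_strong calls this patch replaces. */
  return __atomic_compare_exchange_n(ptr, expected, desired, 0,
                                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}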