diff --git a/compiler-rt/lib/builtins/atomic.c b/compiler-rt/lib/builtins/atomic.c
--- a/compiler-rt/lib/builtins/atomic.c
+++ b/compiler-rt/lib/builtins/atomic.c
@@ -23,11 +23,98 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include <stdbool.h>
 #include <stdint.h>
 #include <string.h>
 
 #include "assembly.h"
 
+#ifdef __clang__
+#define __CLANG_ATOMICS
+#elif defined(__GNUC__) && \
+    (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
+#define __GCC_ATOMICS
+#else
+#error "atomic.c requires Clang, or GCC >= 4.7 for the __atomic builtins"
+#endif
+
+#if defined(__CLANG_ATOMICS)
+#define ATOMIC_IS_LOCK_FREE(object) __c11_atomic_is_lock_free(sizeof(*(object)))
+#define ATOMIC_STORE_EXPLICIT(object, desired, order) \
+  __c11_atomic_store(object, desired, order)
+#define ATOMIC_LOAD_EXPLICIT(object, order) __c11_atomic_load(object, order)
+#define ATOMIC_COMPARE_EXCHANGE_WEAK_EXPLICIT(object, expected, desired, \
+                                              success, failure) \
+  __c11_atomic_compare_exchange_weak(object, expected, desired, success, \
+                                     failure)
+#define ATOMIC_COMPARE_EXCHANGE_STRONG_EXPLICIT(object, expected, desired, \
+                                                success, failure) \
+  __c11_atomic_compare_exchange_strong(object, expected, desired, success, \
+                                       failure)
+#define ATOMIC_EXCHANGE_EXPLICIT(object, desired, order) \
+  __c11_atomic_exchange(object, desired, order)
+#define ATOMIC_FETCH_ADD_EXPLICIT(object, operand, order) \
+  __c11_atomic_fetch_add(object, operand, order)
+#define ATOMIC_FETCH_AND_EXPLICIT(object, operand, order) \
+  __c11_atomic_fetch_and(object, operand, order)
+#define ATOMIC_FETCH_OR_EXPLICIT(object, operand, order) \
+  __c11_atomic_fetch_or(object, operand, order)
+#define ATOMIC_FETCH_SUB_EXPLICIT(object, operand, order) \
+  __c11_atomic_fetch_sub(object, operand, order)
+#define ATOMIC_FETCH_XOR_EXPLICIT(object, operand, order) \
+  __c11_atomic_fetch_xor(object, operand, order)
+#elif defined(__GCC_ATOMICS)
+#define ATOMIC_IS_LOCK_FREE(object) \
+  __atomic_is_lock_free(sizeof(*(object)), (object))
+#define ATOMIC_STORE_EXPLICIT(object, desired, order) \
+  __extension__({ \
+    __typeof__(object) __ptr = (object); \
+    __typeof__(*(object)) __tmp = (desired); \
+    __atomic_store(__ptr, &__tmp, (order)); \
+  })
+#define ATOMIC_LOAD_EXPLICIT(object, order) \
+  __extension__({ \
+    __typeof__(object) __ptr = (object); \
+    __typeof__(*(object)) __tmp; \
+    __atomic_load(__ptr, &__tmp, (order)); \
+    __tmp; \
+  })
+#define ATOMIC_COMPARE_EXCHANGE_WEAK_EXPLICIT(object, expected, desired, \
+                                              success, failure) \
+  __extension__({ \
+    __typeof__(object) __ptr = (object); \
+    __typeof__(*(object)) __tmp = (desired); \
+    __atomic_compare_exchange(__ptr, (expected), &__tmp, 1, (success), \
+                              (failure)); \
+  })
+#define ATOMIC_COMPARE_EXCHANGE_STRONG_EXPLICIT(object, expected, desired, \
+                                                success, failure) \
+  __extension__({ \
+    __typeof__(object) __ptr = (object); \
+    __typeof__(*(object)) __tmp = (desired); \
+    __atomic_compare_exchange(__ptr, (expected), &__tmp, 0, (success), \
+                              (failure)); \
+  })
+#define ATOMIC_EXCHANGE_EXPLICIT(object, desired, order) \
+  __extension__({ \
+    __typeof__(object) __ptr = (object); \
+    __typeof__(*(object)) __val = (desired); \
+    __typeof__(*(object)) __tmp; \
+    __atomic_exchange(__ptr, &__val, &__tmp, (order)); \
+    __tmp; \
+  })
+#define ATOMIC_FETCH_ADD_EXPLICIT(object, operand, order) \
+  __atomic_fetch_add(object, operand, order)
+#define ATOMIC_FETCH_AND_EXPLICIT(object, operand, order) \
+  __atomic_fetch_and(object, operand, order)
+#define ATOMIC_FETCH_OR_EXPLICIT(object, operand, order) \
+  __atomic_fetch_or(object, operand, order)
+#define ATOMIC_FETCH_SUB_EXPLICIT(object, operand, order) \
+  __atomic_fetch_sub(object, operand, order)
+#define ATOMIC_FETCH_XOR_EXPLICIT(object, operand, order) \
+  __atomic_fetch_xor(object, operand, order)
+#endif
+
 // Clang objects if you redefine a builtin. This little hack allows us to
 // define a function with the same name as an intrinsic.
 #pragma redefine_extname __atomic_load_c SYMBOL_NAME(__atomic_load)
@@ -88,14 +175,14 @@ typedef _Atomic(uintptr_t) Lock;
 
 /// Unlock a lock. This is a release operation.
 __inline static void unlock(Lock *l) {
-  __c11_atomic_store(l, 0, __ATOMIC_RELEASE);
+  ATOMIC_STORE_EXPLICIT(l, 0, __ATOMIC_RELEASE);
 }
 /// Locks a lock. In the current implementation, this is potentially
 /// unbounded in the contended case.
 __inline static void lock(Lock *l) {
   uintptr_t old = 0;
-  while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
-                                             __ATOMIC_RELAXED))
+  while (!ATOMIC_COMPARE_EXCHANGE_WEAK_EXPLICIT(l, &old, 1, __ATOMIC_ACQUIRE,
+                                                __ATOMIC_RELAXED))
     old = 0;
 }
 /// locks for atomic operations
@@ -122,10 +209,10 @@
 /// Macros for determining whether a size is lock free. Clang can not yet
 /// codegen __atomic_is_lock_free(16), so for now we assume 16-byte values are
 /// not lock free.
-#define IS_LOCK_FREE_1 __c11_atomic_is_lock_free(1)
-#define IS_LOCK_FREE_2 __c11_atomic_is_lock_free(2)
-#define IS_LOCK_FREE_4 __c11_atomic_is_lock_free(4)
-#define IS_LOCK_FREE_8 __c11_atomic_is_lock_free(8)
+#define IS_LOCK_FREE_1 ATOMIC_IS_LOCK_FREE((uint8_t *)NULL)
+#define IS_LOCK_FREE_2 ATOMIC_IS_LOCK_FREE((uint16_t *)NULL)
+#define IS_LOCK_FREE_4 ATOMIC_IS_LOCK_FREE((uint32_t *)NULL)
+#define IS_LOCK_FREE_8 ATOMIC_IS_LOCK_FREE((uint64_t *)NULL)
 #define IS_LOCK_FREE_16 0
 
 /// Macro that calls the compiler-generated lock-free versions of functions
@@ -164,9 +251,10 @@
 
 /// An atomic load operation. This is atomic with respect to the source
 /// pointer only.
+void __atomic_load_c(int size, void *src, void *dest, int model);
 void __atomic_load_c(int size, void *src, void *dest, int model) {
 #define LOCK_FREE_ACTION(type) \
-  *((type *)dest) = __c11_atomic_load((_Atomic(type) *)src, model); \
+  *((type *)dest) = ATOMIC_LOAD_EXPLICIT((_Atomic(type) *)src, model); \
   return;
   LOCK_FREE_CASES();
 #undef LOCK_FREE_ACTION
@@ -178,9 +266,10 @@
 
 /// An atomic store operation. This is atomic with respect to the destination
 /// pointer only.
+void __atomic_store_c(int size, void *dest, void *src, int model);
 void __atomic_store_c(int size, void *dest, void *src, int model) {
 #define LOCK_FREE_ACTION(type) \
-  __c11_atomic_store((_Atomic(type) *)dest, *(type *)src, model); \
+  ATOMIC_STORE_EXPLICIT((_Atomic(type) *)dest, *(type *)src, model); \
   return;
   LOCK_FREE_CASES();
 #undef LOCK_FREE_ACTION
@@ -195,10 +284,12 @@
 /// they are not, then this stores the current value from *ptr in *expected.
 ///
 /// This function returns 1 if the exchange takes place or 0 if it fails.
+int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
+                                void *desired, int success, int failure);
 int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
                                 void *desired, int success, int failure) {
 #define LOCK_FREE_ACTION(type) \
-  return __c11_atomic_compare_exchange_strong( \
+  return ATOMIC_COMPARE_EXCHANGE_STRONG_EXPLICIT( \
       (_Atomic(type) *)ptr, (type *)expected, *(type *)desired, success, \
       failure)
   LOCK_FREE_CASES();
@@ -217,10 +308,11 @@
 
 /// Performs an atomic exchange operation between two pointers. This is atomic
 /// with respect to the target address.
+void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model);
 void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
 #define LOCK_FREE_ACTION(type) \
   *(type *)old = \
-      __c11_atomic_exchange((_Atomic(type) *)ptr, *(type *)val, model); \
+      ATOMIC_EXCHANGE_EXPLICIT((_Atomic(type) *)ptr, *(type *)val, model); \
   return;
   LOCK_FREE_CASES();
 #undef LOCK_FREE_ACTION
@@ -251,9 +343,10 @@
 #endif
 
 #define OPTIMISED_CASE(n, lockfree, type) \
+  type __atomic_load_##n(type *src, int model); \
   type __atomic_load_##n(type *src, int model) { \
     if (lockfree) \
-      return __c11_atomic_load((_Atomic(type) *)src, model); \
+      return ATOMIC_LOAD_EXPLICIT((_Atomic(type) *)src, model); \
     Lock *l = lock_for_pointer(src); \
     lock(l); \
     type val = *src; \
@@ -264,9 +357,10 @@
 #undef OPTIMISED_CASE
 
 #define OPTIMISED_CASE(n, lockfree, type) \
+  void __atomic_store_##n(type *dest, type val, int model); \
   void __atomic_store_##n(type *dest, type val, int model) { \
     if (lockfree) { \
-      __c11_atomic_store((_Atomic(type) *)dest, val, model); \
+      ATOMIC_STORE_EXPLICIT((_Atomic(type) *)dest, val, model); \
       return; \
     } \
     Lock *l = lock_for_pointer(dest); \
@@ -279,9 +373,10 @@
 #undef OPTIMISED_CASE
 
 #define OPTIMISED_CASE(n, lockfree, type) \
+  type __atomic_exchange_##n(type *dest, type val, int model); \
  type __atomic_exchange_##n(type *dest, type val, int model) { \
     if (lockfree) \
-      return __c11_atomic_exchange((_Atomic(type) *)dest, val, model); \
+      return ATOMIC_EXCHANGE_EXPLICIT((_Atomic(type) *)dest, val, model); \
     Lock *l = lock_for_pointer(dest); \
     lock(l); \
     type tmp = *dest; \
@@ -293,10 +388,12 @@
 #undef OPTIMISED_CASE
 
 #define OPTIMISED_CASE(n, lockfree, type) \
-  int __atomic_compare_exchange_##n(type *ptr, type *expected, type desired, \
-                                    int success, int failure) { \
+  bool __atomic_compare_exchange_##n(type *ptr, type *expected, type desired, \
+                                     int success, int failure); \
+  bool __atomic_compare_exchange_##n(type *ptr, type *expected, type desired, \
+                                     int success, int failure) { \
     if (lockfree) \
-      return __c11_atomic_compare_exchange_strong( \
+      return ATOMIC_COMPARE_EXCHANGE_STRONG_EXPLICIT( \
          (_Atomic(type) *)ptr, expected, desired, success, failure); \
     Lock *l = lock_for_pointer(ptr); \
     lock(l); \
@@ -316,9 +413,11 @@
 
 // Atomic read-modify-write operations for integers of various sizes.
 ////////////////////////////////////////////////////////////////////////////////
-#define ATOMIC_RMW(n, lockfree, type, opname, op) \
+#define ATOMIC_RMW(n, lockfree, type, opname, OPNAME, op) \
+  type __atomic_fetch_##opname##_##n(type *ptr, type val, int model); \
   type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) { \
     if (lockfree) \
-      return __c11_atomic_fetch_##opname((_Atomic(type) *)ptr, val, model); \
+      return ATOMIC_FETCH_##OPNAME##_EXPLICIT((_Atomic(type) *)ptr, val, \
+                                              model); \
     Lock *l = lock_for_pointer(ptr); \
     lock(l); \
     type tmp = *ptr; \
@@ -327,18 +426,23 @@
     return tmp; \
   }
 
-#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
+#define OPTIMISED_CASE(n, lockfree, type) \
+  ATOMIC_RMW(n, lockfree, type, add, ADD, +)
 OPTIMISED_CASES
 #undef OPTIMISED_CASE
-#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, sub, -)
+#define OPTIMISED_CASE(n, lockfree, type) \
+  ATOMIC_RMW(n, lockfree, type, sub, SUB, -)
 OPTIMISED_CASES
 #undef OPTIMISED_CASE
-#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, and, &)
+#define OPTIMISED_CASE(n, lockfree, type) \
+  ATOMIC_RMW(n, lockfree, type, and, AND, &)
 OPTIMISED_CASES
 #undef OPTIMISED_CASE
-#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, or, |)
+#define OPTIMISED_CASE(n, lockfree, type) \
+  ATOMIC_RMW(n, lockfree, type, or, OR, |)
 OPTIMISED_CASES
 #undef OPTIMISED_CASE
-#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^)
+#define OPTIMISED_CASE(n, lockfree, type) \
+  ATOMIC_RMW(n, lockfree, type, xor, XOR, ^)
 OPTIMISED_CASES
 #undef OPTIMISED_CASE
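
Testing note (not part of the patch; the file name, struct, and link line are
illustrative assumptions). ATOMIC_RMW takes the operation name twice on
purpose: lowercase for the exported ABI symbol (__atomic_fetch_add_4 and
friends, which is what both compilers emit calls to) and uppercase for the
ATOMIC_FETCH_*_EXPLICIT dispatch macro. Because IS_LOCK_FREE_16 is hard-coded
to 0, a 16-byte type always routes through the lock-based __atomic_*_c
fallbacks, so a minimal smoke test for both the __CLANG_ATOMICS and
__GCC_ATOMICS paths is:

// atomic_smoke.c -- minimal sketch; the generic __atomic_load and
// __atomic_compare_exchange builtins used here exist in both GCC >= 4.7
// and Clang.
#include <stdint.h>
#include <stdio.h>

// 16 bytes: never lock-free in atomic.c, so these calls reach the
// lock-protected library fallbacks instead of inline atomics.
typedef struct {
  uint64_t lo;
  uint64_t hi;
} pair128;

int main(void) {
  pair128 obj = {1, 2};
  pair128 expected = {1, 2};
  pair128 desired = {3, 4};
  pair128 loaded;

  // Typically lowers to a call to __atomic_load(16, &obj, &loaded, model).
  __atomic_load(&obj, &loaded, __ATOMIC_SEQ_CST);

  // Typically lowers to a call to the generic __atomic_compare_exchange.
  int ok = __atomic_compare_exchange(&obj, &expected, &desired, /*weak=*/0,
                                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);

  // Expected output: loaded={1,2} ok=1 obj={3,4}.
  printf("loaded={%llu,%llu} ok=%d obj={%llu,%llu}\n",
         (unsigned long long)loaded.lo, (unsigned long long)loaded.hi, ok,
         (unsigned long long)obj.lo, (unsigned long long)obj.hi);
  return 0;
}

Link against the freshly built builtins archive rather than libatomic, for
example (path is an assumption):

  gcc -std=c11 atomic_smoke.c ./libclang_rt.builtins-x86_64.a -o atomic_smoke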