Index: libcxx/include/atomic
===================================================================
--- libcxx/include/atomic
+++ libcxx/include/atomic
@@ -552,7 +552,9 @@
 #ifdef _LIBCPP_HAS_NO_THREADS
 #error <atomic> is not supported on this single threaded system
 #endif
-#if !defined(_LIBCPP_HAS_C_ATOMIC_IMP) && !defined(_LIBCPP_HAS_GCC_ATOMIC_IMP)
+#if !defined(_LIBCPP_HAS_C_ATOMIC_IMP) && \
+    !defined(_LIBCPP_HAS_GCC_ATOMIC_IMP) && \
+    !defined(_LIBCPP_HAS_EXTERNAL_ATOMIC_IMP)
 #error <atomic> is not implemented
 #endif
 #ifdef kill_dependency
@@ -577,6 +579,15 @@
 
 _LIBCPP_BEGIN_NAMESPACE_STD
 
+#ifndef _LIBCPP_ATOMIC_FLAG_TYPE
+#define _LIBCPP_ATOMIC_FLAG_TYPE bool
+#endif
+
+#ifndef _LIBCPP_ATOMIC_SCOPE_DEFAULT
+struct __cxx_atomic_scope { };
+#define _LIBCPP_ATOMIC_SCOPE_DEFAULT __cxx_atomic_scope
+#endif
+
 typedef enum memory_order
 {
     memory_order_relaxed, memory_order_consume, memory_order_acquire,
@@ -584,9 +595,9 @@
 } memory_order;
 
 #if defined(_LIBCPP_HAS_GCC_ATOMIC_IMP)
-namespace __gcc_atomic {
+
 template <typename _Tp>
-struct __gcc_atomic_t {
+struct __cxx_atomic_type {
 
 #if _GNUC_VER >= 501
   static_assert(is_trivially_copyable<_Tp>::value,
@@ -595,15 +606,17 @@
 
   _LIBCPP_INLINE_VISIBILITY
 #ifndef _LIBCPP_CXX03_LANG
-  __gcc_atomic_t() _NOEXCEPT = default;
+  __cxx_atomic_type() _NOEXCEPT = default;
 #else
-  __gcc_atomic_t() _NOEXCEPT : __a_value() {}
+  __cxx_atomic_type() _NOEXCEPT : __a_value() {}
 #endif // _LIBCPP_CXX03_LANG
-  _LIBCPP_CONSTEXPR explicit __gcc_atomic_t(_Tp value) _NOEXCEPT
+  _LIBCPP_CONSTEXPR explicit __cxx_atomic_type(_Tp value) _NOEXCEPT
     : __a_value(value) {}
   _Tp __a_value;
 };
-#define _Atomic(x) __gcc_atomic::__gcc_atomic_t<x>
+
+template <typename _Tp, typename _Sco = _LIBCPP_ATOMIC_SCOPE_DEFAULT>
+using __cxx_atomic_base_impl = __cxx_atomic_type<_Tp>;
 
 template <typename _Tp> _Tp __create();
 
@@ -639,22 +652,20 @@
             __ATOMIC_CONSUME))));
 }
 
-} // namespace __gcc_atomic
-
 template <typename _Tp>
 static inline
 typename enable_if<
-    __gcc_atomic::__can_assign<volatile _Atomic(_Tp)*, _Tp>::value>::type
-__c11_atomic_init(volatile _Atomic(_Tp)* __a, _Tp __val) {
+    __can_assign<volatile __cxx_atomic_type<_Tp>*, _Tp>::value>::type
+__cxx_atomic_init(volatile __cxx_atomic_type<_Tp>* __a, _Tp __val) {
   __a->__a_value = __val;
 }
 
 template <typename _Tp>
 static inline
 typename enable_if<
-    !__gcc_atomic::__can_assign<volatile _Atomic(_Tp)*, _Tp>::value &&
-     __gcc_atomic::__can_assign<         _Atomic(_Tp)*, _Tp>::value>::type
-__c11_atomic_init(volatile _Atomic(_Tp)* __a, _Tp __val) {
+    !__can_assign<volatile __cxx_atomic_type<_Tp>*, _Tp>::value &&
+     __can_assign<         __cxx_atomic_type<_Tp>*, _Tp>::value>::type
+__cxx_atomic_init(volatile __cxx_atomic_type<_Tp>* __a, _Tp __val) {
   // [atomics.types.generic]p1 guarantees _Tp is trivially copyable. Because
   // the default operator= in an object is not volatile, a byte-by-byte copy
   // is required.
@@ -667,105 +678,105 @@
 }
 
 template <typename _Tp>
-static inline void __c11_atomic_init(_Atomic(_Tp)* __a, _Tp __val) {
+static inline void __cxx_atomic_init(__cxx_atomic_type<_Tp>* __a, _Tp __val) {
   __a->__a_value = __val;
 }
 
-static inline void __c11_atomic_thread_fence(memory_order __order) {
-  __atomic_thread_fence(__gcc_atomic::__to_gcc_order(__order));
+static inline void __cxx_atomic_thread_fence(memory_order __order) {
+  __atomic_thread_fence(__to_gcc_order(__order));
 }
 
-static inline void __c11_atomic_signal_fence(memory_order __order) {
-  __atomic_signal_fence(__gcc_atomic::__to_gcc_order(__order));
+static inline void __cxx_atomic_signal_fence(memory_order __order) {
+  __atomic_signal_fence(__to_gcc_order(__order));
 }
 
 template <typename _Tp>
-static inline void __c11_atomic_store(volatile _Atomic(_Tp)* __a, _Tp __val,
+static inline void __cxx_atomic_store(volatile __cxx_atomic_type<_Tp>* __a, _Tp __val,
                                       memory_order __order) {
-  return __atomic_store(&__a->__a_value, &__val,
-                        __gcc_atomic::__to_gcc_order(__order));
+  __atomic_store(&__a->__a_value, &__val,
+                 __to_gcc_order(__order));
 }
 
 template <typename _Tp>
-static inline void __c11_atomic_store(_Atomic(_Tp)* __a, _Tp __val,
+static inline void __cxx_atomic_store(__cxx_atomic_type<_Tp>* __a, _Tp __val,
                                       memory_order __order) {
   __atomic_store(&__a->__a_value, &__val,
-                 __gcc_atomic::__to_gcc_order(__order));
+                 __to_gcc_order(__order));
 }
 
 template <typename _Tp>
-static inline _Tp __c11_atomic_load(const volatile _Atomic(_Tp)* __a,
+static inline _Tp __cxx_atomic_load(const volatile __cxx_atomic_type<_Tp>* __a,
                                     memory_order __order) {
   _Tp __ret;
   __atomic_load(&__a->__a_value, &__ret,
-                __gcc_atomic::__to_gcc_order(__order));
+                __to_gcc_order(__order));
   return __ret;
 }
 
 template <typename _Tp>
-static inline _Tp __c11_atomic_load(const _Atomic(_Tp)* __a, memory_order __order) {
+static inline _Tp __cxx_atomic_load(const __cxx_atomic_type<_Tp>* __a, memory_order __order) {
   _Tp __ret;
   __atomic_load(&__a->__a_value, &__ret,
-                __gcc_atomic::__to_gcc_order(__order));
+                __to_gcc_order(__order));
   return __ret;
 }
 
 template <typename _Tp>
-static inline _Tp __c11_atomic_exchange(volatile _Atomic(_Tp)* __a,
+static inline _Tp __cxx_atomic_exchange(volatile __cxx_atomic_type<_Tp>* __a,
                                         _Tp __value, memory_order __order) {
   _Tp __ret;
   __atomic_exchange(&__a->__a_value, &__value, &__ret,
-                    __gcc_atomic::__to_gcc_order(__order));
+                    __to_gcc_order(__order));
   return __ret;
 }
 
 template <typename _Tp>
-static inline _Tp __c11_atomic_exchange(_Atomic(_Tp)* __a, _Tp __value,
+static inline _Tp __cxx_atomic_exchange(__cxx_atomic_type<_Tp>* __a, _Tp __value,
                                         memory_order __order) {
   _Tp __ret;
   __atomic_exchange(&__a->__a_value, &__value, &__ret,
-                    __gcc_atomic::__to_gcc_order(__order));
+                    __to_gcc_order(__order));
   return __ret;
 }
 
 template <typename _Tp>
-static inline bool __c11_atomic_compare_exchange_strong(
-    volatile _Atomic(_Tp)* __a, _Tp* __expected, _Tp __value,
+static inline bool __cxx_atomic_compare_exchange_strong(
+    volatile __cxx_atomic_type<_Tp>* __a, _Tp* __expected, _Tp __value,
     memory_order __success, memory_order __failure) {
   return __atomic_compare_exchange(&__a->__a_value, __expected, &__value,
                                    false,
-                                   __gcc_atomic::__to_gcc_order(__success),
-                                   __gcc_atomic::__to_gcc_failure_order(__failure));
+                                   __to_gcc_order(__success),
+                                   __to_gcc_failure_order(__failure));
 }
 
 template <typename _Tp>
-static inline bool __c11_atomic_compare_exchange_strong(
-    _Atomic(_Tp)* __a, _Tp* __expected, _Tp __value, memory_order __success,
+static inline bool __cxx_atomic_compare_exchange_strong(
+    __cxx_atomic_type<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success,
     memory_order __failure) {
   return __atomic_compare_exchange(&__a->__a_value, __expected, &__value,
                                    false,
-                                   __gcc_atomic::__to_gcc_order(__success),
-                                   __gcc_atomic::__to_gcc_failure_order(__failure));
+                                   __to_gcc_order(__success),
+                                   __to_gcc_failure_order(__failure));
 }
 
 template <typename _Tp>
-static inline bool __c11_atomic_compare_exchange_weak(
-    volatile _Atomic(_Tp)* __a, _Tp* __expected, _Tp __value,
+static inline bool __cxx_atomic_compare_exchange_weak(
+    volatile __cxx_atomic_type<_Tp>* __a, _Tp* __expected, _Tp __value,
     memory_order __success, memory_order __failure) {
   return __atomic_compare_exchange(&__a->__a_value, __expected, &__value,
                                    true,
-                                   __gcc_atomic::__to_gcc_order(__success),
-                                   __gcc_atomic::__to_gcc_failure_order(__failure));
+                                   __to_gcc_order(__success),
+                                   __to_gcc_failure_order(__failure));
 }
 
 template <typename _Tp>
-static inline bool __c11_atomic_compare_exchange_weak(
-    _Atomic(_Tp)* __a, _Tp* __expected, _Tp __value, memory_order __success,
+static inline bool __cxx_atomic_compare_exchange_weak(
+    __cxx_atomic_type<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success,
     memory_order __failure) {
   return __atomic_compare_exchange(&__a->__a_value, __expected, &__value,
                                    true,
-                                   __gcc_atomic::__to_gcc_order(__success),
-                                   __gcc_atomic::__to_gcc_failure_order(__failure));
+                                   __to_gcc_order(__success),
+                                   __to_gcc_failure_order(__failure));
 }
 
 template <typename _Tp>
@@ -782,74 +793,268 @@
 struct __skip_amt<_Tp[n]> { };
 
 template <typename _Tp, typename _Td>
-static inline _Tp __c11_atomic_fetch_add(volatile _Atomic(_Tp)* __a,
-                                         _Td __delta, memory_order __order) {
+static inline _Tp __cxx_atomic_fetch_add(volatile __cxx_atomic_type<_Tp>* __a,
+                                         _Td __delta, memory_order __order) {
   return __atomic_fetch_add(&__a->__a_value, __delta * __skip_amt<_Tp>::value,
-                            __gcc_atomic::__to_gcc_order(__order));
+                            __to_gcc_order(__order));
 }
 
 template <typename _Tp, typename _Td>
-static inline _Tp __c11_atomic_fetch_add(_Atomic(_Tp)* __a, _Td __delta,
-                                         memory_order __order) {
+static inline _Tp __cxx_atomic_fetch_add(__cxx_atomic_type<_Tp>* __a, _Td __delta,
+                                         memory_order __order) {
   return __atomic_fetch_add(&__a->__a_value, __delta * __skip_amt<_Tp>::value,
-                            __gcc_atomic::__to_gcc_order(__order));
+                            __to_gcc_order(__order));
 }
 
 template <typename _Tp, typename _Td>
-static inline _Tp __c11_atomic_fetch_sub(volatile _Atomic(_Tp)* __a,
-                                         _Td __delta, memory_order __order) {
+static inline _Tp __cxx_atomic_fetch_sub(volatile __cxx_atomic_type<_Tp>* __a,
+                                         _Td __delta, memory_order __order) {
   return __atomic_fetch_sub(&__a->__a_value, __delta * __skip_amt<_Tp>::value,
-                            __gcc_atomic::__to_gcc_order(__order));
+                            __to_gcc_order(__order));
 }
 
 template <typename _Tp, typename _Td>
-static inline _Tp __c11_atomic_fetch_sub(_Atomic(_Tp)* __a, _Td __delta,
-                                         memory_order __order) {
+static inline _Tp __cxx_atomic_fetch_sub(__cxx_atomic_type<_Tp>* __a, _Td __delta,
+                                         memory_order __order) {
   return __atomic_fetch_sub(&__a->__a_value, __delta * __skip_amt<_Tp>::value,
-                            __gcc_atomic::__to_gcc_order(__order));
+                            __to_gcc_order(__order));
 }
 
 template <typename _Tp>
-static inline _Tp __c11_atomic_fetch_and(volatile _Atomic(_Tp)* __a,
-                                         _Tp __pattern, memory_order __order) {
+static inline _Tp __cxx_atomic_fetch_and(volatile __cxx_atomic_type<_Tp>* __a,
+                                         _Tp __pattern, memory_order __order) {
   return __atomic_fetch_and(&__a->__a_value, __pattern,
-                            __gcc_atomic::__to_gcc_order(__order));
+                            __to_gcc_order(__order));
 }
 
 template <typename _Tp>
-static inline _Tp __c11_atomic_fetch_and(_Atomic(_Tp)* __a,
-                                         _Tp __pattern, memory_order __order) {
+static inline _Tp __cxx_atomic_fetch_and(__cxx_atomic_type<_Tp>* __a,
+                                         _Tp __pattern, memory_order __order) {
   return __atomic_fetch_and(&__a->__a_value, __pattern,
-                            __gcc_atomic::__to_gcc_order(__order));
+                            __to_gcc_order(__order));
 }
 
 template <typename _Tp>
-static inline _Tp __c11_atomic_fetch_or(volatile _Atomic(_Tp)* __a,
-                                        _Tp __pattern, memory_order __order) {
+static inline _Tp __cxx_atomic_fetch_or(volatile __cxx_atomic_type<_Tp>* __a,
+                                        _Tp __pattern, memory_order __order) {
   return __atomic_fetch_or(&__a->__a_value, __pattern,
-                           __gcc_atomic::__to_gcc_order(__order));
+                           __to_gcc_order(__order));
 }
 
 template <typename _Tp>
-static inline _Tp __c11_atomic_fetch_or(_Atomic(_Tp)* __a, _Tp __pattern,
-                                        memory_order __order) {
+static inline _Tp __cxx_atomic_fetch_or(__cxx_atomic_type<_Tp>* __a, _Tp __pattern,
+                                        memory_order __order) {
   return __atomic_fetch_or(&__a->__a_value, __pattern,
-                           __gcc_atomic::__to_gcc_order(__order));
+                           __to_gcc_order(__order));
 }
 
 template <typename _Tp>
-static inline _Tp __c11_atomic_fetch_xor(volatile _Atomic(_Tp)* __a,
-                                         _Tp __pattern, memory_order __order) {
+static inline _Tp __cxx_atomic_fetch_xor(volatile __cxx_atomic_type<_Tp>* __a,
+                                         _Tp __pattern, memory_order __order) {
   return __atomic_fetch_xor(&__a->__a_value, __pattern,
-                            __gcc_atomic::__to_gcc_order(__order));
+                            __to_gcc_order(__order));
 }
 
 template <typename _Tp>
-static inline _Tp __c11_atomic_fetch_xor(_Atomic(_Tp)* __a, _Tp __pattern,
-                                         memory_order __order) {
+static inline _Tp __cxx_atomic_fetch_xor(__cxx_atomic_type<_Tp>* __a, _Tp __pattern,
+                                         memory_order __order) {
   return __atomic_fetch_xor(&__a->__a_value, __pattern,
-                            __gcc_atomic::__to_gcc_order(__order));
+                            __to_gcc_order(__order));
 }
+
+#define __cxx_atomic_is_lock_free(__s) __atomic_is_lock_free(__s, 0)
+
+#elif defined(_LIBCPP_HAS_C_ATOMIC_IMP)
+
+template <typename _Tp>
+struct __cxx_atomic_type {
+
+#if _GNUC_VER >= 501
+  static_assert(is_trivially_copyable<_Tp>::value,
+    "std::atomic<Tp> requires that 'Tp' be a trivially copyable type");
+#endif
+
+  _LIBCPP_INLINE_VISIBILITY
+#ifndef _LIBCPP_CXX03_LANG
+  __cxx_atomic_type() _NOEXCEPT = default;
+#else
+  __cxx_atomic_type() _NOEXCEPT : __a_value() {}
+#endif // _LIBCPP_CXX03_LANG
+  _LIBCPP_CONSTEXPR explicit __cxx_atomic_type(_Tp value) _NOEXCEPT
+    : __a_value(value) {}
+  _Atomic(_Tp) __a_value;
+};
+
+template <typename _Tp, typename _Sco = _LIBCPP_ATOMIC_SCOPE_DEFAULT>
+using __cxx_atomic_base_impl = __cxx_atomic_type<_Tp>;
+
+#define __cxx_atomic_is_lock_free(__s) __c11_atomic_is_lock_free(__s)
+
+static _LIBCPP_INLINE_VISIBILITY inline
+void __cxx_atomic_thread_fence(int __order) {
+  __c11_atomic_thread_fence(__order);
+}
+
+static _LIBCPP_INLINE_VISIBILITY inline
+void __cxx_atomic_signal_fence(int __order) {
+  __c11_atomic_signal_fence(__order);
+}
+
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+void __cxx_atomic_init(__cxx_atomic_type<_Tp> volatile* __a, _Tp __val) {
+  __c11_atomic_init(&__a->__a_value, __val);
+}
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+void __cxx_atomic_init(__cxx_atomic_type<_Tp>* __a, _Tp __val) {
+  __c11_atomic_init(&__a->__a_value, __val);
+}
+
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+void __cxx_atomic_store(__cxx_atomic_type<_Tp> volatile* __a, _Tp __val, int __order) {
+  __c11_atomic_store(&__a->__a_value, __val, __order);
+}
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+void __cxx_atomic_store(__cxx_atomic_type<_Tp>* __a, _Tp __val, int __order) {
+  __c11_atomic_store(&__a->__a_value, __val, __order);
+}
+
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp __cxx_atomic_load(__cxx_atomic_type<_Tp> const volatile* __a, int __order) {
+  using __ptr_type = typename remove_const<decltype(__a->__a_value)>::type*;
+  return __c11_atomic_load(const_cast<__ptr_type>(&__a->__a_value), __order);
+}
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp __cxx_atomic_load(__cxx_atomic_type<_Tp> const* __a, int __order) {
+  using __ptr_type = typename remove_const<decltype(__a->__a_value)>::type*;
+  return __c11_atomic_load(const_cast<__ptr_type>(&__a->__a_value), __order);
+}
+
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp __cxx_atomic_exchange(__cxx_atomic_type<_Tp> volatile* __a, _Tp __value, int __order) {
+  return __c11_atomic_exchange(&__a->__a_value, __value, __order);
+}
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp __cxx_atomic_exchange(__cxx_atomic_type<_Tp>* __a, _Tp __value, int __order) {
+  return __c11_atomic_exchange(&__a->__a_value, __value, __order);
+}
+
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_type<_Tp> volatile* __a, _Tp* __expected, _Tp __value, int __success, int __failure) {
+  return __c11_atomic_compare_exchange_strong(&__a->__a_value, __expected, __value, __success, __failure);
+}
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_type<_Tp>* __a, _Tp* __expected, _Tp __value, int __success, int __failure) {
+  return __c11_atomic_compare_exchange_strong(&__a->__a_value, __expected, __value, __success, __failure);
+}
+
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_type<_Tp> volatile* __a, _Tp* __expected, _Tp __value, int __success, int __failure) {
+  return __c11_atomic_compare_exchange_weak(&__a->__a_value, __expected, __value, __success, __failure);
+}
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_type<_Tp>* __a, _Tp* __expected, _Tp __value, int __success, int __failure) {
+  return __c11_atomic_compare_exchange_weak(&__a->__a_value, __expected, __value, __success, __failure);
+}
+
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp __cxx_atomic_fetch_add(__cxx_atomic_type<_Tp> volatile* __a, _Tp __delta, int __order) {
+  return __c11_atomic_fetch_add(&__a->__a_value, __delta, __order);
+}
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp __cxx_atomic_fetch_add(__cxx_atomic_type<_Tp>* __a, _Tp __delta, int __order) {
+  return __c11_atomic_fetch_add(&__a->__a_value, __delta, __order);
+}
+
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp* __cxx_atomic_fetch_add(__cxx_atomic_type<_Tp*> volatile* __a, ptrdiff_t __delta, int __order) {
+  return __c11_atomic_fetch_add(&__a->__a_value, __delta, __order);
+}
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp* __cxx_atomic_fetch_add(__cxx_atomic_type<_Tp*>* __a, ptrdiff_t __delta, int __order) {
+  return __c11_atomic_fetch_add(&__a->__a_value, __delta, __order);
+}
+
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp __cxx_atomic_fetch_sub(__cxx_atomic_type<_Tp> volatile* __a, _Tp __delta, int __order) {
+  return __c11_atomic_fetch_sub(&__a->__a_value, __delta, __order);
+}
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp __cxx_atomic_fetch_sub(__cxx_atomic_type<_Tp>* __a, _Tp __delta, int __order) {
+  return __c11_atomic_fetch_sub(&__a->__a_value, __delta, __order);
+}
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp* __cxx_atomic_fetch_sub(__cxx_atomic_type<_Tp*> volatile* __a, ptrdiff_t __delta, int __order) {
+  return __c11_atomic_fetch_sub(&__a->__a_value, __delta, __order);
+}
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp* __cxx_atomic_fetch_sub(__cxx_atomic_type<_Tp*>* __a, ptrdiff_t __delta, int __order) {
+  return __c11_atomic_fetch_sub(&__a->__a_value, __delta, __order);
+}
+
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp __cxx_atomic_fetch_and(__cxx_atomic_type<_Tp> volatile* __a, _Tp __pattern, int __order) {
+  return __c11_atomic_fetch_and(&__a->__a_value, __pattern, __order);
+}
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp __cxx_atomic_fetch_and(__cxx_atomic_type<_Tp>* __a, _Tp __pattern, int __order) {
  return __c11_atomic_fetch_and(&__a->__a_value, __pattern, __order);
+}
+
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp __cxx_atomic_fetch_or(__cxx_atomic_type<_Tp> volatile* __a, _Tp __pattern, int __order) {
+  return __c11_atomic_fetch_or(&__a->__a_value, __pattern, __order);
+}
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp __cxx_atomic_fetch_or(__cxx_atomic_type<_Tp>* __a, _Tp __pattern, int __order) {
+  return __c11_atomic_fetch_or(&__a->__a_value, __pattern, __order);
+}
+
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp __cxx_atomic_fetch_xor(__cxx_atomic_type<_Tp> volatile* __a, _Tp __pattern, int __order) {
+  return __c11_atomic_fetch_xor(&__a->__a_value, __pattern, __order);
+}
+template <typename _Tp>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp __cxx_atomic_fetch_xor(__cxx_atomic_type<_Tp>* __a, _Tp __pattern, int __order) {
+  return __c11_atomic_fetch_xor(&__a->__a_value, __pattern, __order);
+}
+
 #endif // _LIBCPP_HAS_GCC_ATOMIC_IMP
 
 template <class _Tp>
@@ -871,7 +1076,7 @@
 # define ATOMIC_LONG_LOCK_FREE __CLANG_ATOMIC_LONG_LOCK_FREE
 # define ATOMIC_LLONG_LOCK_FREE __CLANG_ATOMIC_LLONG_LOCK_FREE
 # define ATOMIC_POINTER_LOCK_FREE __CLANG_ATOMIC_POINTER_LOCK_FREE
-#else
+#elif defined(__GCC_ATOMIC_BOOL_LOCK_FREE)
 # define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE
 # define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE
 # define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE
@@ -884,12 +1089,173 @@
 # define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE
 #endif
 
+#if defined(_LIBCPP_FREESTANDING) && defined(__cpp_lib_atomic_is_always_lock_free)
+
+template <typename _Tp, typename _Sco>
+struct __cxx_atomic_lock_impl {
+
+#if _GNUC_VER >= 501
+  static_assert(is_trivially_copyable<_Tp>::value,
+    "std::atomic<Tp> requires that 'Tp' be a trivially copyable type");
+#endif
+
+  _LIBCPP_INLINE_VISIBILITY
+  __cxx_atomic_lock_impl() _NOEXCEPT
+    : __a_value(), __a_lock(0) {}
+  _LIBCPP_INLINE_VISIBILITY
+  _LIBCPP_CONSTEXPR explicit __cxx_atomic_lock_impl(_Tp value) _NOEXCEPT
+    : __a_value(value), __a_lock(0) {}
+
+  _Tp __a_value;
+  mutable __cxx_atomic_base_impl<_LIBCPP_ATOMIC_FLAG_TYPE, _Sco> __a_lock;
+
+  template <typename _Function>
+  _LIBCPP_INLINE_VISIBILITY inline void reader_section(_Function && __f) const volatile {
+    while(1 == __cxx_atomic_exchange(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(true), __ATOMIC_ACQUIRE));
+    __f(__a_value);
+    __cxx_atomic_store(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(false), __ATOMIC_RELEASE);
+  }
+  template <typename _Function>
+  _LIBCPP_INLINE_VISIBILITY inline void writer_section(_Function && __f) volatile {
+    while(1 == __cxx_atomic_exchange(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(true), __ATOMIC_ACQUIRE));
+    __f(__a_value);
+    __cxx_atomic_store(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(false), __ATOMIC_RELEASE);
+  }
+};
+
+template <typename _Tp, typename _Sco>
+static _LIBCPP_INLINE_VISIBILITY inline
+void __cxx_atomic_init(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __val) {
+  __a->__a_value = __val;
+}
+
+template <typename _Tp, typename _Sco>
+static _LIBCPP_INLINE_VISIBILITY inline
+void __cxx_atomic_store(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __val, int) {
+  __a->writer_section([&](_Tp volatile& __a_value){ __a_value = __val; });
+}
+
+template <typename _Tp, typename _Sco>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp __cxx_atomic_load(const volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, int) {
+  _Tp __val;
+  __a->reader_section([&](_Tp const volatile& __a_value){ __val = __a_value; });
+  return __val;
+}
+
+template <typename _Tp, typename _Sco>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp __cxx_atomic_exchange(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a, _Tp __value, int) {
+  __a->writer_section([&](_Tp volatile& __a_value){
+    _Tp __temp = __a_value;
+    __a_value = __value;
+    __value = __temp;
+  });
+  return __value;
+}
+
+template <typename _Tp, typename _Sco>
+static _LIBCPP_INLINE_VISIBILITY inline
+bool __cxx_atomic_compare_exchange_strong(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a,
+                                          _Tp* __expected, _Tp __value, int, int) {
+  bool __ret;
+  __a->writer_section([&](_Tp volatile& __a_value){
+    __ret = __a_value == *__expected;
+    if(__ret)
+      __a_value = __value;
+    else
+      *__expected = __a_value; });
+  return __ret;
+}
+
+template <typename _Tp, typename _Sco>
+static _LIBCPP_INLINE_VISIBILITY inline
+bool __cxx_atomic_compare_exchange_weak(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a,
+                                        _Tp* __expected, _Tp __value, int, int) {
+  bool __ret;
+  __a->writer_section([&](_Tp volatile& __a_value){
+    __ret = __a_value == *__expected;
+    if(__ret)
+      __a_value = __value;
+    else
+      *__expected = __a_value; });
+  return __ret;
+}
+
+template <typename _Tp, typename _Td, typename _Sco>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp __cxx_atomic_fetch_add(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a,
+                           _Td __delta, int) {
+  _Tp __old;
+  __a->writer_section([&](_Tp volatile& __a_value){
+    __old = __a_value;
+    __a_value += __delta; });
+  return __old;
+}
+
+template <typename _Tp, typename _Td, typename _Sco>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp __cxx_atomic_fetch_sub(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a,
+                           _Td __delta, int) {
+  _Tp __old;
+  __a->writer_section([&](_Tp volatile& __a_value){
+    __old = __a_value;
+    __a_value -= __delta; });
+  return __old;
+}
+
+template <typename _Tp, typename _Sco>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp __cxx_atomic_fetch_and(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a,
+                           _Tp __pattern, int) {
+  _Tp __old;
+  __a->writer_section([&](_Tp volatile& __a_value){
+    __old = __a_value;
+    __a_value &= __pattern; });
+  return __old;
+}
+
+template <typename _Tp, typename _Sco>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp __cxx_atomic_fetch_or(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a,
+                          _Tp __pattern, int) {
+  _Tp __old;
+  __a->writer_section([&](_Tp volatile& __a_value){
+    __old = __a_value;
+    __a_value |= __pattern; });
+  return __old;
+}
+
+template <typename _Tp, typename _Sco>
+static _LIBCPP_INLINE_VISIBILITY inline
+_Tp __cxx_atomic_fetch_xor(volatile __cxx_atomic_lock_impl<_Tp, _Sco>* __a,
+                           _Tp __pattern, int) {
+  _Tp __old;
+  __a->writer_section([&](_Tp volatile& __a_value){
+    __old = __a_value;
+    __a_value ^= __pattern; });
+  return __old;
+}
+
+template <typename _Tp, typename _Sco = _LIBCPP_ATOMIC_SCOPE_DEFAULT>
+using __cxx_atomic_impl = typename conditional<__atomic_always_lock_free(sizeof(_Tp), 0),
+                                               __cxx_atomic_base_impl<_Tp, _Sco>,
+                                               __cxx_atomic_lock_impl<_Tp, _Sco>>::type;
+
+#else
+
+template <typename _Tp, typename _Sco = _LIBCPP_ATOMIC_SCOPE_DEFAULT>
+using __cxx_atomic_impl = __cxx_atomic_base_impl<_Tp, _Sco>;
+
+#endif // _LIBCPP_FREESTANDING && __cpp_lib_atomic_is_always_lock_free
+
 // general atomic<T>
 
-template <class _Tp, bool = is_integral<_Tp>::value && !is_same<_Tp, bool>::value>
+template <class _Tp, typename _Sco = _LIBCPP_ATOMIC_SCOPE_DEFAULT, bool = is_integral<_Tp>::value && !is_same<_Tp, bool>::value>
 struct __atomic_base  // false
 {
-    mutable _Atomic(_Tp) __a_;
+    mutable __cxx_atomic_impl<_Tp, _Sco> __a_;
 
 #if defined(__cpp_lib_atomic_is_always_lock_free)
   static _LIBCPP_CONSTEXPR bool is_always_lock_free = __atomic_always_lock_free(sizeof(__a_), 0);
@@ -897,78 +1263,72 @@
 
     _LIBCPP_INLINE_VISIBILITY
     bool is_lock_free() const volatile _NOEXCEPT
-    {
-#if defined(_LIBCPP_HAS_C_ATOMIC_IMP)
-    return __c11_atomic_is_lock_free(sizeof(_Tp));
-#else
-    return __atomic_is_lock_free(sizeof(_Tp), 0);
-#endif
-    }
+        {return __cxx_atomic_is_lock_free(sizeof(_Tp));}
     _LIBCPP_INLINE_VISIBILITY
     bool is_lock_free() const _NOEXCEPT
         {return static_cast<__atomic_base const volatile*>(this)->is_lock_free();}
     _LIBCPP_INLINE_VISIBILITY
    void store(_Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
      _LIBCPP_CHECK_STORE_MEMORY_ORDER(__m)
-        {__c11_atomic_store(&__a_, __d, __m);}
+        {__cxx_atomic_store(&__a_, __d, __m);}
     _LIBCPP_INLINE_VISIBILITY
    void store(_Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT
      _LIBCPP_CHECK_STORE_MEMORY_ORDER(__m)
-        {__c11_atomic_store(&__a_, __d, __m);}
+        {__cxx_atomic_store(&__a_, __d, __m);}
     _LIBCPP_INLINE_VISIBILITY
    _Tp load(memory_order __m = memory_order_seq_cst) const volatile _NOEXCEPT
      _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m)
-        {return __c11_atomic_load(&__a_, __m);}
+        {return __cxx_atomic_load(&__a_, __m);}
     _LIBCPP_INLINE_VISIBILITY
    _Tp load(memory_order __m = memory_order_seq_cst) const _NOEXCEPT
      _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m)
-        {return __c11_atomic_load(&__a_, __m);}
+        {return __cxx_atomic_load(&__a_, __m);}
     _LIBCPP_INLINE_VISIBILITY
     operator _Tp() const volatile _NOEXCEPT {return load();}
     _LIBCPP_INLINE_VISIBILITY
     operator _Tp() const _NOEXCEPT          {return load();}
     _LIBCPP_INLINE_VISIBILITY
    _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
-        {return __c11_atomic_exchange(&__a_, __d, __m);}
+        {return __cxx_atomic_exchange(&__a_, __d, __m);}
     _LIBCPP_INLINE_VISIBILITY
    _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT
-        {return __c11_atomic_exchange(&__a_, __d, __m);}
+        {return __cxx_atomic_exchange(&__a_, __d, __m);}
     _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_weak(_Tp& __e, _Tp __d,
                               memory_order __s, memory_order __f) volatile _NOEXCEPT
      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f)
-        {return __c11_atomic_compare_exchange_weak(&__a_, &__e, __d, __s, __f);}
+        {return __cxx_atomic_compare_exchange_weak(&__a_, &__e, __d, __s, __f);}
     _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_weak(_Tp& __e, _Tp __d,
                               memory_order __s, memory_order __f) _NOEXCEPT
      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f)
-        {return __c11_atomic_compare_exchange_weak(&__a_, &__e, __d, __s, __f);}
+        {return __cxx_atomic_compare_exchange_weak(&__a_, &__e, __d, __s, __f);}
     _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_strong(_Tp& __e, _Tp __d,
                                 memory_order __s, memory_order __f) volatile _NOEXCEPT
      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f)
-        {return __c11_atomic_compare_exchange_strong(&__a_, &__e, __d, __s, __f);}
+        {return __cxx_atomic_compare_exchange_strong(&__a_, &__e, __d, __s, __f);}
     _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_strong(_Tp& __e, _Tp __d,
                                 memory_order __s, memory_order __f) _NOEXCEPT
      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f)
-        {return __c11_atomic_compare_exchange_strong(&__a_, &__e, __d, __s, __f);}
+        {return __cxx_atomic_compare_exchange_strong(&__a_, &__e, __d, __s, __f);}
     _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_weak(_Tp& __e, _Tp __d,
                               memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
-        {return __c11_atomic_compare_exchange_weak(&__a_, &__e, __d, __m, __m);}
+        {return __cxx_atomic_compare_exchange_weak(&__a_, &__e, __d, __m, __m);}
     _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_weak(_Tp& __e, _Tp __d,
                               memory_order __m = memory_order_seq_cst) _NOEXCEPT
-        {return __c11_atomic_compare_exchange_weak(&__a_, &__e, __d, __m, __m);}
+        {return __cxx_atomic_compare_exchange_weak(&__a_, &__e, __d, __m, __m);}
     _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_strong(_Tp& __e, _Tp __d,
                                 memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
-        {return __c11_atomic_compare_exchange_strong(&__a_, &__e, __d, __m, __m);}
+        {return __cxx_atomic_compare_exchange_strong(&__a_, &__e, __d, __m, __m);}
     _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_strong(_Tp& __e, _Tp __d,
                                 memory_order __m = memory_order_seq_cst) _NOEXCEPT
-        {return __c11_atomic_compare_exchange_strong(&__a_, &__e, __d, __m, __m);}
+        {return __cxx_atomic_compare_exchange_strong(&__a_, &__e, __d, __m, __m);}
 
     _LIBCPP_INLINE_VISIBILITY
 #ifndef _LIBCPP_CXX03_LANG
@@ -992,52 +1352,56 @@
 };
 
 #if defined(__cpp_lib_atomic_is_always_lock_free)
-template <class _Tp, bool __b>
-_LIBCPP_CONSTEXPR bool __atomic_base<_Tp, __b>::is_always_lock_free;
+template <class _Tp, typename _Sco, bool __b>
+_LIBCPP_CONSTEXPR bool __atomic_base<_Tp, _Sco, __b>::is_always_lock_free;
 #endif
 
 // atomic<Integral>
 
-template <class _Tp>
-struct __atomic_base<_Tp, true>
-    : public __atomic_base<_Tp, false>
+template <class _Tp, typename _Sco>
+struct __atomic_base<_Tp, _Sco, true>
+    : public __atomic_base<_Tp, _Sco, false>
 {
-    typedef __atomic_base<_Tp, false> __base;
+    typedef __atomic_base<_Tp, _Sco, false> __base;
     _LIBCPP_INLINE_VISIBILITY
-    __atomic_base() _NOEXCEPT _LIBCPP_DEFAULT
+#ifndef _LIBCPP_CXX03_LANG
+    __atomic_base() _NOEXCEPT = default;
+#else
+    __atomic_base() _NOEXCEPT : __base() {}
+#endif // _LIBCPP_CXX03_LANG
     _LIBCPP_INLINE_VISIBILITY
     _LIBCPP_CONSTEXPR __atomic_base(_Tp __d) _NOEXCEPT : __base(__d) {}
 
     _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
-        {return __c11_atomic_fetch_add(&this->__a_, __op, __m);}
+        {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);}
     _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
-        {return __c11_atomic_fetch_add(&this->__a_, __op, __m);}
+        {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);}
     _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
-        {return __c11_atomic_fetch_sub(&this->__a_, __op, __m);}
+        {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);}
     _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
-        {return __c11_atomic_fetch_sub(&this->__a_, __op, __m);}
+        {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);}
     _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
-        {return __c11_atomic_fetch_and(&this->__a_, __op, __m);}
+        {return __cxx_atomic_fetch_and(&this->__a_, __op, __m);}
     _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
-        {return __c11_atomic_fetch_and(&this->__a_, __op, __m);}
+        {return __cxx_atomic_fetch_and(&this->__a_, __op, __m);}
     _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
-        {return __c11_atomic_fetch_or(&this->__a_, __op, __m);}
+        {return __cxx_atomic_fetch_or(&this->__a_, __op, __m);}
     _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
-        {return __c11_atomic_fetch_or(&this->__a_, __op, __m);}
+        {return __cxx_atomic_fetch_or(&this->__a_, __op, __m);}
     _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
-        {return __c11_atomic_fetch_xor(&this->__a_, __op, __m);}
+        {return __cxx_atomic_fetch_xor(&this->__a_, __op, __m);}
     _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
-        {return __c11_atomic_fetch_xor(&this->__a_, __op, __m);}
+        {return __cxx_atomic_fetch_xor(&this->__a_, __op, __m);}
 
     _LIBCPP_INLINE_VISIBILITY
     _Tp operator++(int) volatile _NOEXCEPT      {return fetch_add(_Tp(1));}
@@ -1085,7 +1449,11 @@
 {
     typedef __atomic_base<_Tp> __base;
     _LIBCPP_INLINE_VISIBILITY
-    atomic() _NOEXCEPT _LIBCPP_DEFAULT
+#ifndef _LIBCPP_CXX03_LANG
+    atomic() _NOEXCEPT = default;
+#else
+    atomic() _NOEXCEPT : __base() {}
+#endif // _LIBCPP_CXX03_LANG
     _LIBCPP_INLINE_VISIBILITY
     _LIBCPP_CONSTEXPR atomic(_Tp __d) _NOEXCEPT : __base(__d) {}
 
@@ -1105,7 +1473,11 @@
 {
     typedef __atomic_base<_Tp*> __base;
     _LIBCPP_INLINE_VISIBILITY
-    atomic() _NOEXCEPT _LIBCPP_DEFAULT
+#ifndef _LIBCPP_CXX03_LANG
+    atomic() _NOEXCEPT = default;
+#else
+    atomic() _NOEXCEPT : __base() {}
+#endif // _LIBCPP_CXX03_LANG
     _LIBCPP_INLINE_VISIBILITY
     _LIBCPP_CONSTEXPR atomic(_Tp* __d) _NOEXCEPT : __base(__d) {}
 
@@ -1119,17 +1491,17 @@
 
     _LIBCPP_INLINE_VISIBILITY
    _Tp* fetch_add(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
-        {return __c11_atomic_fetch_add(&this->__a_, __op, __m);}
+        {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);}
     _LIBCPP_INLINE_VISIBILITY
    _Tp* fetch_add(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
-        {return __c11_atomic_fetch_add(&this->__a_, __op, __m);}
+        {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);}
     _LIBCPP_INLINE_VISIBILITY
    _Tp* fetch_sub(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
-        {return __c11_atomic_fetch_sub(&this->__a_, __op, __m);}
+        {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);}
     _LIBCPP_INLINE_VISIBILITY
    _Tp* fetch_sub(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
-        {return __c11_atomic_fetch_sub(&this->__a_, __op, __m);}
+        {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);}
 
     _LIBCPP_INLINE_VISIBILITY
     _Tp* operator++(int) volatile _NOEXCEPT        {return fetch_add(1);}
@@ -1182,7 +1554,7 @@
 void
 atomic_init(volatile atomic<_Tp>* __o, _Tp __d) _NOEXCEPT
 {
-    __c11_atomic_init(&__o->__a_, __d);
+    __cxx_atomic_init(&__o->__a_, __d);
 }
 
 template <class _Tp>
@@ -1190,7 +1562,7 @@
 void
 atomic_init(atomic<_Tp>* __o, _Tp __d) _NOEXCEPT
 {
-    __c11_atomic_init(&__o->__a_, __d);
+    __cxx_atomic_init(&__o->__a_, __d);
 }
 
 // atomic_store
@@ -1718,20 +2090,20 @@
 
 typedef struct atomic_flag
 {
-    _Atomic(bool) __a_;
+    __cxx_atomic_impl<_LIBCPP_ATOMIC_FLAG_TYPE, _LIBCPP_ATOMIC_SCOPE_DEFAULT> __a_;
 
     _LIBCPP_INLINE_VISIBILITY
    bool test_and_set(memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
-        {return __c11_atomic_exchange(&__a_, true, __m);}
+        {return __cxx_atomic_exchange(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(true), __m);}
     _LIBCPP_INLINE_VISIBILITY
    bool test_and_set(memory_order __m = memory_order_seq_cst) _NOEXCEPT
-        {return __c11_atomic_exchange(&__a_, true, __m);}
+        {return __cxx_atomic_exchange(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(true), __m);}
     _LIBCPP_INLINE_VISIBILITY
    void clear(memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
-        {__c11_atomic_store(&__a_, false, __m);}
+        {__cxx_atomic_store(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(false), __m);}
     _LIBCPP_INLINE_VISIBILITY
    void clear(memory_order __m = memory_order_seq_cst) _NOEXCEPT
-        {__c11_atomic_store(&__a_, false, __m);}
+        {__cxx_atomic_store(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(false), __m);}
 
     _LIBCPP_INLINE_VISIBILITY
 #ifndef _LIBCPP_CXX03_LANG
@@ -1817,14 +2189,14 @@
 void
 atomic_thread_fence(memory_order __m) _NOEXCEPT
 {
-    __c11_atomic_thread_fence(__m);
+    __cxx_atomic_thread_fence(__m);
 }
 
 inline _LIBCPP_INLINE_VISIBILITY
 void
 atomic_signal_fence(memory_order __m) _NOEXCEPT
 {
-    __c11_atomic_signal_fence(__m);
+    __cxx_atomic_signal_fence(__m);
 }
 
 // Atomics for standard typedef types
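
The sketch below is not part of the patch; it is a minimal, self-contained illustration of the dispatch this change introduces with __cxx_atomic_impl: go straight to the compiler builtins when __atomic_always_lock_free guarantees a lock-free size, otherwise guard a plain value with a test-and-set spinlock, the way __cxx_atomic_lock_impl's reader_section/writer_section do. The names lockfree_box, locked_box, critical, and atomic_box are hypothetical, not libc++ identifiers, and the code assumes a GCC/Clang-compatible compiler for the __atomic builtins.

#include <type_traits>

// Stands in for __cxx_atomic_base_impl: direct builtin calls.
template <typename T>
struct lockfree_box {
  T value{};
  T load() { T r; __atomic_load(&value, &r, __ATOMIC_SEQ_CST); return r; }
  void store(T v) { __atomic_store(&value, &v, __ATOMIC_SEQ_CST); }
};

// Stands in for __cxx_atomic_lock_impl: every operation runs under a
// spinlock; the patch's _LIBCPP_ATOMIC_FLAG_TYPE likewise defaults to bool.
template <typename T>
struct locked_box {
  T value{};
  bool flag{false};

  template <typename F>
  void critical(F f) {              // analogous to writer_section
    while (__atomic_exchange_n(&flag, true, __ATOMIC_ACQUIRE))
      ;                             // spin until the flag is acquired
    f(value);
    __atomic_store_n(&flag, false, __ATOMIC_RELEASE);
  }
  T load() { T r{}; critical([&](T& v) { r = v; }); return r; }
  void store(T v) { critical([&](T& u) { u = v; }); }
};

// The dispatch itself, mirroring __cxx_atomic_impl under _LIBCPP_FREESTANDING;
// __atomic_always_lock_free folds to a compile-time constant here.
template <typename T>
using atomic_box = typename std::conditional<
    __atomic_always_lock_free(sizeof(T), 0),
    lockfree_box<T>, locked_box<T> >::type;

struct big { long a[4]; };          // too large to be lock-free on typical targets

// Expected on mainstream targets; exotic ABIs may differ.
static_assert(std::is_same<atomic_box<int>, lockfree_box<int> >::value, "");
static_assert(std::is_same<atomic_box<big>, locked_box<big> >::value, "");

The same shape explains why the patch threads a scope parameter (_Sco) and a configurable flag type through every layer: a host build ignores both, while an external implementation selected via _LIBCPP_HAS_EXTERNAL_ATOMIC_IMP can substitute scoped operations and a different flag representation without touching the std::atomic surface above.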