diff --git a/libcxx/include/atomic b/libcxx/include/atomic
--- a/libcxx/include/atomic
+++ b/libcxx/include/atomic
@@ -135,6 +135,31 @@
     T operator=(T) noexcept;
 };
 
+template <class T>
+struct atomic_ref
+{
+    static constexpr bool is_always_lock_free;
+    static constexpr size_t required_alignment;
+
+    bool is_lock_free() const noexcept;
+    void store(T desr, memory_order m = memory_order_seq_cst) const noexcept;
+    T load(memory_order m = memory_order_seq_cst) const noexcept;
+    operator T() const noexcept;
+    T exchange(T desr, memory_order m = memory_order_seq_cst) const noexcept;
+    bool compare_exchange_weak(T& expc, T desr, memory_order s, memory_order f) const noexcept;
+    bool compare_exchange_strong(T& expc, T desr,
+                                 memory_order s, memory_order f) const noexcept;
+    bool compare_exchange_weak(T& expc, T desr,
+                               memory_order m = memory_order_seq_cst) const noexcept;
+    bool compare_exchange_strong(T& expc, T desr,
+                                 memory_order m = memory_order_seq_cst) const noexcept;
+
+    explicit atomic_ref(T& obj) noexcept;
+    atomic_ref(const atomic_ref&) noexcept;
+    T operator=(T desired) const noexcept;
+    atomic_ref& operator=(const atomic_ref&) = delete;
+};
+
 template <>
 struct atomic<integral>
 {
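A minimal usage sketch of the atomic_ref interface introduced above (illustration only, not part of the patch; it assumes an <atomic> header that provides atomic_ref, as after this change). Operations performed through the reference are atomic, while the referenced object keeps its ordinary type:

    #include <atomic>
    #include <cassert>

    int main() {
      int counter = 0;                          // ordinary, suitably aligned object
      std::atomic_ref<int> ref(counter);        // operations through ref are atomic

      ref.store(1);                             // memory_order_seq_cst by default
      ref.fetch_add(2, std::memory_order_relaxed);
      assert(ref.load() == 3);
      assert(counter == 3);                     // the update is visible through the object

      int expected = 3;
      assert(ref.compare_exchange_strong(expected, 4));
      assert(counter == 4);
      return 0;
    }

While any atomic_ref refers to an object, all concurrent accesses to that object must go through atomic_ref instances; the plain reads of counter above are safe only because this example is single-threaded.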
@@ -211,6 +236,47 @@
     integral operator^=(integral op) noexcept;
 };
 
+template <>
+struct atomic_ref<integral>
+{
+    static constexpr bool is_always_lock_free;
+    static constexpr size_t required_alignment;
+
+    bool is_lock_free() const noexcept;
+    void store(integral desr, memory_order m = memory_order_seq_cst) const noexcept;
+    integral load(memory_order m = memory_order_seq_cst) const noexcept;
+    operator integral() const noexcept;
+    integral exchange(integral desr, memory_order m = memory_order_seq_cst) const noexcept;
+    bool compare_exchange_weak(integral& expc, integral desr, memory_order s, memory_order f) const noexcept;
+    bool compare_exchange_strong(integral& expc, integral desr,
+                                 memory_order s, memory_order f) const noexcept;
+    bool compare_exchange_weak(integral& expc, integral desr,
+                               memory_order m = memory_order_seq_cst) const noexcept;
+    bool compare_exchange_strong(integral& expc, integral desr,
+                                 memory_order m = memory_order_seq_cst) const noexcept;
+
+    integral fetch_add(integral op, memory_order m = memory_order_seq_cst) const noexcept;
+    integral fetch_sub(integral op, memory_order m = memory_order_seq_cst) const noexcept;
+    integral fetch_and(integral op, memory_order m = memory_order_seq_cst) const noexcept;
+    integral fetch_or(integral op, memory_order m = memory_order_seq_cst) const noexcept;
+    integral fetch_xor(integral op, memory_order m = memory_order_seq_cst) const noexcept;
+
+    explicit atomic_ref(integral& obj) noexcept;
+    atomic_ref(const atomic_ref&) noexcept;
+    integral operator=(integral desired) const noexcept;
+    atomic_ref& operator=(const atomic_ref&) = delete;
+
+    integral operator++(int) const noexcept;
+    integral operator--(int) const noexcept;
+    integral operator++() const noexcept;
+    integral operator--() const noexcept;
+    integral operator+=(integral op) const noexcept;
+    integral operator-=(integral op) const noexcept;
+    integral operator&=(integral op) const noexcept;
+    integral operator|=(integral op) const noexcept;
+    integral operator^=(integral op) const noexcept;
+};
+
 template <class T>
 struct atomic<T*>
 {
@@ -268,6 +334,40 @@
     T* operator-=(ptrdiff_t op) noexcept;
 };
 
+template <class T>
+struct atomic_ref<T*>
+{
+    static constexpr bool is_always_lock_free;
+    static constexpr size_t required_alignment;
+
+    bool is_lock_free() const noexcept;
+    void store(T* desr, memory_order m = memory_order_seq_cst) const noexcept;
+    T* load(memory_order m = memory_order_seq_cst) const noexcept;
+    operator T*() const noexcept;
+    T* exchange(T* desr, memory_order m = memory_order_seq_cst) const noexcept;
+    bool compare_exchange_weak(T*& expc, T* desr, memory_order s, memory_order f) const noexcept;
+    bool compare_exchange_strong(T*& expc, T* desr,
+                                 memory_order s, memory_order f) const noexcept;
+    bool compare_exchange_weak(T*& expc, T* desr,
+                               memory_order m = memory_order_seq_cst) const noexcept;
+    bool compare_exchange_strong(T*& expc, T* desr,
+                                 memory_order m = memory_order_seq_cst) const noexcept;
+
+    T* fetch_add(ptrdiff_t op, memory_order m = memory_order_seq_cst) const noexcept;
+    T* fetch_sub(ptrdiff_t op, memory_order m = memory_order_seq_cst) const noexcept;
+
+    explicit atomic_ref(T*& obj) noexcept;
+    atomic_ref(const atomic_ref&) noexcept;
+    T* operator=(T* desired) const noexcept;
+    atomic_ref& operator=(const atomic_ref&) = delete;
+
+    T* operator++(int) const noexcept;
+    T* operator--(int) const noexcept;
+    T* operator++() const noexcept;
+    T* operator--() const noexcept;
+    T* operator+=(ptrdiff_t op) const noexcept;
+    T* operator-=(ptrdiff_t op) const noexcept;
+};
 
 template <class T>
 bool
@@ -703,6 +803,18 @@
   __a->__a_value = __val;
 }
 
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+void __cxx_atomic_init(volatile __cxx_atomic_base_impl<_Tp&>* __a, _Tp __val) {
+  __cxx_atomic_assign_volatile(__a->__a_value, __val);
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp&>* __a, _Tp __val) {
+  __a->__a_value = __val;
+}
+
 _LIBCPP_INLINE_VISIBILITY inline
 void __cxx_atomic_thread_fence(memory_order __order) {
   __atomic_thread_fence(__to_gcc_order(__order));
@@ -729,6 +841,22 @@
                  __to_gcc_order(__order));
 }
 
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+void __cxx_atomic_store(volatile __cxx_atomic_base_impl<_Tp&>* __a, _Tp __val,
+                        memory_order __order) {
+  __atomic_store(&__a->__a_value, &__val,
+                 __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp&>* __a, _Tp __val,
+                        memory_order __order) {
+  __atomic_store(&__a->__a_value, &__val,
+                 __to_gcc_order(__order));
+}
+
 template <typename _Tp>
 _LIBCPP_INLINE_VISIBILITY
 _Tp __cxx_atomic_load(const volatile __cxx_atomic_base_impl<_Tp>* __a,
@@ -748,6 +876,25 @@
   return __ret;
 }
 
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_load(const volatile __cxx_atomic_base_impl<_Tp&>* __a,
+                      memory_order __order) {
+  _Tp __ret;
+  __atomic_load(&__a->__a_value, &__ret,
+                __to_gcc_order(__order));
+  return __ret;
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_load(const __cxx_atomic_base_impl<_Tp&>* __a, memory_order __order) {
+  _Tp __ret;
+  __atomic_load(&__a->__a_value, &__ret,
+                __to_gcc_order(__order));
+  return __ret;
+}
+
 template <typename _Tp>
 _LIBCPP_INLINE_VISIBILITY
 _Tp __cxx_atomic_exchange(volatile __cxx_atomic_base_impl<_Tp>* __a,
@@ -768,6 +915,26 @@
   return __ret;
 }
 
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_exchange(volatile __cxx_atomic_base_impl<_Tp&>* __a,
+                          _Tp __value, memory_order __order) {
+  _Tp __ret;
+  __atomic_exchange(&__a->__a_value, &__value, &__ret,
+                    __to_gcc_order(__order));
+  return __ret;
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp&>* __a, _Tp __value,
+                          memory_order __order) {
+  _Tp __ret;
+  __atomic_exchange(&__a->__a_value, &__value, &__ret,
+                    __to_gcc_order(__order));
+  return __ret;
+}
+
 template <typename _Tp>
 _LIBCPP_INLINE_VISIBILITY
 bool __cxx_atomic_compare_exchange_strong(
@@ -790,6 +957,28 @@
                                    __to_gcc_failure_order(__failure));
} +template +_LIBCPP_INLINE_VISIBILITY +bool __cxx_atomic_compare_exchange_strong( + volatile __cxx_atomic_base_impl<_Tp&>* __a, _Tp* __expected, _Tp __value, + memory_order __success, memory_order __failure) { + return __atomic_compare_exchange(&__a->__a_value, __expected, &__value, + false, + __to_gcc_order(__success), + __to_gcc_failure_order(__failure)); +} + +template +_LIBCPP_INLINE_VISIBILITY +bool __cxx_atomic_compare_exchange_strong( + __cxx_atomic_base_impl<_Tp&>* __a, _Tp* __expected, _Tp __value, memory_order __success, + memory_order __failure) { + return __atomic_compare_exchange(&__a->__a_value, __expected, &__value, + false, + __to_gcc_order(__success), + __to_gcc_failure_order(__failure)); +} + template _LIBCPP_INLINE_VISIBILITY bool __cxx_atomic_compare_exchange_weak( @@ -812,6 +1001,28 @@ __to_gcc_failure_order(__failure)); } +template +_LIBCPP_INLINE_VISIBILITY +bool __cxx_atomic_compare_exchange_weak( + volatile __cxx_atomic_base_impl<_Tp&>* __a, _Tp* __expected, _Tp __value, + memory_order __success, memory_order __failure) { + return __atomic_compare_exchange(&__a->__a_value, __expected, &__value, + true, + __to_gcc_order(__success), + __to_gcc_failure_order(__failure)); +} + +template +_LIBCPP_INLINE_VISIBILITY +bool __cxx_atomic_compare_exchange_weak( + __cxx_atomic_base_impl<_Tp&>* __a, _Tp* __expected, _Tp __value, memory_order __success, + memory_order __failure) { + return __atomic_compare_exchange(&__a->__a_value, __expected, &__value, + true, + __to_gcc_order(__success), + __to_gcc_failure_order(__failure)); +} + template struct __skip_amt { enum {value = 1}; }; @@ -841,6 +1052,22 @@ __to_gcc_order(__order)); } +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_add(volatile __cxx_atomic_base_impl<_Tp&>* __a, + _Td __delta, memory_order __order) { + return __atomic_fetch_add(&__a->__a_value, __delta * __skip_amt<_Tp>::value, + __to_gcc_order(__order)); +} + +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp&>* __a, _Td __delta, + memory_order __order) { + return __atomic_fetch_add(&__a->__a_value, __delta * __skip_amt<_Tp>::value, + __to_gcc_order(__order)); +} + template _LIBCPP_INLINE_VISIBILITY _Tp __cxx_atomic_fetch_sub(volatile __cxx_atomic_base_impl<_Tp>* __a, @@ -857,6 +1084,22 @@ __to_gcc_order(__order)); } +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_sub(volatile __cxx_atomic_base_impl<_Tp&>* __a, + _Td __delta, memory_order __order) { + return __atomic_fetch_sub(&__a->__a_value, __delta * __skip_amt<_Tp>::value, + __to_gcc_order(__order)); +} + +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp&>* __a, _Td __delta, + memory_order __order) { + return __atomic_fetch_sub(&__a->__a_value, __delta * __skip_amt<_Tp>::value, + __to_gcc_order(__order)); +} + template _LIBCPP_INLINE_VISIBILITY _Tp __cxx_atomic_fetch_and(volatile __cxx_atomic_base_impl<_Tp>* __a, @@ -873,6 +1116,22 @@ __to_gcc_order(__order)); } +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_and(volatile __cxx_atomic_base_impl<_Tp&>* __a, + _Tp __pattern, memory_order __order) { + return __atomic_fetch_and(&__a->__a_value, __pattern, + __to_gcc_order(__order)); +} + +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp&>* __a, + _Tp __pattern, memory_order __order) { + return __atomic_fetch_and(&__a->__a_value, __pattern, + __to_gcc_order(__order)); +} + template _LIBCPP_INLINE_VISIBILITY _Tp __cxx_atomic_fetch_or(volatile 
__cxx_atomic_base_impl<_Tp>* __a, @@ -889,6 +1148,22 @@ __to_gcc_order(__order)); } +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_or(volatile __cxx_atomic_base_impl<_Tp&>* __a, + _Tp __pattern, memory_order __order) { + return __atomic_fetch_or(&__a->__a_value, __pattern, + __to_gcc_order(__order)); +} + +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp&>* __a, _Tp __pattern, + memory_order __order) { + return __atomic_fetch_or(&__a->__a_value, __pattern, + __to_gcc_order(__order)); +} + template _LIBCPP_INLINE_VISIBILITY _Tp __cxx_atomic_fetch_xor(volatile __cxx_atomic_base_impl<_Tp>* __a, @@ -905,10 +1180,36 @@ __to_gcc_order(__order)); } +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_xor(volatile __cxx_atomic_base_impl<_Tp&>* __a, + _Tp __pattern, memory_order __order) { + return __atomic_fetch_xor(&__a->__a_value, __pattern, + __to_gcc_order(__order)); +} + +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp&>* __a, _Tp __pattern, + memory_order __order) { + return __atomic_fetch_xor(&__a->__a_value, __pattern, + __to_gcc_order(__order)); +} + #define __cxx_atomic_is_lock_free(__s) __atomic_is_lock_free(__s, 0) #elif defined(_LIBCPP_HAS_C_ATOMIC_IMP) +template +struct __choose_atomic { + using type = _Atomic(_Tp); +}; + +template +struct __choose_underlying { + using type = _Tp; +}; + template struct __cxx_atomic_base_impl { @@ -920,7 +1221,8 @@ #endif // _LIBCPP_CXX03_LANG _LIBCPP_CONSTEXPR explicit __cxx_atomic_base_impl(_Tp value) _NOEXCEPT : __a_value(value) {} - _LIBCPP_DISABLE_EXTENSION_WARNING _Atomic(_Tp) __a_value; + using __backing_atomic = typename conditional::value, __choose_underlying<_Tp>, __choose_atomic<_Tp>>::type::type; + _LIBCPP_DISABLE_EXTENSION_WARNING __backing_atomic __a_value; }; #define __cxx_atomic_is_lock_free(__s) __c11_atomic_is_lock_free(__s) @@ -938,117 +1240,231 @@ template _LIBCPP_INLINE_VISIBILITY void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __val) _NOEXCEPT { - __c11_atomic_init(&__a->__a_value, __val); + __c11_atomic_init(&__a->__a_value, __val); } template _LIBCPP_INLINE_VISIBILITY void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp> * __a, _Tp __val) _NOEXCEPT { - __c11_atomic_init(&__a->__a_value, __val); + __c11_atomic_init(&__a->__a_value, __val); +} +template +_LIBCPP_INLINE_VISIBILITY +void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp&> volatile* __a, _Tp __val) _NOEXCEPT { + __c11_atomic_init(reinterpret_cast<_Atomic(_Tp) volatile*>(&__a->__a_value), __val); +} +template +_LIBCPP_INLINE_VISIBILITY +void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp&> * __a, _Tp __val) _NOEXCEPT { + __c11_atomic_init(reinterpret_cast<_Atomic(_Tp)*>(&__a->__a_value), __val); } template _LIBCPP_INLINE_VISIBILITY void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __val, memory_order __order) _NOEXCEPT { - __c11_atomic_store(&__a->__a_value, __val, static_cast<__memory_order_underlying_t>(__order)); + __c11_atomic_store(&__a->__a_value, __val, static_cast<__memory_order_underlying_t>(__order)); } template _LIBCPP_INLINE_VISIBILITY void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp> * __a, _Tp __val, memory_order __order) _NOEXCEPT { - __c11_atomic_store(&__a->__a_value, __val, static_cast<__memory_order_underlying_t>(__order)); + __c11_atomic_store(&__a->__a_value, __val, static_cast<__memory_order_underlying_t>(__order)); +} +template +_LIBCPP_INLINE_VISIBILITY +void 
__cxx_atomic_store(__cxx_atomic_base_impl<_Tp&> volatile* __a, _Tp __val, memory_order __order) _NOEXCEPT { + __c11_atomic_store(reinterpret_cast<_Atomic(_Tp) volatile*>(&__a->__a_value), __val, + static_cast<__memory_order_underlying_t>(__order)); +} +template +_LIBCPP_INLINE_VISIBILITY +void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp&> * __a, _Tp __val, memory_order __order) _NOEXCEPT { + __c11_atomic_store(reinterpret_cast<_Atomic(_Tp)*>(&__a->__a_value), __val, static_cast<__memory_order_underlying_t>(__order)); } template _LIBCPP_INLINE_VISIBILITY _Tp __cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const volatile* __a, memory_order __order) _NOEXCEPT { - using __ptr_type = typename remove_const__a_value)>::type*; - return __c11_atomic_load(const_cast<__ptr_type>(&__a->__a_value), static_cast<__memory_order_underlying_t>(__order)); + auto __ptr_to_atomic_val = &__a->__a_value; + using __ptr_type = typename remove_const::type; + return __c11_atomic_load(const_cast<__ptr_type>(__ptr_to_atomic_val), static_cast<__memory_order_underlying_t>(__order)); } template _LIBCPP_INLINE_VISIBILITY _Tp __cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const* __a, memory_order __order) _NOEXCEPT { - using __ptr_type = typename remove_const__a_value)>::type*; - return __c11_atomic_load(const_cast<__ptr_type>(&__a->__a_value), static_cast<__memory_order_underlying_t>(__order)); + auto __ptr_to_atomic_val = &__a->__a_value; + using __ptr_type = typename remove_const::type; + return __c11_atomic_load(const_cast<__ptr_type>(__ptr_to_atomic_val), static_cast<__memory_order_underlying_t>(__order)); } - template _LIBCPP_INLINE_VISIBILITY -_Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __value, memory_order __order) _NOEXCEPT { - return __c11_atomic_exchange(&__a->__a_value, __value, static_cast<__memory_order_underlying_t>(__order)); +_Tp __cxx_atomic_load(__cxx_atomic_base_impl<_Tp&> const volatile* __a, memory_order __order) _NOEXCEPT { + auto __ptr_to_atomic_val = reinterpret_cast<_Atomic(_Tp) const volatile*>(&__a->__a_value); + using __ptr_type = typename remove_const::type; + return __c11_atomic_load(const_cast<__ptr_type>(__ptr_to_atomic_val), static_cast<__memory_order_underlying_t>(__order)); } template _LIBCPP_INLINE_VISIBILITY -_Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> * __a, _Tp __value, memory_order __order) _NOEXCEPT { - return __c11_atomic_exchange(&__a->__a_value, __value, static_cast<__memory_order_underlying_t>(__order)); +_Tp __cxx_atomic_load(__cxx_atomic_base_impl<_Tp&> const* __a, memory_order __order) _NOEXCEPT { + auto __ptr_to_atomic_val = reinterpret_cast<_Atomic(_Tp) const*>(&__a->__a_value); + using __ptr_type = typename remove_const::type; + return __c11_atomic_load(const_cast<__ptr_type>(__ptr_to_atomic_val), static_cast<__memory_order_underlying_t>(__order)); } template _LIBCPP_INLINE_VISIBILITY -bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT { - return __c11_atomic_compare_exchange_strong(&__a->__a_value, __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure)); +_Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __value, memory_order __order) _NOEXCEPT { + return __c11_atomic_exchange(&__a->__a_value, __value, static_cast<__memory_order_underlying_t>(__order)); } template _LIBCPP_INLINE_VISIBILITY -bool 
__cxx_atomic_compare_exchange_strong(__cxx_atomic_base_impl<_Tp> * __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT { - return __c11_atomic_compare_exchange_strong(&__a->__a_value, __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure)); +_Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> * __a, _Tp __value, memory_order __order) _NOEXCEPT { + return __c11_atomic_exchange(&__a->__a_value, __value, static_cast<__memory_order_underlying_t>(__order)); } - template _LIBCPP_INLINE_VISIBILITY -bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT { - return __c11_atomic_compare_exchange_weak(&__a->__a_value, __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure)); +_Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp&> volatile* __a, _Tp __value, memory_order __order) _NOEXCEPT { + return __c11_atomic_exchange(reinterpret_cast<_Atomic(_Tp) volatile*>(&__a->__a_value), __value, + static_cast<__memory_order_underlying_t>(__order)); } template _LIBCPP_INLINE_VISIBILITY -bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_base_impl<_Tp> * __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT { - return __c11_atomic_compare_exchange_weak(&__a->__a_value, __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure)); +_Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp&> * __a, _Tp __value, memory_order __order) _NOEXCEPT { + return __c11_atomic_exchange(reinterpret_cast<_Atomic(_Tp)*>(&__a->__a_value), __value, + static_cast<__memory_order_underlying_t>(__order)); } template _LIBCPP_INLINE_VISIBILITY -_Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT { - return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order)); +bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT { + return __c11_atomic_compare_exchange_strong(&__a->__a_value, __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure)); } template _LIBCPP_INLINE_VISIBILITY -_Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp> * __a, _Tp __delta, memory_order __order) _NOEXCEPT { - return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order)); +bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_base_impl<_Tp> * __a, _Tp* __expected, + _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT { + return __c11_atomic_compare_exchange_strong(&__a->__a_value, __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure)); } template _LIBCPP_INLINE_VISIBILITY -_Tp* __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT { - return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order)); +bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_base_impl<_Tp&> volatile* __a, _Tp* __expected, _Tp __value, memory_order __success, 
memory_order __failure) _NOEXCEPT { + return __c11_atomic_compare_exchange_strong(reinterpret_cast<_Atomic(_Tp) volatile*>(&__a->__a_value), __expected, + __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure)); } template _LIBCPP_INLINE_VISIBILITY -_Tp* __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*> * __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT { - return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order)); +bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_base_impl<_Tp&> * __a, _Tp* __expected, + _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT { + return __c11_atomic_compare_exchange_strong(reinterpret_cast<_Atomic(_Tp) *>(&__a->__a_value), __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure)); } template _LIBCPP_INLINE_VISIBILITY -_Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT { - return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order)); +bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT { + return __c11_atomic_compare_exchange_weak(&__a->__a_value, __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure)); } template _LIBCPP_INLINE_VISIBILITY -_Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp> * __a, _Tp __delta, memory_order __order) _NOEXCEPT { - return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order)); +bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_base_impl<_Tp> * __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT { + return __c11_atomic_compare_exchange_weak(&__a->__a_value, __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure)); } template _LIBCPP_INLINE_VISIBILITY -_Tp* __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT { - return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order)); +bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_base_impl<_Tp&> volatile* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT { + return __c11_atomic_compare_exchange_weak(reinterpret_cast<_Atomic(_Tp) volatile*>(&__a->__a_value), __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure)); } template _LIBCPP_INLINE_VISIBILITY -_Tp* __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*> * __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT { - return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order)); +bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_base_impl<_Tp&> * __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT { + return __c11_atomic_compare_exchange_weak(reinterpret_cast<_Atomic(_Tp)*>(&__a->__a_value), __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure)); } template _LIBCPP_INLINE_VISIBILITY -_Tp 
__cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT { +_Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT { + return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order)); +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp> * __a, _Tp __delta, memory_order __order) _NOEXCEPT { + return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order)); +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp&> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT { + return __c11_atomic_fetch_add(reinterpret_cast<_Atomic(_Tp) volatile*>(&__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order)); +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp&> * __a, _Tp __delta, memory_order __order) _NOEXCEPT { + return __c11_atomic_fetch_add(reinterpret_cast<_Atomic(_Tp) volatile*>(&__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order)); +} + +template +_LIBCPP_INLINE_VISIBILITY +_Tp* __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT { + return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order)); +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp* __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*> * __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT { + return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order)); +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp* __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*&> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT { + return __c11_atomic_fetch_add(reinterpret_cast<_Atomic(_Tp*) volatile*>(&__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order)); +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp* __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*&> * __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT { + return __c11_atomic_fetch_add(reinterpret_cast<_Atomic(_Tp*)*>(&__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order)); +} + +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT { + return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order)); +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp> * __a, _Tp __delta, memory_order __order) _NOEXCEPT { + return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order)); +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp&> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT { + return __c11_atomic_fetch_sub(reinterpret_cast<_Atomic(_Tp) volatile*>(&__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order)); +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp&> * __a, _Tp __delta, memory_order __order) _NOEXCEPT { + return __c11_atomic_fetch_sub(reinterpret_cast<_Atomic(_Tp)*>(&__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order)); +} + +template 
+_LIBCPP_INLINE_VISIBILITY +_Tp* __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT { + return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order)); +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp* __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*> * __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT { + return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order)); +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp* __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*&> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT { + return __c11_atomic_fetch_sub(reinterpret_cast<_Atomic(_Tp*) volatile*>(&__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order)); +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp* __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*&> * __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT { + return __c11_atomic_fetch_sub(reinterpret_cast<_Atomic(_Tp*)*>(&__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order)); +} + +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT { return __c11_atomic_fetch_and(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order)); } template @@ -1056,6 +1472,16 @@ _Tp __cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) _NOEXCEPT { return __c11_atomic_fetch_and(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order)); } +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp&> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT { + return __c11_atomic_fetch_and(reinterpret_cast<_Atomic(_Tp) volatile*>(&__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order)); +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp&> * __a, _Tp __pattern, memory_order __order) _NOEXCEPT { + return __c11_atomic_fetch_and(reinterpret_cast<_Atomic(_Tp)*>(&__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order)); +} template _LIBCPP_INLINE_VISIBILITY @@ -1065,7 +1491,17 @@ template _LIBCPP_INLINE_VISIBILITY _Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) _NOEXCEPT { - return __c11_atomic_fetch_or(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order)); + return __c11_atomic_fetch_or(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order)); +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp&> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT { + return __c11_atomic_fetch_or(reinterpret_cast<_Atomic(_Tp) volatile*>(&__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order)); +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp&> * __a, _Tp __pattern, memory_order __order) _NOEXCEPT { + return __c11_atomic_fetch_or(reinterpret_cast<_Atomic(_Tp)*>(&__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order)); } template @@ -1078,6 +1514,16 @@ _Tp __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) _NOEXCEPT { return __c11_atomic_fetch_xor(&__a->__a_value, __pattern, 
static_cast<__memory_order_underlying_t>(__order)); } +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp&> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT { + return __c11_atomic_fetch_xor(reinterpret_cast<_Atomic(_Tp) volatile*>(&__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order)); +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp&> * __a, _Tp __pattern, memory_order __order) _NOEXCEPT { + return __c11_atomic_fetch_xor(reinterpret_cast<_Atomic(_Tp)*>(&__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order)); +} #endif // _LIBCPP_HAS_GCC_ATOMIC_IMP, _LIBCPP_HAS_C_ATOMIC_IMP @@ -1141,16 +1587,16 @@ _LIBCPP_INLINE_VISIBILITY void __unlock() const { __cxx_atomic_store(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(false), memory_order_release); } - _LIBCPP_INLINE_VISIBILITY _Tp __read() const volatile { + _LIBCPP_INLINE_VISIBILITY typename remove_reference<_Tp>::type __read() const volatile { __lock(); - _Tp __old; + typename remove_reference<_Tp>::type __old; __cxx_atomic_assign_volatile(__old, __a_value); __unlock(); return __old; } - _LIBCPP_INLINE_VISIBILITY _Tp __read() const { + _LIBCPP_INLINE_VISIBILITY typename remove_reference<_Tp>::type __read() const { __lock(); - _Tp __old = __a_value; + typename remove_reference<_Tp>::type __old = __a_value; __unlock(); return __old; } @@ -1166,6 +1612,16 @@ void __cxx_atomic_init(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __val) { __a->__a_value = __val; } +template +_LIBCPP_INLINE_VISIBILITY +void __cxx_atomic_init(volatile __cxx_atomic_lock_impl<_Tp&>* __a, _Tp __val) { + __cxx_atomic_assign_volatile(__a->__a_value, __val); +} +template +_LIBCPP_INLINE_VISIBILITY +void __cxx_atomic_init(__cxx_atomic_lock_impl<_Tp&>* __a, _Tp __val) { + __a->__a_value = __val; +} template _LIBCPP_INLINE_VISIBILITY @@ -1181,6 +1637,20 @@ __a->__a_value = __val; __a->__unlock(); } +template +_LIBCPP_INLINE_VISIBILITY +void __cxx_atomic_store(volatile __cxx_atomic_lock_impl<_Tp&>* __a, _Tp __val, memory_order) { + __a->__lock(); + __cxx_atomic_assign_volatile(__a->__a_value, __val); + __a->__unlock(); +} +template +_LIBCPP_INLINE_VISIBILITY +void __cxx_atomic_store(__cxx_atomic_lock_impl<_Tp&>* __a, _Tp __val, memory_order) { + __a->__lock(); + __a->__a_value = __val; + __a->__unlock(); +} template _LIBCPP_INLINE_VISIBILITY @@ -1193,6 +1663,17 @@ return __a->__read(); } +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_load(const volatile __cxx_atomic_lock_impl<_Tp&>* __a, memory_order) { + return __a->__read(); +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_load(const __cxx_atomic_lock_impl<_Tp&>* __a, memory_order) { + return __a->__read(); +} + template _LIBCPP_INLINE_VISIBILITY _Tp __cxx_atomic_exchange(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __value, memory_order) { @@ -1212,6 +1693,25 @@ __a->__unlock(); return __old; } +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_exchange(volatile __cxx_atomic_lock_impl<_Tp&>* __a, _Tp __value, memory_order) { + __a->__lock(); + _Tp __old; + __cxx_atomic_assign_volatile(__old, __a->__a_value); + __cxx_atomic_assign_volatile(__a->__a_value, __value); + __a->__unlock(); + return __old; +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_exchange(__cxx_atomic_lock_impl<_Tp&>* __a, _Tp __value, memory_order) { + __a->__lock(); + _Tp __old = __a->__a_value; + __a->__a_value = __value; + __a->__unlock(); + return __old; +} template _LIBCPP_INLINE_VISIBILITY @@ 
-1241,6 +1741,34 @@ __a->__unlock(); return __ret; } +template +_LIBCPP_INLINE_VISIBILITY +bool __cxx_atomic_compare_exchange_strong(volatile __cxx_atomic_lock_impl<_Tp&>* __a, + _Tp* __expected, _Tp __value, memory_order, memory_order) { + __a->__lock(); + _Tp temp; + __cxx_atomic_assign_volatile(temp, __a->__a_value); + bool __ret = temp == *__expected; + if(__ret) + __cxx_atomic_assign_volatile(__a->__a_value, __value); + else + __cxx_atomic_assign_volatile(*__expected, __a->__a_value); + __a->__unlock(); + return __ret; +} +template +_LIBCPP_INLINE_VISIBILITY +bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_lock_impl<_Tp&>* __a, + _Tp* __expected, _Tp __value, memory_order, memory_order) { + __a->__lock(); + bool __ret = __a->__a_value == *__expected; + if(__ret) + __a->__a_value = __value; + else + *__expected = __a->__a_value; + __a->__unlock(); + return __ret; +} template _LIBCPP_INLINE_VISIBILITY @@ -1270,6 +1798,34 @@ __a->__unlock(); return __ret; } +template +_LIBCPP_INLINE_VISIBILITY +bool __cxx_atomic_compare_exchange_weak(volatile __cxx_atomic_lock_impl<_Tp&>* __a, + _Tp* __expected, _Tp __value, memory_order, memory_order) { + __a->__lock(); + _Tp temp; + __cxx_atomic_assign_volatile(temp, __a->__a_value); + bool __ret = temp == *__expected; + if(__ret) + __cxx_atomic_assign_volatile(__a->__a_value, __value); + else + __cxx_atomic_assign_volatile(*__expected, __a->__a_value); + __a->__unlock(); + return __ret; +} +template +_LIBCPP_INLINE_VISIBILITY +bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_lock_impl<_Tp&>* __a, + _Tp* __expected, _Tp __value, memory_order, memory_order) { + __a->__lock(); + bool __ret = __a->__a_value == *__expected; + if(__ret) + __a->__a_value = __value; + else + *__expected = __a->__a_value; + __a->__unlock(); + return __ret; +} template _LIBCPP_INLINE_VISIBILITY @@ -1292,6 +1848,27 @@ __a->__unlock(); return __old; } +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_add(volatile __cxx_atomic_lock_impl<_Tp&>* __a, + _Td __delta, memory_order) { + __a->__lock(); + _Tp __old; + __cxx_atomic_assign_volatile(__old, __a->__a_value); + __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old + __delta)); + __a->__unlock(); + return __old; +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_add(__cxx_atomic_lock_impl<_Tp&>* __a, + _Td __delta, memory_order) { + __a->__lock(); + _Tp __old = __a->__a_value; + __a->__a_value += __delta; + __a->__unlock(); + return __old; +} template _LIBCPP_INLINE_VISIBILITY @@ -1314,6 +1891,27 @@ __a->__unlock(); return __old; } +template +_LIBCPP_INLINE_VISIBILITY +_Tp* __cxx_atomic_fetch_add(volatile __cxx_atomic_lock_impl<_Tp*&>* __a, + ptrdiff_t __delta, memory_order) { + __a->__lock(); + _Tp* __old; + __cxx_atomic_assign_volatile(__old, __a->__a_value); + __cxx_atomic_assign_volatile(__a->__a_value, __old + __delta); + __a->__unlock(); + return __old; +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp* __cxx_atomic_fetch_add(__cxx_atomic_lock_impl<_Tp*&>* __a, + ptrdiff_t __delta, memory_order) { + __a->__lock(); + _Tp* __old = __a->__a_value; + __a->__a_value += __delta; + __a->__unlock(); + return __old; +} template _LIBCPP_INLINE_VISIBILITY @@ -1336,6 +1934,28 @@ __a->__unlock(); return __old; } +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_sub(volatile __cxx_atomic_lock_impl<_Tp&>* __a, + _Td __delta, memory_order) { + __a->__lock(); + _Tp __old; + __cxx_atomic_assign_volatile(__old, __a->__a_value); + __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old - __delta)); + 
__a->__unlock(); + return __old; +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_sub(__cxx_atomic_lock_impl<_Tp&>* __a, + _Td __delta, memory_order) { + __a->__lock(); + _Tp __old = __a->__a_value; + __a->__a_value -= __delta; + __a->__unlock(); + return __old; +} +// TODO specialize for pointer fetch_sub template _LIBCPP_INLINE_VISIBILITY @@ -1358,6 +1978,27 @@ __a->__unlock(); return __old; } +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_and(volatile __cxx_atomic_lock_impl<_Tp&>* __a, + _Tp __pattern, memory_order) { + __a->__lock(); + _Tp __old; + __cxx_atomic_assign_volatile(__old, __a->__a_value); + __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old & __pattern)); + __a->__unlock(); + return __old; +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_and(__cxx_atomic_lock_impl<_Tp&>* __a, + _Tp __pattern, memory_order) { + __a->__lock(); + _Tp __old = __a->__a_value; + __a->__a_value &= __pattern; + __a->__unlock(); + return __old; +} template _LIBCPP_INLINE_VISIBILITY @@ -1380,6 +2021,27 @@ __a->__unlock(); return __old; } +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_or(volatile __cxx_atomic_lock_impl<_Tp&>* __a, + _Tp __pattern, memory_order) { + __a->__lock(); + _Tp __old; + __cxx_atomic_assign_volatile(__old, __a->__a_value); + __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old | __pattern)); + __a->__unlock(); + return __old; +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_or(__cxx_atomic_lock_impl<_Tp&>* __a, + _Tp __pattern, memory_order) { + __a->__lock(); + _Tp __old = __a->__a_value; + __a->__a_value |= __pattern; + __a->__unlock(); + return __old; +} template _LIBCPP_INLINE_VISIBILITY @@ -1402,6 +2064,27 @@ __a->__unlock(); return __old; } +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_xor(volatile __cxx_atomic_lock_impl<_Tp&>* __a, + _Tp __pattern, memory_order) { + __a->__lock(); + _Tp __old; + __cxx_atomic_assign_volatile(__old, __a->__a_value); + __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old ^ __pattern)); + __a->__unlock(); + return __old; +} +template +_LIBCPP_INLINE_VISIBILITY +_Tp __cxx_atomic_fetch_xor(__cxx_atomic_lock_impl<_Tp&>* __a, + _Tp __pattern, memory_order) { + __a->__lock(); + _Tp __old = __a->__a_value; + __a->__a_value ^= __pattern; + __a->__unlock(); + return __old; +} #ifdef __cpp_lib_atomic_is_always_lock_free @@ -1441,12 +2124,6 @@ typename _Base = __cxx_atomic_base_impl<_Tp> > #endif //_LIBCPP_ATOMIC_ONLY_USE_BUILTINS struct __cxx_atomic_impl : public _Base { - -#if _GNUC_VER >= 501 - static_assert(is_trivially_copyable<_Tp>::value, - "std::atomic requires that 'Tp' be a trivially copyable type"); -#endif - _LIBCPP_INLINE_VISIBILITY __cxx_atomic_impl() _NOEXCEPT _LIBCPP_DEFAULT _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR explicit __cxx_atomic_impl(_Tp value) _NOEXCEPT : _Base(value) {} @@ -1463,6 +2140,12 @@ static _LIBCPP_CONSTEXPR bool is_always_lock_free = __atomic_always_lock_free(sizeof(__a_), 0); #endif +// TODO perform this check when compiled with clang too. 
+#if _GNUC_VER >= 501 + static_assert(is_trivially_copyable<_Tp>::value, + "std::atomic requires that 'Tp' be a trivially copyable type"); +#endif + _LIBCPP_INLINE_VISIBILITY bool is_lock_free() const volatile _NOEXCEPT {return __cxx_atomic_is_lock_free(sizeof(_Tp));} @@ -1550,9 +2233,79 @@ #endif }; +// general atomic_ref + +template ::value && !is_same<_Tp, bool>::value> +struct __atomic_ref_base // false +{ + mutable __cxx_atomic_impl<_Tp&> __a_; + +#if defined(__cpp_lib_atomic_is_always_lock_free) + static _LIBCPP_CONSTEXPR bool is_always_lock_free = __atomic_always_lock_free(sizeof(__a_.__a_value), 0); +#endif + + static_assert(is_trivially_copyable<_Tp>::value, "std::atomic_ref requires that 'Tp' be a trivially copyable type"); + + static _LIBCPP_CONSTEXPR size_t required_alignment = std::alignment_of::__a_value)>::value; + + _LIBCPP_INLINE_VISIBILITY + bool is_lock_free() const _NOEXCEPT + {return __cxx_atomic_is_lock_free(sizeof(_Tp));} + _LIBCPP_INLINE_VISIBILITY + void store(_Tp __d, memory_order __m = memory_order_seq_cst) const _NOEXCEPT + _LIBCPP_CHECK_STORE_MEMORY_ORDER(__m) + {__cxx_atomic_store(&__a_, __d, __m);} + _LIBCPP_INLINE_VISIBILITY + _Tp load(memory_order __m = memory_order_seq_cst) const _NOEXCEPT + _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m) + {return __cxx_atomic_load(&__a_, __m);} + _LIBCPP_INLINE_VISIBILITY + operator _Tp() const _NOEXCEPT {return load();} + _LIBCPP_INLINE_VISIBILITY + _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) const _NOEXCEPT + {return __cxx_atomic_exchange(&__a_, __d, __m);} + _LIBCPP_INLINE_VISIBILITY + bool compare_exchange_weak(_Tp& __e, _Tp __d, + memory_order __s, memory_order __f) const _NOEXCEPT + _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) + {return __cxx_atomic_compare_exchange_weak(&__a_, &__e, __d, __s, __f);} + _LIBCPP_INLINE_VISIBILITY + bool compare_exchange_strong(_Tp& __e, _Tp __d, + memory_order __s, memory_order __f) const _NOEXCEPT + _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f) + {return __cxx_atomic_compare_exchange_strong(&__a_, &__e, __d, __s, __f);} + _LIBCPP_INLINE_VISIBILITY + bool compare_exchange_weak(_Tp& __e, _Tp __d, + memory_order __m = memory_order_seq_cst) const _NOEXCEPT + {return __cxx_atomic_compare_exchange_weak(&__a_, &__e, __d, __m, __m);} + _LIBCPP_INLINE_VISIBILITY + bool compare_exchange_strong(_Tp& __e, _Tp __d, + memory_order __m = memory_order_seq_cst) const _NOEXCEPT + {return __cxx_atomic_compare_exchange_strong(&__a_, &__e, __d, __m, __m);} + + _LIBCPP_INLINE_VISIBILITY _LIBCPP_EXPLICIT + __atomic_ref_base(_Tp& __d) _NOEXCEPT : __a_(__d) {} + _LIBCPP_INLINE_VISIBILITY + __atomic_ref_base(const __atomic_ref_base& other) _NOEXCEPT + : __a_(other.__a_) {} + + _Tp operator=(const _Tp __d) const _NOEXCEPT { + store(__d); + return __d; + } +#ifndef _LIBCPP_CXX03_LANG + __atomic_ref_base& operator=(const __atomic_ref_base&) = delete; +#else +private: + __atomic_ref_base& operator=(const __atomic_ref_base&); +#endif +}; + #if defined(__cpp_lib_atomic_is_always_lock_free) template _LIBCPP_CONSTEXPR bool __atomic_base<_Tp, __b>::is_always_lock_free; +template +_LIBCPP_CONSTEXPR bool __atomic_ref_base<_Tp, __b>::is_always_lock_free; #endif // atomic @@ -1636,6 +2389,60 @@ _Tp operator^=(_Tp __op) _NOEXCEPT {return fetch_xor(__op) ^ __op;} }; +// atomic_ref + +template +struct __atomic_ref_base<_Tp, true> + : public __atomic_ref_base<_Tp, false> +{ + typedef __atomic_ref_base<_Tp, false> __base; + + _LIBCPP_INLINE_VISIBILITY _LIBCPP_EXPLICIT + __atomic_ref_base(_Tp& __d) _NOEXCEPT : 
__base(__d) {} + _LIBCPP_INLINE_VISIBILITY + __atomic_ref_base(const __atomic_ref_base& other) _NOEXCEPT + : __base(other) {} + + _Tp operator=(const _Tp __d) const _NOEXCEPT { + return __base::operator=(__d); + } + + _LIBCPP_INLINE_VISIBILITY + _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) const _NOEXCEPT + {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);} + _LIBCPP_INLINE_VISIBILITY + _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) const _NOEXCEPT + {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);} + _LIBCPP_INLINE_VISIBILITY + _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) const _NOEXCEPT + {return __cxx_atomic_fetch_and(&this->__a_, __op, __m);} + _LIBCPP_INLINE_VISIBILITY + _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) const _NOEXCEPT + {return __cxx_atomic_fetch_or(&this->__a_, __op, __m);} + _LIBCPP_INLINE_VISIBILITY + _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) const _NOEXCEPT + {return __cxx_atomic_fetch_xor(&this->__a_, __op, __m);} + + _LIBCPP_INLINE_VISIBILITY + _Tp operator++(int) const _NOEXCEPT {return fetch_add(_Tp(1));} + _LIBCPP_INLINE_VISIBILITY + _Tp operator--(int) const _NOEXCEPT {return fetch_sub(_Tp(1));} + _LIBCPP_INLINE_VISIBILITY + _Tp operator++() const _NOEXCEPT {return fetch_add(_Tp(1)) + _Tp(1);} + _LIBCPP_INLINE_VISIBILITY + _Tp operator--() const _NOEXCEPT {return fetch_sub(_Tp(1)) - _Tp(1);} + _LIBCPP_INLINE_VISIBILITY + _Tp operator+=(_Tp __op) const _NOEXCEPT {return fetch_add(__op) + __op;} + _LIBCPP_INLINE_VISIBILITY + _Tp operator-=(_Tp __op) const _NOEXCEPT {return fetch_sub(__op) - __op;} + _LIBCPP_INLINE_VISIBILITY + _Tp operator&=(_Tp __op) const _NOEXCEPT {return fetch_and(__op) & __op;} + _LIBCPP_INLINE_VISIBILITY + _Tp operator|=(_Tp __op) const _NOEXCEPT {return fetch_or(__op) | __op;} + _LIBCPP_INLINE_VISIBILITY + _Tp operator^=(_Tp __op) const _NOEXCEPT {return fetch_xor(__op) ^ __op;} +}; + // atomic template @@ -1656,6 +2463,23 @@ {__base::store(__d); return __d;} }; +// atomic_ref +template +struct atomic_ref + : public __atomic_ref_base<_Tp> +{ + typedef __atomic_ref_base<_Tp> __base; + _LIBCPP_INLINE_VISIBILITY + _LIBCPP_EXPLICIT atomic_ref(_Tp& obj) : __base(obj) {} + _LIBCPP_INLINE_VISIBILITY + atomic_ref(const atomic_ref& ref) _NOEXCEPT : __base(ref) {} + + _LIBCPP_INLINE_VISIBILITY + _Tp operator=(const _Tp __d) const _NOEXCEPT { + return __base::operator=(__d); + } +}; + // atomic template @@ -1716,6 +2540,44 @@ _Tp* operator-=(ptrdiff_t __op) _NOEXCEPT {return fetch_sub(__op) - __op;} }; +// atomic_ref + +template +struct atomic_ref<_Tp*> + : public __atomic_ref_base<_Tp*> +{ + typedef __atomic_ref_base<_Tp*> __base; + _LIBCPP_INLINE_VISIBILITY + _LIBCPP_EXPLICIT atomic_ref(_Tp*& obj) : __base(obj) {} + _LIBCPP_INLINE_VISIBILITY + atomic_ref(const atomic_ref& ref) _NOEXCEPT : __base(ref) {} + + _LIBCPP_INLINE_VISIBILITY + _Tp* operator=(_Tp* __d) const _NOEXCEPT { + return __base::operator=(__d); + } + + _LIBCPP_INLINE_VISIBILITY + _Tp* fetch_add(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) const _NOEXCEPT + {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);} + _LIBCPP_INLINE_VISIBILITY + _Tp* fetch_sub(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) const _NOEXCEPT + {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);} + + _LIBCPP_INLINE_VISIBILITY + _Tp* operator++(int) const _NOEXCEPT {return fetch_add(1);} + _LIBCPP_INLINE_VISIBILITY + _Tp* operator--(int) const _NOEXCEPT {return 
fetch_sub(1);} + _LIBCPP_INLINE_VISIBILITY + _Tp* operator++() const _NOEXCEPT {return fetch_add(1) + 1;} + _LIBCPP_INLINE_VISIBILITY + _Tp* operator--() const _NOEXCEPT {return fetch_sub(1) - 1;} + _LIBCPP_INLINE_VISIBILITY + _Tp* operator+=(ptrdiff_t __op) const _NOEXCEPT {return fetch_add(__op) + __op;} + _LIBCPP_INLINE_VISIBILITY + _Tp* operator-=(ptrdiff_t __op) const _NOEXCEPT {return fetch_sub(__op) - __op;} +}; + // atomic_is_lock_free template diff --git a/libcxx/test/std/atomics/atomics.lockfree/isalwayslockfree.pass.cpp b/libcxx/test/std/atomics/atomics.lockfree/isalwayslockfree.pass.cpp --- a/libcxx/test/std/atomics/atomics.lockfree/isalwayslockfree.pass.cpp +++ b/libcxx/test/std/atomics/atomics.lockfree/isalwayslockfree.pass.cpp @@ -22,8 +22,13 @@ #endif template void checkAlwaysLockFree() { - if (std::atomic::is_always_lock_free) + assert(std::atomic::is_always_lock_free == + std::atomic_ref::is_always_lock_free); + if (std::atomic::is_always_lock_free) { assert(std::atomic().is_lock_free()); + T val; + assert(std::atomic_ref(val).is_lock_free()); + } } // FIXME: This separate test is needed to work around llvm.org/PR31864 diff --git a/libcxx/test/std/atomics/atomics.types.generic/address.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/address.pass.cpp --- a/libcxx/test/std/atomics/atomics.types.generic/address.pass.cpp +++ b/libcxx/test/std/atomics/atomics.types.generic/address.pass.cpp @@ -75,6 +75,7 @@ #include #include "test_macros.h" +#include "test_compare_exchange_helpers.h" template void @@ -130,15 +131,125 @@ } template -void test() +void test() { + do_test(); + do_test(); +} + +/* +template +struct atomic_ref { - do_test(); - do_test(); + static constexpr bool is_always_lock_free; + static constexpr size_t required_alignment; + + bool is_lock_free() const noexcept; + void store(T* desr, memory_order m = memory_order_seq_cst) const noexcept; + T* load(memory_order m = memory_order_seq_cst) const noexcept; + operator T*() const noexcept; + T* exchange(T* desr, memory_order m = memory_order_seq_cst) const noexcept; + bool compare_exchange_weak(T*& expc, T* desr, memory_order s, memory_order f) const noexcept; + bool compare_exchange_strong(T*& expc, T* desr, + memory_order s, memory_order f) const noexcept; + bool compare_exchange_weak(T*& expc, T* desr, + memory_order m = memory_order_seq_cst) const noexcept; + bool compare_exchange_strong(T*& expc, T* desr, + memory_order m = memory_order_seq_cst) const noexcept; + + T* fetch_add(ptrdiff_t op, memory_order m = memory_order_seq_cst) noexcept; + T* fetch_sub(ptrdiff_t op, memory_order m = memory_order_seq_cst) noexcept; + + explicit atomic_ref(T* obj) noexcept; + atomic_ref(const atomic_ref&) noexcept; + T* operator=(T* desired) const noexcept; + atomic_ref& operator=(const atomic_ref&) = delete; + + T* operator++(int) const noexcept; + T* operator--(int) const noexcept; + T* operator++() const noexcept; + T* operator--() const noexcept; + T* operator+=(ptrdiff_t op) const noexcept; + T* operator-=(ptrdiff_t op) const noexcept; +}; */ + +template +void test_atomic_ref() { + using atomic_ref_t = std::atomic_ref; + assert(atomic_ref_t::is_always_lock_free); + assert(atomic_ref_t::required_alignment >= sizeof(T)); + assert(atomic_ref_t::required_alignment % sizeof(T) == 0); + constexpr T base_pointer = nullptr; + T underlying = base_pointer; + const atomic_ref_t test_ref(underlying); + assert(test_ref.is_lock_free()); + + test_ref.store(base_pointer + 1); + assert(test_ref.load() == base_pointer + 1); + 
test_ref.store(base_pointer + 2, std::memory_order_relaxed); + assert(test_ref.load(std::memory_order_relaxed) == base_pointer + 2); + + test_ref.store(base_pointer + 3); + assert(static_cast(test_ref) == base_pointer + 3); + + assert(test_ref.exchange(base_pointer + 4) == base_pointer + 3); + assert(test_ref.exchange(base_pointer + 5, std::memory_order_relaxed) == + base_pointer + 4); + assert(test_ref.load() == base_pointer + 5); + + test_compare_exchange_weak(test_ref, base_pointer); + test_compare_exchange_weak(test_ref, + base_pointer); + test_compare_exchange_weak(test_ref, base_pointer); + + test_compare_exchange_strong(test_ref, base_pointer); + test_compare_exchange_strong(test_ref, + base_pointer); + test_compare_exchange_strong(test_ref, + base_pointer); + + test_ref.store(base_pointer); + assert(test_ref.fetch_add(std::ptrdiff_t(1)) == base_pointer); + assert(test_ref.fetch_add(std::ptrdiff_t(2), std::memory_order_relaxed) == + base_pointer + 1); + assert(test_ref.fetch_sub(std::ptrdiff_t(2)) == base_pointer + 3); + assert(test_ref.fetch_sub(std::ptrdiff_t(1), std::memory_order_relaxed) == + base_pointer + 1); + assert(test_ref.load() == base_pointer); + + test_ref.store(base_pointer + 2); + assert(test_ref.load() == base_pointer + 2); + assert(underlying == base_pointer + 2); + + const atomic_ref_t test_ref_2(test_ref); + assert(test_ref_2.load() == base_pointer + 2); + assert((test_ref_2 = base_pointer + 3) == base_pointer + 3); + assert(test_ref.load() == base_pointer + 3); + assert(underlying == base_pointer + 3); + + assert(test_ref_2++ == base_pointer + 3); + assert(test_ref_2.load() == base_pointer + 4); + assert(++test_ref_2 == base_pointer + 5); + assert(test_ref_2.load() == base_pointer + 5); + + assert(test_ref_2-- == base_pointer + 5); + assert(test_ref_2.load() == base_pointer + 4); + assert(--test_ref_2 == base_pointer + 3); + assert(test_ref_2.load() == base_pointer + 3); + + assert((test_ref_2 += std::ptrdiff_t(2)) == base_pointer + 5); + assert((test_ref_2 -= std::ptrdiff_t(1)) == base_pointer + 4); + + assert(test_ref_2.load() == base_pointer + 4); + assert(test_ref.load() == base_pointer + 4); + assert(underlying == base_pointer + 4); } int main(int, char**) { test, int*>(); + test_atomic_ref(); - return 0; + return 0; } diff --git a/libcxx/test/std/atomics/atomics.types.generic/assign_to_atomic_ref_deleted.fail.cpp b/libcxx/test/std/atomics/atomics.types.generic/assign_to_atomic_ref_deleted.fail.cpp new file mode 100644 --- /dev/null +++ b/libcxx/test/std/atomics/atomics.types.generic/assign_to_atomic_ref_deleted.fail.cpp @@ -0,0 +1,47 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// + +// template +// struct atomic_ref +// { +// static constexpr bool is_always_lock_free; +// static constexpr size_t required_alignment; + +// bool is_lock_free() const noexcept; +// void store(T desr, memory_order m = memory_order_seq_cst) const noexcept; +// T load(memory_order m = memory_order_seq_cst) const noexcept; +// operator T() const noexcept; +// T exchange(T desr, memory_order m = memory_order_seq_cst) const noexcept; +// bool compare_exchange_weak(T& expc, T desr, memory_order s, memory_order f) const noexcept; +// bool compare_exchange_strong(T& expc, T desr, +// memory_order s, memory_order f) const noexcept; +// bool compare_exchange_weak(T& expc, T desr, +// memory_order m = memory_order_seq_cst) const noexcept; +// bool compare_exchange_strong(T& expc, T desr, +// memory_order m = memory_order_seq_cst) const noexcept; + +// explicit atomic_ref(T& obj) noexcept; +// atomic_ref(const atomic_ref&) noexcept; +// T operator=(T desired) const noexcept; +// atomic_ref& operator=(const atomic_ref&) = delete; +// }; + +#include +#include +#include + +int main(int, char**) { + int val = 1; + std::atomic_ref t0(val); + std::atomic_ref t1(val); + t0 = t1; + + return 0; +} diff --git a/libcxx/test/std/atomics/atomics.types.generic/integral.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/integral.pass.cpp --- a/libcxx/test/std/atomics/atomics.types.generic/integral.pass.cpp +++ b/libcxx/test/std/atomics/atomics.types.generic/integral.pass.cpp @@ -92,6 +92,7 @@ #include #include "test_macros.h" +#include "test_compare_exchange_helpers.h" template void @@ -166,6 +167,154 @@ do_test(); } +/* +template <> +struct atomic_ref +{ + static constexpr bool is_always_lock_free; + static constexpr size_t required_alignment; + + bool is_lock_free() const noexcept; + void store(T desr, memory_order m = memory_order_seq_cst) const noexcept; + integral load(memory_order m = memory_order_seq_cst) const noexcept; + operator integral() const noexcept; + integral exchange(integral desr, memory_order m = memory_order_seq_cst) const noexcept; + bool compare_exchange_weak(integral& expc, integral desr, memory_order s, memory_order f) const noexcept; + bool compare_exchange_strong(integral& expc, integral desr, + memory_order s, memory_order f) const noexcept; + bool compare_exchange_weak(integral& expc, integral desr, + memory_order m = memory_order_seq_cst) const noexcept; + bool compare_exchange_strong(integral& expc, integral desr, + memory_order m = memory_order_seq_cst) const noexcept; + + integral fetch_add(integral op, memory_order m = memory_order_seq_cst) const noexcept; + integral fetch_sub(integral op, memory_order m = memory_order_seq_cst) const noexcept; + integral fetch_and(integral op, memory_order m = memory_order_seq_cst) const noexcept; + integral fetch_or(integral op, memory_order m = memory_order_seq_cst) const noexcept; + integral fetch_xor(integral op, memory_order m = memory_order_seq_cst) const noexcept; + + explicit atomic_ref(integral& obj) noexcept; + atomic_ref(const atomic_ref&) noexcept; + integral operator=(integral desired) const noexcept; + atomic_ref& operator=(const atomic_ref&) = delete; + + integral operator++(int) const noexcept; + integral operator--(int) const noexcept; + integral operator++() const noexcept; + integral operator--() const noexcept; + integral operator+=(integral op) const noexcept; + integral 
diff --git a/libcxx/test/std/atomics/atomics.types.generic/integral.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/integral.pass.cpp
--- a/libcxx/test/std/atomics/atomics.types.generic/integral.pass.cpp
+++ b/libcxx/test/std/atomics/atomics.types.generic/integral.pass.cpp
@@ -92,6 +92,7 @@
 #include <cassert>
 
 #include "test_macros.h"
+#include "test_compare_exchange_helpers.h"
 
 template <class A, class T>
 void
@@ -166,6 +167,154 @@
     do_test();
 }
 
+/*
+template <>
+struct atomic_ref<integral>
+{
+    static constexpr bool is_always_lock_free;
+    static constexpr size_t required_alignment;
+
+    bool is_lock_free() const noexcept;
+    void store(integral desr, memory_order m = memory_order_seq_cst) const noexcept;
+    integral load(memory_order m = memory_order_seq_cst) const noexcept;
+    operator integral() const noexcept;
+    integral exchange(integral desr, memory_order m = memory_order_seq_cst) const noexcept;
+    bool compare_exchange_weak(integral& expc, integral desr, memory_order s, memory_order f) const noexcept;
+    bool compare_exchange_strong(integral& expc, integral desr,
+                                 memory_order s, memory_order f) const noexcept;
+    bool compare_exchange_weak(integral& expc, integral desr,
+                               memory_order m = memory_order_seq_cst) const noexcept;
+    bool compare_exchange_strong(integral& expc, integral desr,
+                                 memory_order m = memory_order_seq_cst) const noexcept;
+
+    integral fetch_add(integral op, memory_order m = memory_order_seq_cst) const noexcept;
+    integral fetch_sub(integral op, memory_order m = memory_order_seq_cst) const noexcept;
+    integral fetch_and(integral op, memory_order m = memory_order_seq_cst) const noexcept;
+    integral fetch_or(integral op, memory_order m = memory_order_seq_cst) const noexcept;
+    integral fetch_xor(integral op, memory_order m = memory_order_seq_cst) const noexcept;
+
+    explicit atomic_ref(integral& obj) noexcept;
+    atomic_ref(const atomic_ref&) noexcept;
+    integral operator=(integral desired) const noexcept;
+    atomic_ref& operator=(const atomic_ref&) = delete;
+
+    integral operator++(int) const noexcept;
+    integral operator--(int) const noexcept;
+    integral operator++() const noexcept;
+    integral operator--() const noexcept;
+    integral operator+=(integral op) const noexcept;
+    integral operator-=(integral op) const noexcept;
+    integral operator&=(integral op) const noexcept;
+    integral operator|=(integral op) const noexcept;
+    integral operator^=(integral op) const noexcept;
+};
+*/
+
+template <class T>
+void test_atomic_ref() {
+  constexpr T initial_value = 42;
+  T underlying(42);
+  using atomic_ref_t = std::atomic_ref<T>;
+  const atomic_ref_t test_ref(underlying);
+
+  const auto is_lock_free = test_ref.is_lock_free();
+  (void)is_lock_free;
+  constexpr auto is_always_lock_free = atomic_ref_t::is_always_lock_free;
+  (void)is_always_lock_free;
+  constexpr auto required_alignment = atomic_ref_t::required_alignment;
+  assert(required_alignment >= sizeof(T));
+  assert(required_alignment % sizeof(T) == 0);
+  assert(test_ref.load(std::memory_order_relaxed) == initial_value);
+
+  assert(test_ref.load() == initial_value);
+  test_ref.store(initial_value + T(1), std::memory_order_relaxed);
+  assert(test_ref.load(std::memory_order_relaxed) == initial_value + T(1));
+  test_ref.store(initial_value + T(2));
+  assert(test_ref.load() == initial_value + T(2));
+  assert(static_cast<T>(test_ref) == initial_value + T(2));
+
+  assert(test_ref.exchange(initial_value) == initial_value + T(2));
+  assert(test_ref.exchange(initial_value - T(1), std::memory_order_relaxed) ==
+         initial_value);
+  assert(test_ref.load() == initial_value - T(1));
+
+  test_compare_exchange_weak(test_ref, initial_value);
+  test_compare_exchange_weak(test_ref,
+                             initial_value);
+  test_compare_exchange_weak(test_ref,
+                             initial_value);
+
+  test_compare_exchange_strong(test_ref, initial_value);
+  test_compare_exchange_strong(test_ref,
+                               initial_value);
+  test_compare_exchange_strong(test_ref,
+                               initial_value);
+
+  test_ref.store(initial_value + T(7));
+  assert(test_ref.fetch_add(T(3)) == initial_value + T(7));
+  assert(test_ref.load() == initial_value + T(10));
+  assert(test_ref.fetch_add(T(2), std::memory_order_relaxed) ==
+         initial_value + T(10));
+  assert(test_ref.load() == initial_value + T(12));
+
+  assert(test_ref.fetch_sub(T(2)) == initial_value + T(12));
+  assert(test_ref.load() == initial_value + T(10));
+  assert(test_ref.fetch_sub(T(3), std::memory_order_relaxed) ==
+         initial_value + T(10));
+  assert(test_ref.load() == initial_value + T(7));
+
+  test_ref.store(T(7));
+  assert(test_ref.fetch_and(T(3)) == T(7));
+  assert(test_ref.load() == T(3));
+  assert(test_ref.fetch_and(T(1), std::memory_order_relaxed) == T(3));
+  assert(test_ref.load() == T(1));
+
+  test_ref.store(T(0));
+  assert(test_ref.fetch_or(T(1)) == T(0));
+  assert(test_ref.load() == T(1));
+  assert(test_ref.fetch_or(T(2), std::memory_order_relaxed) == T(1));
+  assert(test_ref.load() == T(3));
+
+  test_ref.store(T(128) | T(32) | T(8));
+  assert(test_ref.fetch_xor(T(64) | T(16) | T(8)) == (T(128) | T(32) | T(8)));
+  assert(test_ref.load() == (T(128) | T(64) | T(32) | T(16)));
+  assert(
+      test_ref.fetch_xor(T(128) | T(64) | T(16), std::memory_order_relaxed) ==
+      (T(128) | T(64) | T(32) | T(16)));
+  assert(test_ref.load() == T(32));
+
+  assert(underlying == T(32));
+  const atomic_ref_t test_ref_2(test_ref);
+  assert(test_ref_2.load() == T(32));
+  assert((test_ref_2 = T(64)) == T(64));
+  assert(test_ref.load() == T(64));
+  assert(test_ref_2.load() == T(64));
+
+  assert(test_ref_2++ == T(64));
+  assert(static_cast<T>(test_ref_2) == T(65));
+  assert(++test_ref_2 == T(66));
+  assert(test_ref_2.load() == T(66));
+  assert(test_ref_2-- == T(66));
+  assert(test_ref_2.load() == T(65));
+  assert(--test_ref_2 == T(64));
+  assert(test_ref_2.load() == T(64));
+
+  assert((test_ref_2 += T(16)) == T(80));
+  assert((test_ref_2 -= T(32)) == T(48));
+  assert(test_ref_2.load() == T(48));
+  assert((test_ref_2 = T(31)) == T(31));
+  assert((test_ref_2 &= T(15)) == T(15));
+  assert(test_ref_2.load() == T(15));
+  assert((test_ref_2 |= T(16)) == T(31));
+  assert(test_ref_2.load() == T(31));
+  constexpr T expected_final_value = T(1) | T(2) | T(4) | T(8) | T(64);
+  assert((test_ref_2 ^= (T(16) | T(64))) == expected_final_value);
+  assert(test_ref_2.load() == expected_final_value);
+  assert(test_ref.load() == expected_final_value);
+  assert(underlying == expected_final_value);
+}
 
 int main(int, char**)
 {
@@ -221,5 +370,31 @@
     test();
     test();
 
-    return 0;
+    test_atomic_ref<char>();
+    test_atomic_ref<signed char>();
+    test_atomic_ref<unsigned char>();
+    test_atomic_ref<short>();
+    test_atomic_ref<unsigned short>();
+    test_atomic_ref<int>();
+    test_atomic_ref<unsigned int>();
+    test_atomic_ref<long>();
+    test_atomic_ref<unsigned long>();
+    test_atomic_ref<long long>();
+    test_atomic_ref<unsigned long long>();
+#ifndef _LIBCPP_HAS_NO_UNICODE_CHARS
+    test_atomic_ref<char16_t>();
+    test_atomic_ref<char32_t>();
+#endif // _LIBCPP_HAS_NO_UNICODE_CHARS
+    test_atomic_ref<wchar_t>();
+
+    test_atomic_ref<std::int8_t>();
+    test_atomic_ref<std::uint8_t>();
+    test_atomic_ref<std::int16_t>();
+    test_atomic_ref<std::uint16_t>();
+    test_atomic_ref<std::int32_t>();
+    test_atomic_ref<std::uint32_t>();
+    test_atomic_ref<std::int64_t>();
+    test_atomic_ref<std::uint64_t>();
+
+    return 0;
 }
diff --git a/libcxx/test/std/atomics/atomics.types.generic/test_compare_exchange_helpers.h b/libcxx/test/std/atomics/atomics.types.generic/test_compare_exchange_helpers.h
new file
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/test_compare_exchange_helpers.h
@@ -0,0 +1,35 @@
+#pragma once
+#include <atomic>
+
+template <class T, std::memory_order... memory_orders>
+void test_compare_exchange_weak(const std::atomic_ref<T> test_ref,
+                                const T initial_value) {
+  test_ref.store(initial_value);
+  const T desired_value = T(initial_value + 2);
+  T expected = T(initial_value + 3);
+  assert(!test_ref.compare_exchange_weak(expected, desired_value,
+                                         memory_orders...));
+  assert(expected == initial_value);
+  assert(test_ref.load() == initial_value);
+  while (!test_ref.compare_exchange_weak(expected, desired_value,
+                                         memory_orders...)) {
+    assert(expected == initial_value);
+    assert(test_ref.load() == initial_value);
+  }
+  assert(test_ref.load() == desired_value);
+}
+
+template <class T, std::memory_order... memory_orders>
+void test_compare_exchange_strong(const std::atomic_ref<T> test_ref,
+                                  const T initial_value) {
+  test_ref.store(initial_value);
+  const T desired_value = T(initial_value + 2);
+  T expected = T(initial_value + 3);
+  assert(!test_ref.compare_exchange_strong(expected, desired_value,
+                                           memory_orders...));
+  assert(expected == initial_value);
+  assert(test_ref.load() == initial_value);
+  assert(test_ref.compare_exchange_strong(expected, desired_value,
+                                          memory_orders...));
+  assert(test_ref.load() == desired_value);
+}
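[Editor's aside, not part of the patch] The memory_orders pack in the helpers above lets a single helper drive the default, single-order, and success/failure-order compare_exchange overloads. A sketch of possible call sites, assuming the reconstructed template <class T, std::memory_order... memory_orders> signature; the concrete orders used by the real test are not recoverable from this excerpt:

#include <atomic>
#include <cassert>  // assert() is used inside the helpers
#include "test_compare_exchange_helpers.h"

int main() {
  int value = 42;
  const std::atomic_ref<int> ref(value);

  // default overload: compare_exchange_weak(expected, desired), seq_cst
  test_compare_exchange_weak(ref, 42);
  // single explicit order
  test_compare_exchange_weak<int, std::memory_order_seq_cst>(ref, 42);
  // separate success / failure orders
  test_compare_exchange_weak<int, std::memory_order_acquire,
                             std::memory_order_relaxed>(ref, 42);
  return 0;
}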
diff --git a/libcxx/test/std/atomics/atomics.types.generic/trivially_copyable.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/trivially_copyable.pass.cpp
--- a/libcxx/test/std/atomics/atomics.types.generic/trivially_copyable.pass.cpp
+++ b/libcxx/test/std/atomics/atomics.types.generic/trivially_copyable.pass.cpp
@@ -52,6 +52,31 @@
 //     T operator=(T) noexcept;
 // };
 
+// template <class T>
+// struct atomic_ref
+// {
+//     static constexpr bool is_always_lock_free;
+//     static constexpr size_t required_alignment;
+
+//     bool is_lock_free() const noexcept;
+//     void store(T desr, memory_order m = memory_order_seq_cst) const noexcept;
+//     T load(memory_order m = memory_order_seq_cst) const noexcept;
+//     operator T() const noexcept;
+//     T exchange(T desr, memory_order m = memory_order_seq_cst) const noexcept;
+//     bool compare_exchange_weak(T& expc, T desr, memory_order s, memory_order f) const noexcept;
+//     bool compare_exchange_strong(T& expc, T desr,
+//                                  memory_order s, memory_order f) const noexcept;
+//     bool compare_exchange_weak(T& expc, T desr,
+//                                memory_order m = memory_order_seq_cst) const noexcept;
+//     bool compare_exchange_strong(T& expc, T desr,
+//                                  memory_order m = memory_order_seq_cst) const noexcept;
+
+//     explicit atomic_ref(T& obj) noexcept;
+//     atomic_ref(const atomic_ref&) noexcept;
+//     T operator=(T desired) const noexcept;
+//     atomic_ref& operator=(const atomic_ref&) = delete;
+// };
+
 #include <atomic>
 #include <new>
 #include <cassert>
@@ -68,6 +93,7 @@
 template <class T>
 void test ( T t ) {
     std::atomic<T> t0(t);
+    std::atomic_ref<T> ref_t0(t);
 }
 
 int main(int, char**)
diff --git a/libcxx/test/std/atomics/atomics.types.generic/trivially_copyable_ref.fail.cpp b/libcxx/test/std/atomics/atomics.types.generic/trivially_copyable_ref.fail.cpp
new file
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/trivially_copyable_ref.fail.cpp
@@ -0,0 +1,57 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// <atomic>
+
+// template <class T>
+// struct atomic_ref
+// {
+//     static constexpr bool is_always_lock_free;
+//     static constexpr size_t required_alignment;
+
+//     bool is_lock_free() const noexcept;
+//     void store(T desr, memory_order m = memory_order_seq_cst) const noexcept;
+//     T load(memory_order m = memory_order_seq_cst) const noexcept;
+//     operator T() const noexcept;
+//     T exchange(T desr, memory_order m = memory_order_seq_cst) const noexcept;
+//     bool compare_exchange_weak(T& expc, T desr, memory_order s, memory_order f) const noexcept;
+//     bool compare_exchange_strong(T& expc, T desr,
+//                                  memory_order s, memory_order f) const noexcept;
+//     bool compare_exchange_weak(T& expc, T desr,
+//                                memory_order m = memory_order_seq_cst) const noexcept;
+//     bool compare_exchange_strong(T& expc, T desr,
+//                                  memory_order m = memory_order_seq_cst) const noexcept;
+
+//     explicit atomic_ref(T& obj) noexcept;
+//     atomic_ref(const atomic_ref&) noexcept;
+//     T operator=(T desired) const noexcept;
+//     atomic_ref& operator=(const atomic_ref&) = delete;
+// };
+
+#include <atomic>
+#include <new>
+#include <cassert>
+#include <thread>  // for thread_id
+#include <chrono>  // for nanoseconds
+
+struct NotTriviallyCopyable {
+  NotTriviallyCopyable(int i) : i_(i) {}
+  NotTriviallyCopyable(const NotTriviallyCopyable& rhs) : i_(rhs.i_) {}
+  int i_;
+};
+
+template <class T>
+void test(T t) {
+  std::atomic_ref<T> t0(t);  // ill-formed: T must be trivially copyable
+}
+
+int main(int, char**) {
+  test(NotTriviallyCopyable(42));
+
+  return 0;
+}
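[Editor's aside, not part of the patch] A closing sketch of the feature these tests exercise, assuming a conforming C++20 std::atomic_ref: several threads update a plain int through atomic_ref objects, and the int is read normally once all references are gone:

#include <atomic>
#include <cassert>
#include <thread>
#include <vector>

int main() {
  alignas(std::atomic_ref<int>::required_alignment) int counter = 0;

  std::vector<std::thread> workers;
  for (int i = 0; i < 4; ++i) {
    workers.emplace_back([&counter] {
      std::atomic_ref<int> ref(counter);  // atomic view of a non-atomic object
      for (int j = 0; j < 1000; ++j)
        ref.fetch_add(1, std::memory_order_relaxed);
    });
  }
  for (auto& t : workers)
    t.join();

  // No atomic_ref refers to `counter` any more; plain access is valid again.
  assert(counter == 4000);
  return 0;
}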